author		Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:27 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:27 -0400
commit		ceeec3dc375e3b0618f16b34efc56fe093918f8b (patch)
tree		2293d02721ee05131aaf1c60e4fba7e281585eec
parent		fbff868db3a4cc6a89d51da9a6d49b26c29d04fb (diff)
parent		e3ee3b78f83688a0ae4315e8be71b2eac559904a (diff)
Merge /spare/repo/netdev-2.6 branch 'ieee80211'
-rw-r--r--Documentation/feature-removal-schedule.txt12
-rw-r--r--Documentation/networking/cxgb.txt352
-rw-r--r--Documentation/networking/phy.txt288
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt1
-rw-r--r--Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl15
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/kernel/signal.c11
-rw-r--r--arch/arm/Kconfig39
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/gic.c166
-rw-r--r--arch/arm/kernel/signal.c5
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c2
-rw-r--r--arch/arm/mach-sa1100/assabet.c7
-rw-r--r--arch/arm/mach-sa1100/cerf.c7
-rw-r--r--arch/arm/mach-sa1100/generic.c5
-rw-r--r--arch/arm/mach-sa1100/generic.h3
-rw-r--r--arch/arm/mach-sa1100/lart.c12
-rw-r--r--arch/arm/mach-sa1100/shannon.c7
-rw-r--r--arch/arm/mach-sa1100/simpad.c7
-rw-r--r--arch/arm26/kernel/signal.c13
-rw-r--r--arch/cris/arch-v10/kernel/signal.c11
-rw-r--r--arch/cris/arch-v32/kernel/signal.c11
-rw-r--r--arch/frv/kernel/signal.c11
-rw-r--r--arch/h8300/kernel/signal.c11
-rw-r--r--arch/i386/kernel/signal.c5
-rw-r--r--arch/i386/pci/common.c1
-rw-r--r--arch/i386/pci/i386.c49
-rw-r--r--arch/ia64/kernel/signal.c15
-rw-r--r--arch/ia64/pci/pci.c1
-rw-r--r--arch/m32r/kernel/signal.c11
-rw-r--r--arch/m68knommu/kernel/signal.c11
-rw-r--r--arch/mips/kernel/irixsig.c11
-rw-r--r--arch/mips/kernel/signal.c11
-rw-r--r--arch/mips/kernel/signal32.c11
-rw-r--r--arch/parisc/kernel/signal.c11
-rw-r--r--arch/ppc/Makefile11
-rw-r--r--arch/ppc/boot/utils/addRamDisk.c203
-rw-r--r--arch/ppc/kernel/signal.c11
-rw-r--r--arch/ppc/syslib/m8xx_setup.c8
-rw-r--r--arch/ppc64/Kconfig74
-rw-r--r--arch/ppc64/Makefile9
-rw-r--r--arch/ppc64/boot/Makefile4
-rw-r--r--arch/ppc64/boot/addnote.c4
-rw-r--r--arch/ppc64/boot/crt0.S2
-rw-r--r--arch/ppc64/boot/div64.S2
-rw-r--r--arch/ppc64/boot/elf.h149
-rw-r--r--arch/ppc64/boot/main.c55
-rw-r--r--arch/ppc64/boot/page.h34
-rw-r--r--arch/ppc64/boot/ppc32-types.h36
-rw-r--r--arch/ppc64/boot/ppc_asm.h62
-rw-r--r--arch/ppc64/boot/prom.c196
-rw-r--r--arch/ppc64/boot/prom.h18
-rw-r--r--arch/ppc64/boot/stdio.h16
-rw-r--r--arch/ppc64/boot/string.S2
-rw-r--r--arch/ppc64/boot/string.h16
-rw-r--r--arch/ppc64/boot/zlib.c2
-rw-r--r--arch/ppc64/configs/g5_defconfig6
-rw-r--r--arch/ppc64/configs/iSeries_defconfig7
-rw-r--r--arch/ppc64/configs/maple_defconfig6
-rw-r--r--arch/ppc64/configs/pSeries_defconfig6
-rw-r--r--arch/ppc64/defconfig6
-rw-r--r--arch/ppc64/kernel/LparData.c37
-rw-r--r--arch/ppc64/kernel/Makefile7
-rw-r--r--arch/ppc64/kernel/asm-offsets.c3
-rw-r--r--arch/ppc64/kernel/cputable.c40
-rw-r--r--arch/ppc64/kernel/firmware.c47
-rw-r--r--arch/ppc64/kernel/head.S523
-rw-r--r--arch/ppc64/kernel/iSeries_htab.c5
-rw-r--r--arch/ppc64/kernel/iSeries_setup.c30
-rw-r--r--arch/ppc64/kernel/iSeries_vio.c155
-rw-r--r--arch/ppc64/kernel/lmb.c151
-rw-r--r--arch/ppc64/kernel/lparcfg.c7
-rw-r--r--arch/ppc64/kernel/misc.S98
-rw-r--r--arch/ppc64/kernel/of_device.c2
-rw-r--r--arch/ppc64/kernel/pSeries_iommu.c3
-rw-r--r--arch/ppc64/kernel/pSeries_lpar.c4
-rw-r--r--arch/ppc64/kernel/pSeries_setup.c39
-rw-r--r--arch/ppc64/kernel/pSeries_smp.c3
-rw-r--r--arch/ppc64/kernel/pSeries_vio.c273
-rw-r--r--arch/ppc64/kernel/pacaData.c4
-rw-r--r--arch/ppc64/kernel/pmac_setup.c2
-rw-r--r--arch/ppc64/kernel/pmc.c21
-rw-r--r--arch/ppc64/kernel/process.c12
-rw-r--r--arch/ppc64/kernel/prom.c184
-rw-r--r--arch/ppc64/kernel/prom_init.c93
-rw-r--r--arch/ppc64/kernel/rtas_pci.c19
-rw-r--r--arch/ppc64/kernel/setup.c30
-rw-r--r--arch/ppc64/kernel/signal.c5
-rw-r--r--arch/ppc64/kernel/signal32.c5
-rw-r--r--arch/ppc64/kernel/sysfs.c57
-rw-r--r--arch/ppc64/kernel/time.c7
-rw-r--r--arch/ppc64/kernel/vio.c444
-rw-r--r--arch/ppc64/mm/hash_low.S4
-rw-r--r--arch/ppc64/mm/hash_native.c3
-rw-r--r--arch/ppc64/mm/hash_utils.c4
-rw-r--r--arch/ppc64/mm/hugetlbpage.c388
-rw-r--r--arch/ppc64/mm/imalloc.c2
-rw-r--r--arch/ppc64/mm/init.c96
-rw-r--r--arch/ppc64/mm/numa.c2
-rw-r--r--arch/ppc64/mm/slb_low.S25
-rw-r--r--arch/ppc64/mm/tlb.c95
-rw-r--r--arch/ppc64/oprofile/common.c1
-rw-r--r--arch/ppc64/xmon/start.c2
-rw-r--r--arch/ppc64/xmon/xmon.c28
-rw-r--r--arch/s390/kernel/compat_signal.c11
-rw-r--r--arch/s390/kernel/signal.c11
-rw-r--r--arch/sh/kernel/signal.c11
-rw-r--r--arch/sh64/kernel/signal.c11
-rw-r--r--arch/sparc/kernel/setup.c1
-rw-r--r--arch/sparc/kernel/signal.c11
-rw-r--r--arch/sparc/kernel/tick14.c1
-rw-r--r--arch/sparc/kernel/time.c1
-rw-r--r--arch/sparc/mm/fault.c1
-rw-r--r--arch/sparc/mm/init.c1
-rw-r--r--arch/sparc64/kernel/entry.S293
-rw-r--r--arch/sparc64/kernel/pci.c1
-rw-r--r--arch/sparc64/kernel/pci_iommu.c2
-rw-r--r--arch/sparc64/kernel/process.c2
-rw-r--r--arch/sparc64/kernel/sbus.c2
-rw-r--r--arch/sparc64/kernel/setup.c1
-rw-r--r--arch/sparc64/kernel/signal.c11
-rw-r--r--arch/sparc64/kernel/signal32.c33
-rw-r--r--arch/sparc64/kernel/smp.c30
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c32
-rw-r--r--arch/sparc64/kernel/traps.c269
-rw-r--r--arch/sparc64/kernel/ttable.S27
-rw-r--r--arch/sparc64/kernel/unaligned.c18
-rw-r--r--arch/sparc64/kernel/winfixup.S6
-rw-r--r--arch/sparc64/lib/Makefile2
-rw-r--r--arch/sparc64/lib/debuglocks.c56
-rw-r--r--arch/sparc64/lib/mb.S73
-rw-r--r--arch/sparc64/solaris/misc.c6
-rw-r--r--arch/um/drivers/mmapper_kern.c41
-rw-r--r--arch/um/kernel/signal_kern.c6
-rw-r--r--arch/v850/kernel/signal.c11
-rw-r--r--arch/x86_64/defconfig21
-rw-r--r--arch/x86_64/kernel/e820.c34
-rw-r--r--arch/x86_64/kernel/signal.c5
-rw-r--r--arch/x86_64/mm/init.c16
-rw-r--r--arch/x86_64/mm/numa.c8
-rw-r--r--arch/xtensa/kernel/signal.c11
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/sleep/poweroff.c6
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/idt77252.c8
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/atm/nicstar.c167
-rw-r--r--drivers/atm/nicstar.h16
-rw-r--r--drivers/atm/zatm.c10
-rw-r--r--drivers/block/aoe/aoenet.c2
-rw-r--r--drivers/block/cfq-iosched.c31
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/bluetooth/bfusb.c16
-rw-r--r--drivers/bluetooth/bluecard_cs.c24
-rw-r--r--drivers/bluetooth/bpa10x.c17
-rw-r--r--drivers/bluetooth/bt3c_cs.c12
-rw-r--r--drivers/bluetooth/btuart_cs.c10
-rw-r--r--drivers/bluetooth/dtl1_cs.c10
-rw-r--r--drivers/bluetooth/hci_bcsp.c18
-rw-r--r--drivers/bluetooth/hci_h4.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c4
-rw-r--r--drivers/bluetooth/hci_usb.c23
-rw-r--r--drivers/bluetooth/hci_vhci.c386
-rw-r--r--drivers/bluetooth/hci_vhci.h50
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/drm/Kconfig16
-rw-r--r--drivers/char/drm/Makefile7
-rw-r--r--drivers/char/drm/drm.h8
-rw-r--r--drivers/char/drm/drmP.h137
-rw-r--r--drivers/char/drm/drm_agpsupport.c143
-rw-r--r--drivers/char/drm/drm_bufs.c647
-rw-r--r--drivers/char/drm/drm_context.c17
-rw-r--r--drivers/char/drm/drm_drv.c79
-rw-r--r--drivers/char/drm/drm_fops.c6
-rw-r--r--drivers/char/drm/drm_ioctl.c2
-rw-r--r--drivers/char/drm/drm_memory.c8
-rw-r--r--drivers/char/drm/drm_pci.c55
-rw-r--r--drivers/char/drm/drm_pciids.h79
-rw-r--r--drivers/char/drm/drm_proc.c17
-rw-r--r--drivers/char/drm/drm_scatter.c11
-rw-r--r--drivers/char/drm/drm_stub.c8
-rw-r--r--drivers/char/drm/drm_vm.c92
-rw-r--r--drivers/char/drm/ffb_drv.c5
-rw-r--r--drivers/char/drm/gamma_context.h492
-rw-r--r--drivers/char/drm/gamma_dma.c946
-rw-r--r--drivers/char/drm/gamma_drm.h90
-rw-r--r--drivers/char/drm/gamma_drv.c59
-rw-r--r--drivers/char/drm/gamma_drv.h147
-rw-r--r--drivers/char/drm/gamma_lists.h215
-rw-r--r--drivers/char/drm/gamma_lock.h140
-rw-r--r--drivers/char/drm/gamma_old_dma.h313
-rw-r--r--drivers/char/drm/i810_dma.c22
-rw-r--r--drivers/char/drm/i810_drv.c1
-rw-r--r--drivers/char/drm/i810_drv.h1
-rw-r--r--drivers/char/drm/i830_dma.c22
-rw-r--r--drivers/char/drm/i830_drv.c1
-rw-r--r--drivers/char/drm/i830_drv.h1
-rw-r--r--drivers/char/drm/i915_dma.c31
-rw-r--r--drivers/char/drm/i915_drv.c1
-rw-r--r--drivers/char/drm/i915_drv.h4
-rw-r--r--drivers/char/drm/mga_dma.c602
-rw-r--r--drivers/char/drm/mga_drm.h98
-rw-r--r--drivers/char/drm/mga_drv.c45
-rw-r--r--drivers/char/drm/mga_drv.h94
-rw-r--r--drivers/char/drm/mga_ioc32.c67
-rw-r--r--drivers/char/drm/mga_irq.c72
-rw-r--r--drivers/char/drm/mga_state.c158
-rw-r--r--drivers/char/drm/mga_warp.c141
-rw-r--r--drivers/char/drm/r128_cce.c6
-rw-r--r--drivers/char/drm/r128_drm.h2
-rw-r--r--drivers/char/drm/r300_cmdbuf.c801
-rw-r--r--drivers/char/drm/r300_reg.h1412
-rw-r--r--drivers/char/drm/radeon_cp.c35
-rw-r--r--drivers/char/drm/radeon_drm.h46
-rw-r--r--drivers/char/drm/radeon_drv.c1
-rw-r--r--drivers/char/drm/radeon_drv.h30
-rw-r--r--drivers/char/drm/radeon_state.c75
-rw-r--r--drivers/char/drm/savage_bci.c1096
-rw-r--r--drivers/char/drm/savage_drm.h209
-rw-r--r--drivers/char/drm/savage_drv.c112
-rw-r--r--drivers/char/drm/savage_drv.h579
-rw-r--r--drivers/char/drm/savage_state.c1146
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/hvcs.c2
-rw-r--r--drivers/char/random.c34
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/char/vt.c2
-rw-r--r--drivers/hwmon/adm1026.c4
-rw-r--r--drivers/hwmon/adm1031.c4
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/fscpos.c2
-rw-r--r--drivers/hwmon/smsc47b397.c2
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/ieee1394/ieee1394_core.c4
-rw-r--r--drivers/ieee1394/ohci1394.c8
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/agent.c13
-rw-r--r--drivers/infiniband/core/agent_priv.h10
-rw-r--r--drivers/infiniband/core/cache.c6
-rw-r--r--drivers/infiniband/core/cm.c125
-rw-r--r--drivers/infiniband/core/cm_msgs.h194
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/fmr_pool.c8
-rw-r--r--drivers/infiniband/core/mad.c15
-rw-r--r--drivers/infiniband/core/mad_priv.h10
-rw-r--r--drivers/infiniband/core/mad_rmpp.c311
-rw-r--r--drivers/infiniband/core/packer.c3
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/smi.c13
-rw-r--r--drivers/infiniband/core/sysfs.c40
-rw-r--r--drivers/infiniband/core/ucm.c464
-rw-r--r--drivers/infiniband/core/ucm.h13
-rw-r--r--drivers/infiniband/core/ud_header.c11
-rw-r--r--drivers/infiniband/core/user_mad.c10
-rw-r--r--drivers/infiniband/core/uverbs.h11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c182
-rw-r--r--drivers/infiniband/core/uverbs_main.c25
-rw-r--r--drivers/infiniband/core/uverbs_mem.c1
-rw-r--r--drivers/infiniband/core/verbs.c65
-rw-r--r--drivers/infiniband/hw/mthca/Makefile4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c116
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c28
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c106
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.h20
-rw-r--r--drivers/infiniband/hw/mthca/mthca_config_reg.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c256
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h52
-rw-r--r--drivers/infiniband/hw/mthca/mthca_doorbell.h13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c63
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c179
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c36
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_pd.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c115
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h54
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c362
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c591
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h11
-rw-r--r--drivers/infiniband/hw/mthca/mthca_wqe.h114
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c33
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/isdn/act2000/capi.c2
-rw-r--r--drivers/isdn/capi/capifs.c4
-rw-r--r--drivers/isdn/i4l/isdn_net.c1
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c1
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c19
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-dvb.c5
-rw-r--r--drivers/media/dvb/frontends/tda80xx.c1
-rw-r--r--drivers/mfd/Kconfig16
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/mcp-core.c255
-rw-r--r--drivers/mfd/mcp-sa11x0.c275
-rw-r--r--drivers/mfd/mcp.h66
-rw-r--r--drivers/mmc/mmc.c29
-rw-r--r--drivers/mmc/mmc.h5
-rw-r--r--drivers/mmc/mmc_sysfs.c90
-rw-r--r--drivers/mmc/mmci.c4
-rw-r--r--drivers/mmc/wbsd.c2
-rw-r--r--drivers/net/Kconfig32
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/Space.c12
-rw-r--r--drivers/net/bnx2.c229
-rw-r--r--drivers/net/bnx2.h10
-rw-r--r--drivers/net/bonding/bond_3ad.c11
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_alb.c22
-rw-r--r--drivers/net/bonding/bond_main.c58
-rw-r--r--drivers/net/bonding/bonding.h3
-rw-r--r--drivers/net/chelsio/Makefile11
-rw-r--r--drivers/net/chelsio/common.h314
-rw-r--r--drivers/net/chelsio/cphy.h148
-rw-r--r--drivers/net/chelsio/cpl5_cmd.h145
-rw-r--r--drivers/net/chelsio/cxgb2.c1256
-rw-r--r--drivers/net/chelsio/elmer0.h151
-rw-r--r--drivers/net/chelsio/espi.c346
-rw-r--r--drivers/net/chelsio/espi.h68
-rw-r--r--drivers/net/chelsio/gmac.h134
-rw-r--r--drivers/net/chelsio/mv88x201x.c252
-rw-r--r--drivers/net/chelsio/pm3393.c826
-rw-r--r--drivers/net/chelsio/regs.h468
-rw-r--r--drivers/net/chelsio/sge.c1684
-rw-r--r--drivers/net/chelsio/sge.h105
-rw-r--r--drivers/net/chelsio/subr.c812
-rw-r--r--drivers/net/chelsio/suni1x10gexp_regs.h213
-rw-r--r--drivers/net/e100.c241
-rw-r--r--drivers/net/e1000/e1000_main.c4
-rw-r--r--drivers/net/eepro100.c8
-rw-r--r--drivers/net/forcedeth.c582
-rw-r--r--drivers/net/hamradio/6pack.c9
-rw-r--r--drivers/net/hamradio/Kconfig2
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/hamradio/baycom_par.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c3
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hamradio/mkiss.c1086
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c170
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c59
-rw-r--r--drivers/net/ixgb/ixgb_hw.h9
-rw-r--r--drivers/net/ixgb/ixgb_main.c53
-rw-r--r--drivers/net/jazzsonic.c186
-rw-r--r--drivers/net/loopback.c22
-rw-r--r--drivers/net/macsonic.c538
-rw-r--r--drivers/net/mv643xx_eth.c29
-rw-r--r--drivers/net/mv643xx_eth.h4
-rw-r--r--drivers/net/pci-skeleton.c6
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c25
-rw-r--r--drivers/net/phy/Kconfig57
-rw-r--r--drivers/net/phy/Makefile10
-rw-r--r--drivers/net/phy/cicada.c134
-rw-r--r--drivers/net/phy/davicom.c195
-rw-r--r--drivers/net/phy/lxt.c179
-rw-r--r--drivers/net/phy/marvell.c140
-rw-r--r--drivers/net/phy/mdio_bus.c176
-rw-r--r--drivers/net/phy/phy.c871
-rw-r--r--drivers/net/phy/phy_device.c696
-rw-r--r--drivers/net/phy/qsemi.c143
-rw-r--r--drivers/net/ppp_generic.c1
-rw-r--r--drivers/net/pppoe.c6
-rw-r--r--drivers/net/r8169.c1
-rw-r--r--drivers/net/rrunner.c3
-rw-r--r--drivers/net/s2io-regs.h87
-rw-r--r--drivers/net/s2io.c3085
-rw-r--r--drivers/net/s2io.h360
-rw-r--r--drivers/net/shaper.c50
-rw-r--r--drivers/net/sis190.c1843
-rw-r--r--drivers/net/skge.c65
-rw-r--r--drivers/net/skge.h19
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/sonic.c676
-rw-r--r--drivers/net/sonic.h460
-rw-r--r--drivers/net/tg3.c331
-rw-r--r--drivers/net/tg3.h10
-rw-r--r--drivers/net/tokenring/Kconfig4
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/madgemc.c521
-rw-r--r--drivers/net/tokenring/proteon.c104
-rw-r--r--drivers/net/tokenring/skisa.c104
-rw-r--r--drivers/net/tokenring/tms380tr.c46
-rw-r--r--drivers/net/tokenring/tms380tr.h9
-rw-r--r--drivers/net/tokenring/tmspci.c4
-rw-r--r--drivers/net/tulip/Kconfig12
-rw-r--r--drivers/net/tulip/Makefile1
-rw-r--r--drivers/net/tulip/media.c36
-rw-r--r--drivers/net/tulip/timer.c1
-rw-r--r--drivers/net/tulip/tulip.h8
-rw-r--r--drivers/net/tulip/tulip_core.c34
-rw-r--r--drivers/net/tulip/uli526x.c1749
-rw-r--r--drivers/net/wan/cycx_drv.c24
-rw-r--r--drivers/net/wan/hdlc_generic.c2
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/sdla_fr.c22
-rw-r--r--drivers/net/wan/syncppp.c2
-rw-r--r--drivers/net/wireless/orinoco.c78
-rw-r--r--drivers/parport/parport_serial.c339
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/pci/setup-res.c4
-rw-r--r--drivers/s390/cio/qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c28
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c25
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c25
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_port.c2
-rw-r--r--drivers/scsi/ahci.c52
-rw-r--r--drivers/scsi/ata_piix.c72
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c1
-rw-r--r--drivers/scsi/libata-core.c299
-rw-r--r--drivers/scsi/libata-scsi.c119
-rw-r--r--drivers/scsi/libata.h47
-rw-r--r--drivers/scsi/sata_nv.c62
-rw-r--r--drivers/scsi/sata_promise.c95
-rw-r--r--drivers/scsi/sata_promise.h31
-rw-r--r--drivers/scsi/sata_qstor.c43
-rw-r--r--drivers/scsi/sata_sil.c69
-rw-r--r--drivers/scsi/sata_sis.c35
-rw-r--r--drivers/scsi/sata_svw.c43
-rw-r--r--drivers/scsi/sata_sx4.c179
-rw-r--r--drivers/scsi/sata_uli.c35
-rw-r--r--drivers/scsi/sata_via.c64
-rw-r--r--drivers/scsi/sata_vsc.c31
-rw-r--r--drivers/scsi/sg.c13
-rw-r--r--drivers/serial/8250_pci.c474
-rw-r--r--drivers/usb/net/usbnet.c21
-rw-r--r--drivers/w1/w1_int.c6
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--fs/adfs/adfs.h4
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/hppfs/hppfs_kern.c30
-rw-r--r--fs/inotify.c2
-rw-r--r--fs/jfs/namei.c5
-rw-r--r--fs/smbfs/sock.c2
-rw-r--r--fs/sysfs/inode.c4
-rw-r--r--include/asm-alpha/socket.h2
-rw-r--r--include/asm-arm/arch-sa1100/mcp.h21
-rw-r--r--include/asm-arm/hardware/gic.h41
-rw-r--r--include/asm-arm/socket.h2
-rw-r--r--include/asm-arm26/socket.h2
-rw-r--r--include/asm-cris/socket.h2
-rw-r--r--include/asm-frv/socket.h2
-rw-r--r--include/asm-h8300/socket.h2
-rw-r--r--include/asm-i386/checksum.h2
-rw-r--r--include/asm-i386/socket.h2
-rw-r--r--include/asm-ia64/socket.h2
-rw-r--r--include/asm-m32r/checksum.h2
-rw-r--r--include/asm-m32r/socket.h2
-rw-r--r--include/asm-m68k/page.h6
-rw-r--r--include/asm-m68k/socket.h2
-rw-r--r--include/asm-mips/socket.h2
-rw-r--r--include/asm-parisc/socket.h2
-rw-r--r--include/asm-powerpc/8253pit.h (renamed from include/asm-ppc/8253pit.h)2
-rw-r--r--include/asm-powerpc/agp.h (renamed from include/asm-ppc/agp.h)0
-rw-r--r--include/asm-powerpc/cputime.h1
-rw-r--r--include/asm-powerpc/div64.h (renamed from include/asm-ppc/div64.h)0
-rw-r--r--include/asm-powerpc/emergency-restart.h1
-rw-r--r--include/asm-powerpc/errno.h (renamed from include/asm-ppc/errno.h)0
-rw-r--r--include/asm-powerpc/ioctl.h (renamed from include/asm-ppc/ioctl.h)0
-rw-r--r--include/asm-powerpc/ioctls.h (renamed from include/asm-ppc/ioctls.h)0
-rw-r--r--include/asm-powerpc/ipc.h (renamed from include/asm-ppc/ipc.h)0
-rw-r--r--include/asm-powerpc/linkage.h (renamed from include/asm-ppc/linkage.h)0
-rw-r--r--include/asm-powerpc/local.h (renamed from include/asm-ppc64/local.h)0
-rw-r--r--include/asm-powerpc/namei.h (renamed from include/asm-ppc/namei.h)0
-rw-r--r--include/asm-powerpc/percpu.h1
-rw-r--r--include/asm-powerpc/poll.h (renamed from include/asm-ppc/poll.h)0
-rw-r--r--include/asm-powerpc/resource.h1
-rw-r--r--include/asm-powerpc/shmparam.h (renamed from include/asm-ppc/shmparam.h)0
-rw-r--r--include/asm-powerpc/string.h (renamed from include/asm-ppc/string.h)0
-rw-r--r--include/asm-powerpc/unaligned.h (renamed from include/asm-ppc/unaligned.h)0
-rw-r--r--include/asm-powerpc/xor.h (renamed from include/asm-ppc/xor.h)0
-rw-r--r--include/asm-ppc/cputime.h6
-rw-r--r--include/asm-ppc/emergency-restart.h6
-rw-r--r--include/asm-ppc/hdreg.h1
-rw-r--r--include/asm-ppc/local.h6
-rw-r--r--include/asm-ppc/percpu.h6
-rw-r--r--include/asm-ppc/resource.h6
-rw-r--r--include/asm-ppc/socket.h2
-rw-r--r--include/asm-ppc64/8253pit.h10
-rw-r--r--include/asm-ppc64/abs_addr.h86
-rw-r--r--include/asm-ppc64/agp.h23
-rw-r--r--include/asm-ppc64/bug.h7
-rw-r--r--include/asm-ppc64/cputable.h47
-rw-r--r--include/asm-ppc64/cputime.h6
-rw-r--r--include/asm-ppc64/div64.h1
-rw-r--r--include/asm-ppc64/emergency-restart.h6
-rw-r--r--include/asm-ppc64/errno.h18
-rw-r--r--include/asm-ppc64/firmware.h101
-rw-r--r--include/asm-ppc64/hdreg.h1
-rw-r--r--include/asm-ppc64/imalloc.h2
-rw-r--r--include/asm-ppc64/ioctl.h74
-rw-r--r--include/asm-ppc64/ioctls.h114
-rw-r--r--include/asm-ppc64/iommu.h3
-rw-r--r--include/asm-ppc64/ipc.h1
-rw-r--r--include/asm-ppc64/linkage.h6
-rw-r--r--include/asm-ppc64/lmb.h1
-rw-r--r--include/asm-ppc64/machdep.h3
-rw-r--r--include/asm-ppc64/mmu.h16
-rw-r--r--include/asm-ppc64/naca.h7
-rw-r--r--include/asm-ppc64/namei.h23
-rw-r--r--include/asm-ppc64/page.h55
-rw-r--r--include/asm-ppc64/param.h4
-rw-r--r--include/asm-ppc64/percpu.h6
-rw-r--r--include/asm-ppc64/pgalloc.h93
-rw-r--r--include/asm-ppc64/pgtable.h92
-rw-r--r--include/asm-ppc64/pmc.h2
-rw-r--r--include/asm-ppc64/poll.h32
-rw-r--r--include/asm-ppc64/processor.h5
-rw-r--r--include/asm-ppc64/prom.h14
-rw-r--r--include/asm-ppc64/resource.h6
-rw-r--r--include/asm-ppc64/shmparam.h13
-rw-r--r--include/asm-ppc64/socket.h2
-rw-r--r--include/asm-ppc64/string.h35
-rw-r--r--include/asm-ppc64/system.h4
-rw-r--r--include/asm-ppc64/unaligned.h21
-rw-r--r--include/asm-ppc64/vio.h91
-rw-r--r--include/asm-ppc64/xor.h1
-rw-r--r--include/asm-s390/socket.h2
-rw-r--r--include/asm-sh/socket.h2
-rw-r--r--include/asm-sparc/processor.h1
-rw-r--r--include/asm-sparc/segment.h6
-rw-r--r--include/asm-sparc/socket.h2
-rw-r--r--include/asm-sparc/system.h1
-rw-r--r--include/asm-sparc64/atomic.h8
-rw-r--r--include/asm-sparc64/bitops.h4
-rw-r--r--include/asm-sparc64/processor.h1
-rw-r--r--include/asm-sparc64/segment.h6
-rw-r--r--include/asm-sparc64/sfafsr.h82
-rw-r--r--include/asm-sparc64/socket.h2
-rw-r--r--include/asm-sparc64/spinlock.h42
-rw-r--r--include/asm-sparc64/system.h17
-rw-r--r--include/asm-v850/socket.h2
-rw-r--r--include/asm-x86_64/checksum.h2
-rw-r--r--include/asm-x86_64/e820.h2
-rw-r--r--include/asm-x86_64/socket.h2
-rw-r--r--include/asm-xtensa/socket.h2
-rw-r--r--include/linux/8250_pci.h39
-rw-r--r--include/linux/ata.h45
-rw-r--r--include/linux/dccp.h456
-rw-r--r--include/linux/ethtool.h17
-rw-r--r--include/linux/hippidevice.h8
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_fc.h2
-rw-r--r--include/linux/if_fddi.h2
-rw-r--r--include/linux/if_frad.h6
-rw-r--r--include/linux/if_hippi.h6
-rw-r--r--include/linux/if_tr.h4
-rw-r--r--include/linux/if_vlan.h1
-rw-r--r--include/linux/igmp.h3
-rw-r--r--include/linux/in.h1
-rw-r--r--include/linux/inet_diag.h138
-rw-r--r--include/linux/ip.h2
-rw-r--r--include/linux/ipv6.h52
-rw-r--r--include/linux/libata.h49
-rw-r--r--include/linux/list.h65
-rw-r--r--include/linux/mii.h9
-rw-r--r--include/linux/mmc/host.h4
-rw-r--r--include/linux/mod_devicetable.h17
-rw-r--r--include/linux/net.h11
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter.h88
-rw-r--r--include/linux/netfilter/nfnetlink.h169
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h124
-rw-r--r--include/linux/netfilter/nfnetlink_log.h88
-rw-r--r--include/linux/netfilter/nfnetlink_queue.h89
-rw-r--r--include/linux/netfilter_decnet.h17
-rw-r--r--include/linux/netfilter_ipv4.h8
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h189
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_core.h18
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_helper.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_protocol.h24
-rw-r--r--include/linux/netfilter_ipv4/ip_logging.h20
-rw-r--r--include/linux/netfilter_ipv4/ip_nat_protocol.h25
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h3
-rw-r--r--include/linux/netfilter_ipv4/ipt_LOG.h1
-rw-r--r--include/linux/netfilter_ipv4/ipt_NFQUEUE.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_TTL.h21
-rw-r--r--include/linux/netfilter_ipv4/ipt_connbytes.h25
-rw-r--r--include/linux/netfilter_ipv4/ipt_dccp.h23
-rw-r--r--include/linux/netfilter_ipv4/ipt_string.h18
-rw-r--r--include/linux/netfilter_ipv6.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6_logging.h20
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6t_HL.h22
-rw-r--r--include/linux/netfilter_ipv6/ip6t_LOG.h1
-rw-r--r--include/linux/netfilter_ipv6/ip6t_REJECT.h18
-rw-r--r--include/linux/netlink.h18
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/phy.h377
-rw-r--r--include/linux/random.h2
-rw-r--r--include/linux/rtnetlink.h42
-rw-r--r--include/linux/security.h6
-rw-r--r--include/linux/selinux_netlink.h13
-rw-r--r--include/linux/serialP.h40
-rw-r--r--include/linux/skbuff.h118
-rw-r--r--include/linux/socket.h9
-rw-r--r--include/linux/sound.h2
-rw-r--r--include/linux/tcp.h82
-rw-r--r--include/linux/tcp_diag.h127
-rw-r--r--include/linux/types.h3
-rw-r--r--include/linux/xfrm.h18
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/addrconf.h6
-rw-r--r--include/net/af_unix.h15
-rw-r--r--include/net/arp.h2
-rw-r--r--include/net/ax25.h2
-rw-r--r--include/net/bluetooth/bluetooth.h5
-rw-r--r--include/net/bluetooth/hci.h15
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bluetooth/rfcomm.h14
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/dn.h1
-rw-r--r--include/net/icmp.h7
-rw-r--r--include/net/inet6_hashtables.h130
-rw-r--r--include/net/inet_common.h6
-rw-r--r--include/net/inet_connection_sock.h276
-rw-r--r--include/net/inet_hashtables.h427
-rw-r--r--include/net/inet_timewait_sock.h219
-rw-r--r--include/net/ip.h32
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/ipv6.h39
-rw-r--r--include/net/llc.h8
-rw-r--r--include/net/neighbour.h9
-rw-r--r--include/net/p8022.h5
-rw-r--r--include/net/pkt_cls.h6
-rw-r--r--include/net/psnap.h2
-rw-r--r--include/net/raw.h9
-rw-r--r--include/net/rawv6.h5
-rw-r--r--include/net/request_sock.h14
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sctp/constants.h2
-rw-r--r--include/net/sock.h113
-rw-r--r--include/net/tcp.h723
-rw-r--r--include/net/tcp_ecn.h2
-rw-r--r--include/net/tcp_states.h34
-rw-r--r--include/net/udp.h5
-rw-r--r--include/net/x25.h2
-rw-r--r--include/net/x25device.h1
-rw-r--r--include/net/xfrm.h1
-rw-r--r--include/rdma/ib_cache.h (renamed from drivers/infiniband/include/ib_cache.h)4
-rw-r--r--include/rdma/ib_cm.h (renamed from drivers/infiniband/include/ib_cm.h)93
-rw-r--r--include/rdma/ib_fmr_pool.h (renamed from drivers/infiniband/include/ib_fmr_pool.h)2
-rw-r--r--include/rdma/ib_mad.h (renamed from drivers/infiniband/include/ib_mad.h)26
-rw-r--r--include/rdma/ib_pack.h (renamed from drivers/infiniband/include/ib_pack.h)2
-rw-r--r--include/rdma/ib_sa.h (renamed from drivers/infiniband/include/ib_sa.h)22
-rw-r--r--include/rdma/ib_smi.h (renamed from drivers/infiniband/include/ib_smi.h)20
-rw-r--r--include/rdma/ib_user_cm.h (renamed from drivers/infiniband/include/ib_user_cm.h)28
-rw-r--r--include/rdma/ib_user_mad.h (renamed from drivers/infiniband/include/ib_user_mad.h)10
-rw-r--r--include/rdma/ib_user_verbs.h (renamed from drivers/infiniband/include/ib_user_verbs.h)39
-rw-r--r--include/rdma/ib_verbs.h (renamed from drivers/infiniband/include/ib_verbs.h)118
-rw-r--r--include/sound/ac97_codec.h9
-rw-r--r--include/sound/ad1816a.h1
-rw-r--r--include/sound/asound.h6
-rw-r--r--include/sound/cs46xx.h2
-rw-r--r--include/sound/emu10k1.h2
-rw-r--r--include/sound/gus.h8
-rw-r--r--include/sound/pcm.h1
-rw-r--r--include/sound/version.h4
-rw-r--r--include/sound/ymfpci.h6
-rw-r--r--init/main.c2
-rw-r--r--kernel/audit.c3
-rw-r--r--kernel/cpuset.c30
-rw-r--r--kernel/sysctl.c4
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile1
-rw-r--r--lib/idr.c2
-rw-r--r--lib/kobject_uevent.c4
-rw-r--r--lib/ts_bm.c185
-rw-r--r--mm/memory.c11
-rw-r--r--net/802/fc.c2
-rw-r--r--net/802/fddi.c4
-rw-r--r--net/802/hippi.c6
-rw-r--r--net/802/p8022.c3
-rw-r--r--net/802/p8023.c1
-rw-r--r--net/802/psnap.c7
-rw-r--r--net/802/sysctl_net_802.c3
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/Kconfig3
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/aarp.c2
-rw-r--r--net/appletalk/ddp.c8
-rw-r--r--net/atm/ipcommon.c3
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_ds_in.c3
-rw-r--r--net/ax25/ax25_ds_timer.c2
-rw-r--r--net/ax25/ax25_in.c17
-rw-r--r--net/ax25/ax25_std_in.c3
-rw-r--r--net/ax25/ax25_std_timer.c2
-rw-r--r--net/ax25/ax25_subr.c4
-rw-r--r--net/bluetooth/hci_core.c18
-rw-r--r--net/bluetooth/hci_event.c81
-rw-r--r--net/bluetooth/hci_sock.c26
-rw-r--r--net/bluetooth/l2cap.c2
-rw-r--r--net/bluetooth/rfcomm/core.c73
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c206
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/netfilter/ebt_mark.c5
-rw-r--r--net/bridge/netfilter/ebt_ulog.c9
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/dev.c79
-rw-r--r--net/core/ethtool.c49
-rw-r--r--net/core/flow.c2
-rw-r--r--net/core/neighbour.c15
-rw-r--r--net/core/netfilter.c648
-rw-r--r--net/core/request_sock.c28
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/skbuff.c158
-rw-r--r--net/core/sock.c133
-rw-r--r--net/core/sysctl_net_core.c9
-rw-r--r--net/core/utils.c2
-rw-r--r--net/core/wireless.c8
-rw-r--r--net/dccp/Kconfig50
-rw-r--r--net/dccp/Makefile10
-rw-r--r--net/dccp/ccid.c139
-rw-r--r--net/dccp/ccid.h180
-rw-r--r--net/dccp/ccids/Kconfig29
-rw-r--r--net/dccp/ccids/Makefile5
-rw-r--r--net/dccp/ccids/ccid3.c1221
-rw-r--r--net/dccp/ccids/ccid3.h137
-rw-r--r--net/dccp/ccids/lib/Makefile3
-rw-r--r--net/dccp/ccids/lib/loss_interval.c144
-rw-r--r--net/dccp/ccids/lib/loss_interval.h61
-rw-r--r--net/dccp/ccids/lib/packet_history.c398
-rw-r--r--net/dccp/ccids/lib/packet_history.h199
-rw-r--r--net/dccp/ccids/lib/tfrc.h22
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c644
-rw-r--r--net/dccp/dccp.h493
-rw-r--r--net/dccp/diag.c71
-rw-r--r--net/dccp/input.c600
-rw-r--r--net/dccp/ipv4.c1356
-rw-r--r--net/dccp/minisocks.c264
-rw-r--r--net/dccp/options.c855
-rw-r--r--net/dccp/output.c528
-rw-r--r--net/dccp/proto.c826
-rw-r--r--net/dccp/timer.c255
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/decnet/dn_dev.c8
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/decnet/dn_nsp_out.c2
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/decnet/dn_table.c6
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c11
-rw-r--r--net/econet/af_econet.c8
-rw-r--r--net/ethernet/eth.c3
-rw-r--r--net/ethernet/sysctl_net_ether.c1
-rw-r--r--net/ipv4/Kconfig17
-rw-r--r--net/ipv4/Makefile8
-rw-r--r--net/ipv4/af_inet.c179
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/datagram.c3
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/esp4.c12
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/fib_hash.c4
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c7
-rw-r--r--net/ipv4/fib_trie.c1628
-rw-r--r--net/ipv4/icmp.c14
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_connection_sock.c641
-rw-r--r--net/ipv4/inet_diag.c868
-rw-r--r--net/ipv4/inet_hashtables.c165
-rw-r--r--net/ipv4/inet_timewait_sock.c384
-rw-r--r--net/ipv4/inetpeer.c5
-rw-r--r--net/ipv4/ip_forward.c6
-rw-r--r--net/ipv4/ip_fragment.c10
-rw-r--r--net/ipv4/ip_input.c141
-rw-r--r--net/ipv4/ip_options.c52
-rw-r--r--net/ipv4/ip_output.c24
-rw-r--r--net/ipv4/ip_sockglue.c8
-rw-r--r--net/ipv4/ipcomp.c4
-rw-r--r--net/ipv4/ipconfig.c8
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_app.c1
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c9
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_tcp.c8
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c2
-rw-r--r--net/ipv4/multipath_drr.c2
-rw-r--r--net/ipv4/netfilter.c139
-rw-r--r--net/ipv4/netfilter/Kconfig70
-rw-r--r--net/ipv4/netfilter/Makefile9
-rw-r--r--net/ipv4/netfilter/ip_conntrack_amanda.c18
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c379
-rw-r--r--net/ipv4/netfilter/ip_conntrack_ftp.c21
-rw-r--r--net/ipv4/netfilter/ip_conntrack_irc.c7
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c1579
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_icmp.c73
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_sctp.c9
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c48
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_udp.c14
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c49
-rw-r--r--net/ipv4/netfilter/ip_nat_core.c104
-rw-r--r--net/ipv4/netfilter/ip_nat_helper.c8
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_icmp.c23
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_tcp.c24
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_udp.c23
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_unknown.c13
-rw-r--r--net/ipv4/netfilter/ip_nat_snmp_basic.c2
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c4
-rw-r--r--net/ipv4/netfilter/ip_queue.c51
-rw-r--r--net/ipv4/netfilter/ip_tables.c5
-rw-r--r--net/ipv4/netfilter/ipt_CLASSIFY.c4
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/ipt_CONNMARK.c15
-rw-r--r--net/ipv4/netfilter/ipt_DSCP.c3
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c6
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c86
-rw-r--r--net/ipv4/netfilter/ipt_MARK.c22
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c5
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c8
-rw-r--r--net/ipv4/netfilter/ipt_NFQUEUE.c70
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c1
-rw-r--r--net/ipv4/netfilter/ipt_TCPMSS.c3
-rw-r--r--net/ipv4/netfilter/ipt_TOS.c3
-rw-r--r--net/ipv4/netfilter/ipt_TTL.c119
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c53
-rw-r--r--net/ipv4/netfilter/ipt_connbytes.c162
-rw-r--r--net/ipv4/netfilter/ipt_connmark.c7
-rw-r--r--net/ipv4/netfilter/ipt_dccp.c176
-rw-r--r--net/ipv4/netfilter/ipt_hashlimit.c2
-rw-r--r--net/ipv4/netfilter/ipt_mark.c7
-rw-r--r--net/ipv4/netfilter/ipt_owner.c132
-rw-r--r--net/ipv4/netfilter/ipt_string.c91
-rw-r--r--net/ipv4/proc.c5
-rw-r--r--net/ipv4/protocol.c1
-rw-r--r--net/ipv4/raw.c7
-rw-r--r--net/ipv4/route.c8
-rw-r--r--net/ipv4/syncookies.c4
-rw-r--r--net/ipv4/sysctl_net_ipv4.c47
-rw-r--r--net/ipv4/tcp.c402
-rw-r--r--net/ipv4/tcp_bic.c46
-rw-r--r--net/ipv4/tcp_cong.c44
-rw-r--r--net/ipv4/tcp_diag.c784
-rw-r--r--net/ipv4/tcp_highspeed.c17
-rw-r--r--net/ipv4/tcp_htcp.c53
-rw-r--r--net/ipv4/tcp_hybla.c31
-rw-r--r--net/ipv4/tcp_input.c513
-rw-r--r--net/ipv4/tcp_ipv4.c944
-rw-r--r--net/ipv4/tcp_minisocks.c605
-rw-r--r--net/ipv4/tcp_output.c143
-rw-r--r--net/ipv4/tcp_scalable.c6
-rw-r--r--net/ipv4/tcp_timer.c253
-rw-r--r--net/ipv4/tcp_vegas.c50
-rw-r--r--net/ipv4/tcp_westwood.c64
-rw-r--r--net/ipv4/udp.c37
-rw-r--r--net/ipv4/xfrm4_state.c2
-rw-r--r--net/ipv6/Makefile4
-rw-r--r--net/ipv6/addrconf.c32
-rw-r--r--net/ipv6/af_inet6.c62
-rw-r--r--net/ipv6/ah6.c13
-rw-r--r--net/ipv6/datagram.c5
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/exthdrs.c8
-rw-r--r--net/ipv6/icmp.c25
-rw-r--r--net/ipv6/inet6_hashtables.c81
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_output.c54
-rw-r--r--net/ipv6/ipv6_sockglue.c25
-rw-r--r--net/ipv6/ipv6_syms.c3
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/netfilter.c104
-rw-r--r--net/ipv6/netfilter/Kconfig37
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6_queue.c48
-rw-r--r--net/ipv6/netfilter/ip6_tables.c5
-rw-r--r--net/ipv6/netfilter/ip6t_HL.c118
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c93
-rw-r--r--net/ipv6/netfilter/ip6t_MARK.c5
-rw-r--r--net/ipv6/netfilter/ip6t_NFQUEUE.c70
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c284
-rw-r--r--net/ipv6/netfilter/ip6t_owner.c90
-rw-r--r--net/ipv6/raw.c20
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c3
-rw-r--r--net/ipv6/tcp_ipv6.c437
-rw-r--r--net/ipv6/udp.c12
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/ipx/af_ipx.c10
-rw-r--r--net/ipx/ipx_proc.c2
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/irlap_frame.c8
-rw-r--r--net/irda/irlmp.c3
-rw-r--r--net/irda/irmod.c2
-rw-r--r--net/irda/irnet/irnet.h3
-rw-r--r--net/irda/irnet/irnet_ppp.c2
-rw-r--r--net/irda/irqueue.c1
-rw-r--r--net/lapb/lapb_subr.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/llc/llc_conn.c8
-rw-r--r--net/llc/llc_core.c3
-rw-r--r--net/llc/llc_if.c2
-rw-r--r--net/llc/llc_input.c4
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/netfilter/Kconfig24
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/core.c216
-rw-r--r--net/netfilter/nf_internals.h39
-rw-r--r--net/netfilter/nf_log.c178
-rw-r--r--net/netfilter/nf_queue.c343
-rw-r--r--net/netfilter/nf_sockopt.c132
-rw-r--r--net/netfilter/nfnetlink.c376
-rw-r--r--net/netfilter/nfnetlink_log.c1055
-rw-r--r--net/netfilter/nfnetlink_queue.c1132
-rw-r--r--net/netlink/af_netlink.c312
-rw-r--r--net/netrom/af_netrom.c7
-rw-r--r--net/netrom/nr_dev.c5
-rw-r--r--net/netrom/nr_in.c3
-rw-r--r--net/netrom/nr_subr.c4
-rw-r--r--net/netrom/nr_timer.c2
-rw-r--r--net/packet/af_packet.c14
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rose/rose_in.c3
-rw-r--r--net/rose/rose_route.c2
-rw-r--r--net/rose/rose_subr.c4
-rw-r--r--net/rose/rose_timer.c2
-rw-r--r--net/rxrpc/transport.c2
-rw-r--r--net/sched/Kconfig1
-rw-r--r--net/sched/act_api.c15
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/gact.c2
-rw-r--r--net/sched/ipt.c2
-rw-r--r--net/sched/mirred.c2
-rw-r--r--net/sched/pedit.c2
-rw-r--r--net/sched/police.c3
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_generic.c16
-rw-r--r--net/sched/simple.c2
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/ipv6.c7
-rw-r--r--net/sctp/protocol.c14
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/ulpqueue.c63
-rw-r--r--net/socket.c19
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/sunrpc/svcsock.c13
-rw-r--r--net/sysctl_net.c8
-rw-r--r--net/unix/af_unix.c10
-rw-r--r--net/unix/garbage.c14
-rw-r--r--net/unix/sysctl_net_unix.c2
-rw-r--r--net/wanrouter/af_wanpipe.c2
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--net/x25/x25_subr.c4
-rw-r--r--net/x25/x25_timer.c2
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_user.c31
-rw-r--r--scripts/mod/file2alias.c19
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/netlink.c6
-rw-r--r--security/selinux/nlmsgtab.c3
-rw-r--r--sound/arm/pxa2xx-ac97.c12
-rw-r--r--sound/core/memalloc.c5
-rw-r--r--sound/core/memory.c2
-rw-r--r--sound/core/oss/pcm_oss.c11
-rw-r--r--sound/core/pcm_compat.c49
-rw-r--r--sound/core/pcm_lib.c20
-rw-r--r--sound/core/pcm_native.c14
-rw-r--r--sound/core/sound_oss.c7
-rw-r--r--sound/core/timer.c16
-rw-r--r--sound/drivers/vx/vx_mixer.c4
-rw-r--r--sound/drivers/vx/vx_pcm.c8
-rw-r--r--sound/isa/ad1816a/ad1816a.c5
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c14
-rw-r--r--sound/isa/ad1848/ad1848_lib.c1
-rw-r--r--sound/isa/cmi8330.c4
-rw-r--r--sound/isa/cs423x/cs4231_lib.c2
-rw-r--r--sound/isa/gus/gus_io.c6
-rw-r--r--sound/isa/opl3sa2.c114
-rw-r--r--sound/isa/sb/sb16_main.c2
-rw-r--r--sound/pci/Kconfig10
-rw-r--r--sound/pci/ac97/Makefile2
-rw-r--r--sound/pci/ac97/ac97_bus.c79
-rw-r--r--sound/pci/ac97/ac97_codec.c85
-rw-r--r--sound/pci/ac97/ac97_patch.c449
-rw-r--r--sound/pci/ac97/ac97_patch.h1
-rw-r--r--sound/pci/ali5451/ali5451.c6
-rw-r--r--sound/pci/atiixp.c10
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c10
-rw-r--r--sound/pci/ca0106/ca0106_main.c8
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c4
-rw-r--r--sound/pci/cmipci.c2
-rw-r--r--sound/pci/cs46xx/cs46xx.c2
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c18
-rw-r--r--sound/pci/emu10k1/emu10k1.c2
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c8
-rw-r--r--sound/pci/emu10k1/emu10k1x.c4
-rw-r--r--sound/pci/emu10k1/emufx.c26
-rw-r--r--sound/pci/emu10k1/emumixer.c23
-rw-r--r--sound/pci/emu10k1/emupcm.c7
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/fm801.c8
-rw-r--r--sound/pci/hda/Makefile2
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_generic.c5
-rw-r--r--sound/pci/hda/hda_intel.c157
-rw-r--r--sound/pci/hda/hda_patch.h3
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_cmedia.c1
-rw-r--r--sound/pci/hda/patch_realtek.c9
-rw-r--r--sound/pci/hda/patch_si3054.c300
-rw-r--r--sound/pci/ice1712/delta.c10
-rw-r--r--sound/pci/ice1712/ice1712.c10
-rw-r--r--sound/pci/ice1712/ice1724.c6
-rw-r--r--sound/pci/intel8x0.c57
-rw-r--r--sound/pci/korg1212/korg1212.c4
-rw-r--r--sound/pci/nm256/nm256.c93
-rw-r--r--sound/pci/rme32.c4
-rw-r--r--sound/pci/rme96.c4
-rw-r--r--sound/pci/rme9652/hdsp.c55
-rw-r--r--sound/pci/rme9652/hdspm.c33
-rw-r--r--sound/pci/rme9652/rme9652.c24
-rw-r--r--sound/pci/trident/trident_main.c10
-rw-r--r--sound/pci/via82xx.c17
-rw-r--r--sound/pci/via82xx_modem.c3
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c232
-rw-r--r--sound/pcmcia/vx/vxpocket.c12
-rw-r--r--sound/sound_core.c27
-rw-r--r--sound/synth/emux/emux_synth.c1
-rw-r--r--sound/usb/usbaudio.c320
-rw-r--r--sound/usb/usbmidi.c111
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
1068 files changed, 63363 insertions, 21699 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 8b1430b46655..0665cb12bd66 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -135,3 +135,15 @@ Why: With the 16-bit PCMCIA subsystem now behaving (almost) like a
	pcmciautils package available at
	http://kernel.org/pub/linux/utils/kernel/pcmcia/
Who:	Dominik Brodowski <linux@brodo.de>

---------------------------

What:	ip_queue and ip6_queue (old ipv4-only and ipv6-only netfilter queue)
When:	December 2005
Why:	This interface has been obsoleted by the new layer3-independent
	"nfnetlink_queue". The kernel interface is compatible, so the old
	ip[6]tables "QUEUE" targets still work and will transparently handle
	all packets into nfnetlink queue number 0. Userspace users will have
	to link against an API-compatible library on top of libnfnetlink_queue
	instead of the current 'libipq'.
Who:	Harald Welte <laforge@netfilter.org>
diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/cxgb.txt
new file mode 100644
index 000000000000..76324638626b
--- /dev/null
+++ b/Documentation/networking/cxgb.txt
@@ -0,0 +1,352 @@
                Chelsio N210 10Gb Ethernet Network Controller

                        Driver Release Notes for Linux

                                Version 2.1.1

                                June 20, 2005

CONTENTS
========
 INTRODUCTION
 FEATURES
 PERFORMANCE
 DRIVER MESSAGES
 KNOWN ISSUES
 SUPPORT


INTRODUCTION
============

 This document describes the Linux driver for the Chelsio 10Gb Ethernet
 Network Controller. This driver supports the Chelsio N210 NIC and is
 backward compatible with the Chelsio N110 model 10Gb NICs.


FEATURES
========

 Adaptive Interrupts (adaptive-rx)
 ---------------------------------

 This feature provides an adaptive algorithm that adjusts the interrupt
 coalescing parameters, allowing the driver to dynamically adapt the latency
 settings to achieve the highest performance during various types of network
 load.

 The interface used to control this feature is ethtool. Please see the
 ethtool manpage for additional usage information.

 By default, adaptive-rx is disabled.
 To enable adaptive-rx:

     ethtool -C <interface> adaptive-rx on

 To disable adaptive-rx, use ethtool:

     ethtool -C <interface> adaptive-rx off

 After disabling adaptive-rx, the timer latency value will be set to 50us.
 You may set the timer latency after disabling adaptive-rx:

     ethtool -C <interface> rx-usecs <microseconds>

 An example to set the timer latency value to 100us on eth0:

     ethtool -C eth0 rx-usecs 100

 You may also provide a timer latency value while disabling adaptive-rx:

     ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>

 If adaptive-rx is disabled and a timer latency value is specified, the timer
 will be set to the specified value until changed by the user or until
 adaptive-rx is enabled.

 To view the status of the adaptive-rx and timer latency values:

     ethtool -c <interface>


 TCP Segmentation Offloading (TSO) Support
 -----------------------------------------

 This feature, also known as "large send", enables a system's protocol stack
 to offload portions of outbound TCP processing to a network interface card,
 thereby reducing system CPU utilization and enhancing performance.

 The interface used to control this feature is ethtool version 1.8 or higher.
 Please see the ethtool manpage for additional usage information.

 By default, TSO is enabled.
 To disable TSO:

     ethtool -K <interface> tso off

 To enable TSO:

     ethtool -K <interface> tso on

 To view the status of TSO:

     ethtool -k <interface>


PERFORMANCE
===========

 The following information is provided as an example of how to change system
 parameters for "performance tuning", and what values to use. You may or may
 not want to change these system parameters, depending on your
 server/workstation application. Doing so is not warranted in any way by
 Chelsio Communications, and is done at "YOUR OWN RISK". Chelsio will not be
 held responsible for loss of data or damage to equipment.

 Your distribution may have a different way of doing things, or you may prefer
 a different method. These commands are shown only to provide an example of
 what to do and are by no means definitive.

 Any of the following system changes will only last until you reboot your
 system. You may want to write a script that runs at boot-up which includes
 the optimal settings for your system.

 Setting PCI Latency Timer:
     setpci -d 1425:* 0x0c.l=0x0000F800

 Disabling TCP timestamps:
     sysctl -w net.ipv4.tcp_timestamps=0

 Disabling SACK:
     sysctl -w net.ipv4.tcp_sack=0

 Setting a large number of incoming connection requests:
     sysctl -w net.ipv4.tcp_max_syn_backlog=3000

 Setting maximum receive socket buffer size:
     sysctl -w net.core.rmem_max=1024000

 Setting maximum send socket buffer size:
     sysctl -w net.core.wmem_max=1024000

 Setting smp_affinity (on a multiprocessor system) to a single CPU
 (the value is a hexadecimal CPU bitmask, so 1 selects CPU0):
     echo 1 > /proc/irq/<interrupt_number>/smp_affinity

 Setting default receive socket buffer size:
     sysctl -w net.core.rmem_default=524287

 Setting default send socket buffer size:
     sysctl -w net.core.wmem_default=524287

 Setting maximum option memory buffers:
     sysctl -w net.core.optmem_max=524287

 Setting maximum backlog (# of unprocessed packets before kernel drops):
     sysctl -w net.core.netdev_max_backlog=300000

 Setting TCP read buffers (min/default/max):
     sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"

 Setting TCP write buffers (min/default/max):
     sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"

 Setting TCP buffer space (min/pressure/max):
     sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"

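 As noted above, these settings do not survive a reboot. A minimal boot-up
 script along the following lines (a sketch only, collecting the example
 sysctl values from this section; adjust the values for your own system
 before using it) can reapply them:

     #!/bin/sh
     # Reapply the example cxgb tuning settings from this document.
     sysctl -w net.ipv4.tcp_timestamps=0
     sysctl -w net.ipv4.tcp_sack=0
     sysctl -w net.ipv4.tcp_max_syn_backlog=3000
     sysctl -w net.core.rmem_max=1024000
     sysctl -w net.core.wmem_max=1024000
     sysctl -w net.core.rmem_default=524287
     sysctl -w net.core.wmem_default=524287
     sysctl -w net.core.optmem_max=524287
     sysctl -w net.core.netdev_max_backlog=300000
     sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
     sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
     sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
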
 TCP window size for single connections:
   The receive buffer (RX_WINDOW) size must be at least as large as the
   Bandwidth-Delay Product of the communication link between the sender and
   receiver. Due to the variations of RTT, you may want to increase the buffer
   size up to 2 times the Bandwidth-Delay Product. Reference page 289 of
   "TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
   At 10Gb speeds, use the following formula:
       RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
       Example for RTT with 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000
   RX_WINDOW sizes of 256KB - 512KB should be sufficient.
   Setting the min, max, and default receive buffer (RX_WINDOW) size:
       sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"

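   For instance, to pin the receive buffer at 256KB (an arbitrary value
   within the suggested 256KB - 512KB range):
       sysctl -w net.ipv4.tcp_rmem="262144 262144 262144"
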
 TCP window size for multiple connections:
   The receive buffer (RX_WINDOW) size may be calculated the same way as for
   single connections, but should be divided by the number of connections.
   The smaller window prevents congestion and facilitates better pacing,
   especially if/when MAC level flow control does not work well or when it is
   not supported on the machine. Experimentation may be necessary to attain
   the correct value. This method is provided as a starting point for the
   correct receive buffer size.
   Setting the min, max, and default receive buffer (RX_WINDOW) size is
   performed in the same manner as for a single connection.

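   As a worked illustration (the figures are made-up examples): if a 512KB
   aggregate window is shared by 8 concurrent connections, each connection
   would get a window of about 64KB:
       sysctl -w net.ipv4.tcp_rmem="65536 65536 65536"
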

DRIVER MESSAGES
===============

 The following messages are the most common messages logged by syslog. These
 may be found in /var/log/messages.

 Driver up:
     Chelsio Network Driver - version 2.1.1

 NIC detected:
     eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit

 Link up:
     eth#: link is up at 10 Gbps, full duplex

 Link down:
     eth#: link is down


KNOWN ISSUES
============

 These issues have been identified during testing. The following information
 is provided as a workaround to the problem. In some cases, this problem is
 inherent to Linux or to a particular Linux Distribution and/or hardware
 platform.

 1. Large number of TCP retransmits on a multiprocessor (SMP) system.

    On a system with multiple CPUs, the interrupt (IRQ) for the network
    controller may be bound to more than one CPU. This will cause TCP
    retransmits if the packet data were to be split across different CPUs
    and re-assembled in a different order than expected.

    To eliminate the TCP retransmits, set smp_affinity on the particular
    interrupt to a single CPU. You can locate the interrupt (IRQ) used on
    the N110/N210 by using ifconfig:
        ifconfig <dev_name> | grep Interrupt
    Set the smp_affinity to a single CPU:
        echo 1 > /proc/irq/<interrupt_number>/smp_affinity

    It is highly suggested that you do not run the irqbalance daemon on your
    system, as this will change any smp_affinity setting you have applied.
    The irqbalance daemon runs on a 10 second interval and binds interrupts
    to the least loaded CPU determined by the daemon. To disable this daemon:
        chkconfig --level 2345 irqbalance off

    By default, some Linux distributions enable the kernel feature,
    irqbalance, which performs the same function as the daemon. To disable
    this feature, add the following line to your bootloader:
        noirqbalance

    Example using the Grub bootloader:
        title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
        root (hd0,0)
        kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
        initrd /initrd-2.4.21-27.ELsmp.img

 2. After running insmod, the driver is loaded and the incorrect network
    interface is brought up without running ifup.

    When using 2.4.x kernels, including RHEL kernels, the Linux kernel
    invokes a script named "hotplug". This script is primarily used to
    automatically bring up USB devices when they are plugged in; however,
    the script also attempts to automatically bring up a network interface
    after loading the kernel module. The hotplug script does this by scanning
    the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
    for HWADDR=<mac_address>.

    If the hotplug script does not find the HWADDR within any of the
    ifcfg-eth# files, it will bring up the device with the next available
    interface name. If this interface is already configured for a different
    network card, your new interface will have an incorrect IP address and
    network settings.

    To solve this issue, you can add the HWADDR=<mac_address> key to the
    interface config file of your network controller.

    To disable this "hotplug" feature, you may add the driver (module name)
    to the "blacklist" file located in /etc/hotplug. It has been noted that
    this does not work for network devices because the net.agent script
    does not use the blacklist file. Simply remove, or rename, the net.agent
    script located in /etc/hotplug to disable this feature.

 3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
    on an AMD Opteron system with a HyperTransport PCI-X Tunnel chipset.

    If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
    chipset, you may experience the "133-MHz Mode Split Completion Data
    Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
    PCI-X bus.

    AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
    can provide stale data via split completion cycles to a PCI-X card that
    is operating at 133MHz", causing data corruption.

    AMD provides three workarounds for this problem; however, Chelsio
    recommends the first option for best performance with this bug:

        For 133MHz secondary bus operation, limit the transaction length and
        the number of outstanding transactions, via BIOS configuration
        programming of the PCI-X card, to the following:

            Data Length (bytes): 1k
            Total allowed outstanding transactions: 2

    Please refer to AMD 8131-HT/PCI-X Errata 26310 Rev 3.08 August 2004,
    section 56, "133-MHz Mode Split Completion Data Corruption", for more
    details on this bug and the workarounds suggested by AMD.

    It may be possible to work outside AMD's recommended PCI-X settings; try
    increasing the Data Length to 2k bytes for increased performance. If you
    have issues with these settings, please revert to the "safe" settings
    and duplicate the problem before submitting a bug or asking for support.

    NOTE: The default setting on most systems is 8 outstanding transactions
    and 2k bytes data length.

 4. On multiprocessor systems, it has been noted that an application which
    is handling 10Gb networking can switch between CPUs, causing degraded
    and/or unstable performance.

    If running on an SMP system and taking performance measurements, it
    is suggested you either run the latest netperf-2.4.0+ or use a binding
    tool such as Tim Hockin's procstate utilities (runon)
    <http://www.hockin.org/~thockin/procstate/>.

    Binding netserver and netperf (or other applications) to particular
    CPUs can make a significant difference in performance measurements.
    You may need to experiment with which CPU to bind the application to
    in order to achieve the best performance for your system.

    If you are developing an application designed for 10Gb networking,
    please keep in mind you may want to look at the kernel functions
    sched_setaffinity & sched_getaffinity to bind your application.

    If you are just running user-space applications such as ftp, telnet,
    etc., you may want to try the runon tool provided by Tim Hockin's
    procstate utility. You could also try binding the interface to a
    particular CPU: runon 0 ifup eth0

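    For example (the CPU numbers and host address below are illustrative
    only), the server and client sides of a netperf run could each be
    pinned to their own CPU with runon:
        runon 0 netserver
        runon 1 netperf -H <server_ip>
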

SUPPORT
=======

 If you have problems with the software or hardware, please contact our
 customer support team via email at support@chelsio.com or check our website
 at http://www.chelsio.com

===============================================================================

 Chelsio Communications
 370 San Aleso Ave.
 Suite 100
 Sunnyvale, CA 94085
 http://www.chelsio.com

338This program is free software; you can redistribute it and/or modify
339it under the terms of the GNU General Public License, version 2, as
340published by the Free Software Foundation.
341
342You should have received a copy of the GNU General Public License along
343with this program; if not, write to the Free Software Foundation, Inc.,
34459 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
345
346THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
347WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
348MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
349
350 Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
351
352===============================================================================
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
new file mode 100644
index 000000000000..29ccae409031
--- /dev/null
+++ b/Documentation/networking/phy.txt
@@ -0,0 +1,288 @@
1
2-------
3PHY Abstraction Layer
4(Updated 2005-07-21)
5
6Purpose
7
8 Most network devices consist of a set of registers which provide an interface
9 to a MAC layer, which communicates with the physical connection through a
10 PHY. The PHY concerns itself with negotiating link parameters with the link
11 partner on the other side of the network connection (typically, an ethernet
12 cable), and provides a register interface to allow drivers to determine what
13 settings were chosen, and to configure what settings are allowed.
14
15 While these devices are distinct from the network devices, and conform to a
16 standard layout for the registers, it has been common practice to integrate
17 the PHY management code with the network driver. This has resulted in large
18 amounts of redundant code. Also, on embedded systems with multiple (and
19 sometimes quite different) ethernet controllers connected to the same
20 management bus, it is difficult to ensure safe use of the bus.
21
22 Since the PHYs are devices, and the management busses through which they are
23 accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
24 In doing so, it has these goals:
25
26 1) Increase code-reuse
27 2) Increase overall code-maintainability
28 3) Speed development time for new network drivers, and for new systems
29
30 Basically, this layer is meant to provide an interface to PHY devices which
31 allows network driver writers to write as little code as possible, while
32 still providing a full feature set.
33
34The MDIO bus
35
36 Most network devices are connected to a PHY by means of a management bus.
37 Different devices use different busses (though some share common interfaces).
38 In order to take advantage of the PAL, each bus interface needs to be
39 registered as a distinct device.
40
41 1) read and write functions must be implemented. Their prototypes are:
42
43 int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44 int read(struct mii_bus *bus, int mii_id, int regnum);
45
46 mii_id is the address on the bus for the PHY, and regnum is the register
47 number. These functions are guaranteed not to be called from interrupt
48 time, so it is safe for them to block, waiting for an interrupt to signal
49 that the operation is complete.
50
51 2) A reset function is necessary. This is used to return the bus to an
52 initialized state.
53
54 3) A probe function is needed. This function should set up anything the bus
55    driver needs, set up the mii_bus structure, and register with the PAL using
56    mdiobus_register. Similarly, there's a remove function to undo all of
57    that (use mdiobus_unregister).
58
59 4) Like any driver, the device_driver structure must be configured, and
60    init/exit functions are used to register the driver.
61
62 5) The bus must also be declared somewhere as a device, and registered.
63
64 As an example of how one driver implemented an MDIO bus driver, see
65 drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c.
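
   As a rough, untested sketch of the same shape (the mymac_* names and
   MDIO_READ/MDIO_WRITE are hypothetical; treat the in-tree drivers above
   as authoritative):

       static int mymac_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
       {
               struct mymac_priv *priv = bus->priv;   /* hypothetical state */

               /* start the transfer, sleep until the controller finishes */
               return mymac_mdio_xfer(priv, mii_id, regnum, 0, MDIO_READ);
       }

       static int mymac_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                   u16 value)
       {
               struct mymac_priv *priv = bus->priv;

               return mymac_mdio_xfer(priv, mii_id, regnum, value, MDIO_WRITE);
       }

       static int mymac_mdio_probe(struct device *dev)
       {
               struct mii_bus *bus = kmalloc(sizeof(*bus), GFP_KERNEL);

               if (!bus)
                       return -ENOMEM;
               memset(bus, 0, sizeof(*bus));

               bus->name = "mymac MII bus";
               bus->id = 0;                    /* the "0" in "phy0:0" names */
               bus->read = mymac_mdio_read;
               bus->write = mymac_mdio_write;
               bus->reset = mymac_mdio_reset;  /* hypothetical reset helper */
               bus->priv = dev_get_drvdata(dev);

               return mdiobus_register(bus);
       }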
66
67Connecting to a PHY
68
69 Sometime during startup, the network driver needs to establish a connection
70 between the PHY device and the network device. At this time, the PHY's bus
71 and drivers must all have been loaded, so it is ready for the connection.
72 At this point, there are several ways to connect to the PHY:
73
74 1) The PAL handles everything, and only calls the network driver when
75 the link state changes, so it can react.
76
77 2) The PAL handles everything except interrupts (usually because the
78 controller has the interrupt registers).
79
80 3) The PAL handles everything, but checks in with the driver every second,
81 allowing the network driver to react first to any changes before the PAL
82 does.
83
84 4) The PAL serves only as a library of functions, with the network device
85    manually calling functions to update status, and configure the PHY.
86
87
88Letting the PHY Abstraction Layer do Everything
89
90 If you choose option 1 (the hope is that every driver can, but it remains
91 useful to drivers that can't), connecting to the PHY is simple:
92
93 First, you need a function to react to changes in the link state. This
94 function follows this protocol:
95
96 static void adjust_link(struct net_device *dev);
97
98 Next, you need to know the device name of the PHY connected to this device.
99 The name will look something like, "phy0:0", where the first number is the
100 bus id, and the second is the PHY's address on that bus.
101
102 Now, to connect, just call this function:
103
104 phydev = phy_connect(dev, phy_name, &adjust_link, flags);
105
106 phydev is a pointer to the phy_device structure which represents the PHY;
107 phy_connect returns this pointer on success. dev, here, is the
108 pointer to your net_device. Once done, this function will have started the
109 PHY's software state machine, and registered for the PHY's interrupt, if it
110 has one. The phydev structure will be populated with information about the
111 current state, though the PHY will not yet be truly operational at this
112 point.
113
114 flags is a u32 which can optionally contain phy-specific flags.
115 This is useful if the system has put hardware restrictions on
116 the PHY/controller, of which the PHY needs to be aware.
117
118 Now just make sure that phydev->supported and phydev->advertising have any
119 values pruned from them which don't make sense for your controller (a 10/100
120 controller may be connected to a gigabit capable PHY, so you would need to
121 mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
122 for these bitfields. Note that you should not SET any bits, or the PHY
123 may be put into an unsupported state.
124
125 Lastly, once the controller is ready to handle network traffic, you call
126 phy_start(phydev). This tells the PAL that you are ready, and configures the
127 PHY to connect to the network. If you want to handle your own interrupts,
128 just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
129 Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
130
131 When you want to disconnect from the network (even if just briefly), you call
132 phy_stop(phydev).
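
   Putting this together, here is an untested sketch of option 1 (the
   mydrv_* names are hypothetical, "phy0:1" is a board-specific PHY id,
   and the error handling assumes phy_connect reports failure via an
   ERR_PTR-style return):

       /* called by the PAL whenever the link state changes */
       static void mydrv_adjust_link(struct net_device *dev)
       {
               struct mydrv_priv *priv = netdev_priv(dev);

               if (priv->phydev->link)
                       mydrv_sync_mac(priv);  /* hypothetical: match the MAC
                                                 to the new speed/duplex */
       }

       static int mydrv_init_phy(struct net_device *dev)
       {
               struct mydrv_priv *priv = netdev_priv(dev);
               struct phy_device *phydev;

               phydev = phy_connect(dev, "phy0:1", &mydrv_adjust_link, 0);
               if (IS_ERR(phydev))
                       return PTR_ERR(phydev);

               /* a 10/100 controller: prune the gigabit modes */
               phydev->supported &= ~(SUPPORTED_1000baseT_Half |
                                      SUPPORTED_1000baseT_Full);
               phydev->advertising &= ~(ADVERTISED_1000baseT_Half |
                                        ADVERTISED_1000baseT_Full);

               priv->phydev = phydev;
               return 0;
       }

   Then call phy_start(priv->phydev) from your open() routine once the
   controller can handle traffic, and phy_stop(priv->phydev) from stop().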
133
134Keeping Close Tabs on the PAL
135
136 It is possible that the PAL's built-in state machine needs a little help to
137 keep your network device and the PHY properly in sync. If so, you can
138 register a helper function when connecting to the PHY, which will be called
139 every second before the state machine reacts to any changes. To do this, you
140 need to manually call phy_attach() and phy_prepare_link(), and then call
141 phy_start_machine() with the second argument set to point to your special
142 handler.
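
   As a purely illustrative, untested sketch (reusing the hypothetical
   mydrv_adjust_link handler from the earlier sketch, and assuming
   phy_prepare_link() takes the phydev and the adjust_link handler, per
   the description above):

       /* called once a second, before the PAL's state machine reacts */
       static void mydrv_fixup(struct net_device *dev)
       {
               /* hypothetical: nudge controller state to match the PHY */
       }

       static int mydrv_init_phy(struct net_device *dev)
       {
               struct phy_device *phydev;

               phydev = phy_attach(dev, "phy0:1", 0);  /* board-specific id */
               if (IS_ERR(phydev))
                       return PTR_ERR(phydev);

               phy_prepare_link(phydev, &mydrv_adjust_link);
               phy_start_machine(phydev, &mydrv_fixup);
               return 0;
       }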
143
144 Currently there are no in-tree examples of how to use this functionality,
145 and testing of it has been limited because the author does not have any
146 drivers which use it (they all use option 1). So Caveat Emptor.
147
148Doing it all yourself
149
150 There's a remote chance that the PAL's built-in state machine cannot track
151 the complex interactions between the PHY and your network device. If this is
152 so, you can simply call phy_attach(), and not call phy_start_machine or
153 phy_prepare_link(). This will mean that phydev->state is entirely yours to
154 handle (phy_start and phy_stop toggle between some of the states, so you
155 might need to avoid them).
156
157 An effort has been made to make sure that useful functionality can be
158 accessed without the state-machine running, and most of these functions are
159 descended from functions which did not interact with a complex state-machine.
160 However, again, no effort has been made so far to test running without the
161 state machine, so tryer beware.
162
163 Here is a brief rundown of the functions:
164
165 int phy_read(struct phy_device *phydev, u16 regnum);
166 int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
167
168 Simple read/write primitives. They invoke the bus's read/write function
169 pointers.
170
171 void phy_print_status(struct phy_device *phydev);
172
173 A convenience function to print out the PHY status neatly.
174
175 int phy_clear_interrupt(struct phy_device *phydev);
176 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
177
178 Clear the PHY's interrupt, and configure which ones are allowed,
179 respectively. Currently only supports all on, or all off.
180
181 int phy_enable_interrupts(struct phy_device *phydev);
182 int phy_disable_interrupts(struct phy_device *phydev);
183
184 Functions which enable/disable PHY interrupts, clearing them
185 before and after, respectively.
186
187 int phy_start_interrupts(struct phy_device *phydev);
188 int phy_stop_interrupts(struct phy_device *phydev);
189
190 Requests the IRQ for the PHY interrupts and then enables them (for
191 start), or disables and then frees them (for stop).
192
193 struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
194 u32 flags);
195
196 Attaches a network device to a particular PHY, binding the PHY to a generic
197 driver if none was found during bus initialization. Passes in
198 any phy-specific flags as needed.
199
200 int phy_start_aneg(struct phy_device *phydev);
201
202 Using variables inside the phydev structure, either configures advertising
203 and resets autonegotiation, or disables autonegotiation, and configures
204 forced settings.
205
206 static inline int phy_read_status(struct phy_device *phydev);
207
208 Fills the phydev structure with up-to-date information about the current
209 settings in the PHY.
210
211 void phy_sanitize_settings(struct phy_device *phydev);
212
213 Resolves differences between currently desired settings, and
214 supported settings for the given PHY device. Does not make
215 the changes in the hardware, though.
216
217 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
218 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
219
220 Ethtool convenience functions.
221
222 int phy_mii_ioctl(struct phy_device *phydev,
223 struct mii_ioctl_data *mii_data, int cmd);
224
225 The MII ioctl. Note that this function will completely screw up the state
226 machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
227 use this only to write registers which are not standard, and don't set off
228 a renegotiation.
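
   The ethtool convenience functions, for instance, are usually just thin
   wrappers in the driver (an untested sketch; the mydrv_* names are
   hypothetical):

       static int mydrv_get_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
       {
               struct mydrv_priv *priv = netdev_priv(dev);

               return phy_ethtool_gset(priv->phydev, cmd);
       }

       static int mydrv_set_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
       {
               struct mydrv_priv *priv = netdev_priv(dev);

               return phy_ethtool_sset(priv->phydev, cmd);
       }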
229
230
231PHY Device Drivers
232
233 With the PHY Abstraction Layer, adding support for new PHYs is
234 quite easy. In some cases, no work is required at all! However,
235 many PHYs require a little hand-holding to get up-and-running.
236
237Generic PHY driver
238
239 If the desired PHY doesn't have any errata, quirks, or special
240 features you want to support, then it may be best to not add
241 support, and let the PHY Abstraction Layer's Generic PHY Driver
242 do all of the work.
243
244Writing a PHY driver
245
246 If you do need to write a PHY driver, the first thing to do is
247 make sure it can be matched with an appropriate PHY device.
248 This is done during bus initialization by reading the device's
249 UID (stored in registers 2 and 3), then comparing it to each
250 driver's phy_id field after ANDing both with that driver's
251 phy_id_mask field. The driver also needs a name. Here's an example:
252
253 static struct phy_driver dm9161_driver = {
254 .phy_id = 0x0181b880,
255 .name = "Davicom DM9161E",
256 .phy_id_mask = 0x0ffffff0,
257 ...
258 };
259
260 Next, you need to specify what features (speed, duplex, autoneg,
261 etc.) your PHY device and driver support. Most PHYs support
262 PHY_BASIC_FEATURES, but you can look in include/linux/mii.h for other
263 features.
264
265 Each driver consists of a number of function pointers:
266
267 config_init: configures PHY into a sane state after a reset.
268 For instance, a Davicom PHY requires descrambling disabled.
269 probe: Does any setup needed by the driver.
270 suspend/resume: Power management.
271 config_aneg: Changes the speed/duplex/negotiation settings.
272 read_status: Reads the current speed/duplex/negotiation settings.
273 ack_interrupt: Clears a pending interrupt.
274 config_intr: Enables or disables interrupts.
275 remove: Does any driver take-down.
276
277 Of these, only config_aneg and read_status are required to be
278 assigned by the driver code. The rest are optional. Also, it is
279 preferred to use the generic phy driver's versions of these two
280 functions if at all possible: genphy_read_status and
281 genphy_config_aneg. If this is not possible, it is likely that
282 you only need to perform some actions before and after invoking
283 these functions, and so your functions will wrap the generic
284 ones.
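
   An untested sketch of such a wrapper and its registration (the
   MY_PHY_* register/value names and the 0x01234560 UID are hypothetical,
   invented for illustration):

       static int mydrv_phy_config_aneg(struct phy_device *phydev)
       {
               int err;

               /* hypothetical vendor quirk, applied before renegotiating */
               err = phy_write(phydev, MY_PHY_VENDOR_REG, MY_PHY_VENDOR_INIT);
               if (err < 0)
                       return err;

               /* hand the real work to the generic driver */
               return genphy_config_aneg(phydev);
       }

       static struct phy_driver mydrv_phy_driver = {
               .phy_id         = 0x01234560,
               .phy_id_mask    = 0x0ffffff0,
               .name           = "Hypothetical PHY",
               .features       = PHY_BASIC_FEATURES,
               .config_aneg    = mydrv_phy_config_aneg,
               .read_status    = genphy_read_status,
       };

       static int __init mydrv_phy_init(void)
       {
               return phy_driver_register(&mydrv_phy_driver);
       }

       static void __exit mydrv_phy_exit(void)
       {
               phy_driver_unregister(&mydrv_phy_driver);
       }

       module_init(mydrv_phy_init);
       module_exit(mydrv_phy_exit);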
285
286 Feel free to look at the Marvell, Cicada, and Davicom drivers in
287 drivers/net/phy/ for examples (the lxt and qsemi drivers have
288 not been tested as of this writing).
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index a18ecb92b356..5c49ba07e709 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -132,6 +132,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
132 mpu_irq - IRQ # for MPU-401 UART (PnP setup) 132 mpu_irq - IRQ # for MPU-401 UART (PnP setup)
133 dma1 - first DMA # for AD1816A chip (PnP setup) 133 dma1 - first DMA # for AD1816A chip (PnP setup)
134 dma2 - second DMA # for AD1816A chip (PnP setup) 134 dma2 - second DMA # for AD1816A chip (PnP setup)
135 clockfreq - Clock frequency for AD1816A chip (default = 0, 33000Hz)
135 136
136 Module supports up to 8 cards, autoprobe and PnP. 137 Module supports up to 8 cards, autoprobe and PnP.
137 138
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index db0b7d2dc477..0475478c2484 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -3422,10 +3422,17 @@ struct _snd_pcm_runtime {
3422 3422
3423 <para> 3423 <para>
3424 The <structfield>iface</structfield> field specifies the type of 3424 The <structfield>iface</structfield> field specifies the type of
3425 the control, 3425 the control, <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>, which
3426 <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>. There are 3426 is usually <constant>MIXER</constant>.
3427 <constant>MIXER</constant>, <constant>PCM</constant>, 3427 Use <constant>CARD</constant> for global controls that are not
3428 <constant>CARD</constant>, etc. 3428 logically part of the mixer.
3429 If the control is closely associated with some specific device on
3430 the sound card, use <constant>HWDEP</constant>,
3431 <constant>PCM</constant>, <constant>RAWMIDI</constant>,
3432 <constant>TIMER</constant>, or <constant>SEQUENCER</constant>, and
3433 specify the device number with the
3434 <structfield>device</structfield> and
3435 <structfield>subdevice</structfield> fields.
3429 </para> 3436 </para>
3430 3437
3431 <para> 3438 <para>
diff --git a/MAINTAINERS b/MAINTAINERS
index ed3a50ee30ce..671e32905d28 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2099,6 +2099,12 @@ M: support@simtec.co.uk
2099W: http://www.simtec.co.uk/products/EB2410ITX/ 2099W: http://www.simtec.co.uk/products/EB2410ITX/
2100S: Supported 2100S: Supported
2101 2101
2102SIS 190 ETHERNET DRIVER
2103P: Francois Romieu
2104M: romieu@fr.zoreil.com
2105L: netdev@vger.kernel.org
2106S: Maintained
2107
2102SIS 5513 IDE CONTROLLER DRIVER 2108SIS 5513 IDE CONTROLLER DRIVER
2103P: Lionel Bouton 2109P: Lionel Bouton
2104M: Lionel.Bouton@inet6.fr 2110M: Lionel.Bouton@inet6.fr
diff --git a/Makefile b/Makefile
index 300f61f6f6a2..3d84df581cf2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 13 3SUBLEVEL = 13
4EXTRAVERSION =-rc7 4EXTRAVERSION =
5NAME=Woozy Numbat 5NAME=Affluent Albatross
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 08fe8071a7f8..2e45e8604e32 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -566,13 +566,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
566 if (ka->sa.sa_flags & SA_RESETHAND) 566 if (ka->sa.sa_flags & SA_RESETHAND)
567 ka->sa.sa_handler = SIG_DFL; 567 ka->sa.sa_handler = SIG_DFL;
568 568
569 if (!(ka->sa.sa_flags & SA_NODEFER)) { 569 spin_lock_irq(&current->sighand->siglock);
570 spin_lock_irq(&current->sighand->siglock); 570 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
571 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 571 if (!(ka->sa.sa_flags & SA_NODEFER))
572 sigaddset(&current->blocked,sig); 572 sigaddset(&current->blocked,sig);
573 recalc_sigpending(); 573 recalc_sigpending();
574 spin_unlock_irq(&current->sighand->siglock); 574 spin_unlock_irq(&current->sighand->siglock);
575 }
576} 575}
577 576
578static inline void 577static inline void
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c65c6eb9810d..4bf0e8737e1f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -635,10 +635,6 @@ config PM
635 and the Battery Powered Linux mini-HOWTO, available from 635 and the Battery Powered Linux mini-HOWTO, available from
636 <http://www.tldp.org/docs.html#howto>. 636 <http://www.tldp.org/docs.html#howto>.
637 637
638 Note that, even if you say N here, Linux on the x86 architecture
639 will issue the hlt instruction if nothing is to be done, thereby
640 sending the processor to sleep and saving power.
641
642config APM 638config APM
643 tristate "Advanced Power Management Emulation" 639 tristate "Advanced Power Management Emulation"
644 depends on PM 640 depends on PM
@@ -650,12 +646,6 @@ config APM
650 battery status information, and user-space programs will receive 646 battery status information, and user-space programs will receive
651 notification of APM "events" (e.g. battery status change). 647 notification of APM "events" (e.g. battery status change).
652 648
653 If you select "Y" here, you can disable actual use of the APM
654 BIOS by passing the "apm=off" option to the kernel at boot time.
655
656 Note that the APM support is almost completely disabled for
657 machines with more than one CPU.
658
659 In order to use APM, you will need supporting software. For location 649 In order to use APM, you will need supporting software. For location
660 and more information, read <file:Documentation/pm.txt> and the 650 and more information, read <file:Documentation/pm.txt> and the
661 Battery Powered Linux mini-HOWTO, available from 651 Battery Powered Linux mini-HOWTO, available from
@@ -665,39 +655,12 @@ config APM
665 manpage ("man 8 hdparm") for that), and it doesn't turn off 655 manpage ("man 8 hdparm") for that), and it doesn't turn off
666 VESA-compliant "green" monitors. 656 VESA-compliant "green" monitors.
667 657
668 This driver does not support the TI 4000M TravelMate and the ACER
669 486/DX4/75 because they don't have compliant BIOSes. Many "green"
670 desktop machines also don't have compliant BIOSes, and this driver
671 may cause those machines to panic during the boot phase.
672
673 Generally, if you don't have a battery in your machine, there isn't 658 Generally, if you don't have a battery in your machine, there isn't
674 much point in using this driver and you should say N. If you get 659 much point in using this driver and you should say N. If you get
675 random kernel OOPSes or reboots that don't seem to be related to 660 random kernel OOPSes or reboots that don't seem to be related to
676 anything, try disabling/enabling this option (or disabling/enabling 661 anything, try disabling/enabling this option (or disabling/enabling
677 APM in your BIOS). 662 APM in your BIOS).
678 663
679 Some other things you should try when experiencing seemingly random,
680 "weird" problems:
681
682 1) make sure that you have enough swap space and that it is
683 enabled.
684 2) pass the "no-hlt" option to the kernel
685 3) switch on floating point emulation in the kernel and pass
686 the "no387" option to the kernel
687 4) pass the "floppy=nodma" option to the kernel
688 5) pass the "mem=4M" option to the kernel (thereby disabling
689 all but the first 4 MB of RAM)
690 6) make sure that the CPU is not over clocked.
691 7) read the sig11 FAQ at <http://www.bitwizard.nl/sig11/>
692 8) disable the cache from your BIOS settings
693 9) install a fan for the video card or exchange video RAM
694 10) install a better fan for the CPU
695 11) exchange RAM chips
696 12) exchange the motherboard.
697
698 To compile this driver as a module, choose M here: the
699 module will be called apm.
700
701endmenu 664endmenu
702 665
703source "net/Kconfig" 666source "net/Kconfig"
@@ -752,6 +715,8 @@ source "drivers/hwmon/Kconfig"
752 715
753source "drivers/misc/Kconfig" 716source "drivers/misc/Kconfig"
754 717
718source "drivers/mfd/Kconfig"
719
755source "drivers/media/Kconfig" 720source "drivers/media/Kconfig"
756 721
757source "drivers/video/Kconfig" 722source "drivers/video/Kconfig"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 692af6b5e8ff..666ba393575b 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,6 +1,9 @@
1config ICST525 1config ICST525
2 bool 2 bool
3 3
4config ARM_GIC
5 bool
6
4config ICST307 7config ICST307
5 bool 8 bool
6 9
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 11f20a43ee3a..a87886564b19 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-y += rtctime.o 5obj-y += rtctime.o
6obj-$(CONFIG_ARM_AMBA) += amba.o 6obj-$(CONFIG_ARM_AMBA) += amba.o
7obj-$(CONFIG_ARM_GIC) += gic.o
7obj-$(CONFIG_ICST525) += icst525.o 8obj-$(CONFIG_ICST525) += icst525.o
8obj-$(CONFIG_ICST307) += icst307.o 9obj-$(CONFIG_ICST307) += icst307.o
9obj-$(CONFIG_SA1111) += sa1111.o 10obj-$(CONFIG_SA1111) += sa1111.o
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
new file mode 100644
index 000000000000..51dbf5489b6b
--- /dev/null
+++ b/arch/arm/common/gic.c
@@ -0,0 +1,166 @@
1/*
2 * linux/arch/arm/common/gic.c
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Interrupt architecture for the GIC:
11 *
12 * o There is one Interrupt Distributor, which receives interrupts
13 * from system devices and sends them to the Interrupt Controllers.
14 *
15 * o There is one CPU Interface per CPU, which sends interrupts sent
16 * by the Distributor, and interrupts generated locally, to the
17 * associated CPU.
18 *
19 * Note that IRQs 0-31 are special - they are local to each CPU.
20 * As such, the enable set/clear, pending set/clear and active bit
21 * registers are banked per-cpu for these sources.
22 */
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/smp.h>
27
28#include <asm/irq.h>
29#include <asm/io.h>
30#include <asm/mach/irq.h>
31#include <asm/hardware/gic.h>
32
33static void __iomem *gic_dist_base;
34static void __iomem *gic_cpu_base;
35
36/*
37 * Routines to acknowledge, disable and enable interrupts
38 *
39 * Linux assumes that when we're done with an interrupt we need to
40 * unmask it, in the same way we need to unmask an interrupt when
41 * we first enable it.
42 *
43 * The GIC has a separate notion of "end of interrupt" to re-enable
44 * an interrupt after handling, in order to support hardware
45 * prioritisation.
46 *
47 * We can make the GIC behave in the way that Linux expects by making
48 * our "acknowledge" routine disable the interrupt, then mark it as
49 * complete.
50 */
51static void gic_ack_irq(unsigned int irq)
52{
53 u32 mask = 1 << (irq % 32);
54 writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
55 writel(irq, gic_cpu_base + GIC_CPU_EOI);
56}
57
58static void gic_mask_irq(unsigned int irq)
59{
60 u32 mask = 1 << (irq % 32);
61 writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
62}
63
64static void gic_unmask_irq(unsigned int irq)
65{
66 u32 mask = 1 << (irq % 32);
67 writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
68}
69
70static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
71{
72 void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
73 unsigned int shift = (irq % 4) * 8;
74 u32 val;
75
76 val = readl(reg) & ~(0xff << shift);
77 val |= 1 << (cpu + shift);
78 writel(val, reg);
79}
80
81static struct irqchip gic_chip = {
82 .ack = gic_ack_irq,
83 .mask = gic_mask_irq,
84 .unmask = gic_unmask_irq,
85#ifdef CONFIG_SMP
86 .set_cpu = gic_set_cpu,
87#endif
88};
89
90void __init gic_dist_init(void __iomem *base)
91{
92 unsigned int max_irq, i;
93 u32 cpumask = 1 << smp_processor_id();
94
95 cpumask |= cpumask << 8;
96 cpumask |= cpumask << 16;
97
98 gic_dist_base = base;
99
100 writel(0, base + GIC_DIST_CTRL);
101
102 /*
103 * Find out how many interrupts are supported.
104 */
105 max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
106 max_irq = (max_irq + 1) * 32;
107
108 /*
109 * The GIC only supports up to 1020 interrupt sources.
110 * Limit this to either the architected maximum, or the
111 * platform maximum.
112 */
113 if (max_irq > max(1020, NR_IRQS))
114 max_irq = max(1020, NR_IRQS);
115
116 /*
117 * Set all global interrupts to be level triggered, active low.
118 */
119 for (i = 32; i < max_irq; i += 16)
120 writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
121
122 /*
123 * Set all global interrupts to this CPU only.
124 */
125 for (i = 32; i < max_irq; i += 4)
126 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
127
128 /*
129 * Set priority on all interrupts.
130 */
131 for (i = 0; i < max_irq; i += 4)
132 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
133
134 /*
135 * Disable all interrupts.
136 */
137 for (i = 0; i < max_irq; i += 32)
138 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
139
140 /*
141 * Setup the Linux IRQ subsystem.
142 */
143 for (i = 29; i < max_irq; i++) {
144 set_irq_chip(i, &gic_chip);
145 set_irq_handler(i, do_level_IRQ);
146 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
147 }
148
149 writel(1, base + GIC_DIST_CTRL);
150}
151
152void __cpuinit gic_cpu_init(void __iomem *base)
153{
154 gic_cpu_base = base;
155 writel(0xf0, base + GIC_CPU_PRIMASK);
156 writel(1, base + GIC_CPU_CTRL);
157}
158
159#ifdef CONFIG_SMP
160void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
161{
162 unsigned long map = *cpus_addr(cpumask);
163
164 writel(map << 16 | irq, gic_dist_base + GIC_DIST_SOFTINT);
165}
166#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 5e435e42dacd..a94d75fef598 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -658,11 +658,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
658 /* 658 /*
659 * Block the signal if we were unsuccessful. 659 * Block the signal if we were unsuccessful.
660 */ 660 */
661 if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) { 661 if (ret != 0) {
662 spin_lock_irq(&tsk->sighand->siglock); 662 spin_lock_irq(&tsk->sighand->siglock);
663 sigorsets(&tsk->blocked, &tsk->blocked, 663 sigorsets(&tsk->blocked, &tsk->blocked,
664 &ka->sa.sa_mask); 664 &ka->sa.sa_mask);
665 sigaddset(&tsk->blocked, sig); 665 if (!(ka->sa.sa_flags & SA_NODEFER))
666 sigaddset(&tsk->blocked, sig);
666 recalc_sigpending(); 667 recalc_sigpending();
667 spin_unlock_irq(&tsk->sighand->siglock); 668 spin_unlock_irq(&tsk->sighand->siglock);
668 } 669 }
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 7f58afb27e71..411ea9996190 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -36,7 +36,7 @@ static struct flash_platform_data coyote_flash_data = {
36 36
37static struct resource coyote_flash_resource = { 37static struct resource coyote_flash_resource = {
38 .start = COYOTE_FLASH_BASE, 38 .start = COYOTE_FLASH_BASE,
39 .end = COYOTE_FLASH_BASE + COYOTE_FLASH_SIZE, 39 .end = COYOTE_FLASH_BASE + COYOTE_FLASH_SIZE - 1,
40 .flags = IORESOURCE_MEM, 40 .flags = IORESOURCE_MEM,
41}; 41};
42 42
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 65e356bd10d6..333459d6aa46 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -114,7 +114,7 @@ static struct flash_platform_data gtwx5715_flash_data = {
114 114
115static struct resource gtwx5715_flash_resource = { 115static struct resource gtwx5715_flash_resource = {
116 .start = GTWX5715_FLASH_BASE, 116 .start = GTWX5715_FLASH_BASE,
117 .end = GTWX5715_FLASH_BASE + GTWX5715_FLASH_SIZE, 117 .end = GTWX5715_FLASH_BASE + GTWX5715_FLASH_SIZE - 1,
118 .flags = IORESOURCE_MEM, 118 .flags = IORESOURCE_MEM,
119}; 119};
120 120
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 4633470a6a37..fa0646c8693b 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -36,7 +36,7 @@ static struct flash_platform_data ixdp425_flash_data = {
36 36
37static struct resource ixdp425_flash_resource = { 37static struct resource ixdp425_flash_resource = {
38 .start = IXDP425_FLASH_BASE, 38 .start = IXDP425_FLASH_BASE,
39 .end = IXDP425_FLASH_BASE + IXDP425_FLASH_SIZE, 39 .end = IXDP425_FLASH_BASE + IXDP425_FLASH_SIZE - 1,
40 .flags = IORESOURCE_MEM, 40 .flags = IORESOURCE_MEM,
41}; 41};
42 42
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 4d4d303ee3a8..24687f511bf5 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -35,6 +35,7 @@
35#include <asm/mach/map.h> 35#include <asm/mach/map.h>
36#include <asm/mach/serial_sa1100.h> 36#include <asm/mach/serial_sa1100.h>
37#include <asm/arch/assabet.h> 37#include <asm/arch/assabet.h>
38#include <asm/arch/mcp.h>
38 39
39#include "generic.h" 40#include "generic.h"
40 41
@@ -198,6 +199,11 @@ static struct irda_platform_data assabet_irda_data = {
198 .set_speed = assabet_irda_set_speed, 199 .set_speed = assabet_irda_set_speed,
199}; 200};
200 201
202static struct mcp_plat_data assabet_mcp_data = {
203 .mccr0 = MCCR0_ADM,
204 .sclk_rate = 11981000,
205};
206
201static void __init assabet_init(void) 207static void __init assabet_init(void)
202{ 208{
203 /* 209 /*
@@ -246,6 +252,7 @@ static void __init assabet_init(void)
246 sa11x0_set_flash_data(&assabet_flash_data, assabet_flash_resources, 252 sa11x0_set_flash_data(&assabet_flash_data, assabet_flash_resources,
247 ARRAY_SIZE(assabet_flash_resources)); 253 ARRAY_SIZE(assabet_flash_resources));
248 sa11x0_set_irda_data(&assabet_irda_data); 254 sa11x0_set_irda_data(&assabet_irda_data);
255 sa11x0_set_mcp_data(&assabet_mcp_data);
249} 256}
250 257
251/* 258/*
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 0aa918e24c31..9484be7dc671 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -29,6 +29,7 @@
29#include <asm/mach/serial_sa1100.h> 29#include <asm/mach/serial_sa1100.h>
30 30
31#include <asm/arch/cerf.h> 31#include <asm/arch/cerf.h>
32#include <asm/arch/mcp.h>
32#include "generic.h" 33#include "generic.h"
33 34
34static struct resource cerfuart2_resources[] = { 35static struct resource cerfuart2_resources[] = {
@@ -116,10 +117,16 @@ static void __init cerf_map_io(void)
116 GPDR |= CERF_GPIO_CF_RESET; 117 GPDR |= CERF_GPIO_CF_RESET;
117} 118}
118 119
120static struct mcp_plat_data cerf_mcp_data = {
121 .mccr0 = MCCR0_ADM,
122 .sclk_rate = 11981000,
123};
124
119static void __init cerf_init(void) 125static void __init cerf_init(void)
120{ 126{
121 platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices)); 127 platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices));
122 sa11x0_set_flash_data(&cerf_flash_data, &cerf_flash_resource, 1); 128 sa11x0_set_flash_data(&cerf_flash_data, &cerf_flash_resource, 1);
129 sa11x0_set_mcp_data(&cerf_mcp_data);
123} 130}
124 131
125MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube") 132MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 95ae217be1bc..3f1e358455e5 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -221,6 +221,11 @@ static struct platform_device sa11x0mcp_device = {
221 .resource = sa11x0mcp_resources, 221 .resource = sa11x0mcp_resources,
222}; 222};
223 223
224void sa11x0_set_mcp_data(struct mcp_plat_data *data)
225{
226 sa11x0mcp_device.dev.platform_data = data;
227}
228
224static struct resource sa11x0ssp_resources[] = { 229static struct resource sa11x0ssp_resources[] = {
225 [0] = { 230 [0] = {
226 .start = 0x80070000, 231 .start = 0x80070000,
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index bfe41da9923e..279e3afa3c39 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -34,5 +34,8 @@ struct resource;
34extern void sa11x0_set_flash_data(struct flash_platform_data *flash, 34extern void sa11x0_set_flash_data(struct flash_platform_data *flash,
35 struct resource *res, int nr); 35 struct resource *res, int nr);
36 36
37struct sa11x0_ssp_plat_ops;
38extern void sa11x0_set_ssp_data(struct sa11x0_ssp_plat_ops *ops);
39
37struct irda_platform_data; 40struct irda_platform_data;
38void sa11x0_set_irda_data(struct irda_platform_data *irda); 41void sa11x0_set_irda_data(struct irda_platform_data *irda);
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index 870b488aeda4..ed6744d480af 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -13,12 +13,23 @@
13#include <asm/mach/arch.h> 13#include <asm/mach/arch.h>
14#include <asm/mach/map.h> 14#include <asm/mach/map.h>
15#include <asm/mach/serial_sa1100.h> 15#include <asm/mach/serial_sa1100.h>
16#include <asm/arch/mcp.h>
16 17
17#include "generic.h" 18#include "generic.h"
18 19
19 20
20#warning "include/asm/arch-sa1100/ide.h needs fixing for lart" 21#warning "include/asm/arch-sa1100/ide.h needs fixing for lart"
21 22
23static struct mcp_plat_data lart_mcp_data = {
24 .mccr0 = MCCR0_ADM,
25 .sclk_rate = 11981000,
26};
27
28static void __init lart_init(void)
29{
30 sa11x0_set_mcp_data(&lart_mcp_data);
31}
32
22static struct map_desc lart_io_desc[] __initdata = { 33static struct map_desc lart_io_desc[] __initdata = {
23 /* virtual physical length type */ 34 /* virtual physical length type */
24 { 0xe8000000, 0x00000000, 0x00400000, MT_DEVICE }, /* main flash memory */ 35 { 0xe8000000, 0x00000000, 0x00400000, MT_DEVICE }, /* main flash memory */
@@ -47,5 +58,6 @@ MACHINE_START(LART, "LART")
47 .boot_params = 0xc0000100, 58 .boot_params = 0xc0000100,
48 .map_io = lart_map_io, 59 .map_io = lart_map_io,
49 .init_irq = sa1100_init_irq, 60 .init_irq = sa1100_init_irq,
61 .init_machine = lart_init,
50 .timer = &sa1100_timer, 62 .timer = &sa1100_timer,
51MACHINE_END 63MACHINE_END
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 43a00359fcdd..7482288278d9 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -18,6 +18,7 @@
18#include <asm/mach/flash.h> 18#include <asm/mach/flash.h>
19#include <asm/mach/map.h> 19#include <asm/mach/map.h>
20#include <asm/mach/serial_sa1100.h> 20#include <asm/mach/serial_sa1100.h>
21#include <asm/arch/mcp.h>
21#include <asm/arch/shannon.h> 22#include <asm/arch/shannon.h>
22 23
23#include "generic.h" 24#include "generic.h"
@@ -52,9 +53,15 @@ static struct resource shannon_flash_resource = {
52 .flags = IORESOURCE_MEM, 53 .flags = IORESOURCE_MEM,
53}; 54};
54 55
56static struct mcp_plat_data shannon_mcp_data = {
57 .mccr0 = MCCR0_ADM,
58 .sclk_rate = 11981000,
59};
60
55static void __init shannon_init(void) 61static void __init shannon_init(void)
56{ 62{
57 sa11x0_set_flash_data(&shannon_flash_data, &shannon_flash_resource, 1); 63 sa11x0_set_flash_data(&shannon_flash_data, &shannon_flash_resource, 1);
64 sa11x0_set_mcp_data(&shannon_mcp_data);
58} 65}
59 66
60static void __init shannon_map_io(void) 67static void __init shannon_map_io(void)
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 77978586b126..07f6d5fd7bb0 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -23,6 +23,7 @@
23#include <asm/mach/flash.h> 23#include <asm/mach/flash.h>
24#include <asm/mach/map.h> 24#include <asm/mach/map.h>
25#include <asm/mach/serial_sa1100.h> 25#include <asm/mach/serial_sa1100.h>
26#include <asm/arch/mcp.h>
26#include <asm/arch/simpad.h> 27#include <asm/arch/simpad.h>
27 28
28#include <linux/serial_core.h> 29#include <linux/serial_core.h>
@@ -123,6 +124,11 @@ static struct resource simpad_flash_resources [] = {
123 } 124 }
124}; 125};
125 126
127static struct mcp_plat_data simpad_mcp_data = {
128 .mccr0 = MCCR0_ADM,
129 .sclk_rate = 11981000,
130};
131
126 132
127 133
128static void __init simpad_map_io(void) 134static void __init simpad_map_io(void)
@@ -157,6 +163,7 @@ static void __init simpad_map_io(void)
157 163
158 sa11x0_set_flash_data(&simpad_flash_data, simpad_flash_resources, 164 sa11x0_set_flash_data(&simpad_flash_data, simpad_flash_resources,
159 ARRAY_SIZE(simpad_flash_resources)); 165 ARRAY_SIZE(simpad_flash_resources));
166 sa11x0_set_mcp_data(&simpad_mcp_data);
160} 167}
161 168
162static void simpad_power_off(void) 169static void simpad_power_off(void)
diff --git a/arch/arm26/kernel/signal.c b/arch/arm26/kernel/signal.c
index 356d9809cc0b..ce2055bdc9ee 100644
--- a/arch/arm26/kernel/signal.c
+++ b/arch/arm26/kernel/signal.c
@@ -454,14 +454,13 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
454 if (ka->sa.sa_flags & SA_ONESHOT) 454 if (ka->sa.sa_flags & SA_ONESHOT)
455 ka->sa.sa_handler = SIG_DFL; 455 ka->sa.sa_handler = SIG_DFL;
456 456
457 if (!(ka->sa.sa_flags & SA_NODEFER)) { 457 spin_lock_irq(&tsk->sighand->siglock);
458 spin_lock_irq(&tsk->sighand->siglock); 458 sigorsets(&tsk->blocked, &tsk->blocked,
459 sigorsets(&tsk->blocked, &tsk->blocked, 459 &ka->sa.sa_mask);
460 &ka->sa.sa_mask); 460 if (!(ka->sa.sa_flags & SA_NODEFER))
461 sigaddset(&tsk->blocked, sig); 461 sigaddset(&tsk->blocked, sig);
462 recalc_sigpending(); 462 recalc_sigpending();
463 spin_unlock_irq(&tsk->sighand->siglock); 463 spin_unlock_irq(&tsk->sighand->siglock);
464 }
465 return; 464 return;
466 } 465 }
467 466
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index 85e0032e664f..693771961f85 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -517,13 +517,12 @@ handle_signal(int canrestart, unsigned long sig,
517 if (ka->sa.sa_flags & SA_ONESHOT) 517 if (ka->sa.sa_flags & SA_ONESHOT)
518 ka->sa.sa_handler = SIG_DFL; 518 ka->sa.sa_handler = SIG_DFL;
519 519
520 if (!(ka->sa.sa_flags & SA_NODEFER)) { 520 spin_lock_irq(&current->sighand->siglock);
521 spin_lock_irq(&current->sighand->siglock); 521 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
522 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 522 if (!(ka->sa.sa_flags & SA_NODEFER))
523 sigaddset(&current->blocked,sig); 523 sigaddset(&current->blocked,sig);
524 recalc_sigpending(); 524 recalc_sigpending();
525 spin_unlock_irq(&current->sighand->siglock); 525 spin_unlock_irq(&current->sighand->siglock);
526 }
527} 526}
528 527
529/* 528/*
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index fb4c79d5b76b..0a3614dab887 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -568,13 +568,12 @@ handle_signal(int canrestart, unsigned long sig,
568 if (ka->sa.sa_flags & SA_ONESHOT) 568 if (ka->sa.sa_flags & SA_ONESHOT)
569 ka->sa.sa_handler = SIG_DFL; 569 ka->sa.sa_handler = SIG_DFL;
570 570
571 if (!(ka->sa.sa_flags & SA_NODEFER)) { 571 spin_lock_irq(&current->sighand->siglock);
572 spin_lock_irq(&current->sighand->siglock); 572 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
573 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 573 if (!(ka->sa.sa_flags & SA_NODEFER))
574 sigaddset(&current->blocked,sig); 574 sigaddset(&current->blocked,sig);
575 recalc_sigpending(); 575 recalc_sigpending();
576 spin_unlock_irq(&current->sighand->siglock); 576 spin_unlock_irq(&current->sighand->siglock);
577 }
578} 577}
579 578
580/* 579/*
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 36a2dffc8ebd..d4ccc0728dfe 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -506,13 +506,12 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
506 else 506 else
507 setup_frame(sig, ka, oldset, regs); 507 setup_frame(sig, ka, oldset, regs);
508 508
509 if (!(ka->sa.sa_flags & SA_NODEFER)) { 509 spin_lock_irq(&current->sighand->siglock);
510 spin_lock_irq(&current->sighand->siglock); 510 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
511 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 511 if (!(ka->sa.sa_flags & SA_NODEFER))
512 sigaddset(&current->blocked, sig); 512 sigaddset(&current->blocked, sig);
513 recalc_sigpending(); 513 recalc_sigpending();
514 spin_unlock_irq(&current->sighand->siglock); 514 spin_unlock_irq(&current->sighand->siglock);
515 }
516} /* end handle_signal() */ 515} /* end handle_signal() */
517 516
518/*****************************************************************************/ 517/*****************************************************************************/
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 5aab87eae1f9..f13d5e82d4b9 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -488,13 +488,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
488 else 488 else
489 setup_frame(sig, ka, oldset, regs); 489 setup_frame(sig, ka, oldset, regs);
490 490
491 if (!(ka->sa.sa_flags & SA_NODEFER)) { 491 spin_lock_irq(&current->sighand->siglock);
492 spin_lock_irq(&current->sighand->siglock); 492 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
493 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 493 if (!(ka->sa.sa_flags & SA_NODEFER))
494 sigaddset(&current->blocked,sig); 494 sigaddset(&current->blocked,sig);
495 recalc_sigpending(); 495 recalc_sigpending();
496 spin_unlock_irq(&current->sighand->siglock); 496 spin_unlock_irq(&current->sighand->siglock);
497 }
498} 497}
499 498
500/* 499/*
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 89ef7adc63a4..140e340569c6 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -577,10 +577,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
577 else 577 else
578 ret = setup_frame(sig, ka, oldset, regs); 578 ret = setup_frame(sig, ka, oldset, regs);
579 579
580 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) { 580 if (ret) {
581 spin_lock_irq(&current->sighand->siglock); 581 spin_lock_irq(&current->sighand->siglock);
582 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 582 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
583 sigaddset(&current->blocked,sig); 583 if (!(ka->sa.sa_flags & SA_NODEFER))
584 sigaddset(&current->blocked,sig);
584 recalc_sigpending(); 585 recalc_sigpending();
585 spin_unlock_irq(&current->sighand->siglock); 586 spin_unlock_irq(&current->sighand->siglock);
586 } 587 }
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index ade5bc57c34c..c96bea14b98f 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -165,7 +165,6 @@ static int __init pcibios_init(void)
165 if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT)) 165 if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
166 pcibios_sort(); 166 pcibios_sort();
167#endif 167#endif
168 pci_assign_unassigned_resources();
169 return 0; 168 return 0;
170} 169}
171 170
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 93a364c82150..3cc480998a47 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -170,43 +170,26 @@ static void __init pcibios_allocate_resources(int pass)
170static int __init pcibios_assign_resources(void) 170static int __init pcibios_assign_resources(void)
171{ 171{
172 struct pci_dev *dev = NULL; 172 struct pci_dev *dev = NULL;
173 int idx; 173 struct resource *r, *pr;
174 struct resource *r;
175
176 for_each_pci_dev(dev) {
177 int class = dev->class >> 8;
178
179 /* Don't touch classless devices and host bridges */
180 if (!class || class == PCI_CLASS_BRIDGE_HOST)
181 continue;
182
183 for(idx=0; idx<6; idx++) {
184 r = &dev->resource[idx];
185
186 /*
187 * Don't touch IDE controllers and I/O ports of video cards!
188 */
189 if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
190 (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
191 continue;
192
193 /*
194 * We shall assign a new address to this resource, either because
195 * the BIOS forgot to do so or because we have decided the old
196 * address was unusable for some reason.
197 */
198 if (!r->start && r->end)
199 pci_assign_resource(dev, idx);
200 }
201 174
202 if (pci_probe & PCI_ASSIGN_ROMS) { 175 if (!(pci_probe & PCI_ASSIGN_ROMS)) {
176 /* Try to use BIOS settings for ROMs, otherwise let
177 pci_assign_unassigned_resources() allocate the new
178 addresses. */
179 for_each_pci_dev(dev) {
203 r = &dev->resource[PCI_ROM_RESOURCE]; 180 r = &dev->resource[PCI_ROM_RESOURCE];
204 r->end -= r->start; 181 if (!r->flags || !r->start)
205 r->start = 0; 182 continue;
206 if (r->end) 183 pr = pci_find_parent_resource(dev, r);
207 pci_assign_resource(dev, PCI_ROM_RESOURCE); 184 if (!pr || request_resource(pr, r) < 0) {
185 r->end -= r->start;
186 r->start = 0;
187 }
208 } 188 }
209 } 189 }
190
191 pci_assign_unassigned_resources();
192
210 return 0; 193 return 0;
211} 194}
212 195
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index b8a0a7d257a9..774f34b675cf 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -467,15 +467,12 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
467 if (!setup_frame(sig, ka, info, oldset, scr)) 467 if (!setup_frame(sig, ka, info, oldset, scr))
468 return 0; 468 return 0;
469 469
470 if (!(ka->sa.sa_flags & SA_NODEFER)) { 470 spin_lock_irq(&current->sighand->siglock);
471 spin_lock_irq(&current->sighand->siglock); 471 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
472 { 472 if (!(ka->sa.sa_flags & SA_NODEFER))
473 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 473 sigaddset(&current->blocked, sig);
474 sigaddset(&current->blocked, sig); 474 recalc_sigpending();
475 recalc_sigpending(); 475 spin_unlock_irq(&current->sighand->siglock);
476 }
477 spin_unlock_irq(&current->sighand->siglock);
478 }
479 return 1; 476 return 1;
480} 477}
481 478
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 54d9ed444e4a..f9472c50ab42 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -380,6 +380,7 @@ void pcibios_bus_to_resource(struct pci_dev *dev,
380 res->start = region->start + offset; 380 res->start = region->start + offset;
381 res->end = region->end + offset; 381 res->end = region->end + offset;
382} 382}
383EXPORT_SYMBOL(pcibios_bus_to_resource);
383 384
384static int __devinit is_valid_resource(struct pci_dev *dev, int idx) 385static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
385{ 386{
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 5aef7e406ef5..71763f7a1d19 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -341,13 +341,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
341 /* Set up the stack frame */ 341 /* Set up the stack frame */
342 setup_rt_frame(sig, ka, info, oldset, regs); 342 setup_rt_frame(sig, ka, info, oldset, regs);
343 343
344 if (!(ka->sa.sa_flags & SA_NODEFER)) { 344 spin_lock_irq(&current->sighand->siglock);
345 spin_lock_irq(&current->sighand->siglock); 345 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
346 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 346 if (!(ka->sa.sa_flags & SA_NODEFER))
347 sigaddset(&current->blocked,sig); 347 sigaddset(&current->blocked,sig);
348 recalc_sigpending(); 348 recalc_sigpending();
349 spin_unlock_irq(&current->sighand->siglock); 349 spin_unlock_irq(&current->sighand->siglock);
350 }
351} 350}
352 351
353/* 352/*
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 30dceb59a462..43a2726c0d0a 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -732,13 +732,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
732 if (ka->sa.sa_flags & SA_ONESHOT) 732 if (ka->sa.sa_flags & SA_ONESHOT)
733 ka->sa.sa_handler = SIG_DFL; 733 ka->sa.sa_handler = SIG_DFL;
734 734
735 if (!(ka->sa.sa_flags & SA_NODEFER)) { 735 spin_lock_irq(&current->sighand->siglock);
736 spin_lock_irq(&current->sighand->siglock); 736 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
737 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 737 if (!(ka->sa.sa_flags & SA_NODEFER))
738 sigaddset(&current->blocked,sig); 738 sigaddset(&current->blocked,sig);
739 recalc_sigpending(); 739 recalc_sigpending();
740 spin_unlock_irq(&current->sighand->siglock); 740 spin_unlock_irq(&current->sighand->siglock);
741 }
742} 741}
743 742
744/* 743/*
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 40244782a8e5..4c114ae21793 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -155,13 +155,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
155 else 155 else
156 setup_irix_frame(ka, regs, sig, oldset); 156 setup_irix_frame(ka, regs, sig, oldset);
157 157
158 if (!(ka->sa.sa_flags & SA_NODEFER)) { 158 spin_lock_irq(&current->sighand->siglock);
159 spin_lock_irq(&current->sighand->siglock); 159 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
160 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 160 if (!(ka->sa.sa_flags & SA_NODEFER))
161 sigaddset(&current->blocked,sig); 161 sigaddset(&current->blocked,sig);
162 recalc_sigpending(); 162 recalc_sigpending();
163 spin_unlock_irq(&current->sighand->siglock); 163 spin_unlock_irq(&current->sighand->siglock);
164 }
165} 164}
166 165
167asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs) 166asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 65ee15396ffd..0209c1dd1429 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -425,13 +425,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
425 setup_frame(ka, regs, sig, oldset); 425 setup_frame(ka, regs, sig, oldset);
426#endif 426#endif
427 427
428 if (!(ka->sa.sa_flags & SA_NODEFER)) { 428 spin_lock_irq(&current->sighand->siglock);
429 spin_lock_irq(&current->sighand->siglock); 429 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
430 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 430 if (!(ka->sa.sa_flags & SA_NODEFER))
431 sigaddset(&current->blocked,sig); 431 sigaddset(&current->blocked,sig);
432 recalc_sigpending(); 432 recalc_sigpending();
433 spin_unlock_irq(&current->sighand->siglock); 433 spin_unlock_irq(&current->sighand->siglock);
434 }
435} 434}
436 435
437extern int do_signal32(sigset_t *oldset, struct pt_regs *regs); 436extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index c1a69cf232f9..f6875f023a29 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -751,13 +751,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
751 else 751 else
752 setup_frame(ka, regs, sig, oldset); 752 setup_frame(ka, regs, sig, oldset);
753 753
754 if (!(ka->sa.sa_flags & SA_NODEFER)) { 754 spin_lock_irq(&current->sighand->siglock);
755 spin_lock_irq(&current->sighand->siglock); 755 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
756 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 756 if (!(ka->sa.sa_flags & SA_NODEFER))
757 sigaddset(&current->blocked,sig); 757 sigaddset(&current->blocked,sig);
758 recalc_sigpending(); 758 recalc_sigpending();
759 spin_unlock_irq(&current->sighand->siglock); 759 spin_unlock_irq(&current->sighand->siglock);
760 }
761} 760}
762 761
763int do_signal32(sigset_t *oldset, struct pt_regs *regs) 762int do_signal32(sigset_t *oldset, struct pt_regs *regs)
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 9421bb98ea63..55d71c15e1f7 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -517,13 +517,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
517 if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall)) 517 if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
518 return 0; 518 return 0;
519 519
520 if (!(ka->sa.sa_flags & SA_NODEFER)) { 520 spin_lock_irq(&current->sighand->siglock);
521 spin_lock_irq(&current->sighand->siglock); 521 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
522 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 522 if (!(ka->sa.sa_flags & SA_NODEFER))
523 sigaddset(&current->blocked,sig); 523 sigaddset(&current->blocked,sig);
524 recalc_sigpending(); 524 recalc_sigpending();
525 spin_unlock_irq(&current->sighand->siglock); 525 spin_unlock_irq(&current->sighand->siglock);
526 }
527 return 1; 526 return 1;
528} 527}
529 528
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index f9b0d778dd82..d1b6e6dcb504 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -21,11 +21,13 @@ CC := $(CC) -m32
21endif 21endif
22 22
23LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic 23LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic
24CPPFLAGS += -Iarch/$(ARCH) 24CPPFLAGS += -Iarch/$(ARCH) -Iinclude3
25AFLAGS += -Iarch/$(ARCH) 25AFLAGS += -Iarch/$(ARCH)
26CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \ 26CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
27 -ffixed-r2 -mmultiple 27 -ffixed-r2 -mmultiple
28CPP = $(CC) -E $(CFLAGS) 28CPP = $(CC) -E $(CFLAGS)
29# Temporary hack until we have migrated to asm-powerpc
30LINUXINCLUDE += -Iinclude3
29 31
30CHECKFLAGS += -D__powerpc__ 32CHECKFLAGS += -D__powerpc__
31 33
@@ -101,6 +103,7 @@ endef
101 103
102archclean: 104archclean:
103 $(Q)$(MAKE) $(clean)=arch/ppc/boot 105 $(Q)$(MAKE) $(clean)=arch/ppc/boot
106 $(Q)rm -rf include3
104 107
105prepare: include/asm-$(ARCH)/offsets.h checkbin 108prepare: include/asm-$(ARCH)/offsets.h checkbin
106 109
@@ -110,6 +113,12 @@ arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
110include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s 113include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
111 $(call filechk,gen-asm-offsets) 114 $(call filechk,gen-asm-offsets)
112 115
116# Temporary hack until we have migrated to asm-powerpc
117include/asm: include3/asm
118include3/asm:
119 $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi
120 $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
121
113# Use the file '.tmp_gas_check' for binutils tests, as gas won't output 122# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
114# to stdout and these checks are run even on install targets. 123# to stdout and these checks are run even on install targets.
115TOUT := .tmp_gas_check 124TOUT := .tmp_gas_check
diff --git a/arch/ppc/boot/utils/addRamDisk.c b/arch/ppc/boot/utils/addRamDisk.c
deleted file mode 100644
index 93400dfcce7f..000000000000
--- a/arch/ppc/boot/utils/addRamDisk.c
+++ /dev/null
@@ -1,203 +0,0 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <netinet/in.h>
4#include <unistd.h>
5#include <sys/types.h>
6#include <sys/stat.h>
7#include <string.h>
8
9#define ElfHeaderSize (64 * 1024)
10#define ElfPages (ElfHeaderSize / 4096)
11#define KERNELBASE (0xc0000000)
12
13void get4k(FILE *file, char *buf )
14{
15 unsigned j;
16 unsigned num = fread(buf, 1, 4096, file);
17 for ( j=num; j<4096; ++j )
18 buf[j] = 0;
19}
20
21void put4k(FILE *file, char *buf )
22{
23 fwrite(buf, 1, 4096, file);
24}
25
26void death(const char *msg, FILE *fdesc, const char *fname)
27{
28 printf(msg);
29 fclose(fdesc);
30 unlink(fname);
31 exit(1);
32}
33
34int main(int argc, char **argv)
35{
36 char inbuf[4096];
37 FILE *ramDisk = NULL;
38 FILE *inputVmlinux = NULL;
39 FILE *outputVmlinux = NULL;
40 unsigned i = 0;
41 u_int32_t ramFileLen = 0;
42 u_int32_t ramLen = 0;
43 u_int32_t roundR = 0;
44 u_int32_t kernelLen = 0;
45 u_int32_t actualKernelLen = 0;
46 u_int32_t round = 0;
47 u_int32_t roundedKernelLen = 0;
48 u_int32_t ramStartOffs = 0;
49 u_int32_t ramPages = 0;
50 u_int32_t roundedKernelPages = 0;
51 u_int32_t hvReleaseData = 0;
52 u_int32_t eyeCatcher = 0xc8a5d9c4;
53 u_int32_t naca = 0;
54 u_int32_t xRamDisk = 0;
55 u_int32_t xRamDiskSize = 0;
56 if ( argc < 2 ) {
57 printf("Name of RAM disk file missing.\n");
58 exit(1);
59 }
60
61 if ( argc < 3 ) {
62 printf("Name of vmlinux file missing.\n");
63 exit(1);
64 }
65
66 if ( argc < 4 ) {
67 printf("Name of vmlinux output file missing.\n");
68 exit(1);
69 }
70
71 ramDisk = fopen(argv[1], "r");
72 if ( ! ramDisk ) {
73 printf("RAM disk file \"%s\" failed to open.\n", argv[1]);
74 exit(1);
75 }
76 inputVmlinux = fopen(argv[2], "r");
77 if ( ! inputVmlinux ) {
78 printf("vmlinux file \"%s\" failed to open.\n", argv[2]);
79 exit(1);
80 }
81 outputVmlinux = fopen(argv[3], "w+");
82 if ( ! outputVmlinux ) {
83 printf("output vmlinux file \"%s\" failed to open.\n", argv[3]);
84 exit(1);
85 }
86 fseek(ramDisk, 0, SEEK_END);
87 ramFileLen = ftell(ramDisk);
88 fseek(ramDisk, 0, SEEK_SET);
89 printf("%s file size = %d\n", argv[1], ramFileLen);
90
91 ramLen = ramFileLen;
92
93 roundR = 4096 - (ramLen % 4096);
94 if ( roundR ) {
95 printf("Rounding RAM disk file up to a multiple of 4096, adding %d\n", roundR);
96 ramLen += roundR;
97 }
98
99 printf("Rounded RAM disk size is %d\n", ramLen);
100 fseek(inputVmlinux, 0, SEEK_END);
101 kernelLen = ftell(inputVmlinux);
102 fseek(inputVmlinux, 0, SEEK_SET);
103 printf("kernel file size = %d\n", kernelLen);
104 if ( kernelLen == 0 ) {
105 printf("You must have a linux kernel specified as argv[2]\n");
106 exit(1);
107 }
108
109 actualKernelLen = kernelLen - ElfHeaderSize;
110
111 printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
112
113 round = actualKernelLen % 4096;
114 roundedKernelLen = actualKernelLen;
115 if ( round )
116 roundedKernelLen += (4096 - round);
117
118 printf("actual kernel length rounded up to a 4k multiple = %d\n", roundedKernelLen);
119
120 ramStartOffs = roundedKernelLen;
121 ramPages = ramLen / 4096;
122
123 printf("RAM disk pages to copy = %d\n", ramPages);
124
125 // Copy 64K ELF header
126 for (i=0; i<(ElfPages); ++i) {
127 get4k( inputVmlinux, inbuf );
128 put4k( outputVmlinux, inbuf );
129 }
130
131 roundedKernelPages = roundedKernelLen / 4096;
132
133 fseek(inputVmlinux, ElfHeaderSize, SEEK_SET);
134
135 for ( i=0; i<roundedKernelPages; ++i ) {
136 get4k( inputVmlinux, inbuf );
137 put4k( outputVmlinux, inbuf );
138 }
139
140 for ( i=0; i<ramPages; ++i ) {
141 get4k( ramDisk, inbuf );
142 put4k( outputVmlinux, inbuf );
143 }
144
145 /* Close the input files */
146 fclose(ramDisk);
147 fclose(inputVmlinux);
148 /* And flush the written output file */
149 fflush(outputVmlinux);
150
151 /* fseek to the hvReleaseData pointer */
152 fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
153 if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
154 death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[3]);
155 }
156 hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
157 printf("hvReleaseData is at %08x\n", hvReleaseData);
158
159 /* fseek to the hvReleaseData */
160 fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
161 if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
162 death("Could not read hvReleaseData\n", outputVmlinux, argv[3]);
163 }
164 /* Check hvReleaseData sanity */
165 if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
166 death("hvReleaseData is invalid\n", outputVmlinux, argv[3]);
167 }
168 /* Get the naca pointer */
169 naca = ntohl(*((u_int32_t *) &inbuf[0x0c])) - KERNELBASE;
170 printf("naca is at %08x\n", naca);
171
172 /* fseek to the naca */
173 fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
174 if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
175 death("Could not read naca\n", outputVmlinux, argv[3]);
176 }
177 xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
178 xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
179 /* Make sure a RAM disk isn't already present */
180 if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
181 death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[3]);
182 }
183 /* Fill in the values */
184 *((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
185 *((u_int32_t *) &inbuf[0x14]) = htonl(ramPages);
186
187 /* Write out the new naca */
188 fflush(outputVmlinux);
189 fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
190 if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
191 death("Could not write naca\n", outputVmlinux, argv[3]);
192 }
193 printf("RAM Disk of 0x%x pages size is attached to the kernel at offset 0x%08x\n",
194 ramPages, ramStartOffs);
195
196 /* Done */
197 fclose(outputVmlinux);
198 /* Set permission to executable */
199 chmod(argv[3], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
200
201 return 0;
202}
203
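
[Annotation] The deleted tool glued a ramdisk onto an iSeries vmlinux by rounding both files up to 4 KiB pages and then patching the xRamDisk/xRamDiskSize fields of the in-image naca; it is obsolete now that the naca is initialised statically in LparData.c later in this patch. In passing, note its round-up was off for already-aligned input: roundR = 4096 - (ramLen % 4096) yields 4096, not 0, when ramLen is a page multiple, so an aligned ramdisk was padded by one extra page. A sketch of the usual form that leaves aligned values unchanged (reusing the tool's u_int32_t type):

	#include <sys/types.h>

	/* Round len up to a 4 KiB multiple; aligned inputs pass through. */
	static u_int32_t round_up_4k(u_int32_t len)
	{
		return (len + 4095) & ~(u_int32_t) 4095;
	}
	/* round_up_4k(8192) == 8192, where the deleted code padded to 12288 */
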
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
index 8aaeb6f4e750..2244bf91e593 100644
--- a/arch/ppc/kernel/signal.c
+++ b/arch/ppc/kernel/signal.c
@@ -759,13 +759,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
759 else 759 else
760 handle_signal(signr, &ka, &info, oldset, regs, newsp); 760 handle_signal(signr, &ka, &info, oldset, regs, newsp);
761 761
762 if (!(ka.sa.sa_flags & SA_NODEFER)) { 762 spin_lock_irq(&current->sighand->siglock);
763 spin_lock_irq(&current->sighand->siglock); 763 sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
764 sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask); 764 if (!(ka.sa.sa_flags & SA_NODEFER))
765 sigaddset(&current->blocked, signr); 765 sigaddset(&current->blocked, signr);
766 recalc_sigpending(); 766 recalc_sigpending();
767 spin_unlock_irq(&current->sighand->siglock); 767 spin_unlock_irq(&current->sighand->siglock);
768 }
769 768
770 return 1; 769 return 1;
771} 770}
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 55a381af4e37..4c888da89b3c 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -57,7 +57,7 @@ unsigned char __res[sizeof(bd_t)];
57extern void m8xx_ide_init(void); 57extern void m8xx_ide_init(void);
58 58
59extern unsigned long find_available_memory(void); 59extern unsigned long find_available_memory(void);
60extern void m8xx_cpm_reset(); 60extern void m8xx_cpm_reset(void);
61extern void m8xx_wdt_handler_install(bd_t *bp); 61extern void m8xx_wdt_handler_install(bd_t *bp);
62extern void rpxfb_alloc_pages(void); 62extern void rpxfb_alloc_pages(void);
63extern void cpm_interrupt_init(void); 63extern void cpm_interrupt_init(void);
@@ -266,8 +266,8 @@ m8xx_show_percpuinfo(struct seq_file *m, int i)
266 266
267 bp = (bd_t *)__res; 267 bp = (bd_t *)__res;
268 268
269 seq_printf(m, "clock\t\t: %ldMHz\n" 269 seq_printf(m, "clock\t\t: %uMHz\n"
270 "bus clock\t: %ldMHz\n", 270 "bus clock\t: %uMHz\n",
271 bp->bi_intfreq / 1000000, 271 bp->bi_intfreq / 1000000,
272 bp->bi_busfreq / 1000000); 272 bp->bi_busfreq / 1000000);
273 273
@@ -423,7 +423,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
423 ppc_md.find_end_of_memory = m8xx_find_end_of_memory; 423 ppc_md.find_end_of_memory = m8xx_find_end_of_memory;
424 ppc_md.setup_io_mappings = m8xx_map_io; 424 ppc_md.setup_io_mappings = m8xx_map_io;
425 425
426#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 426#if defined(CONFIG_BLK_DEV_MPC8xx_IDE)
427 m8xx_ide_init(); 427 m8xx_ide_init();
428#endif 428#endif
429} 429}
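
[Annotation] Three separate fixes in m8xx_setup.c: the m8xx_cpm_reset() declaration gains a proper (void) prototype, the seq_printf() specifiers become %u (bi_intfreq and bi_busfreq are evidently unsigned int in bd_t, so %ld was a type mismatch), and the IDE init is gated on the board-specific CONFIG_BLK_DEV_MPC8xx_IDE symbol rather than the generic IDE options. On the prototype point, a two-line illustration of what changes:

	extern void m8xx_cpm_reset();		/* empty parens: arguments go unchecked */
	extern void m8xx_cpm_reset(void);	/* prototype: stray arguments are an error */
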
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 2ce87836c671..13b262f10216 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -302,12 +302,6 @@ config GENERIC_HARDIRQS
302 bool 302 bool
303 default y 303 default y
304 304
305config MSCHUNKS
306 bool
307 depends on PPC_ISERIES
308 default y
309
310
311config PPC_RTAS 305config PPC_RTAS
312 bool 306 bool
313 depends on PPC_PSERIES || PPC_BPA 307 depends on PPC_PSERIES || PPC_BPA
@@ -350,13 +344,46 @@ config SECCOMP
350 344
351 If unsure, say Y. Only embedded should say N here. 345 If unsure, say Y. Only embedded should say N here.
352 346
347source "fs/Kconfig.binfmt"
348
349config HOTPLUG_CPU
350 bool "Support for hot-pluggable CPUs"
351 depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
352 select HOTPLUG
353 ---help---
354 Say Y here to be able to turn CPUs off and on.
355
356 Say N if you are unsure.
357
358config PROC_DEVICETREE
359 bool "Support for Open Firmware device tree in /proc"
360 depends on !PPC_ISERIES
361 help
362 This option adds a device-tree directory under /proc which contains
363 an image of the device tree that the kernel copies from Open
364 Firmware. If unsure, say Y here.
365
366config CMDLINE_BOOL
367 bool "Default bootloader kernel arguments"
368 depends on !PPC_ISERIES
369
370config CMDLINE
371 string "Initial kernel command string"
372 depends on CMDLINE_BOOL
373 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
374 help
375 On some platforms, there is currently no way for the boot loader to
376 pass arguments to the kernel. For these platforms, you can supply
377 some command-line options at build time by entering them here. In
378 most cases you will need to specify the root device here.
379
353endmenu 380endmenu
354 381
355config ISA_DMA_API 382config ISA_DMA_API
356 bool 383 bool
357 default y 384 default y
358 385
359menu "General setup" 386menu "Bus Options"
360 387
361config ISA 388config ISA
362 bool 389 bool
@@ -389,45 +416,12 @@ config PCI_DOMAINS
389 bool 416 bool
390 default PCI 417 default PCI
391 418
392source "fs/Kconfig.binfmt"
393
394source "drivers/pci/Kconfig" 419source "drivers/pci/Kconfig"
395 420
396config HOTPLUG_CPU
397 bool "Support for hot-pluggable CPUs"
398 depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
399 select HOTPLUG
400 ---help---
401 Say Y here to be able to turn CPUs off and on.
402
403 Say N if you are unsure.
404
405source "drivers/pcmcia/Kconfig" 421source "drivers/pcmcia/Kconfig"
406 422
407source "drivers/pci/hotplug/Kconfig" 423source "drivers/pci/hotplug/Kconfig"
408 424
409config PROC_DEVICETREE
410 bool "Support for Open Firmware device tree in /proc"
411 depends on !PPC_ISERIES
412 help
413 This option adds a device-tree directory under /proc which contains
414 an image of the device tree that the kernel copies from Open
415 Firmware. If unsure, say Y here.
416
417config CMDLINE_BOOL
418 bool "Default bootloader kernel arguments"
419 depends on !PPC_ISERIES
420
421config CMDLINE
422 string "Initial kernel command string"
423 depends on CMDLINE_BOOL
424 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
425 help
426 On some platforms, there is currently no way for the boot loader to
427 pass arguments to the kernel. For these platforms, you can supply
428 some command-line options at build time by entering them here. In
429 most cases you will need to specify the root device here.
430
431endmenu 425endmenu
432 426
433source "net/Kconfig" 427source "net/Kconfig"
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 731b84758331..6350cce82efb 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -55,6 +55,8 @@ LDFLAGS := -m elf64ppc
55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) 55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
56CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ 56CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \
57 -mcall-aixdesc 57 -mcall-aixdesc
58# Temporary hack until we have migrated to asm-powerpc
59CPPFLAGS += -Iinclude3
58 60
59GCC_VERSION := $(call cc-version) 61GCC_VERSION := $(call cc-version)
60GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) 62GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
@@ -112,6 +114,7 @@ all: $(KBUILD_IMAGE)
112 114
113archclean: 115archclean:
114 $(Q)$(MAKE) $(clean)=$(boot) 116 $(Q)$(MAKE) $(clean)=$(boot)
117 $(Q)rm -rf include3
115 118
116prepare: include/asm-ppc64/offsets.h 119prepare: include/asm-ppc64/offsets.h
117 120
@@ -121,6 +124,12 @@ arch/ppc64/kernel/asm-offsets.s: include/asm include/linux/version.h \
121include/asm-ppc64/offsets.h: arch/ppc64/kernel/asm-offsets.s 124include/asm-ppc64/offsets.h: arch/ppc64/kernel/asm-offsets.s
122 $(call filechk,gen-asm-offsets) 125 $(call filechk,gen-asm-offsets)
123 126
127# Temporary hack until we have migrated to asm-powerpc
128include/asm: include3/asm
129include3/asm:
130 $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi;
131 $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
132
124define archhelp 133define archhelp
125 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 134 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
126 echo ' zImage.initrd- Compressed kernel image with initrd attached,' 135 echo ' zImage.initrd- Compressed kernel image with initrd attached,'
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index 683b2d43c15f..2c5f5e73d00c 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -22,8 +22,8 @@
22 22
23 23
24HOSTCC := gcc 24HOSTCC := gcc
25BOOTCFLAGS := $(HOSTCFLAGS) $(LINUXINCLUDE) -fno-builtin 25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include)
26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional 26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds 27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds
28OBJCOPYFLAGS := contents,alloc,load,readonly,data 28OBJCOPYFLAGS := contents,alloc,load,readonly,data
29 29
diff --git a/arch/ppc64/boot/addnote.c b/arch/ppc64/boot/addnote.c
index 719663a694bb..8041a9845ab7 100644
--- a/arch/ppc64/boot/addnote.c
+++ b/arch/ppc64/boot/addnote.c
@@ -157,7 +157,7 @@ main(int ac, char **av)
157 PUT_32BE(ns, strlen(arch) + 1); 157 PUT_32BE(ns, strlen(arch) + 1);
158 PUT_32BE(ns + 4, N_DESCR * 4); 158 PUT_32BE(ns + 4, N_DESCR * 4);
159 PUT_32BE(ns + 8, 0x1275); 159 PUT_32BE(ns + 8, 0x1275);
160 strcpy(&buf[ns + 12], arch); 160 strcpy((char *) &buf[ns + 12], arch);
161 ns += 12 + strlen(arch) + 1; 161 ns += 12 + strlen(arch) + 1;
162 for (i = 0; i < N_DESCR; ++i, ns += 4) 162 for (i = 0; i < N_DESCR; ++i, ns += 4)
163 PUT_32BE(ns, descr[i]); 163 PUT_32BE(ns, descr[i]);
@@ -172,7 +172,7 @@ main(int ac, char **av)
172 PUT_32BE(ns, strlen(rpaname) + 1); 172 PUT_32BE(ns, strlen(rpaname) + 1);
173 PUT_32BE(ns + 4, sizeof(rpanote)); 173 PUT_32BE(ns + 4, sizeof(rpanote));
174 PUT_32BE(ns + 8, 0x12759999); 174 PUT_32BE(ns + 8, 0x12759999);
175 strcpy(&buf[ns + 12], rpaname); 175 strcpy((char *) &buf[ns + 12], rpaname);
176 ns += 12 + ROUNDUP(strlen(rpaname) + 1); 176 ns += 12 + ROUNDUP(strlen(rpaname) + 1);
177 for (i = 0; i < N_RPA_DESCR; ++i, ns += 4) 177 for (i = 0; i < N_RPA_DESCR; ++i, ns += 4)
178 PUT_32BE(ns, rpanote[i]); 178 PUT_32BE(ns, rpanote[i]);
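
[Annotation] The two casts cure a pointer-signedness warning: the note buffer is presumably declared as unsigned char buf[] in addnote.c (an assumption from context; the declaration is outside this hunk), while strcpy() takes plain char *. Sketch of the pattern:

	#include <string.h>

	static unsigned char buf[512];	/* assumed to mirror addnote.c's buffer */

	static void put_note_name(int ns, const char *name)
	{
		/* The cast changes no bytes; it only reconciles the
		 * unsigned char * buffer with strcpy()'s char * parameter. */
		strcpy((char *) &buf[ns + 12], name);
	}
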
diff --git a/arch/ppc64/boot/crt0.S b/arch/ppc64/boot/crt0.S
index 04d3e74cd72f..3861e7f9cf19 100644
--- a/arch/ppc64/boot/crt0.S
+++ b/arch/ppc64/boot/crt0.S
@@ -9,7 +9,7 @@
9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32. 9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
10 */ 10 */
11 11
12#include <asm/ppc_asm.h> 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl _start 15 .globl _start
diff --git a/arch/ppc64/boot/div64.S b/arch/ppc64/boot/div64.S
index 38f7e466d7d6..722f360a32a9 100644
--- a/arch/ppc64/boot/div64.S
+++ b/arch/ppc64/boot/div64.S
@@ -13,7 +13,7 @@
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16#include <asm/ppc_asm.h> 16#include "ppc_asm.h"
17 17
18 .globl __div64_32 18 .globl __div64_32
19__div64_32: 19__div64_32:
diff --git a/arch/ppc64/boot/elf.h b/arch/ppc64/boot/elf.h
new file mode 100644
index 000000000000..d4828fcf1cb9
--- /dev/null
+++ b/arch/ppc64/boot/elf.h
@@ -0,0 +1,149 @@
1#ifndef _PPC_BOOT_ELF_H_
2#define _PPC_BOOT_ELF_H_
3
4/* 32-bit ELF base types. */
5typedef unsigned int Elf32_Addr;
6typedef unsigned short Elf32_Half;
7typedef unsigned int Elf32_Off;
8typedef signed int Elf32_Sword;
9typedef unsigned int Elf32_Word;
10
11/* 64-bit ELF base types. */
12typedef unsigned long long Elf64_Addr;
13typedef unsigned short Elf64_Half;
14typedef signed short Elf64_SHalf;
15typedef unsigned long long Elf64_Off;
16typedef signed int Elf64_Sword;
17typedef unsigned int Elf64_Word;
18typedef unsigned long long Elf64_Xword;
19typedef signed long long Elf64_Sxword;
20
21/* These constants are for the segment types stored in the image headers */
22#define PT_NULL 0
23#define PT_LOAD 1
24#define PT_DYNAMIC 2
25#define PT_INTERP 3
26#define PT_NOTE 4
27#define PT_SHLIB 5
28#define PT_PHDR 6
29#define PT_TLS 7 /* Thread local storage segment */
30#define PT_LOOS 0x60000000 /* OS-specific */
31#define PT_HIOS 0x6fffffff /* OS-specific */
32#define PT_LOPROC 0x70000000
33#define PT_HIPROC 0x7fffffff
34#define PT_GNU_EH_FRAME 0x6474e550
35
36#define PT_GNU_STACK (PT_LOOS + 0x474e551)
37
38/* These constants define the different elf file types */
39#define ET_NONE 0
40#define ET_REL 1
41#define ET_EXEC 2
42#define ET_DYN 3
43#define ET_CORE 4
44#define ET_LOPROC 0xff00
45#define ET_HIPROC 0xffff
46
47/* These constants define the various ELF target machines */
48#define EM_NONE 0
49#define EM_PPC 20 /* PowerPC */
50#define EM_PPC64 21 /* PowerPC64 */
51
52#define EI_NIDENT 16
53
54typedef struct elf32_hdr {
55 unsigned char e_ident[EI_NIDENT];
56 Elf32_Half e_type;
57 Elf32_Half e_machine;
58 Elf32_Word e_version;
59 Elf32_Addr e_entry; /* Entry point */
60 Elf32_Off e_phoff;
61 Elf32_Off e_shoff;
62 Elf32_Word e_flags;
63 Elf32_Half e_ehsize;
64 Elf32_Half e_phentsize;
65 Elf32_Half e_phnum;
66 Elf32_Half e_shentsize;
67 Elf32_Half e_shnum;
68 Elf32_Half e_shstrndx;
69} Elf32_Ehdr;
70
71typedef struct elf64_hdr {
72 unsigned char e_ident[16]; /* ELF "magic number" */
73 Elf64_Half e_type;
74 Elf64_Half e_machine;
75 Elf64_Word e_version;
76 Elf64_Addr e_entry; /* Entry point virtual address */
77 Elf64_Off e_phoff; /* Program header table file offset */
78 Elf64_Off e_shoff; /* Section header table file offset */
79 Elf64_Word e_flags;
80 Elf64_Half e_ehsize;
81 Elf64_Half e_phentsize;
82 Elf64_Half e_phnum;
83 Elf64_Half e_shentsize;
84 Elf64_Half e_shnum;
85 Elf64_Half e_shstrndx;
86} Elf64_Ehdr;
87
88/* These constants define the permissions on sections in the program
89 header, p_flags. */
90#define PF_R 0x4
91#define PF_W 0x2
92#define PF_X 0x1
93
94typedef struct elf32_phdr {
95 Elf32_Word p_type;
96 Elf32_Off p_offset;
97 Elf32_Addr p_vaddr;
98 Elf32_Addr p_paddr;
99 Elf32_Word p_filesz;
100 Elf32_Word p_memsz;
101 Elf32_Word p_flags;
102 Elf32_Word p_align;
103} Elf32_Phdr;
104
105typedef struct elf64_phdr {
106 Elf64_Word p_type;
107 Elf64_Word p_flags;
108 Elf64_Off p_offset; /* Segment file offset */
109 Elf64_Addr p_vaddr; /* Segment virtual address */
110 Elf64_Addr p_paddr; /* Segment physical address */
111 Elf64_Xword p_filesz; /* Segment size in file */
112 Elf64_Xword p_memsz; /* Segment size in memory */
113 Elf64_Xword p_align; /* Segment alignment, file & memory */
114} Elf64_Phdr;
115
116#define EI_MAG0 0 /* e_ident[] indexes */
117#define EI_MAG1 1
118#define EI_MAG2 2
119#define EI_MAG3 3
120#define EI_CLASS 4
121#define EI_DATA 5
122#define EI_VERSION 6
123#define EI_OSABI 7
124#define EI_PAD 8
125
126#define ELFMAG0 0x7f /* EI_MAG */
127#define ELFMAG1 'E'
128#define ELFMAG2 'L'
129#define ELFMAG3 'F'
130#define ELFMAG "\177ELF"
131#define SELFMAG 4
132
133#define ELFCLASSNONE 0 /* EI_CLASS */
134#define ELFCLASS32 1
135#define ELFCLASS64 2
136#define ELFCLASSNUM 3
137
138#define ELFDATANONE 0 /* e_ident[EI_DATA] */
139#define ELFDATA2LSB 1
140#define ELFDATA2MSB 2
141
142#define EV_NONE 0 /* e_version, EI_VERSION */
143#define EV_CURRENT 1
144#define EV_NUM 2
145
146#define ELFOSABI_NONE 0
147#define ELFOSABI_LINUX 3
148
149#endif /* _PPC_BOOT_ELF_H_ */
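
[Annotation] The new elf.h gives the 32-bit boot wrapper just enough ELF vocabulary to parse the embedded 64-bit kernel without reaching into <linux/elf.h>. A typical consumer, sketched under the assumption that the wrapper wants a big-endian 64-bit PowerPC executable (illustrative helper, not part of the patch):

	#include "elf.h"
	#include "string.h"

	static int valid_ppc64_image(const Elf64_Ehdr *e)
	{
		return memcmp(e->e_ident, ELFMAG, SELFMAG) == 0 &&
		       e->e_ident[EI_CLASS] == ELFCLASS64 &&
		       e->e_ident[EI_DATA] == ELFDATA2MSB &&
		       e->e_machine == EM_PPC64 &&
		       e->e_type == ET_EXEC;
	}
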
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index 199d9804f61c..99e68cfbe688 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -8,36 +8,28 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include "ppc32-types.h" 11#include <stdarg.h>
12#include <stddef.h>
13#include "elf.h"
14#include "page.h"
15#include "string.h"
16#include "stdio.h"
17#include "prom.h"
12#include "zlib.h" 18#include "zlib.h"
13#include <linux/elf.h> 19
14#include <linux/string.h> 20static void gunzip(void *, int, unsigned char *, int *);
15#include <asm/processor.h> 21extern void flush_cache(void *, unsigned long);
16#include <asm/page.h> 22
17
18extern void *finddevice(const char *);
19extern int getprop(void *, const char *, void *, int);
20extern void printf(const char *fmt, ...);
21extern int sprintf(char *buf, const char *fmt, ...);
22void gunzip(void *, int, unsigned char *, int *);
23void *claim(unsigned int, unsigned int, unsigned int);
24void flush_cache(void *, unsigned long);
25void pause(void);
26extern void exit(void);
27
28unsigned long strlen(const char *s);
29void *memmove(void *dest, const void *src, unsigned long n);
30void *memcpy(void *dest, const void *src, unsigned long n);
31 23
32/* Value picked to match that used by yaboot */ 24/* Value picked to match that used by yaboot */
33#define PROG_START 0x01400000 25#define PROG_START 0x01400000
34#define RAM_END (256<<20) // Fixme: use OF */ 26#define RAM_END (256<<20) // Fixme: use OF */
35 27
36char *avail_ram; 28static char *avail_ram;
37char *begin_avail, *end_avail; 29static char *begin_avail, *end_avail;
38char *avail_high; 30static char *avail_high;
39unsigned int heap_use; 31static unsigned int heap_use;
40unsigned int heap_max; 32static unsigned int heap_max;
41 33
42extern char _start[]; 34extern char _start[];
43extern char _vmlinux_start[]; 35extern char _vmlinux_start[];
@@ -52,9 +44,9 @@ struct addr_range {
52 unsigned long size; 44 unsigned long size;
53 unsigned long memsize; 45 unsigned long memsize;
54}; 46};
55struct addr_range vmlinux = {0, 0, 0}; 47static struct addr_range vmlinux = {0, 0, 0};
56struct addr_range vmlinuz = {0, 0, 0}; 48static struct addr_range vmlinuz = {0, 0, 0};
57struct addr_range initrd = {0, 0, 0}; 49static struct addr_range initrd = {0, 0, 0};
58 50
59static char scratch[128<<10]; /* 128kB of scratch space for gunzip */ 51static char scratch[128<<10]; /* 128kB of scratch space for gunzip */
60 52
@@ -64,13 +56,6 @@ typedef void (*kernel_entry_t)( unsigned long,
64 void *); 56 void *);
65 57
66 58
67int (*prom)(void *);
68
69void *chosen_handle;
70void *stdin;
71void *stdout;
72void *stderr;
73
74#undef DEBUG 59#undef DEBUG
75 60
76static unsigned long claim_base = PROG_START; 61static unsigned long claim_base = PROG_START;
@@ -277,7 +262,7 @@ void zfree(void *x, void *addr, unsigned nb)
277 262
278#define DEFLATED 8 263#define DEFLATED 8
279 264
280void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp) 265static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
281{ 266{
282 z_stream s; 267 z_stream s;
283 int r, i, flags; 268 int r, i, flags;
diff --git a/arch/ppc64/boot/page.h b/arch/ppc64/boot/page.h
new file mode 100644
index 000000000000..14eca30fef64
--- /dev/null
+++ b/arch/ppc64/boot/page.h
@@ -0,0 +1,34 @@
1#ifndef _PPC_BOOT_PAGE_H
2#define _PPC_BOOT_PAGE_H
3/*
4 * Copyright (C) 2001 PPC64 Team, IBM Corp
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifdef __ASSEMBLY__
13#define ASM_CONST(x) x
14#else
15#define __ASM_CONST(x) x##UL
16#define ASM_CONST(x) __ASM_CONST(x)
17#endif
18
19/* PAGE_SHIFT determines the page size */
20#define PAGE_SHIFT 12
21#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24/* align addr on a size boundary - adjust address up/down if needed */
25#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
26#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
27
28/* align addr on a size boundary - adjust address up if needed */
29#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
30
31/* to align the pointer to the (next) page boundary */
32#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
33
34#endif /* _PPC_BOOT_PAGE_H */
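
[Annotation] These helpers assume power-of-two sizes, since they mask rather than divide. Concretely, with PAGE_SHIFT == 12:

	#include "page.h"

	/* _ALIGN_UP(0x1234, PAGE_SIZE)   == 0x2000  (rounded up)            */
	/* _ALIGN_DOWN(0x1234, PAGE_SIZE) == 0x1000  (rounded down)          */
	/* PAGE_ALIGN(0x2000)             == 0x2000  (aligned input is kept) */
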
diff --git a/arch/ppc64/boot/ppc32-types.h b/arch/ppc64/boot/ppc32-types.h
deleted file mode 100644
index f7b8884f8f70..000000000000
--- a/arch/ppc64/boot/ppc32-types.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef _PPC64_TYPES_H
2#define _PPC64_TYPES_H
3
4typedef __signed__ char __s8;
5typedef unsigned char __u8;
6
7typedef __signed__ short __s16;
8typedef unsigned short __u16;
9
10typedef __signed__ int __s32;
11typedef unsigned int __u32;
12
13typedef __signed__ long long __s64;
14typedef unsigned long long __u64;
15
16typedef signed char s8;
17typedef unsigned char u8;
18
19typedef signed short s16;
20typedef unsigned short u16;
21
22typedef signed int s32;
23typedef unsigned int u32;
24
25typedef signed long long s64;
26typedef unsigned long long u64;
27
28typedef struct {
29 __u32 u[4];
30} __attribute((aligned(16))) __vector128;
31
32#define BITS_PER_LONG 32
33
34typedef __vector128 vector128;
35
36#endif /* _PPC64_TYPES_H */
diff --git a/arch/ppc64/boot/ppc_asm.h b/arch/ppc64/boot/ppc_asm.h
new file mode 100644
index 000000000000..1c2c2817f9b7
--- /dev/null
+++ b/arch/ppc64/boot/ppc_asm.h
@@ -0,0 +1,62 @@
1#ifndef _PPC64_PPC_ASM_H
2#define _PPC64_PPC_ASM_H
3/*
4 *
5 * Definitions used by various bits of low-level assembly code on PowerPC.
6 *
7 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15/* Condition Register Bit Fields */
16
17#define cr0 0
18#define cr1 1
19#define cr2 2
20#define cr3 3
21#define cr4 4
22#define cr5 5
23#define cr6 6
24#define cr7 7
25
26
27/* General Purpose Registers (GPRs) */
28
29#define r0 0
30#define r1 1
31#define r2 2
32#define r3 3
33#define r4 4
34#define r5 5
35#define r6 6
36#define r7 7
37#define r8 8
38#define r9 9
39#define r10 10
40#define r11 11
41#define r12 12
42#define r13 13
43#define r14 14
44#define r15 15
45#define r16 16
46#define r17 17
47#define r18 18
48#define r19 19
49#define r20 20
50#define r21 21
51#define r22 22
52#define r23 23
53#define r24 24
54#define r25 25
55#define r26 26
56#define r27 27
57#define r28 28
58#define r29 29
59#define r30 30
60#define r31 31
61
62#endif /* _PPC64_PPC_ASM_H */
diff --git a/arch/ppc64/boot/prom.c b/arch/ppc64/boot/prom.c
index 5e48b80ff5a0..4bea2f4dcb06 100644
--- a/arch/ppc64/boot/prom.c
+++ b/arch/ppc64/boot/prom.c
@@ -7,43 +7,19 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#include <stdarg.h> 9#include <stdarg.h>
10#include <linux/types.h> 10#include <stddef.h>
11#include <linux/string.h> 11#include "string.h"
12#include <linux/ctype.h> 12#include "stdio.h"
13 13#include "prom.h"
14extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
15
16/* The unnecessary pointer compare is there
17 * to check for type safety (n must be 64bit)
18 */
19# define do_div(n,base) ({ \
20 __u32 __base = (base); \
21 __u32 __rem; \
22 (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
23 if (((n) >> 32) == 0) { \
24 __rem = (__u32)(n) % __base; \
25 (n) = (__u32)(n) / __base; \
26 } else \
27 __rem = __div64_32(&(n), __base); \
28 __rem; \
29 })
30 14
31int (*prom)(void *); 15int (*prom)(void *);
32 16
33void *chosen_handle; 17void *chosen_handle;
18
34void *stdin; 19void *stdin;
35void *stdout; 20void *stdout;
36void *stderr; 21void *stderr;
37 22
38void exit(void);
39void *finddevice(const char *name);
40int getprop(void *phandle, const char *name, void *buf, int buflen);
41void chrpboot(int a1, int a2, void *prom); /* in main.c */
42
43int printf(char *fmt, ...);
44
45/* there is no convenient header to get this from... -- paulus */
46extern unsigned long strlen(const char *);
47 23
48int 24int
49write(void *handle, void *ptr, int nb) 25write(void *handle, void *ptr, int nb)
@@ -210,107 +186,6 @@ fputs(char *str, void *f)
210 return write(f, str, n) == n? 0: -1; 186 return write(f, str, n) == n? 0: -1;
211} 187}
212 188
213int
214readchar(void)
215{
216 char ch;
217
218 for (;;) {
219 switch (read(stdin, &ch, 1)) {
220 case 1:
221 return ch;
222 case -1:
223 printf("read(stdin) returned -1\r\n");
224 return -1;
225 }
226 }
227}
228
229static char line[256];
230static char *lineptr;
231static int lineleft;
232
233int
234getchar(void)
235{
236 int c;
237
238 if (lineleft == 0) {
239 lineptr = line;
240 for (;;) {
241 c = readchar();
242 if (c == -1 || c == 4)
243 break;
244 if (c == '\r' || c == '\n') {
245 *lineptr++ = '\n';
246 putchar('\n');
247 break;
248 }
249 switch (c) {
250 case 0177:
251 case '\b':
252 if (lineptr > line) {
253 putchar('\b');
254 putchar(' ');
255 putchar('\b');
256 --lineptr;
257 }
258 break;
259 case 'U' & 0x1F:
260 while (lineptr > line) {
261 putchar('\b');
262 putchar(' ');
263 putchar('\b');
264 --lineptr;
265 }
266 break;
267 default:
268 if (lineptr >= &line[sizeof(line) - 1])
269 putchar('\a');
270 else {
271 putchar(c);
272 *lineptr++ = c;
273 }
274 }
275 }
276 lineleft = lineptr - line;
277 lineptr = line;
278 }
279 if (lineleft == 0)
280 return -1;
281 --lineleft;
282 return *lineptr++;
283}
284
285
286
287/* String functions lifted from lib/vsprintf.c and lib/ctype.c */
288unsigned char _ctype[] = {
289_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
290_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
291_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
292_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
293_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
294_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
295_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
296_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
297_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
298_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
299_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
300_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
301_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
302_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
303_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
304_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
3050,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
3060,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
307_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
308_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
309_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
310_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
311_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
312_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
313
314size_t strnlen(const char * s, size_t count) 189size_t strnlen(const char * s, size_t count)
315{ 190{
316 const char *sc; 191 const char *sc;
@@ -320,44 +195,30 @@ size_t strnlen(const char * s, size_t count)
320 return sc - s; 195 return sc - s;
321} 196}
322 197
323unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) 198extern unsigned int __div64_32(unsigned long long *dividend,
324{ 199 unsigned int divisor);
325 unsigned long result = 0,value;
326 200
327 if (!base) { 201/* The unnecessary pointer compare is there
328 base = 10; 202 * to check for type safety (n must be 64bit)
329 if (*cp == '0') { 203 */
330 base = 8; 204# define do_div(n,base) ({ \
331 cp++; 205 unsigned int __base = (base); \
332 if ((*cp == 'x') && isxdigit(cp[1])) { 206 unsigned int __rem; \
333 cp++; 207 (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
334 base = 16; 208 if (((n) >> 32) == 0) { \
335 } 209 __rem = (unsigned int)(n) % __base; \
336 } 210 (n) = (unsigned int)(n) / __base; \
337 } 211 } else \
338 while (isxdigit(*cp) && 212 __rem = __div64_32(&(n), __base); \
339 (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { 213 __rem; \
340 result = result*base + value; 214 })
341 cp++;
342 }
343 if (endp)
344 *endp = (char *)cp;
345 return result;
346}
347
348long simple_strtol(const char *cp,char **endp,unsigned int base)
349{
350 if(*cp=='-')
351 return -simple_strtoul(cp+1,endp,base);
352 return simple_strtoul(cp,endp,base);
353}
354 215
355static int skip_atoi(const char **s) 216static int skip_atoi(const char **s)
356{ 217{
357 int i=0; 218 int i, c;
358 219
359 while (isdigit(**s)) 220 for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
360 i = i*10 + *((*s)++) - '0'; 221 i = i*10 + c - '0';
361 return i; 222 return i;
362} 223}
363 224
@@ -436,9 +297,6 @@ static char * number(char * str, unsigned long long num, int base, int size, int
436 return str; 297 return str;
437} 298}
438 299
439/* Forward decl. needed for IP address printing stuff... */
440int sprintf(char * buf, const char *fmt, ...);
441
442int vsprintf(char *buf, const char *fmt, va_list args) 300int vsprintf(char *buf, const char *fmt, va_list args)
443{ 301{
444 int len; 302 int len;
@@ -477,7 +335,7 @@ int vsprintf(char *buf, const char *fmt, va_list args)
477 335
478 /* get field width */ 336 /* get field width */
479 field_width = -1; 337 field_width = -1;
480 if (isdigit(*fmt)) 338 if ('0' <= *fmt && *fmt <= '9')
481 field_width = skip_atoi(&fmt); 339 field_width = skip_atoi(&fmt);
482 else if (*fmt == '*') { 340 else if (*fmt == '*') {
483 ++fmt; 341 ++fmt;
@@ -493,7 +351,7 @@ int vsprintf(char *buf, const char *fmt, va_list args)
493 precision = -1; 351 precision = -1;
494 if (*fmt == '.') { 352 if (*fmt == '.') {
495 ++fmt; 353 ++fmt;
496 if (isdigit(*fmt)) 354 if ('0' <= *fmt && *fmt <= '9')
497 precision = skip_atoi(&fmt); 355 precision = skip_atoi(&fmt);
498 else if (*fmt == '*') { 356 else if (*fmt == '*') {
499 ++fmt; 357 ++fmt;
@@ -628,7 +486,7 @@ int sprintf(char * buf, const char *fmt, ...)
628static char sprint_buf[1024]; 486static char sprint_buf[1024];
629 487
630int 488int
631printf(char *fmt, ...) 489printf(const char *fmt, ...)
632{ 490{
633 va_list args; 491 va_list args;
634 int n; 492 int n;
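
[Annotation] Beyond the header shuffle, this file gains the const-correct printf() signature used by the new stdio.h, and drops its dependence on the kernel's ctype table by open-coding the digit tests, which lets the big _ctype array and the strtoul helpers go. The relocated do_div() divides a 64-bit value in place and hands back the 32-bit remainder; the dead pointer comparison inside it is a compile-time check that n really is 64-bit. Usage sketch:

	static void div_example(void)
	{
		unsigned long long n = 10000000000ULL;
		unsigned int rem = do_div(n, 1000000);	/* n becomes 10000, rem 0 */

		(void) rem;
	}
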
diff --git a/arch/ppc64/boot/prom.h b/arch/ppc64/boot/prom.h
new file mode 100644
index 000000000000..96ab5aec740c
--- /dev/null
+++ b/arch/ppc64/boot/prom.h
@@ -0,0 +1,18 @@
1#ifndef _PPC_BOOT_PROM_H_
2#define _PPC_BOOT_PROM_H_
3
4extern int (*prom) (void *);
5extern void *chosen_handle;
6
7extern void *stdin;
8extern void *stdout;
9extern void *stderr;
10
11extern int write(void *handle, void *ptr, int nb);
12extern int read(void *handle, void *ptr, int nb);
13extern void exit(void);
14extern void pause(void);
15extern void *finddevice(const char *);
16extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
17extern int getprop(void *phandle, const char *name, void *buf, int buflen);
18#endif /* _PPC_BOOT_PROM_H_ */
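
[Annotation] These are the minimal Open Firmware client-interface entry points the boot wrapper uses. A hypothetical caller reading the kernel command line from /chosen (the helper and buffer names are illustrative):

	#include "prom.h"

	static char args[256];

	static int read_bootargs(void)
	{
		void *chosen = finddevice("/chosen");

		if (chosen == (void *) -1)	/* OF returns -1 for a missing node */
			return -1;
		return getprop(chosen, "bootargs", args, sizeof(args));
	}
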
diff --git a/arch/ppc64/boot/stdio.h b/arch/ppc64/boot/stdio.h
new file mode 100644
index 000000000000..24bd3a8dee94
--- /dev/null
+++ b/arch/ppc64/boot/stdio.h
@@ -0,0 +1,16 @@
1#ifndef _PPC_BOOT_STDIO_H_
2#define _PPC_BOOT_STDIO_H_
3
4extern int printf(const char *fmt, ...);
5
6extern int sprintf(char *buf, const char *fmt, ...);
7
8extern int vsprintf(char *buf, const char *fmt, va_list args);
9
10extern int putc(int c, void *f);
11extern int putchar(int c);
12extern int getchar(void);
13
14extern int fputs(char *str, void *f);
15
16#endif /* _PPC_BOOT_STDIO_H_ */
diff --git a/arch/ppc64/boot/string.S b/arch/ppc64/boot/string.S
index ba5f2d21c9ea..7ade87ae7718 100644
--- a/arch/ppc64/boot/string.S
+++ b/arch/ppc64/boot/string.S
@@ -9,7 +9,7 @@
9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32. 9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
10 */ 10 */
11 11
12#include <asm/ppc_asm.h> 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl strcpy 15 .globl strcpy
diff --git a/arch/ppc64/boot/string.h b/arch/ppc64/boot/string.h
new file mode 100644
index 000000000000..9289258bcbd6
--- /dev/null
+++ b/arch/ppc64/boot/string.h
@@ -0,0 +1,16 @@
1#ifndef _PPC_BOOT_STRING_H_
2#define _PPC_BOOT_STRING_H_
3
4extern char *strcpy(char *dest, const char *src);
5extern char *strncpy(char *dest, const char *src, size_t n);
6extern char *strcat(char *dest, const char *src);
7extern int strcmp(const char *s1, const char *s2);
8extern size_t strlen(const char *s);
9extern size_t strnlen(const char *s, size_t count);
10
11extern void *memset(void *s, int c, size_t n);
12extern void *memmove(void *dest, const void *src, unsigned long n);
13extern void *memcpy(void *dest, const void *src, unsigned long n);
14extern int memcmp(const void *s1, const void *s2, size_t n);
15
16#endif /* _PPC_BOOT_STRING_H_ */
diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
index 78837e884b8b..0d910cd2079d 100644
--- a/arch/ppc64/boot/zlib.c
+++ b/arch/ppc64/boot/zlib.c
@@ -107,7 +107,7 @@ extern void *memcpy(void *, const void *, unsigned long);
107 107
108/* Diagnostic functions */ 108/* Diagnostic functions */
109#ifdef DEBUG_ZLIB 109#ifdef DEBUG_ZLIB
110# include <stdio.h> 110# include "stdio.h"
111# ifndef verbose 111# ifndef verbose
112# define verbose 0 112# define verbose 0
113# endif 113# endif
diff --git a/arch/ppc64/configs/g5_defconfig b/arch/ppc64/configs/g5_defconfig
index ab567741e80e..fc83d9330282 100644
--- a/arch/ppc64/configs/g5_defconfig
+++ b/arch/ppc64/configs/g5_defconfig
@@ -103,10 +103,10 @@ CONFIG_PREEMPT_NONE=y
103# CONFIG_PREEMPT_VOLUNTARY is not set 103# CONFIG_PREEMPT_VOLUNTARY is not set
104# CONFIG_PREEMPT is not set 104# CONFIG_PREEMPT is not set
105# CONFIG_PREEMPT_BKL is not set 105# CONFIG_PREEMPT_BKL is not set
106CONFIG_HZ_100=y 106# CONFIG_HZ_100 is not set
107# CONFIG_HZ_250 is not set 107CONFIG_HZ_250=y
108# CONFIG_HZ_1000 is not set 108# CONFIG_HZ_1000 is not set
109CONFIG_HZ=100 109CONFIG_HZ=250
110CONFIG_GENERIC_HARDIRQS=y 110CONFIG_GENERIC_HARDIRQS=y
111CONFIG_SECCOMP=y 111CONFIG_SECCOMP=y
112CONFIG_ISA_DMA_API=y 112CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/iSeries_defconfig b/arch/ppc64/configs/iSeries_defconfig
index 394ba18b58c7..013d4e0e4003 100644
--- a/arch/ppc64/configs/iSeries_defconfig
+++ b/arch/ppc64/configs/iSeries_defconfig
@@ -94,12 +94,11 @@ CONFIG_PREEMPT_NONE=y
94# CONFIG_PREEMPT_VOLUNTARY is not set 94# CONFIG_PREEMPT_VOLUNTARY is not set
95# CONFIG_PREEMPT is not set 95# CONFIG_PREEMPT is not set
96# CONFIG_PREEMPT_BKL is not set 96# CONFIG_PREEMPT_BKL is not set
97CONFIG_HZ_100=y 97# CONFIG_HZ_100 is not set
98# CONFIG_HZ_250 is not set 98CONFIG_HZ_250=y
99# CONFIG_HZ_1000 is not set 99# CONFIG_HZ_1000 is not set
100CONFIG_HZ=100 100CONFIG_HZ=250
101CONFIG_GENERIC_HARDIRQS=y 101CONFIG_GENERIC_HARDIRQS=y
102CONFIG_MSCHUNKS=y
103CONFIG_LPARCFG=y 102CONFIG_LPARCFG=y
104CONFIG_SECCOMP=y 103CONFIG_SECCOMP=y
105CONFIG_ISA_DMA_API=y 104CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/maple_defconfig b/arch/ppc64/configs/maple_defconfig
index 2033fe663dbe..dd42892cd873 100644
--- a/arch/ppc64/configs/maple_defconfig
+++ b/arch/ppc64/configs/maple_defconfig
@@ -103,10 +103,10 @@ CONFIG_PREEMPT_NONE=y
103# CONFIG_PREEMPT_VOLUNTARY is not set 103# CONFIG_PREEMPT_VOLUNTARY is not set
104# CONFIG_PREEMPT is not set 104# CONFIG_PREEMPT is not set
105# CONFIG_PREEMPT_BKL is not set 105# CONFIG_PREEMPT_BKL is not set
106CONFIG_HZ_100=y 106# CONFIG_HZ_100 is not set
107# CONFIG_HZ_250 is not set 107CONFIG_HZ_250=y
108# CONFIG_HZ_1000 is not set 108# CONFIG_HZ_1000 is not set
109CONFIG_HZ=100 109CONFIG_HZ=250
110CONFIG_GENERIC_HARDIRQS=y 110CONFIG_GENERIC_HARDIRQS=y
111CONFIG_SECCOMP=y 111CONFIG_SECCOMP=y
112CONFIG_ISA_DMA_API=y 112CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig
index 297fd5229487..29f7b80b0efc 100644
--- a/arch/ppc64/configs/pSeries_defconfig
+++ b/arch/ppc64/configs/pSeries_defconfig
@@ -112,10 +112,10 @@ CONFIG_PREEMPT_NONE=y
112# CONFIG_PREEMPT_VOLUNTARY is not set 112# CONFIG_PREEMPT_VOLUNTARY is not set
113# CONFIG_PREEMPT is not set 113# CONFIG_PREEMPT is not set
114# CONFIG_PREEMPT_BKL is not set 114# CONFIG_PREEMPT_BKL is not set
115CONFIG_HZ_100=y 115# CONFIG_HZ_100 is not set
116# CONFIG_HZ_250 is not set 116CONFIG_HZ_250=y
117# CONFIG_HZ_1000 is not set 117# CONFIG_HZ_1000 is not set
118CONFIG_HZ=100 118CONFIG_HZ=250
119CONFIG_EEH=y 119CONFIG_EEH=y
120CONFIG_GENERIC_HARDIRQS=y 120CONFIG_GENERIC_HARDIRQS=y
121CONFIG_PPC_RTAS=y 121CONFIG_PPC_RTAS=y
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index c361e7727b7a..7cb4750bb7a9 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -114,10 +114,10 @@ CONFIG_PREEMPT_NONE=y
114# CONFIG_PREEMPT_VOLUNTARY is not set 114# CONFIG_PREEMPT_VOLUNTARY is not set
115# CONFIG_PREEMPT is not set 115# CONFIG_PREEMPT is not set
116# CONFIG_PREEMPT_BKL is not set 116# CONFIG_PREEMPT_BKL is not set
117CONFIG_HZ_100=y 117# CONFIG_HZ_100 is not set
118# CONFIG_HZ_250 is not set 118CONFIG_HZ_250=y
119# CONFIG_HZ_1000 is not set 119# CONFIG_HZ_1000 is not set
120CONFIG_HZ=100 120CONFIG_HZ=250
121CONFIG_EEH=y 121CONFIG_EEH=y
122CONFIG_GENERIC_HARDIRQS=y 122CONFIG_GENERIC_HARDIRQS=y
123CONFIG_PPC_RTAS=y 123CONFIG_PPC_RTAS=y
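
[Annotation] All five defconfigs move the default timer frequency from 100 Hz to 250 Hz, shrinking the tick period from 10 ms to 4 ms (period = 1/HZ); the iSeries config also loses CONFIG_MSCHUNKS, matching its removal from Kconfig above. The arithmetic:

	/* tick period in microseconds = 1000000 / HZ:
	 *   HZ = 100  ->  10000 us = 10 ms per jiffy
	 *   HZ = 250  ->   4000 us =  4 ms per jiffy
	 */
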
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index 1c11031c838e..0a9c23ca2f0c 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -51,6 +51,17 @@ struct HvReleaseData hvReleaseData = {
51 0xf4, 0x4b, 0xf6, 0xf4 }, 51 0xf4, 0x4b, 0xf6, 0xf4 },
52}; 52};
53 53
54/*
55 * The NACA. The first dword of the naca is required by the iSeries
56 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
57 * through the pointer in hvReleaseData.
58 */
59struct naca_struct naca = {
60 .xItVpdAreas = &itVpdAreas,
61 .xRamDisk = 0,
62 .xRamDiskSize = 0,
63};
64
54extern void system_reset_iSeries(void); 65extern void system_reset_iSeries(void);
55extern void machine_check_iSeries(void); 66extern void machine_check_iSeries(void);
56extern void data_access_iSeries(void); 67extern void data_access_iSeries(void);
@@ -214,29 +225,3 @@ struct ItVpdAreas itVpdAreas = {
214 0,0 225 0,0
215 } 226 }
216}; 227};
217
218struct msChunks msChunks;
219EXPORT_SYMBOL(msChunks);
220
221/* Depending on whether this is called from iSeries or pSeries setup
222 * code, the location of the msChunks struct may or may not have
223 * to be reloc'd, so we force the caller to do that for us by passing
224 * in a pointer to the structure.
225 */
226unsigned long
227msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
228{
229 unsigned long offset = reloc_offset();
230 struct msChunks *_msChunks = PTRRELOC(&msChunks);
231
232 _msChunks->num_chunks = num_chunks;
233 _msChunks->chunk_size = chunk_size;
234 _msChunks->chunk_shift = __ilog2(chunk_size);
235 _msChunks->chunk_mask = (1UL<<_msChunks->chunk_shift)-1;
236
237 mem = _ALIGN(mem, sizeof(msChunks_entry));
238 _msChunks->abs = (msChunks_entry *)(mem + offset);
239 mem += num_chunks * sizeof(msChunks_entry);
240
241 return mem;
242}
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 2ecccb6b4f8c..f4b3bfcc109d 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \ 11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \ 12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \ 13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o 14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/ 15obj-y += vdso32/ vdso64/
16 16
17obj-$(CONFIG_PPC_OF) += of_device.o 17obj-$(CONFIG_PPC_OF) += of_device.o
@@ -50,7 +50,10 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 51obj-$(CONFIG_BOOTX_TEXT) += btext.o
52obj-$(CONFIG_HVCS) += hvcserver.o 52obj-$(CONFIG_HVCS) += hvcserver.o
53obj-$(CONFIG_IBMVIO) += vio.o 53
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
54obj-$(CONFIG_XICS) += xics.o 57obj-$(CONFIG_XICS) += xics.o
55obj-$(CONFIG_MPIC) += mpic.o 58obj-$(CONFIG_MPIC) += mpic.o
56 59
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index abb9e5b5da03..17e35d0fed09 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -94,7 +94,8 @@ int main(void)
94 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 94 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
95 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 95 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
96#ifdef CONFIG_HUGETLB_PAGE 96#ifdef CONFIG_HUGETLB_PAGE
97 DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs)); 97 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
98 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
98#endif /* CONFIG_HUGETLB_PAGE */ 99#endif /* CONFIG_HUGETLB_PAGE */
99 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr)); 100 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
100 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); 101 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
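
[Annotation] asm-offsets.c never links into the kernel: it is compiled to assembly and the gen-asm-offsets filechk rule greps the result into offsets.h, so structure offsets like the two new hugepage fields become constants usable from head.S. The DEFINE() macro behind this is conventionally (assumption: this kernel uses the same common idiom):

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* Compiling DEFINE(PACALOWHTLBAREAS, offsetof(...)) leaves a
	 * "->PACALOWHTLBAREAS <n> ..." marker in asm-offsets.s, which
	 * the filechk rule rewrites into a #define in offsets.h. */
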
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 77cec42f9525..4847f2ac8c9f 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Modifications for ppc64: 6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> 7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -60,7 +60,6 @@ struct cpu_spec cpu_specs[] = {
60 .icache_bsize = 128, 60 .icache_bsize = 128,
61 .dcache_bsize = 128, 61 .dcache_bsize = 128,
62 .cpu_setup = __setup_cpu_power3, 62 .cpu_setup = __setup_cpu_power3,
63 .firmware_features = COMMON_PPC64_FW,
64 }, 63 },
65 { /* Power3+ */ 64 { /* Power3+ */
66 .pvr_mask = 0xffff0000, 65 .pvr_mask = 0xffff0000,
@@ -73,7 +72,6 @@ struct cpu_spec cpu_specs[] = {
73 .icache_bsize = 128, 72 .icache_bsize = 128,
74 .dcache_bsize = 128, 73 .dcache_bsize = 128,
75 .cpu_setup = __setup_cpu_power3, 74 .cpu_setup = __setup_cpu_power3,
76 .firmware_features = COMMON_PPC64_FW,
77 }, 75 },
78 { /* Northstar */ 76 { /* Northstar */
79 .pvr_mask = 0xffff0000, 77 .pvr_mask = 0xffff0000,
@@ -86,7 +84,6 @@ struct cpu_spec cpu_specs[] = {
86 .icache_bsize = 128, 84 .icache_bsize = 128,
87 .dcache_bsize = 128, 85 .dcache_bsize = 128,
88 .cpu_setup = __setup_cpu_power3, 86 .cpu_setup = __setup_cpu_power3,
89 .firmware_features = COMMON_PPC64_FW,
90 }, 87 },
91 { /* Pulsar */ 88 { /* Pulsar */
92 .pvr_mask = 0xffff0000, 89 .pvr_mask = 0xffff0000,
@@ -99,7 +96,6 @@ struct cpu_spec cpu_specs[] = {
99 .icache_bsize = 128, 96 .icache_bsize = 128,
100 .dcache_bsize = 128, 97 .dcache_bsize = 128,
101 .cpu_setup = __setup_cpu_power3, 98 .cpu_setup = __setup_cpu_power3,
102 .firmware_features = COMMON_PPC64_FW,
103 }, 99 },
104 { /* I-star */ 100 { /* I-star */
105 .pvr_mask = 0xffff0000, 101 .pvr_mask = 0xffff0000,
@@ -112,7 +108,6 @@ struct cpu_spec cpu_specs[] = {
112 .icache_bsize = 128, 108 .icache_bsize = 128,
113 .dcache_bsize = 128, 109 .dcache_bsize = 128,
114 .cpu_setup = __setup_cpu_power3, 110 .cpu_setup = __setup_cpu_power3,
115 .firmware_features = COMMON_PPC64_FW,
116 }, 111 },
117 { /* S-star */ 112 { /* S-star */
118 .pvr_mask = 0xffff0000, 113 .pvr_mask = 0xffff0000,
@@ -125,7 +120,6 @@ struct cpu_spec cpu_specs[] = {
125 .icache_bsize = 128, 120 .icache_bsize = 128,
126 .dcache_bsize = 128, 121 .dcache_bsize = 128,
127 .cpu_setup = __setup_cpu_power3, 122 .cpu_setup = __setup_cpu_power3,
128 .firmware_features = COMMON_PPC64_FW,
129 }, 123 },
130 { /* Power4 */ 124 { /* Power4 */
131 .pvr_mask = 0xffff0000, 125 .pvr_mask = 0xffff0000,
@@ -138,7 +132,6 @@ struct cpu_spec cpu_specs[] = {
138 .icache_bsize = 128, 132 .icache_bsize = 128,
139 .dcache_bsize = 128, 133 .dcache_bsize = 128,
140 .cpu_setup = __setup_cpu_power4, 134 .cpu_setup = __setup_cpu_power4,
141 .firmware_features = COMMON_PPC64_FW,
142 }, 135 },
143 { /* Power4+ */ 136 { /* Power4+ */
144 .pvr_mask = 0xffff0000, 137 .pvr_mask = 0xffff0000,
@@ -151,7 +144,6 @@ struct cpu_spec cpu_specs[] = {
151 .icache_bsize = 128, 144 .icache_bsize = 128,
152 .dcache_bsize = 128, 145 .dcache_bsize = 128,
153 .cpu_setup = __setup_cpu_power4, 146 .cpu_setup = __setup_cpu_power4,
154 .firmware_features = COMMON_PPC64_FW,
155 }, 147 },
156 { /* PPC970 */ 148 { /* PPC970 */
157 .pvr_mask = 0xffff0000, 149 .pvr_mask = 0xffff0000,
@@ -166,7 +158,6 @@ struct cpu_spec cpu_specs[] = {
166 .icache_bsize = 128, 158 .icache_bsize = 128,
167 .dcache_bsize = 128, 159 .dcache_bsize = 128,
168 .cpu_setup = __setup_cpu_ppc970, 160 .cpu_setup = __setup_cpu_ppc970,
169 .firmware_features = COMMON_PPC64_FW,
170 }, 161 },
171 { /* PPC970FX */ 162 { /* PPC970FX */
172 .pvr_mask = 0xffff0000, 163 .pvr_mask = 0xffff0000,
@@ -181,7 +172,6 @@ struct cpu_spec cpu_specs[] = {
181 .icache_bsize = 128, 172 .icache_bsize = 128,
182 .dcache_bsize = 128, 173 .dcache_bsize = 128,
183 .cpu_setup = __setup_cpu_ppc970, 174 .cpu_setup = __setup_cpu_ppc970,
184 .firmware_features = COMMON_PPC64_FW,
185 }, 175 },
186 { /* PPC970MP */ 176 { /* PPC970MP */
187 .pvr_mask = 0xffff0000, 177 .pvr_mask = 0xffff0000,
@@ -196,7 +186,6 @@ struct cpu_spec cpu_specs[] = {
196 .icache_bsize = 128, 186 .icache_bsize = 128,
197 .dcache_bsize = 128, 187 .dcache_bsize = 128,
198 .cpu_setup = __setup_cpu_ppc970, 188 .cpu_setup = __setup_cpu_ppc970,
199 .firmware_features = COMMON_PPC64_FW,
200 }, 189 },
201 { /* Power5 */ 190 { /* Power5 */
202 .pvr_mask = 0xffff0000, 191 .pvr_mask = 0xffff0000,
@@ -211,7 +200,6 @@ struct cpu_spec cpu_specs[] = {
211 .icache_bsize = 128, 200 .icache_bsize = 128,
212 .dcache_bsize = 128, 201 .dcache_bsize = 128,
213 .cpu_setup = __setup_cpu_power4, 202 .cpu_setup = __setup_cpu_power4,
214 .firmware_features = COMMON_PPC64_FW,
215 }, 203 },
216 { /* Power5 */ 204 { /* Power5 */
217 .pvr_mask = 0xffff0000, 205 .pvr_mask = 0xffff0000,
@@ -226,7 +214,6 @@ struct cpu_spec cpu_specs[] = {
226 .icache_bsize = 128, 214 .icache_bsize = 128,
227 .dcache_bsize = 128, 215 .dcache_bsize = 128,
228 .cpu_setup = __setup_cpu_power4, 216 .cpu_setup = __setup_cpu_power4,
229 .firmware_features = COMMON_PPC64_FW,
230 }, 217 },
231 { /* BE DD1.x */ 218 { /* BE DD1.x */
232 .pvr_mask = 0xffff0000, 219 .pvr_mask = 0xffff0000,
@@ -241,7 +228,6 @@ struct cpu_spec cpu_specs[] = {
241 .icache_bsize = 128, 228 .icache_bsize = 128,
242 .dcache_bsize = 128, 229 .dcache_bsize = 128,
243 .cpu_setup = __setup_cpu_be, 230 .cpu_setup = __setup_cpu_be,
244 .firmware_features = COMMON_PPC64_FW,
245 }, 231 },
246 { /* default match */ 232 { /* default match */
247 .pvr_mask = 0x00000000, 233 .pvr_mask = 0x00000000,
@@ -254,29 +240,5 @@ struct cpu_spec cpu_specs[] = {
254 .icache_bsize = 128, 240 .icache_bsize = 128,
255 .dcache_bsize = 128, 241 .dcache_bsize = 128,
256 .cpu_setup = __setup_cpu_power4, 242 .cpu_setup = __setup_cpu_power4,
257 .firmware_features = COMMON_PPC64_FW,
258 } 243 }
259}; 244};
260
261firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
262 {FW_FEATURE_PFT, "hcall-pft"},
263 {FW_FEATURE_TCE, "hcall-tce"},
264 {FW_FEATURE_SPRG0, "hcall-sprg0"},
265 {FW_FEATURE_DABR, "hcall-dabr"},
266 {FW_FEATURE_COPY, "hcall-copy"},
267 {FW_FEATURE_ASR, "hcall-asr"},
268 {FW_FEATURE_DEBUG, "hcall-debug"},
269 {FW_FEATURE_PERF, "hcall-perf"},
270 {FW_FEATURE_DUMP, "hcall-dump"},
271 {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
272 {FW_FEATURE_MIGRATE, "hcall-migrate"},
273 {FW_FEATURE_PERFMON, "hcall-perfmon"},
274 {FW_FEATURE_CRQ, "hcall-crq"},
275 {FW_FEATURE_VIO, "hcall-vio"},
276 {FW_FEATURE_RDMA, "hcall-rdma"},
277 {FW_FEATURE_LLAN, "hcall-lLAN"},
278 {FW_FEATURE_BULK, "hcall-bulk"},
279 {FW_FEATURE_XDABR, "hcall-xdabr"},
280 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
281 {FW_FEATURE_SPLPAR, "hcall-splpar"},
282};
diff --git a/arch/ppc64/kernel/firmware.c b/arch/ppc64/kernel/firmware.c
new file mode 100644
index 000000000000..d8432c0fb27d
--- /dev/null
+++ b/arch/ppc64/kernel/firmware.c
@@ -0,0 +1,47 @@
1/*
2 * arch/ppc64/kernel/firmware.c
3 *
4 * Extracted from cputable.c
5 *
6 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
7 *
8 * Modifications for ppc64:
9 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
10 * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#include <linux/config.h>
19
20#include <asm/firmware.h>
21
22unsigned long ppc64_firmware_features;
23
24#ifdef CONFIG_PPC_PSERIES
25firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
26 {FW_FEATURE_PFT, "hcall-pft"},
27 {FW_FEATURE_TCE, "hcall-tce"},
28 {FW_FEATURE_SPRG0, "hcall-sprg0"},
29 {FW_FEATURE_DABR, "hcall-dabr"},
30 {FW_FEATURE_COPY, "hcall-copy"},
31 {FW_FEATURE_ASR, "hcall-asr"},
32 {FW_FEATURE_DEBUG, "hcall-debug"},
33 {FW_FEATURE_PERF, "hcall-perf"},
34 {FW_FEATURE_DUMP, "hcall-dump"},
35 {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
36 {FW_FEATURE_MIGRATE, "hcall-migrate"},
37 {FW_FEATURE_PERFMON, "hcall-perfmon"},
38 {FW_FEATURE_CRQ, "hcall-crq"},
39 {FW_FEATURE_VIO, "hcall-vio"},
40 {FW_FEATURE_RDMA, "hcall-rdma"},
41 {FW_FEATURE_LLAN, "hcall-lLAN"},
42 {FW_FEATURE_BULK, "hcall-bulk"},
43 {FW_FEATURE_XDABR, "hcall-xdabr"},
44 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
45 {FW_FEATURE_SPLPAR, "hcall-splpar"},
46};
47#endif
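
[Annotation] The firmware feature table moves out of cputable.c, and out of every cpu_spec entry per the long series of .firmware_features deletions above, into a dedicated firmware.c with a single global bitmask. A caller then tests capabilities roughly like this (sketch; assumes early pSeries setup has or'ed the detected bits into ppc64_firmware_features):

	#include <asm/firmware.h>

	static int have_splpar(void)
	{
		return (ppc64_firmware_features & FW_FEATURE_SPLPAR) != 0;
	}
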
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index accaa052d31f..036959775623 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -23,14 +23,11 @@
23 * 2 of the License, or (at your option) any later version. 23 * 2 of the License, or (at your option) any later version.
24 */ 24 */
25 25
26#define SECONDARY_PROCESSORS
27
28#include <linux/config.h> 26#include <linux/config.h>
29#include <linux/threads.h> 27#include <linux/threads.h>
30#include <asm/processor.h> 28#include <asm/processor.h>
31#include <asm/page.h> 29#include <asm/page.h>
32#include <asm/mmu.h> 30#include <asm/mmu.h>
33#include <asm/naca.h>
34#include <asm/systemcfg.h> 31#include <asm/systemcfg.h>
35#include <asm/ppc_asm.h> 32#include <asm/ppc_asm.h>
36#include <asm/offsets.h> 33#include <asm/offsets.h>
@@ -45,18 +42,13 @@
45#endif 42#endif
46 43
47/* 44/*
48 * hcall interface to pSeries LPAR
49 */
50#define H_SET_ASR 0x30
51
52/*
53 * We layout physical memory as follows: 45 * We layout physical memory as follows:
54 * 0x0000 - 0x00ff : Secondary processor spin code 46 * 0x0000 - 0x00ff : Secondary processor spin code
55 * 0x0100 - 0x2fff : pSeries Interrupt prologs 47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
56 * 0x3000 - 0x3fff : Interrupt support 48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
57 * 0x4000 - 0x4fff : NACA 49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
58 * 0x6000 : iSeries and common interrupt prologs 50 * 0x7000 - 0x7fff : FWNMI data area
59 * 0x9000 - 0x9fff : Initial segment table 51 * 0x8000 - : Early init and support code
60 */ 52 */
61 53
62/* 54/*
@@ -94,6 +86,7 @@ END_FTR_SECTION(0, 1)
94 86
95 /* Catch branch to 0 in real mode */ 87 /* Catch branch to 0 in real mode */
96 trap 88 trap
89
97#ifdef CONFIG_PPC_ISERIES 90#ifdef CONFIG_PPC_ISERIES
98 /* 91 /*
99 * At offset 0x20, there is a pointer to iSeries LPAR data. 92 * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -103,12 +96,12 @@ END_FTR_SECTION(0, 1)
103 .llong hvReleaseData-KERNELBASE 96 .llong hvReleaseData-KERNELBASE
104 97
105 /* 98 /*
106 * At offset 0x28 and 0x30 are offsets to the msChunks 99 * At offset 0x28 and 0x30 are offsets to the mschunks_map
107 * array (used by the iSeries LPAR debugger to do translation 100 * array (used by the iSeries LPAR debugger to do translation
108 * between physical addresses and absolute addresses) and 101 * between physical addresses and absolute addresses) and
109 * to the pidhash table (also used by the debugger) 102 * to the pidhash table (also used by the debugger)
110 */ 103 */
111 .llong msChunks-KERNELBASE 104 .llong mschunks_map-KERNELBASE
112 .llong 0 /* pidhash-KERNELBASE SFRXXX */ 105 .llong 0 /* pidhash-KERNELBASE SFRXXX */
113 106
114 /* Offset 0x38 - Pointer to start of embedded System.map */ 107 /* Offset 0x38 - Pointer to start of embedded System.map */
@@ -120,7 +113,7 @@ embedded_sysmap_start:
120embedded_sysmap_end: 113embedded_sysmap_end:
121 .llong 0 114 .llong 0
122 115
123#else /* CONFIG_PPC_ISERIES */ 116#endif /* CONFIG_PPC_ISERIES */
124 117
125 /* Secondary processors spin on this value until it goes to 1. */ 118 /* Secondary processors spin on this value until it goes to 1. */
126 .globl __secondary_hold_spinloop 119 .globl __secondary_hold_spinloop
@@ -155,7 +148,7 @@ _GLOBAL(__secondary_hold)
155 std r24,__secondary_hold_acknowledge@l(0) 148 std r24,__secondary_hold_acknowledge@l(0)
156 sync 149 sync
157 150
158 /* All secondary cpu's wait here until told to start. */ 151 /* All secondary cpus wait here until told to start. */
159100: ld r4,__secondary_hold_spinloop@l(0) 152100: ld r4,__secondary_hold_spinloop@l(0)
160 cmpdi 0,r4,1 153 cmpdi 0,r4,1
161 bne 100b 154 bne 100b
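
The hold protocol around this loop: each secondary identifies itself in __secondary_hold_acknowledge, then spins until the boot cpu writes 1 to __secondary_hold_spinloop. Rendered as illustrative C (the authoritative version is the assembly above):

	/* sketch of the secondary hold/release handshake */
	ack = phys_cpu_id;          /* std r24,__secondary_hold_acknowledge@l(0) */
	while (spinloop != 1)       /* 100: ld r4,...; cmpdi 0,r4,1; bne 100b */
		/* spin */;
	/* released: branch on to __secondary_start */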
@@ -170,7 +163,6 @@ _GLOBAL(__secondary_hold)
170 BUG_OPCODE 163 BUG_OPCODE
171#endif 164#endif
172#endif 165#endif
173#endif
174 166
175/* This value is used to mark exception frames on the stack. */ 167/* This value is used to mark exception frames on the stack. */
176 .section ".toc","aw" 168 .section ".toc","aw"
@@ -502,33 +494,37 @@ system_call_pSeries:
502 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 494 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
503 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 495 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
504 496
497 . = 0x3000
498
499/*** pSeries interrupt support ***/
500
505 /* moved from 0xf00 */ 501 /* moved from 0xf00 */
506 STD_EXCEPTION_PSERIES(0x3000, performance_monitor) 502 STD_EXCEPTION_PSERIES(., performance_monitor)
507 503
508 . = 0x3100 504 .align 7
509_GLOBAL(do_stab_bolted_pSeries) 505_GLOBAL(do_stab_bolted_pSeries)
510 mtcrf 0x80,r12 506 mtcrf 0x80,r12
511 mfspr r12,SPRG2 507 mfspr r12,SPRG2
512 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
513 509
514 510/*
515 /* Space for the naca. Architected to be located at real address 511 * Vectors for the FWNMI option. Share common code.
516 * NACA_PHYS_ADDR. Various tools rely on this location being fixed. 512 */
517 * The first dword of the naca is required by iSeries LPAR to 513 .globl system_reset_fwnmi
518 * point to itVpdAreas. On pSeries native, this value is not used. 514system_reset_fwnmi:
519 */ 515 HMT_MEDIUM
520 . = NACA_PHYS_ADDR 516 mtspr SPRG1,r13 /* save r13 */
521 .globl __end_interrupts 517 RUNLATCH_ON(r13)
522__end_interrupts: 518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
523#ifdef CONFIG_PPC_ISERIES
524 .globl naca
525naca:
526 .llong itVpdAreas
527 .llong 0 /* xRamDisk */
528 .llong 0 /* xRamDiskSize */
529 519
530 . = 0x6100 520 .globl machine_check_fwnmi
521machine_check_fwnmi:
522 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
531 526
527#ifdef CONFIG_PPC_ISERIES
532/*** ISeries-LPAR interrupt handlers ***/ 528/*** ISeries-LPAR interrupt handlers ***/
533 529
534 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) 530 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
@@ -626,9 +622,7 @@ system_reset_iSeries:
626 622
627 cmpwi 0,r23,0 623 cmpwi 0,r23,0
628 beq iSeries_secondary_smp_loop /* Loop until told to go */ 624 beq iSeries_secondary_smp_loop /* Loop until told to go */
629#ifdef SECONDARY_PROCESSORS
630 bne .__secondary_start /* Loop until told to go */ 625 bne .__secondary_start /* Loop until told to go */
631#endif
632iSeries_secondary_smp_loop: 626iSeries_secondary_smp_loop:
633 /* Let the Hypervisor know we are alive */ 627 /* Let the Hypervisor know we are alive */
634 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 628 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -671,51 +665,8 @@ hardware_interrupt_iSeries_masked:
671 ld r13,PACA_EXGEN+EX_R13(r13) 665 ld r13,PACA_EXGEN+EX_R13(r13)
672 rfid 666 rfid
673 b . /* prevent speculative execution */ 667 b . /* prevent speculative execution */
674#endif
675
676/*
677 * Data area reserved for FWNMI option.
678 */
679 .= 0x7000
680 .globl fwnmi_data_area
681fwnmi_data_area:
682
683#ifdef CONFIG_PPC_ISERIES
684 . = LPARMAP_PHYS
685#include "lparmap.s"
686#endif /* CONFIG_PPC_ISERIES */ 668#endif /* CONFIG_PPC_ISERIES */
687 669
688/*
689 * Vectors for the FWNMI option. Share common code.
690 */
691 . = 0x8000
692 .globl system_reset_fwnmi
693system_reset_fwnmi:
694 HMT_MEDIUM
695 mtspr SPRG1,r13 /* save r13 */
696 RUNLATCH_ON(r13)
697 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
698 .globl machine_check_fwnmi
699machine_check_fwnmi:
700 HMT_MEDIUM
701 mtspr SPRG1,r13 /* save r13 */
702 RUNLATCH_ON(r13)
703 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
704
705 /*
706 * Space for the initial segment table
707 * For LPAR, the hypervisor must fill in at least one entry
708 * before we get control (with relocate on)
709 */
710 . = STAB0_PHYS_ADDR
711 .globl __start_stab
712__start_stab:
713
714 . = (STAB0_PHYS_ADDR + PAGE_SIZE)
715 .globl __end_stab
716__end_stab:
717
718
719/*** Common interrupt handlers ***/ 670/*** Common interrupt handlers ***/
720 671
721 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 672 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
@@ -752,8 +703,8 @@ machine_check_common:
752 * R9 contains the saved CR, r13 points to the paca, 703 * R9 contains the saved CR, r13 points to the paca,
753 * r10 contains the (bad) kernel stack pointer, 704 * r10 contains the (bad) kernel stack pointer,
754 * r11 and r12 contain the saved SRR0 and SRR1. 705 * r11 and r12 contain the saved SRR0 and SRR1.
755 * We switch to using the paca guard page as an emergency stack, 706 * We switch to using an emergency stack, save the registers there,
756 * save the registers there, and call kernel_bad_stack(), which panics. 707 * and call kernel_bad_stack(), which panics.
757 */ 708 */
758bad_stack: 709bad_stack:
759 ld r1,PACAEMERGSP(r13) 710 ld r1,PACAEMERGSP(r13)
@@ -906,6 +857,62 @@ fp_unavailable_common:
906 bl .kernel_fp_unavailable_exception 857 bl .kernel_fp_unavailable_exception
907 BUG_OPCODE 858 BUG_OPCODE
908 859
860/*
861 * load_up_fpu(unused, unused, tsk)
862 * Disable FP for the task which had the FPU previously,
863 * and save its floating-point registers in its thread_struct.
864 * Enables the FPU for use in the kernel on return.
865 * On SMP we know the fpu is free, since we give it up every
866 * switch (ie, no lazy save of the FP registers).
867 * On entry: r13 == 'current' && last_task_used_math != 'current'
868 */
869_STATIC(load_up_fpu)
870 mfmsr r5 /* grab the current MSR */
871 ori r5,r5,MSR_FP
872 mtmsrd r5 /* enable use of fpu now */
873 isync
874/*
875 * For SMP, we don't do lazy FPU switching because it just gets too
876 * horrendously complex, especially when a task switches from one CPU
877 * to another. Instead we call giveup_fpu in switch_to.
878 *
879 */
880#ifndef CONFIG_SMP
881 ld r3,last_task_used_math@got(r2)
882 ld r4,0(r3)
883 cmpdi 0,r4,0
884 beq 1f
885 /* Save FP state to last_task_used_math's THREAD struct */
886 addi r4,r4,THREAD
887 SAVE_32FPRS(0, r4)
888 mffs fr0
889 stfd fr0,THREAD_FPSCR(r4)
890 /* Disable FP for last_task_used_math */
891 ld r5,PT_REGS(r4)
892 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893 li r6,MSR_FP|MSR_FE0|MSR_FE1
894 andc r4,r4,r6
895 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8961:
897#endif /* CONFIG_SMP */
898 /* enable use of FP after return */
899 ld r4,PACACURRENT(r13)
900 addi r5,r4,THREAD /* Get THREAD */
901 ld r4,THREAD_FPEXC_MODE(r5)
902 ori r12,r12,MSR_FP
903 or r12,r12,r4
904 std r12,_MSR(r1)
905 lfd fr0,THREAD_FPSCR(r5)
906 mtfsf 0xff,fr0
907 REST_32FPRS(0, r5)
908#ifndef CONFIG_SMP
909 /* Update last_task_used_math to 'current' */
910 subi r4,r5,THREAD /* Back to 'current' */
911 std r4,0(r3)
912#endif /* CONFIG_SMP */
913 /* restore registers and return */
914 b fast_exception_return
915
909 .align 7 916 .align 7
910 .globl altivec_unavailable_common 917 .globl altivec_unavailable_common
911altivec_unavailable_common: 918altivec_unavailable_common:
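
load_up_fpu moves here, unchanged, from the tail of head.S so it sits next to the FP-unavailable handler that reaches it. Its UP-only lazy-switch policy, sketched in C (save_fp_state/load_fp_state are hypothetical stand-ins for the SAVE_32FPRS/REST_32FPRS sequences):

	/* sketch: evict the previous FPU owner, load ours, take ownership */
	struct task_struct *prev = last_task_used_math;	/* UP only */
	if (prev) {
		save_fp_state(&prev->thread);		/* SAVE_32FPRS + mffs */
		prev->thread.regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	}
	load_fp_state(&current->thread);		/* REST_32FPRS + mtfsf */
	regs->msr |= MSR_FP | current->thread.fpexc_mode;
	last_task_used_math = current;

On SMP the eviction half never runs: giveup_fpu() is called at every context switch instead.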
@@ -921,6 +928,80 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
921 bl .altivec_unavailable_exception 928 bl .altivec_unavailable_exception
922 b .ret_from_except 929 b .ret_from_except
923 930
931#ifdef CONFIG_ALTIVEC
932/*
933 * load_up_altivec(unused, unused, tsk)
934 * Disable VMX for the task which had it previously,
935 * and save its vector registers in its thread_struct.
936 * Enables the VMX for use in the kernel on return.
937 * On SMP we know the VMX is free, since we give it up every
938 * switch (ie, no lazy save of the vector registers).
939 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
940 */
941_STATIC(load_up_altivec)
942 mfmsr r5 /* grab the current MSR */
943 oris r5,r5,MSR_VEC@h
944 mtmsrd r5 /* enable use of VMX now */
945 isync
946
947/*
948 * For SMP, we don't do lazy VMX switching because it just gets too
949 * horrendously complex, especially when a task switches from one CPU
950 * to another. Instead we call giveup_altivec in switch_to.
951 * VRSAVE isn't dealt with here, that is done in the normal context
952 * switch code. Note that we could rely on vrsave value to eventually
953 * avoid saving all of the VREGs here...
954 */
955#ifndef CONFIG_SMP
956 ld r3,last_task_used_altivec@got(r2)
957 ld r4,0(r3)
958 cmpdi 0,r4,0
959 beq 1f
960 /* Save VMX state to last_task_used_altivec's THREAD struct */
961 addi r4,r4,THREAD
962 SAVE_32VRS(0,r5,r4)
963 mfvscr vr0
964 li r10,THREAD_VSCR
965 stvx vr0,r10,r4
966 /* Disable VMX for last_task_used_altivec */
967 ld r5,PT_REGS(r4)
968 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
969 lis r6,MSR_VEC@h
970 andc r4,r4,r6
971 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9721:
973#endif /* CONFIG_SMP */
974 /* Hack: if we get an altivec unavailable trap with VRSAVE
975 * set to all zeros, we assume this is a broken application
976 * that fails to set it properly, and thus we switch it to
977 * all 1's
978 */
979 mfspr r4,SPRN_VRSAVE
980 cmpdi 0,r4,0
981 bne+ 1f
982 li r4,-1
983 mtspr SPRN_VRSAVE,r4
9841:
985 /* enable use of VMX after return */
986 ld r4,PACACURRENT(r13)
987 addi r5,r4,THREAD /* Get THREAD */
988 oris r12,r12,MSR_VEC@h
989 std r12,_MSR(r1)
990 li r4,1
991 li r10,THREAD_VSCR
992 stw r4,THREAD_USED_VR(r5)
993 lvx vr0,r10,r5
994 mtvscr vr0
995 REST_32VRS(0,r4,r5)
996#ifndef CONFIG_SMP
997 /* Update last_task_used_altivec to 'current' */
998 subi r4,r5,THREAD /* Back to 'current' */
999 std r4,0(r3)
1000#endif /* CONFIG_SMP */
1001 /* restore registers and return */
1002 b fast_exception_return
1003#endif /* CONFIG_ALTIVEC */
1004
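
Note the VRSAVE workaround that load_up_altivec carries with it: VRSAVE is the ABI's mask of live vector registers, so an application that leaves it at zero would have none of its VRs preserved across context switches. The kernel pessimistically assumes all 32 are live; in C terms (sketch):

	/* sketch of the VRSAVE fixup */
	if (mfspr(SPRN_VRSAVE) == 0)
		mtspr(SPRN_VRSAVE, -1UL);	/* treat every VR as in use */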
924/* 1005/*
925 * Hash table stuff 1006 * Hash table stuff
926 */ 1007 */
@@ -1167,6 +1248,42 @@ unrecov_slb:
1167 bl .unrecoverable_exception 1248 bl .unrecoverable_exception
1168 b 1b 1249 b 1b
1169 1250
1251/*
1252 * Space for CPU0's segment table.
1253 *
1254 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is given to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT).
1259 */
1260 . = STAB0_PHYS_ADDR /* 0x6000 */
1261 .globl initial_stab
1262initial_stab:
1263 .space 4096
1264
1265/*
1266 * Data area reserved for FWNMI option.
1267 * This address (0x7000) is fixed by the RPA.
1268 */
1269 . = 0x7000
1270 .globl fwnmi_data_area
1271fwnmi_data_area:
1272
1273 /* iSeries does not use the FWNMI stuff, so it is safe to put
1274 * this here, even if we later allow kernels that will boot on
1275 * both pSeries and iSeries */
1276#ifdef CONFIG_PPC_ISERIES
1277 . = LPARMAP_PHYS
1278#include "lparmap.s"
1279/*
1280 * This ".text" is here for old compilers that generate a trailing
1281 * .note section when compiling .c files to .s
1282 */
1283 .text
1284#endif /* CONFIG_PPC_ISERIES */
1285
1286 . = 0x8000
1170 1287
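
This block pins the hypervisor-visible structures at the addresses the layout comment near the top of the file now documents: the segment table at STAB0_PHYS_ADDR (0x6000) because the hypervisor is handed a page number rather than a relocatable symbol, the FWNMI area at 0x7000 because the RPA fixes it, and relocatable code resumes at 0x8000. The page-number relation, spelled out (assuming the 4K PAGE_SHIFT in effect here):

	/* what xLparMap effectively hands the hypervisor */
	unsigned long stab_pfn = STAB0_PHYS_ADDR >> PAGE_SHIFT;	/* 0x6000 >> 12 == 6 */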
1171/* 1288/*
1172 * On pSeries, secondary processors spin in the following code. 1289 * On pSeries, secondary processors spin in the following code.
@@ -1200,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1200 b .kexec_wait /* next kernel might do better */ 1317 b .kexec_wait /* next kernel might do better */
1201 1318
12022: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1203 /* From now on, r24 is expected to be logica cpuid */ 1320 /* From now on, r24 is expected to be logical cpuid */
1204 mr r24,r5 1321 mr r24,r5
12053: HMT_LOW 13223: HMT_LOW
1206 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1323 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
@@ -1213,10 +1330,8 @@ _GLOBAL(pSeries_secondary_smp_init)
1213 1330
1214 cmpwi 0,r23,0 1331 cmpwi 0,r23,0
1215#ifdef CONFIG_SMP 1332#ifdef CONFIG_SMP
1216#ifdef SECONDARY_PROCESSORS
1217 bne .__secondary_start 1333 bne .__secondary_start
1218#endif 1334#endif
1219#endif
1220 b 3b /* Loop until told to go */ 1335 b 3b /* Loop until told to go */
1221 1336
1222#ifdef CONFIG_PPC_ISERIES 1337#ifdef CONFIG_PPC_ISERIES
@@ -1430,228 +1545,6 @@ _GLOBAL(copy_and_flush)
1430.align 8 1545.align 8
1431copy_to_here: 1546copy_to_here:
1432 1547
1433/*
1434 * load_up_fpu(unused, unused, tsk)
1435 * Disable FP for the task which had the FPU previously,
1436 * and save its floating-point registers in its thread_struct.
1437 * Enables the FPU for use in the kernel on return.
1438 * On SMP we know the fpu is free, since we give it up every
1439 * switch (ie, no lazy save of the FP registers).
1440 * On entry: r13 == 'current' && last_task_used_math != 'current'
1441 */
1442_STATIC(load_up_fpu)
1443 mfmsr r5 /* grab the current MSR */
1444 ori r5,r5,MSR_FP
1445 mtmsrd r5 /* enable use of fpu now */
1446 isync
1447/*
1448 * For SMP, we don't do lazy FPU switching because it just gets too
1449 * horrendously complex, especially when a task switches from one CPU
1450 * to another. Instead we call giveup_fpu in switch_to.
1451 *
1452 */
1453#ifndef CONFIG_SMP
1454 ld r3,last_task_used_math@got(r2)
1455 ld r4,0(r3)
1456 cmpdi 0,r4,0
1457 beq 1f
1458 /* Save FP state to last_task_used_math's THREAD struct */
1459 addi r4,r4,THREAD
1460 SAVE_32FPRS(0, r4)
1461 mffs fr0
1462 stfd fr0,THREAD_FPSCR(r4)
1463 /* Disable FP for last_task_used_math */
1464 ld r5,PT_REGS(r4)
1465 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1466 li r6,MSR_FP|MSR_FE0|MSR_FE1
1467 andc r4,r4,r6
1468 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
14691:
1470#endif /* CONFIG_SMP */
1471 /* enable use of FP after return */
1472 ld r4,PACACURRENT(r13)
1473 addi r5,r4,THREAD /* Get THREAD */
1474 ld r4,THREAD_FPEXC_MODE(r5)
1475 ori r12,r12,MSR_FP
1476 or r12,r12,r4
1477 std r12,_MSR(r1)
1478 lfd fr0,THREAD_FPSCR(r5)
1479 mtfsf 0xff,fr0
1480 REST_32FPRS(0, r5)
1481#ifndef CONFIG_SMP
1482 /* Update last_task_used_math to 'current' */
1483 subi r4,r5,THREAD /* Back to 'current' */
1484 std r4,0(r3)
1485#endif /* CONFIG_SMP */
1486 /* restore registers and return */
1487 b fast_exception_return
1488
1489/*
1490 * disable_kernel_fp()
1491 * Disable the FPU.
1492 */
1493_GLOBAL(disable_kernel_fp)
1494 mfmsr r3
1495 rldicl r0,r3,(63-MSR_FP_LG),1
1496 rldicl r3,r0,(MSR_FP_LG+1),0
1497 mtmsrd r3 /* disable use of fpu now */
1498 isync
1499 blr
1500
1501/*
1502 * giveup_fpu(tsk)
1503 * Disable FP for the task given as the argument,
1504 * and save the floating-point registers in its thread_struct.
1505 * Enables the FPU for use in the kernel on return.
1506 */
1507_GLOBAL(giveup_fpu)
1508 mfmsr r5
1509 ori r5,r5,MSR_FP
1510 mtmsrd r5 /* enable use of fpu now */
1511 isync
1512 cmpdi 0,r3,0
1513 beqlr- /* if no previous owner, done */
1514 addi r3,r3,THREAD /* want THREAD of task */
1515 ld r5,PT_REGS(r3)
1516 cmpdi 0,r5,0
1517 SAVE_32FPRS(0, r3)
1518 mffs fr0
1519 stfd fr0,THREAD_FPSCR(r3)
1520 beq 1f
1521 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1522 li r3,MSR_FP|MSR_FE0|MSR_FE1
1523 andc r4,r4,r3 /* disable FP for previous task */
1524 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15251:
1526#ifndef CONFIG_SMP
1527 li r5,0
1528 ld r4,last_task_used_math@got(r2)
1529 std r5,0(r4)
1530#endif /* CONFIG_SMP */
1531 blr
1532
1533
1534#ifdef CONFIG_ALTIVEC
1535
1536/*
1537 * load_up_altivec(unused, unused, tsk)
1538 * Disable VMX for the task which had it previously,
1539 * and save its vector registers in its thread_struct.
1540 * Enables the VMX for use in the kernel on return.
1541 * On SMP we know the VMX is free, since we give it up every
1542 * switch (ie, no lazy save of the vector registers).
1543 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1544 */
1545_STATIC(load_up_altivec)
1546 mfmsr r5 /* grab the current MSR */
1547 oris r5,r5,MSR_VEC@h
1548 mtmsrd r5 /* enable use of VMX now */
1549 isync
1550
1551/*
1552 * For SMP, we don't do lazy VMX switching because it just gets too
1553 * horrendously complex, especially when a task switches from one CPU
1554 * to another. Instead we call giveup_altvec in switch_to.
1555 * VRSAVE isn't dealt with here, that is done in the normal context
1556 * switch code. Note that we could rely on vrsave value to eventually
1557 * avoid saving all of the VREGs here...
1558 */
1559#ifndef CONFIG_SMP
1560 ld r3,last_task_used_altivec@got(r2)
1561 ld r4,0(r3)
1562 cmpdi 0,r4,0
1563 beq 1f
1564 /* Save VMX state to last_task_used_altivec's THREAD struct */
1565 addi r4,r4,THREAD
1566 SAVE_32VRS(0,r5,r4)
1567 mfvscr vr0
1568 li r10,THREAD_VSCR
1569 stvx vr0,r10,r4
1570 /* Disable VMX for last_task_used_altivec */
1571 ld r5,PT_REGS(r4)
1572 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1573 lis r6,MSR_VEC@h
1574 andc r4,r4,r6
1575 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15761:
1577#endif /* CONFIG_SMP */
1578 /* Hack: if we get an altivec unavailable trap with VRSAVE
1579 * set to all zeros, we assume this is a broken application
1580 * that fails to set it properly, and thus we switch it to
1581 * all 1's
1582 */
1583 mfspr r4,SPRN_VRSAVE
1584 cmpdi 0,r4,0
1585 bne+ 1f
1586 li r4,-1
1587 mtspr SPRN_VRSAVE,r4
15881:
1589 /* enable use of VMX after return */
1590 ld r4,PACACURRENT(r13)
1591 addi r5,r4,THREAD /* Get THREAD */
1592 oris r12,r12,MSR_VEC@h
1593 std r12,_MSR(r1)
1594 li r4,1
1595 li r10,THREAD_VSCR
1596 stw r4,THREAD_USED_VR(r5)
1597 lvx vr0,r10,r5
1598 mtvscr vr0
1599 REST_32VRS(0,r4,r5)
1600#ifndef CONFIG_SMP
1601 /* Update last_task_used_math to 'current' */
1602 subi r4,r5,THREAD /* Back to 'current' */
1603 std r4,0(r3)
1604#endif /* CONFIG_SMP */
1605 /* restore registers and return */
1606 b fast_exception_return
1607
1608/*
1609 * disable_kernel_altivec()
1610 * Disable the VMX.
1611 */
1612_GLOBAL(disable_kernel_altivec)
1613 mfmsr r3
1614 rldicl r0,r3,(63-MSR_VEC_LG),1
1615 rldicl r3,r0,(MSR_VEC_LG+1),0
1616 mtmsrd r3 /* disable use of VMX now */
1617 isync
1618 blr
1619
1620/*
1621 * giveup_altivec(tsk)
1622 * Disable VMX for the task given as the argument,
1623 * and save the vector registers in its thread_struct.
1624 * Enables the VMX for use in the kernel on return.
1625 */
1626_GLOBAL(giveup_altivec)
1627 mfmsr r5
1628 oris r5,r5,MSR_VEC@h
1629 mtmsrd r5 /* enable use of VMX now */
1630 isync
1631 cmpdi 0,r3,0
1632 beqlr- /* if no previous owner, done */
1633 addi r3,r3,THREAD /* want THREAD of task */
1634 ld r5,PT_REGS(r3)
1635 cmpdi 0,r5,0
1636 SAVE_32VRS(0,r4,r3)
1637 mfvscr vr0
1638 li r4,THREAD_VSCR
1639 stvx vr0,r4,r3
1640 beq 1f
1641 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1642 lis r3,MSR_VEC@h
1643 andc r4,r4,r3 /* disable FP for previous task */
1644 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
16451:
1646#ifndef CONFIG_SMP
1647 li r5,0
1648 ld r4,last_task_used_altivec@got(r2)
1649 std r5,0(r4)
1650#endif /* CONFIG_SMP */
1651 blr
1652
1653#endif /* CONFIG_ALTIVEC */
1654
1655#ifdef CONFIG_SMP 1548#ifdef CONFIG_SMP
1656#ifdef CONFIG_PPC_PMAC 1549#ifdef CONFIG_PPC_PMAC
1657/* 1550/*
@@ -2002,9 +1895,6 @@ _STATIC(start_here_common)
2002 1895
2003 bl .start_kernel 1896 bl .start_kernel
2004 1897
2005_GLOBAL(__setup_cpu_power3)
2006 blr
2007
2008_GLOBAL(hmt_init) 1898_GLOBAL(hmt_init)
2009#ifdef CONFIG_HMT 1899#ifdef CONFIG_HMT
2010 LOADADDR(r5, hmt_thread_data) 1900 LOADADDR(r5, hmt_thread_data)
@@ -2095,20 +1985,19 @@ _GLOBAL(smp_release_cpus)
2095 1985
2096/* 1986/*
2097 * We put a few things here that have to be page-aligned. 1987 * We put a few things here that have to be page-aligned.
2098 * This stuff goes at the beginning of the data segment, 1988 * This stuff goes at the beginning of the bss, which is page-aligned.
2099 * which is page-aligned.
2100 */ 1989 */
2101 .data 1990 .section ".bss"
1991
2102 .align 12 1992 .align 12
2103 .globl sdata 1993
2104sdata:
2105 .globl empty_zero_page 1994 .globl empty_zero_page
2106empty_zero_page: 1995empty_zero_page:
2107 .space 4096 1996 .space PAGE_SIZE
2108 1997
2109 .globl swapper_pg_dir 1998 .globl swapper_pg_dir
2110swapper_pg_dir: 1999swapper_pg_dir:
2111 .space 4096 2000 .space PAGE_SIZE
2112 2001
2113/* 2002/*
2114 * This space gets a copy of optional info passed to us by the bootstrap 2003 * This space gets a copy of optional info passed to us by the bootstrap
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
index b0250ae4a72a..2192055a90a0 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/ppc64/kernel/iSeries_htab.c
@@ -41,6 +41,7 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
41 unsigned long prpn, unsigned long vflags, 41 unsigned long prpn, unsigned long vflags,
42 unsigned long rflags) 42 unsigned long rflags)
43{ 43{
44 unsigned long arpn;
44 long slot; 45 long slot;
45 hpte_t lhpte; 46 hpte_t lhpte;
46 int secondary = 0; 47 int secondary = 0;
@@ -70,8 +71,10 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
70 slot &= 0x7fffffffffffffff; 71 slot &= 0x7fffffffffffffff;
71 } 72 }
72 73
74 arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
75
73 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 76 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
74 lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags; 77 lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
75 78
76 /* Now fill in the actual HPTE */ 79 /* Now fill in the actual HPTE */
77 HvCallHpt_addValidate(slot, secondary, &lhpte); 80 HvCallHpt_addValidate(slot, secondary, &lhpte);
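
With physRpn_to_absRpn() gone, iSeries_hpte_insert() does the physical-to-absolute translation explicitly, going through byte addresses so phys_to_abs() can consult the chunk map (see mschunks_map in iSeries_setup.c below). The conversion, with units annotated:

	unsigned long pa   = prpn << PAGE_SHIFT;   /* physical byte address */
	unsigned long aa   = phys_to_abs(pa);      /* absolute byte address */
	unsigned long arpn = aa >> PAGE_SHIFT;     /* absolute RPN for the HPTE */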
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index a649edbb23b6..3ffefbbc6623 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -39,6 +39,7 @@
39#include <asm/cputable.h> 39#include <asm/cputable.h>
40#include <asm/sections.h> 40#include <asm/sections.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/firmware.h>
42 43
43#include <asm/time.h> 44#include <asm/time.h>
44#include "iSeries_setup.h" 45#include "iSeries_setup.h"
@@ -314,6 +315,8 @@ static void __init iSeries_init_early(void)
314 315
315 DBG(" -> iSeries_init_early()\n"); 316 DBG(" -> iSeries_init_early()\n");
316 317
318 ppc64_firmware_features = FW_FEATURE_ISERIES;
319
317 ppcdbg_initialize(); 320 ppcdbg_initialize();
318 321
319#if defined(CONFIG_BLK_DEV_INITRD) 322#if defined(CONFIG_BLK_DEV_INITRD)
@@ -412,6 +415,22 @@ static void __init iSeries_init_early(void)
412 DBG(" <- iSeries_init_early()\n"); 415 DBG(" <- iSeries_init_early()\n");
413} 416}
414 417
418struct mschunks_map mschunks_map = {
419 /* XXX We don't use these, but Piranha might need them. */
420 .chunk_size = MSCHUNKS_CHUNK_SIZE,
421 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
422 .chunk_mask = MSCHUNKS_OFFSET_MASK,
423};
424EXPORT_SYMBOL(mschunks_map);
425
426void mschunks_alloc(unsigned long num_chunks)
427{
428 klimit = _ALIGN(klimit, sizeof(u32));
429 mschunks_map.mapping = (u32 *)klimit;
430 klimit += num_chunks * sizeof(u32);
431 mschunks_map.num_chunks = num_chunks;
432}
433
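
mschunks_alloc() carves the u32 mapping array out of the space just past the kernel image (klimit) and records how many 256K chunks it covers. Translation then indexes that array; a sketch of the lookup phys_to_abs() presumably performs (field and macro names are from this diff, the helper itself is defined elsewhere):

	/* sketch: physical -> absolute via the chunk map */
	static unsigned long phys_to_abs_sketch(unsigned long pa)
	{
		unsigned long chunk  = pa >> MSCHUNKS_CHUNK_SHIFT;  /* 256K chunks */
		unsigned long offset = pa & MSCHUNKS_OFFSET_MASK;
		return ((unsigned long)mschunks_map.mapping[chunk]
				<< MSCHUNKS_CHUNK_SHIFT) | offset;
	}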
415/* 434/*
416 * The iSeries may have very large memories ( > 128 GB ) and a partition 435 * The iSeries may have very large memories ( > 128 GB ) and a partition
417 * may get memory in "chunks" that may be anywhere in the 2**52 real 436 * may get memory in "chunks" that may be anywhere in the 2**52 real
@@ -449,7 +468,7 @@ static void __init build_iSeries_Memory_Map(void)
449 468
450 /* Chunk size on iSeries is 256K bytes */ 469 /* Chunk size on iSeries is 256K bytes */
451 totalChunks = (u32)HvLpConfig_getMsChunks(); 470 totalChunks = (u32)HvLpConfig_getMsChunks();
452 klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18); 471 mschunks_alloc(totalChunks);
453 472
454 /* 473 /*
455 * Get absolute address of our load area 474 * Get absolute address of our load area
@@ -486,7 +505,7 @@ static void __init build_iSeries_Memory_Map(void)
486 printk("Load area size %dK\n", loadAreaSize * 256); 505 printk("Load area size %dK\n", loadAreaSize * 256);
487 506
488 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) 507 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
489 msChunks.abs[nextPhysChunk] = 508 mschunks_map.mapping[nextPhysChunk] =
490 loadAreaFirstChunk + nextPhysChunk; 509 loadAreaFirstChunk + nextPhysChunk;
491 510
492 /* 511 /*
@@ -495,7 +514,7 @@ static void __init build_iSeries_Memory_Map(void)
495 */ 514 */
496 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); 515 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
497 hptSizePages = (u32)HvCallHpt_getHptPages(); 516 hptSizePages = (u32)HvCallHpt_getHptPages();
498 hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT); 517 hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
499 hptLastChunk = hptFirstChunk + hptSizeChunks - 1; 518 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
500 519
501 printk("HPT absolute addr = %016lx, size = %dK\n", 520 printk("HPT absolute addr = %016lx, size = %dK\n",
@@ -552,7 +571,8 @@ static void __init build_iSeries_Memory_Map(void)
552 (absChunk > hptLastChunk)) && 571 (absChunk > hptLastChunk)) &&
553 ((absChunk < loadAreaFirstChunk) || 572 ((absChunk < loadAreaFirstChunk) ||
554 (absChunk > loadAreaLastChunk))) { 573 (absChunk > loadAreaLastChunk))) {
555 msChunks.abs[nextPhysChunk] = absChunk; 574 mschunks_map.mapping[nextPhysChunk] =
575 absChunk;
556 ++nextPhysChunk; 576 ++nextPhysChunk;
557 } 577 }
558 } 578 }
@@ -944,6 +964,8 @@ void __init iSeries_early_setup(void)
944 ppc_md.calibrate_decr = iSeries_calibrate_decr; 964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
945 ppc_md.progress = iSeries_progress; 965 ppc_md.progress = iSeries_progress;
946 966
967 /* XXX Implement enable_pmcs for iSeries */
968
947 if (get_paca()->lppaca.shared_proc) { 969 if (get_paca()->lppaca.shared_proc) {
948 ppc_md.idle_loop = iseries_shared_idle; 970 ppc_md.idle_loop = iseries_shared_idle;
949 printk(KERN_INFO "Using shared processor idle loop\n"); 971 printk(KERN_INFO "Using shared processor idle loop\n");
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/ppc64/kernel/iSeries_vio.c
new file mode 100644
index 000000000000..6b754b0c8344
--- /dev/null
+++ b/arch/ppc64/kernel/iSeries_vio.c
@@ -0,0 +1,155 @@
1/*
2 * IBM PowerPC iSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2005 Stephen Rothwell, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/types.h>
12#include <linux/device.h>
13#include <linux/init.h>
14
15#include <asm/vio.h>
16#include <asm/iommu.h>
17#include <asm/abs_addr.h>
18#include <asm/page.h>
19#include <asm/iSeries/vio.h>
20#include <asm/iSeries/HvTypes.h>
21#include <asm/iSeries/HvLpConfig.h>
22#include <asm/iSeries/HvCallXm.h>
23
24struct device *iSeries_vio_dev = &vio_bus_device.dev;
25EXPORT_SYMBOL(iSeries_vio_dev);
26
27static struct iommu_table veth_iommu_table;
28static struct iommu_table vio_iommu_table;
29
30static void __init iommu_vio_init(void)
31{
32 struct iommu_table *t;
33 struct iommu_table_cb cb;
34 unsigned long cbp;
35 unsigned long itc_entries;
36
37 cb.itc_busno = 255; /* Bus 255 is the virtual bus */
38 cb.itc_virtbus = 0xff; /* Ask for virtual bus */
39
40 cbp = virt_to_abs(&cb);
41 HvCallXm_getTceTableParms(cbp);
42
43 itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
44 veth_iommu_table.it_size = itc_entries / 2;
45 veth_iommu_table.it_busno = cb.itc_busno;
46 veth_iommu_table.it_offset = cb.itc_offset;
47 veth_iommu_table.it_index = cb.itc_index;
48 veth_iommu_table.it_type = TCE_VB;
49 veth_iommu_table.it_blocksize = 1;
50
51 t = iommu_init_table(&veth_iommu_table);
52
53 if (!t)
54 printk("Virtual Bus VETH TCE table failed.\n");
55
56 vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
57 vio_iommu_table.it_busno = cb.itc_busno;
58 vio_iommu_table.it_offset = cb.itc_offset +
59 veth_iommu_table.it_size;
60 vio_iommu_table.it_index = cb.itc_index;
61 vio_iommu_table.it_type = TCE_VB;
62 vio_iommu_table.it_blocksize = 1;
63
64 t = iommu_init_table(&vio_iommu_table);
65
66 if (!t)
67 printk("Virtual Bus VIO TCE table failed.\n");
68}
69
70/**
71 * vio_register_device_iseries: - Register a new iSeries vio device.
73 * @type: device type string; @unit_num: unit number of the device.
73 */
74static struct vio_dev *__init vio_register_device_iseries(char *type,
75 uint32_t unit_num)
76{
77 struct vio_dev *viodev;
78
79 /* allocate a vio_dev for this device */
80 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
81 if (!viodev)
82 return NULL;
83 memset(viodev, 0, sizeof(struct vio_dev));
84
85 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
86
87 viodev->name = viodev->dev.bus_id;
88 viodev->type = type;
89 viodev->unit_address = unit_num;
90 viodev->iommu_table = &vio_iommu_table;
91 if (vio_register_device(viodev) == NULL) {
92 kfree(viodev);
93 return NULL;
94 }
95 return viodev;
96}
97
98void __init probe_bus_iseries(void)
99{
100 HvLpIndexMap vlan_map;
101 struct vio_dev *viodev;
102 int i;
103
104 /* there is only one of each of these */
105 vio_register_device_iseries("viocons", 0);
106 vio_register_device_iseries("vscsi", 0);
107
108 vlan_map = HvLpConfig_getVirtualLanIndexMap();
109 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
110 if ((vlan_map & (0x8000 >> i)) == 0)
111 continue;
112 viodev = vio_register_device_iseries("vlan", i);
113 /* veth is special and has its own iommu_table */
114 viodev->iommu_table = &veth_iommu_table;
115 }
116 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
117 vio_register_device_iseries("viodasd", i);
118 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
119 vio_register_device_iseries("viocd", i);
120 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
121 vio_register_device_iseries("viotape", i);
122}
123
124/**
125 * vio_match_device_iseries: - Tell if a iSeries VIO device matches a
126 * vio_device_id
127 */
128static int vio_match_device_iseries(const struct vio_device_id *id,
129 const struct vio_dev *dev)
130{
131 return strncmp(dev->type, id->type, strlen(id->type)) == 0;
132}
133
134static struct vio_bus_ops vio_bus_ops_iseries = {
135 .match = vio_match_device_iseries,
136};
137
138/**
139 * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus
140 */
141static int __init vio_bus_init_iseries(void)
142{
143 int err;
144
145 err = vio_bus_init(&vio_bus_ops_iseries);
146 if (err == 0) {
147 iommu_vio_init();
148 vio_bus_device.iommu_table = &vio_iommu_table;
149 iSeries_vio_dev = &vio_bus_device.dev;
150 probe_bus_iseries();
151 }
152 return err;
153}
154
155__initcall(vio_bus_init_iseries);
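
iommu_vio_init() splits the virtual bus TCE window in half: veth gets the first half (it has its own table, so network mappings do not contend with disk/CD/tape), everything else shares the second. The arithmetic keeps the two windows tiling the bus exactly; as an illustrative check (the kernel does not actually assert this):

	/* invariant of the split */
	assert(veth_iommu_table.it_size + vio_iommu_table.it_size == itc_entries);
	assert(vio_iommu_table.it_offset ==
	       veth_iommu_table.it_offset + veth_iommu_table.it_size);

In probe_bus_iseries(), the 0x8000 >> i test reflects the HvLpIndexMap convention of bit 0 being the most significant bit: virtual LAN i exists when that bit is set.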
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
index d6c6bd03d2a4..5adaca2ddc9d 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/ppc64/kernel/lmb.c
@@ -28,33 +28,28 @@ void lmb_dump_all(void)
28{ 28{
29#ifdef DEBUG 29#ifdef DEBUG
30 unsigned long i; 30 unsigned long i;
31 struct lmb *_lmb = &lmb;
32 31
33 udbg_printf("lmb_dump_all:\n"); 32 udbg_printf("lmb_dump_all:\n");
34 udbg_printf(" memory.cnt = 0x%lx\n", 33 udbg_printf(" memory.cnt = 0x%lx\n",
35 _lmb->memory.cnt); 34 lmb.memory.cnt);
36 udbg_printf(" memory.size = 0x%lx\n", 35 udbg_printf(" memory.size = 0x%lx\n",
37 _lmb->memory.size); 36 lmb.memory.size);
38 for (i=0; i < _lmb->memory.cnt ;i++) { 37 for (i=0; i < lmb.memory.cnt ;i++) {
39 udbg_printf(" memory.region[0x%x].base = 0x%lx\n", 38 udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
40 i, _lmb->memory.region[i].base); 39 i, lmb.memory.region[i].base);
41 udbg_printf(" .physbase = 0x%lx\n",
42 _lmb->memory.region[i].physbase);
43 udbg_printf(" .size = 0x%lx\n", 40 udbg_printf(" .size = 0x%lx\n",
44 _lmb->memory.region[i].size); 41 lmb.memory.region[i].size);
45 } 42 }
46 43
47 udbg_printf("\n reserved.cnt = 0x%lx\n", 44 udbg_printf("\n reserved.cnt = 0x%lx\n",
48 _lmb->reserved.cnt); 45 lmb.reserved.cnt);
49 udbg_printf(" reserved.size = 0x%lx\n", 46 udbg_printf(" reserved.size = 0x%lx\n",
50 _lmb->reserved.size); 47 lmb.reserved.size);
51 for (i=0; i < _lmb->reserved.cnt ;i++) { 48 for (i=0; i < lmb.reserved.cnt ;i++) {
52 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n", 49 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
53 i, _lmb->reserved.region[i].base); 50 i, lmb.reserved.region[i].base);
54 udbg_printf(" .physbase = 0x%lx\n",
55 _lmb->reserved.region[i].physbase);
56 udbg_printf(" .size = 0x%lx\n", 51 udbg_printf(" .size = 0x%lx\n",
57 _lmb->reserved.region[i].size); 52 lmb.reserved.region[i].size);
58 } 53 }
59#endif /* DEBUG */ 54#endif /* DEBUG */
60} 55}
@@ -98,7 +93,6 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
98 rgn->region[r1].size += rgn->region[r2].size; 93 rgn->region[r1].size += rgn->region[r2].size;
99 for (i=r2; i < rgn->cnt-1; i++) { 94 for (i=r2; i < rgn->cnt-1; i++) {
100 rgn->region[i].base = rgn->region[i+1].base; 95 rgn->region[i].base = rgn->region[i+1].base;
101 rgn->region[i].physbase = rgn->region[i+1].physbase;
102 rgn->region[i].size = rgn->region[i+1].size; 96 rgn->region[i].size = rgn->region[i+1].size;
103 } 97 }
104 rgn->cnt--; 98 rgn->cnt--;
@@ -108,49 +102,29 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
108void __init 102void __init
109lmb_init(void) 103lmb_init(void)
110{ 104{
111 struct lmb *_lmb = &lmb;
112
113 /* Create a dummy zero size LMB which will get coalesced away later. 105 /* Create a dummy zero size LMB which will get coalesced away later.
114 * This simplifies the lmb_add() code below... 106 * This simplifies the lmb_add() code below...
115 */ 107 */
116 _lmb->memory.region[0].base = 0; 108 lmb.memory.region[0].base = 0;
117 _lmb->memory.region[0].size = 0; 109 lmb.memory.region[0].size = 0;
118 _lmb->memory.cnt = 1; 110 lmb.memory.cnt = 1;
119 111
120 /* Ditto. */ 112 /* Ditto. */
121 _lmb->reserved.region[0].base = 0; 113 lmb.reserved.region[0].base = 0;
122 _lmb->reserved.region[0].size = 0; 114 lmb.reserved.region[0].size = 0;
123 _lmb->reserved.cnt = 1; 115 lmb.reserved.cnt = 1;
124} 116}
125 117
126/* This routine called with relocation disabled. */ 118/* This routine called with relocation disabled. */
127void __init 119void __init
128lmb_analyze(void) 120lmb_analyze(void)
129{ 121{
130 unsigned long i; 122 int i;
131 unsigned long mem_size = 0; 123
132 unsigned long size_mask = 0; 124 lmb.memory.size = 0;
133 struct lmb *_lmb = &lmb;
134#ifdef CONFIG_MSCHUNKS
135 unsigned long physbase = 0;
136#endif
137
138 for (i=0; i < _lmb->memory.cnt; i++) {
139 unsigned long lmb_size;
140
141 lmb_size = _lmb->memory.region[i].size;
142
143#ifdef CONFIG_MSCHUNKS
144 _lmb->memory.region[i].physbase = physbase;
145 physbase += lmb_size;
146#else
147 _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
148#endif
149 mem_size += lmb_size;
150 size_mask |= lmb_size;
151 }
152 125
153 _lmb->memory.size = mem_size; 126 for (i = 0; i < lmb.memory.cnt; i++)
127 lmb.memory.size += lmb.memory.region[i].size;
154} 128}
155 129
156/* This routine called with relocation disabled. */ 130/* This routine called with relocation disabled. */
@@ -168,7 +142,6 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
168 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 142 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
169 if ( adjacent > 0 ) { 143 if ( adjacent > 0 ) {
170 rgn->region[i].base -= size; 144 rgn->region[i].base -= size;
171 rgn->region[i].physbase -= size;
172 rgn->region[i].size += size; 145 rgn->region[i].size += size;
173 coalesced++; 146 coalesced++;
174 break; 147 break;
@@ -195,11 +168,9 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
195 for (i=rgn->cnt-1; i >= 0; i--) { 168 for (i=rgn->cnt-1; i >= 0; i--) {
196 if (base < rgn->region[i].base) { 169 if (base < rgn->region[i].base) {
197 rgn->region[i+1].base = rgn->region[i].base; 170 rgn->region[i+1].base = rgn->region[i].base;
198 rgn->region[i+1].physbase = rgn->region[i].physbase;
199 rgn->region[i+1].size = rgn->region[i].size; 171 rgn->region[i+1].size = rgn->region[i].size;
200 } else { 172 } else {
201 rgn->region[i+1].base = base; 173 rgn->region[i+1].base = base;
202 rgn->region[i+1].physbase = lmb_abs_to_phys(base);
203 rgn->region[i+1].size = size; 174 rgn->region[i+1].size = size;
204 break; 175 break;
205 } 176 }
@@ -213,12 +184,11 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
213long __init 184long __init
214lmb_add(unsigned long base, unsigned long size) 185lmb_add(unsigned long base, unsigned long size)
215{ 186{
216 struct lmb *_lmb = &lmb; 187 struct lmb_region *_rgn = &(lmb.memory);
217 struct lmb_region *_rgn = &(_lmb->memory);
218 188
219 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 189 /* On pSeries LPAR systems, the first LMB is our RMO region. */
220 if ( base == 0 ) 190 if ( base == 0 )
221 _lmb->rmo_size = size; 191 lmb.rmo_size = size;
222 192
223 return lmb_add_region(_rgn, base, size); 193 return lmb_add_region(_rgn, base, size);
224 194
@@ -227,8 +197,7 @@ lmb_add(unsigned long base, unsigned long size)
227long __init 197long __init
228lmb_reserve(unsigned long base, unsigned long size) 198lmb_reserve(unsigned long base, unsigned long size)
229{ 199{
230 struct lmb *_lmb = &lmb; 200 struct lmb_region *_rgn = &(lmb.reserved);
231 struct lmb_region *_rgn = &(_lmb->reserved);
232 201
233 return lmb_add_region(_rgn, base, size); 202 return lmb_add_region(_rgn, base, size);
234} 203}
@@ -260,13 +229,10 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
260{ 229{
261 long i, j; 230 long i, j;
262 unsigned long base = 0; 231 unsigned long base = 0;
263 struct lmb *_lmb = &lmb;
264 struct lmb_region *_mem = &(_lmb->memory);
265 struct lmb_region *_rsv = &(_lmb->reserved);
266 232
267 for (i=_mem->cnt-1; i >= 0; i--) { 233 for (i=lmb.memory.cnt-1; i >= 0; i--) {
268 unsigned long lmbbase = _mem->region[i].base; 234 unsigned long lmbbase = lmb.memory.region[i].base;
269 unsigned long lmbsize = _mem->region[i].size; 235 unsigned long lmbsize = lmb.memory.region[i].size;
270 236
271 if ( max_addr == LMB_ALLOC_ANYWHERE ) 237 if ( max_addr == LMB_ALLOC_ANYWHERE )
272 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); 238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
@@ -276,8 +242,8 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
276 continue; 242 continue;
277 243
278 while ( (lmbbase <= base) && 244 while ( (lmbbase <= base) &&
279 ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) { 245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
280 base = _ALIGN_DOWN(_rsv->region[j].base-size, align); 246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
281 } 247 }
282 248
283 if ( (base != 0) && (lmbbase <= base) ) 249 if ( (base != 0) && (lmbbase <= base) )
@@ -287,62 +253,24 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
287 if ( i < 0 ) 253 if ( i < 0 )
288 return 0; 254 return 0;
289 255
290 lmb_add_region(_rsv, base, size); 256 lmb_add_region(&lmb.reserved, base, size);
291 257
292 return base; 258 return base;
293} 259}
294 260
261/* You must call lmb_analyze() before this. */
295unsigned long __init 262unsigned long __init
296lmb_phys_mem_size(void) 263lmb_phys_mem_size(void)
297{ 264{
298 struct lmb *_lmb = &lmb; 265 return lmb.memory.size;
299#ifdef CONFIG_MSCHUNKS
300 return _lmb->memory.size;
301#else
302 struct lmb_region *_mem = &(_lmb->memory);
303 unsigned long total = 0;
304 int i;
305
306 /* add all physical memory to the bootmem map */
307 for (i=0; i < _mem->cnt; i++)
308 total += _mem->region[i].size;
309 return total;
310#endif /* CONFIG_MSCHUNKS */
311} 266}
312 267
313unsigned long __init 268unsigned long __init
314lmb_end_of_DRAM(void) 269lmb_end_of_DRAM(void)
315{ 270{
316 struct lmb *_lmb = &lmb; 271 int idx = lmb.memory.cnt - 1;
317 struct lmb_region *_mem = &(_lmb->memory);
318 int idx = _mem->cnt - 1;
319
320#ifdef CONFIG_MSCHUNKS
321 return (_mem->region[idx].physbase + _mem->region[idx].size);
322#else
323 return (_mem->region[idx].base + _mem->region[idx].size);
324#endif /* CONFIG_MSCHUNKS */
325
326 return 0;
327}
328
329unsigned long __init
330lmb_abs_to_phys(unsigned long aa)
331{
332 unsigned long i, pa = aa;
333 struct lmb *_lmb = &lmb;
334 struct lmb_region *_mem = &(_lmb->memory);
335
336 for (i=0; i < _mem->cnt; i++) {
337 unsigned long lmbbase = _mem->region[i].base;
338 unsigned long lmbsize = _mem->region[i].size;
339 if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
340 pa = _mem->region[i].physbase + (aa - lmbbase);
341 break;
342 }
343 }
344 272
345 return pa; 273 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
346} 274}
347 275
348/* 276/*
@@ -353,20 +281,19 @@ void __init lmb_enforce_memory_limit(void)
353{ 281{
354 extern unsigned long memory_limit; 282 extern unsigned long memory_limit;
355 unsigned long i, limit; 283 unsigned long i, limit;
356 struct lmb_region *mem = &(lmb.memory);
357 284
358 if (! memory_limit) 285 if (! memory_limit)
359 return; 286 return;
360 287
361 limit = memory_limit; 288 limit = memory_limit;
362 for (i = 0; i < mem->cnt; i++) { 289 for (i = 0; i < lmb.memory.cnt; i++) {
363 if (limit > mem->region[i].size) { 290 if (limit > lmb.memory.region[i].size) {
364 limit -= mem->region[i].size; 291 limit -= lmb.memory.region[i].size;
365 continue; 292 continue;
366 } 293 }
367 294
368 mem->region[i].size = limit; 295 lmb.memory.region[i].size = limit;
369 mem->cnt = i + 1; 296 lmb.memory.cnt = i + 1;
370 break; 297 break;
371 } 298 }
372} 299}
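
The lmb cleanup removes the physbase field and the abs-to-phys machinery: with MSCHUNKS handled privately by iSeries, absolute and physical addresses coincide for lmb's purposes, so lmb_analyze() reduces to summing region sizes. Callers see the simpler accessors; usage sketch:

	/* after lmb_analyze(), totals come straight from the region list */
	unsigned long total = lmb_phys_mem_size();	/* == lmb.memory.size */
	unsigned long top   = lmb_end_of_DRAM();	/* last base + size */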
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c
index 02e96627fa66..edad361a8db0 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/ppc64/kernel/lparcfg.c
@@ -29,7 +29,7 @@
29#include <asm/iSeries/HvLpConfig.h> 29#include <asm/iSeries/HvLpConfig.h>
30#include <asm/lppaca.h> 30#include <asm/lppaca.h>
31#include <asm/hvcall.h> 31#include <asm/hvcall.h>
32#include <asm/cputable.h> 32#include <asm/firmware.h>
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/time.h> 35#include <asm/time.h>
@@ -273,6 +273,7 @@ static void parse_system_parameter_string(struct seq_file *m)
273 if (!workbuffer) { 273 if (!workbuffer) {
274 printk(KERN_ERR "%s %s kmalloc failure at line %d \n", 274 printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
275 __FILE__, __FUNCTION__, __LINE__); 275 __FILE__, __FUNCTION__, __LINE__);
276 kfree(local_buffer);
276 return; 277 return;
277 } 278 }
278#ifdef LPARCFG_DEBUG 279#ifdef LPARCFG_DEBUG
@@ -377,7 +378,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
377 378
378 partition_active_processors = lparcfg_count_active_processors(); 379 partition_active_processors = lparcfg_count_active_processors();
379 380
380 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 381 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
381 unsigned long h_entitled, h_unallocated; 382 unsigned long h_entitled, h_unallocated;
382 unsigned long h_aggregation, h_resource; 383 unsigned long h_aggregation, h_resource;
383 unsigned long pool_idle_time, pool_procs; 384 unsigned long pool_idle_time, pool_procs;
@@ -571,7 +572,7 @@ int __init lparcfg_init(void)
571 mode_t mode = S_IRUSR; 572 mode_t mode = S_IRUSR;
572 573
573 /* Allow writing if we have FW_FEATURE_SPLPAR */ 574 /* Allow writing if we have FW_FEATURE_SPLPAR */
574 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 575 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
575 lparcfg_fops.write = lparcfg_write; 576 lparcfg_fops.write = lparcfg_write;
576 mode |= S_IWUSR; 577 mode |= S_IWUSR;
577 } 578 }
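
Besides switching to firmware_has_feature(), the lparcfg hunks fix a leak: parse_system_parameter_string() allocates two buffers in sequence, and the early return on the second allocation failure previously dropped the first. The corrected pattern (buffer names from the diff; the size macro is assumed):

	local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
	if (!local_buffer)
		return;
	...
	workbuffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
	if (!workbuffer) {
		kfree(local_buffer);	/* the free this patch adds */
		return;
	}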
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index a05b50b738e9..474df0a862bf 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -680,6 +680,104 @@ _GLOBAL(kernel_thread)
680 ld r30,-16(r1) 680 ld r30,-16(r1)
681 blr 681 blr
682 682
683/*
684 * disable_kernel_fp()
685 * Disable the FPU.
686 */
687_GLOBAL(disable_kernel_fp)
688 mfmsr r3
689 rldicl r0,r3,(63-MSR_FP_LG),1
690 rldicl r3,r0,(MSR_FP_LG+1),0
691 mtmsrd r3 /* disable use of fpu now */
692 isync
693 blr
694
695/*
696 * giveup_fpu(tsk)
697 * Disable FP for the task given as the argument,
698 * and save the floating-point registers in its thread_struct.
699 * Enables the FPU for use in the kernel on return.
700 */
701_GLOBAL(giveup_fpu)
702 mfmsr r5
703 ori r5,r5,MSR_FP
704 mtmsrd r5 /* enable use of fpu now */
705 isync
706 cmpdi 0,r3,0
707 beqlr- /* if no previous owner, done */
708 addi r3,r3,THREAD /* want THREAD of task */
709 ld r5,PT_REGS(r3)
710 cmpdi 0,r5,0
711 SAVE_32FPRS(0, r3)
712 mffs fr0
713 stfd fr0,THREAD_FPSCR(r3)
714 beq 1f
715 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
716 li r3,MSR_FP|MSR_FE0|MSR_FE1
717 andc r4,r4,r3 /* disable FP for previous task */
718 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7191:
720#ifndef CONFIG_SMP
721 li r5,0
722 ld r4,last_task_used_math@got(r2)
723 std r5,0(r4)
724#endif /* CONFIG_SMP */
725 blr
726
727#ifdef CONFIG_ALTIVEC
728
729#if 0 /* this has no callers for now */
730/*
731 * disable_kernel_altivec()
732 * Disable the VMX.
733 */
734_GLOBAL(disable_kernel_altivec)
735 mfmsr r3
736 rldicl r0,r3,(63-MSR_VEC_LG),1
737 rldicl r3,r0,(MSR_VEC_LG+1),0
738 mtmsrd r3 /* disable use of VMX now */
739 isync
740 blr
741#endif /* 0 */
742
743/*
744 * giveup_altivec(tsk)
745 * Disable VMX for the task given as the argument,
746 * and save the vector registers in its thread_struct.
747 * Enables the VMX for use in the kernel on return.
748 */
749_GLOBAL(giveup_altivec)
750 mfmsr r5
751 oris r5,r5,MSR_VEC@h
752 mtmsrd r5 /* enable use of VMX now */
753 isync
754 cmpdi 0,r3,0
755 beqlr- /* if no previous owner, done */
756 addi r3,r3,THREAD /* want THREAD of task */
757 ld r5,PT_REGS(r3)
758 cmpdi 0,r5,0
759 SAVE_32VRS(0,r4,r3)
760 mfvscr vr0
761 li r4,THREAD_VSCR
762 stvx vr0,r4,r3
763 beq 1f
764 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
765 lis r3,MSR_VEC@h
766 andc r4,r4,r3 /* disable VMX for previous task */
767 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7681:
769#ifndef CONFIG_SMP
770 li r5,0
771 ld r4,last_task_used_altivec@got(r2)
772 std r5,0(r4)
773#endif /* CONFIG_SMP */
774 blr
775
776#endif /* CONFIG_ALTIVEC */
777
778_GLOBAL(__setup_cpu_power3)
779 blr
780
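
giveup_fpu() and giveup_altivec() arrive here verbatim from head.S; they let C code force a task's live register state back into its thread_struct. A typical call pattern from the switch/ptrace paths (sketch; the exact caller is not part of this diff):

	/* flush a task's live FP state before reading thread.fpr[] */
	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
		giveup_fpu(tsk);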
683/* kexec_wait(phys_cpu) 781/* kexec_wait(phys_cpu)
684 * 782 *
685 * wait for the flag to change, indicating this kernel is going away but 783 * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
index b80e81984ba8..da580812ddfe 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/ppc64/kernel/of_device.c
@@ -236,7 +236,6 @@ void of_device_unregister(struct of_device *ofdev)
236struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id) 236struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id)
237{ 237{
238 struct of_device *dev; 238 struct of_device *dev;
239 u32 *reg;
240 239
241 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 240 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
242 if (!dev) 241 if (!dev)
@@ -250,7 +249,6 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
250 dev->dev.bus = &of_platform_bus_type; 249 dev->dev.bus = &of_platform_bus_type;
251 dev->dev.release = of_release_dev; 250 dev->dev.release = of_release_dev;
252 251
253 reg = (u32 *)get_property(np, "reg", NULL);
254 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); 252 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
255 253
256 if (of_device_register(dev) != 0) { 254 if (of_device_register(dev) != 0) {
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index 69130522a87e..9d5e1e7fc389 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -45,6 +45,7 @@
45#include <asm/plpar_wrappers.h> 45#include <asm/plpar_wrappers.h>
46#include <asm/pSeries_reconfig.h> 46#include <asm/pSeries_reconfig.h>
47#include <asm/systemcfg.h> 47#include <asm/systemcfg.h>
48#include <asm/firmware.h>
48#include "pci.h" 49#include "pci.h"
49 50
50#define DBG(fmt...) 51#define DBG(fmt...)
@@ -546,7 +547,7 @@ void iommu_init_early_pSeries(void)
546 } 547 }
547 548
548 if (systemcfg->platform & PLATFORM_LPAR) { 549 if (systemcfg->platform & PLATFORM_LPAR) {
549 if (cur_cpu_spec->firmware_features & FW_FEATURE_MULTITCE) { 550 if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
550 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 551 ppc_md.tce_build = tce_buildmulti_pSeriesLP;
551 ppc_md.tce_free = tce_freemulti_pSeriesLP; 552 ppc_md.tce_free = tce_freemulti_pSeriesLP;
552 } else { 553 } else {
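
The TCE operation selection now goes through the shared predicate; the net effect (the else-branch names are assumed from context, since the hunk is truncated here):

	ppc_md.tce_build = firmware_has_feature(FW_FEATURE_MULTITCE)
		? tce_buildmulti_pSeriesLP : tce_build_pSeriesLP;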
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index 74dd144dcce8..0a3ddc9227c5 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(plpar_hcall_4out);
52EXPORT_SYMBOL(plpar_hcall_norets); 52EXPORT_SYMBOL(plpar_hcall_norets);
53EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 53EXPORT_SYMBOL(plpar_hcall_8arg_2ret);
54 54
55extern void fw_feature_init(void);
56extern void pSeries_find_serial_port(void); 55extern void pSeries_find_serial_port(void);
57 56
58 57
@@ -279,7 +278,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
279 unsigned long va, unsigned long prpn, 278 unsigned long va, unsigned long prpn,
280 unsigned long vflags, unsigned long rflags) 279 unsigned long vflags, unsigned long rflags)
281{ 280{
282 unsigned long arpn = physRpn_to_absRpn(prpn);
283 unsigned long lpar_rc; 281 unsigned long lpar_rc;
284 unsigned long flags; 282 unsigned long flags;
285 unsigned long slot; 283 unsigned long slot;
@@ -290,7 +288,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
290 if (vflags & HPTE_V_LARGE) 288 if (vflags & HPTE_V_LARGE)
291 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); 289 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
292 290
293 hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 291 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
294 292
295 /* Now fill in the actual HPTE */ 293 /* Now fill in the actual HPTE */
296 /* Set CEC cookie to 0 */ 294 /* Set CEC cookie to 0 */
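
On pSeries the MSCHUNKS translation was always an identity, so pSeries_lpar_hpte_insert() now feeds the physical RPN straight into the HPTE. The net change:

	/* before: hpte_r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags; */
	hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;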
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 5bec956e44a0..f0f0630cf07c 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -60,7 +60,8 @@
60#include <asm/nvram.h> 60#include <asm/nvram.h>
61#include <asm/plpar_wrappers.h> 61#include <asm/plpar_wrappers.h>
62#include <asm/xics.h> 62#include <asm/xics.h>
63#include <asm/cputable.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h>
64 65
65#include "i8259.h" 66#include "i8259.h"
66#include "mpic.h" 67#include "mpic.h"
@@ -187,6 +188,21 @@ static void __init pSeries_setup_mpic(void)
187 " MPIC "); 188 " MPIC ");
188} 189}
189 190
191static void pseries_lpar_enable_pmcs(void)
192{
193 unsigned long set, reset;
194
195 power4_enable_pmcs();
196
197 set = 1UL << 63;
198 reset = 0;
199 plpar_hcall_norets(H_PERFMON, set, reset);
200
201 /* instruct hypervisor to maintain PMCs */
202 if (firmware_has_feature(FW_FEATURE_SPLPAR))
203 get_paca()->lppaca.pmcregs_in_use = 1;
204}
205
190static void __init pSeries_setup_arch(void) 206static void __init pSeries_setup_arch(void)
191{ 207{
192 /* Fixup ppc_md depending on the type of interrupt controller */ 208 /* Fixup ppc_md depending on the type of interrupt controller */
@@ -231,11 +247,9 @@ static void __init pSeries_setup_arch(void)
231 247
232 pSeries_nvram_init(); 248 pSeries_nvram_init();
233 249
234 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
235 vpa_init(boot_cpuid);
236
237 /* Choose an idle loop */ 250 /* Choose an idle loop */
238 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 251 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
252 vpa_init(boot_cpuid);
239 if (get_paca()->lppaca.shared_proc) { 253 if (get_paca()->lppaca.shared_proc) {
240 printk(KERN_INFO "Using shared processor idle loop\n"); 254 printk(KERN_INFO "Using shared processor idle loop\n");
241 ppc_md.idle_loop = pseries_shared_idle; 255 ppc_md.idle_loop = pseries_shared_idle;
@@ -247,6 +261,11 @@ static void __init pSeries_setup_arch(void)
247 printk(KERN_INFO "Using default idle loop\n"); 261 printk(KERN_INFO "Using default idle loop\n");
248 ppc_md.idle_loop = default_idle; 262 ppc_md.idle_loop = default_idle;
249 } 263 }
264
265 if (systemcfg->platform & PLATFORM_LPAR)
266 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
267 else
268 ppc_md.enable_pmcs = power4_enable_pmcs;
250} 269}
251 270
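
The SPLPAR-only vpa_init() call folds into the idle-loop feature test, and PMC enablement is registered per platform. The registration added above reduces to (same logic, written as a ternary):

	ppc_md.enable_pmcs = (systemcfg->platform & PLATFORM_LPAR)
		? pseries_lpar_enable_pmcs	/* H_PERFMON + pmcregs_in_use */
		: power4_enable_pmcs;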
252static int __init pSeries_init_panel(void) 271static int __init pSeries_init_panel(void)
@@ -260,11 +279,11 @@ static int __init pSeries_init_panel(void)
260arch_initcall(pSeries_init_panel); 279arch_initcall(pSeries_init_panel);
261 280
262 281
263/* Build up the firmware_features bitmask field 282/* Build up the ppc64_firmware_features bitmask field
264 * using contents of device-tree/ibm,hypertas-functions. 283 * using contents of device-tree/ibm,hypertas-functions.
265 * Ultimately this functionality may be moved into prom.c prom_init(). 284 * Ultimately this functionality may be moved into prom.c prom_init().
266 */ 285 */
267void __init fw_feature_init(void) 286static void __init fw_feature_init(void)
268{ 287{
269 struct device_node * dn; 288 struct device_node * dn;
270 char * hypertas; 289 char * hypertas;
@@ -272,7 +291,7 @@ void __init fw_feature_init(void)
272 291
273 DBG(" -> fw_feature_init()\n"); 292 DBG(" -> fw_feature_init()\n");
274 293
275 cur_cpu_spec->firmware_features = 0; 294 ppc64_firmware_features = 0;
276 dn = of_find_node_by_path("/rtas"); 295 dn = of_find_node_by_path("/rtas");
277 if (dn == NULL) { 296 if (dn == NULL) {
278 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n"); 297 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n");
@@ -288,7 +307,7 @@ void __init fw_feature_init(void)
288 if ((firmware_features_table[i].name) && 307 if ((firmware_features_table[i].name) &&
289 (strcmp(firmware_features_table[i].name,hypertas))==0) { 308 (strcmp(firmware_features_table[i].name,hypertas))==0) {
290 /* we have a match */ 309 /* we have a match */
291 cur_cpu_spec->firmware_features |= 310 ppc64_firmware_features |=
292 (firmware_features_table[i].val); 311 (firmware_features_table[i].val);
293 break; 312 break;
294 } 313 }
@@ -302,7 +321,7 @@ void __init fw_feature_init(void)
302 of_node_put(dn); 321 of_node_put(dn);
303 no_rtas: 322 no_rtas:
304 printk(KERN_INFO "firmware_features = 0x%lx\n", 323 printk(KERN_INFO "firmware_features = 0x%lx\n",
305 cur_cpu_spec->firmware_features); 324 ppc64_firmware_features);
306 325
307 DBG(" <- fw_feature_init()\n"); 326 DBG(" <- fw_feature_init()\n");
308} 327}
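
[Editor's note] The hunks above convert every open-coded test of cur_cpu_spec->firmware_features into the new firmware_has_feature() accessor backed by the global ppc64_firmware_features word that fw_feature_init() fills in. A minimal sketch of how such an accessor can be built and used; the helper body here illustrates the pattern and is not the exact contents of asm/firmware.h:

    /* Feature word populated once at boot by fw_feature_init(),
     * then tested cheaply from any context afterwards.
     */
    extern unsigned long ppc64_firmware_features;

    static inline int firmware_has_feature(unsigned long feature)
    {
            return (ppc64_firmware_features & feature) != 0;
    }

    /* Caller side, as in pSeries_setup_arch() above: */
    if (firmware_has_feature(FW_FEATURE_SPLPAR))
            vpa_init(boot_cpuid);   /* shared-LPAR-only setup */

Centralizing the word in one global is also why fw_feature_init() above now writes ppc64_firmware_features instead of a per-CPU-spec copy.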
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
index 62c55a123560..79c7f3223665 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/ppc64/kernel/pSeries_smp.c
@@ -41,6 +41,7 @@
41#include <asm/machdep.h> 41#include <asm/machdep.h>
42#include <asm/xics.h> 42#include <asm/xics.h>
43#include <asm/cputable.h> 43#include <asm/cputable.h>
44#include <asm/firmware.h>
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/rtas.h> 46#include <asm/rtas.h>
46#include <asm/plpar_wrappers.h> 47#include <asm/plpar_wrappers.h>
@@ -326,7 +327,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
326 if (cpu != boot_cpuid) 327 if (cpu != boot_cpuid)
327 xics_setup_cpu(); 328 xics_setup_cpu();
328 329
329 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 330 if (firmware_has_feature(FW_FEATURE_SPLPAR))
330 vpa_init(cpu); 331 vpa_init(cpu);
331 332
332 cpu_clear(cpu, of_spin_map); 333 cpu_clear(cpu, of_spin_map);
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/ppc64/kernel/pSeries_vio.c
new file mode 100644
index 000000000000..e0ae06f58f86
--- /dev/null
+++ b/arch/ppc64/kernel/pSeries_vio.c
@@ -0,0 +1,273 @@
1/*
2 * IBM PowerPC pSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/kobject.h>
20#include <asm/iommu.h>
21#include <asm/dma.h>
22#include <asm/prom.h>
23#include <asm/vio.h>
24#include <asm/hvcall.h>
25
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27
28static void probe_bus_pseries(void)
29{
30 struct device_node *node_vroot, *of_node;
31
32 node_vroot = find_devices("vdevice");
33 if ((node_vroot == NULL) || (node_vroot->child == NULL))
34 /* this machine doesn't do virtual IO, and that's ok */
35 return;
36
37 /*
 38 * Create a struct vio_dev for each virtual device in the device tree.
39 * Drivers will associate with them later.
40 */
41 for (of_node = node_vroot->child; of_node != NULL;
42 of_node = of_node->sibling) {
43 printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
44 vio_register_device_node(of_node);
45 }
46}
47
48/**
49 * vio_match_device_pseries: - Tell if a pSeries VIO device matches a
50 * vio_device_id
51 */
52static int vio_match_device_pseries(const struct vio_device_id *id,
53 const struct vio_dev *dev)
54{
55 return (strncmp(dev->type, id->type, strlen(id->type)) == 0) &&
56 device_is_compatible(dev->dev.platform_data, id->compat);
57}
58
59static void vio_release_device_pseries(struct device *dev)
60{
61 /* XXX free TCE table */
62 of_node_put(dev->platform_data);
63}
64
65static ssize_t viodev_show_devspec(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 struct device_node *of_node = dev->platform_data;
69
70 return sprintf(buf, "%s\n", of_node->full_name);
71}
72DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
73
74static void vio_unregister_device_pseries(struct vio_dev *viodev)
75{
76 device_remove_file(&viodev->dev, &dev_attr_devspec);
77}
78
79static struct vio_bus_ops vio_bus_ops_pseries = {
80 .match = vio_match_device_pseries,
81 .unregister_device = vio_unregister_device_pseries,
82 .release_device = vio_release_device_pseries,
83};
84
85/**
86 * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus
87 */
88static int __init vio_bus_init_pseries(void)
89{
90 int err;
91
92 err = vio_bus_init(&vio_bus_ops_pseries);
93 if (err == 0)
94 probe_bus_pseries();
95 return err;
96}
97
98__initcall(vio_bus_init_pseries);
99
100/**
101 * vio_build_iommu_table: - gets the DMA information from OF and
102 * builds the TCE table.
103 * @dev: the virtual device.
104 *
105 * Returns a pointer to the built TCE table, or NULL if it can't
106 * find the property.
107 */
108static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
109{
110 unsigned int *dma_window;
111 struct iommu_table *newTceTable;
112 unsigned long offset;
113 int dma_window_property_size;
114
115 dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
116 if (!dma_window) {
117 return NULL;
118 }
119
120 newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
121
122 /* There should be some code to extract the phys-encoded offset
123 using prom_n_addr_cells(). However, according to a comment
124 on earlier versions, it's always zero, so we don't bother */
125 offset = dma_window[1] >> PAGE_SHIFT;
126
127 /* TCE table size - measured in tce entries */
128 newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
129 /* offset for VIO should always be 0 */
130 newTceTable->it_offset = offset;
131 newTceTable->it_busno = 0;
132 newTceTable->it_index = (unsigned long)dma_window[0];
133 newTceTable->it_type = TCE_VB;
134
135 return iommu_init_table(newTceTable);
136}
137
138/**
139 * vio_register_device_node: - Register a new vio device.
140 * @of_node: The OF node for this device.
141 *
142 * Creates and initializes a vio_dev structure from the data in
143 * of_node (dev.platform_data) and adds it to the list of virtual devices.
144 * Returns a pointer to the created vio_dev, or NULL if the node
145 * lacks a device_type or 'reg' property.
146 */
147struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
148{
149 struct vio_dev *viodev;
150 unsigned int *unit_address;
151 unsigned int *irq_p;
152
153 /* we need the 'device_type' property, in order to match with drivers */
154 if (of_node->type == NULL) {
155 printk(KERN_WARNING
156 "%s: node %s missing 'device_type'\n", __FUNCTION__,
157 of_node->name ? of_node->name : "<unknown>");
158 return NULL;
159 }
160
161 unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
162 if (!unit_address) {
163 printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
164 of_node->name ? of_node->name : "<unknown>");
165 return NULL;
166 }
167
168 /* allocate a vio_dev for this node */
169 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
170 if (!viodev) {
171 return NULL;
172 }
173 memset(viodev, 0, sizeof(struct vio_dev));
174
175 viodev->dev.platform_data = of_node_get(of_node);
176
177 viodev->irq = NO_IRQ;
178 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
179 if (irq_p) {
180 int virq = virt_irq_create_mapping(*irq_p);
181 if (virq == NO_IRQ) {
182 printk(KERN_ERR "Unable to allocate interrupt "
183 "number for %s\n", of_node->full_name);
184 } else
185 viodev->irq = irq_offset_up(virq);
186 }
187
188 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
189 viodev->name = of_node->name;
190 viodev->type = of_node->type;
191 viodev->unit_address = *unit_address;
192 viodev->iommu_table = vio_build_iommu_table(viodev);
193
194 /* register with generic device framework */
195 if (vio_register_device(viodev) == NULL) {
196 /* XXX free TCE table */
197 kfree(viodev);
198 return NULL;
199 }
200 device_create_file(&viodev->dev, &dev_attr_devspec);
201
202 return viodev;
203}
204EXPORT_SYMBOL(vio_register_device_node);
205
206/**
207 * vio_get_attribute: - get attribute for virtual device
208 * @vdev: The vio device to get the property from.
209 * @which: The property/attribute to be extracted.
210 * @length: Where to store the length of the returned data (may be NULL).
211 *
212 * Calls prom.c's get_property() to return the value of the
213 * property named by @which.
214 */
215const void *vio_get_attribute(struct vio_dev *vdev, void *which, int *length)
216{
217 return get_property(vdev->dev.platform_data, (char *)which, length);
218}
219EXPORT_SYMBOL(vio_get_attribute);
220
221/* vio_find_name() - internal because only vio.c knows how we formatted the
222 * kobject name
223 * XXX once vio_bus_type.devices is actually used as a kset in
224 * drivers/base/bus.c, this function should be removed in favor of
225 * "device_find(kobj_name, &vio_bus_type)"
226 */
227static struct vio_dev *vio_find_name(const char *kobj_name)
228{
229 struct kobject *found;
230
231 found = kset_find_obj(&devices_subsys.kset, kobj_name);
232 if (!found)
233 return NULL;
234
235 return to_vio_dev(container_of(found, struct device, kobj));
236}
237
238/**
239 * vio_find_node - find an already-registered vio_dev
240 * @vnode: device_node of the virtual device we're looking for
241 */
242struct vio_dev *vio_find_node(struct device_node *vnode)
243{
244 uint32_t *unit_address;
245 char kobj_name[BUS_ID_SIZE];
246
247 /* construct the kobject name from the device node */
248 unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
249 if (!unit_address)
250 return NULL;
251 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
252
253 return vio_find_name(kobj_name);
254}
255EXPORT_SYMBOL(vio_find_node);
256
257int vio_enable_interrupts(struct vio_dev *dev)
258{
259 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
260 if (rc != H_Success)
261 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
262 return rc;
263}
264EXPORT_SYMBOL(vio_enable_interrupts);
265
266int vio_disable_interrupts(struct vio_dev *dev)
267{
268 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
269 if (rc != H_Success)
270 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
271 return rc;
272}
273EXPORT_SYMBOL(vio_disable_interrupts);
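
[Editor's note] pSeries_vio.c provides the bus half of the interface; a device driver binds to it through vio_register_driver() with an id table plus probe/remove callbacks, and vio_match_device_pseries() above pairs the two by device_type and compatible. A hedged sketch of the driver half against the 2.6-era API shown in this file; the driver name, ID strings, and callback bodies are illustrative:

    #include <asm/vio.h>

    static struct vio_device_id example_ids[] = {
            { .type = "network", .compat = "IBM,l-lan" },
            { NULL, NULL },                 /* table terminator */
    };

    static int example_probe(struct vio_dev *vdev,
                             const struct vio_device_id *id)
    {
            /* vdev->iommu_table was built from "ibm,my-dma-window" */
            return 0;
    }

    static int example_remove(struct vio_dev *vdev)
    {
            return 0;
    }

    static struct vio_driver example_driver = {
            .name           = "example",
            .id_table       = example_ids,
            .probe          = example_probe,
            .remove         = example_remove,
    };

    /* module init/exit would then call: */
    vio_register_driver(&example_driver);
    vio_unregister_driver(&example_driver);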
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index 6316188737b6..6182a2cd90a5 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -78,7 +78,7 @@ extern unsigned long __toc_start;
78 78
79#define BOOTCPU_PACA_INIT(number) \ 79#define BOOTCPU_PACA_INIT(number) \
80{ \ 80{ \
81 PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \ 81 PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \
82 PACA_INIT_ISERIES(number) \ 82 PACA_INIT_ISERIES(number) \
83} 83}
84 84
@@ -90,7 +90,7 @@ extern unsigned long __toc_start;
90 90
91#define BOOTCPU_PACA_INIT(number) \ 91#define BOOTCPU_PACA_INIT(number) \
92{ \ 92{ \
93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \ 93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \
94} 94}
95#endif 95#endif
96 96
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index e40877fa67cd..8ff86a766cdf 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -71,6 +71,7 @@
71#include <asm/of_device.h> 71#include <asm/of_device.h>
72#include <asm/lmb.h> 72#include <asm/lmb.h>
73#include <asm/smu.h> 73#include <asm/smu.h>
74#include <asm/pmc.h>
74 75
75#include "pmac.h" 76#include "pmac.h"
76#include "mpic.h" 77#include "mpic.h"
@@ -511,4 +512,5 @@ struct machdep_calls __initdata pmac_md = {
511 .progress = pmac_progress, 512 .progress = pmac_progress,
512 .check_legacy_ioport = pmac_check_legacy_ioport, 513 .check_legacy_ioport = pmac_check_legacy_ioport,
513 .idle_loop = native_idle, 514 .idle_loop = native_idle,
515 .enable_pmcs = power4_enable_pmcs,
514}; 516};
diff --git a/arch/ppc64/kernel/pmc.c b/arch/ppc64/kernel/pmc.c
index 67be773f9c00..cdfec7438d01 100644
--- a/arch/ppc64/kernel/pmc.c
+++ b/arch/ppc64/kernel/pmc.c
@@ -65,3 +65,24 @@ void release_pmc_hardware(void)
65 spin_unlock(&pmc_owner_lock); 65 spin_unlock(&pmc_owner_lock);
66} 66}
67EXPORT_SYMBOL_GPL(release_pmc_hardware); 67EXPORT_SYMBOL_GPL(release_pmc_hardware);
68
69void power4_enable_pmcs(void)
70{
71 unsigned long hid0;
72
73 hid0 = mfspr(HID0);
74 hid0 |= 1UL << (63 - 20);
75
76 /* POWER4 requires the following sequence */
77 asm volatile(
78 "sync\n"
79 "mtspr %1, %0\n"
80 "mfspr %0, %1\n"
81 "mfspr %0, %1\n"
82 "mfspr %0, %1\n"
83 "mfspr %0, %1\n"
84 "mfspr %0, %1\n"
85 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
87 "memory");
88}
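
[Editor's note] power4_enable_pmcs() sets HID0[20] using IBM's big-endian bit numbering, where bit 0 is the most significant bit of the 64-bit register, hence the 1UL << (63 - 20) conversion; the sync/mtspr/six-mfspr/isync dance is the sequence POWER4 requires for a HID0 update to take effect. A small sketch making the numbering convention explicit; MSB_BIT64 is a hypothetical helper, not an existing kernel macro:

    /* "Bit n" of a 64-bit SPR in IBM (MSB-0) numbering is bit
     * (63 - n) in conventional LSB-0 numbering.
     */
    #define MSB_BIT64(n)    (1UL << (63 - (n)))

    hid0 |= MSB_BIT64(20);  /* HID0[20] = PMC enable; == 1UL << 43 */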
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index f7cae05e40fb..7a7e027653ad 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -50,6 +50,7 @@
50#include <asm/machdep.h> 50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h> 51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h> 52#include <asm/cputable.h>
53#include <asm/firmware.h>
53#include <asm/sections.h> 54#include <asm/sections.h>
54#include <asm/tlbflush.h> 55#include <asm/tlbflush.h>
55#include <asm/time.h> 56#include <asm/time.h>
@@ -202,11 +203,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
202 new_thread = &new->thread; 203 new_thread = &new->thread;
203 old_thread = &current->thread; 204 old_thread = &current->thread;
204 205
205/* Collect purr utilization data per process and per processor wise */ 206 /* Collect purr utilization data per process and per processor.
206/* purr is nothing but processor time base */ 207 * The purr is simply the processor time base.
207 208 */
208#if defined(CONFIG_PPC_PSERIES) 209 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
209 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
210 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 210 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
211 long unsigned start_tb, current_tb; 211 long unsigned start_tb, current_tb;
212 start_tb = old_thread->start_tb; 212 start_tb = old_thread->start_tb;
@@ -214,8 +214,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
214 old_thread->accum_tb += (current_tb - start_tb); 214 old_thread->accum_tb += (current_tb - start_tb);
215 new_thread->start_tb = current_tb; 215 new_thread->start_tb = current_tb;
216 } 216 }
217#endif
218
219 217
220 local_irq_save(flags); 218 local_irq_save(flags);
221 last = _switch(old_thread, new_thread); 219 last = _switch(old_thread, new_thread);
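
[Editor's note] The rewritten block in __switch_to() charges each outgoing thread with the PURR (Processor Utilization of Resources Register) ticks it consumed, a register that is only meaningful on shared-processor LPARs, hence the firmware_has_feature(FW_FEATURE_SPLPAR) guard replacing the old CONFIG_PPC_PSERIES ifdef. A sketch of how the accumulated value could later be turned into a utilization figure; this helper is hypothetical, not part of the patch:

    /* Fraction of the elapsed timebase the thread actually ran for,
     * using the accum_tb counter maintained in __switch_to() above.
     */
    static unsigned long purr_utilization_pct(struct thread_struct *t,
                                              unsigned long tb_elapsed)
    {
            if (tb_elapsed == 0)
                    return 0;
            return (t->accum_tb * 100) / tb_elapsed;
    }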
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 5aca01ddd81f..b21848826791 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -625,8 +625,8 @@ void __init finish_device_tree(void)
625 625
626static inline char *find_flat_dt_string(u32 offset) 626static inline char *find_flat_dt_string(u32 offset)
627{ 627{
628 return ((char *)initial_boot_params) + initial_boot_params->off_dt_strings 628 return ((char *)initial_boot_params) +
629 + offset; 629 initial_boot_params->off_dt_strings + offset;
630} 630}
631 631
632/** 632/**
@@ -635,26 +635,33 @@ static inline char *find_flat_dt_string(u32 offset)
635 * unflatten the tree 635 * unflatten the tree
636 */ 636 */
637static int __init scan_flat_dt(int (*it)(unsigned long node, 637static int __init scan_flat_dt(int (*it)(unsigned long node,
638 const char *full_path, void *data), 638 const char *uname, int depth,
639 void *data),
639 void *data) 640 void *data)
640{ 641{
641 unsigned long p = ((unsigned long)initial_boot_params) + 642 unsigned long p = ((unsigned long)initial_boot_params) +
642 initial_boot_params->off_dt_struct; 643 initial_boot_params->off_dt_struct;
643 int rc = 0; 644 int rc = 0;
645 int depth = -1;
644 646
645 do { 647 do {
646 u32 tag = *((u32 *)p); 648 u32 tag = *((u32 *)p);
647 char *pathp; 649 char *pathp;
648 650
649 p += 4; 651 p += 4;
650 if (tag == OF_DT_END_NODE) 652 if (tag == OF_DT_END_NODE) {
653 depth--;
654 continue;
655 }
656 if (tag == OF_DT_NOP)
651 continue; 657 continue;
652 if (tag == OF_DT_END) 658 if (tag == OF_DT_END)
653 break; 659 break;
654 if (tag == OF_DT_PROP) { 660 if (tag == OF_DT_PROP) {
655 u32 sz = *((u32 *)p); 661 u32 sz = *((u32 *)p);
656 p += 8; 662 p += 8;
657 p = _ALIGN(p, sz >= 8 ? 8 : 4); 663 if (initial_boot_params->version < 0x10)
664 p = _ALIGN(p, sz >= 8 ? 8 : 4);
658 p += sz; 665 p += sz;
659 p = _ALIGN(p, 4); 666 p = _ALIGN(p, 4);
660 continue; 667 continue;
@@ -664,9 +671,18 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
664 " device tree !\n", tag); 671 " device tree !\n", tag);
665 return -EINVAL; 672 return -EINVAL;
666 } 673 }
674 depth++;
667 pathp = (char *)p; 675 pathp = (char *)p;
668 p = _ALIGN(p + strlen(pathp) + 1, 4); 676 p = _ALIGN(p + strlen(pathp) + 1, 4);
669 rc = it(p, pathp, data); 677 if ((*pathp) == '/') {
678 char *lp, *np;
679 for (lp = NULL, np = pathp; *np; np++)
680 if ((*np) == '/')
681 lp = np+1;
682 if (lp != NULL)
683 pathp = lp;
684 }
685 rc = it(p, pathp, depth, data);
670 if (rc != 0) 686 if (rc != 0)
671 break; 687 break;
672 } while(1); 688 } while(1);
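
[Editor's note] scan_flat_dt() now hands each callback the node's unit name and tree depth instead of a full path, tracking depth itself by counting OF_DT_BEGIN_NODE/OF_DT_END_NODE tags; it also learns to skip OF_DT_NOP tags and applies the pre-v16 property alignment rule only when the blob version is below 0x10. A minimal callback in the new style, mirroring the early_init_dt_scan_*() functions further down; the node name is illustrative:

    /* Match a node named "example" directly under the root and stop
     * the walk; returning 0 keeps the scan going.
     */
    static int __init scan_for_example(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
    {
            if (depth != 1 || strcmp(uname, "example") != 0)
                    return 0;
            /* get_flat_dt_prop(node, "...", NULL) as needed here */
            return 1;
    }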
@@ -689,17 +705,21 @@ static void* __init get_flat_dt_prop(unsigned long node, const char *name,
689 const char *nstr; 705 const char *nstr;
690 706
691 p += 4; 707 p += 4;
708 if (tag == OF_DT_NOP)
709 continue;
692 if (tag != OF_DT_PROP) 710 if (tag != OF_DT_PROP)
693 return NULL; 711 return NULL;
694 712
695 sz = *((u32 *)p); 713 sz = *((u32 *)p);
696 noff = *((u32 *)(p + 4)); 714 noff = *((u32 *)(p + 4));
697 p += 8; 715 p += 8;
698 p = _ALIGN(p, sz >= 8 ? 8 : 4); 716 if (initial_boot_params->version < 0x10)
717 p = _ALIGN(p, sz >= 8 ? 8 : 4);
699 718
700 nstr = find_flat_dt_string(noff); 719 nstr = find_flat_dt_string(noff);
701 if (nstr == NULL) { 720 if (nstr == NULL) {
702 printk(KERN_WARNING "Can't find property index name !\n"); 721 printk(KERN_WARNING "Can't find property index"
722 " name !\n");
703 return NULL; 723 return NULL;
704 } 724 }
705 if (strcmp(name, nstr) == 0) { 725 if (strcmp(name, nstr) == 0) {
@@ -713,7 +733,7 @@ static void* __init get_flat_dt_prop(unsigned long node, const char *name,
713} 733}
714 734
715static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 735static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
716 unsigned long align) 736 unsigned long align)
717{ 737{
718 void *res; 738 void *res;
719 739
@@ -727,13 +747,16 @@ static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
727static unsigned long __init unflatten_dt_node(unsigned long mem, 747static unsigned long __init unflatten_dt_node(unsigned long mem,
728 unsigned long *p, 748 unsigned long *p,
729 struct device_node *dad, 749 struct device_node *dad,
730 struct device_node ***allnextpp) 750 struct device_node ***allnextpp,
751 unsigned long fpsize)
731{ 752{
732 struct device_node *np; 753 struct device_node *np;
733 struct property *pp, **prev_pp = NULL; 754 struct property *pp, **prev_pp = NULL;
734 char *pathp; 755 char *pathp;
735 u32 tag; 756 u32 tag;
736 unsigned int l; 757 unsigned int l, allocl;
758 int has_name = 0;
759 int new_format = 0;
737 760
738 tag = *((u32 *)(*p)); 761 tag = *((u32 *)(*p));
739 if (tag != OF_DT_BEGIN_NODE) { 762 if (tag != OF_DT_BEGIN_NODE) {
@@ -742,21 +765,62 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
742 } 765 }
743 *p += 4; 766 *p += 4;
744 pathp = (char *)*p; 767 pathp = (char *)*p;
745 l = strlen(pathp) + 1; 768 l = allocl = strlen(pathp) + 1;
746 *p = _ALIGN(*p + l, 4); 769 *p = _ALIGN(*p + l, 4);
747 770
748 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + l, 771 /* version 0x10 has a more compact unit name here instead of the full
772 * path. we accumulate the full path size using "fpsize", we'll rebuild
773 * it later. We detect this because the first character of the name is
774 * not '/'.
775 */
776 if ((*pathp) != '/') {
777 new_format = 1;
778 if (fpsize == 0) {
779 /* root node: special case. fpsize accounts for path
780 * plus terminating zero. root node only has '/', so
781 * fpsize should be 2, but we want to avoid the first
782 * level nodes to have two '/' so we use fpsize 1 here
783 */
784 fpsize = 1;
785 allocl = 2;
786 } else {
787 /* account for '/' and path size minus terminal 0
788 * already in 'l'
789 */
790 fpsize += l;
791 allocl = fpsize;
792 }
793 }
794
795
796 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
749 __alignof__(struct device_node)); 797 __alignof__(struct device_node));
750 if (allnextpp) { 798 if (allnextpp) {
751 memset(np, 0, sizeof(*np)); 799 memset(np, 0, sizeof(*np));
752 np->full_name = ((char*)np) + sizeof(struct device_node); 800 np->full_name = ((char*)np) + sizeof(struct device_node);
753 memcpy(np->full_name, pathp, l); 801 if (new_format) {
802 char *p = np->full_name;
803 /* rebuild full path for new format */
804 if (dad && dad->parent) {
805 strcpy(p, dad->full_name);
806#ifdef DEBUG
807 if ((strlen(p) + l + 1) != allocl) {
808 DBG("%s: p: %d, l: %d, a: %d\n",
809 pathp, (int)strlen(p), l, allocl);
810 }
811#endif
812 p += strlen(p);
813 }
814 *(p++) = '/';
815 memcpy(p, pathp, l);
816 } else
817 memcpy(np->full_name, pathp, l);
754 prev_pp = &np->properties; 818 prev_pp = &np->properties;
755 **allnextpp = np; 819 **allnextpp = np;
756 *allnextpp = &np->allnext; 820 *allnextpp = &np->allnext;
757 if (dad != NULL) { 821 if (dad != NULL) {
758 np->parent = dad; 822 np->parent = dad;
759 /* we temporarily use the `next' field as `last_child'. */ 823 /* we temporarily use the next field as `last_child'*/
760 if (dad->next == 0) 824 if (dad->next == 0)
761 dad->child = np; 825 dad->child = np;
762 else 826 else
@@ -770,18 +834,26 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
770 char *pname; 834 char *pname;
771 835
772 tag = *((u32 *)(*p)); 836 tag = *((u32 *)(*p));
837 if (tag == OF_DT_NOP) {
838 *p += 4;
839 continue;
840 }
773 if (tag != OF_DT_PROP) 841 if (tag != OF_DT_PROP)
774 break; 842 break;
775 *p += 4; 843 *p += 4;
776 sz = *((u32 *)(*p)); 844 sz = *((u32 *)(*p));
777 noff = *((u32 *)((*p) + 4)); 845 noff = *((u32 *)((*p) + 4));
778 *p = _ALIGN((*p) + 8, sz >= 8 ? 8 : 4); 846 *p += 8;
847 if (initial_boot_params->version < 0x10)
848 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
779 849
780 pname = find_flat_dt_string(noff); 850 pname = find_flat_dt_string(noff);
781 if (pname == NULL) { 851 if (pname == NULL) {
782 printk("Can't find property name in list !\n"); 852 printk("Can't find property name in list !\n");
783 break; 853 break;
784 } 854 }
855 if (strcmp(pname, "name") == 0)
856 has_name = 1;
785 l = strlen(pname) + 1; 857 l = strlen(pname) + 1;
786 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 858 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
787 __alignof__(struct property)); 859 __alignof__(struct property));
@@ -801,6 +873,36 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
801 } 873 }
802 *p = _ALIGN((*p) + sz, 4); 874 *p = _ALIGN((*p) + sz, 4);
803 } 875 }
876 /* with version 0x10 we may not have the name property, recreate
877 * it here from the unit name if absent
878 */
879 if (!has_name) {
880 char *p = pathp, *ps = pathp, *pa = NULL;
881 int sz;
882
883 while (*p) {
884 if ((*p) == '@')
885 pa = p;
886 if ((*p) == '/')
887 ps = p + 1;
888 p++;
889 }
890 if (pa < ps)
891 pa = p;
892 sz = (pa - ps) + 1;
893 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
894 __alignof__(struct property));
895 if (allnextpp) {
896 pp->name = "name";
897 pp->length = sz;
898 pp->value = (unsigned char *)(pp + 1);
899 *prev_pp = pp;
900 prev_pp = &pp->next;
901 memcpy(pp->value, ps, sz - 1);
902 ((char *)pp->value)[sz - 1] = 0;
903 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
904 }
905 }
804 if (allnextpp) { 906 if (allnextpp) {
805 *prev_pp = NULL; 907 *prev_pp = NULL;
806 np->name = get_property(np, "name", NULL); 908 np->name = get_property(np, "name", NULL);
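
[Editor's note] When a version-0x10 blob omits the "name" property, the loop above rebuilds it from the unit name: ps ends up just past the last '/', pa at the '@' that introduces the unit address, and the bytes between them are the name. A worked example of the pointer arithmetic:

    /* Unit name "ethernet@f2000000":
     *   ps -> 'e' (start; v16 unit names contain no '/')
     *   pa -> '@'
     *   sz  = (pa - ps) + 1 = 9          (8 name chars + NUL)
     * giving pp->value = "ethernet", pp->length = 9.
     *
     * A node with no unit address ("chosen") leaves pa == NULL,
     * so the (pa < ps) fixup moves pa to the end of the string
     * and the whole unit name becomes the property value.
     */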
@@ -812,11 +914,11 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
812 np->type = "<NULL>"; 914 np->type = "<NULL>";
813 } 915 }
814 while (tag == OF_DT_BEGIN_NODE) { 916 while (tag == OF_DT_BEGIN_NODE) {
815 mem = unflatten_dt_node(mem, p, np, allnextpp); 917 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
816 tag = *((u32 *)(*p)); 918 tag = *((u32 *)(*p));
817 } 919 }
818 if (tag != OF_DT_END_NODE) { 920 if (tag != OF_DT_END_NODE) {
819 printk("Weird tag at start of node: %x\n", tag); 921 printk("Weird tag at end of node: %x\n", tag);
820 return mem; 922 return mem;
821 } 923 }
822 *p += 4; 924 *p += 4;
@@ -842,21 +944,32 @@ void __init unflatten_device_tree(void)
842 /* First pass, scan for size */ 944 /* First pass, scan for size */
843 start = ((unsigned long)initial_boot_params) + 945 start = ((unsigned long)initial_boot_params) +
844 initial_boot_params->off_dt_struct; 946 initial_boot_params->off_dt_struct;
845 size = unflatten_dt_node(0, &start, NULL, NULL); 947 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
948 size = (size | 3) + 1;
846 949
847 DBG(" size is %lx, allocating...\n", size); 950 DBG(" size is %lx, allocating...\n", size);
848 951
849 /* Allocate memory for the expanded device tree */ 952 /* Allocate memory for the expanded device tree */
850 mem = (unsigned long)abs_to_virt(lmb_alloc(size, 953 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
851 __alignof__(struct device_node))); 954 if (!mem) {
955 DBG("Couldn't allocate memory with lmb_alloc()!\n");
956 panic("Couldn't allocate memory with lmb_alloc()!\n");
957 }
958 mem = (unsigned long)abs_to_virt(mem);
959
960 ((u32 *)mem)[size / 4] = 0xdeadbeef;
961
852 DBG(" unflattening...\n", mem); 962 DBG(" unflattening...\n", mem);
853 963
854 /* Second pass, do actual unflattening */ 964 /* Second pass, do actual unflattening */
855 start = ((unsigned long)initial_boot_params) + 965 start = ((unsigned long)initial_boot_params) +
856 initial_boot_params->off_dt_struct; 966 initial_boot_params->off_dt_struct;
857 unflatten_dt_node(mem, &start, NULL, &allnextp); 967 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
858 if (*((u32 *)start) != OF_DT_END) 968 if (*((u32 *)start) != OF_DT_END)
859 printk(KERN_WARNING "Weird tag at end of tree: %x\n", *((u32 *)start)); 969 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
970 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
971 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
972 ((u32 *)mem)[size / 4]);
860 *allnextp = NULL; 973 *allnextp = NULL;
861 974
862 /* Get pointer to OF "/chosen" node for use everywhere */ 975 /* Get pointer to OF "/chosen" node for use everywhere */
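
[Editor's note] unflatten_device_tree() now does a canary-checked two-pass build: pass one only measures, the allocation adds four spare bytes, a guard word is planted just past the payload, and after the real unflattening the guard is re-read to catch any mismatch between the two passes. The same pattern in the abstract; measure_pass(), fill_pass(), alloc(), and warn() are placeholders for the calls used above:

    size = measure_pass();                  /* pass 1: size only */
    size = (size | 3) + 1;                  /* round up to 4 bytes */
    buf  = alloc(size + 4);                 /* room for the canary */
    ((u32 *)buf)[size / 4] = 0xdeadbeef;    /* plant guard word */
    fill_pass(buf);                         /* pass 2: real build */
    if (((u32 *)buf)[size / 4] != 0xdeadbeef)
            warn("pass 2 overran the size computed by pass 1");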
@@ -880,7 +993,7 @@ void __init unflatten_device_tree(void)
880 993
881 994
882static int __init early_init_dt_scan_cpus(unsigned long node, 995static int __init early_init_dt_scan_cpus(unsigned long node,
883 const char *full_path, void *data) 996 const char *uname, int depth, void *data)
884{ 997{
885 char *type = get_flat_dt_prop(node, "device_type", NULL); 998 char *type = get_flat_dt_prop(node, "device_type", NULL);
886 u32 *prop; 999 u32 *prop;
@@ -947,13 +1060,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
947} 1060}
948 1061
949static int __init early_init_dt_scan_chosen(unsigned long node, 1062static int __init early_init_dt_scan_chosen(unsigned long node,
950 const char *full_path, void *data) 1063 const char *uname, int depth, void *data)
951{ 1064{
952 u32 *prop; 1065 u32 *prop;
953 u64 *prop64; 1066 u64 *prop64;
954 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 1067 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
955 1068
956 if (strcmp(full_path, "/chosen") != 0) 1069 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1070
1071 if (depth != 1 || strcmp(uname, "chosen") != 0)
957 return 0; 1072 return 0;
958 1073
959 /* get platform type */ 1074 /* get platform type */
@@ -1003,18 +1118,20 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1003} 1118}
1004 1119
1005static int __init early_init_dt_scan_root(unsigned long node, 1120static int __init early_init_dt_scan_root(unsigned long node,
1006 const char *full_path, void *data) 1121 const char *uname, int depth, void *data)
1007{ 1122{
1008 u32 *prop; 1123 u32 *prop;
1009 1124
1010 if (strcmp(full_path, "/") != 0) 1125 if (depth != 0)
1011 return 0; 1126 return 0;
1012 1127
1013 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); 1128 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL);
1014 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1129 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1015 1130 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1131
1016 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); 1132 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL);
1017 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1133 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1134 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1018 1135
1019 /* break now */ 1136 /* break now */
1020 return 1; 1137 return 1;
@@ -1042,7 +1159,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1042 1159
1043 1160
1044static int __init early_init_dt_scan_memory(unsigned long node, 1161static int __init early_init_dt_scan_memory(unsigned long node,
1045 const char *full_path, void *data) 1162 const char *uname, int depth, void *data)
1046{ 1163{
1047 char *type = get_flat_dt_prop(node, "device_type", NULL); 1164 char *type = get_flat_dt_prop(node, "device_type", NULL);
1048 cell_t *reg, *endp; 1165 cell_t *reg, *endp;
@@ -1058,7 +1175,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
1058 1175
1059 endp = reg + (l / sizeof(cell_t)); 1176 endp = reg + (l / sizeof(cell_t));
1060 1177
1061 DBG("memory scan node %s ...\n", full_path); 1178 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1179 uname, l, reg[0], reg[1], reg[2], reg[3]);
1180
1062 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1181 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1063 unsigned long base, size; 1182 unsigned long base, size;
1064 1183
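
[Editor's note] early_init_dt_scan_memory() walks each "reg" property as repeating groups of dt_root_addr_cells address cells followed by dt_root_size_cells size cells, with dt_mem_next_cell() splicing consecutive 32-bit cells into one value. A worked decoding, assuming the common ppc64 root of #address-cells = 2 and #size-cells = 1:

    /* reg = <0x00000001 0x00000000 0x20000000>
     *
     *   base = (0x00000001 << 32) | 0x00000000 = 0x100000000
     *   size =  0x20000000                     = 512 MB
     *
     * so one loop iteration above registers a 512 MB region
     * starting at the 4 GB physical boundary.
     */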
@@ -1469,10 +1588,11 @@ struct device_node *of_find_node_by_path(const char *path)
1469 struct device_node *np = allnodes; 1588 struct device_node *np = allnodes;
1470 1589
1471 read_lock(&devtree_lock); 1590 read_lock(&devtree_lock);
1472 for (; np != 0; np = np->allnext) 1591 for (; np != 0; np = np->allnext) {
1473 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1592 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1474 && of_node_get(np)) 1593 && of_node_get(np))
1475 break; 1594 break;
1595 }
1476 read_unlock(&devtree_lock); 1596 read_unlock(&devtree_lock);
1477 return np; 1597 return np;
1478} 1598}
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index dbbe6c79d8da..122283a1d39a 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -892,7 +892,10 @@ static void __init prom_init_mem(void)
892 if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR ) 892 if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR )
893 RELOC(alloc_top) = RELOC(rmo_top); 893 RELOC(alloc_top) = RELOC(rmo_top);
894 else 894 else
895 RELOC(alloc_top) = RELOC(rmo_top) = min(0x40000000ul, RELOC(ram_top)); 895 /* Some RS64 machines have buggy firmware where claims up near 1GB
896 * fail. Cap at 768MB as a workaround; that still leaves plenty of room.
897 */
898 RELOC(alloc_top) = RELOC(rmo_top) = min(0x30000000ul, RELOC(ram_top));
896 899
897 prom_printf("memory layout at init:\n"); 900 prom_printf("memory layout at init:\n");
898 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit)); 901 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
@@ -1534,7 +1537,8 @@ static unsigned long __init dt_find_string(char *str)
1534 */ 1537 */
1535#define MAX_PROPERTY_NAME 64 1538#define MAX_PROPERTY_NAME 64
1536 1539
1537static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, 1540static void __init scan_dt_build_strings(phandle node,
1541 unsigned long *mem_start,
1538 unsigned long *mem_end) 1542 unsigned long *mem_end)
1539{ 1543{
1540 unsigned long offset = reloc_offset(); 1544 unsigned long offset = reloc_offset();
@@ -1547,16 +1551,21 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1547 /* get and store all property names */ 1551 /* get and store all property names */
1548 prev_name = RELOC(""); 1552 prev_name = RELOC("");
1549 for (;;) { 1553 for (;;) {
1550 int rc;
1551
1552 /* 64 is max len of name including nul. */ 1554 /* 64 is max len of name including nul. */
1553 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 1555 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1554 rc = call_prom("nextprop", 3, 1, node, prev_name, namep); 1556 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1555 if (rc != 1) {
1556 /* No more nodes: unwind alloc */ 1557 /* No more nodes: unwind alloc */
1557 *mem_start = (unsigned long)namep; 1558 *mem_start = (unsigned long)namep;
1558 break; 1559 break;
1559 } 1560 }
1561
1562 /* skip "name" */
1563 if (strcmp(namep, RELOC("name")) == 0) {
1564 *mem_start = (unsigned long)namep;
1565 prev_name = RELOC("name");
1566 continue;
1567 }
1568 /* get/create string entry */
1560 soff = dt_find_string(namep); 1569 soff = dt_find_string(namep);
1561 if (soff != 0) { 1570 if (soff != 0) {
1562 *mem_start = (unsigned long)namep; 1571 *mem_start = (unsigned long)namep;
@@ -1571,7 +1580,7 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1571 1580
1572 /* do all our children */ 1581 /* do all our children */
1573 child = call_prom("child", 1, 1, node); 1582 child = call_prom("child", 1, 1, node);
1574 while (child != (phandle)0) { 1583 while (child != 0) {
1575 scan_dt_build_strings(child, mem_start, mem_end); 1584 scan_dt_build_strings(child, mem_start, mem_end);
1576 child = call_prom("peer", 1, 1, child); 1585 child = call_prom("peer", 1, 1, child);
1577 } 1586 }
@@ -1580,16 +1589,13 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1580static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 1589static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1581 unsigned long *mem_end) 1590 unsigned long *mem_end)
1582{ 1591{
1583 int l, align;
1584 phandle child; 1592 phandle child;
1585 char *namep, *prev_name, *sstart, *p, *ep; 1593 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1586 unsigned long soff; 1594 unsigned long soff;
1587 unsigned char *valp; 1595 unsigned char *valp;
1588 unsigned long offset = reloc_offset(); 1596 unsigned long offset = reloc_offset();
1589 char pname[MAX_PROPERTY_NAME]; 1597 static char pname[MAX_PROPERTY_NAME];
1590 char *path; 1598 int l;
1591
1592 path = RELOC(prom_scratch);
1593 1599
1594 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 1600 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1595 1601
@@ -1599,23 +1605,33 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1599 namep, *mem_end - *mem_start); 1605 namep, *mem_end - *mem_start);
1600 if (l >= 0) { 1606 if (l >= 0) {
1601 /* Didn't fit? Get more room. */ 1607 /* Didn't fit? Get more room. */
1602 if (l+1 > *mem_end - *mem_start) { 1608 if ((l+1) > (*mem_end - *mem_start)) {
1603 namep = make_room(mem_start, mem_end, l+1, 1); 1609 namep = make_room(mem_start, mem_end, l+1, 1);
1604 call_prom("package-to-path", 3, 1, node, namep, l); 1610 call_prom("package-to-path", 3, 1, node, namep, l);
1605 } 1611 }
1606 namep[l] = '\0'; 1612 namep[l] = '\0';
1613
1607 /* Fixup an Apple bug where they have bogus \0 chars in the 1614 /* Fixup an Apple bug where they have bogus \0 chars in the
1608 * middle of the path in some properties 1615 * middle of the path in some properties
1609 */ 1616 */
1610 for (p = namep, ep = namep + l; p < ep; p++) 1617 for (p = namep, ep = namep + l; p < ep; p++)
1611 if (*p == '\0') { 1618 if (*p == '\0') {
1612 memmove(p, p+1, ep - p); 1619 memmove(p, p+1, ep - p);
1613 ep--; l--; 1620 ep--; l--; p--;
1614 } 1621 }
1615 *mem_start = _ALIGN(((unsigned long) namep) + strlen(namep) + 1, 4); 1622
1623 /* now try to extract the unit name in that mess */
1624 for (p = namep, lp = NULL; *p; p++)
1625 if (*p == '/')
1626 lp = p + 1;
1627 if (lp != NULL)
1628 memmove(namep, lp, strlen(lp) + 1);
1629 *mem_start = _ALIGN(((unsigned long) namep) +
1630 strlen(namep) + 1, 4);
1616 } 1631 }
1617 1632
1618 /* get it again for debugging */ 1633 /* get it again for debugging */
1634 path = RELOC(prom_scratch);
1619 memset(path, 0, PROM_SCRATCH_SIZE); 1635 memset(path, 0, PROM_SCRATCH_SIZE);
1620 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1636 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1621 1637
@@ -1623,23 +1639,27 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1623 prev_name = RELOC(""); 1639 prev_name = RELOC("");
1624 sstart = (char *)RELOC(dt_string_start); 1640 sstart = (char *)RELOC(dt_string_start);
1625 for (;;) { 1641 for (;;) {
1626 int rc; 1642 if (call_prom("nextprop", 3, 1, node, prev_name,
1627 1643 RELOC(pname)) != 1)
1628 rc = call_prom("nextprop", 3, 1, node, prev_name, pname);
1629 if (rc != 1)
1630 break; 1644 break;
1631 1645
1646 /* skip "name" */
1647 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1648 prev_name = RELOC("name");
1649 continue;
1650 }
1651
1632 /* find string offset */ 1652 /* find string offset */
1633 soff = dt_find_string(pname); 1653 soff = dt_find_string(RELOC(pname));
1634 if (soff == 0) { 1654 if (soff == 0) {
1635 prom_printf("WARNING: Can't find string index for <%s>, node %s\n", 1655 prom_printf("WARNING: Can't find string index for"
1636 pname, path); 1656 " <%s>, node %s\n", RELOC(pname), path);
1637 break; 1657 break;
1638 } 1658 }
1639 prev_name = sstart + soff; 1659 prev_name = sstart + soff;
1640 1660
1641 /* get length */ 1661 /* get length */
1642 l = call_prom("getproplen", 2, 1, node, pname); 1662 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1643 1663
1644 /* sanity checks */ 1664 /* sanity checks */
1645 if (l == PROM_ERROR) 1665 if (l == PROM_ERROR)
@@ -1648,7 +1668,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1648 prom_printf("WARNING: ignoring large property "); 1668 prom_printf("WARNING: ignoring large property ");
1649 /* It seems OF doesn't null-terminate the path :-( */ 1669 /* It seems OF doesn't null-terminate the path :-( */
1650 prom_printf("[%s] ", path); 1670 prom_printf("[%s] ", path);
1651 prom_printf("%s length 0x%x\n", pname, l); 1671 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1652 continue; 1672 continue;
1653 } 1673 }
1654 1674
@@ -1658,17 +1678,16 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1658 dt_push_token(soff, mem_start, mem_end); 1678 dt_push_token(soff, mem_start, mem_end);
1659 1679
1660 /* push property content */ 1680 /* push property content */
1661 align = (l >= 8) ? 8 : 4; 1681 valp = make_room(mem_start, mem_end, l, 4);
1662 valp = make_room(mem_start, mem_end, l, align); 1682 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1663 call_prom("getprop", 4, 1, node, pname, valp, l);
1664 *mem_start = _ALIGN(*mem_start, 4); 1683 *mem_start = _ALIGN(*mem_start, 4);
1665 } 1684 }
1666 1685
1667 /* Add a "linux,phandle" property. */ 1686 /* Add a "linux,phandle" property. */
1668 soff = dt_find_string(RELOC("linux,phandle")); 1687 soff = dt_find_string(RELOC("linux,phandle"));
1669 if (soff == 0) 1688 if (soff == 0)
1670 prom_printf("WARNING: Can't find string index for <linux-phandle>" 1689 prom_printf("WARNING: Can't find string index for"
1671 " node %s\n", path); 1690 " <linux-phandle> node %s\n", path);
1672 else { 1691 else {
1673 dt_push_token(OF_DT_PROP, mem_start, mem_end); 1692 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1674 dt_push_token(4, mem_start, mem_end); 1693 dt_push_token(4, mem_start, mem_end);
@@ -1679,7 +1698,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1679 1698
1680 /* do all our children */ 1699 /* do all our children */
1681 child = call_prom("child", 1, 1, node); 1700 child = call_prom("child", 1, 1, node);
1682 while (child != (phandle)0) { 1701 while (child != 0) {
1683 scan_dt_build_struct(child, mem_start, mem_end); 1702 scan_dt_build_struct(child, mem_start, mem_end);
1684 child = call_prom("peer", 1, 1, child); 1703 child = call_prom("peer", 1, 1, child);
1685 } 1704 }
@@ -1718,7 +1737,8 @@ static void __init flatten_device_tree(void)
1718 1737
1719 /* Build header and make room for mem rsv map */ 1738 /* Build header and make room for mem rsv map */
1720 mem_start = _ALIGN(mem_start, 4); 1739 mem_start = _ALIGN(mem_start, 4);
1721 hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); 1740 hdr = make_room(&mem_start, &mem_end,
1741 sizeof(struct boot_param_header), 4);
1722 RELOC(dt_header_start) = (unsigned long)hdr; 1742 RELOC(dt_header_start) = (unsigned long)hdr;
1723 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 1743 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1724 1744
@@ -1731,11 +1751,11 @@ static void __init flatten_device_tree(void)
1731 namep = make_room(&mem_start, &mem_end, 16, 1); 1751 namep = make_room(&mem_start, &mem_end, 16, 1);
1732 strcpy(namep, RELOC("linux,phandle")); 1752 strcpy(namep, RELOC("linux,phandle"));
1733 mem_start = (unsigned long)namep + strlen(namep) + 1; 1753 mem_start = (unsigned long)namep + strlen(namep) + 1;
1734 RELOC(dt_string_end) = mem_start;
1735 1754
1736 /* Build string array */ 1755 /* Build string array */
1737 prom_printf("Building dt strings...\n"); 1756 prom_printf("Building dt strings...\n");
1738 scan_dt_build_strings(root, &mem_start, &mem_end); 1757 scan_dt_build_strings(root, &mem_start, &mem_end);
1758 RELOC(dt_string_end) = mem_start;
1739 1759
1740 /* Build structure */ 1760 /* Build structure */
1741 mem_start = PAGE_ALIGN(mem_start); 1761 mem_start = PAGE_ALIGN(mem_start);
@@ -1750,9 +1770,11 @@ static void __init flatten_device_tree(void)
1750 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); 1770 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1751 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); 1771 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1752 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); 1772 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1773 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1753 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); 1774 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1754 hdr->version = OF_DT_VERSION; 1775 hdr->version = OF_DT_VERSION;
1755 hdr->last_comp_version = 1; 1776 /* Version 16 is not backward compatible */
1777 hdr->last_comp_version = 0x10;
1756 1778
1757 /* Reserve the whole thing and copy the reserve map in, we 1779 /* Reserve the whole thing and copy the reserve map in, we
1758 * also bump mem_reserve_cnt to cause further reservations to 1780 * also bump mem_reserve_cnt to cause further reservations to
@@ -1808,6 +1830,9 @@ static void __init fixup_device_tree(void)
1808 /* does it need fixup ? */ 1830 /* does it need fixup ? */
1809 if (prom_getproplen(i2c, "interrupts") > 0) 1831 if (prom_getproplen(i2c, "interrupts") > 0)
1810 return; 1832 return;
1833
1834 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1835
1811 /* interrupt on this revision of u3 is number 0 and level */ 1836 /* interrupt on this revision of u3 is number 0 and level */
1812 interrupts[0] = 0; 1837 interrupts[0] = 0;
1813 interrupts[1] = 1; 1838 interrupts[1] = 1;
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 1048817befb8..1dccadaddd1d 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -58,6 +58,21 @@ static int config_access_valid(struct device_node *dn, int where)
58 return 0; 58 return 0;
59} 59}
60 60
61static int of_device_available(struct device_node *dn)
62{
63 char *status;
64
65 status = get_property(dn, "status", NULL);
66
67 if (!status)
68 return 1;
69
70 if (!strcmp(status, "okay"))
71 return 1;
72
73 return 0;
74}
75
61static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) 76static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
62{ 77{
63 int returnval = -1; 78 int returnval = -1;
@@ -103,7 +118,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
103 118
104 /* Search only direct children of the bus */ 119 /* Search only direct children of the bus */
105 for (dn = busdn->child; dn; dn = dn->sibling) 120 for (dn = busdn->child; dn; dn = dn->sibling)
106 if (dn->devfn == devfn) 121 if (dn->devfn == devfn && of_device_available(dn))
107 return rtas_read_config(dn, where, size, val); 122 return rtas_read_config(dn, where, size, val);
108 return PCIBIOS_DEVICE_NOT_FOUND; 123 return PCIBIOS_DEVICE_NOT_FOUND;
109} 124}
@@ -146,7 +161,7 @@ static int rtas_pci_write_config(struct pci_bus *bus,
146 161
147 /* Search only direct children of the bus */ 162 /* Search only direct children of the bus */
148 for (dn = busdn->child; dn; dn = dn->sibling) 163 for (dn = busdn->child; dn; dn = dn->sibling)
149 if (dn->devfn == devfn) 164 if (dn->devfn == devfn && of_device_available(dn))
150 return rtas_write_config(dn, where, size, val); 165 return rtas_write_config(dn, where, size, val);
151 return PCIBIOS_DEVICE_NOT_FOUND; 166 return PCIBIOS_DEVICE_NOT_FOUND;
152} 167}
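
[Editor's note] of_device_available() encodes the Open Firmware convention for the "status" property: absent or "okay" means the device is usable, anything else (typically "disabled") means firmware has fenced it off, so the config-space accessors above now skip such nodes and report PCIBIOS_DEVICE_NOT_FOUND. An illustrative device-tree fragment, shown as a comment since the node names are made up:

    /* ethernet@1 {
     *         status = "disabled";   -> of_device_available() == 0
     * };
     * ethernet@2 {
     *         status = "okay";       -> of_device_available() == 1
     * };
     * ethernet@3 {
     *         (no status property)   -> of_device_available() == 1
     * };
     */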
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 687e85595208..ee3b20de2e7a 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -536,15 +536,19 @@ static void __init check_for_initrd(void)
536 536
537 DBG(" -> check_for_initrd()\n"); 537 DBG(" -> check_for_initrd()\n");
538 538
539 prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL); 539 if (of_chosen) {
540 if (prop != NULL) { 540 prop = (u64 *)get_property(of_chosen,
541 initrd_start = (unsigned long)__va(*prop); 541 "linux,initrd-start", NULL);
542 prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL);
543 if (prop != NULL) { 542 if (prop != NULL) {
544 initrd_end = (unsigned long)__va(*prop); 543 initrd_start = (unsigned long)__va(*prop);
545 initrd_below_start_ok = 1; 544 prop = (u64 *)get_property(of_chosen,
546 } else 545 "linux,initrd-end", NULL);
547 initrd_start = 0; 546 if (prop != NULL) {
547 initrd_end = (unsigned long)__va(*prop);
548 initrd_below_start_ok = 1;
549 } else
550 initrd_start = 0;
551 }
548 } 552 }
549 553
550 /* If we were passed an initrd, set the ROOT_DEV properly if the values 554 /* If we were passed an initrd, set the ROOT_DEV properly if the values
@@ -627,7 +631,7 @@ void __init setup_system(void)
627 * Initialize xmon 631 * Initialize xmon
628 */ 632 */
629#ifdef CONFIG_XMON_DEFAULT 633#ifdef CONFIG_XMON_DEFAULT
630 xmon_init(); 634 xmon_init(1);
631#endif 635#endif
632 /* 636 /*
633 * Register early console 637 * Register early console
@@ -706,6 +710,8 @@ void machine_power_off(void)
706 local_irq_disable(); 710 local_irq_disable();
707 while (1) ; 711 while (1) ;
708} 712}
713/* Used by the G5 thermal driver */
714EXPORT_SYMBOL_GPL(machine_power_off);
709 715
710void machine_halt(void) 716void machine_halt(void)
711{ 717{
@@ -1341,11 +1347,13 @@ static int __init early_xmon(char *p)
1341 /* ensure xmon is enabled */ 1347 /* ensure xmon is enabled */
1342 if (p) { 1348 if (p) {
1343 if (strncmp(p, "on", 2) == 0) 1349 if (strncmp(p, "on", 2) == 0)
1344 xmon_init(); 1350 xmon_init(1);
1351 if (strncmp(p, "off", 3) == 0)
1352 xmon_init(0);
1345 if (strncmp(p, "early", 5) != 0) 1353 if (strncmp(p, "early", 5) != 0)
1346 return 0; 1354 return 0;
1347 } 1355 }
1348 xmon_init(); 1356 xmon_init(1);
1349 debugger(NULL); 1357 debugger(NULL);
1350 1358
1351 return 0; 1359 return 0;
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index bf782276984c..49a79a55c32d 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -481,10 +481,11 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
481 /* Set up Signal Frame */ 481 /* Set up Signal Frame */
482 ret = setup_rt_frame(sig, ka, info, oldset, regs); 482 ret = setup_rt_frame(sig, ka, info, oldset, regs);
483 483
484 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) { 484 if (ret) {
485 spin_lock_irq(&current->sighand->siglock); 485 spin_lock_irq(&current->sighand->siglock);
486 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 486 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
487 sigaddset(&current->blocked,sig); 487 if (!(ka->sa.sa_flags & SA_NODEFER))
488 sigaddset(&current->blocked,sig);
488 recalc_sigpending(); 489 recalc_sigpending();
489 spin_unlock_irq(&current->sighand->siglock); 490 spin_unlock_irq(&current->sighand->siglock);
490 } 491 }
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
index 3c2fa5c284c0..46f4d6cc7fc9 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/ppc64/kernel/signal32.c
@@ -976,11 +976,12 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
976 else 976 else
977 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp); 977 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp);
978 978
979 if (ret && !(ka.sa.sa_flags & SA_NODEFER)) { 979 if (ret) {
980 spin_lock_irq(&current->sighand->siglock); 980 spin_lock_irq(&current->sighand->siglock);
981 sigorsets(&current->blocked, &current->blocked, 981 sigorsets(&current->blocked, &current->blocked,
982 &ka.sa.sa_mask); 982 &ka.sa.sa_mask);
983 sigaddset(&current->blocked, signr); 983 if (!(ka.sa.sa_flags & SA_NODEFER))
984 sigaddset(&current->blocked, signr);
984 recalc_sigpending(); 985 recalc_sigpending();
985 spin_unlock_irq(&current->sighand->siglock); 986 spin_unlock_irq(&current->sighand->siglock);
986 } 987 }
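
[Editor's note] Both the 64-bit and 32-bit signal paths previously skipped the entire blocking step whenever SA_NODEFER was set; the fix applies sa_mask unconditionally and lets SA_NODEFER suppress only the implicit self-blocking of the delivered signal, which is what POSIX sigaction() requires. A small user-space illustration of the semantics being corrected; the handler body is a placeholder:

    #include <signal.h>

    static void handler(int sig)
    {
            (void)sig;      /* placeholder handler body */
    }

    int main(void)
    {
            struct sigaction sa;

            sa.sa_handler = handler;
            sa.sa_flags = SA_NODEFER;        /* SIGUSR1 may nest */
            sigemptyset(&sa.sa_mask);
            sigaddset(&sa.sa_mask, SIGUSR2); /* hold SIGUSR2 in handler */
            sigaction(SIGUSR1, &sa, 0);

            /* Before this fix the kernel ignored sa_mask entirely
             * when SA_NODEFER was set, so SIGUSR2 could preempt the
             * handler; now sa_mask is always honoured.
             */
            return 0;
    }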
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 02b8ac4e0168..f311ee7c0070 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -13,6 +13,7 @@
13#include <asm/current.h> 13#include <asm/current.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
16#include <asm/firmware.h>
16#include <asm/hvcall.h> 17#include <asm/hvcall.h>
17#include <asm/prom.h> 18#include <asm/prom.h>
18#include <asm/systemcfg.h> 19#include <asm/systemcfg.h>
@@ -100,6 +101,8 @@ static int __init setup_smt_snooze_delay(char *str)
100} 101}
101__setup("smt-snooze-delay=", setup_smt_snooze_delay); 102__setup("smt-snooze-delay=", setup_smt_snooze_delay);
102 103
104#endif /* CONFIG_PPC_MULTIPLATFORM */
105
103/* 106/*
104 * Enabling PMCs will slow partition context switch times so we only do 107 * Enabling PMCs will slow partition context switch times so we only do
105 * it the first time we write to the PMCs. 108 * it the first time we write to the PMCs.
@@ -109,65 +112,15 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
109 112
110void ppc64_enable_pmcs(void) 113void ppc64_enable_pmcs(void)
111{ 114{
112 unsigned long hid0;
113#ifdef CONFIG_PPC_PSERIES
114 unsigned long set, reset;
115#endif /* CONFIG_PPC_PSERIES */
116
117 /* Only need to enable them once */ 115 /* Only need to enable them once */
118 if (__get_cpu_var(pmcs_enabled)) 116 if (__get_cpu_var(pmcs_enabled))
119 return; 117 return;
120 118
121 __get_cpu_var(pmcs_enabled) = 1; 119 __get_cpu_var(pmcs_enabled) = 1;
122 120
123 switch (systemcfg->platform) { 121 if (ppc_md.enable_pmcs)
124 case PLATFORM_PSERIES: 122 ppc_md.enable_pmcs();
125 case PLATFORM_POWERMAC:
126 hid0 = mfspr(HID0);
127 hid0 |= 1UL << (63 - 20);
128
129 /* POWER4 requires the following sequence */
130 asm volatile(
131 "sync\n"
132 "mtspr %1, %0\n"
133 "mfspr %0, %1\n"
134 "mfspr %0, %1\n"
135 "mfspr %0, %1\n"
136 "mfspr %0, %1\n"
137 "mfspr %0, %1\n"
138 "mfspr %0, %1\n"
139 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
140 "memory");
141 break;
142
143#ifdef CONFIG_PPC_PSERIES
144 case PLATFORM_PSERIES_LPAR:
145 set = 1UL << 63;
146 reset = 0;
147 plpar_hcall_norets(H_PERFMON, set, reset);
148 break;
149#endif /* CONFIG_PPC_PSERIES */
150
151 default:
152 break;
153 }
154
155#ifdef CONFIG_PPC_PSERIES
156 /* instruct hypervisor to maintain PMCs */
157 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
158 get_paca()->lppaca.pmcregs_in_use = 1;
159#endif /* CONFIG_PPC_PSERIES */
160} 123}
161
162#else
163
164/* PMC stuff */
165void ppc64_enable_pmcs(void)
166{
167 /* XXX Implement for iseries */
168}
169#endif /* CONFIG_PPC_MULTIPLATFORM */
170
171EXPORT_SYMBOL(ppc64_enable_pmcs); 124EXPORT_SYMBOL(ppc64_enable_pmcs);
172 125
173/* XXX convert to rusty's on_one_cpu */ 126/* XXX convert to rusty's on_one_cpu */
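
[Editor's note] ppc64_enable_pmcs() loses its platform switch in favour of a ppc_md.enable_pmcs hook that each platform installs at setup time: pSeries picks pseries_lpar_enable_pmcs() or power4_enable_pmcs() depending on PLATFORM_LPAR, and pmac_setup.c installs power4_enable_pmcs() statically. The machdep-calls pattern in miniature; the struct here shows only the one field relevant to this patch:

    struct machdep_calls {
            void (*enable_pmcs)(void);
            /* ... many other platform hooks ... */
    };
    extern struct machdep_calls ppc_md;

    /* platform setup time (see pSeries_setup_arch() above): */
    ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;

    /* generic code, now platform-agnostic: */
    if (ppc_md.enable_pmcs)
            ppc_md.enable_pmcs();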
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 909462e1adea..1696e1b05bb9 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -67,6 +67,7 @@
67#include <asm/prom.h> 67#include <asm/prom.h>
68#include <asm/sections.h> 68#include <asm/sections.h>
69#include <asm/systemcfg.h> 69#include <asm/systemcfg.h>
70#include <asm/firmware.h>
70 71
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 72u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
72 73
@@ -370,13 +371,11 @@ int timer_interrupt(struct pt_regs * regs)
370 process_hvlpevents(regs); 371 process_hvlpevents(regs);
371#endif 372#endif
372 373
373/* collect purr register values often, for accurate calculations */ 374 /* collect purr register values often, for accurate calculations */
374#if defined(CONFIG_PPC_PSERIES) 375 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
376 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 376 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
377 cu->current_tb = mfspr(SPRN_PURR); 377 cu->current_tb = mfspr(SPRN_PURR);
378 } 378 }
379#endif
380 379
381 irq_exit(); 380 irq_exit();
382 381
diff --git a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c
index 0c0ba71ac0e8..c90e1dd875ce 100644
--- a/arch/ppc64/kernel/vio.c
+++ b/arch/ppc64/kernel/vio.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003 IBM Corp. 4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
8 * 9 *
9 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
@@ -14,61 +15,30 @@
14 15
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/console.h> 17#include <linux/console.h>
17#include <linux/version.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kobject.h>
20#include <linux/mm.h> 19#include <linux/mm.h>
21#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
22#include <asm/rtas.h>
23#include <asm/iommu.h> 21#include <asm/iommu.h>
24#include <asm/dma.h> 22#include <asm/dma.h>
25#include <asm/ppcdebug.h>
26#include <asm/vio.h> 23#include <asm/vio.h>
27#include <asm/hvcall.h>
28#include <asm/iSeries/vio.h>
29#include <asm/iSeries/HvTypes.h>
30#include <asm/iSeries/HvCallXm.h>
31#include <asm/iSeries/HvLpConfig.h>
32
33#define DBGENTER() pr_debug("%s entered\n", __FUNCTION__)
34
35extern struct subsystem devices_subsys; /* needed for vio_find_name() */
36 24
37static const struct vio_device_id *vio_match_device( 25static const struct vio_device_id *vio_match_device(
38 const struct vio_device_id *, const struct vio_dev *); 26 const struct vio_device_id *, const struct vio_dev *);
39 27
40#ifdef CONFIG_PPC_PSERIES 28struct vio_dev vio_bus_device = { /* fake "parent" device */
41static struct iommu_table *vio_build_iommu_table(struct vio_dev *);
42static int vio_num_address_cells;
43#endif
44#ifdef CONFIG_PPC_ISERIES
45static struct iommu_table veth_iommu_table;
46static struct iommu_table vio_iommu_table;
47#endif
48static struct vio_dev vio_bus_device = { /* fake "parent" device */
49 .name = vio_bus_device.dev.bus_id, 29 .name = vio_bus_device.dev.bus_id,
50 .type = "", 30 .type = "",
51#ifdef CONFIG_PPC_ISERIES
52 .iommu_table = &vio_iommu_table,
53#endif
54 .dev.bus_id = "vio", 31 .dev.bus_id = "vio",
55 .dev.bus = &vio_bus_type, 32 .dev.bus = &vio_bus_type,
56}; 33};
57 34
58#ifdef CONFIG_PPC_ISERIES 35static struct vio_bus_ops vio_bus_ops;
59static struct vio_dev *__init vio_register_device_iseries(char *type,
60 uint32_t unit_num);
61
62struct device *iSeries_vio_dev = &vio_bus_device.dev;
63EXPORT_SYMBOL(iSeries_vio_dev);
64
65#define device_is_compatible(a, b) 1
66 36
67#endif 37/*
68 38 * Convert from struct device to struct vio_dev and pass to driver.
69/* convert from struct device to struct vio_dev and pass to driver.
70 * dev->driver has already been set by generic code because vio_bus_match 39 * dev->driver has already been set by generic code because vio_bus_match
71 * succeeded. */ 40 * succeeded.
41 */
72static int vio_bus_probe(struct device *dev) 42static int vio_bus_probe(struct device *dev)
73{ 43{
74 struct vio_dev *viodev = to_vio_dev(dev); 44 struct vio_dev *viodev = to_vio_dev(dev);
@@ -76,15 +46,12 @@ static int vio_bus_probe(struct device *dev)
76 const struct vio_device_id *id; 46 const struct vio_device_id *id;
77 int error = -ENODEV; 47 int error = -ENODEV;
78 48
79 DBGENTER();
80
81 if (!viodrv->probe) 49 if (!viodrv->probe)
82 return error; 50 return error;
83 51
84 id = vio_match_device(viodrv->id_table, viodev); 52 id = vio_match_device(viodrv->id_table, viodev);
85 if (id) { 53 if (id)
86 error = viodrv->probe(viodev, id); 54 error = viodrv->probe(viodev, id);
87 }
88 55
89 return error; 56 return error;
90} 57}
@@ -95,11 +62,8 @@ static int vio_bus_remove(struct device *dev)
95 struct vio_dev *viodev = to_vio_dev(dev); 62 struct vio_dev *viodev = to_vio_dev(dev);
96 struct vio_driver *viodrv = to_vio_driver(dev->driver); 63 struct vio_driver *viodrv = to_vio_driver(dev->driver);
97 64
98 DBGENTER(); 65 if (viodrv->remove)
99
100 if (viodrv->remove) {
101 return viodrv->remove(viodev); 66 return viodrv->remove(viodev);
102 }
103 67
104 /* driver can't remove */ 68 /* driver can't remove */
105 return 1; 69 return 1;
@@ -135,193 +99,72 @@ void vio_unregister_driver(struct vio_driver *viodrv)
135EXPORT_SYMBOL(vio_unregister_driver); 99EXPORT_SYMBOL(vio_unregister_driver);
136 100
137/** 101/**
138 * vio_match_device: - Tell if a VIO device has a matching VIO device id structure. 102 * vio_match_device: - Tell if a VIO device has a matching
139 * @ids: array of VIO device id structures to search in 103 * VIO device id structure.
140 * @dev: the VIO device structure to match against 104 * @ids: array of VIO device id structures to search in
105 * @dev: the VIO device structure to match against
141 * 106 *
142 * Used by a driver to check whether a VIO device present in the 107 * Used by a driver to check whether a VIO device present in the
143 * system is in its list of supported devices. Returns the matching 108 * system is in its list of supported devices. Returns the matching
144 * vio_device_id structure or NULL if there is no match. 109 * vio_device_id structure or NULL if there is no match.
145 */ 110 */
146static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids, 111static const struct vio_device_id *vio_match_device(
147 const struct vio_dev *dev) 112 const struct vio_device_id *ids, const struct vio_dev *dev)
148{ 113{
149 DBGENTER(); 114 while (ids->type[0] != '\0') {
150 115 if (vio_bus_ops.match(ids, dev))
151 while (ids->type) {
152 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
153 device_is_compatible(dev->dev.platform_data, ids->compat))
154 return ids; 116 return ids;
155 ids++; 117 ids++;
156 } 118 }
157 return NULL; 119 return NULL;
158} 120}
159 121
160#ifdef CONFIG_PPC_ISERIES
161void __init iommu_vio_init(void)
162{
163 struct iommu_table *t;
164 struct iommu_table_cb cb;
165 unsigned long cbp;
166 unsigned long itc_entries;
167
168 cb.itc_busno = 255; /* Bus 255 is the virtual bus */
169 cb.itc_virtbus = 0xff; /* Ask for virtual bus */
170
171 cbp = virt_to_abs(&cb);
172 HvCallXm_getTceTableParms(cbp);
173
174 itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
175 veth_iommu_table.it_size = itc_entries / 2;
176 veth_iommu_table.it_busno = cb.itc_busno;
177 veth_iommu_table.it_offset = cb.itc_offset;
178 veth_iommu_table.it_index = cb.itc_index;
179 veth_iommu_table.it_type = TCE_VB;
180 veth_iommu_table.it_blocksize = 1;
181
182 t = iommu_init_table(&veth_iommu_table);
183
184 if (!t)
185 printk("Virtual Bus VETH TCE table failed.\n");
186
187 vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
188 vio_iommu_table.it_busno = cb.itc_busno;
189 vio_iommu_table.it_offset = cb.itc_offset +
190 veth_iommu_table.it_size;
191 vio_iommu_table.it_index = cb.itc_index;
192 vio_iommu_table.it_type = TCE_VB;
193 vio_iommu_table.it_blocksize = 1;
194
195 t = iommu_init_table(&vio_iommu_table);
196
197 if (!t)
198 printk("Virtual Bus VIO TCE table failed.\n");
199}
200#endif
201
202#ifdef CONFIG_PPC_PSERIES
203static void probe_bus_pseries(void)
204{
205 struct device_node *node_vroot, *of_node;
206
207 node_vroot = find_devices("vdevice");
208 if ((node_vroot == NULL) || (node_vroot->child == NULL))
209 /* this machine doesn't do virtual IO, and that's ok */
210 return;
211
212 vio_num_address_cells = prom_n_addr_cells(node_vroot->child);
213
214 /*
215 * Create struct vio_devices for each virtual device in the device tree.
216 * Drivers will associate with them later.
217 */
218 for (of_node = node_vroot->child; of_node != NULL;
219 of_node = of_node->sibling) {
220 printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
221 vio_register_device_node(of_node);
222 }
223}
224#endif
225
226#ifdef CONFIG_PPC_ISERIES
227static void probe_bus_iseries(void)
228{
229 HvLpIndexMap vlan_map = HvLpConfig_getVirtualLanIndexMap();
230 struct vio_dev *viodev;
231 int i;
232
233 /* there is only one of each of these */
234 vio_register_device_iseries("viocons", 0);
235 vio_register_device_iseries("vscsi", 0);
236
237 vlan_map = HvLpConfig_getVirtualLanIndexMap();
238 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
239 if ((vlan_map & (0x8000 >> i)) == 0)
240 continue;
241 viodev = vio_register_device_iseries("vlan", i);
242 /* veth is special and has its own iommu_table */
243 viodev->iommu_table = &veth_iommu_table;
244 }
245 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
246 vio_register_device_iseries("viodasd", i);
247 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
248 vio_register_device_iseries("viocd", i);
249 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
250 vio_register_device_iseries("viotape", i);
251}
252#endif
253
254/** 122/**
255 * vio_bus_init: - Initialize the virtual IO bus 123 * vio_bus_init: - Initialize the virtual IO bus
256 */ 124 */
257static int __init vio_bus_init(void) 125int __init vio_bus_init(struct vio_bus_ops *ops)
258{ 126{
259 int err; 127 int err;
260 128
129 vio_bus_ops = *ops;
130
261 err = bus_register(&vio_bus_type); 131 err = bus_register(&vio_bus_type);
262 if (err) { 132 if (err) {
263 printk(KERN_ERR "failed to register VIO bus\n"); 133 printk(KERN_ERR "failed to register VIO bus\n");
264 return err; 134 return err;
265 } 135 }
266 136
267 /* the fake parent of all vio devices, just to give us a nice directory */ 137 /*
138 * The fake parent of all vio devices, just to give us
139 * a nice directory
140 */
268 err = device_register(&vio_bus_device.dev); 141 err = device_register(&vio_bus_device.dev);
269 if (err) { 142 if (err) {
270 printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, 143 printk(KERN_WARNING "%s: device_register returned %i\n",
271 err); 144 __FUNCTION__, err);
272 return err; 145 return err;
273 } 146 }
274 147
275#ifdef CONFIG_PPC_PSERIES
276 probe_bus_pseries();
277#endif
278#ifdef CONFIG_PPC_ISERIES
279 probe_bus_iseries();
280#endif
281
282 return 0; 148 return 0;
283} 149}
284 150
285__initcall(vio_bus_init);
286
287/* vio_dev refcount hit 0 */ 151/* vio_dev refcount hit 0 */
288static void __devinit vio_dev_release(struct device *dev) 152static void __devinit vio_dev_release(struct device *dev)
289{ 153{
290 DBGENTER(); 154 if (vio_bus_ops.release_device)
291 155 vio_bus_ops.release_device(dev);
292#ifdef CONFIG_PPC_PSERIES
293 /* XXX free TCE table */
294 of_node_put(dev->platform_data);
295#endif
296 kfree(to_vio_dev(dev)); 156 kfree(to_vio_dev(dev));
297} 157}
298 158
299#ifdef CONFIG_PPC_PSERIES 159static ssize_t viodev_show_name(struct device *dev,
300static ssize_t viodev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) 160 struct device_attribute *attr, char *buf)
301{
302 struct device_node *of_node = dev->platform_data;
303
304 return sprintf(buf, "%s\n", of_node->full_name);
305}
306DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
307#endif
308
309static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf)
310{ 161{
311 return sprintf(buf, "%s\n", to_vio_dev(dev)->name); 162 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
312} 163}
313DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL); 164DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
314 165
315static struct vio_dev * __devinit vio_register_device_common( 166struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
316 struct vio_dev *viodev, char *name, char *type,
317 uint32_t unit_address, struct iommu_table *iommu_table)
318{ 167{
319 DBGENTER();
320
321 viodev->name = name;
322 viodev->type = type;
323 viodev->unit_address = unit_address;
324 viodev->iommu_table = iommu_table;
325 /* init generic 'struct device' fields: */ 168 /* init generic 'struct device' fields: */
326 viodev->dev.parent = &vio_bus_device.dev; 169 viodev->dev.parent = &vio_bus_device.dev;
327 viodev->dev.bus = &vio_bus_type; 170 viodev->dev.bus = &vio_bus_type;
@@ -338,222 +181,15 @@ static struct vio_dev * __devinit vio_register_device_common(
338 return viodev; 181 return viodev;
339} 182}
340 183
341#ifdef CONFIG_PPC_PSERIES
342/**
343 * vio_register_device_node: - Register a new vio device.
344 * @of_node: The OF node for this device.
345 *
346 * Creates and initializes a vio_dev structure from the data in
347 * of_node (dev.platform_data) and adds it to the list of virtual devices.
348 * Returns a pointer to the created vio_dev or NULL if node has
349 * NULL device_type or compatible fields.
350 */
351struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
352{
353 struct vio_dev *viodev;
354 unsigned int *unit_address;
355 unsigned int *irq_p;
356
357 DBGENTER();
358
359 /* we need the 'device_type' property, in order to match with drivers */
360 if ((NULL == of_node->type)) {
361 printk(KERN_WARNING
362 "%s: node %s missing 'device_type'\n", __FUNCTION__,
363 of_node->name ? of_node->name : "<unknown>");
364 return NULL;
365 }
366
367 unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
368 if (!unit_address) {
369 printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
370 of_node->name ? of_node->name : "<unknown>");
371 return NULL;
372 }
373
374 /* allocate a vio_dev for this node */
375 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
376 if (!viodev) {
377 return NULL;
378 }
379 memset(viodev, 0, sizeof(struct vio_dev));
380
381 viodev->dev.platform_data = of_node_get(of_node);
382
383 viodev->irq = NO_IRQ;
384 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
385 if (irq_p) {
386 int virq = virt_irq_create_mapping(*irq_p);
387 if (virq == NO_IRQ) {
388 printk(KERN_ERR "Unable to allocate interrupt "
389 "number for %s\n", of_node->full_name);
390 } else
391 viodev->irq = irq_offset_up(virq);
392 }
393
394 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
395
396 /* register with generic device framework */
397 if (vio_register_device_common(viodev, of_node->name, of_node->type,
398 *unit_address, vio_build_iommu_table(viodev))
399 == NULL) {
400 /* XXX free TCE table */
401 kfree(viodev);
402 return NULL;
403 }
404 device_create_file(&viodev->dev, &dev_attr_devspec);
405
406 return viodev;
407}
408EXPORT_SYMBOL(vio_register_device_node);
409#endif
410
411#ifdef CONFIG_PPC_ISERIES
412/**
413 * vio_register_device: - Register a new vio device.
414 * @voidev: The device to register.
415 */
416static struct vio_dev *__init vio_register_device_iseries(char *type,
417 uint32_t unit_num)
418{
419 struct vio_dev *viodev;
420
421 DBGENTER();
422
423 /* allocate a vio_dev for this node */
424 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
425 if (!viodev)
426 return NULL;
427 memset(viodev, 0, sizeof(struct vio_dev));
428
429 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
430
431 return vio_register_device_common(viodev, viodev->dev.bus_id, type,
432 unit_num, &vio_iommu_table);
433}
434#endif
435
436void __devinit vio_unregister_device(struct vio_dev *viodev) 184void __devinit vio_unregister_device(struct vio_dev *viodev)
437{ 185{
438 DBGENTER(); 186 if (vio_bus_ops.unregister_device)
439#ifdef CONFIG_PPC_PSERIES 187 vio_bus_ops.unregister_device(viodev);
440 device_remove_file(&viodev->dev, &dev_attr_devspec);
441#endif
442 device_remove_file(&viodev->dev, &dev_attr_name); 188 device_remove_file(&viodev->dev, &dev_attr_name);
443 device_unregister(&viodev->dev); 189 device_unregister(&viodev->dev);
444} 190}
445EXPORT_SYMBOL(vio_unregister_device); 191EXPORT_SYMBOL(vio_unregister_device);
446 192
447#ifdef CONFIG_PPC_PSERIES
448/**
449 * vio_get_attribute: - get attribute for virtual device
450 * @vdev: The vio device to get property.
451 * @which: The property/attribute to be extracted.
452 * @length: Pointer to length of returned data size (unused if NULL).
453 *
454 * Calls prom.c's get_property() to return the value of the
455 * attribute specified by the preprocessor constant @which
456*/
457const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
458{
459 return get_property(vdev->dev.platform_data, (char*)which, length);
460}
461EXPORT_SYMBOL(vio_get_attribute);
462
463/* vio_find_name() - internal because only vio.c knows how we formatted the
464 * kobject name
465 * XXX once vio_bus_type.devices is actually used as a kset in
466 * drivers/base/bus.c, this function should be removed in favor of
467 * "device_find(kobj_name, &vio_bus_type)"
468 */
469static struct vio_dev *vio_find_name(const char *kobj_name)
470{
471 struct kobject *found;
472
473 found = kset_find_obj(&devices_subsys.kset, kobj_name);
474 if (!found)
475 return NULL;
476
477 return to_vio_dev(container_of(found, struct device, kobj));
478}
479
480/**
481 * vio_find_node - find an already-registered vio_dev
482 * @vnode: device_node of the virtual device we're looking for
483 */
484struct vio_dev *vio_find_node(struct device_node *vnode)
485{
486 uint32_t *unit_address;
487 char kobj_name[BUS_ID_SIZE];
488
489 /* construct the kobject name from the device node */
490 unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
491 if (!unit_address)
492 return NULL;
493 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
494
495 return vio_find_name(kobj_name);
496}
497EXPORT_SYMBOL(vio_find_node);
498
499/**
500 * vio_build_iommu_table: - gets the dma information from OF and builds the TCE tree.
501 * @dev: the virtual device.
502 *
503 * Returns a pointer to the built tce tree, or NULL if it can't
504 * find property.
505*/
506static struct iommu_table * vio_build_iommu_table(struct vio_dev *dev)
507{
508 unsigned int *dma_window;
509 struct iommu_table *newTceTable;
510 unsigned long offset;
511 int dma_window_property_size;
512
513 dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
514 if(!dma_window) {
515 return NULL;
516 }
517
518 newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
519
520 /* There should be some code to extract the phys-encoded offset
521 using prom_n_addr_cells(). However, according to a comment
522 on earlier versions, it's always zero, so we don't bother */
523 offset = dma_window[1] >> PAGE_SHIFT;
524
525 /* TCE table size - measured in tce entries */
526 newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
527 /* offset for VIO should always be 0 */
528 newTceTable->it_offset = offset;
529 newTceTable->it_busno = 0;
530 newTceTable->it_index = (unsigned long)dma_window[0];
531 newTceTable->it_type = TCE_VB;
532
533 return iommu_init_table(newTceTable);
534}
535
536int vio_enable_interrupts(struct vio_dev *dev)
537{
538 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
539 if (rc != H_Success) {
540 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
541 }
542 return rc;
543}
544EXPORT_SYMBOL(vio_enable_interrupts);
545
546int vio_disable_interrupts(struct vio_dev *dev)
547{
548 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
549 if (rc != H_Success) {
550 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
551 }
552 return rc;
553}
554EXPORT_SYMBOL(vio_disable_interrupts);
555#endif
556
557static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 193static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
558 size_t size, enum dma_data_direction direction) 194 size_t size, enum dma_data_direction direction)
559{ 195{
@@ -615,18 +251,8 @@ static int vio_bus_match(struct device *dev, struct device_driver *drv)
615 const struct vio_dev *vio_dev = to_vio_dev(dev); 251 const struct vio_dev *vio_dev = to_vio_dev(dev);
616 struct vio_driver *vio_drv = to_vio_driver(drv); 252 struct vio_driver *vio_drv = to_vio_driver(drv);
617 const struct vio_device_id *ids = vio_drv->id_table; 253 const struct vio_device_id *ids = vio_drv->id_table;
618 const struct vio_device_id *found_id;
619
620 DBGENTER();
621 254
622 if (!ids) 255 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
623 return 0;
624
625 found_id = vio_match_device(ids, vio_dev);
626 if (found_id)
627 return 1;
628
629 return 0;
630} 256}
631 257
632struct bus_type vio_bus_type = { 258struct bus_type vio_bus_type = {
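
The net effect of the vio.c rework above: the bus core no longer hard-codes pSeries and iSeries probing behind #ifdefs, but takes a struct vio_bus_ops (match, unregister_device, release_device — the layout is inferred from its uses in this file) at vio_bus_init() time. A sketch of how a pSeries back end might fill it in, reusing logic from the removed blocks; the function names are illustrative:

	static int pseries_vio_match(const struct vio_device_id *id,
			const struct vio_dev *dev)
	{
		return (strncmp(dev->type, id->type, strlen(id->type)) == 0) &&
			device_is_compatible(dev->dev.platform_data, id->compat);
	}

	static void pseries_vio_unregister_device(struct vio_dev *viodev)
	{
		device_remove_file(&viodev->dev, &dev_attr_devspec);
	}

	static void pseries_vio_release_device(struct device *dev)
	{
		/* XXX free TCE table, as the removed code noted */
		of_node_put(dev->platform_data);
	}

	static struct vio_bus_ops vio_bus_ops_pseries = {
		.match			= pseries_vio_match,
		.unregister_device	= pseries_vio_unregister_device,
		.release_device		= pseries_vio_release_device,
	};

	static int __init pseries_vio_bus_init(void)
	{
		return vio_bus_init(&vio_bus_ops_pseries);
	}
	__initcall(pseries_vio_bus_init);
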
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index fbff24827ae7..35eb49e1b890 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -129,12 +129,10 @@ _GLOBAL(__hash_page)
129 * code rather than call a C function...) 129 * code rather than call a C function...)
130 */ 130 */
131BEGIN_FTR_SECTION 131BEGIN_FTR_SECTION
132BEGIN_FTR_SECTION
133 mr r4,r30 132 mr r4,r30
134 mr r5,r7 133 mr r5,r7
135 bl .hash_page_do_lazy_icache 134 bl .hash_page_do_lazy_icache
136END_FTR_SECTION_IFSET(CPU_FTR_NOEXECUTE) 135END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
137END_FTR_SECTION_IFCLR(CPU_FTR_COHERENT_ICACHE)
138 136
139 /* At this point, r3 contains new PP bits, save them in 137 /* At this point, r3 contains new PP bits, save them in
140 * place of "access" in the param area (sic) 138 * place of "access" in the param area (sic)
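
The hash_low.S hunk collapses two nested feature sections into the two-argument END_FTR_SECTION(mask, value) form, which keeps the guarded code only when (CPU features & mask) == value. Read in C (a paraphrase, not code from the patch), the retained condition is:

	static int want_lazy_icache_flush(unsigned long cpu_features)
	{
		unsigned long mask = CPU_FTR_NOEXECUTE | CPU_FTR_COHERENT_ICACHE;

		/* same as IFSET(NOEXECUTE) combined with IFCLR(COHERENT_ICACHE) */
		return (cpu_features & mask) == CPU_FTR_NOEXECUTE;
	}
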
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index a6abd3a979bf..7626bb59954d 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -51,7 +51,6 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
51 unsigned long prpn, unsigned long vflags, 51 unsigned long prpn, unsigned long vflags,
52 unsigned long rflags) 52 unsigned long rflags)
53{ 53{
54 unsigned long arpn = physRpn_to_absRpn(prpn);
55 hpte_t *hptep = htab_address + hpte_group; 54 hpte_t *hptep = htab_address + hpte_group;
56 unsigned long hpte_v, hpte_r; 55 unsigned long hpte_v, hpte_r;
57 int i; 56 int i;
@@ -74,7 +73,7 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
74 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 73 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
75 if (vflags & HPTE_V_LARGE) 74 if (vflags & HPTE_V_LARGE)
76 va &= ~(1UL << HPTE_V_AVPN_SHIFT); 75 va &= ~(1UL << HPTE_V_AVPN_SHIFT);
77 hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 76 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
78 77
79 hptep->r = hpte_r; 78 hptep->r = hpte_r;
80 /* Guarantee the second dword is visible before the valid bit */ 79 /* Guarantee the second dword is visible before the valid bit */
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 623b5d130c31..09475c8edf7c 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -210,7 +210,7 @@ void __init htab_initialize(void)
210 210
211 /* create bolted the linear mapping in the hash table */ 211 /* create bolted the linear mapping in the hash table */
212 for (i=0; i < lmb.memory.cnt; i++) { 212 for (i=0; i < lmb.memory.cnt; i++) {
213 base = lmb.memory.region[i].physbase + KERNELBASE; 213 base = lmb.memory.region[i].base + KERNELBASE;
214 size = lmb.memory.region[i].size; 214 size = lmb.memory.region[i].size;
215 215
216 DBG("creating mapping for region: %lx : %lx\n", base, size); 216 DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -302,7 +302,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
302 int local = 0; 302 int local = 0;
303 cpumask_t tmp; 303 cpumask_t tmp;
304 304
305 if ((ea & ~REGION_MASK) > EADDR_MASK) 305 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
306 return 1; 306 return 1;
307 307
308 switch (REGION_ID(ea)) { 308 switch (REGION_ID(ea)) {
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index f9524602818d..e7833c80eb68 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -27,124 +27,94 @@
27 27
28#include <linux/sysctl.h> 28#include <linux/sysctl.h>
29 29
30#define HUGEPGDIR_SHIFT (HPAGE_SHIFT + PAGE_SHIFT - 3) 30#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
31#define HUGEPGDIR_SIZE (1UL << HUGEPGDIR_SHIFT) 31#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
32#define HUGEPGDIR_MASK (~(HUGEPGDIR_SIZE-1))
33 32
34#define HUGEPTE_INDEX_SIZE 9 33/* Modelled after find_linux_pte() */
35#define HUGEPGD_INDEX_SIZE 10 34pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
36
37#define PTRS_PER_HUGEPTE (1 << HUGEPTE_INDEX_SIZE)
38#define PTRS_PER_HUGEPGD (1 << HUGEPGD_INDEX_SIZE)
39
40static inline int hugepgd_index(unsigned long addr)
41{
42 return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
43}
44
45static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
46{ 35{
47 int index; 36 pgd_t *pg;
37 pud_t *pu;
38 pmd_t *pm;
39 pte_t *pt;
48 40
49 if (! mm->context.huge_pgdir) 41 BUG_ON(! in_hugepage_area(mm->context, addr));
50 return NULL;
51 42
43 addr &= HPAGE_MASK;
44
45 pg = pgd_offset(mm, addr);
46 if (!pgd_none(*pg)) {
47 pu = pud_offset(pg, addr);
48 if (!pud_none(*pu)) {
49 pm = pmd_offset(pu, addr);
50 pt = (pte_t *)pm;
51 BUG_ON(!pmd_none(*pm)
52 && !(pte_present(*pt) && pte_huge(*pt)));
53 return pt;
54 }
55 }
52 56
53 index = hugepgd_index(addr); 57 return NULL;
54 BUG_ON(index >= PTRS_PER_HUGEPGD);
55 return (pud_t *)(mm->context.huge_pgdir + index);
56} 58}
57 59
58static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) 60pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
59{ 61{
60 int index; 62 pgd_t *pg;
61 63 pud_t *pu;
62 if (pud_none(*dir)) 64 pmd_t *pm;
63 return NULL; 65 pte_t *pt;
64 66
65 index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
66 return (pte_t *)pud_page(*dir) + index;
67}
68
69static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
70{
71 BUG_ON(! in_hugepage_area(mm->context, addr)); 67 BUG_ON(! in_hugepage_area(mm->context, addr));
72 68
73 if (! mm->context.huge_pgdir) { 69 addr &= HPAGE_MASK;
74 pgd_t *new;
75 spin_unlock(&mm->page_table_lock);
76 /* Don't use pgd_alloc(), because we want __GFP_REPEAT */
77 new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
78 BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
79 spin_lock(&mm->page_table_lock);
80 70
81 /* 71 pg = pgd_offset(mm, addr);
82 * Because we dropped the lock, we should re-check the 72 pu = pud_alloc(mm, pg, addr);
83 * entry, as somebody else could have populated it..
84 */
85 if (mm->context.huge_pgdir)
86 pgd_free(new);
87 else
88 mm->context.huge_pgdir = new;
89 }
90 return hugepgd_offset(mm, addr);
91}
92 73
93static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) 74 if (pu) {
94{ 75 pm = pmd_alloc(mm, pu, addr);
95 if (! pud_present(*dir)) { 76 if (pm) {
96 pte_t *new; 77 pt = (pte_t *)pm;
97 78 BUG_ON(!pmd_none(*pm)
98 spin_unlock(&mm->page_table_lock); 79 && !(pte_present(*pt) && pte_huge(*pt)));
99 new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 80 return pt;
100 BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
101 spin_lock(&mm->page_table_lock);
102 /*
103 * Because we dropped the lock, we should re-check the
104 * entry, as somebody else could have populated it..
105 */
106 if (pud_present(*dir)) {
107 if (new)
108 kmem_cache_free(zero_cache, new);
109 } else {
110 struct page *ptepage;
111
112 if (! new)
113 return NULL;
114 ptepage = virt_to_page(new);
115 ptepage->mapping = (void *) mm;
116 ptepage->index = addr & HUGEPGDIR_MASK;
117 pud_populate(mm, dir, new);
118 } 81 }
119 } 82 }
120 83
121 return hugepte_offset(dir, addr); 84 return NULL;
122} 85}
123 86
124pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 87#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)
125{
126 pud_t *pud;
127 88
128 BUG_ON(! in_hugepage_area(mm->context, addr)); 89void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
90 pte_t *ptep, pte_t pte)
91{
92 int i;
129 93
130 pud = hugepgd_offset(mm, addr); 94 if (pte_present(*ptep)) {
131 if (! pud) 95 pte_clear(mm, addr, ptep);
132 return NULL; 96 flush_tlb_pending();
97 }
133 98
134 return hugepte_offset(pud, addr); 99 for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
100 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
101 ptep++;
102 }
135} 103}
136 104
137pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 105pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
106 pte_t *ptep)
138{ 107{
139 pud_t *pud; 108 unsigned long old = pte_update(ptep, ~0UL);
109 int i;
140 110
141 BUG_ON(! in_hugepage_area(mm->context, addr)); 111 if (old & _PAGE_HASHPTE)
112 hpte_update(mm, addr, old, 0);
142 113
143 pud = hugepgd_alloc(mm, addr); 114 for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
144 if (! pud) 115 ptep[i] = __pte(0);
145 return NULL;
146 116
147 return hugepte_alloc(mm, pud, addr); 117 return __pte(old);
148} 118}
149 119
150/* 120/*
@@ -162,15 +132,17 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
162 return 0; 132 return 0;
163} 133}
164 134
165static void flush_segments(void *parm) 135static void flush_low_segments(void *parm)
166{ 136{
167 u16 segs = (unsigned long) parm; 137 u16 areas = (unsigned long) parm;
168 unsigned long i; 138 unsigned long i;
169 139
170 asm volatile("isync" : : : "memory"); 140 asm volatile("isync" : : : "memory");
171 141
172 for (i = 0; i < 16; i++) { 142 BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
173 if (! (segs & (1U << i))) 143
144 for (i = 0; i < NUM_LOW_AREAS; i++) {
145 if (! (areas & (1U << i)))
174 continue; 146 continue;
175 asm volatile("slbie %0" : : "r" (i << SID_SHIFT)); 147 asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
176 } 148 }
@@ -178,13 +150,33 @@ static void flush_segments(void *parm)
178 asm volatile("isync" : : : "memory"); 150 asm volatile("isync" : : : "memory");
179} 151}
180 152
181static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg) 153static void flush_high_segments(void *parm)
182{ 154{
183 unsigned long start = seg << SID_SHIFT; 155 u16 areas = (unsigned long) parm;
184 unsigned long end = (seg+1) << SID_SHIFT; 156 unsigned long i, j;
157
158 asm volatile("isync" : : : "memory");
159
160 BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
161
162 for (i = 0; i < NUM_HIGH_AREAS; i++) {
163 if (! (areas & (1U << i)))
164 continue;
165 for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
166 asm volatile("slbie %0"
167 :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
168 }
169
170 asm volatile("isync" : : : "memory");
171}
172
173static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
174{
175 unsigned long start = area << SID_SHIFT;
176 unsigned long end = (area+1) << SID_SHIFT;
185 struct vm_area_struct *vma; 177 struct vm_area_struct *vma;
186 178
187 BUG_ON(seg >= 16); 179 BUG_ON(area >= NUM_LOW_AREAS);
188 180
189 /* Check no VMAs are in the region */ 181 /* Check no VMAs are in the region */
190 vma = find_vma(mm, start); 182 vma = find_vma(mm, start);
@@ -194,20 +186,39 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
194 return 0; 186 return 0;
195} 187}
196 188
197static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs) 189static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
190{
191 unsigned long start = area << HTLB_AREA_SHIFT;
192 unsigned long end = (area+1) << HTLB_AREA_SHIFT;
193 struct vm_area_struct *vma;
194
195 BUG_ON(area >= NUM_HIGH_AREAS);
196
197 /* Check no VMAs are in the region */
198 vma = find_vma(mm, start);
199 if (vma && (vma->vm_start < end))
200 return -EBUSY;
201
202 return 0;
203}
204
205static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
198{ 206{
199 unsigned long i; 207 unsigned long i;
200 208
201 newsegs &= ~(mm->context.htlb_segs); 209 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
202 if (! newsegs) 210 BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
211
212 newareas &= ~(mm->context.low_htlb_areas);
213 if (! newareas)
203 return 0; /* The segments we want are already open */ 214 return 0; /* The segments we want are already open */
204 215
205 for (i = 0; i < 16; i++) 216 for (i = 0; i < NUM_LOW_AREAS; i++)
206 if ((1 << i) & newsegs) 217 if ((1 << i) & newareas)
207 if (prepare_low_seg_for_htlb(mm, i) != 0) 218 if (prepare_low_area_for_htlb(mm, i) != 0)
208 return -EBUSY; 219 return -EBUSY;
209 220
210 mm->context.htlb_segs |= newsegs; 221 mm->context.low_htlb_areas |= newareas;
211 222
212 /* update the paca copy of the context struct */ 223 /* update the paca copy of the context struct */
213 get_paca()->context = mm->context; 224 get_paca()->context = mm->context;
@@ -215,29 +226,63 @@ static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
215 /* the context change must make it to memory before the flush, 226 /* the context change must make it to memory before the flush,
216 * so that further SLB misses do the right thing. */ 227 * so that further SLB misses do the right thing. */
217 mb(); 228 mb();
218 on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1); 229 on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
230
231 return 0;
232}
233
234static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
235{
236 unsigned long i;
237
238 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
239 BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
240 != NUM_HIGH_AREAS);
241
242 newareas &= ~(mm->context.high_htlb_areas);
243 if (! newareas)
244 return 0; /* The areas we want are already open */
245
246 for (i = 0; i < NUM_HIGH_AREAS; i++)
247 if ((1 << i) & newareas)
248 if (prepare_high_area_for_htlb(mm, i) != 0)
249 return -EBUSY;
250
251 mm->context.high_htlb_areas |= newareas;
252
253 /* update the paca copy of the context struct */
254 get_paca()->context = mm->context;
255
256 /* the context change must make it to memory before the flush,
257 * so that further SLB misses do the right thing. */
258 mb();
259 on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
219 260
220 return 0; 261 return 0;
221} 262}
222 263
223int prepare_hugepage_range(unsigned long addr, unsigned long len) 264int prepare_hugepage_range(unsigned long addr, unsigned long len)
224{ 265{
225 if (within_hugepage_high_range(addr, len)) 266 int err;
226 return 0; 267
227 else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) { 268 if ( (addr+len) < addr )
228 int err; 269 return -EINVAL;
229 /* Yes, we need both tests, in case addr+len overflows 270
230 * 64-bit arithmetic */ 271 if ((addr + len) < 0x100000000UL)
231 err = open_low_hpage_segs(current->mm, 272 err = open_low_hpage_areas(current->mm,
232 LOW_ESID_MASK(addr, len)); 273 LOW_ESID_MASK(addr, len));
233 if (err) 274 else
234 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 275 err = open_high_hpage_areas(current->mm,
235 " failed (segs: 0x%04hx)\n", addr, len, 276 HTLB_AREA_MASK(addr, len));
236 LOW_ESID_MASK(addr, len)); 277 if (err) {
278 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
279 " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
280 addr, len,
281 LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
237 return err; 282 return err;
238 } 283 }
239 284
240 return -EINVAL; 285 return 0;
241} 286}
242 287
243struct page * 288struct page *
@@ -309,8 +354,8 @@ full_search:
309 vma = find_vma(mm, addr); 354 vma = find_vma(mm, addr);
310 continue; 355 continue;
311 } 356 }
312 if (touches_hugepage_high_range(addr, len)) { 357 if (touches_hugepage_high_range(mm, addr, len)) {
313 addr = TASK_HPAGE_END; 358 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
314 vma = find_vma(mm, addr); 359 vma = find_vma(mm, addr);
315 continue; 360 continue;
316 } 361 }
@@ -389,8 +434,9 @@ hugepage_recheck:
389 if (touches_hugepage_low_range(mm, addr, len)) { 434 if (touches_hugepage_low_range(mm, addr, len)) {
390 addr = (addr & ((~0) << SID_SHIFT)) - len; 435 addr = (addr & ((~0) << SID_SHIFT)) - len;
391 goto hugepage_recheck; 436 goto hugepage_recheck;
392 } else if (touches_hugepage_high_range(addr, len)) { 437 } else if (touches_hugepage_high_range(mm, addr, len)) {
393 addr = TASK_HPAGE_BASE - len; 438 addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
439 goto hugepage_recheck;
394 } 440 }
395 441
396 /* 442 /*
@@ -481,23 +527,28 @@ static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
481 return -ENOMEM; 527 return -ENOMEM;
482} 528}
483 529
484static unsigned long htlb_get_high_area(unsigned long len) 530static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
485{ 531{
486 unsigned long addr = TASK_HPAGE_BASE; 532 unsigned long addr = 0x100000000UL;
487 struct vm_area_struct *vma; 533 struct vm_area_struct *vma;
488 534
489 vma = find_vma(current->mm, addr); 535 vma = find_vma(current->mm, addr);
490 for (vma = find_vma(current->mm, addr); 536 while (addr + len <= TASK_SIZE_USER64) {
491 addr + len <= TASK_HPAGE_END;
492 vma = vma->vm_next) {
493 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ 537 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
494 BUG_ON(! within_hugepage_high_range(addr, len)); 538
539 if (! __within_hugepage_high_range(addr, len, areamask)) {
540 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
541 vma = find_vma(current->mm, addr);
542 continue;
543 }
495 544
496 if (!vma || (addr + len) <= vma->vm_start) 545 if (!vma || (addr + len) <= vma->vm_start)
497 return addr; 546 return addr;
498 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 547 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
499 /* Because we're in a hugepage region, this alignment 548 /* Depending on segmask this might not be a confirmed
500 * should not skip us over any VMAs */ 549 * hugepage region, so the ALIGN could have skipped
550 * some VMAs */
551 vma = find_vma(current->mm, addr);
501 } 552 }
502 553
503 return -ENOMEM; 554 return -ENOMEM;
@@ -507,6 +558,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
507 unsigned long len, unsigned long pgoff, 558 unsigned long len, unsigned long pgoff,
508 unsigned long flags) 559 unsigned long flags)
509{ 560{
561 int lastshift;
562 u16 areamask, curareas;
563
510 if (len & ~HPAGE_MASK) 564 if (len & ~HPAGE_MASK)
511 return -EINVAL; 565 return -EINVAL;
512 566
@@ -514,67 +568,49 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
514 return -EINVAL; 568 return -EINVAL;
515 569
516 if (test_thread_flag(TIF_32BIT)) { 570 if (test_thread_flag(TIF_32BIT)) {
517 int lastshift = 0; 571 curareas = current->mm->context.low_htlb_areas;
518 u16 segmask, cursegs = current->mm->context.htlb_segs;
519 572
520 /* First see if we can do the mapping in the existing 573 /* First see if we can do the mapping in the existing
521 * low hpage segments */ 574 * low areas */
522 addr = htlb_get_low_area(len, cursegs); 575 addr = htlb_get_low_area(len, curareas);
523 if (addr != -ENOMEM) 576 if (addr != -ENOMEM)
524 return addr; 577 return addr;
525 578
526 for (segmask = LOW_ESID_MASK(0x100000000UL-len, len); 579 lastshift = 0;
527 ! lastshift; segmask >>=1) { 580 for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
528 if (segmask & 1) 581 ! lastshift; areamask >>=1) {
582 if (areamask & 1)
529 lastshift = 1; 583 lastshift = 1;
530 584
531 addr = htlb_get_low_area(len, cursegs | segmask); 585 addr = htlb_get_low_area(len, curareas | areamask);
532 if ((addr != -ENOMEM) 586 if ((addr != -ENOMEM)
533 && open_low_hpage_segs(current->mm, segmask) == 0) 587 && open_low_hpage_areas(current->mm, areamask) == 0)
534 return addr; 588 return addr;
535 } 589 }
536 printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
537 " enough segments\n");
538 return -ENOMEM;
539 } else { 590 } else {
540 return htlb_get_high_area(len); 591 curareas = current->mm->context.high_htlb_areas;
541 }
542}
543
544void hugetlb_mm_free_pgd(struct mm_struct *mm)
545{
546 int i;
547 pgd_t *pgdir;
548
549 spin_lock(&mm->page_table_lock);
550
551 pgdir = mm->context.huge_pgdir;
552 if (! pgdir)
553 goto out;
554
555 mm->context.huge_pgdir = NULL;
556 592
557 /* cleanup any hugepte pages leftover */ 593 /* First see if we can do the mapping in the existing
558 for (i = 0; i < PTRS_PER_HUGEPGD; i++) { 594 * high areas */
559 pud_t *pud = (pud_t *)(pgdir + i); 595 addr = htlb_get_high_area(len, curareas);
560 596 if (addr != -ENOMEM)
561 if (! pud_none(*pud)) { 597 return addr;
562 pte_t *pte = (pte_t *)pud_page(*pud);
563 struct page *ptepage = virt_to_page(pte);
564 598
565 ptepage->mapping = NULL; 599 lastshift = 0;
600 for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
601 ! lastshift; areamask >>=1) {
602 if (areamask & 1)
603 lastshift = 1;
566 604
567 BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); 605 addr = htlb_get_high_area(len, curareas | areamask);
568 kmem_cache_free(zero_cache, pte); 606 if ((addr != -ENOMEM)
607 && open_high_hpage_areas(current->mm, areamask) == 0)
608 return addr;
569 } 609 }
570 pud_clear(pud);
571 } 610 }
572 611 printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
573 BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); 612 " enough areas\n");
574 kmem_cache_free(zero_cache, pgdir); 613 return -ENOMEM;
575
576 out:
577 spin_unlock(&mm->page_table_lock);
578} 614}
579 615
580int hash_huge_page(struct mm_struct *mm, unsigned long access, 616int hash_huge_page(struct mm_struct *mm, unsigned long access,
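
Throughout the hugetlbpage.c rework, LOW_ESID_MASK() and HTLB_AREA_MASK() compute which 256MB segments (SID_SHIFT is 28 on ppc64) or larger high areas a proposed mapping touches, one bit per area. The macros themselves live in a header this diff does not show; a loop-form sketch of the low-area case, under that assumption:

	static u16 low_area_mask(unsigned long addr, unsigned long len)
	{
		unsigned long first = addr >> SID_SHIFT;
		unsigned long last = (addr + len - 1) >> SID_SHIFT;
		u16 mask = 0;
		unsigned long i;

		/* one bit per 256MB segment below 4GB that the range covers */
		for (i = first; i <= last && i < NUM_LOW_AREAS; i++)
			mask |= 1U << i;
		return mask;
	}
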
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index b6e75b891ac0..c65b87b92756 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -31,7 +31,7 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
31 break; 31 break;
32 if ((unsigned long)tmp->addr >= ioremap_bot) 32 if ((unsigned long)tmp->addr >= ioremap_bot)
33 addr = tmp->size + (unsigned long) tmp->addr; 33 addr = tmp->size + (unsigned long) tmp->addr;
34 if (addr > IMALLOC_END-size) 34 if (addr >= IMALLOC_END-size)
35 return 1; 35 return 1;
36 } 36 }
37 *im_addr = addr; 37 *im_addr = addr;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..c02dc9809ca5 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -42,7 +42,6 @@
42 42
43#include <asm/pgalloc.h> 43#include <asm/pgalloc.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <asm/abs_addr.h>
46#include <asm/prom.h> 45#include <asm/prom.h>
47#include <asm/lmb.h> 46#include <asm/lmb.h>
48#include <asm/rtas.h> 47#include <asm/rtas.h>
@@ -66,6 +65,14 @@
66#include <asm/vdso.h> 65#include <asm/vdso.h>
67#include <asm/imalloc.h> 66#include <asm/imalloc.h>
68 67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
69int mem_init_done; 76int mem_init_done;
70unsigned long ioremap_bot = IMALLOC_BASE; 77unsigned long ioremap_bot = IMALLOC_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE; 78static unsigned long phbs_io_bot = PHBS_IO_BASE;
@@ -159,7 +166,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
159 ptep = pte_alloc_kernel(&init_mm, pmdp, ea); 166 ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
160 if (!ptep) 167 if (!ptep)
161 return -ENOMEM; 168 return -ENOMEM;
162 pa = abs_to_phys(pa);
163 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 169 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
164 __pgprot(flags))); 170 __pgprot(flags)));
165 spin_unlock(&init_mm.page_table_lock); 171 spin_unlock(&init_mm.page_table_lock);
@@ -226,7 +232,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
226 * Before that, we map using addresses going 232 * Before that, we map using addresses going
227 * up from ioremap_bot. imalloc will use 233 * up from ioremap_bot. imalloc will use
228 * the addresses from ioremap_bot through 234 * the addresses from ioremap_bot through
229 * IMALLOC_END (0xE000001fffffffff) 235 * IMALLOC_END
230 * 236 *
231 */ 237 */
232 pa = addr & PAGE_MASK; 238 pa = addr & PAGE_MASK;
@@ -417,12 +423,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
417 int index; 423 int index;
418 int err; 424 int err;
419 425
420#ifdef CONFIG_HUGETLB_PAGE
421 /* We leave htlb_segs as it was, but for a fork, we need to
422 * clear the huge_pgdir. */
423 mm->context.huge_pgdir = NULL;
424#endif
425
426again: 426again:
427 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) 427 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
428 return -ENOMEM; 428 return -ENOMEM;
@@ -453,8 +453,6 @@ void destroy_context(struct mm_struct *mm)
453 spin_unlock(&mmu_context_lock); 453 spin_unlock(&mmu_context_lock);
454 454
455 mm->context.id = NO_CONTEXT; 455 mm->context.id = NO_CONTEXT;
456
457 hugetlb_mm_free_pgd(mm);
458} 456}
459 457
460/* 458/*
@@ -484,9 +482,9 @@ void __init mm_init_ppc64(void)
484 for (i = 1; i < lmb.memory.cnt; i++) { 482 for (i = 1; i < lmb.memory.cnt; i++) {
485 unsigned long base, prevbase, prevsize; 483 unsigned long base, prevbase, prevsize;
486 484
487 prevbase = lmb.memory.region[i-1].physbase; 485 prevbase = lmb.memory.region[i-1].base;
488 prevsize = lmb.memory.region[i-1].size; 486 prevsize = lmb.memory.region[i-1].size;
489 base = lmb.memory.region[i].physbase; 487 base = lmb.memory.region[i].base;
490 if (base > (prevbase + prevsize)) { 488 if (base > (prevbase + prevsize)) {
491 io_hole_start = prevbase + prevsize; 489 io_hole_start = prevbase + prevsize;
492 io_hole_size = base - (prevbase + prevsize); 490 io_hole_size = base - (prevbase + prevsize);
@@ -513,11 +511,8 @@ int page_is_ram(unsigned long pfn)
513 for (i=0; i < lmb.memory.cnt; i++) { 511 for (i=0; i < lmb.memory.cnt; i++) {
514 unsigned long base; 512 unsigned long base;
515 513
516#ifdef CONFIG_MSCHUNKS
517 base = lmb.memory.region[i].physbase;
518#else
519 base = lmb.memory.region[i].base; 514 base = lmb.memory.region[i].base;
520#endif 515
521 if ((paddr >= base) && 516 if ((paddr >= base) &&
522 (paddr < (base + lmb.memory.region[i].size))) { 517 (paddr < (base + lmb.memory.region[i].size))) {
523 return 1; 518 return 1;
@@ -547,7 +542,7 @@ void __init do_init_bootmem(void)
547 */ 542 */
548 bootmap_pages = bootmem_bootmap_pages(total_pages); 543 bootmap_pages = bootmem_bootmap_pages(total_pages);
549 544
550 start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE)); 545 start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
551 BUG_ON(!start); 546 BUG_ON(!start);
552 547
553 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); 548 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
@@ -558,25 +553,25 @@ void __init do_init_bootmem(void)
558 * present. 553 * present.
559 */ 554 */
560 for (i=0; i < lmb.memory.cnt; i++) { 555 for (i=0; i < lmb.memory.cnt; i++) {
561 unsigned long physbase, size; 556 unsigned long base, size;
562 unsigned long start_pfn, end_pfn; 557 unsigned long start_pfn, end_pfn;
563 558
564 physbase = lmb.memory.region[i].physbase; 559 base = lmb.memory.region[i].base;
565 size = lmb.memory.region[i].size; 560 size = lmb.memory.region[i].size;
566 561
567 start_pfn = physbase >> PAGE_SHIFT; 562 start_pfn = base >> PAGE_SHIFT;
568 end_pfn = start_pfn + (size >> PAGE_SHIFT); 563 end_pfn = start_pfn + (size >> PAGE_SHIFT);
569 memory_present(0, start_pfn, end_pfn); 564 memory_present(0, start_pfn, end_pfn);
570 565
571 free_bootmem(physbase, size); 566 free_bootmem(base, size);
572 } 567 }
573 568
574 /* reserve the sections we're already using */ 569 /* reserve the sections we're already using */
575 for (i=0; i < lmb.reserved.cnt; i++) { 570 for (i=0; i < lmb.reserved.cnt; i++) {
576 unsigned long physbase = lmb.reserved.region[i].physbase; 571 unsigned long base = lmb.reserved.region[i].base;
577 unsigned long size = lmb.reserved.region[i].size; 572 unsigned long size = lmb.reserved.region[i].size;
578 573
579 reserve_bootmem(physbase, size); 574 reserve_bootmem(base, size);
580 } 575 }
581} 576}
582 577
@@ -615,10 +610,10 @@ static int __init setup_kcore(void)
615 int i; 610 int i;
616 611
617 for (i=0; i < lmb.memory.cnt; i++) { 612 for (i=0; i < lmb.memory.cnt; i++) {
618 unsigned long physbase, size; 613 unsigned long base, size;
619 struct kcore_list *kcore_mem; 614 struct kcore_list *kcore_mem;
620 615
621 physbase = lmb.memory.region[i].physbase; 616 base = lmb.memory.region[i].base;
622 size = lmb.memory.region[i].size; 617 size = lmb.memory.region[i].size;
623 618
624 /* GFP_ATOMIC to avoid might_sleep warnings during boot */ 619 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
@@ -626,7 +621,7 @@ static int __init setup_kcore(void)
626 if (!kcore_mem) 621 if (!kcore_mem)
627 panic("mem_init: kmalloc failed\n"); 622 panic("mem_init: kmalloc failed\n");
628 623
629 kclist_add(kcore_mem, __va(physbase), size); 624 kclist_add(kcore_mem, __va(base), size);
630 } 625 }
631 626
632 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); 627 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
@@ -686,9 +681,6 @@ void __init mem_init(void)
686 681
687 mem_init_done = 1; 682 mem_init_done = 1;
688 683
689#ifdef CONFIG_PPC_ISERIES
690 iommu_vio_init();
691#endif
692 /* Initialize the vDSO */ 684 /* Initialize the vDSO */
693 vdso_init(); 685 vdso_init();
694} 686}
@@ -833,23 +825,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
833 return virt_addr; 825 return virt_addr;
834} 826}
835 827
836kmem_cache_t *zero_cache; 828static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
837
838static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
839{ 829{
840 memset(pte, 0, PAGE_SIZE); 830 memset(addr, 0, kmem_cache_size(cache));
841} 831}
842 832
833static const int pgtable_cache_size[2] = {
834 PTE_TABLE_SIZE, PMD_TABLE_SIZE
835};
836static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
837 "pgd_pte_cache", "pud_pmd_cache",
838};
839
840kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
841
843void pgtable_cache_init(void) 842void pgtable_cache_init(void)
844{ 843{
845 zero_cache = kmem_cache_create("zero", 844 int i;
846 PAGE_SIZE, 845
847 0, 846 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
848 SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, 847 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
849 zero_ctor, 848 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
850 NULL); 849 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
851 if (!zero_cache) 850
852 panic("pgtable_cache_init(): could not create zero_cache!\n"); 851 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
852 int size = pgtable_cache_size[i];
853 const char *name = pgtable_cache_name[i];
854
855 pgtable_cache[i] = kmem_cache_create(name,
856 size, size,
857 SLAB_HWCACHE_ALIGN
858 | SLAB_MUST_HWCACHE_ALIGN,
859 zero_ctor,
860 NULL);
861 if (! pgtable_cache[i])
862 panic("pgtable_cache_init(): could not create %s!\n",
863 name);
864 }
853} 865}
854 866
855pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 867pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
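
A sketch of how the new caches are consumed — the real allocators live in pgalloc.h and these helper names are illustrative. Each table is allocated from a cache sized exactly to the table rather than a full page, and zero_ctor() guarantees it arrives zeroed; PTE_CACHE_NUM and friends index pgtable_cache[] as the BUILD_BUG_ON checks above pin down:

	static pte_t *pte_table_alloc(void)
	{
		return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
					GFP_KERNEL | __GFP_REPEAT);
	}

	static void pte_table_free(pte_t *pte)
	{
		kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
	}
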
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index 0b191f2de016..c3116f0d788c 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -671,7 +671,7 @@ new_range:
671 * Mark reserved regions on this node 671 * Mark reserved regions on this node
672 */ 672 */
673 for (i = 0; i < lmb.reserved.cnt; i++) { 673 for (i = 0; i < lmb.reserved.cnt; i++) {
674 unsigned long physbase = lmb.reserved.region[i].physbase; 674 unsigned long physbase = lmb.reserved.region[i].base;
675 unsigned long size = lmb.reserved.region[i].size; 675 unsigned long size = lmb.reserved.region[i].size;
676 676
677 if (pa_to_nid(physbase) != nid && 677 if (pa_to_nid(physbase) != nid &&
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
index 8379d678f70f..bab255889c58 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/ppc64/mm/slb_low.S
@@ -89,28 +89,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
89 b 9f 89 b 9f
90 90
910: /* user address: proto-VSID = context<<15 | ESID */ 910: /* user address: proto-VSID = context<<15 | ESID */
92 li r11,SLB_VSID_USER 92 srdi. r9,r3,USER_ESID_BITS
93
94 srdi. r9,r3,13
95 bne- 8f /* invalid ea bits set */ 93 bne- 8f /* invalid ea bits set */
96 94
97#ifdef CONFIG_HUGETLB_PAGE 95#ifdef CONFIG_HUGETLB_PAGE
98BEGIN_FTR_SECTION 96BEGIN_FTR_SECTION
99 /* check against the hugepage ranges */ 97 lhz r9,PACAHIGHHTLBAREAS(r13)
100 cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT) 98 srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
101 bge 6f /* >= TASK_HPAGE_END */ 99 srd r9,r9,r11
102 cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT) 100 andi. r9,r9,1
103 bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */ 101 bne 5f
102
103 li r11,SLB_VSID_USER
104
104 cmpldi r3,16 105 cmpldi r3,16
105 bge 6f /* 4GB..TASK_HPAGE_BASE */ 106 bge 6f
106 107
107 lhz r9,PACAHTLBSEGS(r13) 108 lhz r9,PACALOWHTLBAREAS(r13)
108 srd r9,r9,r3 109 srd r9,r9,r3
109 andi. r9,r9,1 110 andi. r9,r9,1
111
110 beq 6f 112 beq 6f
111 113
1125: /* this is a hugepage user address */ 1145: li r11,SLB_VSID_USER|SLB_VSID_L
113 li r11,(SLB_VSID_USER|SLB_VSID_L)
114END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) 115END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
115#endif /* CONFIG_HUGETLB_PAGE */ 116#endif /* CONFIG_HUGETLB_PAGE */
116 117
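
In C terms, the reworked SLB-miss test above does the following (r3 holds the ESID, i.e. ea >> SID_SHIFT; the paca carries a copy of the mm context — a sketch, with the access path assumed from the PACALOWHTLBAREAS/PACAHIGHHTLBAREAS loads):

	static int esid_maps_hugepage(struct paca_struct *paca, unsigned long esid)
	{
		/* high areas are HTLB_AREA_SHIFT-sized, so scale the ESID down */
		if ((paca->context.high_htlb_areas >>
		     (esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1)
			return 1;
		if (esid >= 16)
			return 0;	/* >= 4GB and not a high hugepage area */
		return (paca->context.low_htlb_areas >> esid) & 1;
	}
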
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 26f0172c4527..d8a6593a13f0 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -41,7 +41,58 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); 41DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
42unsigned long pte_freelist_forced_free; 42unsigned long pte_freelist_forced_free;
43 43
44void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage) 44struct pte_freelist_batch
45{
46 struct rcu_head rcu;
47 unsigned int index;
48 pgtable_free_t tables[0];
49};
50
51DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
52unsigned long pte_freelist_forced_free;
53
54#define PTE_FREELIST_SIZE \
55 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
56 / sizeof(pgtable_free_t))
57
58#ifdef CONFIG_SMP
59static void pte_free_smp_sync(void *arg)
60{
61 /* Do nothing, just ensure we sync with all CPUs */
62}
63#endif
64
65/* This is only called when we are critically out of memory
66 * (and fail to get a page in pte_free_tlb).
67 */
68static void pgtable_free_now(pgtable_free_t pgf)
69{
70 pte_freelist_forced_free++;
71
72 smp_call_function(pte_free_smp_sync, NULL, 0, 1);
73
74 pgtable_free(pgf);
75}
76
77static void pte_free_rcu_callback(struct rcu_head *head)
78{
79 struct pte_freelist_batch *batch =
80 container_of(head, struct pte_freelist_batch, rcu);
81 unsigned int i;
82
83 for (i = 0; i < batch->index; i++)
84 pgtable_free(batch->tables[i]);
85
86 free_page((unsigned long)batch);
87}
88
89static void pte_free_submit(struct pte_freelist_batch *batch)
90{
91 INIT_RCU_HEAD(&batch->rcu);
92 call_rcu(&batch->rcu, pte_free_rcu_callback);
93}
94
95void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
45{ 96{
46 /* This is safe as we are holding page_table_lock */ 97 /* This is safe as we are holding page_table_lock */
47 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id()); 98 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
@@ -49,19 +100,19 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
49 100
50 if (atomic_read(&tlb->mm->mm_users) < 2 || 101 if (atomic_read(&tlb->mm->mm_users) < 2 ||
51 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) { 102 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
52 pte_free(ptepage); 103 pgtable_free(pgf);
53 return; 104 return;
54 } 105 }
55 106
56 if (*batchp == NULL) { 107 if (*batchp == NULL) {
57 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); 108 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
58 if (*batchp == NULL) { 109 if (*batchp == NULL) {
59 pte_free_now(ptepage); 110 pgtable_free_now(pgf);
60 return; 111 return;
61 } 112 }
62 (*batchp)->index = 0; 113 (*batchp)->index = 0;
63 } 114 }
64 (*batchp)->pages[(*batchp)->index++] = ptepage; 115 (*batchp)->tables[(*batchp)->index++] = pgf;
65 if ((*batchp)->index == PTE_FREELIST_SIZE) { 116 if ((*batchp)->index == PTE_FREELIST_SIZE) {
66 pte_free_submit(*batchp); 117 pte_free_submit(*batchp);
67 *batchp = NULL; 118 *batchp = NULL;
@@ -132,42 +183,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
132 put_cpu(); 183 put_cpu();
133} 184}
134 185
135#ifdef CONFIG_SMP
136static void pte_free_smp_sync(void *arg)
137{
138 /* Do nothing, just ensure we sync with all CPUs */
139}
140#endif
141
142/* This is only called when we are critically out of memory
143 * (and fail to get a page in pte_free_tlb).
144 */
145void pte_free_now(struct page *ptepage)
146{
147 pte_freelist_forced_free++;
148
149 smp_call_function(pte_free_smp_sync, NULL, 0, 1);
150
151 pte_free(ptepage);
152}
153
154static void pte_free_rcu_callback(struct rcu_head *head)
155{
156 struct pte_freelist_batch *batch =
157 container_of(head, struct pte_freelist_batch, rcu);
158 unsigned int i;
159
160 for (i = 0; i < batch->index; i++)
161 pte_free(batch->pages[i]);
162 free_page((unsigned long)batch);
163}
164
165void pte_free_submit(struct pte_freelist_batch *batch)
166{
167 INIT_RCU_HEAD(&batch->rcu);
168 call_rcu(&batch->rcu, pte_free_rcu_callback);
169}
170
171void pte_free_finish(void) 186void pte_free_finish(void)
172{ 187{
173 /* This is safe as we are holding page_table_lock */ 188 /* This is safe as we are holding page_table_lock */
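
The tlb.c rework frees page tables through an opaque pgtable_free_t handle rather than a struct page, so one RCU-deferred path serves every pagetable level. The handle is defined in pgalloc.h, not in this diff; a plausible sketch of its shape, packing the pointer and the pgtable_cache index into one word:

	typedef struct pgtable_free {
		unsigned long val;
	} pgtable_free_t;

	#define PGF_CACHENUM_MASK	0xfUL	/* assumed low-bits encoding */

	static inline void pgtable_free(pgtable_free_t pgf)
	{
		void *table = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
		int cachenum = pgf.val & PGF_CACHENUM_MASK;

		kmem_cache_free(pgtable_cache[cachenum], table);
	}
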
diff --git a/arch/ppc64/oprofile/common.c b/arch/ppc64/oprofile/common.c
index b28bfda23d94..4acd1a424933 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/ppc64/oprofile/common.c
@@ -153,6 +153,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
153 153
154 case PV_970: 154 case PV_970:
155 case PV_970FX: 155 case PV_970FX:
156 case PV_970MP:
156 model = &op_model_power4; 157 model = &op_model_power4;
157 model->num_counters = 8; 158 model->num_counters = 8;
158 ops->cpu_type = "ppc64/970"; 159 ops->cpu_type = "ppc64/970";
diff --git a/arch/ppc64/xmon/start.c b/arch/ppc64/xmon/start.c
index a9265bcc79b2..f86b584acd76 100644
--- a/arch/ppc64/xmon/start.c
+++ b/arch/ppc64/xmon/start.c
@@ -27,7 +27,7 @@ static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
27 struct tty_struct *tty) 27 struct tty_struct *tty)
28{ 28{
29 /* ensure xmon is enabled */ 29 /* ensure xmon is enabled */
30 xmon_init(); 30 xmon_init(1);
31 debugger(pt_regs); 31 debugger(pt_regs);
32} 32}
33 33
diff --git a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c
index 05539439e6bc..45908b10acd3 100644
--- a/arch/ppc64/xmon/xmon.c
+++ b/arch/ppc64/xmon/xmon.c
@@ -2496,15 +2496,25 @@ static void dump_stab(void)
2496 } 2496 }
2497} 2497}
2498 2498
2499void xmon_init(void) 2499void xmon_init(int enable)
2500{ 2500{
2501 __debugger = xmon; 2501 if (enable) {
2502 __debugger_ipi = xmon_ipi; 2502 __debugger = xmon;
2503 __debugger_bpt = xmon_bpt; 2503 __debugger_ipi = xmon_ipi;
2504 __debugger_sstep = xmon_sstep; 2504 __debugger_bpt = xmon_bpt;
2505 __debugger_iabr_match = xmon_iabr_match; 2505 __debugger_sstep = xmon_sstep;
2506 __debugger_dabr_match = xmon_dabr_match; 2506 __debugger_iabr_match = xmon_iabr_match;
2507 __debugger_fault_handler = xmon_fault_handler; 2507 __debugger_dabr_match = xmon_dabr_match;
2508 __debugger_fault_handler = xmon_fault_handler;
2509 } else {
2510 __debugger = NULL;
2511 __debugger_ipi = NULL;
2512 __debugger_bpt = NULL;
2513 __debugger_sstep = NULL;
2514 __debugger_iabr_match = NULL;
2515 __debugger_dabr_match = NULL;
2516 __debugger_fault_handler = NULL;
2517 }
2508} 2518}
2509 2519
2510void dump_segments(void) 2520void dump_segments(void)
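
xmon_init() previously could only install the debugger hooks; giving it an int argument makes registration reversible, so a caller such as the sysrq handler in start.c passes 1 to (re)arm xmon, while passing 0 detaches every __debugger_* hook again. A tiny sketch of the same arm/disarm pattern, with hypothetical hook names standing in for the __debugger_* pointers:

    #include <stddef.h>

    typedef void (*dbg_hook_t)(void);

    /* Stand-ins for the __debugger_* function pointers above. */
    static dbg_hook_t dbg_trap, dbg_bpt;

    static void my_trap(void) { }
    static void my_bpt(void)  { }

    static void debugger_init(int enable)
    {
            if (enable) {           /* arm: install every hook */
                    dbg_trap = my_trap;
                    dbg_bpt  = my_bpt;
            } else {                /* disarm: clear them all again */
                    dbg_trap = NULL;
                    dbg_bpt  = NULL;
            }
    }

    int main(void)
    {
            debugger_init(1);       /* like xmon_init(1) from sysrq */
            debugger_init(0);       /* the new disable path */
            return dbg_trap == NULL ? 0 : 1;
    }
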
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index d05d65ac9694..7358cdb8441f 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -637,12 +637,11 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
637 else 637 else
638 setup_frame32(sig, ka, oldset, regs); 638 setup_frame32(sig, ka, oldset, regs);
639 639
640 if (!(ka->sa.sa_flags & SA_NODEFER)) { 640 spin_lock_irq(&current->sighand->siglock);
641 spin_lock_irq(&current->sighand->siglock); 641 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
642 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 642 if (!(ka->sa.sa_flags & SA_NODEFER))
643 sigaddset(&current->blocked,sig); 643 sigaddset(&current->blocked,sig);
644 recalc_sigpending(); 644 recalc_sigpending();
645 spin_unlock_irq(&current->sighand->siglock); 645 spin_unlock_irq(&current->sighand->siglock);
646 }
647} 646}
648 647
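
This s390 hunk is the first instance of a signal-delivery fix repeated verbatim for sh, sh64, sparc and sparc64 below: previously a handler installed with SA_NODEFER (SA_NOMASK on sparc) skipped the whole blocking sequence, so its sa_mask was never applied either. The rewritten code always takes siglock and ORs sa_mask into the blocked set, gating only the self-blocking sigaddset() on the flag, which matches the POSIX rule that SA_NODEFER exempts just the delivered signal itself. A userspace sketch of the corrected logic, with glibc's sigorset() standing in for the kernel's sigorsets():

    #define _GNU_SOURCE
    #include <signal.h>

    static void block_for_handler(sigset_t *blocked,
                                  const struct sigaction *ka, int sig)
    {
            /* Always apply the handler's sa_mask... */
            sigorset(blocked, blocked, &ka->sa_mask);
            /* ...but block the signal itself only without SA_NODEFER. */
            if (!(ka->sa_flags & SA_NODEFER))
                    sigaddset(blocked, sig);
    }

    int main(void)
    {
            sigset_t blocked;
            struct sigaction ka = { .sa_flags = SA_NODEFER };

            sigemptyset(&blocked);
            sigemptyset(&ka.sa_mask);
            sigaddset(&ka.sa_mask, SIGUSR1);

            block_for_handler(&blocked, &ka, SIGINT);
            /* SIGUSR1 now blocked; SIGINT not, due to SA_NODEFER. */
            return (sigismember(&blocked, SIGUSR1) == 1 &&
                    sigismember(&blocked, SIGINT) == 0) ? 0 : 1;
    }
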
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 610c1d03e975..6a3f5b7473a9 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -429,13 +429,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
429 else 429 else
430 setup_frame(sig, ka, oldset, regs); 430 setup_frame(sig, ka, oldset, regs);
431 431
432 if (!(ka->sa.sa_flags & SA_NODEFER)) { 432 spin_lock_irq(&current->sighand->siglock);
433 spin_lock_irq(&current->sighand->siglock); 433 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
434 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 434 if (!(ka->sa.sa_flags & SA_NODEFER))
435 sigaddset(&current->blocked,sig); 435 sigaddset(&current->blocked,sig);
436 recalc_sigpending(); 436 recalc_sigpending();
437 spin_unlock_irq(&current->sighand->siglock); 437 spin_unlock_irq(&current->sighand->siglock);
438 }
439} 438}
440 439
441/* 440/*
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 8022243f0178..b475c4d2405f 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -546,13 +546,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
546 if (ka->sa.sa_flags & SA_ONESHOT) 546 if (ka->sa.sa_flags & SA_ONESHOT)
547 ka->sa.sa_handler = SIG_DFL; 547 ka->sa.sa_handler = SIG_DFL;
548 548
549 if (!(ka->sa.sa_flags & SA_NODEFER)) { 549 spin_lock_irq(&current->sighand->siglock);
550 spin_lock_irq(&current->sighand->siglock); 550 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
551 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 551 if (!(ka->sa.sa_flags & SA_NODEFER))
552 sigaddset(&current->blocked,sig); 552 sigaddset(&current->blocked,sig);
553 recalc_sigpending(); 553 recalc_sigpending();
554 spin_unlock_irq(&current->sighand->siglock); 554 spin_unlock_irq(&current->sighand->siglock);
555 }
556} 555}
557 556
558/* 557/*
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index c6a14a87c59b..3ea8929e483b 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -664,13 +664,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
664 else 664 else
665 setup_frame(sig, ka, oldset, regs); 665 setup_frame(sig, ka, oldset, regs);
666 666
667 if (!(ka->sa.sa_flags & SA_NODEFER)) { 667 spin_lock_irq(&current->sighand->siglock);
668 spin_lock_irq(&current->sighand->siglock); 668 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
669 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 669 if (!(ka->sa.sa_flags & SA_NODEFER))
670 sigaddset(&current->blocked,sig); 670 sigaddset(&current->blocked,sig);
671 recalc_sigpending(); 671 recalc_sigpending();
672 spin_unlock_irq(&current->sighand->siglock); 672 spin_unlock_irq(&current->sighand->siglock);
673 }
674} 673}
675 674
676/* 675/*
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 55352ed85e8a..53c192a4982f 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -32,7 +32,6 @@
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/root_dev.h> 33#include <linux/root_dev.h>
34 34
35#include <asm/segment.h>
36#include <asm/system.h> 35#include <asm/system.h>
37#include <asm/io.h> 36#include <asm/io.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 011ff35057a5..5f34d7dc2b89 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1034,13 +1034,12 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
1034 else 1034 else
1035 setup_frame(&ka->sa, regs, signr, oldset, info); 1035 setup_frame(&ka->sa, regs, signr, oldset, info);
1036 } 1036 }
1037 if (!(ka->sa.sa_flags & SA_NOMASK)) { 1037 spin_lock_irq(&current->sighand->siglock);
1038 spin_lock_irq(&current->sighand->siglock); 1038 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
1039 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 1039 if (!(ka->sa.sa_flags & SA_NOMASK))
1040 sigaddset(&current->blocked, signr); 1040 sigaddset(&current->blocked, signr);
1041 recalc_sigpending(); 1041 recalc_sigpending();
1042 spin_unlock_irq(&current->sighand->siglock); 1042 spin_unlock_irq(&current->sighand->siglock);
1043 }
1044} 1043}
1045 1044
1046static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 1045static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
index fd8005a3e6bd..591547af4c65 100644
--- a/arch/sparc/kernel/tick14.c
+++ b/arch/sparc/kernel/tick14.c
@@ -19,7 +19,6 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20 20
21#include <asm/oplib.h> 21#include <asm/oplib.h>
22#include <asm/segment.h>
23#include <asm/timer.h> 22#include <asm/timer.h>
24#include <asm/mostek.h> 23#include <asm/mostek.h>
25#include <asm/system.h> 24#include <asm/system.h>
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 6486cbf2efe9..3b759aefc170 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -32,7 +32,6 @@
32#include <linux/profile.h> 32#include <linux/profile.h>
33 33
34#include <asm/oplib.h> 34#include <asm/oplib.h>
35#include <asm/segment.h>
36#include <asm/timer.h> 35#include <asm/timer.h>
37#include <asm/mostek.h> 36#include <asm/mostek.h>
38#include <asm/system.h> 37#include <asm/system.h>
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 37f4107bae66..2bbd53f3cafb 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -23,7 +23,6 @@
23#include <linux/module.h> 23#include <linux/module.h>
24 24
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/segment.h>
27#include <asm/page.h> 26#include <asm/page.h>
28#include <asm/pgtable.h> 27#include <asm/pgtable.h>
29#include <asm/memreg.h> 28#include <asm/memreg.h>
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index ec2e05028a10..c03babaa0498 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -25,7 +25,6 @@
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26 26
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/segment.h>
29#include <asm/vac-ops.h> 28#include <asm/vac-ops.h>
30#include <asm/page.h> 29#include <asm/page.h>
31#include <asm/pgtable.h> 30#include <asm/pgtable.h>
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 88332f00094a..cecdc0a7521f 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -21,6 +21,7 @@
21#include <asm/visasm.h> 21#include <asm/visasm.h>
22#include <asm/estate.h> 22#include <asm/estate.h>
23#include <asm/auxio.h> 23#include <asm/auxio.h>
24#include <asm/sfafsr.h>
24 25
25#define curptr g6 26#define curptr g6
26 27
@@ -690,14 +691,159 @@ netbsd_syscall:
690 retl 691 retl
691 nop 692 nop
692 693
693 /* These next few routines must be sure to clear the 694 /* We need to carefully read the error status, ACK
694 * SFSR FaultValid bit so that the fast tlb data protection 695 * the errors, prevent recursive traps, and pass the
695 * handler does not flush the wrong context and lock up the 696 * information on to C code for logging.
696 * box. 697 *
698 * We pass the AFAR in as-is, and we encode the status
699 * information as described in asm-sparc64/sfafsr.h
700 */
701 .globl __spitfire_access_error
702__spitfire_access_error:
703 /* Disable ESTATE error reporting so that we do not
704 * take recursive traps and RED state the processor.
705 */
706 stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
707 membar #Sync
708
709 mov UDBE_UE, %g1
710 ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
711
712 /* __spitfire_cee_trap branches here with AFSR in %g4 and
713 * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
714 * ESTATE Error Enable register.
715 */
716__spitfire_cee_trap_continue:
717 ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
718
719 rdpr %tt, %g3
720 and %g3, 0x1ff, %g3 ! Paranoia
721 sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
722 or %g4, %g3, %g4
723 rdpr %tl, %g3
724 cmp %g3, 1
725 mov 1, %g3
726 bleu %xcc, 1f
727 sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
728
729 or %g4, %g3, %g4
730
731 /* Read in the UDB error register state, clearing the
732 * sticky error bits as-needed. We only clear them if
733 * the UE bit is set. Likewise, __spitfire_cee_trap
734 * below will only do so if the CE bit is set.
735 *
736 * NOTE: UltraSparc-I/II have high and low UDB error
737 * registers, corresponding to the two UDB units
738 * present on those chips. UltraSparc-IIi only
739 * has a single UDB, called "SDB" in the manual.
740 * For IIi the upper UDB register always reads
741 * as zero so for our purposes things will just
742 * work with the checks below.
697 */ 743 */
698 .globl __do_data_access_exception 7441: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
699 .globl __do_data_access_exception_tl1 745 and %g3, 0x3ff, %g7 ! Paranoia
700__do_data_access_exception_tl1: 746 sllx %g7, SFSTAT_UDBH_SHIFT, %g7
747 or %g4, %g7, %g4
748 andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
749 be,pn %xcc, 1f
750 nop
751 stxa %g3, [%g0] ASI_UDB_ERROR_W
752 membar #Sync
753
7541: mov 0x18, %g3
755 ldxa [%g3] ASI_UDBL_ERROR_R, %g3
756 and %g3, 0x3ff, %g7 ! Paranoia
757 sllx %g7, SFSTAT_UDBL_SHIFT, %g7
758 or %g4, %g7, %g4
759 andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
760 be,pn %xcc, 1f
761 nop
762 mov 0x18, %g7
763 stxa %g3, [%g7] ASI_UDB_ERROR_W
764 membar #Sync
765
7661: /* Ok, now that we've latched the error state,
767 * clear the sticky bits in the AFSR.
768 */
769 stxa %g4, [%g0] ASI_AFSR
770 membar #Sync
771
772 rdpr %tl, %g2
773 cmp %g2, 1
774 rdpr %pil, %g2
775 bleu,pt %xcc, 1f
776 wrpr %g0, 15, %pil
777
778 ba,pt %xcc, etraptl1
779 rd %pc, %g7
780
781 ba,pt %xcc, 2f
782 nop
783
7841: ba,pt %xcc, etrap_irq
785 rd %pc, %g7
786
7872: mov %l4, %o1
788 mov %l5, %o2
789 call spitfire_access_error
790 add %sp, PTREGS_OFF, %o0
791 ba,pt %xcc, rtrap
792 clr %l6
793
794 /* This is the trap handler entry point for ECC correctable
795 * errors. They are corrected, but we listen for the trap
796 * so that the event can be logged.
797 *
798 * Disrupting errors are either:
799 * 1) single-bit ECC errors during UDB reads to system
800 * memory
801 * 2) data parity errors during write-back events
802 *
803 * As far as I can make out from the manual, the CEE trap
804 * is only for correctable errors during memory read
805 * accesses by the front-end of the processor.
806 *
807 * The code below is only for trap level 1 CEE events,
808 * as it is the only situation where we can safely record
809 * and log. For trap level >1 we just clear the CE bit
810 * in the AFSR and return.
811 *
 812 * This is just like __spitfire_access_error above, but it
813 * specifically handles correctable errors. If an
814 * uncorrectable error is indicated in the AFSR we
815 * will branch directly above to __spitfire_access_error
816 * to handle it instead. Uncorrectable therefore takes
817 * priority over correctable, and the error logging
818 * C code will notice this case by inspecting the
819 * trap type.
820 */
821 .globl __spitfire_cee_trap
822__spitfire_cee_trap:
823 ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
824 mov 1, %g3
825 sllx %g3, SFAFSR_UE_SHIFT, %g3
826 andcc %g4, %g3, %g0 ! Check for UE
827 bne,pn %xcc, __spitfire_access_error
828 nop
829
830 /* Ok, in this case we only have a correctable error.
831 * Indicate we only wish to capture that state in register
 832 * %g1, and we disable only CE error reporting, unlike UE
 833 * handling which disables all error reporting.
834 */
835 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
836 andn %g3, ESTATE_ERR_CE, %g3
837 stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
838 membar #Sync
839
840 /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
841 ba,pt %xcc, __spitfire_cee_trap_continue
842 mov UDBE_CE, %g1
843
844 .globl __spitfire_data_access_exception
845 .globl __spitfire_data_access_exception_tl1
846__spitfire_data_access_exception_tl1:
701 rdpr %pstate, %g4 847 rdpr %pstate, %g4
702 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 848 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
703 mov TLB_SFSR, %g3 849 mov TLB_SFSR, %g3
@@ -706,9 +852,25 @@ __do_data_access_exception_tl1:
706 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR 852 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
707 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit 853 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
708 membar #Sync 854 membar #Sync
855 rdpr %tt, %g3
856 cmp %g3, 0x80 ! first win spill/fill trap
857 blu,pn %xcc, 1f
858 cmp %g3, 0xff ! last win spill/fill trap
859 bgu,pn %xcc, 1f
860 nop
709 ba,pt %xcc, winfix_dax 861 ba,pt %xcc, winfix_dax
710 rdpr %tpc, %g3 862 rdpr %tpc, %g3
711__do_data_access_exception: 8631: sethi %hi(109f), %g7
864 ba,pt %xcc, etraptl1
865109: or %g7, %lo(109b), %g7
866 mov %l4, %o1
867 mov %l5, %o2
868 call spitfire_data_access_exception_tl1
869 add %sp, PTREGS_OFF, %o0
870 ba,pt %xcc, rtrap
871 clr %l6
872
873__spitfire_data_access_exception:
712 rdpr %pstate, %g4 874 rdpr %pstate, %g4
713 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 875 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
714 mov TLB_SFSR, %g3 876 mov TLB_SFSR, %g3
@@ -722,20 +884,19 @@ __do_data_access_exception:
722109: or %g7, %lo(109b), %g7 884109: or %g7, %lo(109b), %g7
723 mov %l4, %o1 885 mov %l4, %o1
724 mov %l5, %o2 886 mov %l5, %o2
725 call data_access_exception 887 call spitfire_data_access_exception
726 add %sp, PTREGS_OFF, %o0 888 add %sp, PTREGS_OFF, %o0
727 ba,pt %xcc, rtrap 889 ba,pt %xcc, rtrap
728 clr %l6 890 clr %l6
729 891
730 .globl __do_instruction_access_exception 892 .globl __spitfire_insn_access_exception
731 .globl __do_instruction_access_exception_tl1 893 .globl __spitfire_insn_access_exception_tl1
732__do_instruction_access_exception_tl1: 894__spitfire_insn_access_exception_tl1:
733 rdpr %pstate, %g4 895 rdpr %pstate, %g4
734 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 896 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
735 mov TLB_SFSR, %g3 897 mov TLB_SFSR, %g3
736 mov DMMU_SFAR, %g5 898 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
737 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 899 rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
738 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
739 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit 900 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
740 membar #Sync 901 membar #Sync
741 sethi %hi(109f), %g7 902 sethi %hi(109f), %g7
@@ -743,18 +904,17 @@ __do_instruction_access_exception_tl1:
743109: or %g7, %lo(109b), %g7 904109: or %g7, %lo(109b), %g7
744 mov %l4, %o1 905 mov %l4, %o1
745 mov %l5, %o2 906 mov %l5, %o2
746 call instruction_access_exception_tl1 907 call spitfire_insn_access_exception_tl1
747 add %sp, PTREGS_OFF, %o0 908 add %sp, PTREGS_OFF, %o0
748 ba,pt %xcc, rtrap 909 ba,pt %xcc, rtrap
749 clr %l6 910 clr %l6
750 911
751__do_instruction_access_exception: 912__spitfire_insn_access_exception:
752 rdpr %pstate, %g4 913 rdpr %pstate, %g4
753 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 914 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
754 mov TLB_SFSR, %g3 915 mov TLB_SFSR, %g3
755 mov DMMU_SFAR, %g5 916 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
756 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 917 rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
757 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
758 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit 918 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
759 membar #Sync 919 membar #Sync
760 sethi %hi(109f), %g7 920 sethi %hi(109f), %g7
@@ -762,102 +922,11 @@ __do_instruction_access_exception:
762109: or %g7, %lo(109b), %g7 922109: or %g7, %lo(109b), %g7
763 mov %l4, %o1 923 mov %l4, %o1
764 mov %l5, %o2 924 mov %l5, %o2
765 call instruction_access_exception 925 call spitfire_insn_access_exception
766 add %sp, PTREGS_OFF, %o0 926 add %sp, PTREGS_OFF, %o0
767 ba,pt %xcc, rtrap 927 ba,pt %xcc, rtrap
768 clr %l6 928 clr %l6
769 929
770 /* This is the trap handler entry point for ECC correctable
771 * errors. They are corrected, but we listen for the trap
772 * so that the event can be logged.
773 *
774 * Disrupting errors are either:
775 * 1) single-bit ECC errors during UDB reads to system
776 * memory
777 * 2) data parity errors during write-back events
778 *
779 * As far as I can make out from the manual, the CEE trap
780 * is only for correctable errors during memory read
781 * accesses by the front-end of the processor.
782 *
783 * The code below is only for trap level 1 CEE events,
784 * as it is the only situation where we can safely record
785 * and log. For trap level >1 we just clear the CE bit
786 * in the AFSR and return.
787 */
788
789 /* Our trap handling infrastructure allows us to preserve
790 * two 64-bit values during etrap for arguments to
791 * subsequent C code. Therefore we encode the information
792 * as follows:
793 *
794 * value 1) Full 64-bits of AFAR
795 * value 2) Low 33-bits of AFSR, then bits 33-->42
796 * are UDBL error status and bits 43-->52
797 * are UDBH error status
798 */
799 .align 64
800 .globl cee_trap
801cee_trap:
802 ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR
803 ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR
804 sllx %g1, 31, %g1 ! Clear reserved bits
805 srlx %g1, 31, %g1 ! in AFSR
806
807 /* NOTE: UltraSparc-I/II have high and low UDB error
808 * registers, corresponding to the two UDB units
809 * present on those chips. UltraSparc-IIi only
810 * has a single UDB, called "SDB" in the manual.
811 * For IIi the upper UDB register always reads
812 * as zero so for our purposes things will just
813 * work with the checks below.
814 */
815 ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status
816 andcc %g3, (1 << 8), %g4 ! Check CE bit
817 sllx %g3, (64 - 10), %g3 ! Clear reserved bits
818 srlx %g3, (64 - 10), %g3 ! in UDB-Low error status
819
820 sllx %g3, (33 + 0), %g3 ! Shift up to encoding area
821 or %g1, %g3, %g1 ! Or it in
822 be,pn %xcc, 1f ! Branch if CE bit was clear
823 nop
824 stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL
825 membar #Sync ! Synchronize ASI stores
8261: mov 0x18, %g5 ! Addr of UDB-High error status
827 ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it
828
829 andcc %g3, (1 << 8), %g4 ! Check CE bit
830 sllx %g3, (64 - 10), %g3 ! Clear reserved bits
831 srlx %g3, (64 - 10), %g3 ! in UDB-High error status
832 sllx %g3, (33 + 10), %g3 ! Shift up to encoding area
833 or %g1, %g3, %g1 ! Or it in
834 be,pn %xcc, 1f ! Branch if CE bit was clear
835 nop
836 nop
837
838 stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH
839 membar #Sync ! Synchronize ASI stores
8401: mov 1, %g5 ! AFSR CE bit is
841 sllx %g5, 20, %g5 ! bit 20
842 stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR
843 membar #Sync ! Synchronize ASI stores
844 sllx %g2, (64 - 41), %g2 ! Clear reserved bits
845 srlx %g2, (64 - 41), %g2 ! in latched AFAR
846
847 andn %g2, 0x0f, %g2 ! Finish resv bit clearing
848 mov %g1, %g4 ! Move AFSR+UDB* into save reg
849 mov %g2, %g5 ! Move AFAR into save reg
850 rdpr %pil, %g2
851 wrpr %g0, 15, %pil
852 ba,pt %xcc, etrap_irq
853 rd %pc, %g7
854 mov %l4, %o0
855
856 mov %l5, %o1
857 call cee_log
858 add %sp, PTREGS_OFF, %o2
859 ba,a,pt %xcc, rtrap_irq
860
861 /* Capture I/D/E-cache state into per-cpu error scoreboard. 930 /* Capture I/D/E-cache state into per-cpu error scoreboard.
862 * 931 *
863 * %g1: (TL>=0) ? 1 : 0 932 * %g1: (TL>=0) ? 1 : 0
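
The rewritten entry code above hands the C handler exactly two values: the raw AFAR, and a single status word combining the AFSR, both UDB error-register snapshots, the trap type and a TL>1 flag, packed with the SFSTAT_* shifts from the new asm-sparc64/sfafsr.h. spitfire_access_error() in traps.c further below unpacks it with the matching masks. A self-contained sketch of the pack/unpack round trip; the shift values here are illustrative placeholders, not the real header contents:

    #include <stdio.h>

    /* Placeholder layout; the real constants live in sfafsr.h. */
    #define SFSTAT_AFSR_SHIFT       0
    #define SFSTAT_AFSR_MASK        (0xffffffffUL << SFSTAT_AFSR_SHIFT)
    #define SFSTAT_UDBL_SHIFT       32
    #define SFSTAT_UDBL_MASK        (0x3ffUL << SFSTAT_UDBL_SHIFT)
    #define SFSTAT_UDBH_SHIFT       42
    #define SFSTAT_UDBH_MASK        (0x3ffUL << SFSTAT_UDBH_SHIFT)
    #define SFSTAT_TRAP_TYPE_SHIFT  52
    #define SFSTAT_TRAP_TYPE        (0x1ffUL << SFSTAT_TRAP_TYPE_SHIFT)
    #define SFSTAT_TL_GT_ONE_SHIFT  61
    #define SFSTAT_TL_GT_ONE        (1UL << SFSTAT_TL_GT_ONE_SHIFT)

    int main(void)
    {
            unsigned long afsr = 0x100000, udbl = 0x100, udbh = 0;
            unsigned long tt = 0x32;        /* the tl0_dae trap type */

            /* entry.S side: OR the pieces into one register (%g4). */
            unsigned long enc = (afsr << SFSTAT_AFSR_SHIFT) |
                                (udbl << SFSTAT_UDBL_SHIFT) |
                                (udbh << SFSTAT_UDBH_SHIFT) |
                                (tt << SFSTAT_TRAP_TYPE_SHIFT);

            /* traps.c side: spitfire_access_error() unpacks it. */
            printf("afsr=%lx udbl=%lx tt=%lx tl1=%d\n",
                   (enc & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT,
                   (enc & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT,
                   (enc & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT,
                   (enc & SFSTAT_TL_GT_ONE) ? 1 : 0);
            return 0;
    }
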
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index bba140d98b1b..f21c993f8856 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -540,6 +540,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
540 540
541 pbm->parent->resource_adjust(pdev, res, root); 541 pbm->parent->resource_adjust(pdev, res, root);
542} 542}
543EXPORT_SYMBOL(pcibios_bus_to_resource);
543 544
544char * __init pcibios_setup(char *str) 545char * __init pcibios_setup(char *str)
545{ 546{
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2803bc7c2c79..425c60cfea19 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -466,7 +466,7 @@ do_flush_sync:
466 if (!limit) 466 if (!limit)
467 break; 467 break;
468 udelay(1); 468 udelay(1);
469 membar("#LoadLoad"); 469 rmb();
470 } 470 }
471 if (!limit) 471 if (!limit)
472 printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout " 472 printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 07424b075938..66255434128a 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -103,7 +103,7 @@ void cpu_idle(void)
103 * other cpus see our increasing idleness for the buddy 103 * other cpus see our increasing idleness for the buddy
104 * redistribution algorithm. -DaveM 104 * redistribution algorithm. -DaveM
105 */ 105 */
106 membar("#StoreStore | #StoreLoad"); 106 membar_storeload_storestore();
107 } 107 }
108} 108}
109 109
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 89f5e019f24c..e09ddf927655 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -147,7 +147,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
147 if (!limit) 147 if (!limit)
148 break; 148 break;
149 udelay(1); 149 udelay(1);
150 membar("#LoadLoad"); 150 rmb();
151 } 151 }
152 if (!limit) 152 if (!limit)
153 printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout " 153 printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index b7e6a91952b2..fbdfed3798d8 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -33,7 +33,6 @@
33#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/initrd.h> 34#include <linux/initrd.h>
35 35
36#include <asm/segment.h>
37#include <asm/system.h> 36#include <asm/system.h>
38#include <asm/io.h> 37#include <asm/io.h>
39#include <asm/processor.h> 38#include <asm/processor.h>
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index b27934671c35..60f5dfabb1e1 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -574,13 +574,12 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
574{ 574{
575 setup_rt_frame(ka, regs, signr, oldset, 575 setup_rt_frame(ka, regs, signr, oldset,
576 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); 576 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
577 if (!(ka->sa.sa_flags & SA_NOMASK)) { 577 spin_lock_irq(&current->sighand->siglock);
578 spin_lock_irq(&current->sighand->siglock); 578 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
579 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 579 if (!(ka->sa.sa_flags & SA_NOMASK))
580 sigaddset(&current->blocked,signr); 580 sigaddset(&current->blocked,signr);
581 recalc_sigpending(); 581 recalc_sigpending();
582 spin_unlock_irq(&current->sighand->siglock); 582 spin_unlock_irq(&current->sighand->siglock);
583 }
584} 583}
585 584
586static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 585static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index f28428f4170e..aecccd0df1d1 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -877,11 +877,12 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
877 unsigned long page = (unsigned long) 877 unsigned long page = (unsigned long)
878 page_address(pte_page(*ptep)); 878 page_address(pte_page(*ptep));
879 879
880 __asm__ __volatile__( 880 wmb();
881 " membar #StoreStore\n" 881 __asm__ __volatile__("flush %0 + %1"
882 " flush %0 + %1" 882 : /* no outputs */
883 : : "r" (page), "r" (address & (PAGE_SIZE - 1)) 883 : "r" (page),
884 : "memory"); 884 "r" (address & (PAGE_SIZE - 1))
885 : "memory");
885 } 886 }
886 pte_unmap(ptep); 887 pte_unmap(ptep);
887 preempt_enable(); 888 preempt_enable();
@@ -1292,11 +1293,12 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
1292 unsigned long page = (unsigned long) 1293 unsigned long page = (unsigned long)
1293 page_address(pte_page(*ptep)); 1294 page_address(pte_page(*ptep));
1294 1295
1295 __asm__ __volatile__( 1296 wmb();
1296 " membar #StoreStore\n" 1297 __asm__ __volatile__("flush %0 + %1"
1297 " flush %0 + %1" 1298 : /* no outputs */
1298 : : "r" (page), "r" (address & (PAGE_SIZE - 1)) 1299 : "r" (page),
1299 : "memory"); 1300 "r" (address & (PAGE_SIZE - 1))
1301 : "memory");
1300 } 1302 }
1301 pte_unmap(ptep); 1303 pte_unmap(ptep);
1302 preempt_enable(); 1304 preempt_enable();
@@ -1325,13 +1327,12 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
1325 else 1327 else
1326 setup_frame32(&ka->sa, regs, signr, oldset, info); 1328 setup_frame32(&ka->sa, regs, signr, oldset, info);
1327 } 1329 }
1328 if (!(ka->sa.sa_flags & SA_NOMASK)) { 1330 spin_lock_irq(&current->sighand->siglock);
1329 spin_lock_irq(&current->sighand->siglock); 1331 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
1330 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 1332 if (!(ka->sa.sa_flags & SA_NOMASK))
1331 sigaddset(&current->blocked,signr); 1333 sigaddset(&current->blocked,signr);
1332 recalc_sigpending(); 1334 recalc_sigpending();
1333 spin_unlock_irq(&current->sighand->siglock); 1335 spin_unlock_irq(&current->sighand->siglock);
1334 }
1335} 1336}
1336 1337
1337static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, 1338static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b9b42491e118..b4fc6a5462b2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -144,7 +144,7 @@ void __init smp_callin(void)
144 current->active_mm = &init_mm; 144 current->active_mm = &init_mm;
145 145
146 while (!cpu_isset(cpuid, smp_commenced_mask)) 146 while (!cpu_isset(cpuid, smp_commenced_mask))
147 membar("#LoadLoad"); 147 rmb();
148 148
149 cpu_set(cpuid, cpu_online_map); 149 cpu_set(cpuid, cpu_online_map);
150} 150}
@@ -184,11 +184,11 @@ static inline long get_delta (long *rt, long *master)
184 for (i = 0; i < NUM_ITERS; i++) { 184 for (i = 0; i < NUM_ITERS; i++) {
185 t0 = tick_ops->get_tick(); 185 t0 = tick_ops->get_tick();
186 go[MASTER] = 1; 186 go[MASTER] = 1;
187 membar("#StoreLoad"); 187 membar_storeload();
188 while (!(tm = go[SLAVE])) 188 while (!(tm = go[SLAVE]))
189 membar("#LoadLoad"); 189 rmb();
190 go[SLAVE] = 0; 190 go[SLAVE] = 0;
191 membar("#StoreStore"); 191 wmb();
192 t1 = tick_ops->get_tick(); 192 t1 = tick_ops->get_tick();
193 193
194 if (t1 - t0 < best_t1 - best_t0) 194 if (t1 - t0 < best_t1 - best_t0)
@@ -221,7 +221,7 @@ void smp_synchronize_tick_client(void)
221 go[MASTER] = 1; 221 go[MASTER] = 1;
222 222
223 while (go[MASTER]) 223 while (go[MASTER])
224 membar("#LoadLoad"); 224 rmb();
225 225
226 local_irq_save(flags); 226 local_irq_save(flags);
227 { 227 {
@@ -273,21 +273,21 @@ static void smp_synchronize_one_tick(int cpu)
273 273
274 /* wait for client to be ready */ 274 /* wait for client to be ready */
275 while (!go[MASTER]) 275 while (!go[MASTER])
276 membar("#LoadLoad"); 276 rmb();
277 277
278 /* now let the client proceed into his loop */ 278 /* now let the client proceed into his loop */
279 go[MASTER] = 0; 279 go[MASTER] = 0;
280 membar("#StoreLoad"); 280 membar_storeload();
281 281
282 spin_lock_irqsave(&itc_sync_lock, flags); 282 spin_lock_irqsave(&itc_sync_lock, flags);
283 { 283 {
284 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { 284 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
285 while (!go[MASTER]) 285 while (!go[MASTER])
286 membar("#LoadLoad"); 286 rmb();
287 go[MASTER] = 0; 287 go[MASTER] = 0;
288 membar("#StoreStore"); 288 wmb();
289 go[SLAVE] = tick_ops->get_tick(); 289 go[SLAVE] = tick_ops->get_tick();
290 membar("#StoreLoad"); 290 membar_storeload();
291 } 291 }
292 } 292 }
293 spin_unlock_irqrestore(&itc_sync_lock, flags); 293 spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -927,11 +927,11 @@ void smp_capture(void)
927 smp_processor_id()); 927 smp_processor_id());
928#endif 928#endif
929 penguins_are_doing_time = 1; 929 penguins_are_doing_time = 1;
930 membar("#StoreStore | #LoadStore"); 930 membar_storestore_loadstore();
931 atomic_inc(&smp_capture_registry); 931 atomic_inc(&smp_capture_registry);
932 smp_cross_call(&xcall_capture, 0, 0, 0); 932 smp_cross_call(&xcall_capture, 0, 0, 0);
933 while (atomic_read(&smp_capture_registry) != ncpus) 933 while (atomic_read(&smp_capture_registry) != ncpus)
934 membar("#LoadLoad"); 934 rmb();
935#ifdef CAPTURE_DEBUG 935#ifdef CAPTURE_DEBUG
936 printk("done\n"); 936 printk("done\n");
937#endif 937#endif
@@ -947,7 +947,7 @@ void smp_release(void)
947 smp_processor_id()); 947 smp_processor_id());
948#endif 948#endif
949 penguins_are_doing_time = 0; 949 penguins_are_doing_time = 0;
950 membar("#StoreStore | #StoreLoad"); 950 membar_storeload_storestore();
951 atomic_dec(&smp_capture_registry); 951 atomic_dec(&smp_capture_registry);
952 } 952 }
953} 953}
@@ -970,9 +970,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
970 save_alternate_globals(global_save); 970 save_alternate_globals(global_save);
971 prom_world(1); 971 prom_world(1);
972 atomic_inc(&smp_capture_registry); 972 atomic_inc(&smp_capture_registry);
973 membar("#StoreLoad | #StoreStore"); 973 membar_storeload_storestore();
974 while (penguins_are_doing_time) 974 while (penguins_are_doing_time)
975 membar("#LoadLoad"); 975 rmb();
976 restore_alternate_globals(global_save); 976 restore_alternate_globals(global_save);
977 atomic_dec(&smp_capture_registry); 977 atomic_dec(&smp_capture_registry);
978 prom_world(0); 978 prom_world(0);
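
The smp.c conversion above is the pattern applied throughout this patch: each stringly-typed membar("#...") becomes a named helper, rmb() and wmb() for the pure load-load and store-store orderings and membar_storeload() and friends for the mixed ones. Per the lib/Makefile hunk below these are real functions in the new mb.o, and sparc64_ksyms.c exports them so modules keep linking. A userspace sketch of the polling idiom after the conversion, with a compiler-level fence standing in for the kernel's sparc64 barrier:

    /* Stand-in; in the kernel rmb() is the real sparc64 barrier. */
    #define rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

    static volatile int flag;

    static void wait_for_peer(void)
    {
            while (!flag)           /* like: while (!go[MASTER]) */
                    rmb();          /*           rmb();          */
    }

    int main(void)
    {
            flag = 1;               /* normally stored by another CPU */
            wait_for_peer();
            return 0;
    }
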
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9202d925a9ce..a3ea697f1adb 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -99,17 +99,6 @@ extern int __ashrdi3(int, int);
99extern void dump_thread(struct pt_regs *, struct user *); 99extern void dump_thread(struct pt_regs *, struct user *);
100extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); 100extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
101 101
102#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
103extern void _do_spin_lock (spinlock_t *lock, char *str);
104extern void _do_spin_unlock (spinlock_t *lock);
105extern int _spin_trylock (spinlock_t *lock);
106extern void _do_read_lock(rwlock_t *rw, char *str);
107extern void _do_read_unlock(rwlock_t *rw, char *str);
108extern void _do_write_lock(rwlock_t *rw, char *str);
109extern void _do_write_unlock(rwlock_t *rw);
110extern int _do_write_trylock(rwlock_t *rw, char *str);
111#endif
112
113extern unsigned long phys_base; 102extern unsigned long phys_base;
114extern unsigned long pfn_base; 103extern unsigned long pfn_base;
115 104
@@ -152,18 +141,6 @@ EXPORT_SYMBOL(_mcount);
152EXPORT_SYMBOL(cpu_online_map); 141EXPORT_SYMBOL(cpu_online_map);
153EXPORT_SYMBOL(phys_cpu_present_map); 142EXPORT_SYMBOL(phys_cpu_present_map);
154 143
155/* Spinlock debugging library, optional. */
156#ifdef CONFIG_DEBUG_SPINLOCK
157EXPORT_SYMBOL(_do_spin_lock);
158EXPORT_SYMBOL(_do_spin_unlock);
159EXPORT_SYMBOL(_spin_trylock);
160EXPORT_SYMBOL(_do_read_lock);
161EXPORT_SYMBOL(_do_read_unlock);
162EXPORT_SYMBOL(_do_write_lock);
163EXPORT_SYMBOL(_do_write_unlock);
164EXPORT_SYMBOL(_do_write_trylock);
165#endif
166
167EXPORT_SYMBOL(smp_call_function); 144EXPORT_SYMBOL(smp_call_function);
168#endif /* CONFIG_SMP */ 145#endif /* CONFIG_SMP */
169 146
@@ -429,3 +406,12 @@ EXPORT_SYMBOL(xor_vis_4);
429EXPORT_SYMBOL(xor_vis_5); 406EXPORT_SYMBOL(xor_vis_5);
430 407
431EXPORT_SYMBOL(prom_palette); 408EXPORT_SYMBOL(prom_palette);
409
410/* memory barriers */
411EXPORT_SYMBOL(mb);
412EXPORT_SYMBOL(rmb);
413EXPORT_SYMBOL(wmb);
414EXPORT_SYMBOL(membar_storeload);
415EXPORT_SYMBOL(membar_storeload_storestore);
416EXPORT_SYMBOL(membar_storeload_loadload);
417EXPORT_SYMBOL(membar_storestore_loadstore);
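
Because mb()/rmb()/wmb() are no longer pure inline asm, any module using them now references the out-of-line implementations, which is what the new exports above satisfy. A hypothetical minimal module showing the dependency (example names; only the includes and the wmb() call matter):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <asm/system.h>         /* sparc64 mb(), rmb(), wmb() */

    static int __init barrier_demo_init(void)
    {
            wmb();  /* links against the exported out-of-line wmb */
            return 0;
    }

    static void __exit barrier_demo_exit(void)
    {
    }

    module_init(barrier_demo_init);
    module_exit(barrier_demo_exit);
    MODULE_LICENSE("GPL");
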
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 0c9e54b2f0c8..b280b2ef674f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
33#include <asm/dcu.h> 33#include <asm/dcu.h>
34#include <asm/estate.h> 34#include <asm/estate.h>
35#include <asm/chafsr.h> 35#include <asm/chafsr.h>
36#include <asm/sfafsr.h>
36#include <asm/psrcompat.h> 37#include <asm/psrcompat.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
38#include <asm/timer.h> 39#include <asm/timer.h>
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
143} 144}
144#endif 145#endif
145 146
146void instruction_access_exception(struct pt_regs *regs, 147void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
147 unsigned long sfsr, unsigned long sfar)
148{ 148{
149 siginfo_t info; 149 siginfo_t info;
150 150
@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
153 return; 153 return;
154 154
155 if (regs->tstate & TSTATE_PRIV) { 155 if (regs->tstate & TSTATE_PRIV) {
156 printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", 156 printk("spitfire_insn_access_exception: SFSR[%016lx] "
157 sfsr, sfar); 157 "SFAR[%016lx], going.\n", sfsr, sfar);
158 die_if_kernel("Iax", regs); 158 die_if_kernel("Iax", regs);
159 } 159 }
160 if (test_thread_flag(TIF_32BIT)) { 160 if (test_thread_flag(TIF_32BIT)) {
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
169 force_sig_info(SIGSEGV, &info, current); 169 force_sig_info(SIGSEGV, &info, current);
170} 170}
171 171
172void instruction_access_exception_tl1(struct pt_regs *regs, 172void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
173 unsigned long sfsr, unsigned long sfar)
174{ 173{
175 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 174 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP) 175 0, 0x8, SIGTRAP) == NOTIFY_STOP)
177 return; 176 return;
178 177
179 dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 178 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
180 instruction_access_exception(regs, sfsr, sfar); 179 spitfire_insn_access_exception(regs, sfsr, sfar);
181} 180}
182 181
183void data_access_exception(struct pt_regs *regs, 182void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
184 unsigned long sfsr, unsigned long sfar)
185{ 183{
186 siginfo_t info; 184 siginfo_t info;
187 185
@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
207 return; 205 return;
208 } 206 }
209 /* Shit... */ 207 /* Shit... */
210 printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", 208 printk("spitfire_data_access_exception: SFSR[%016lx] "
211 sfsr, sfar); 209 "SFAR[%016lx], going.\n", sfsr, sfar);
212 die_if_kernel("Dax", regs); 210 die_if_kernel("Dax", regs);
213 } 211 }
214 212
@@ -220,6 +218,16 @@ void data_access_exception(struct pt_regs *regs,
220 force_sig_info(SIGSEGV, &info, current); 218 force_sig_info(SIGSEGV, &info, current);
221} 219}
222 220
221void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
222{
223 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
224 0, 0x30, SIGTRAP) == NOTIFY_STOP)
225 return;
226
227 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
228 spitfire_data_access_exception(regs, sfsr, sfar);
229}
230
223#ifdef CONFIG_PCI 231#ifdef CONFIG_PCI
224/* This is really pathetic... */ 232/* This is really pathetic... */
225extern volatile int pci_poke_in_progress; 233extern volatile int pci_poke_in_progress;
@@ -253,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
253 : "memory"); 261 : "memory");
254} 262}
255 263
256void do_iae(struct pt_regs *regs) 264static void spitfire_enable_estate_errors(void)
257{ 265{
258 siginfo_t info; 266 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
259 267 "membar #Sync"
260 spitfire_clean_and_reenable_l1_caches(); 268 : /* no outputs */
261 269 : "r" (ESTATE_ERR_ALL),
262 if (notify_die(DIE_TRAP, "instruction access exception", regs, 270 "i" (ASI_ESTATE_ERROR_EN));
263 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 return;
265
266 info.si_signo = SIGBUS;
267 info.si_errno = 0;
268 info.si_code = BUS_OBJERR;
269 info.si_addr = (void *)0;
270 info.si_trapno = 0;
271 force_sig_info(SIGBUS, &info, current);
272}
273
274void do_dae(struct pt_regs *regs)
275{
276 siginfo_t info;
277
278#ifdef CONFIG_PCI
279 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
280 spitfire_clean_and_reenable_l1_caches();
281
282 pci_poke_faulted = 1;
283
284 /* Why the fuck did they have to change this? */
285 if (tlb_type == cheetah || tlb_type == cheetah_plus)
286 regs->tpc += 4;
287
288 regs->tnpc = regs->tpc + 4;
289 return;
290 }
291#endif
292 spitfire_clean_and_reenable_l1_caches();
293
294 if (notify_die(DIE_TRAP, "data access exception", regs,
295 0, 0x30, SIGTRAP) == NOTIFY_STOP)
296 return;
297
298 info.si_signo = SIGBUS;
299 info.si_errno = 0;
300 info.si_code = BUS_OBJERR;
301 info.si_addr = (void *)0;
302 info.si_trapno = 0;
303 force_sig_info(SIGBUS, &info, current);
304} 271}
305 272
306static char ecc_syndrome_table[] = { 273static char ecc_syndrome_table[] = {
@@ -338,65 +305,15 @@ static char ecc_syndrome_table[] = {
338 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a 305 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
339}; 306};
340 307
341/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
342 * in the following format. The AFAR is left as is, with
343 * reserved bits cleared, and is a raw 40-bit physical
344 * address.
345 */
346#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
347#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
348#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
349#define CE_STATUS_UDBH_SHIFT 43
350#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
351#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
352#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
353#define CE_STATUS_UDBL_SHIFT 33
354#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
355#define CE_STATUS_AFSR_ME (1UL << 32)
356#define CE_STATUS_AFSR_PRIV (1UL << 31)
357#define CE_STATUS_AFSR_ISAP (1UL << 30)
358#define CE_STATUS_AFSR_ETP (1UL << 29)
359#define CE_STATUS_AFSR_IVUE (1UL << 28)
360#define CE_STATUS_AFSR_TO (1UL << 27)
361#define CE_STATUS_AFSR_BERR (1UL << 26)
362#define CE_STATUS_AFSR_LDP (1UL << 25)
363#define CE_STATUS_AFSR_CP (1UL << 24)
364#define CE_STATUS_AFSR_WP (1UL << 23)
365#define CE_STATUS_AFSR_EDP (1UL << 22)
366#define CE_STATUS_AFSR_UE (1UL << 21)
367#define CE_STATUS_AFSR_CE (1UL << 20)
368#define CE_STATUS_AFSR_ETS (0xfUL << 16)
369#define CE_STATUS_AFSR_ETS_SHIFT 16
370#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
371#define CE_STATUS_AFSR_PSYND_SHIFT 0
372
373/* Layout of Ecache TAG Parity Syndrome of AFSR */
374#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
375#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
376#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
377#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
378
379static char *syndrome_unknown = "<Unknown>"; 308static char *syndrome_unknown = "<Unknown>";
380 309
381asmlinkage void cee_log(unsigned long ce_status, 310static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
382 unsigned long afar,
383 struct pt_regs *regs)
384{ 311{
385 char memmod_str[64]; 312 unsigned short scode;
386 char *p; 313 char memmod_str[64], *p;
387 unsigned short scode, udb_reg;
388 314
389 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " 315 if (udbl & bit) {
390 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n", 316 scode = ecc_syndrome_table[udbl & 0xff];
391 smp_processor_id(),
392 (ce_status & CE_STATUS_AFSR_MASK),
393 afar,
394 ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
395 ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
396
397 udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
398 if (udb_reg & (1 << 8)) {
399 scode = ecc_syndrome_table[udb_reg & 0xff];
400 if (prom_getunumber(scode, afar, 317 if (prom_getunumber(scode, afar,
401 memmod_str, sizeof(memmod_str)) == -1) 318 memmod_str, sizeof(memmod_str)) == -1)
402 p = syndrome_unknown; 319 p = syndrome_unknown;
@@ -407,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
407 smp_processor_id(), scode, p); 324 smp_processor_id(), scode, p);
408 } 325 }
409 326
410 udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL); 327 if (udbh & bit) {
411 if (udb_reg & (1 << 8)) { 328 scode = ecc_syndrome_table[udbh & 0xff];
412 scode = ecc_syndrome_table[udb_reg & 0xff];
413 if (prom_getunumber(scode, afar, 329 if (prom_getunumber(scode, afar,
414 memmod_str, sizeof(memmod_str)) == -1) 330 memmod_str, sizeof(memmod_str)) == -1)
415 p = syndrome_unknown; 331 p = syndrome_unknown;
@@ -419,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
419 "Memory Module \"%s\"\n", 335 "Memory Module \"%s\"\n",
420 smp_processor_id(), scode, p); 336 smp_processor_id(), scode, p);
421 } 337 }
338
339}
340
341static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
342{
343
344 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
345 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
346 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
347
348 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
349
350 /* We always log it, even if someone is listening for this
351 * trap.
352 */
353 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
354 0, TRAP_TYPE_CEE, SIGTRAP);
355
356 /* The Correctable ECC Error trap does not disable I/D caches. So
357 * we only have to restore the ESTATE Error Enable register.
358 */
359 spitfire_enable_estate_errors();
360}
361
362static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
363{
364 siginfo_t info;
365
366 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
367 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
368 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
369
370 /* XXX add more human friendly logging of the error status
371 * XXX as is implemented for cheetah
372 */
373
374 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
375
376 /* We always log it, even if someone is listening for this
377 * trap.
378 */
379 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
380 0, tt, SIGTRAP);
381
382 if (regs->tstate & TSTATE_PRIV) {
383 if (tl1)
384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
385 die_if_kernel("UE", regs);
386 }
387
388 /* XXX need more intelligent processing here, such as is implemented
389 * XXX for cheetah errors, in fact if the E-cache still holds the
390 * XXX line with bad parity this will loop
391 */
392
393 spitfire_clean_and_reenable_l1_caches();
394 spitfire_enable_estate_errors();
395
396 if (test_thread_flag(TIF_32BIT)) {
397 regs->tpc &= 0xffffffff;
398 regs->tnpc &= 0xffffffff;
399 }
400 info.si_signo = SIGBUS;
401 info.si_errno = 0;
402 info.si_code = BUS_OBJERR;
403 info.si_addr = (void *)0;
404 info.si_trapno = 0;
405 force_sig_info(SIGBUS, &info, current);
406}
407
408void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
409{
410 unsigned long afsr, tt, udbh, udbl;
411 int tl1;
412
413 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
414 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
415 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
416 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
417 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
418
419#ifdef CONFIG_PCI
420 if (tt == TRAP_TYPE_DAE &&
421 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
422 spitfire_clean_and_reenable_l1_caches();
423 spitfire_enable_estate_errors();
424
425 pci_poke_faulted = 1;
426 regs->tnpc = regs->tpc + 4;
427 return;
428 }
429#endif
430
431 if (afsr & SFAFSR_UE)
432 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
433
434 if (tt == TRAP_TYPE_CEE) {
435 /* Handle the case where we took a CEE trap, but ACK'd
436 * only the UE state in the UDB error registers.
437 */
438 if (afsr & SFAFSR_UE) {
439 if (udbh & UDBE_CE) {
440 __asm__ __volatile__(
441 "stxa %0, [%1] %2\n\t"
442 "membar #Sync"
443 : /* no outputs */
444 : "r" (udbh & UDBE_CE),
445 "r" (0x0), "i" (ASI_UDB_ERROR_W));
446 }
447 if (udbl & UDBE_CE) {
448 __asm__ __volatile__(
449 "stxa %0, [%1] %2\n\t"
450 "membar #Sync"
451 : /* no outputs */
452 : "r" (udbl & UDBE_CE),
453 "r" (0x18), "i" (ASI_UDB_ERROR_W));
454 }
455 }
456
457 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
458 }
422} 459}
423 460
424int cheetah_pcache_forced_on; 461int cheetah_pcache_forced_on;
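
Both log paths above finish by ACKing the latched error state: the sticky bits in the UDB error registers and the AFSR are write-one-to-clear, so the handler writes back exactly the bits it observed (the stxa ... ASI_UDB_ERROR_W / ASI_AFSR stores followed by membar #Sync). A portable sketch of that W1C idiom against a fake register; the register model is hypothetical, though bit 8 for CE matches what the old cee_trap tested:

    #include <stdio.h>

    #define UDBE_CE (1u << 8)                       /* CE sticky bit */

    static unsigned int fake_udb = UDBE_CE | 0x5a;  /* CE + syndrome */

    /* Write-one-to-clear: a 1 written to a sticky bit clears it. */
    static void fake_udb_write(unsigned int val)
    {
            fake_udb &= ~val;
    }

    int main(void)
    {
            unsigned int status = fake_udb;

            if (status & UDBE_CE)
                    fake_udb_write(status & UDBE_CE);  /* ACK only CE */
            printf("after ack: %#x\n", fake_udb);      /* syndrome kept */
            return 0;
    }
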
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 491bb3681f9d..8365bc1f81f3 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
18tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) 18tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
19tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) 19tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
20tl0_iax: membar #Sync 20tl0_iax: membar #Sync
21 TRAP_NOSAVE_7INSNS(__do_instruction_access_exception) 21 TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
22tl0_resv009: BTRAP(0x9) 22tl0_resv009: BTRAP(0x9)
23tl0_iae: TRAP(do_iae) 23tl0_iae: membar #Sync
24 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
24tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) 25tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
25tl0_ill: membar #Sync 26tl0_ill: membar #Sync
26 TRAP_7INSNS(do_illegal_instruction) 27 TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
36tl0_div0: TRAP(do_div0) 37tl0_div0: TRAP(do_div0)
37tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) 38tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
38tl0_resv02f: BTRAP(0x2f) 39tl0_resv02f: BTRAP(0x2f)
39tl0_dax: TRAP_NOSAVE(__do_data_access_exception) 40tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
40tl0_resv031: BTRAP(0x31) 41tl0_resv031: BTRAP(0x31)
41tl0_dae: TRAP(do_dae) 42tl0_dae: membar #Sync
43 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
42tl0_resv033: BTRAP(0x33) 44tl0_resv033: BTRAP(0x33)
43tl0_mna: TRAP_NOSAVE(do_mna) 45tl0_mna: TRAP_NOSAVE(do_mna)
44tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) 46tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
73tl0_ivec: TRAP_IVEC 75tl0_ivec: TRAP_IVEC
74tl0_paw: TRAP(do_paw) 76tl0_paw: TRAP(do_paw)
75tl0_vaw: TRAP(do_vaw) 77tl0_vaw: TRAP(do_vaw)
76tl0_cee: TRAP_NOSAVE(cee_trap) 78tl0_cee: membar #Sync
79 TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
77tl0_iamiss: 80tl0_iamiss:
78#include "itlb_base.S" 81#include "itlb_base.S"
79tl0_damiss: 82tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
175sparc64_ttable_tl1: 178sparc64_ttable_tl1:
176tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) 179tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
177tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) 180tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
178tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1) 181tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
179tl1_resv009: BTRAPTL1(0x9) 182tl1_resv009: BTRAPTL1(0x9)
180tl1_iae: TRAPTL1(do_iae_tl1) 183tl1_iae: membar #Sync
184 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
181tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) 185tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
182tl1_ill: TRAPTL1(do_ill_tl1) 186tl1_ill: TRAPTL1(do_ill_tl1)
183tl1_privop: BTRAPTL1(0x11) 187tl1_privop: BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
193tl1_div0: TRAPTL1(do_div0_tl1) 197tl1_div0: TRAPTL1(do_div0_tl1)
194tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) 198tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
195tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) 199tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
196tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1) 200tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
197tl1_resv031: BTRAPTL1(0x31) 201tl1_resv031: BTRAPTL1(0x31)
198tl1_dae: TRAPTL1(do_dae_tl1) 202tl1_dae: membar #Sync
203 TRAP_NOSAVE_7INSNS(__spitfire_access_error)
199tl1_resv033: BTRAPTL1(0x33) 204tl1_resv033: BTRAPTL1(0x33)
200tl1_mna: TRAP_NOSAVE(do_mna) 205tl1_mna: TRAP_NOSAVE(do_mna)
201tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) 206tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
219tl1_vaw: TRAPTL1(do_vaw_tl1) 224tl1_vaw: TRAPTL1(do_vaw_tl1)
220 225
221 /* The grotty trick to save %g1 into current->thread.cee_stuff 226 /* The grotty trick to save %g1 into current->thread.cee_stuff
222 * is because when we take this trap we could be interrupting trap 227 * is because when we take this trap we could be interrupting
223 * code already using the trap alternate global registers. 228 * trap code already using the trap alternate global registers.
224 * 229 *
225 * We cross our fingers and pray that this store/load does 230 * We cross our fingers and pray that this store/load does
226 * not cause yet another CEE trap. 231 * not cause yet another CEE trap.
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 11c3e88732e4..da9739f0d437 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -349,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
349 349
350extern void do_fpother(struct pt_regs *regs); 350extern void do_fpother(struct pt_regs *regs);
351extern void do_privact(struct pt_regs *regs); 351extern void do_privact(struct pt_regs *regs);
352extern void data_access_exception(struct pt_regs *regs, 352extern void spitfire_data_access_exception(struct pt_regs *regs,
353 unsigned long sfsr, 353 unsigned long sfsr,
354 unsigned long sfar); 354 unsigned long sfar);
355 355
356int handle_ldf_stq(u32 insn, struct pt_regs *regs) 356int handle_ldf_stq(u32 insn, struct pt_regs *regs)
357{ 357{
@@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
394 break; 394 break;
395 } 395 }
396 default: 396 default:
397 data_access_exception(regs, 0, addr); 397 spitfire_data_access_exception(regs, 0, addr);
398 return 1; 398 return 1;
399 } 399 }
400 if (put_user (first >> 32, (u32 __user *)addr) || 400 if (put_user (first >> 32, (u32 __user *)addr) ||
401 __put_user ((u32)first, (u32 __user *)(addr + 4)) || 401 __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
402 __put_user (second >> 32, (u32 __user *)(addr + 8)) || 402 __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
403 __put_user ((u32)second, (u32 __user *)(addr + 12))) { 403 __put_user ((u32)second, (u32 __user *)(addr + 12))) {
404 data_access_exception(regs, 0, addr); 404 spitfire_data_access_exception(regs, 0, addr);
405 return 1; 405 return 1;
406 } 406 }
407 } else { 407 } else {
@@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
414 do_privact(regs); 414 do_privact(regs);
415 return 1; 415 return 1;
416 } else if (asi > ASI_SNFL) { 416 } else if (asi > ASI_SNFL) {
417 data_access_exception(regs, 0, addr); 417 spitfire_data_access_exception(regs, 0, addr);
418 return 1; 418 return 1;
419 } 419 }
420 switch (insn & 0x180000) { 420 switch (insn & 0x180000) {
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
431 err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); 431 err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
432 } 432 }
433 if (err && !(asi & 0x2 /* NF */)) { 433 if (err && !(asi & 0x2 /* NF */)) {
434 data_access_exception(regs, 0, addr); 434 spitfire_data_access_exception(regs, 0, addr);
435 return 1; 435 return 1;
436 } 436 }
437 if (asi & 0x8) /* Little */ { 437 if (asi & 0x8) /* Little */ {
@@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
534 *(u64 *)(f->regs + freg) = value; 534 *(u64 *)(f->regs + freg) = value;
535 current_thread_info()->fpsaved[0] |= flag; 535 current_thread_info()->fpsaved[0] |= flag;
536 } else { 536 } else {
537daex: data_access_exception(regs, sfsr, sfar); 537daex: spitfire_data_access_exception(regs, sfsr, sfar);
538 return; 538 return;
539 } 539 }
540 advance(regs); 540 advance(regs);
@@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
578 __put_user ((u32)value, (u32 __user *)(sfar + 4))) 578 __put_user ((u32)value, (u32 __user *)(sfar + 4)))
579 goto daex; 579 goto daex;
580 } else { 580 } else {
581daex: data_access_exception(regs, sfsr, sfar); 581daex: spitfire_data_access_exception(regs, sfsr, sfar);
582 return; 582 return;
583 } 583 }
584 advance(regs); 584 advance(regs);
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index dfbc7e0dcf70..99c809a1e5ac 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -318,7 +318,7 @@ fill_fixup_dax:
318 nop 318 nop
319 rdpr %pstate, %l1 ! Prepare to change globals. 319 rdpr %pstate, %l1 ! Prepare to change globals.
320 mov %g4, %o1 ! Setup args for 320 mov %g4, %o1 ! Setup args for
321 mov %g5, %o2 ! final call to data_access_exception. 321 mov %g5, %o2 ! final call to spitfire_data_access_exception.
322 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO 322 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
323 323
324 mov %g6, %o7 ! Stash away current. 324 mov %g6, %o7 ! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
330 mov TSB_REG, %g1 330 mov TSB_REG, %g1
331 ldxa [%g1] ASI_IMMU, %g5 331 ldxa [%g1] ASI_IMMU, %g5
332#endif 332#endif
333 call data_access_exception 333 call spitfire_data_access_exception
334 add %sp, PTREGS_OFF, %o0 334 add %sp, PTREGS_OFF, %o0
335 335
336 b,pt %xcc, rtrap 336 b,pt %xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
391109: or %g7, %lo(109b), %g7 391109: or %g7, %lo(109b), %g7
392 mov %l4, %o1 392 mov %l4, %o1
393 mov %l5, %o2 393 mov %l5, %o2
394 call data_access_exception 394 call spitfire_data_access_exception
395 add %sp, PTREGS_OFF, %o0 395 add %sp, PTREGS_OFF, %o0
396 ba,pt %xcc, rtrap 396 ba,pt %xcc, rtrap
397 clr %l6 397 clr %l6
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..6201f1040982 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
12 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ 12 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
13 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ 13 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
14 copy_in_user.o user_fixup.o memmove.o \ 14 copy_in_user.o user_fixup.o memmove.o \
15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
16 16
17lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o 17lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
18lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o 18lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
index f03344cf784e..f5f0b5586f01 100644
--- a/arch/sparc64/lib/debuglocks.c
+++ b/arch/sparc64/lib/debuglocks.c
@@ -12,8 +12,6 @@
12 12
13#ifdef CONFIG_SMP 13#ifdef CONFIG_SMP
14 14
15#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
16
17static inline void show (char *str, spinlock_t *lock, unsigned long caller) 15static inline void show (char *str, spinlock_t *lock, unsigned long caller)
18{ 16{
19 int cpu = smp_processor_id(); 17 int cpu = smp_processor_id();
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
51#undef INIT_STUCK 49#undef INIT_STUCK
52#define INIT_STUCK 100000000 50#define INIT_STUCK 100000000
53 51
54void _do_spin_lock(spinlock_t *lock, char *str) 52void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
55{ 53{
56 unsigned long caller, val; 54 unsigned long val;
57 int stuck = INIT_STUCK; 55 int stuck = INIT_STUCK;
58 int cpu = get_cpu(); 56 int cpu = get_cpu();
59 int shown = 0; 57 int shown = 0;
60 58
61 GET_CALLER(caller);
62again: 59again:
63 __asm__ __volatile__("ldstub [%1], %0" 60 __asm__ __volatile__("ldstub [%1], %0"
64 : "=r" (val) 61 : "=r" (val)
65 : "r" (&(lock->lock)) 62 : "r" (&(lock->lock))
66 : "memory"); 63 : "memory");
67 membar("#StoreLoad | #StoreStore"); 64 membar_storeload_storestore();
68 if (val) { 65 if (val) {
69 while (lock->lock) { 66 while (lock->lock) {
70 if (!--stuck) { 67 if (!--stuck) {
@@ -72,7 +69,7 @@ again:
72 show(str, lock, caller); 69 show(str, lock, caller);
73 stuck = INIT_STUCK; 70 stuck = INIT_STUCK;
74 } 71 }
75 membar("#LoadLoad"); 72 rmb();
76 } 73 }
77 goto again; 74 goto again;
78 } 75 }
@@ -84,17 +81,16 @@ again:
84 put_cpu(); 81 put_cpu();
85} 82}
86 83
87int _do_spin_trylock(spinlock_t *lock) 84int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
88{ 85{
89 unsigned long val, caller; 86 unsigned long val;
90 int cpu = get_cpu(); 87 int cpu = get_cpu();
91 88
92 GET_CALLER(caller);
93 __asm__ __volatile__("ldstub [%1], %0" 89 __asm__ __volatile__("ldstub [%1], %0"
94 : "=r" (val) 90 : "=r" (val)
95 : "r" (&(lock->lock)) 91 : "r" (&(lock->lock))
96 : "memory"); 92 : "memory");
97 membar("#StoreLoad | #StoreStore"); 93 membar_storeload_storestore();
98 if (!val) { 94 if (!val) {
99 lock->owner_pc = ((unsigned int)caller); 95 lock->owner_pc = ((unsigned int)caller);
100 lock->owner_cpu = cpu; 96 lock->owner_cpu = cpu;
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock)
111{ 107{
112 lock->owner_pc = 0; 108 lock->owner_pc = 0;
113 lock->owner_cpu = NO_PROC_ID; 109 lock->owner_cpu = NO_PROC_ID;
114 membar("#StoreStore | #LoadStore"); 110 membar_storestore_loadstore();
115 lock->lock = 0; 111 lock->lock = 0;
116 current->thread.smp_lock_count--; 112 current->thread.smp_lock_count--;
117} 113}
118 114
119/* Keep INIT_STUCK the same... */ 115/* Keep INIT_STUCK the same... */
120 116
121void _do_read_lock (rwlock_t *rw, char *str) 117void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
122{ 118{
123 unsigned long caller, val; 119 unsigned long val;
124 int stuck = INIT_STUCK; 120 int stuck = INIT_STUCK;
125 int cpu = get_cpu(); 121 int cpu = get_cpu();
126 int shown = 0; 122 int shown = 0;
127 123
128 GET_CALLER(caller);
129wlock_again: 124wlock_again:
130 /* Wait for any writer to go away. */ 125 /* Wait for any writer to go away. */
131 while (((long)(rw->lock)) < 0) { 126 while (((long)(rw->lock)) < 0) {
@@ -134,7 +129,7 @@ wlock_again:
134 show_read(str, rw, caller); 129 show_read(str, rw, caller);
135 stuck = INIT_STUCK; 130 stuck = INIT_STUCK;
136 } 131 }
137 membar("#LoadLoad"); 132 rmb();
138 } 133 }
139 /* Try once to increment the counter. */ 134 /* Try once to increment the counter. */
140 __asm__ __volatile__( 135 __asm__ __volatile__(
@@ -147,7 +142,7 @@ wlock_again:
147"2:" : "=r" (val) 142"2:" : "=r" (val)
148 : "0" (&(rw->lock)) 143 : "0" (&(rw->lock))
149 : "g1", "g7", "memory"); 144 : "g1", "g7", "memory");
150 membar("#StoreLoad | #StoreStore"); 145 membar_storeload_storestore();
151 if (val) 146 if (val)
152 goto wlock_again; 147 goto wlock_again;
153 rw->reader_pc[cpu] = ((unsigned int)caller); 148 rw->reader_pc[cpu] = ((unsigned int)caller);
@@ -157,15 +152,13 @@ wlock_again:
157 put_cpu(); 152 put_cpu();
158} 153}
159 154
160void _do_read_unlock (rwlock_t *rw, char *str) 155void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
161{ 156{
162 unsigned long caller, val; 157 unsigned long val;
163 int stuck = INIT_STUCK; 158 int stuck = INIT_STUCK;
164 int cpu = get_cpu(); 159 int cpu = get_cpu();
165 int shown = 0; 160 int shown = 0;
166 161
167 GET_CALLER(caller);
168
169 /* Drop our identity _first_. */ 162 /* Drop our identity _first_. */
170 rw->reader_pc[cpu] = 0; 163 rw->reader_pc[cpu] = 0;
171 current->thread.smp_lock_count--; 164 current->thread.smp_lock_count--;
@@ -193,14 +186,13 @@ runlock_again:
193 put_cpu(); 186 put_cpu();
194} 187}
195 188
196void _do_write_lock (rwlock_t *rw, char *str) 189void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
197{ 190{
198 unsigned long caller, val; 191 unsigned long val;
199 int stuck = INIT_STUCK; 192 int stuck = INIT_STUCK;
200 int cpu = get_cpu(); 193 int cpu = get_cpu();
201 int shown = 0; 194 int shown = 0;
202 195
203 GET_CALLER(caller);
204wlock_again: 196wlock_again:
205 /* Spin while there is another writer. */ 197 /* Spin while there is another writer. */
206 while (((long)rw->lock) < 0) { 198 while (((long)rw->lock) < 0) {
@@ -209,7 +201,7 @@ wlock_again:
209 show_write(str, rw, caller); 201 show_write(str, rw, caller);
210 stuck = INIT_STUCK; 202 stuck = INIT_STUCK;
211 } 203 }
212 membar("#LoadLoad"); 204 rmb();
213 } 205 }
214 206
 215 /* Try to acquire the write bit. */ 207 /* Try to acquire the write bit. */
@@ -264,7 +256,7 @@ wlock_again:
264 show_write(str, rw, caller); 256 show_write(str, rw, caller);
265 stuck = INIT_STUCK; 257 stuck = INIT_STUCK;
266 } 258 }
267 membar("#LoadLoad"); 259 rmb();
268 } 260 }
269 goto wlock_again; 261 goto wlock_again;
270 } 262 }
@@ -278,14 +270,12 @@ wlock_again:
278 put_cpu(); 270 put_cpu();
279} 271}
280 272
281void _do_write_unlock(rwlock_t *rw) 273void _do_write_unlock(rwlock_t *rw, unsigned long caller)
282{ 274{
283 unsigned long caller, val; 275 unsigned long val;
284 int stuck = INIT_STUCK; 276 int stuck = INIT_STUCK;
285 int shown = 0; 277 int shown = 0;
286 278
287 GET_CALLER(caller);
288
289 /* Drop our identity _first_ */ 279 /* Drop our identity _first_ */
290 rw->writer_pc = 0; 280 rw->writer_pc = 0;
291 rw->writer_cpu = NO_PROC_ID; 281 rw->writer_cpu = NO_PROC_ID;
@@ -313,13 +303,11 @@ wlock_again:
313 } 303 }
314} 304}
315 305
316int _do_write_trylock (rwlock_t *rw, char *str) 306int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
317{ 307{
318 unsigned long caller, val; 308 unsigned long val;
319 int cpu = get_cpu(); 309 int cpu = get_cpu();
320 310
321 GET_CALLER(caller);
322
 323 /* Try to acquire the write bit. */ 311 /* Try to acquire the write bit. */
324 __asm__ __volatile__( 312 __asm__ __volatile__(
325" mov 1, %%g3\n" 313" mov 1, %%g3\n"
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S
new file mode 100644
index 000000000000..4004f748619f
--- /dev/null
+++ b/arch/sparc64/lib/mb.S
@@ -0,0 +1,73 @@
1/* mb.S: Out of line memory barriers.
2 *
3 * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
4 */
5
6 /* These are here in an effort to more fully work around
7 * Spitfire Errata #51. Essentially, if a memory barrier
8 * occurs soon after a mispredicted branch, the chip can stop
9 * executing instructions until a trap occurs. Therefore, if
10 * interrupts are disabled, the chip can hang forever.
11 *
12 * It used to be believed that the memory barrier had to be
13 * right in the delay slot, but a case has been traced
14 * recently wherein the memory barrier was one instruction
15 * after the branch delay slot and the chip still hung. The
16 * offending sequence was the following in sym_wakeup_done()
17 * of the sym53c8xx_2 driver:
18 *
19 * call sym_ccb_from_dsa, 0
20 * movge %icc, 0, %l0
21 * brz,pn %o0, .LL1303
22 * mov %o0, %l2
23 * membar #LoadLoad
24 *
25 * The branch has to be mispredicted for the bug to occur.
26 * Therefore, we put the memory barrier explicitly into a
27 * "branch always, predicted taken" delay slot to avoid the
28 * problem case.
29 */
30
31 .text
32
3399: retl
34 nop
35
36 .globl mb
37mb: ba,pt %xcc, 99b
38 membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad
39 .size mb, .-mb
40
41 .globl rmb
42rmb: ba,pt %xcc, 99b
43 membar #LoadLoad
44 .size rmb, .-rmb
45
46 .globl wmb
47wmb: ba,pt %xcc, 99b
48 membar #StoreStore
49 .size wmb, .-wmb
50
51 .globl membar_storeload
52membar_storeload:
53 ba,pt %xcc, 99b
54 membar #StoreLoad
55 .size membar_storeload, .-membar_storeload
56
57 .globl membar_storeload_storestore
58membar_storeload_storestore:
59 ba,pt %xcc, 99b
60 membar #StoreLoad | #StoreStore
61 .size membar_storeload_storestore, .-membar_storeload_storestore
62
63 .globl membar_storeload_loadload
64membar_storeload_loadload:
65 ba,pt %xcc, 99b
66 membar #StoreLoad | #LoadLoad
67 .size membar_storeload_loadload, .-membar_storeload_loadload
68
69 .globl membar_storestore_loadstore
70membar_storestore_loadstore:
71 ba,pt %xcc, 99b
72 membar #StoreStore | #LoadStore
73 .size membar_storestore_loadstore, .-membar_storestore_loadstore
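
These stubs are meant to be called from C in place of inline membar instructions, so that every barrier executes in a "branch always, predicted taken" delay slot as the comment above requires. A sketch of the matching C-side declarations (illustrative only; the kernel wires these up through its barrier macros, and mb/rmb/wmb may well be macro names rather than plain functions):

	/* C-side view of the out-of-line barriers in mb.S; each call runs
	 * its membar in a ba,pt delay slot, avoiding the Errata #51 hang. */
	extern void mb(void);
	extern void rmb(void);
	extern void wmb(void);
	extern void membar_storeload(void);
	extern void membar_storeload_storestore(void);
	extern void membar_storeload_loadload(void);
	extern void membar_storestore_loadstore(void);
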
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 15b4cfe07557..302efbcba70e 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -737,7 +737,8 @@ MODULE_LICENSE("GPL");
737extern u32 tl0_solaris[8]; 737extern u32 tl0_solaris[8];
738#define update_ttable(x) \ 738#define update_ttable(x) \
739 tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \ 739 tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \
740 __asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3])) 740 wmb(); \
741 __asm__ __volatile__ ("flush %0" : : "r" (&tl0_solaris[3]))
741#else 742#else
742#endif 743#endif
743 744
@@ -761,7 +762,8 @@ int init_module(void)
761 entry64_personality_patch |= 762 entry64_personality_patch |=
762 (offsetof(struct task_struct, personality) + 763 (offsetof(struct task_struct, personality) +
763 (sizeof(unsigned long) - 1)); 764 (sizeof(unsigned long) - 1));
764 __asm__ __volatile__("membar #StoreStore; flush %0" 765 wmb();
766 __asm__ __volatile__("flush %0"
765 : : "r" (&entry64_personality_patch)); 767 : : "r" (&entry64_personality_patch));
766 return 0; 768 return 0;
767} 769}
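
Both hunks above split the old "membar #StoreStore; flush" asm into a wmb() call, now routed through the errata-safe stub in mb.S, followed by a bare flush of the patched location. A sketch of the resulting patch-then-flush sequence, with patch_word() and its arguments as illustrative names:

	/* Sketch of the ordering now used when patching instructions:
	 * make the store globally visible first, then flush the I-cache
	 * line that holds the patched word. */
	static void patch_word(u32 *patch_site, u32 new_insn)
	{
		*patch_site = new_insn;		/* patch the code word */
		wmb();				/* order the store */
		__asm__ __volatile__("flush %0" : : "r" (patch_site));
	}
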
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index a37a5ac13c22..022f67bb6873 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -9,19 +9,11 @@
9 * 9 *
10 */ 10 */
11 11
12#include <linux/types.h> 12#include <linux/init.h>
13#include <linux/kdev_t.h>
14#include <linux/time.h>
15#include <linux/devfs_fs_kernel.h>
16#include <linux/module.h> 13#include <linux/module.h>
17#include <linux/mm.h> 14#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/smp_lock.h>
21#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
22#include <asm/uaccess.h> 16#include <asm/uaccess.h>
23#include <asm/irq.h>
24#include <asm/pgtable.h>
25#include "mem_user.h" 17#include "mem_user.h"
26#include "user_util.h" 18#include "user_util.h"
27 19
@@ -31,35 +23,22 @@ static unsigned long p_buf = 0;
31static char *v_buf = NULL; 23static char *v_buf = NULL;
32 24
33static ssize_t 25static ssize_t
34mmapper_read(struct file *file, char *buf, size_t count, loff_t *ppos) 26mmapper_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
35{ 27{
36 if(*ppos > mmapper_size) 28 return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size);
37 return -EINVAL;
38
39 if(count + *ppos > mmapper_size)
40 count = count + *ppos - mmapper_size;
41
42 if(count < 0)
43 return -EINVAL;
44
45 copy_to_user(buf,&v_buf[*ppos],count);
46
47 return count;
48} 29}
49 30
50static ssize_t 31static ssize_t
51mmapper_write(struct file *file, const char *buf, size_t count, loff_t *ppos) 32mmapper_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
52{ 33{
53 if(*ppos > mmapper_size) 34 if (*ppos > mmapper_size)
54 return -EINVAL; 35 return -EINVAL;
55 36
56 if(count + *ppos > mmapper_size) 37 if (count > mmapper_size - *ppos)
57 count = count + *ppos - mmapper_size; 38 count = mmapper_size - *ppos;
58
59 if(count < 0)
60 return -EINVAL;
61 39
62 copy_from_user(&v_buf[*ppos],buf,count); 40 if (copy_from_user(&v_buf[*ppos], buf, count))
41 return -EFAULT;
63 42
64 return count; 43 return count;
65} 44}
@@ -77,7 +56,6 @@ mmapper_mmap(struct file *file, struct vm_area_struct * vma)
77 int ret = -EINVAL; 56 int ret = -EINVAL;
78 int size; 57 int size;
79 58
80 lock_kernel();
81 if (vma->vm_pgoff != 0) 59 if (vma->vm_pgoff != 0)
82 goto out; 60 goto out;
83 61
@@ -92,7 +70,6 @@ mmapper_mmap(struct file *file, struct vm_area_struct * vma)
92 goto out; 70 goto out;
93 ret = 0; 71 ret = 0;
94out: 72out:
95 unlock_kernel();
96 return ret; 73 return ret;
97} 74}
98 75
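
The read path collapses into a single simple_read_from_buffer() call, which also fixes the old bounds handling (the removed code could compute a bogus count and ignored the copy_to_user() return value; the write path gets the same clamp by hand). A sketch of the equivalent open-coded body, assuming simple_read_from_buffer() has the usual libfs semantics:

	/* Roughly what simple_read_from_buffer(buf, count, ppos, v_buf,
	 * mmapper_size) does here (sketch, not the libfs source): */
	loff_t pos = *ppos;
	if (pos >= mmapper_size)
		return 0;			/* EOF, not -EINVAL */
	if (count > mmapper_size - pos)
		count = mmapper_size - pos;	/* clamp, no underflow */
	if (copy_to_user(buf, v_buf + pos, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
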
diff --git a/arch/um/kernel/signal_kern.c b/arch/um/kernel/signal_kern.c
index 7807a3e8c426..03618bd13d55 100644
--- a/arch/um/kernel/signal_kern.c
+++ b/arch/um/kernel/signal_kern.c
@@ -87,12 +87,12 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
87 recalc_sigpending(); 87 recalc_sigpending();
88 spin_unlock_irq(&current->sighand->siglock); 88 spin_unlock_irq(&current->sighand->siglock);
89 force_sigsegv(signr, current); 89 force_sigsegv(signr, current);
90 } 90 } else {
91 else if(!(ka->sa.sa_flags & SA_NODEFER)){
92 spin_lock_irq(&current->sighand->siglock); 91 spin_lock_irq(&current->sighand->siglock);
93 sigorsets(&current->blocked, &current->blocked, 92 sigorsets(&current->blocked, &current->blocked,
94 &ka->sa.sa_mask); 93 &ka->sa.sa_mask);
95 sigaddset(&current->blocked, signr); 94 if(!(ka->sa.sa_flags & SA_NODEFER))
95 sigaddset(&current->blocked, signr);
96 recalc_sigpending(); 96 recalc_sigpending();
97 spin_unlock_irq(&current->sighand->siglock); 97 spin_unlock_irq(&current->sighand->siglock);
98 } 98 }
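
The same SA_NODEFER correction recurs in the v850, x86_64 and xtensa hunks below: once the signal frame is set up, the handler's sa_mask must be blocked unconditionally, and only the automatic blocking of the delivered signal itself depends on SA_NODEFER. The corrected pattern, extracted for clarity:

	/* Post-delivery blocking as fixed above: sa_mask always, the
	 * delivered signal only when SA_NODEFER is clear. */
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
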
diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c
index 37061e32e1a4..633e4e1b825f 100644
--- a/arch/v850/kernel/signal.c
+++ b/arch/v850/kernel/signal.c
@@ -462,13 +462,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
462 else 462 else
463 setup_frame(sig, ka, oldset, regs); 463 setup_frame(sig, ka, oldset, regs);
464 464
465 if (!(ka->sa.sa_flags & SA_NODEFER)) { 465 spin_lock_irq(&current->sighand->siglock);
466 spin_lock_irq(&current->sighand->siglock); 466 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
467 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 467 if (!(ka->sa.sa_flags & SA_NODEFER))
468 sigaddset(&current->blocked,sig); 468 sigaddset(&current->blocked,sig);
469 recalc_sigpending(); 469 recalc_sigpending();
470 spin_unlock_irq(&current->sighand->siglock); 470 spin_unlock_irq(&current->sighand->siglock);
471 }
472} 471}
473 472
474/* 473/*
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 776f3c866b70..b97a61e1c71c 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-rc3 3# Linux kernel version: 2.6.13-rc6-git3
4# Fri Jul 22 16:47:31 2005 4# Fri Aug 12 16:40:34 2005
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -284,10 +284,6 @@ CONFIG_IPV6=y
284# Network testing 284# Network testing
285# 285#
286# CONFIG_NET_PKTGEN is not set 286# CONFIG_NET_PKTGEN is not set
287CONFIG_NETPOLL=y
288# CONFIG_NETPOLL_RX is not set
289# CONFIG_NETPOLL_TRAP is not set
290CONFIG_NET_POLL_CONTROLLER=y
291# CONFIG_HAMRADIO is not set 287# CONFIG_HAMRADIO is not set
292# CONFIG_IRDA is not set 288# CONFIG_IRDA is not set
293# CONFIG_BT is not set 289# CONFIG_BT is not set
@@ -463,6 +459,7 @@ CONFIG_AIC79XX_DEBUG_MASK=0
463# CONFIG_MEGARAID_NEWGEN is not set 459# CONFIG_MEGARAID_NEWGEN is not set
464# CONFIG_MEGARAID_LEGACY is not set 460# CONFIG_MEGARAID_LEGACY is not set
465CONFIG_SCSI_SATA=y 461CONFIG_SCSI_SATA=y
462# CONFIG_SCSI_SATA_AHCI is not set
466# CONFIG_SCSI_SATA_SVW is not set 463# CONFIG_SCSI_SATA_SVW is not set
467CONFIG_SCSI_ATA_PIIX=y 464CONFIG_SCSI_ATA_PIIX=y
468# CONFIG_SCSI_SATA_NV is not set 465# CONFIG_SCSI_SATA_NV is not set
@@ -492,6 +489,7 @@ CONFIG_SCSI_QLA2XXX=y
492# CONFIG_SCSI_QLA2300 is not set 489# CONFIG_SCSI_QLA2300 is not set
493# CONFIG_SCSI_QLA2322 is not set 490# CONFIG_SCSI_QLA2322 is not set
494# CONFIG_SCSI_QLA6312 is not set 491# CONFIG_SCSI_QLA6312 is not set
492# CONFIG_SCSI_QLA24XX is not set
495# CONFIG_SCSI_LPFC is not set 493# CONFIG_SCSI_LPFC is not set
496# CONFIG_SCSI_DC395x is not set 494# CONFIG_SCSI_DC395x is not set
497# CONFIG_SCSI_DC390T is not set 495# CONFIG_SCSI_DC390T is not set
@@ -512,9 +510,11 @@ CONFIG_BLK_DEV_DM=y
512# 510#
513# Fusion MPT device support 511# Fusion MPT device support
514# 512#
515# CONFIG_FUSION is not set 513CONFIG_FUSION=y
516# CONFIG_FUSION_SPI is not set 514CONFIG_FUSION_SPI=y
517# CONFIG_FUSION_FC is not set 515# CONFIG_FUSION_FC is not set
516CONFIG_FUSION_MAX_SGE=128
517# CONFIG_FUSION_CTL is not set
518 518
519# 519#
520# IEEE 1394 (FireWire) support 520# IEEE 1394 (FireWire) support
@@ -585,6 +585,7 @@ CONFIG_8139TOO=y
585# CONFIG_ACENIC is not set 585# CONFIG_ACENIC is not set
586# CONFIG_DL2K is not set 586# CONFIG_DL2K is not set
587CONFIG_E1000=y 587CONFIG_E1000=y
588# CONFIG_E1000_NAPI is not set
588# CONFIG_NS83820 is not set 589# CONFIG_NS83820 is not set
589# CONFIG_HAMACHI is not set 590# CONFIG_HAMACHI is not set
590# CONFIG_YELLOWFIN is not set 591# CONFIG_YELLOWFIN is not set
@@ -624,6 +625,10 @@ CONFIG_S2IO=m
624# CONFIG_NET_FC is not set 625# CONFIG_NET_FC is not set
625# CONFIG_SHAPER is not set 626# CONFIG_SHAPER is not set
626CONFIG_NETCONSOLE=y 627CONFIG_NETCONSOLE=y
628CONFIG_NETPOLL=y
629# CONFIG_NETPOLL_RX is not set
630# CONFIG_NETPOLL_TRAP is not set
631CONFIG_NET_POLL_CONTROLLER=y
627 632
628# 633#
629# ISDN subsystem 634# ISDN subsystem
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 6ded3a50dfe6..b548dea4e5b9 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -185,6 +185,40 @@ unsigned long __init e820_end_of_ram(void)
185} 185}
186 186
187/* 187/*
188 * Compute how much memory is missing in a range.
189 * Unlike the other functions in this file the arguments are in page numbers.
190 */
191unsigned long __init
192e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
193{
194 unsigned long ram = 0;
195 unsigned long start = start_pfn << PAGE_SHIFT;
196 unsigned long end = end_pfn << PAGE_SHIFT;
197 int i;
198 for (i = 0; i < e820.nr_map; i++) {
199 struct e820entry *ei = &e820.map[i];
200 unsigned long last, addr;
201
202 if (ei->type != E820_RAM ||
203 ei->addr+ei->size <= start ||
204 ei->addr >= end)
205 continue;
206
207 addr = round_up(ei->addr, PAGE_SIZE);
208 if (addr < start)
209 addr = start;
210
211 last = round_down(ei->addr + ei->size, PAGE_SIZE);
212 if (last >= end)
213 last = end;
214
215 if (last > addr)
216 ram += last - addr;
217 }
218 return ((end - start) - ram) >> PAGE_SHIFT;
219}
220
221/*
188 * Mark e820 reserved areas as busy for the resource manager. 222 * Mark e820 reserved areas as busy for the resource manager.
189 */ 223 */
190void __init e820_reserve_resources(void) 224void __init e820_reserve_resources(void)
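
e820_hole_size() feeds the new holes[] arrays used by paging_init() and setup_node_zones() in the hunks below, so free_area_init_node() no longer counts non-RAM gaps as managed pages. A worked example with illustrative numbers:

	/* For pfn range [0, 0x100) (bytes 0..0x100000) and a single
	 * E820_RAM entry covering [0, 0x9f000), ram = 0x9f000 bytes, so
	 * the result is (0x100000 - 0x9f000) >> PAGE_SHIFT = 0x61 pages
	 * (the classic 640K..1M ISA hole on PC-class machines). */
	unsigned long dma_holes = e820_hole_size(0, 0x100);	/* 0x61 */
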
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 98590a989f3d..d642fbf3da29 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -394,10 +394,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
394#endif 394#endif
395 ret = setup_rt_frame(sig, ka, info, oldset, regs); 395 ret = setup_rt_frame(sig, ka, info, oldset, regs);
396 396
397 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) { 397 if (ret) {
398 spin_lock_irq(&current->sighand->siglock); 398 spin_lock_irq(&current->sighand->siglock);
399 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 399 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
400 sigaddset(&current->blocked,sig); 400 if (!(ka->sa.sa_flags & SA_NODEFER))
401 sigaddset(&current->blocked,sig);
401 recalc_sigpending(); 402 recalc_sigpending();
402 spin_unlock_irq(&current->sighand->siglock); 403 spin_unlock_irq(&current->sighand->siglock);
403 } 404 }
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 72e4b364ed73..aa4a5189ecee 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -322,18 +322,26 @@ void zap_low_mappings(void)
322void __init paging_init(void) 322void __init paging_init(void)
323{ 323{
324 { 324 {
325 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; 325 unsigned long zones_size[MAX_NR_ZONES];
326 unsigned long holes[MAX_NR_ZONES];
326 unsigned int max_dma; 327 unsigned int max_dma;
327 328
329 memset(zones_size, 0, sizeof(zones_size));
330 memset(holes, 0, sizeof(holes));
331
328 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; 332 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
329 333
330 if (end_pfn < max_dma) 334 if (end_pfn < max_dma) {
331 zones_size[ZONE_DMA] = end_pfn; 335 zones_size[ZONE_DMA] = end_pfn;
332 else { 336 holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
337 } else {
333 zones_size[ZONE_DMA] = max_dma; 338 zones_size[ZONE_DMA] = max_dma;
339 holes[ZONE_DMA] = e820_hole_size(0, max_dma);
334 zones_size[ZONE_NORMAL] = end_pfn - max_dma; 340 zones_size[ZONE_NORMAL] = end_pfn - max_dma;
341 holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
335 } 342 }
336 free_area_init(zones_size); 343 free_area_init_node(0, NODE_DATA(0), zones_size,
344 __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
337 } 345 }
338 return; 346 return;
339} 347}
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 70cb2904a90f..6a156f5692ae 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -126,9 +126,11 @@ void __init setup_node_zones(int nodeid)
126{ 126{
127 unsigned long start_pfn, end_pfn; 127 unsigned long start_pfn, end_pfn;
128 unsigned long zones[MAX_NR_ZONES]; 128 unsigned long zones[MAX_NR_ZONES];
129 unsigned long holes[MAX_NR_ZONES];
129 unsigned long dma_end_pfn; 130 unsigned long dma_end_pfn;
130 131
131 memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES); 132 memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
133 memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
132 134
133 start_pfn = node_start_pfn(nodeid); 135 start_pfn = node_start_pfn(nodeid);
134 end_pfn = node_end_pfn(nodeid); 136 end_pfn = node_end_pfn(nodeid);
@@ -139,13 +141,17 @@ void __init setup_node_zones(int nodeid)
139 dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT; 141 dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
140 if (start_pfn < dma_end_pfn) { 142 if (start_pfn < dma_end_pfn) {
141 zones[ZONE_DMA] = dma_end_pfn - start_pfn; 143 zones[ZONE_DMA] = dma_end_pfn - start_pfn;
144 holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
142 zones[ZONE_NORMAL] = end_pfn - dma_end_pfn; 145 zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
146 holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
147
143 } else { 148 } else {
144 zones[ZONE_NORMAL] = end_pfn - start_pfn; 149 zones[ZONE_NORMAL] = end_pfn - start_pfn;
150 holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
145 } 151 }
146 152
147 free_area_init_node(nodeid, NODE_DATA(nodeid), zones, 153 free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
148 start_pfn, NULL); 154 start_pfn, holes);
149} 155}
150 156
151void __init numa_init_array(void) 157void __init numa_init_array(void)
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index df6e1e17b096..dc42cede9394 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -702,12 +702,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
702 if (ka.sa.sa_flags & SA_ONESHOT) 702 if (ka.sa.sa_flags & SA_ONESHOT)
703 ka.sa.sa_handler = SIG_DFL; 703 ka.sa.sa_handler = SIG_DFL;
704 704
705 if (!(ka.sa.sa_flags & SA_NODEFER)) { 705 spin_lock_irq(&current->sighand->siglock);
706 spin_lock_irq(&current->sighand->siglock); 706 sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
707 sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask); 707 if (!(ka.sa.sa_flags & SA_NODEFER))
708 sigaddset(&current->blocked, signr); 708 sigaddset(&current->blocked, signr);
709 recalc_sigpending(); 709 recalc_sigpending();
710 spin_unlock_irq(&current->sighand->siglock); 710 spin_unlock_irq(&current->sighand->siglock);
711 }
712 return 1; 711 return 1;
713} 712}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index cecab0acc3fe..46d655fab115 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -48,6 +48,8 @@ source "drivers/hwmon/Kconfig"
48 48
49source "drivers/misc/Kconfig" 49source "drivers/misc/Kconfig"
50 50
51source "drivers/mfd/Kconfig"
52
51source "drivers/media/Kconfig" 53source "drivers/media/Kconfig"
52 54
53source "drivers/video/Kconfig" 55source "drivers/video/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 126a851d5653..9663132ed825 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_FB_INTEL) += video/intelfb/
26obj-$(CONFIG_SERIO) += input/serio/ 26obj-$(CONFIG_SERIO) += input/serio/
27obj-y += serial/ 27obj-y += serial/
28obj-$(CONFIG_PARPORT) += parport/ 28obj-$(CONFIG_PARPORT) += parport/
29obj-y += base/ block/ misc/ net/ media/ 29obj-y += base/ block/ misc/ mfd/ net/ media/
30obj-$(CONFIG_NUBUS) += nubus/ 30obj-$(CONFIG_NUBUS) += nubus/
31obj-$(CONFIG_ATM) += atm/ 31obj-$(CONFIG_ATM) += atm/
32obj-$(CONFIG_PPC_PMAC) += macintosh/ 32obj-$(CONFIG_PPC_PMAC) += macintosh/
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c
index 186b182c5825..f93d2ee54800 100644
--- a/drivers/acpi/sleep/poweroff.c
+++ b/drivers/acpi/sleep/poweroff.c
@@ -55,7 +55,11 @@ void acpi_power_off(void)
55 55
56static int acpi_shutdown(struct sys_device *x) 56static int acpi_shutdown(struct sys_device *x)
57{ 57{
58 return acpi_sleep_prepare(ACPI_STATE_S5); 58 if (system_state == SYSTEM_POWER_OFF) {
59 /* Prepare if we are going to power off the system */
60 return acpi_sleep_prepare(ACPI_STATE_S5);
61 }
62 return 0;
59} 63}
60 64
61static struct sysdev_class acpi_sysclass = { 65static struct sysdev_class acpi_sysclass = {
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 73c6b85299c1..d74a7c5e75dd 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -513,7 +513,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
513 513
514 // VC layer stats 514 // VC layer stats
515 atomic_inc(&atm_vcc->stats->rx); 515 atomic_inc(&atm_vcc->stats->rx);
516 do_gettimeofday(&skb->stamp); 516 __net_timestamp(skb);
 517 // end of our responsibility 517 // end of our responsibility
518 atm_vcc->push (atm_vcc, skb); 518 atm_vcc->push (atm_vcc, skb);
519 return; 519 return;
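
The do_gettimeofday(&skb->stamp) to __net_timestamp(skb) substitution repeats across the ATM drivers below: drivers stop writing the stamp field directly and go through the skb timestamp helpers instead (eni.c below uses the companion skb_set_timestamp() for the same reason). A plausible shape for the helper, offered as a hedged sketch rather than the actual net header:

	/* Sketch: what __net_timestamp(skb) is expected to do here,
	 * assuming it wraps the same wall-clock source as before. */
	static inline void net_timestamp_sketch(struct sk_buff *skb)
	{
		struct timeval tv;

		do_gettimeofday(&tv);
		skb_set_timestamp(skb, &tv);
	}
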
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index f2f01cb82cb4..57f1810fdccd 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -325,7 +325,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
325 result = -ENOBUFS; 325 result = -ENOBUFS;
326 goto done; 326 goto done;
327 } 327 }
328 do_gettimeofday(&new_skb->stamp); 328 __net_timestamp(new_skb);
329 memcpy(skb_put(new_skb,skb->len),skb->data,skb->len); 329 memcpy(skb_put(new_skb,skb->len),skb->data,skb->len);
330 out_vcc->push(out_vcc,new_skb); 330 out_vcc->push(out_vcc,new_skb);
331 atomic_inc(&vcc->stats->tx); 331 atomic_inc(&vcc->stats->tx);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 10da36934769..c13c4d736ef5 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -537,7 +537,7 @@ static int rx_aal0(struct atm_vcc *vcc)
537 return 0; 537 return 0;
538 } 538 }
539 skb_put(skb,length); 539 skb_put(skb,length);
540 skb->stamp = eni_vcc->timestamp; 540 skb_set_timestamp(skb, &eni_vcc->timestamp);
541 DPRINTK("got len %ld\n",length); 541 DPRINTK("got len %ld\n",length);
542 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1; 542 if (do_rx_dma(vcc,skb,1,length >> 2,length >> 2)) return 1;
543 eni_vcc->rxing++; 543 eni_vcc->rxing++;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b078fa548ebf..58219744f5db 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
815 skb_put (skb, qe->p1 & 0xffff); 815 skb_put (skb, qe->p1 & 0xffff);
816 ATM_SKB(skb)->vcc = atm_vcc; 816 ATM_SKB(skb)->vcc = atm_vcc;
817 atomic_inc(&atm_vcc->stats->rx); 817 atomic_inc(&atm_vcc->stats->rx);
818 do_gettimeofday(&skb->stamp); 818 __net_timestamp(skb);
819 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); 819 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
820 atm_vcc->push (atm_vcc, skb); 820 atm_vcc->push (atm_vcc, skb);
821 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe); 821 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 5f702199543a..2bf723a7b6e6 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1176,7 +1176,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
1176 return -ENOMEM; 1176 return -ENOMEM;
1177 } 1177 }
1178 1178
1179 do_gettimeofday(&skb->stamp); 1179 __net_timestamp(skb);
1180 1180
1181#ifdef FORE200E_52BYTE_AAL0_SDU 1181#ifdef FORE200E_52BYTE_AAL0_SDU
1182 if (cell_header) { 1182 if (cell_header) {
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 28250c9b32d6..fde9334059af 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1886,7 +1886,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
1886 if (rx_skb_reserve > 0) 1886 if (rx_skb_reserve > 0)
1887 skb_reserve(skb, rx_skb_reserve); 1887 skb_reserve(skb, rx_skb_reserve);
1888 1888
1889 do_gettimeofday(&skb->stamp); 1889 __net_timestamp(skb);
1890 1890
1891 for (iov = he_vcc->iov_head; 1891 for (iov = he_vcc->iov_head;
1892 iov < he_vcc->iov_tail; ++iov) { 1892 iov < he_vcc->iov_tail; ++iov) {
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 924a2c8988bd..0cded0468003 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
1034 struct atm_vcc * vcc = ATM_SKB(skb)->vcc; 1034 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
1035 // VC layer stats 1035 // VC layer stats
1036 atomic_inc(&vcc->stats->rx); 1036 atomic_inc(&vcc->stats->rx);
1037 do_gettimeofday(&skb->stamp); 1037 __net_timestamp(skb);
 1038 // end of our responsibility 1038 // end of our responsibility
1039 vcc->push (vcc, skb); 1039 vcc->push (vcc, skb);
1040 } 1040 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 30b7e990ed0b..b4a76cade646 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1101,7 +1101,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1101 cell, ATM_CELL_PAYLOAD); 1101 cell, ATM_CELL_PAYLOAD);
1102 1102
1103 ATM_SKB(sb)->vcc = vcc; 1103 ATM_SKB(sb)->vcc = vcc;
1104 do_gettimeofday(&sb->stamp); 1104 __net_timestamp(sb);
1105 vcc->push(vcc, sb); 1105 vcc->push(vcc, sb);
1106 atomic_inc(&vcc->stats->rx); 1106 atomic_inc(&vcc->stats->rx);
1107 1107
@@ -1179,7 +1179,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1179 1179
1180 skb_trim(skb, len); 1180 skb_trim(skb, len);
1181 ATM_SKB(skb)->vcc = vcc; 1181 ATM_SKB(skb)->vcc = vcc;
1182 do_gettimeofday(&skb->stamp); 1182 __net_timestamp(skb);
1183 1183
1184 vcc->push(vcc, skb); 1184 vcc->push(vcc, skb);
1185 atomic_inc(&vcc->stats->rx); 1185 atomic_inc(&vcc->stats->rx);
@@ -1201,7 +1201,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1201 1201
1202 skb_trim(skb, len); 1202 skb_trim(skb, len);
1203 ATM_SKB(skb)->vcc = vcc; 1203 ATM_SKB(skb)->vcc = vcc;
1204 do_gettimeofday(&skb->stamp); 1204 __net_timestamp(skb);
1205 1205
1206 vcc->push(vcc, skb); 1206 vcc->push(vcc, skb);
1207 atomic_inc(&vcc->stats->rx); 1207 atomic_inc(&vcc->stats->rx);
@@ -1340,7 +1340,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
1340 ATM_CELL_PAYLOAD); 1340 ATM_CELL_PAYLOAD);
1341 1341
1342 ATM_SKB(sb)->vcc = vcc; 1342 ATM_SKB(sb)->vcc = vcc;
1343 do_gettimeofday(&sb->stamp); 1343 __net_timestamp(sb);
1344 vcc->push(vcc, sb); 1344 vcc->push(vcc, sb);
1345 atomic_inc(&vcc->stats->rx); 1345 atomic_inc(&vcc->stats->rx);
1346 1346
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ffe3afa723b8..51ec14787293 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1427,7 +1427,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
1427 skb_put(skb, size); 1427 skb_put(skb, size);
1428 vcc_rx_memcpy(skb->data, lvcc, size); 1428 vcc_rx_memcpy(skb->data, lvcc, size);
1429 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; 1429 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
1430 do_gettimeofday(&skb->stamp); 1430 __net_timestamp(skb);
1431 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); 1431 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
1432 atomic_inc(&lvcc->rx.atmvcc->stats->rx); 1432 atomic_inc(&lvcc->rx.atmvcc->stats->rx);
1433 out: 1433 out:
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..c57e20dcb0f8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
214static void __devinit ns_init_card_error(ns_dev *card, int error); 214static void __devinit ns_init_card_error(ns_dev *card, int error);
215static scq_info *get_scq(int size, u32 scd); 215static scq_info *get_scq(int size, u32 scd);
216static void free_scq(scq_info *scq, struct atm_vcc *vcc); 216static void free_scq(scq_info *scq, struct atm_vcc *vcc);
217static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, 217static void push_rxbufs(ns_dev *, struct sk_buff *);
218 u32 handle2, u32 addr2);
219static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs); 218static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
220static int ns_open(struct atm_vcc *vcc); 219static int ns_open(struct atm_vcc *vcc);
221static void ns_close(struct atm_vcc *vcc); 220static void ns_close(struct atm_vcc *vcc);
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
766 ns_init_card_error(card, error); 765 ns_init_card_error(card, error);
767 return error; 766 return error;
768 } 767 }
768 NS_SKB_CB(hb)->buf_type = BUF_NONE;
769 skb_queue_tail(&card->hbpool.queue, hb); 769 skb_queue_tail(&card->hbpool.queue, hb);
770 card->hbpool.count++; 770 card->hbpool.count++;
771 } 771 }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
786 ns_init_card_error(card, error); 786 ns_init_card_error(card, error);
787 return error; 787 return error;
788 } 788 }
789 NS_SKB_CB(lb)->buf_type = BUF_LG;
789 skb_queue_tail(&card->lbpool.queue, lb); 790 skb_queue_tail(&card->lbpool.queue, lb);
790 skb_reserve(lb, NS_SMBUFSIZE); 791 skb_reserve(lb, NS_SMBUFSIZE);
791 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); 792 push_rxbufs(card, lb);
792 /* Due to the implementation of push_rxbufs() this is 1, not 0 */ 793 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
793 if (j == 1) 794 if (j == 1)
794 { 795 {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
822 ns_init_card_error(card, error); 823 ns_init_card_error(card, error);
823 return error; 824 return error;
824 } 825 }
826 NS_SKB_CB(sb)->buf_type = BUF_SM;
825 skb_queue_tail(&card->sbpool.queue, sb); 827 skb_queue_tail(&card->sbpool.queue, sb);
826 skb_reserve(sb, NS_AAL0_HEADER); 828 skb_reserve(sb, NS_AAL0_HEADER);
827 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); 829 push_rxbufs(card, sb);
828 } 830 }
829 /* Test for strange behaviour which leads to crashes */ 831 /* Test for strange behaviour which leads to crashes */
830 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) 832 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
852 ns_init_card_error(card, error); 854 ns_init_card_error(card, error);
853 return error; 855 return error;
854 } 856 }
857 NS_SKB_CB(iovb)->buf_type = BUF_NONE;
855 skb_queue_tail(&card->iovpool.queue, iovb); 858 skb_queue_tail(&card->iovpool.queue, iovb);
856 card->iovpool.count++; 859 card->iovpool.count++;
857 } 860 }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
1078 1081
1079/* The handles passed must be pointers to the sk_buff containing the small 1082/* The handles passed must be pointers to the sk_buff containing the small
1080 or large buffer(s) cast to u32. */ 1083 or large buffer(s) cast to u32. */
1081static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, 1084static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
1082 u32 handle2, u32 addr2)
1083{ 1085{
1086 struct ns_skb_cb *cb = NS_SKB_CB(skb);
1087 u32 handle1, addr1;
1088 u32 handle2, addr2;
1084 u32 stat; 1089 u32 stat;
1085 unsigned long flags; 1090 unsigned long flags;
1086 1091
1092 /* *BARF* */
1093 handle2 = addr2 = 0;
1094 handle1 = (u32)skb;
1095 addr1 = (u32)virt_to_bus(skb->data);
1087 1096
1088#ifdef GENERAL_DEBUG 1097#ifdef GENERAL_DEBUG
1089 if (!addr1) 1098 if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1093 stat = readl(card->membase + STAT); 1102 stat = readl(card->membase + STAT);
1094 card->sbfqc = ns_stat_sfbqc_get(stat); 1103 card->sbfqc = ns_stat_sfbqc_get(stat);
1095 card->lbfqc = ns_stat_lfbqc_get(stat); 1104 card->lbfqc = ns_stat_lfbqc_get(stat);
1096 if (type == BUF_SM) 1105 if (cb->buf_type == BUF_SM)
1097 { 1106 {
1098 if (!addr2) 1107 if (!addr2)
1099 { 1108 {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1111 } 1120 }
1112 } 1121 }
1113 } 1122 }
1114 else /* type == BUF_LG */ 1123 else /* buf_type == BUF_LG */
1115 { 1124 {
1116 if (!addr2) 1125 if (!addr2)
1117 { 1126 {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1132 1141
1133 if (addr2) 1142 if (addr2)
1134 { 1143 {
1135 if (type == BUF_SM) 1144 if (cb->buf_type == BUF_SM)
1136 { 1145 {
1137 if (card->sbfqc >= card->sbnr.max) 1146 if (card->sbfqc >= card->sbnr.max)
1138 { 1147 {
1139 skb_unlink((struct sk_buff *) handle1); 1148 skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
1140 dev_kfree_skb_any((struct sk_buff *) handle1); 1149 dev_kfree_skb_any((struct sk_buff *) handle1);
1141 skb_unlink((struct sk_buff *) handle2); 1150 skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
1142 dev_kfree_skb_any((struct sk_buff *) handle2); 1151 dev_kfree_skb_any((struct sk_buff *) handle2);
1143 return; 1152 return;
1144 } 1153 }
1145 else 1154 else
1146 card->sbfqc += 2; 1155 card->sbfqc += 2;
1147 } 1156 }
1148 else /* (type == BUF_LG) */ 1157 else /* (buf_type == BUF_LG) */
1149 { 1158 {
1150 if (card->lbfqc >= card->lbnr.max) 1159 if (card->lbfqc >= card->lbnr.max)
1151 { 1160 {
1152 skb_unlink((struct sk_buff *) handle1); 1161 skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
1153 dev_kfree_skb_any((struct sk_buff *) handle1); 1162 dev_kfree_skb_any((struct sk_buff *) handle1);
1154 skb_unlink((struct sk_buff *) handle2); 1163 skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
1155 dev_kfree_skb_any((struct sk_buff *) handle2); 1164 dev_kfree_skb_any((struct sk_buff *) handle2);
1156 return; 1165 return;
1157 } 1166 }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1166 writel(handle2, card->membase + DR2); 1175 writel(handle2, card->membase + DR2);
1167 writel(addr1, card->membase + DR1); 1176 writel(addr1, card->membase + DR1);
1168 writel(handle1, card->membase + DR0); 1177 writel(handle1, card->membase + DR0);
1169 writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD); 1178 writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
1170 1179
1171 spin_unlock_irqrestore(&card->res_lock, flags); 1180 spin_unlock_irqrestore(&card->res_lock, flags);
1172 1181
1173 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, 1182 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
1174 (type == BUF_SM ? "small" : "large"), addr1, addr2); 1183 (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
1175 } 1184 }
1176 1185
1177 if (!card->efbie && card->sbfqc >= card->sbnr.min && 1186 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
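
push_rxbufs() now derives the buffer class from per-skb state instead of a type argument, which is what lets the recycle paths further down drop their skb->list back-pointer tests. A minimal sketch of the control-block plumbing this relies on, assuming NS_SKB_CB() follows the standard skb->cb pattern (the struct layout here is illustrative):

	/* Illustrative control block behind NS_SKB_CB(): buf_type says
	 * which free-buffer pool an skb belongs to (BUF_NONE marks skbs
	 * that must never be pushed back to the card). */
	struct ns_skb_cb {
		u32 buf_type;		/* BUF_NONE, BUF_SM or BUF_LG */
	};

	#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))
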
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
1322 card->efbie = 0; 1331 card->efbie = 0;
1323 break; 1332 break;
1324 } 1333 }
1334 NS_SKB_CB(sb)->buf_type = BUF_SM;
1325 skb_queue_tail(&card->sbpool.queue, sb); 1335 skb_queue_tail(&card->sbpool.queue, sb);
1326 skb_reserve(sb, NS_AAL0_HEADER); 1336 skb_reserve(sb, NS_AAL0_HEADER);
1327 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); 1337 push_rxbufs(card, sb);
1328 } 1338 }
1329 card->sbfqc = i; 1339 card->sbfqc = i;
1330 process_rsq(card); 1340 process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
1348 card->efbie = 0; 1358 card->efbie = 0;
1349 break; 1359 break;
1350 } 1360 }
1361 NS_SKB_CB(lb)->buf_type = BUF_LG;
1351 skb_queue_tail(&card->lbpool.queue, lb); 1362 skb_queue_tail(&card->lbpool.queue, lb);
1352 skb_reserve(lb, NS_SMBUFSIZE); 1363 skb_reserve(lb, NS_SMBUFSIZE);
1353 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); 1364 push_rxbufs(card, lb);
1354 } 1365 }
1355 card->lbfqc = i; 1366 card->lbfqc = i;
1356 process_rsq(card); 1367 process_rsq(card);
@@ -2202,7 +2213,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2202 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD); 2213 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
2203 skb_put(sb, ATM_CELL_PAYLOAD); 2214 skb_put(sb, ATM_CELL_PAYLOAD);
2204 ATM_SKB(sb)->vcc = vcc; 2215 ATM_SKB(sb)->vcc = vcc;
2205 do_gettimeofday(&sb->stamp); 2216 __net_timestamp(sb);
2206 vcc->push(vcc, sb); 2217 vcc->push(vcc, sb);
2207 atomic_inc(&vcc->stats->rx); 2218 atomic_inc(&vcc->stats->rx);
2208 cell += ATM_CELL_PAYLOAD; 2219 cell += ATM_CELL_PAYLOAD;
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2227 recycle_rx_buf(card, skb); 2238 recycle_rx_buf(card, skb);
2228 return; 2239 return;
2229 } 2240 }
2241 NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2230 } 2242 }
2231 else 2243 else
2232 if (--card->iovpool.count < card->iovnr.min) 2244 if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2234 struct sk_buff *new_iovb; 2246 struct sk_buff *new_iovb;
2235 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) 2247 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
2236 { 2248 {
2249 NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2237 skb_queue_tail(&card->iovpool.queue, new_iovb); 2250 skb_queue_tail(&card->iovpool.queue, new_iovb);
2238 card->iovpool.count++; 2251 card->iovpool.count++;
2239 } 2252 }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2264 2277
2265 if (NS_SKB(iovb)->iovcnt == 1) 2278 if (NS_SKB(iovb)->iovcnt == 1)
2266 { 2279 {
2267 if (skb->list != &card->sbpool.queue) 2280 if (NS_SKB_CB(skb)->buf_type != BUF_SM)
2268 { 2281 {
2269 printk("nicstar%d: Expected a small buffer, and this is not one.\n", 2282 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
2270 card->index); 2283 card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2278 } 2291 }
2279 else /* NS_SKB(iovb)->iovcnt >= 2 */ 2292 else /* NS_SKB(iovb)->iovcnt >= 2 */
2280 { 2293 {
2281 if (skb->list != &card->lbpool.queue) 2294 if (NS_SKB_CB(skb)->buf_type != BUF_LG)
2282 { 2295 {
2283 printk("nicstar%d: Expected a large buffer, and this is not one.\n", 2296 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
2284 card->index); 2297 card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2322 /* skb points to a small buffer */ 2335 /* skb points to a small buffer */
2323 if (!atm_charge(vcc, skb->truesize)) 2336 if (!atm_charge(vcc, skb->truesize))
2324 { 2337 {
2325 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 2338 push_rxbufs(card, skb);
2326 0, 0);
2327 atomic_inc(&vcc->stats->rx_drop); 2339 atomic_inc(&vcc->stats->rx_drop);
2328 } 2340 }
2329 else 2341 else
@@ -2334,7 +2346,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2334 skb->destructor = ns_sb_destructor; 2346 skb->destructor = ns_sb_destructor;
2335#endif /* NS_USE_DESTRUCTORS */ 2347#endif /* NS_USE_DESTRUCTORS */
2336 ATM_SKB(skb)->vcc = vcc; 2348 ATM_SKB(skb)->vcc = vcc;
2337 do_gettimeofday(&skb->stamp); 2349 __net_timestamp(skb);
2338 vcc->push(vcc, skb); 2350 vcc->push(vcc, skb);
2339 atomic_inc(&vcc->stats->rx); 2351 atomic_inc(&vcc->stats->rx);
2340 } 2352 }
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2350 { 2362 {
2351 if (!atm_charge(vcc, sb->truesize)) 2363 if (!atm_charge(vcc, sb->truesize))
2352 { 2364 {
2353 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 2365 push_rxbufs(card, sb);
2354 0, 0);
2355 atomic_inc(&vcc->stats->rx_drop); 2366 atomic_inc(&vcc->stats->rx_drop);
2356 } 2367 }
2357 else 2368 else
@@ -2362,21 +2373,19 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2362 sb->destructor = ns_sb_destructor; 2373 sb->destructor = ns_sb_destructor;
2363#endif /* NS_USE_DESTRUCTORS */ 2374#endif /* NS_USE_DESTRUCTORS */
2364 ATM_SKB(sb)->vcc = vcc; 2375 ATM_SKB(sb)->vcc = vcc;
2365 do_gettimeofday(&sb->stamp); 2376 __net_timestamp(sb);
2366 vcc->push(vcc, sb); 2377 vcc->push(vcc, sb);
2367 atomic_inc(&vcc->stats->rx); 2378 atomic_inc(&vcc->stats->rx);
2368 } 2379 }
2369 2380
2370 push_rxbufs(card, BUF_LG, (u32) skb, 2381 push_rxbufs(card, skb);
2371 (u32) virt_to_bus(skb->data), 0, 0);
2372 2382
2373 } 2383 }
2374 else /* len > NS_SMBUFSIZE, the usual case */ 2384 else /* len > NS_SMBUFSIZE, the usual case */
2375 { 2385 {
2376 if (!atm_charge(vcc, skb->truesize)) 2386 if (!atm_charge(vcc, skb->truesize))
2377 { 2387 {
2378 push_rxbufs(card, BUF_LG, (u32) skb, 2388 push_rxbufs(card, skb);
2379 (u32) virt_to_bus(skb->data), 0, 0);
2380 atomic_inc(&vcc->stats->rx_drop); 2389 atomic_inc(&vcc->stats->rx_drop);
2381 } 2390 }
2382 else 2391 else
@@ -2389,13 +2398,12 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2389 memcpy(skb->data, sb->data, NS_SMBUFSIZE); 2398 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
2390 skb_put(skb, len - NS_SMBUFSIZE); 2399 skb_put(skb, len - NS_SMBUFSIZE);
2391 ATM_SKB(skb)->vcc = vcc; 2400 ATM_SKB(skb)->vcc = vcc;
2392 do_gettimeofday(&skb->stamp); 2401 __net_timestamp(skb);
2393 vcc->push(vcc, skb); 2402 vcc->push(vcc, skb);
2394 atomic_inc(&vcc->stats->rx); 2403 atomic_inc(&vcc->stats->rx);
2395 } 2404 }
2396 2405
2397 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 2406 push_rxbufs(card, sb);
2398 0, 0);
2399 2407
2400 } 2408 }
2401 2409
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2430 card->hbpool.count++; 2438 card->hbpool.count++;
2431 } 2439 }
2432 } 2440 }
2441 NS_SKB_CB(hb)->buf_type = BUF_NONE;
2433 } 2442 }
2434 else 2443 else
2435 if (--card->hbpool.count < card->hbnr.min) 2444 if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2437 struct sk_buff *new_hb; 2446 struct sk_buff *new_hb;
2438 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) 2447 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2439 { 2448 {
2449 NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
2440 skb_queue_tail(&card->hbpool.queue, new_hb); 2450 skb_queue_tail(&card->hbpool.queue, new_hb);
2441 card->hbpool.count++; 2451 card->hbpool.count++;
2442 } 2452 }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2444 { 2454 {
2445 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) 2455 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2446 { 2456 {
2457 NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
2447 skb_queue_tail(&card->hbpool.queue, new_hb); 2458 skb_queue_tail(&card->hbpool.queue, new_hb);
2448 card->hbpool.count++; 2459 card->hbpool.count++;
2449 } 2460 }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2473 remaining = len - iov->iov_len; 2484 remaining = len - iov->iov_len;
2474 iov++; 2485 iov++;
2475 /* Free the small buffer */ 2486 /* Free the small buffer */
2476 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 2487 push_rxbufs(card, sb);
2477 0, 0);
2478 2488
2479 /* Copy all large buffers to the huge buffer and free them */ 2489 /* Copy all large buffers to the huge buffer and free them */
2480 for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) 2490 for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2485 skb_put(hb, tocopy); 2495 skb_put(hb, tocopy);
2486 iov++; 2496 iov++;
2487 remaining -= tocopy; 2497 remaining -= tocopy;
2488 push_rxbufs(card, BUF_LG, (u32) lb, 2498 push_rxbufs(card, lb);
2489 (u32) virt_to_bus(lb->data), 0, 0);
2490 } 2499 }
2491#ifdef EXTRA_DEBUG 2500#ifdef EXTRA_DEBUG
2492 if (remaining != 0 || hb->len != len) 2501 if (remaining != 0 || hb->len != len)
@@ -2496,7 +2505,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2496#ifdef NS_USE_DESTRUCTORS 2505#ifdef NS_USE_DESTRUCTORS
2497 hb->destructor = ns_hb_destructor; 2506 hb->destructor = ns_hb_destructor;
2498#endif /* NS_USE_DESTRUCTORS */ 2507#endif /* NS_USE_DESTRUCTORS */
2499 do_gettimeofday(&hb->stamp); 2508 __net_timestamp(hb);
2500 vcc->push(vcc, hb); 2509 vcc->push(vcc, hb);
2501 atomic_inc(&vcc->stats->rx); 2510 atomic_inc(&vcc->stats->rx);
2502 } 2511 }
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
2527 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2536 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2528 if (sb == NULL) 2537 if (sb == NULL)
2529 break; 2538 break;
2539 NS_SKB_CB(sb)->buf_type = BUF_SM;
2530 skb_queue_tail(&card->sbpool.queue, sb); 2540 skb_queue_tail(&card->sbpool.queue, sb);
2531 skb_reserve(sb, NS_AAL0_HEADER); 2541 skb_reserve(sb, NS_AAL0_HEADER);
2532 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); 2542 push_rxbufs(card, sb);
2533 } while (card->sbfqc < card->sbnr.min); 2543 } while (card->sbfqc < card->sbnr.min);
2534} 2544}
2535 2545
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
2550 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2560 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2551 if (lb == NULL) 2561 if (lb == NULL)
2552 break; 2562 break;
2563 NS_SKB_CB(lb)->buf_type = BUF_LG;
2553 skb_queue_tail(&card->lbpool.queue, lb); 2564 skb_queue_tail(&card->lbpool.queue, lb);
2554 skb_reserve(lb, NS_SMBUFSIZE); 2565 skb_reserve(lb, NS_SMBUFSIZE);
2555 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); 2566 push_rxbufs(card, lb);
2556 } while (card->lbfqc < card->lbnr.min); 2567 } while (card->lbfqc < card->lbnr.min);
2557} 2568}
2558 2569
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
2569 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2580 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2570 if (hb == NULL) 2581 if (hb == NULL)
2571 break; 2582 break;
2583 NS_SKB_CB(hb)->buf_type = BUF_NONE;
2572 skb_queue_tail(&card->hbpool.queue, hb); 2584 skb_queue_tail(&card->hbpool.queue, hb);
2573 card->hbpool.count++; 2585 card->hbpool.count++;
2574 } 2586 }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
2577#endif /* NS_USE_DESTRUCTORS */ 2589#endif /* NS_USE_DESTRUCTORS */
2578 2590
2579 2591
2580
2581static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) 2592static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
2582{ 2593{
2583 if (skb->list == &card->sbpool.queue) 2594 struct ns_skb_cb *cb = NS_SKB_CB(skb);
2584 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2585 else if (skb->list == &card->lbpool.queue)
2586 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2587 else
2588 {
2589 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2590 dev_kfree_skb_any(skb);
2591 }
2592}
2593 2595
2596 if (unlikely(cb->buf_type == BUF_NONE)) {
2597 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2598 dev_kfree_skb_any(skb);
2599 } else
2600 push_rxbufs(card, skb);
2601}
2594 2602
2595 2603
2596static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) 2604static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
2597{ 2605{
2598 struct sk_buff *skb; 2606 while (count-- > 0)
2599 2607 recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
2600 for (; count > 0; count--)
2601 {
2602 skb = (struct sk_buff *) (iov++)->iov_base;
2603 if (skb->list == &card->sbpool.queue)
2604 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2605 0, 0);
2606 else if (skb->list == &card->lbpool.queue)
2607 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
2608 0, 0);
2609 else
2610 {
2611 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2612 dev_kfree_skb_any(skb);
2613 }
2614 }
2615} 2608}
2616 2609
2617 2610
2618
2619static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) 2611static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2620{ 2612{
2621 if (card->iovpool.count < card->iovnr.max) 2613 if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2631 2623
2632static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) 2624static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2633{ 2625{
2634 skb_unlink(sb); 2626 skb_unlink(sb, &card->sbpool.queue);
2635#ifdef NS_USE_DESTRUCTORS 2627#ifdef NS_USE_DESTRUCTORS
2636 if (card->sbfqc < card->sbnr.min) 2628 if (card->sbfqc < card->sbnr.min)
2637#else 2629#else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2640 struct sk_buff *new_sb; 2632 struct sk_buff *new_sb;
2641 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2633 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2642 { 2634 {
2635 NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2643 skb_queue_tail(&card->sbpool.queue, new_sb); 2636 skb_queue_tail(&card->sbpool.queue, new_sb);
2644 skb_reserve(new_sb, NS_AAL0_HEADER); 2637 skb_reserve(new_sb, NS_AAL0_HEADER);
2645 push_rxbufs(card, BUF_SM, (u32) new_sb, 2638 push_rxbufs(card, new_sb);
2646 (u32) virt_to_bus(new_sb->data), 0, 0);
2647 } 2639 }
2648 } 2640 }
2649 if (card->sbfqc < card->sbnr.init) 2641 if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2652 struct sk_buff *new_sb; 2644 struct sk_buff *new_sb;
2653 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) 2645 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2654 { 2646 {
2647 NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2655 skb_queue_tail(&card->sbpool.queue, new_sb); 2648 skb_queue_tail(&card->sbpool.queue, new_sb);
2656 skb_reserve(new_sb, NS_AAL0_HEADER); 2649 skb_reserve(new_sb, NS_AAL0_HEADER);
2657 push_rxbufs(card, BUF_SM, (u32) new_sb, 2650 push_rxbufs(card, new_sb);
2658 (u32) virt_to_bus(new_sb->data), 0, 0);
2659 } 2651 }
2660 } 2652 }
2661} 2653}
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2664 2656
2665static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) 2657static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2666{ 2658{
2667 skb_unlink(lb); 2659 skb_unlink(lb, &card->lbpool.queue);
2668#ifdef NS_USE_DESTRUCTORS 2660#ifdef NS_USE_DESTRUCTORS
2669 if (card->lbfqc < card->lbnr.min) 2661 if (card->lbfqc < card->lbnr.min)
2670#else 2662#else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2673 struct sk_buff *new_lb; 2665 struct sk_buff *new_lb;
2674 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2666 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2675 { 2667 {
2668 NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2676 skb_queue_tail(&card->lbpool.queue, new_lb); 2669 skb_queue_tail(&card->lbpool.queue, new_lb);
2677 skb_reserve(new_lb, NS_SMBUFSIZE); 2670 skb_reserve(new_lb, NS_SMBUFSIZE);
2678 push_rxbufs(card, BUF_LG, (u32) new_lb, 2671 push_rxbufs(card, new_lb);
2679 (u32) virt_to_bus(new_lb->data), 0, 0);
2680 } 2672 }
2681 } 2673 }
2682 if (card->lbfqc < card->lbnr.init) 2674 if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2685 struct sk_buff *new_lb; 2677 struct sk_buff *new_lb;
2686 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) 2678 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2687 { 2679 {
2680 NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2688 skb_queue_tail(&card->lbpool.queue, new_lb); 2681 skb_queue_tail(&card->lbpool.queue, new_lb);
2689 skb_reserve(new_lb, NS_SMBUFSIZE); 2682 skb_reserve(new_lb, NS_SMBUFSIZE);
2690 push_rxbufs(card, BUF_LG, (u32) new_lb, 2683 push_rxbufs(card, new_lb);
2691 (u32) virt_to_bus(new_lb->data), 0, 0);
2692 } 2684 }
2693 } 2685 }
2694} 2686}
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2880 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 2872 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2881 if (sb == NULL) 2873 if (sb == NULL)
2882 return -ENOMEM; 2874 return -ENOMEM;
2875 NS_SKB_CB(sb)->buf_type = BUF_SM;
2883 skb_queue_tail(&card->sbpool.queue, sb); 2876 skb_queue_tail(&card->sbpool.queue, sb);
2884 skb_reserve(sb, NS_AAL0_HEADER); 2877 skb_reserve(sb, NS_AAL0_HEADER);
2885 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); 2878 push_rxbufs(card, sb);
2886 } 2879 }
2887 break; 2880 break;
2888 2881
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2894 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 2887 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2895 if (lb == NULL) 2888 if (lb == NULL)
2896 return -ENOMEM; 2889 return -ENOMEM;
2890 NS_SKB_CB(lb)->buf_type = BUF_LG;
2897 skb_queue_tail(&card->lbpool.queue, lb); 2891 skb_queue_tail(&card->lbpool.queue, lb);
2898 skb_reserve(lb, NS_SMBUFSIZE); 2892 skb_reserve(lb, NS_SMBUFSIZE);
2899 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); 2893 push_rxbufs(card, lb);
2900 } 2894 }
2901 break; 2895 break;
2902 2896
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2923 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 2917 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2924 if (hb == NULL) 2918 if (hb == NULL)
2925 return -ENOMEM; 2919 return -ENOMEM;
2920 NS_SKB_CB(hb)->buf_type = BUF_NONE;
2926 ns_grab_int_lock(card, flags); 2921 ns_grab_int_lock(card, flags);
2927 skb_queue_tail(&card->hbpool.queue, hb); 2922 skb_queue_tail(&card->hbpool.queue, hb);
2928 card->hbpool.count++; 2923 card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2953 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 2948 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2954 if (iovb == NULL) 2949 if (iovb == NULL)
2955 return -ENOMEM; 2950 return -ENOMEM;
2951 NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2956 ns_grab_int_lock(card, flags); 2952 ns_grab_int_lock(card, flags);
2957 skb_queue_tail(&card->iovpool.queue, iovb); 2953 skb_queue_tail(&card->iovpool.queue, iovb);
2958 card->iovpool.count++; 2954 card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2979} 2975}
2980 2976
2981 2977
2982
2983static void which_list(ns_dev *card, struct sk_buff *skb) 2978static void which_list(ns_dev *card, struct sk_buff *skb)
2984{ 2979{
2985 printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ? 2980 printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
2986 "small" : skb->list == &card->lbpool.queue ? "large" :
2987 skb->list == &card->hbpool.queue ? "huge" :
2988 skb->list == &card->iovpool.queue ? "iovec" : "unknown");
2989} 2981}
2990 2982
2991 2983
2992
2993static void ns_poll(unsigned long arg) 2984static void ns_poll(unsigned long arg)
2994{ 2985{
2995 int i; 2986 int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -103,8 +103,14 @@
103 103
104#define NS_IOREMAP_SIZE 4096 104#define NS_IOREMAP_SIZE 4096
105 105
106#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */ 106/*
107#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */ 107 * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
108 * BUF_SM and BUF_LG are both used by the driver and the device.
109 * BUF_NONE is only used by the driver.
110 */
111#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */
112#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
113#define BUF_NONE 0xffffffff /* Software only: */
108 114
109#define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */ 115#define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */
110#define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \ 116#define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
684/* Device driver structures ***************************************************/ 690/* Device driver structures ***************************************************/
685 691
686 692
693struct ns_skb_cb {
694 u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
695};
696
697#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
698
687typedef struct tsq_info 699typedef struct tsq_info
688{ 700{
689 void *org; 701 void *org;
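
Taken together, the nicstar hunks replace the removed skb->list back-pointer with a tag kept in the skb control buffer, which is why push_rxbufs() no longer needs the buffer size and bus address passed alongside the skb. A minimal sketch of the pattern using the patch's own names (alloc_tagged and is_free_queue_buf are illustrative helpers, not driver functions):

#include <linux/types.h>
#include <linux/skbuff.h>

#define BUF_SM   0x00000000             /* values as defined in nicstar.h */
#define BUF_LG   0x00000001
#define BUF_NONE 0xffffffff

/* skb->cb[] is scratch space owned by whoever currently holds the
 * skb; the driver overlays a one-field struct on it. */
struct ns_skb_cb {
	u32 buf_type;                   /* BUF_SM / BUF_LG / BUF_NONE */
};
#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))

/* Illustrative helper: tag every rx buffer at allocation time... */
static struct sk_buff *alloc_tagged(unsigned int size, u32 type)
{
	struct sk_buff *skb = dev_alloc_skb(size);

	if (skb)
		NS_SKB_CB(skb)->buf_type = type;
	return skb;
}

/* ...so later code dispatches on the tag instead of comparing the
 * old skb->list pointer against each pool queue. */
static int is_free_queue_buf(struct sk_buff *skb)
{
	return NS_SKB_CB(skb)->buf_type != BUF_NONE;
}
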
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..c4b75ecf9460 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -400,7 +400,7 @@ unsigned long *x;
400EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> 400EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
401 uPD98401_AAL5_ES_SHIFT,error); 401 uPD98401_AAL5_ES_SHIFT,error);
402 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; 402 skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
403 do_gettimeofday(&skb->stamp); 403 __net_timestamp(skb);
404#if 0 404#if 0
405printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], 405printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
406 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], 406 ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
417 chan = (here[3] & uPD98401_AAL5_CHAN) >> 417 chan = (here[3] & uPD98401_AAL5_CHAN) >>
418 uPD98401_AAL5_CHAN_SHIFT; 418 uPD98401_AAL5_CHAN_SHIFT;
419 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { 419 if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
420 int pos = ZATM_VCC(vcc)->pool;
421
420 vcc = zatm_dev->rx_map[chan]; 422 vcc = zatm_dev->rx_map[chan];
421 if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool]) 423 if (skb == zatm_dev->last_free[pos])
422 zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL; 424 zatm_dev->last_free[pos] = NULL;
423 skb_unlink(skb); 425 skb_unlink(skb, zatm_dev->pool + pos);
424 } 426 }
425 else { 427 else {
426 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " 428 printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
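
The zatm hunk shows the two companion API changes rippling through this series: skb_unlink() now takes the owning queue explicitly (the one-argument form depended on the removed skb->list field), and __net_timestamp(skb) replaces the open-coded do_gettimeofday(&skb->stamp) as the timestamp moves behind a helper. A hedged sketch of the new skb_unlink() calling convention (queue and function names are illustrative):

#include <linux/skbuff.h>

static struct sk_buff_head pending_q;   /* illustrative owner queue */

static void drop_pending(struct sk_buff *skb)
{
	/* The caller names the queue; skb_unlink() takes that queue's
	 * lock and detaches the skb from it. */
	skb_unlink(skb, &pending_q);
	kfree_skb(skb);
}
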
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 9e6f51c528b0..4be976940f69 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -120,7 +120,7 @@ aoenet_xmit(struct sk_buff *sl)
120 * (1) len doesn't include the header by default. I want this. 120 * (1) len doesn't include the header by default. I want this.
121 */ 121 */
122static int 122static int
123aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt) 123aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
124{ 124{
125 struct aoe_hdr *h; 125 struct aoe_hdr *h;
126 u32 n; 126 u32 n;
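
The new struct net_device *orig_dev argument to aoenet_rcv() tracks the updated packet_type receive hook: handlers are now passed the device the frame originally arrived on in addition to the device it was delivered on (these differ under bonding or similar stacking). A sketch of a conforming handler, with hypothetical names:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* dev: delivery device; orig_dev: physical ingress device. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type my_pt = {
	.type = __constant_htons(ETH_P_ALL),
	.func = my_rcv,                 /* registered via dev_add_pack() */
};
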
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 2435a7c99b2b..cd056e7e64ec 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -47,7 +47,7 @@ static int cfq_slice_idle = HZ / 100;
47/* 47/*
48 * disable queueing at the driver/hardware level 48 * disable queueing at the driver/hardware level
49 */ 49 */
50static int cfq_max_depth = 1; 50static int cfq_max_depth = 2;
51 51
52/* 52/*
53 * for the hash of cfqq inside the cfqd 53 * for the hash of cfqq inside the cfqd
@@ -385,9 +385,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
385 return crq2; 385 return crq2;
386 if (crq2 == NULL) 386 if (crq2 == NULL)
387 return crq1; 387 return crq1;
388 if (cfq_crq_requeued(crq1)) 388
389 if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
389 return crq1; 390 return crq1;
390 if (cfq_crq_requeued(crq2)) 391 else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
392 return crq2;
393
394 if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
395 return crq1;
396 else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
391 return crq2; 397 return crq2;
392 398
393 s1 = crq1->request->sector; 399 s1 = crq1->request->sector;
@@ -1769,18 +1775,23 @@ static void
1769cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1775cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1770 struct cfq_rq *crq) 1776 struct cfq_rq *crq)
1771{ 1777{
1772 const int sync = cfq_crq_is_sync(crq); 1778 struct cfq_io_context *cic;
1773 1779
1774 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); 1780 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
1775 1781
1776 if (sync) { 1782 /*
1777 struct cfq_io_context *cic = crq->io_context; 1783 * we never wait for an async request and we don't allow preemption
1784 * of an async request. so just return early
1785 */
1786 if (!cfq_crq_is_sync(crq))
1787 return;
1778 1788
1779 cfq_update_io_thinktime(cfqd, cic); 1789 cic = crq->io_context;
1780 cfq_update_idle_window(cfqd, cfqq, cic);
1781 1790
1782 cic->last_queue = jiffies; 1791 cfq_update_io_thinktime(cfqd, cic);
1783 } 1792 cfq_update_idle_window(cfqd, cfqq, cic);
1793
1794 cic->last_queue = jiffies;
1784 1795
1785 if (cfqq == cfqd->active_queue) { 1796 if (cfqq == cfqd->active_queue) {
1786 /* 1797 /*
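
Two related fixes sit in the cfq hunks above. cfq_max_depth moves from 1 to 2, and cfq_choose_req() loses an ordering bug: the old code preferred crq1 whenever it was requeued, even when crq2 was requeued as well, so the answer depended on argument order. The new code prefers a request only when it alone has the property, then applies the same symmetric rule to sync-over-async before falling through to sector distance. The tie-break pattern in isolation (a sketch; the flag arguments stand in for cfq_crq_requeued()/cfq_crq_is_sync()):

/* Prefer a over b only when a alone has the property; report a tie
 * when both or neither do, so the caller can apply the next rule. */
static int prefer(int a_has, int b_has)
{
	if (a_has && !b_has)
		return -1;              /* pick a */
	if (b_has && !a_has)
		return 1;               /* pick b */
	return 0;                       /* tie: fall through */
}
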
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 46e56a25d2c8..e46ecd23b3ac 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -776,7 +776,7 @@ static int viodasd_remove(struct vio_dev *vdev)
776 */ 776 */
777static struct vio_device_id viodasd_device_table[] __devinitdata = { 777static struct vio_device_id viodasd_device_table[] __devinitdata = {
778 { "viodasd", "" }, 778 { "viodasd", "" },
779 { 0, } 779 { "", "" }
780}; 780};
781 781
782MODULE_DEVICE_TABLE(vio, viodasd_device_table); 782MODULE_DEVICE_TABLE(vio, viodasd_device_table);
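
The viodasd terminator change is cosmetic but type-correct: struct vio_device_id is matched on strings, and while { 0, } and { "", "" } produce the same all-zero sentinel in memory, only the latter matches the field types. The convention, as the patch leaves it:

static struct vio_device_id viodasd_device_table[] __devinitdata = {
	{ "viodasd", "" },
	{ "", "" }                      /* empty strings end the table */
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);
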
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5..1e9db0156ea7 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
158 if (err) { 158 if (err) {
159 BT_ERR("%s bulk tx submit failed urb %p err %d", 159 BT_ERR("%s bulk tx submit failed urb %p err %d",
160 bfusb->hdev->name, urb, err); 160 bfusb->hdev->name, urb, err);
161 skb_unlink(skb); 161 skb_unlink(skb, &bfusb->pending_q);
162 usb_free_urb(urb); 162 usb_free_urb(urb);
163 } else 163 } else
164 atomic_inc(&bfusb->pending_tx); 164 atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
212 212
213 read_lock(&bfusb->lock); 213 read_lock(&bfusb->lock);
214 214
215 skb_unlink(skb); 215 skb_unlink(skb, &bfusb->pending_q);
216 skb_queue_tail(&bfusb->completed_q, skb); 216 skb_queue_tail(&bfusb->completed_q, skb);
217 217
218 bfusb_tx_wakeup(bfusb); 218 bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
253 if (err) { 253 if (err) {
254 BT_ERR("%s bulk rx submit failed urb %p err %d", 254 BT_ERR("%s bulk rx submit failed urb %p err %d",
255 bfusb->hdev->name, urb, err); 255 bfusb->hdev->name, urb, err);
256 skb_unlink(skb); 256 skb_unlink(skb, &bfusb->pending_q);
257 kfree_skb(skb); 257 kfree_skb(skb);
258 usb_free_urb(urb); 258 usb_free_urb(urb);
259 } 259 }
@@ -330,7 +330,7 @@ static inline int bfusb_recv_block(struct bfusb *bfusb, int hdr, unsigned char *
330 } 330 }
331 331
332 skb->dev = (void *) bfusb->hdev; 332 skb->dev = (void *) bfusb->hdev;
333 skb->pkt_type = pkt_type; 333 bt_cb(skb)->pkt_type = pkt_type;
334 334
335 bfusb->reassembly = skb; 335 bfusb->reassembly = skb;
336 } else { 336 } else {
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
398 buf += len; 398 buf += len;
399 } 399 }
400 400
401 skb_unlink(skb); 401 skb_unlink(skb, &bfusb->pending_q);
402 kfree_skb(skb); 402 kfree_skb(skb);
403 403
404 bfusb_rx_submit(bfusb, urb); 404 bfusb_rx_submit(bfusb, urb);
@@ -485,7 +485,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
485 unsigned char buf[3]; 485 unsigned char buf[3];
486 int sent = 0, size, count; 486 int sent = 0, size, count;
487 487
488 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len); 488 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
489 489
490 if (!hdev) { 490 if (!hdev) {
491 BT_ERR("Frame for unknown HCI device (hdev=NULL)"); 491 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
@@ -497,7 +497,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
497 497
498 bfusb = (struct bfusb *) hdev->driver_data; 498 bfusb = (struct bfusb *) hdev->driver_data;
499 499
500 switch (skb->pkt_type) { 500 switch (bt_cb(skb)->pkt_type) {
501 case HCI_COMMAND_PKT: 501 case HCI_COMMAND_PKT:
502 hdev->stat.cmd_tx++; 502 hdev->stat.cmd_tx++;
503 break; 503 break;
@@ -510,7 +510,7 @@ static int bfusb_send_frame(struct sk_buff *skb)
510 }; 510 };
511 511
512 /* Prepend skb with frame type */ 512 /* Prepend skb with frame type */
513 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 513 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
514 514
515 count = skb->len; 515 count = skb->len;
516 516
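
Every Bluetooth hunk from here on performs one mechanical substitution: the HCI frame type moves from the removed skb->pkt_type field into the skb control buffer behind the bt_cb() accessor. A minimal sketch, assuming the reduced bt_skb_cb layout relevant here (the real struct in <net/bluetooth/bluetooth.h> carries additional fields):

#include <linux/skbuff.h>
#include <linux/string.h>

struct bt_skb_cb {
	__u8 pkt_type;                  /* HCI_COMMAND_PKT, ... */
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))

static void rx_tag(struct sk_buff *skb, __u8 type)
{
	bt_cb(skb)->pkt_type = type;    /* tag an incoming frame */
}

static void tx_prepend(struct sk_buff *skb)
{
	/* Prepend the frame-type byte from the cb, as the drivers
	 * above do before queueing a frame for transmission. */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
}
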
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index bd2ec7e284cc..26fe9c0e1d20 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -270,7 +270,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
270 if (!(skb = skb_dequeue(&(info->txq)))) 270 if (!(skb = skb_dequeue(&(info->txq))))
271 break; 271 break;
272 272
273 if (skb->pkt_type & 0x80) { 273 if (bt_cb(skb)->pkt_type & 0x80) {
274 /* Disable RTS */ 274 /* Disable RTS */
275 info->ctrl_reg |= REG_CONTROL_RTS; 275 info->ctrl_reg |= REG_CONTROL_RTS;
276 outb(info->ctrl_reg, iobase + REG_CONTROL); 276 outb(info->ctrl_reg, iobase + REG_CONTROL);
@@ -288,13 +288,13 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
288 /* Mark the buffer as dirty */ 288 /* Mark the buffer as dirty */
289 clear_bit(ready_bit, &(info->tx_state)); 289 clear_bit(ready_bit, &(info->tx_state));
290 290
291 if (skb->pkt_type & 0x80) { 291 if (bt_cb(skb)->pkt_type & 0x80) {
292 DECLARE_WAIT_QUEUE_HEAD(wq); 292 DECLARE_WAIT_QUEUE_HEAD(wq);
293 DEFINE_WAIT(wait); 293 DEFINE_WAIT(wait);
294 294
295 unsigned char baud_reg; 295 unsigned char baud_reg;
296 296
297 switch (skb->pkt_type) { 297 switch (bt_cb(skb)->pkt_type) {
298 case PKT_BAUD_RATE_460800: 298 case PKT_BAUD_RATE_460800:
299 baud_reg = REG_CONTROL_BAUD_RATE_460800; 299 baud_reg = REG_CONTROL_BAUD_RATE_460800;
300 break; 300 break;
@@ -410,9 +410,9 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
410 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 410 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
411 411
412 info->rx_skb->dev = (void *) info->hdev; 412 info->rx_skb->dev = (void *) info->hdev;
413 info->rx_skb->pkt_type = buf[i]; 413 bt_cb(info->rx_skb)->pkt_type = buf[i];
414 414
415 switch (info->rx_skb->pkt_type) { 415 switch (bt_cb(info->rx_skb)->pkt_type) {
416 416
417 case 0x00: 417 case 0x00:
418 /* init packet */ 418 /* init packet */
@@ -444,7 +444,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
444 444
445 default: 445 default:
446 /* unknown packet */ 446 /* unknown packet */
447 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 447 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
448 info->hdev->stat.err_rx++; 448 info->hdev->stat.err_rx++;
449 449
450 kfree_skb(info->rx_skb); 450 kfree_skb(info->rx_skb);
@@ -586,21 +586,21 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
586 switch (baud) { 586 switch (baud) {
587 case 460800: 587 case 460800:
588 cmd[4] = 0x00; 588 cmd[4] = 0x00;
589 skb->pkt_type = PKT_BAUD_RATE_460800; 589 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800;
590 break; 590 break;
591 case 230400: 591 case 230400:
592 cmd[4] = 0x01; 592 cmd[4] = 0x01;
593 skb->pkt_type = PKT_BAUD_RATE_230400; 593 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400;
594 break; 594 break;
595 case 115200: 595 case 115200:
596 cmd[4] = 0x02; 596 cmd[4] = 0x02;
597 skb->pkt_type = PKT_BAUD_RATE_115200; 597 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200;
598 break; 598 break;
599 case 57600: 599 case 57600:
600 /* Fall through... */ 600 /* Fall through... */
601 default: 601 default:
602 cmd[4] = 0x03; 602 cmd[4] = 0x03;
603 skb->pkt_type = PKT_BAUD_RATE_57600; 603 bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600;
604 break; 604 break;
605 } 605 }
606 606
@@ -680,7 +680,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
680 680
681 info = (bluecard_info_t *)(hdev->driver_data); 681 info = (bluecard_info_t *)(hdev->driver_data);
682 682
683 switch (skb->pkt_type) { 683 switch (bt_cb(skb)->pkt_type) {
684 case HCI_COMMAND_PKT: 684 case HCI_COMMAND_PKT:
685 hdev->stat.cmd_tx++; 685 hdev->stat.cmd_tx++;
686 break; 686 break;
@@ -693,7 +693,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
693 }; 693 };
694 694
695 /* Prepend skb with frame type */ 695 /* Prepend skb with frame type */
696 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 696 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
697 skb_queue_tail(&(info->txq), skb); 697 skb_queue_tail(&(info->txq), skb);
698 698
699 bluecard_write_wakeup(info); 699 bluecard_write_wakeup(info);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index f696da6f417b..a1bf8f066c88 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -105,7 +105,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
105 if (skb) { 105 if (skb) {
106 memcpy(skb_put(skb, len), buf, len); 106 memcpy(skb_put(skb, len), buf, len);
107 skb->dev = (void *) data->hdev; 107 skb->dev = (void *) data->hdev;
108 skb->pkt_type = HCI_ACLDATA_PKT; 108 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
109 hci_recv_frame(skb); 109 hci_recv_frame(skb);
110 } 110 }
111 break; 111 break;
@@ -117,7 +117,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
117 if (skb) { 117 if (skb) {
118 memcpy(skb_put(skb, len), buf, len); 118 memcpy(skb_put(skb, len), buf, len);
119 skb->dev = (void *) data->hdev; 119 skb->dev = (void *) data->hdev;
120 skb->pkt_type = HCI_SCODATA_PKT; 120 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
121 hci_recv_frame(skb); 121 hci_recv_frame(skb);
122 } 122 }
123 break; 123 break;
@@ -129,7 +129,7 @@ static void bpa10x_recv_bulk(struct bpa10x_data *data, unsigned char *buf, int c
129 if (skb) { 129 if (skb) {
130 memcpy(skb_put(skb, len), buf, len); 130 memcpy(skb_put(skb, len), buf, len);
131 skb->dev = (void *) data->hdev; 131 skb->dev = (void *) data->hdev;
132 skb->pkt_type = HCI_VENDOR_PKT; 132 bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
133 hci_recv_frame(skb); 133 hci_recv_frame(skb);
134 } 134 }
135 break; 135 break;
@@ -190,7 +190,7 @@ static int bpa10x_recv_event(struct bpa10x_data *data, unsigned char *buf, int s
190 } 190 }
191 191
192 skb->dev = (void *) data->hdev; 192 skb->dev = (void *) data->hdev;
193 skb->pkt_type = pkt_type; 193 bt_cb(skb)->pkt_type = pkt_type;
194 194
195 memcpy(skb_put(skb, size), buf, size); 195 memcpy(skb_put(skb, size), buf, size);
196 196
@@ -307,7 +307,8 @@ unlock:
307 read_unlock(&data->lock); 307 read_unlock(&data->lock);
308} 308}
309 309
310static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, size_t size, int flags, void *data) 310static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
311 size_t size, unsigned int __nocast flags, void *data)
311{ 312{
312 struct urb *urb; 313 struct urb *urb;
313 struct usb_ctrlrequest *cr; 314 struct usb_ctrlrequest *cr;
@@ -487,7 +488,7 @@ static int bpa10x_send_frame(struct sk_buff *skb)
487 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 488 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
488 struct bpa10x_data *data; 489 struct bpa10x_data *data;
489 490
490 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, skb->pkt_type, skb->len); 491 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
491 492
492 if (!hdev) { 493 if (!hdev) {
493 BT_ERR("Frame for unknown HCI device"); 494 BT_ERR("Frame for unknown HCI device");
@@ -500,9 +501,9 @@ static int bpa10x_send_frame(struct sk_buff *skb)
500 data = hdev->driver_data; 501 data = hdev->driver_data;
501 502
502 /* Prepend skb with frame type */ 503 /* Prepend skb with frame type */
503 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 504 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
504 505
505 switch (skb->pkt_type) { 506 switch (bt_cb(skb)->pkt_type) {
506 case HCI_COMMAND_PKT: 507 case HCI_COMMAND_PKT:
507 hdev->stat.cmd_tx++; 508 hdev->stat.cmd_tx++;
508 skb_queue_tail(&data->cmd_queue, skb); 509 skb_queue_tail(&data->cmd_queue, skb);
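
bpa10x_alloc_urb() here, and _urb_alloc() in hci_usb.c below, change their gfp parameter to unsigned int __nocast. __nocast is a sparse annotation with no runtime effect: it forbids implicit conversion into the parameter, so the checker flags callers that pass an arbitrary integer where allocation flags belong. The idiom (function name is illustrative):

#include <linux/slab.h>

/* __nocast expands to nothing for the compiler; only sparse sees it. */
static void *alloc_scratch(size_t size, unsigned int __nocast gfp)
{
	return kmalloc(size, gfp);      /* e.g. alloc_scratch(64, GFP_ATOMIC) */
}
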
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index adf1750ea58d..2e0338d80f32 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -259,11 +259,11 @@ static void bt3c_receive(bt3c_info_t *info)
259 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 259 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
260 260
261 info->rx_skb->dev = (void *) info->hdev; 261 info->rx_skb->dev = (void *) info->hdev;
262 info->rx_skb->pkt_type = inb(iobase + DATA_L); 262 bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
263 inb(iobase + DATA_H); 263 inb(iobase + DATA_H);
264 //printk("bt3c: PACKET_TYPE=%02x\n", info->rx_skb->pkt_type); 264 //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
265 265
266 switch (info->rx_skb->pkt_type) { 266 switch (bt_cb(info->rx_skb)->pkt_type) {
267 267
268 case HCI_EVENT_PKT: 268 case HCI_EVENT_PKT:
269 info->rx_state = RECV_WAIT_EVENT_HEADER; 269 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -282,7 +282,7 @@ static void bt3c_receive(bt3c_info_t *info)
282 282
283 default: 283 default:
284 /* Unknown packet */ 284 /* Unknown packet */
285 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 285 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
286 info->hdev->stat.err_rx++; 286 info->hdev->stat.err_rx++;
287 clear_bit(HCI_RUNNING, &(info->hdev->flags)); 287 clear_bit(HCI_RUNNING, &(info->hdev->flags));
288 288
@@ -439,7 +439,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
439 439
440 info = (bt3c_info_t *) (hdev->driver_data); 440 info = (bt3c_info_t *) (hdev->driver_data);
441 441
442 switch (skb->pkt_type) { 442 switch (bt_cb(skb)->pkt_type) {
443 case HCI_COMMAND_PKT: 443 case HCI_COMMAND_PKT:
444 hdev->stat.cmd_tx++; 444 hdev->stat.cmd_tx++;
445 break; 445 break;
@@ -452,7 +452,7 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
452 }; 452 };
453 453
454 /* Prepend skb with frame type */ 454 /* Prepend skb with frame type */
455 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 455 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
456 skb_queue_tail(&(info->txq), skb); 456 skb_queue_tail(&(info->txq), skb);
457 457
458 spin_lock_irqsave(&(info->lock), flags); 458 spin_lock_irqsave(&(info->lock), flags);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index e4c59fdc0e12..89486ea7a021 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -211,9 +211,9 @@ static void btuart_receive(btuart_info_t *info)
211 if (info->rx_state == RECV_WAIT_PACKET_TYPE) { 211 if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
212 212
213 info->rx_skb->dev = (void *) info->hdev; 213 info->rx_skb->dev = (void *) info->hdev;
214 info->rx_skb->pkt_type = inb(iobase + UART_RX); 214 bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
215 215
216 switch (info->rx_skb->pkt_type) { 216 switch (bt_cb(info->rx_skb)->pkt_type) {
217 217
218 case HCI_EVENT_PKT: 218 case HCI_EVENT_PKT:
219 info->rx_state = RECV_WAIT_EVENT_HEADER; 219 info->rx_state = RECV_WAIT_EVENT_HEADER;
@@ -232,7 +232,7 @@ static void btuart_receive(btuart_info_t *info)
232 232
233 default: 233 default:
234 /* Unknown packet */ 234 /* Unknown packet */
235 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 235 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
236 info->hdev->stat.err_rx++; 236 info->hdev->stat.err_rx++;
237 clear_bit(HCI_RUNNING, &(info->hdev->flags)); 237 clear_bit(HCI_RUNNING, &(info->hdev->flags));
238 238
@@ -447,7 +447,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
447 447
448 info = (btuart_info_t *)(hdev->driver_data); 448 info = (btuart_info_t *)(hdev->driver_data);
449 449
450 switch (skb->pkt_type) { 450 switch (bt_cb(skb)->pkt_type) {
451 case HCI_COMMAND_PKT: 451 case HCI_COMMAND_PKT:
452 hdev->stat.cmd_tx++; 452 hdev->stat.cmd_tx++;
453 break; 453 break;
@@ -460,7 +460,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
460 }; 460 };
461 461
462 /* Prepend skb with frame type */ 462 /* Prepend skb with frame type */
463 memcpy(skb_push(skb, 1), &(skb->pkt_type), 1); 463 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
464 skb_queue_tail(&(info->txq), skb); 464 skb_queue_tail(&(info->txq), skb);
465 465
466 btuart_write_wakeup(info); 466 btuart_write_wakeup(info);
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index e39868c3da48..84c1f8839422 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -251,7 +251,7 @@ static void dtl1_receive(dtl1_info_t *info)
251 info->rx_count = nsh->len + (nsh->len & 0x0001); 251 info->rx_count = nsh->len + (nsh->len & 0x0001);
252 break; 252 break;
253 case RECV_WAIT_DATA: 253 case RECV_WAIT_DATA:
254 info->rx_skb->pkt_type = nsh->type; 254 bt_cb(info->rx_skb)->pkt_type = nsh->type;
255 255
256 /* remove PAD byte if it exists */ 256 /* remove PAD byte if it exists */
257 if (nsh->len & 0x0001) { 257 if (nsh->len & 0x0001) {
@@ -262,7 +262,7 @@ static void dtl1_receive(dtl1_info_t *info)
262 /* remove NSH */ 262 /* remove NSH */
263 skb_pull(info->rx_skb, NSHL); 263 skb_pull(info->rx_skb, NSHL);
264 264
265 switch (info->rx_skb->pkt_type) { 265 switch (bt_cb(info->rx_skb)->pkt_type) {
266 case 0x80: 266 case 0x80:
267 /* control data for the Nokia Card */ 267 /* control data for the Nokia Card */
268 dtl1_control(info, info->rx_skb); 268 dtl1_control(info, info->rx_skb);
@@ -272,12 +272,12 @@ static void dtl1_receive(dtl1_info_t *info)
272 case 0x84: 272 case 0x84:
273 /* send frame to the HCI layer */ 273 /* send frame to the HCI layer */
274 info->rx_skb->dev = (void *) info->hdev; 274 info->rx_skb->dev = (void *) info->hdev;
275 info->rx_skb->pkt_type &= 0x0f; 275 bt_cb(info->rx_skb)->pkt_type &= 0x0f;
276 hci_recv_frame(info->rx_skb); 276 hci_recv_frame(info->rx_skb);
277 break; 277 break;
278 default: 278 default:
279 /* unknown packet */ 279 /* unknown packet */
280 BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type); 280 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
281 kfree_skb(info->rx_skb); 281 kfree_skb(info->rx_skb);
282 break; 282 break;
283 } 283 }
@@ -410,7 +410,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
410 410
411 info = (dtl1_info_t *)(hdev->driver_data); 411 info = (dtl1_info_t *)(hdev->driver_data);
412 412
413 switch (skb->pkt_type) { 413 switch (bt_cb(skb)->pkt_type) {
414 case HCI_COMMAND_PKT: 414 case HCI_COMMAND_PKT:
415 hdev->stat.cmd_tx++; 415 hdev->stat.cmd_tx++;
416 nsh.type = 0x81; 416 nsh.type = 0x81;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 858fddb046de..0ee324e1265d 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -149,7 +149,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
149 return 0; 149 return 0;
150 } 150 }
151 151
152 switch (skb->pkt_type) { 152 switch (bt_cb(skb)->pkt_type) {
153 case HCI_ACLDATA_PKT: 153 case HCI_ACLDATA_PKT:
154 case HCI_COMMAND_PKT: 154 case HCI_COMMAND_PKT:
155 skb_queue_tail(&bcsp->rel, skb); 155 skb_queue_tail(&bcsp->rel, skb);
@@ -227,7 +227,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
227 if (!nskb) 227 if (!nskb)
228 return NULL; 228 return NULL;
229 229
230 nskb->pkt_type = pkt_type; 230 bt_cb(nskb)->pkt_type = pkt_type;
231 231
232 bcsp_slip_msgdelim(nskb); 232 bcsp_slip_msgdelim(nskb);
233 233
@@ -286,7 +286,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
286 since they have priority */ 286 since they have priority */
287 287
288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { 288 if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 289 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
290 if (nskb) { 290 if (nskb) {
291 kfree_skb(skb); 291 kfree_skb(skb);
292 return nskb; 292 return nskb;
@@ -303,7 +303,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
303 spin_lock_irqsave(&bcsp->unack.lock, flags); 303 spin_lock_irqsave(&bcsp->unack.lock, flags);
304 304
305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { 305 if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, skb->pkt_type); 306 struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
307 if (nskb) { 307 if (nskb) {
308 __skb_queue_tail(&bcsp->unack, skb); 308 __skb_queue_tail(&bcsp->unack, skb);
309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); 309 mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
@@ -401,7 +401,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
401 if (!nskb) 401 if (!nskb)
402 return; 402 return;
403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); 403 memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
404 nskb->pkt_type = BCSP_LE_PKT; 404 bt_cb(nskb)->pkt_type = BCSP_LE_PKT;
405 405
406 skb_queue_head(&bcsp->unrel, nskb); 406 skb_queue_head(&bcsp->unrel, nskb);
407 hci_uart_tx_wakeup(hu); 407 hci_uart_tx_wakeup(hu);
@@ -483,14 +483,14 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
483 bcsp_pkt_cull(bcsp); 483 bcsp_pkt_cull(bcsp);
484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && 484 if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
485 bcsp->rx_skb->data[0] & 0x80) { 485 bcsp->rx_skb->data[0] & 0x80) {
486 bcsp->rx_skb->pkt_type = HCI_ACLDATA_PKT; 486 bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
487 pass_up = 1; 487 pass_up = 1;
488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && 488 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
489 bcsp->rx_skb->data[0] & 0x80) { 489 bcsp->rx_skb->data[0] & 0x80) {
490 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 490 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
491 pass_up = 1; 491 pass_up = 1;
492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { 492 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
493 bcsp->rx_skb->pkt_type = HCI_SCODATA_PKT; 493 bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
494 pass_up = 1; 494 pass_up = 1;
495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && 495 } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
496 !(bcsp->rx_skb->data[0] & 0x80)) { 496 !(bcsp->rx_skb->data[0] & 0x80)) {
@@ -512,7 +512,7 @@ static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
512 hdr.evt = 0xff; 512 hdr.evt = 0xff;
513 hdr.plen = bcsp->rx_skb->len; 513 hdr.plen = bcsp->rx_skb->len;
514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); 514 memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
515 bcsp->rx_skb->pkt_type = HCI_EVENT_PKT; 515 bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
516 516
517 hci_recv_frame(bcsp->rx_skb); 517 hci_recv_frame(bcsp->rx_skb);
518 } else { 518 } else {
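
The bcsp_complete_rx_pkt() hunk keeps the existing demultiplexing and only changes where the result is stored: the BCSP protocol identifier in the low nibble of header byte 1 selects the HCI packet type. The decision table in isolation (a sketch; the reliable-bit test on byte 0 that guards the ACL and event cases is elided):

#include <net/bluetooth/hci.h>          /* HCI_*_PKT constants */

static int bcsp_hci_type(const __u8 *hdr)
{
	switch (hdr[1] & 0x0f) {
	case 6:  return HCI_ACLDATA_PKT;
	case 5:  return HCI_EVENT_PKT;
	case 7:  return HCI_SCODATA_PKT;
	default: return -1;             /* link establishment etc. */
	}
}
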
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 533323b60e63..cf8a22d58d96 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
112 BT_DBG("hu %p skb %p", hu, skb); 112 BT_DBG("hu %p skb %p", hu, skb);
113 113
114 /* Prepend skb with frame type */ 114 /* Prepend skb with frame type */
115 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 115 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
116 skb_queue_tail(&h4->txq, skb); 116 skb_queue_tail(&h4->txq, skb);
117 return 0; 117 return 0;
118} 118}
@@ -239,7 +239,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
239 return 0; 239 return 0;
240 } 240 }
241 h4->rx_skb->dev = (void *) hu->hdev; 241 h4->rx_skb->dev = (void *) hu->hdev;
242 h4->rx_skb->pkt_type = type; 242 bt_cb(h4->rx_skb)->pkt_type = type;
243 } 243 }
244 return count; 244 return count;
245} 245}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 90be2eae52e0..aed80cc22890 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -153,7 +153,7 @@ restart:
153 break; 153 break;
154 } 154 }
155 155
156 hci_uart_tx_complete(hu, skb->pkt_type); 156 hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
157 kfree_skb(skb); 157 kfree_skb(skb);
158 } 158 }
159 159
@@ -229,7 +229,7 @@ static int hci_uart_send_frame(struct sk_buff *skb)
229 hu = (struct hci_uart *) hdev->driver_data; 229 hu = (struct hci_uart *) hdev->driver_data;
230 tty = hu->tty; 230 tty = hu->tty;
231 231
232 BT_DBG("%s: type %d len %d", hdev->name, skb->pkt_type, skb->len); 232 BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
233 233
234 hu->proto->enqueue(hu, skb); 234 hu->proto->enqueue(hu, skb);
235 235
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 657719b8254f..67d96b5cbb96 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -127,7 +127,7 @@ static struct usb_device_id blacklist_ids[] = {
127 { } /* Terminating entry */ 127 { } /* Terminating entry */
128}; 128};
129 129
130static struct _urb *_urb_alloc(int isoc, int gfp) 130static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
131{ 131{
132 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 132 struct _urb *_urb = kmalloc(sizeof(struct _urb) +
133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp); 133 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
@@ -443,7 +443,7 @@ static int __tx_submit(struct hci_usb *husb, struct _urb *_urb)
443 443
444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb) 444static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
445{ 445{
446 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 446 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
447 struct usb_ctrlrequest *dr; 447 struct usb_ctrlrequest *dr;
448 struct urb *urb; 448 struct urb *urb;
449 449
@@ -451,7 +451,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
451 _urb = _urb_alloc(0, GFP_ATOMIC); 451 _urb = _urb_alloc(0, GFP_ATOMIC);
452 if (!_urb) 452 if (!_urb)
453 return -ENOMEM; 453 return -ENOMEM;
454 _urb->type = skb->pkt_type; 454 _urb->type = bt_cb(skb)->pkt_type;
455 455
456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC); 456 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
457 if (!dr) { 457 if (!dr) {
@@ -479,7 +479,7 @@ static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
479 479
480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb) 480static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
481{ 481{
482 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 482 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
483 struct urb *urb; 483 struct urb *urb;
484 int pipe; 484 int pipe;
485 485
@@ -487,7 +487,7 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
487 _urb = _urb_alloc(0, GFP_ATOMIC); 487 _urb = _urb_alloc(0, GFP_ATOMIC);
488 if (!_urb) 488 if (!_urb)
489 return -ENOMEM; 489 return -ENOMEM;
490 _urb->type = skb->pkt_type; 490 _urb->type = bt_cb(skb)->pkt_type;
491 } 491 }
492 492
493 urb = &_urb->urb; 493 urb = &_urb->urb;
@@ -505,14 +505,14 @@ static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
505#ifdef CONFIG_BT_HCIUSB_SCO 505#ifdef CONFIG_BT_HCIUSB_SCO
506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb) 506static inline int hci_usb_send_isoc(struct hci_usb *husb, struct sk_buff *skb)
507{ 507{
508 struct _urb *_urb = __get_completed(husb, skb->pkt_type); 508 struct _urb *_urb = __get_completed(husb, bt_cb(skb)->pkt_type);
509 struct urb *urb; 509 struct urb *urb;
510 510
511 if (!_urb) { 511 if (!_urb) {
512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC); 512 _urb = _urb_alloc(HCI_MAX_ISOC_FRAMES, GFP_ATOMIC);
513 if (!_urb) 513 if (!_urb)
514 return -ENOMEM; 514 return -ENOMEM;
515 _urb->type = skb->pkt_type; 515 _urb->type = bt_cb(skb)->pkt_type;
516 } 516 }
517 517
518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len); 518 BT_DBG("%s skb %p len %d", husb->hdev->name, skb, skb->len);
@@ -601,11 +601,11 @@ static int hci_usb_send_frame(struct sk_buff *skb)
601 if (!test_bit(HCI_RUNNING, &hdev->flags)) 601 if (!test_bit(HCI_RUNNING, &hdev->flags))
602 return -EBUSY; 602 return -EBUSY;
603 603
604 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len); 604 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
605 605
606 husb = (struct hci_usb *) hdev->driver_data; 606 husb = (struct hci_usb *) hdev->driver_data;
607 607
608 switch (skb->pkt_type) { 608 switch (bt_cb(skb)->pkt_type) {
609 case HCI_COMMAND_PKT: 609 case HCI_COMMAND_PKT:
610 hdev->stat.cmd_tx++; 610 hdev->stat.cmd_tx++;
611 break; 611 break;
@@ -627,7 +627,7 @@ static int hci_usb_send_frame(struct sk_buff *skb)
627 627
628 read_lock(&husb->completion_lock); 628 read_lock(&husb->completion_lock);
629 629
630 skb_queue_tail(__transmit_q(husb, skb->pkt_type), skb); 630 skb_queue_tail(__transmit_q(husb, bt_cb(skb)->pkt_type), skb);
631 hci_usb_tx_wakeup(husb); 631 hci_usb_tx_wakeup(husb);
632 632
633 read_unlock(&husb->completion_lock); 633 read_unlock(&husb->completion_lock);
@@ -682,7 +682,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
682 return -ENOMEM; 682 return -ENOMEM;
683 } 683 }
684 skb->dev = (void *) husb->hdev; 684 skb->dev = (void *) husb->hdev;
685 skb->pkt_type = type; 685 bt_cb(skb)->pkt_type = type;
686 686
687 __reassembly(husb, type) = skb; 687 __reassembly(husb, type) = skb;
688 688
@@ -702,6 +702,7 @@ static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int c
702 if (!scb->expect) { 702 if (!scb->expect) {
703 /* Complete frame */ 703 /* Complete frame */
704 __reassembly(husb, type) = NULL; 704 __reassembly(husb, type) = NULL;
705 bt_cb(skb)->pkt_type = type;
705 hci_recv_frame(skb); 706 hci_recv_frame(skb);
706 } 707 }
707 708
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index f9b956fb2b8b..52cbd45c308f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -1,229 +1,220 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* 1/*
26 * Bluetooth HCI virtual device driver.
27 * 2 *
28 * $Id: hci_vhci.c,v 1.3 2002/04/17 17:37:20 maxk Exp $ 3 * Bluetooth virtual HCI driver
4 *
5 * Copyright (C) 2000-2001 Qualcomm Incorporated
6 * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
7 * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
29 */ 24 */
30#define VERSION "1.1"
31 25
32#include <linux/config.h> 26#include <linux/config.h>
33#include <linux/module.h> 27#include <linux/module.h>
34 28
35#include <linux/errno.h>
36#include <linux/kernel.h> 29#include <linux/kernel.h>
37#include <linux/major.h> 30#include <linux/init.h>
38#include <linux/sched.h>
39#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/sched.h>
40#include <linux/poll.h> 35#include <linux/poll.h>
41#include <linux/fcntl.h>
42#include <linux/init.h>
43#include <linux/random.h>
44 36
45#include <linux/skbuff.h> 37#include <linux/skbuff.h>
46#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
47 39
48#include <asm/system.h>
49#include <asm/uaccess.h>
50
51#include <net/bluetooth/bluetooth.h> 40#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 41#include <net/bluetooth/hci_core.h>
53#include "hci_vhci.h"
54 42
55/* HCI device part */ 43#ifndef CONFIG_BT_HCIVHCI_DEBUG
44#undef BT_DBG
45#define BT_DBG(D...)
46#endif
47
48#define VERSION "1.2"
49
50static int minor = MISC_DYNAMIC_MINOR;
51
52struct vhci_data {
53 struct hci_dev *hdev;
54
55 unsigned long flags;
56
57 wait_queue_head_t read_wait;
58 struct sk_buff_head readq;
59
60 struct fasync_struct *fasync;
61};
56 62
57static int hci_vhci_open(struct hci_dev *hdev) 63#define VHCI_FASYNC 0x0010
64
65static struct miscdevice vhci_miscdev;
66
67static int vhci_open_dev(struct hci_dev *hdev)
58{ 68{
59 set_bit(HCI_RUNNING, &hdev->flags); 69 set_bit(HCI_RUNNING, &hdev->flags);
60 return 0;
61}
62 70
63static int hci_vhci_flush(struct hci_dev *hdev)
64{
65 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;
66 skb_queue_purge(&hci_vhci->readq);
67 return 0; 71 return 0;
68} 72}
69 73
70static int hci_vhci_close(struct hci_dev *hdev) 74static int vhci_close_dev(struct hci_dev *hdev)
71{ 75{
76 struct vhci_data *vhci = hdev->driver_data;
77
72 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 78 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
73 return 0; 79 return 0;
74 80
75 hci_vhci_flush(hdev); 81 skb_queue_purge(&vhci->readq);
82
76 return 0; 83 return 0;
77} 84}
78 85
79static void hci_vhci_destruct(struct hci_dev *hdev) 86static int vhci_flush(struct hci_dev *hdev)
80{ 87{
81 struct hci_vhci_struct *vhci; 88 struct vhci_data *vhci = hdev->driver_data;
82 89
83 if (!hdev) return; 90 skb_queue_purge(&vhci->readq);
84 91
85 vhci = (struct hci_vhci_struct *) hdev->driver_data; 92 return 0;
86 kfree(vhci);
87} 93}
88 94
89static int hci_vhci_send_frame(struct sk_buff *skb) 95static int vhci_send_frame(struct sk_buff *skb)
90{ 96{
91 struct hci_dev* hdev = (struct hci_dev *) skb->dev; 97 struct hci_dev* hdev = (struct hci_dev *) skb->dev;
92 struct hci_vhci_struct *hci_vhci; 98 struct vhci_data *vhci;
93 99
94 if (!hdev) { 100 if (!hdev) {
95 BT_ERR("Frame for uknown device (hdev=NULL)"); 101 BT_ERR("Frame for unknown HCI device (hdev=NULL)");
96 return -ENODEV; 102 return -ENODEV;
97 } 103 }
98 104
99 if (!test_bit(HCI_RUNNING, &hdev->flags)) 105 if (!test_bit(HCI_RUNNING, &hdev->flags))
100 return -EBUSY; 106 return -EBUSY;
101 107
102 hci_vhci = (struct hci_vhci_struct *) hdev->driver_data; 108 vhci = hdev->driver_data;
109
110 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
111 skb_queue_tail(&vhci->readq, skb);
103 112
104 memcpy(skb_push(skb, 1), &skb->pkt_type, 1); 113 if (vhci->flags & VHCI_FASYNC)
105 skb_queue_tail(&hci_vhci->readq, skb); 114 kill_fasync(&vhci->fasync, SIGIO, POLL_IN);
106 115
107 if (hci_vhci->flags & VHCI_FASYNC) 116 wake_up_interruptible(&vhci->read_wait);
108 kill_fasync(&hci_vhci->fasync, SIGIO, POLL_IN);
109 wake_up_interruptible(&hci_vhci->read_wait);
110 117
111 return 0; 118 return 0;
112} 119}
113 120
114/* Character device part */ 121static void vhci_destruct(struct hci_dev *hdev)
115 122{
116/* Poll */ 123 kfree(hdev->driver_data);
117static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
118{
119 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
120
121 poll_wait(file, &hci_vhci->read_wait, wait);
122
123 if (!skb_queue_empty(&hci_vhci->readq))
124 return POLLIN | POLLRDNORM;
125
126 return POLLOUT | POLLWRNORM;
127} 124}
128 125
129/* Get packet from user space buffer(already verified) */ 126static inline ssize_t vhci_get_user(struct vhci_data *vhci,
130static inline ssize_t hci_vhci_get_user(struct hci_vhci_struct *hci_vhci, const char __user *buf, size_t count) 127 const char __user *buf, size_t count)
131{ 128{
132 struct sk_buff *skb; 129 struct sk_buff *skb;
133 130
134 if (count > HCI_MAX_FRAME_SIZE) 131 if (count > HCI_MAX_FRAME_SIZE)
135 return -EINVAL; 132 return -EINVAL;
136 133
137 if (!(skb = bt_skb_alloc(count, GFP_KERNEL))) 134 skb = bt_skb_alloc(count, GFP_KERNEL);
135 if (!skb)
138 return -ENOMEM; 136 return -ENOMEM;
139 137
140 if (copy_from_user(skb_put(skb, count), buf, count)) { 138 if (copy_from_user(skb_put(skb, count), buf, count)) {
141 kfree_skb(skb); 139 kfree_skb(skb);
142 return -EFAULT; 140 return -EFAULT;
143 } 141 }
144 142
145 skb->dev = (void *) hci_vhci->hdev; 143 skb->dev = (void *) vhci->hdev;
146 skb->pkt_type = *((__u8 *) skb->data); 144 bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
147 skb_pull(skb, 1); 145 skb_pull(skb, 1);
148 146
149 hci_recv_frame(skb); 147 hci_recv_frame(skb);
150 148
151 return count; 149 return count;
152}
153
154/* Write */
155static ssize_t hci_vhci_chr_write(struct file * file, const char __user * buf,
156 size_t count, loff_t *pos)
157{
158 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
159
160 if (!access_ok(VERIFY_READ, buf, count))
161 return -EFAULT;
162
163 return hci_vhci_get_user(hci_vhci, buf, count);
164} 150}
165 151
166/* Put packet to user space buffer(already verified) */ 152static inline ssize_t vhci_put_user(struct vhci_data *vhci,
167static inline ssize_t hci_vhci_put_user(struct hci_vhci_struct *hci_vhci, 153 struct sk_buff *skb, char __user *buf, int count)
168 struct sk_buff *skb, char __user *buf,
169 int count)
170{ 154{
171 int len = count, total = 0;
172 char __user *ptr = buf; 155 char __user *ptr = buf;
156 int len, total = 0;
157
158 len = min_t(unsigned int, skb->len, count);
173 159
174 len = min_t(unsigned int, skb->len, len);
175 if (copy_to_user(ptr, skb->data, len)) 160 if (copy_to_user(ptr, skb->data, len))
176 return -EFAULT; 161 return -EFAULT;
162
177 total += len; 163 total += len;
178 164
179 hci_vhci->hdev->stat.byte_tx += len; 165 vhci->hdev->stat.byte_tx += len;
180 switch (skb->pkt_type) {
181 case HCI_COMMAND_PKT:
182 hci_vhci->hdev->stat.cmd_tx++;
183 break;
184 166
185 case HCI_ACLDATA_PKT: 167 switch (bt_cb(skb)->pkt_type) {
186 hci_vhci->hdev->stat.acl_tx++; 168 case HCI_COMMAND_PKT:
187 break; 169 vhci->hdev->stat.cmd_tx++;
170 break;
171
172 case HCI_ACLDATA_PKT:
173 vhci->hdev->stat.acl_tx++;
174 break;
188 175
189 case HCI_SCODATA_PKT: 176 case HCI_SCODATA_PKT:
190 hci_vhci->hdev->stat.cmd_tx++; 177 vhci->hdev->stat.cmd_tx++;
191 break; 178 break;
192 }; 179 };
193 180
194 return total; 181 return total;
195} 182}
196 183
197/* Read */ 184static loff_t vhci_llseek(struct file * file, loff_t offset, int origin)
198static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t count, loff_t *pos) 185{
186 return -ESPIPE;
187}
188
189static ssize_t vhci_read(struct file * file, char __user * buf, size_t count, loff_t *pos)
199{ 190{
200 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
201 DECLARE_WAITQUEUE(wait, current); 191 DECLARE_WAITQUEUE(wait, current);
192 struct vhci_data *vhci = file->private_data;
202 struct sk_buff *skb; 193 struct sk_buff *skb;
203 ssize_t ret = 0; 194 ssize_t ret = 0;
204 195
205 add_wait_queue(&hci_vhci->read_wait, &wait); 196 add_wait_queue(&vhci->read_wait, &wait);
206 while (count) { 197 while (count) {
207 set_current_state(TASK_INTERRUPTIBLE); 198 set_current_state(TASK_INTERRUPTIBLE);
208 199
209 /* Read frames from device queue */ 200 skb = skb_dequeue(&vhci->readq);
210 if (!(skb = skb_dequeue(&hci_vhci->readq))) { 201 if (!skb) {
211 if (file->f_flags & O_NONBLOCK) { 202 if (file->f_flags & O_NONBLOCK) {
212 ret = -EAGAIN; 203 ret = -EAGAIN;
213 break; 204 break;
214 } 205 }
206
215 if (signal_pending(current)) { 207 if (signal_pending(current)) {
216 ret = -ERESTARTSYS; 208 ret = -ERESTARTSYS;
217 break; 209 break;
218 } 210 }
219 211
220 /* Nothing to read, let's sleep */
221 schedule(); 212 schedule();
222 continue; 213 continue;
223 } 214 }
224 215
225 if (access_ok(VERIFY_WRITE, buf, count)) 216 if (access_ok(VERIFY_WRITE, buf, count))
226 ret = hci_vhci_put_user(hci_vhci, skb, buf, count); 217 ret = vhci_put_user(vhci, skb, buf, count);
227 else 218 else
228 ret = -EFAULT; 219 ret = -EFAULT;
229 220
@@ -231,84 +222,90 @@ static ssize_t hci_vhci_chr_read(struct file * file, char __user * buf, size_t c
231 break; 222 break;
232 } 223 }
233 set_current_state(TASK_RUNNING); 224 set_current_state(TASK_RUNNING);
234 remove_wait_queue(&hci_vhci->read_wait, &wait); 225 remove_wait_queue(&vhci->read_wait, &wait);
235 226
236 return ret; 227 return ret;
237} 228}
238 229
239static loff_t hci_vhci_chr_lseek(struct file * file, loff_t offset, int origin) 230static ssize_t vhci_write(struct file *file,
231 const char __user *buf, size_t count, loff_t *pos)
240{ 232{
241 return -ESPIPE; 233 struct vhci_data *vhci = file->private_data;
242}
243 234
244static int hci_vhci_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 235 if (!access_ok(VERIFY_READ, buf, count))
245{ 236 return -EFAULT;
246 return -EINVAL; 237
238 return vhci_get_user(vhci, buf, count);
247} 239}
248 240
249static int hci_vhci_chr_fasync(int fd, struct file *file, int on) 241static unsigned int vhci_poll(struct file *file, poll_table *wait)
250{ 242{
251 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 243 struct vhci_data *vhci = file->private_data;
252 int ret;
253 244
254 if ((ret = fasync_helper(fd, file, on, &hci_vhci->fasync)) < 0) 245 poll_wait(file, &vhci->read_wait, wait);
255 return ret;
256
257 if (on)
258 hci_vhci->flags |= VHCI_FASYNC;
259 else
260 hci_vhci->flags &= ~VHCI_FASYNC;
261 246
262 return 0; 247 if (!skb_queue_empty(&vhci->readq))
248 return POLLIN | POLLRDNORM;
249
250 return POLLOUT | POLLWRNORM;
263} 251}
264 252
265static int hci_vhci_chr_open(struct inode *inode, struct file * file) 253static int vhci_ioctl(struct inode *inode, struct file *file,
254 unsigned int cmd, unsigned long arg)
266{ 255{
267 struct hci_vhci_struct *hci_vhci = NULL; 256 return -EINVAL;
257}
258
259static int vhci_open(struct inode *inode, struct file *file)
260{
261 struct vhci_data *vhci;
268 struct hci_dev *hdev; 262 struct hci_dev *hdev;
269 263
270 if (!(hci_vhci = kmalloc(sizeof(struct hci_vhci_struct), GFP_KERNEL))) 264 vhci = kmalloc(sizeof(struct vhci_data), GFP_KERNEL);
265 if (!vhci)
271 return -ENOMEM; 266 return -ENOMEM;
272 267
273 memset(hci_vhci, 0, sizeof(struct hci_vhci_struct)); 268 memset(vhci, 0, sizeof(struct vhci_data));
274 269
275 skb_queue_head_init(&hci_vhci->readq); 270 skb_queue_head_init(&vhci->readq);
276 init_waitqueue_head(&hci_vhci->read_wait); 271 init_waitqueue_head(&vhci->read_wait);
277 272
278 /* Initialize and register HCI device */
279 hdev = hci_alloc_dev(); 273 hdev = hci_alloc_dev();
280 if (!hdev) { 274 if (!hdev) {
281 kfree(hci_vhci); 275 kfree(vhci);
282 return -ENOMEM; 276 return -ENOMEM;
283 } 277 }
284 278
285 hci_vhci->hdev = hdev; 279 vhci->hdev = hdev;
286 280
287 hdev->type = HCI_VHCI; 281 hdev->type = HCI_VHCI;
288 hdev->driver_data = hci_vhci; 282 hdev->driver_data = vhci;
283 SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);
289 284
290 hdev->open = hci_vhci_open; 285 hdev->open = vhci_open_dev;
291 hdev->close = hci_vhci_close; 286 hdev->close = vhci_close_dev;
292 hdev->flush = hci_vhci_flush; 287 hdev->flush = vhci_flush;
293 hdev->send = hci_vhci_send_frame; 288 hdev->send = vhci_send_frame;
294 hdev->destruct = hci_vhci_destruct; 289 hdev->destruct = vhci_destruct;
295 290
296 hdev->owner = THIS_MODULE; 291 hdev->owner = THIS_MODULE;
297 292
298 if (hci_register_dev(hdev) < 0) { 293 if (hci_register_dev(hdev) < 0) {
299 kfree(hci_vhci); 294 BT_ERR("Can't register HCI device");
295 kfree(vhci);
300 hci_free_dev(hdev); 296 hci_free_dev(hdev);
301 return -EBUSY; 297 return -EBUSY;
302 } 298 }
303 299
304 file->private_data = hci_vhci; 300 file->private_data = vhci;
305 return nonseekable_open(inode, file); 301
302 return nonseekable_open(inode, file);
306} 303}
307 304
308static int hci_vhci_chr_close(struct inode *inode, struct file *file) 305static int vhci_release(struct inode *inode, struct file *file)
309{ 306{
310 struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data; 307 struct vhci_data *vhci = file->private_data;
311 struct hci_dev *hdev = hci_vhci->hdev; 308 struct hci_dev *hdev = vhci->hdev;
312 309
313 if (hci_unregister_dev(hdev) < 0) { 310 if (hci_unregister_dev(hdev) < 0) {
314 BT_ERR("Can't unregister HCI device %s", hdev->name); 311 BT_ERR("Can't unregister HCI device %s", hdev->name);
@@ -317,48 +314,71 @@ static int hci_vhci_chr_close(struct inode *inode, struct file *file)
317 hci_free_dev(hdev); 314 hci_free_dev(hdev);
318 315
319 file->private_data = NULL; 316 file->private_data = NULL;
317
320 return 0; 318 return 0;
321} 319}
322 320
323static struct file_operations hci_vhci_fops = { 321static int vhci_fasync(int fd, struct file *file, int on)
324 .owner = THIS_MODULE, 322{
325 .llseek = hci_vhci_chr_lseek, 323 struct vhci_data *vhci = file->private_data;
326 .read = hci_vhci_chr_read, 324 int err;
327 .write = hci_vhci_chr_write, 325
328 .poll = hci_vhci_chr_poll, 326 err = fasync_helper(fd, file, on, &vhci->fasync);
329 .ioctl = hci_vhci_chr_ioctl, 327 if (err < 0)
330 .open = hci_vhci_chr_open, 328 return err;
331 .release = hci_vhci_chr_close, 329
332 .fasync = hci_vhci_chr_fasync 330 if (on)
331 vhci->flags |= VHCI_FASYNC;
332 else
333 vhci->flags &= ~VHCI_FASYNC;
334
335 return 0;
336}
337
338static struct file_operations vhci_fops = {
339 .owner = THIS_MODULE,
340 .llseek = vhci_llseek,
341 .read = vhci_read,
342 .write = vhci_write,
343 .poll = vhci_poll,
344 .ioctl = vhci_ioctl,
345 .open = vhci_open,
346 .release = vhci_release,
347 .fasync = vhci_fasync,
333}; 348};
334 349
335static struct miscdevice hci_vhci_miscdev= 350static struct miscdevice vhci_miscdev= {
336{ 351 .name = "vhci",
337 VHCI_MINOR, 352 .fops = &vhci_fops,
338 "hci_vhci",
339 &hci_vhci_fops
340}; 353};
341 354
342static int __init hci_vhci_init(void) 355static int __init vhci_init(void)
343{ 356{
344 BT_INFO("VHCI driver ver %s", VERSION); 357 BT_INFO("Virtual HCI driver ver %s", VERSION);
345 358
346 if (misc_register(&hci_vhci_miscdev)) { 359 vhci_miscdev.minor = minor;
347 BT_ERR("Can't register misc device %d\n", VHCI_MINOR); 360
361 if (misc_register(&vhci_miscdev) < 0) {
362 BT_ERR("Can't register misc device with minor %d", minor);
348 return -EIO; 363 return -EIO;
349 } 364 }
350 365
351 return 0; 366 return 0;
352} 367}
353 368
354static void hci_vhci_cleanup(void) 369static void __exit vhci_exit(void)
355{ 370{
356 misc_deregister(&hci_vhci_miscdev); 371 if (misc_deregister(&vhci_miscdev) < 0)
372 BT_ERR("Can't unregister misc device with minor %d", minor);
357} 373}
358 374
359module_init(hci_vhci_init); 375module_init(vhci_init);
360module_exit(hci_vhci_cleanup); 376module_exit(vhci_exit);
377
378module_param(minor, int, 0444);
379MODULE_PARM_DESC(minor, "Miscellaneous minor device number");
361 380
362MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>"); 381MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
363MODULE_DESCRIPTION("Bluetooth VHCI driver ver " VERSION); 382MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
364MODULE_LICENSE("GPL"); 383MODULE_VERSION(VERSION);
384MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_vhci.h b/drivers/bluetooth/hci_vhci.h
deleted file mode 100644
index 53b11f9ef76d..000000000000
--- a/drivers/bluetooth/hci_vhci.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/*
26 * $Id: hci_vhci.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
27 */
28
29#ifndef __HCI_VHCI_H
30#define __HCI_VHCI_H
31
32#ifdef __KERNEL__
33
34struct hci_vhci_struct {
35 struct hci_dev *hdev;
36 __u32 flags;
37 wait_queue_head_t read_wait;
38 struct sk_buff_head readq;
39 struct fasync_struct *fasync;
40};
41
42/* VHCI device flags */
43#define VHCI_FASYNC 0x0010
44
45#endif /* __KERNEL__ */
46
47#define VHCI_DEV "/dev/vhci"
48#define VHCI_MINOR 250
49
50#endif /* __HCI_VHCI_H */
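
The VHCI_FASYNC flag from this deleted header survives in the rewritten driver (see vhci_fasync above). A sketch of the producer side of that pattern, assuming the new vhci_data keeps the same queue, waitqueue, and fasync fields as the old struct; demo_queue_skb is a hypothetical helper, not a function from this patch:

static void demo_queue_skb(struct vhci_data *vhci, struct sk_buff *skb)
{
	skb_queue_tail(&vhci->readq, skb);	/* hand the frame to readers */

	if (vhci->flags & VHCI_FASYNC)		/* O_ASYNC requested via fcntl */
		kill_fasync(&vhci->fasync, SIGIO, POLL_IN);

	wake_up_interruptible(&vhci->read_wait); /* unblock a sleeping read() */
}
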
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 38dd9ffbe8bc..0829db58462f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -734,7 +734,7 @@ static int viocd_remove(struct vio_dev *vdev)
734 */ 734 */
735static struct vio_device_id viocd_device_table[] __devinitdata = { 735static struct vio_device_id viocd_device_table[] __devinitdata = {
736 { "viocd", "" }, 736 { "viocd", "" },
737 { 0, } 737 { "", "" }
738}; 738};
739 739
740MODULE_DEVICE_TABLE(vio, viocd_device_table); 740MODULE_DEVICE_TABLE(vio, viocd_device_table);
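
This one-liner matters because vio_device_id holds fixed-size strings, so the table's end-of-list sentinel must be empty strings rather than a bare 0 initializer. A self-contained illustration of sentinel-terminated matching; struct demo_id is a stand-in, not the real vio_device_id layout:

#include <stdio.h>

/* Illustrative stand-in: two fixed-size string fields,
 * terminated by an all-empty sentinel entry. */
struct demo_id { char type[32]; char compat[32]; };

static const struct demo_id demo_table[] = {
	{ "viocd", "" },
	{ "", "" },		/* sentinel: empty strings, not a bare 0 */
};

int main(void)
{
	const struct demo_id *id;
	for (id = demo_table; id->type[0] != '\0'; id++)
		printf("matches device type \"%s\"\n", id->type);
	return 0;
}
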
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 123417e43040..56ace9d5e2ae 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@ config DRM_TDFX
23 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), 23 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
24 graphics card. If M is selected, the module will be called tdfx. 24 graphics card. If M is selected, the module will be called tdfx.
25 25
26config DRM_GAMMA
27 tristate "3dlabs GMX 2000"
28 depends on DRM && BROKEN
29 help
30 This is the old gamma driver; please tell me if it might actually
31 work.
32
33config DRM_R128 26config DRM_R128
34 tristate "ATI Rage 128" 27 tristate "ATI Rage 128"
35 depends on DRM && PCI 28 depends on DRM && PCI
@@ -82,7 +75,7 @@ endchoice
82 75
83config DRM_MGA 76config DRM_MGA
84 tristate "Matrox g200/g400" 77 tristate "Matrox g200/g400"
85 depends on DRM && AGP 78 depends on DRM
86 help 79 help
87 Choose this option if you have a Matrox G200, G400 or G450 graphics 80 Choose this option if you have a Matrox G200, G400 or G450 graphics
88 card. If M is selected, the module will be called mga. AGP 81 card. If M is selected, the module will be called mga. AGP
@@ -103,3 +96,10 @@ config DRM_VIA
103 Choose this option if you have a Via unichrome or compatible video 96 Choose this option if you have a Via unichrome or compatible video
104 chipset. If M is selected the module will be called via. 97 chipset. If M is selected the module will be called via.
105 98
99config DRM_SAVAGE
100 tristate "Savage video cards"
101 depends on DRM
102 help
103 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
104 chipset. If M is selected the module will be called savage.
105
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd941045b1f..e41060c76226 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,16 +8,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
9 drm_sysfs.o 9 drm_sysfs.o
10 10
11gamma-objs := gamma_drv.o gamma_dma.o
12tdfx-objs := tdfx_drv.o 11tdfx-objs := tdfx_drv.o
13r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o 12r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
14mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o 13mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
15i810-objs := i810_drv.o i810_dma.o 14i810-objs := i810_drv.o i810_dma.o
16i830-objs := i830_drv.o i830_dma.o i830_irq.o 15i830-objs := i830_drv.o i830_dma.o i830_irq.o
17i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o 16i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
18radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o 17radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
19ffb-objs := ffb_drv.o ffb_context.o 18ffb-objs := ffb_drv.o ffb_context.o
20sis-objs := sis_drv.o sis_ds.o sis_mm.o 19sis-objs := sis_drv.o sis_ds.o sis_mm.o
20savage-objs := savage_drv.o savage_bci.o savage_state.o
21via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o 21via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
22 22
23ifeq ($(CONFIG_COMPAT),y) 23ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
29endif 29endif
30 30
31obj-$(CONFIG_DRM) += drm.o 31obj-$(CONFIG_DRM) += drm.o
32obj-$(CONFIG_DRM_GAMMA) += gamma.o
33obj-$(CONFIG_DRM_TDFX) += tdfx.o 32obj-$(CONFIG_DRM_TDFX) += tdfx.o
34obj-$(CONFIG_DRM_R128) += r128.o 33obj-$(CONFIG_DRM_R128) += r128.o
35obj-$(CONFIG_DRM_RADEON)+= radeon.o 34obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
39obj-$(CONFIG_DRM_I915) += i915.o 38obj-$(CONFIG_DRM_I915) += i915.o
40obj-$(CONFIG_DRM_FFB) += ffb.o 39obj-$(CONFIG_DRM_FFB) += ffb.o
41obj-$(CONFIG_DRM_SIS) += sis.o 40obj-$(CONFIG_DRM_SIS) += sis.o
41obj-$(CONFIG_DRM_SAVAGE)+= savage.o
42obj-$(CONFIG_DRM_VIA) +=via.o 42obj-$(CONFIG_DRM_VIA) +=via.o
43 43
44
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index e8371dd87fbc..fc6598a81acd 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -98,7 +98,7 @@
98#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) 98#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
99 99
100 100
101typedef unsigned long drm_handle_t; 101typedef unsigned int drm_handle_t;
102typedef unsigned int drm_context_t; 102typedef unsigned int drm_context_t;
103typedef unsigned int drm_drawable_t; 103typedef unsigned int drm_drawable_t;
104typedef unsigned int drm_magic_t; 104typedef unsigned int drm_magic_t;
@@ -209,7 +209,8 @@ typedef enum drm_map_type {
209 _DRM_REGISTERS = 1, /**< no caching, no core dump */ 209 _DRM_REGISTERS = 1, /**< no caching, no core dump */
210 _DRM_SHM = 2, /**< shared, cached */ 210 _DRM_SHM = 2, /**< shared, cached */
211 _DRM_AGP = 3, /**< AGP/GART */ 211 _DRM_AGP = 3, /**< AGP/GART */
212 _DRM_SCATTER_GATHER = 4 /**< Scatter/gather memory for PCI DMA */ 212 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
213 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
213} drm_map_type_t; 214} drm_map_type_t;
214 215
215 216
@@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
368 enum { 369 enum {
369 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ 370 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
370 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ 371 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
371 _DRM_SG_BUFFER = 0x04 /**< Scatter/gather memory buffer */ 372 _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
373 _DRM_FB_BUFFER = 0x08 /**< Buffer is in frame buffer */
372 } flags; 374 } flags;
373 unsigned long agp_start; /**< 375 unsigned long agp_start; /**<
374 * Start address of where the AGP buffers are 376 * Start address of where the AGP buffers are
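
Narrowing drm_handle_t to unsigned int keeps map handles the same width for 32-bit and 64-bit userspace talking to the same kernel. A two-line demonstration of why unsigned long cannot serve as a cross-ABI handle:

#include <stdio.h>

/* unsigned long is 4 bytes on 32-bit ABIs and 8 bytes on most 64-bit
 * ones, while unsigned int is 4 bytes on both; a handle exchanged
 * through an ioctl must have one size everywhere. */
int main(void)
{
	printf("unsigned long: %zu bytes\n", sizeof(unsigned long));
	printf("unsigned int:  %zu bytes\n", sizeof(unsigned int));
	return 0;
}
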
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 5df09cc8c6db..6f98701dfe15 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -53,7 +53,6 @@
53#include <linux/init.h> 53#include <linux/init.h>
54#include <linux/file.h> 54#include <linux/file.h>
55#include <linux/pci.h> 55#include <linux/pci.h>
56#include <linux/version.h>
57#include <linux/jiffies.h> 56#include <linux/jiffies.h>
58#include <linux/smp_lock.h> /* For (un)lock_kernel */ 57#include <linux/smp_lock.h> /* For (un)lock_kernel */
59#include <linux/mm.h> 58#include <linux/mm.h>
@@ -96,6 +95,7 @@
96#define DRIVER_IRQ_SHARED 0x80 95#define DRIVER_IRQ_SHARED 0x80
97#define DRIVER_IRQ_VBL 0x100 96#define DRIVER_IRQ_VBL 0x100
98#define DRIVER_DMA_QUEUE 0x200 97#define DRIVER_DMA_QUEUE 0x200
98#define DRIVER_FB_DMA 0x400
99 99
100/***********************************************************************/ 100/***********************************************************************/
101/** \name Begin the DRM... */ 101/** \name Begin the DRM... */
@@ -160,36 +160,7 @@
160#define pte_unmap(pte) 160#define pte_unmap(pte)
161#endif 161#endif
162 162
163#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
164static inline struct page * vmalloc_to_page(void * vmalloc_addr)
165{
166 unsigned long addr = (unsigned long) vmalloc_addr;
167 struct page *page = NULL;
168 pgd_t *pgd = pgd_offset_k(addr);
169 pmd_t *pmd;
170 pte_t *ptep, pte;
171
172 if (!pgd_none(*pgd)) {
173 pmd = pmd_offset(pgd, addr);
174 if (!pmd_none(*pmd)) {
175 preempt_disable();
176 ptep = pte_offset_map(pmd, addr);
177 pte = *ptep;
178 if (pte_present(pte))
179 page = pte_page(pte);
180 pte_unmap(ptep);
181 preempt_enable();
182 }
183 }
184 return page;
185}
186#endif
187
188#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
189#define DRM_RPR_ARG(vma)
190#else
191#define DRM_RPR_ARG(vma) vma, 163#define DRM_RPR_ARG(vma) vma,
192#endif
193 164
194#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) 165#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
195 166
@@ -474,7 +445,8 @@ typedef struct drm_device_dma {
474 unsigned long byte_count; 445 unsigned long byte_count;
475 enum { 446 enum {
476 _DRM_DMA_USE_AGP = 0x01, 447 _DRM_DMA_USE_AGP = 0x01,
477 _DRM_DMA_USE_SG = 0x02 448 _DRM_DMA_USE_SG = 0x02,
449 _DRM_DMA_USE_FB = 0x04
478 } flags; 450 } flags;
479 451
480} drm_device_dma_t; 452} drm_device_dma_t;
@@ -525,12 +497,19 @@ typedef struct drm_sigdata {
525 drm_hw_lock_t *lock; 497 drm_hw_lock_t *lock;
526} drm_sigdata_t; 498} drm_sigdata_t;
527 499
500typedef struct drm_dma_handle {
501 dma_addr_t busaddr;
502 void *vaddr;
503 size_t size;
504} drm_dma_handle_t;
505
528/** 506/**
529 * Mappings list 507 * Mappings list
530 */ 508 */
531typedef struct drm_map_list { 509typedef struct drm_map_list {
532 struct list_head head; /**< list head */ 510 struct list_head head; /**< list head */
533 drm_map_t *map; /**< mapping */ 511 drm_map_t *map; /**< mapping */
512 unsigned int user_token;
534} drm_map_list_t; 513} drm_map_list_t;
535 514
536typedef drm_map_t drm_local_map_t; 515typedef drm_map_t drm_local_map_t;
@@ -578,7 +557,22 @@ struct drm_driver {
578 int (*kernel_context_switch)(struct drm_device *dev, int old, int new); 557 int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
579 void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock); 558 void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
580 int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence); 559 int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
560
561 /**
562 * Called by \c drm_device_is_agp. Typically used to determine if a
563 * card is really attached to AGP or not.
564 *
565 * \param dev DRM device handle
566 *
567 * \returns
568 * One of three values is returned depending on whether or not the
569 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
570 * (return of 1), or may or may not be AGP (return of 2).
571 */
572 int (*device_is_agp) (struct drm_device * dev);
573
581 /* these have to be filled in */ 574 /* these have to be filled in */
575
582 int (*postinit)(struct drm_device *, unsigned long flags); 576 int (*postinit)(struct drm_device *, unsigned long flags);
583 irqreturn_t (*irq_handler)( DRM_IRQ_ARGS ); 577 irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
584 void (*irq_preinstall)(struct drm_device *dev); 578 void (*irq_preinstall)(struct drm_device *dev);
@@ -722,12 +716,8 @@ typedef struct drm_device {
722 int pci_slot; /**< PCI slot number */ 716 int pci_slot; /**< PCI slot number */
723 int pci_func; /**< PCI function number */ 717 int pci_func; /**< PCI function number */
724#ifdef __alpha__ 718#ifdef __alpha__
725#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
726 struct pci_controler *hose;
727#else
728 struct pci_controller *hose; 719 struct pci_controller *hose;
729#endif 720#endif
730#endif
731 drm_sg_mem_t *sg; /**< Scatter gather memory */ 721 drm_sg_mem_t *sg; /**< Scatter gather memory */
732 unsigned long *ctx_bitmap; /**< context bitmap */ 722 unsigned long *ctx_bitmap; /**< context bitmap */
733 void *dev_private; /**< device private data */ 723 void *dev_private; /**< device private data */
@@ -736,6 +726,7 @@ typedef struct drm_device {
736 726
737 struct drm_driver *driver; 727 struct drm_driver *driver;
738 drm_local_map_t *agp_buffer_map; 728 drm_local_map_t *agp_buffer_map;
729 unsigned int agp_buffer_token;
739 drm_head_t primary; /**< primary screen head */ 730 drm_head_t primary; /**< primary screen head */
740} drm_device_t; 731} drm_device_t;
741 732
@@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
806 drm_device_t *dev); 797 drm_device_t *dev);
807extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev); 798extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
808 799
809extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type); 800extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
810extern int drm_free_agp(DRM_AGP_MEM *handle, int pages); 801extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
811extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start); 802extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
812extern int drm_unbind_agp(DRM_AGP_MEM *handle); 803extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
881 unsigned int context); 872 unsigned int context);
882 873
883 /* Buffer management support (drm_bufs.h) */ 874 /* Buffer management support (drm_bufs.h) */
875extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
876extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
877extern int drm_addmap(drm_device_t *dev, unsigned int offset,
878 unsigned int size, drm_map_type_t type,
879 drm_map_flags_t flags, drm_local_map_t **map_ptr);
880extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
881 unsigned int cmd, unsigned long arg);
882extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
883extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
884extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
885 unsigned int cmd, unsigned long arg);
886
884extern int drm_order( unsigned long size ); 887extern int drm_order( unsigned long size );
885extern int drm_addmap( struct inode *inode, struct file *filp,
886 unsigned int cmd, unsigned long arg );
887extern int drm_rmmap( struct inode *inode, struct file *filp,
888 unsigned int cmd, unsigned long arg );
889extern int drm_addbufs( struct inode *inode, struct file *filp, 888extern int drm_addbufs( struct inode *inode, struct file *filp,
890 unsigned int cmd, unsigned long arg ); 889 unsigned int cmd, unsigned long arg );
891extern int drm_infobufs( struct inode *inode, struct file *filp, 890extern int drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
896 unsigned int cmd, unsigned long arg ); 895 unsigned int cmd, unsigned long arg );
897extern int drm_mapbufs( struct inode *inode, struct file *filp, 896extern int drm_mapbufs( struct inode *inode, struct file *filp,
898 unsigned int cmd, unsigned long arg ); 897 unsigned int cmd, unsigned long arg );
898extern unsigned long drm_get_resource_start(drm_device_t *dev,
899 unsigned int resource);
900extern unsigned long drm_get_resource_len(drm_device_t *dev,
901 unsigned int resource);
899 902
900 /* DMA support (drm_dma.h) */ 903 /* DMA support (drm_dma.h) */
901extern int drm_dma_setup(drm_device_t *dev); 904extern int drm_dma_setup(drm_device_t *dev);
@@ -919,15 +922,18 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
919 922
920 /* AGP/GART support (drm_agpsupport.h) */ 923 /* AGP/GART support (drm_agpsupport.h) */
921extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); 924extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
922extern int drm_agp_acquire(struct inode *inode, struct file *filp, 925extern int drm_agp_acquire(drm_device_t * dev);
923 unsigned int cmd, unsigned long arg); 926extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
924extern void drm_agp_do_release(drm_device_t *dev); 927 unsigned int cmd, unsigned long arg);
925extern int drm_agp_release(struct inode *inode, struct file *filp, 928extern int drm_agp_release(drm_device_t *dev);
926 unsigned int cmd, unsigned long arg); 929extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
927extern int drm_agp_enable(struct inode *inode, struct file *filp, 930 unsigned int cmd, unsigned long arg);
928 unsigned int cmd, unsigned long arg); 931extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
929extern int drm_agp_info(struct inode *inode, struct file *filp, 932extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
930 unsigned int cmd, unsigned long arg); 933 unsigned int cmd, unsigned long arg);
934extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
935extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
936 unsigned int cmd, unsigned long arg);
931extern int drm_agp_alloc(struct inode *inode, struct file *filp, 937extern int drm_agp_alloc(struct inode *inode, struct file *filp,
932 unsigned int cmd, unsigned long arg); 938 unsigned int cmd, unsigned long arg);
933extern int drm_agp_free(struct inode *inode, struct file *filp, 939extern int drm_agp_free(struct inode *inode, struct file *filp,
@@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
976 unsigned long addr, 982 unsigned long addr,
977 dma_addr_t bus_addr); 983 dma_addr_t bus_addr);
978 984
979extern void *drm_pci_alloc(drm_device_t * dev, size_t size, 985extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
980 size_t align, dma_addr_t maxaddr, 986 size_t align, dma_addr_t maxaddr);
981 dma_addr_t * busaddr); 987extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
982 988extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
983extern void drm_pci_free(drm_device_t * dev, size_t size,
984 void *vaddr, dma_addr_t busaddr);
985 989
986 /* sysfs support (drm_sysfs.c) */ 990 /* sysfs support (drm_sysfs.c) */
987struct drm_sysfs_class; 991struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_devi
1012 drm_ioremapfree( map->handle, map->size, dev ); 1016 drm_ioremapfree( map->handle, map->size, dev );
1013} 1017}
1014 1018
1015static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset) 1019static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
1016{ 1020{
1017 struct list_head *_list; 1021 drm_map_list_t *_entry;
1018 list_for_each( _list, &dev->maplist->head ) { 1022 list_for_each_entry(_entry, &dev->maplist->head, head)
1019 drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head ); 1023 if (_entry->user_token == token)
1020 if ( _entry->map &&
1021 _entry->map->offset == offset ) {
1022 return _entry->map; 1024 return _entry->map;
1025 return NULL;
1026}
1027
1028static __inline__ int drm_device_is_agp(drm_device_t *dev)
1029{
1030 if ( dev->driver->device_is_agp != NULL ) {
1031 int err = (*dev->driver->device_is_agp)( dev );
1032
1033 if (err != 2) {
1034 return err;
1023 } 1035 }
1024 } 1036 }
1025 return NULL; 1037
1038 return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1026} 1039}
1027 1040
1028static __inline__ void drm_core_dropmap(struct drm_map *map) 1041static __inline__ void drm_core_dropmap(struct drm_map *map)
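
drm_core_findmap now resolves the 32-bit user_token instead of a raw offset, and drm_device_is_agp layers a tri-state driver hook over a PCI-capability fallback. The control flow of that fallback, reduced to a standalone sketch; driver_says and bus_has_agp_cap are stand-ins for the device_is_agp hook and pci_find_capability:

#include <stdio.h>

/* Tri-state probe result, mirroring the device_is_agp contract:
 * 0 = definitely not AGP, 1 = definitely AGP, 2 = can't tell. */
static int driver_says(void)     { return 2; }	/* hypothetical hook */
static int bus_has_agp_cap(void) { return 1; }	/* pci_find_capability stand-in */

static int device_is_agp(void)
{
	int err = driver_says();
	if (err != 2)
		return err;		/* the driver knows for sure */
	return bus_has_agp_cap();	/* otherwise ask the bus */
}

int main(void)
{
	printf("is_agp = %d\n", device_is_agp());
	return 0;
}
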
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 8d94c0b5fa44..8c215adcb4b2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -37,7 +37,7 @@
37#if __OS_HAS_AGP 37#if __OS_HAS_AGP
38 38
39/** 39/**
40 * AGP information ioctl. 40 * Get AGP information.
41 * 41 *
42 * \param inode device inode. 42 * \param inode device inode.
43 * \param filp file pointer. 43 * \param filp file pointer.
@@ -48,51 +48,56 @@
48 * Verifies the AGP device has been initialized and acquired and fills in the 48 * Verifies the AGP device has been initialized and acquired and fills in the
49 * drm_agp_info structure with the information in drm_agp_head::agp_info. 49 * drm_agp_info structure with the information in drm_agp_head::agp_info.
50 */ 50 */
51int drm_agp_info(struct inode *inode, struct file *filp, 51int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
52 unsigned int cmd, unsigned long arg)
53{ 52{
54 drm_file_t *priv = filp->private_data;
55 drm_device_t *dev = priv->head->dev;
56 DRM_AGP_KERN *kern; 53 DRM_AGP_KERN *kern;
57 drm_agp_info_t info;
58 54
59 if (!dev->agp || !dev->agp->acquired) 55 if (!dev->agp || !dev->agp->acquired)
60 return -EINVAL; 56 return -EINVAL;
61 57
62 kern = &dev->agp->agp_info; 58 kern = &dev->agp->agp_info;
63 info.agp_version_major = kern->version.major; 59 info->agp_version_major = kern->version.major;
64 info.agp_version_minor = kern->version.minor; 60 info->agp_version_minor = kern->version.minor;
65 info.mode = kern->mode; 61 info->mode = kern->mode;
66 info.aperture_base = kern->aper_base; 62 info->aperture_base = kern->aper_base;
67 info.aperture_size = kern->aper_size * 1024 * 1024; 63 info->aperture_size = kern->aper_size * 1024 * 1024;
68 info.memory_allowed = kern->max_memory << PAGE_SHIFT; 64 info->memory_allowed = kern->max_memory << PAGE_SHIFT;
69 info.memory_used = kern->current_memory << PAGE_SHIFT; 65 info->memory_used = kern->current_memory << PAGE_SHIFT;
70 info.id_vendor = kern->device->vendor; 66 info->id_vendor = kern->device->vendor;
71 info.id_device = kern->device->device; 67 info->id_device = kern->device->device;
72 68
73 if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info))) 69 return 0;
70}
71EXPORT_SYMBOL(drm_agp_info);
72
73int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
74 unsigned int cmd, unsigned long arg)
75{
76 drm_file_t *priv = filp->private_data;
77 drm_device_t *dev = priv->head->dev;
78 drm_agp_info_t info;
79 int err;
80
81 err = drm_agp_info(dev, &info);
82 if (err)
83 return err;
84
85 if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
74 return -EFAULT; 86 return -EFAULT;
75 return 0; 87 return 0;
76} 88}
77 89
78/** 90/**
79 * Acquire the AGP device (ioctl). 91 * Acquire the AGP device.
80 * 92 *
81 * \param inode device inode. 93 * \param dev DRM device that is to acquire AGP
82 * \param filp file pointer.
83 * \param cmd command.
84 * \param arg user argument.
85 * \return zero on success or a negative number on failure. 94 * \return zero on success or a negative number on failure.
86 * 95 *
87 * Verifies the AGP device hasn't been acquired before and calls 96 * Verifies the AGP device hasn't been acquired before and calls
88 * agp_acquire(). 97 * \c agp_backend_acquire.
89 */ 98 */
90int drm_agp_acquire(struct inode *inode, struct file *filp, 99int drm_agp_acquire(drm_device_t *dev)
91 unsigned int cmd, unsigned long arg)
92{ 100{
93 drm_file_t *priv = filp->private_data;
94 drm_device_t *dev = priv->head->dev;
95
96 if (!dev->agp) 101 if (!dev->agp)
97 return -ENODEV; 102 return -ENODEV;
98 if (dev->agp->acquired) 103 if (dev->agp->acquired)
@@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
102 dev->agp->acquired = 1; 107 dev->agp->acquired = 1;
103 return 0; 108 return 0;
104} 109}
110EXPORT_SYMBOL(drm_agp_acquire);
105 111
106/** 112/**
107 * Release the AGP device (ioctl). 113 * Acquire the AGP device (ioctl).
108 * 114 *
109 * \param inode device inode. 115 * \param inode device inode.
110 * \param filp file pointer. 116 * \param filp file pointer.
@@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
112 * \param arg user argument. 118 * \param arg user argument.
113 * \return zero on success or a negative number on failure. 119 * \return zero on success or a negative number on failure.
114 * 120 *
115 * Verifies the AGP device has been acquired and calls agp_backend_release(). 121 * Verifies the AGP device hasn't been acquired before and calls
122 * \c agp_backend_acquire.
116 */ 123 */
117int drm_agp_release(struct inode *inode, struct file *filp, 124int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
118 unsigned int cmd, unsigned long arg) 125 unsigned int cmd, unsigned long arg)
119{ 126{
120 drm_file_t *priv = filp->private_data; 127 drm_file_t *priv = filp->private_data;
121 drm_device_t *dev = priv->head->dev; 128
129 return drm_agp_acquire( (drm_device_t *) priv->head->dev );
130}
122 131
132/**
133 * Release the AGP device.
134 *
135 * \param dev DRM device that is to release AGP
136 * \return zero on success or a negative number on failure.
137 *
138 * Verifies the AGP device has been acquired and calls \c agp_backend_release.
139 */
140int drm_agp_release(drm_device_t *dev)
141{
123 if (!dev->agp || !dev->agp->acquired) 142 if (!dev->agp || !dev->agp->acquired)
124 return -EINVAL; 143 return -EINVAL;
125 agp_backend_release(dev->agp->bridge); 144 agp_backend_release(dev->agp->bridge);
126 dev->agp->acquired = 0; 145 dev->agp->acquired = 0;
127 return 0; 146 return 0;
128
129} 147}
148EXPORT_SYMBOL(drm_agp_release);
130 149
131/** 150int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
132 * Release the AGP device. 151 unsigned int cmd, unsigned long arg)
133 *
134 * Calls agp_backend_release().
135 */
136void drm_agp_do_release(drm_device_t *dev)
137{ 152{
138 agp_backend_release(dev->agp->bridge); 153 drm_file_t *priv = filp->private_data;
154 drm_device_t *dev = priv->head->dev;
155
156 return drm_agp_release(dev);
139} 157}
140 158
141/** 159/**
142 * Enable the AGP bus. 160 * Enable the AGP bus.
143 * 161 *
144 * \param inode device inode. 162 * \param dev DRM device that has previously acquired AGP.
145 * \param filp file pointer. 163 * \param mode Requested AGP mode.
146 * \param cmd command.
147 * \param arg pointer to a drm_agp_mode structure.
148 * \return zero on success or a negative number on failure. 164 * \return zero on success or a negative number on failure.
149 * 165 *
150 * Verifies the AGP device has been acquired but not enabled, and calls 166 * Verifies the AGP device has been acquired but not enabled, and calls
151 * agp_enable(). 167 * \c agp_enable.
152 */ 168 */
153int drm_agp_enable(struct inode *inode, struct file *filp, 169int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
154 unsigned int cmd, unsigned long arg)
155{ 170{
156 drm_file_t *priv = filp->private_data;
157 drm_device_t *dev = priv->head->dev;
158 drm_agp_mode_t mode;
159
160 if (!dev->agp || !dev->agp->acquired) 171 if (!dev->agp || !dev->agp->acquired)
161 return -EINVAL; 172 return -EINVAL;
162 173
163 if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
164 return -EFAULT;
165
166 dev->agp->mode = mode.mode; 174 dev->agp->mode = mode.mode;
167 agp_enable(dev->agp->bridge, mode.mode); 175 agp_enable(dev->agp->bridge, mode.mode);
168 dev->agp->base = dev->agp->agp_info.aper_base; 176 dev->agp->base = dev->agp->agp_info.aper_base;
169 dev->agp->enabled = 1; 177 dev->agp->enabled = 1;
170 return 0; 178 return 0;
171} 179}
180EXPORT_SYMBOL(drm_agp_enable);
181
182int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
183 unsigned int cmd, unsigned long arg)
184{
185 drm_file_t *priv = filp->private_data;
186 drm_device_t *dev = priv->head->dev;
187 drm_agp_mode_t mode;
188
189
190 if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
191 return -EFAULT;
192
193 return drm_agp_enable(dev, mode);
194}
172 195
173/** 196/**
174 * Allocate AGP memory. 197 * Allocate AGP memory.
@@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
206 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 229 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
207 type = (u32) request.type; 230 type = (u32) request.type;
208 231
209 if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) { 232 if (!(memory = drm_alloc_agp(dev, pages, type))) {
210 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 233 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
211 return -ENOMEM; 234 return -ENOMEM;
212 } 235 }
@@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
403 return NULL; 426 return NULL;
404 } 427 }
405 head->memory = NULL; 428 head->memory = NULL;
406#if LINUX_VERSION_CODE <= 0x020408
407 head->cant_use_aperture = 0;
408 head->page_mask = ~(0xfff);
409#else
410 head->cant_use_aperture = head->agp_info.cant_use_aperture; 429 head->cant_use_aperture = head->agp_info.cant_use_aperture;
411 head->page_mask = head->agp_info.page_mask; 430 head->page_mask = head->agp_info.page_mask;
412#endif
413 431
414 return head; 432 return head;
415} 433}
@@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
436 return -EINVAL; 454 return -EINVAL;
437 return agp_bind_memory(handle, start); 455 return agp_bind_memory(handle, start);
438} 456}
457EXPORT_SYMBOL(drm_agp_bind_memory);
439 458
440/** Calls agp_unbind_memory() */ 459/** Calls agp_unbind_memory() */
441int drm_agp_unbind_memory(DRM_AGP_MEM *handle) 460int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
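
The pattern repeated through this file: each operation is split into an exported core function that works on kernel-space arguments and a thin ioctl wrapper that only marshals user memory. A generic sketch of the split, assuming a hypothetical demo_arg_t payload and demo_* names:

int demo_core(drm_device_t *dev, demo_arg_t *arg)
{
	if (!dev->agp || !dev->agp->acquired)
		return -EINVAL;
	/* ... operate on *arg with no user-memory access ... */
	return 0;
}

int demo_ioctl(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long uarg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	demo_arg_t arg;
	int err;

	if (copy_from_user(&arg, (demo_arg_t __user *)uarg, sizeof(arg)))
		return -EFAULT;

	err = demo_core(dev, &arg);	/* in-kernel callers skip straight here */
	if (err)
		return err;

	if (copy_to_user((demo_arg_t __user *)uarg, &arg, sizeof(arg)))
		return -EFAULT;
	return 0;
}
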
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4c6191d231b8..e0743ebbe4bd 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,37 +36,69 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include "drmP.h" 37#include "drmP.h"
38 38
39/** 39unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
40 * Compute size order. Returns the exponent of the smallest power of two
41 * which is greater than or equal to the given number (e.g. drm_order(4096) == 12).
42 *
43 * \param size size.
44 * \return order.
45 *
46 * \todo Can be made faster.
47 */
48int drm_order( unsigned long size )
49{ 40{
50 int order; 41 return pci_resource_start(dev->pdev, resource);
51 unsigned long tmp; 42}
43EXPORT_SYMBOL(drm_get_resource_start);
52 44
53 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) 45unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
54 ; 46{
47 return pci_resource_len(dev->pdev, resource);
48}
49EXPORT_SYMBOL(drm_get_resource_len);
55 50
56 if (size & (size - 1)) 51static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
57 ++order; 52 drm_local_map_t *map)
53{
54 struct list_head *list;
58 55
59 return order; 56 list_for_each(list, &dev->maplist->head) {
57 drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
58 if (entry->map && map->type == entry->map->type &&
59 entry->map->offset == map->offset) {
60 return entry->map;
61 }
62 }
63
64 return NULL;
60} 65}
61EXPORT_SYMBOL(drm_order);
62 66
63#ifdef CONFIG_COMPAT
64/* 67/*
65 * Used to allocate 32-bit handles for _DRM_SHM regions 68 * Used to allocate 32-bit handles for mappings.
66 * The 0x10000000 value is chosen to be out of the way of
67 * FB/register and GART physical addresses.
68 */ 69 */
69static unsigned int map32_handle = 0x10000000; 70#define START_RANGE 0x10000000
71#define END_RANGE 0x40000000
72
73#ifdef _LP64
74static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
75{
76 static unsigned int map32_handle = START_RANGE;
77 unsigned int hash;
78
79 if (lhandle & 0xffffffff00000000) {
80 hash = map32_handle;
81 map32_handle += PAGE_SIZE;
82 if (map32_handle > END_RANGE)
83 map32_handle = START_RANGE;
84 } else
85 hash = lhandle;
86
87 while (1) {
88 drm_map_list_t *_entry;
89 list_for_each_entry(_entry, &dev->maplist->head,head) {
90 if (_entry->user_token == hash)
91 break;
92 }
93 if (&_entry->head == &dev->maplist->head)
94 return hash;
95
96 hash += PAGE_SIZE;
97 map32_handle += PAGE_SIZE;
98 }
99}
100#else
101# define HandleID(x,dev) (unsigned int)(x)
70#endif 102#endif
71 103
72/** 104/**
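
On 64-bit kernels HandleID() has to squeeze a kernel address into a unique 32-bit token, which it does by linear probing in PAGE_SIZE steps through [START_RANGE, END_RANGE]. A simplified, runnable model of that probe; the used[] array stands in for walking dev->maplist, and the wrap-around is folded into the loop:

#include <stdio.h>

#define PAGE_SIZE   4096u
#define START_RANGE 0x10000000u
#define END_RANGE   0x40000000u

/* Tokens already handed out; stand-in for the maplist walk. */
static unsigned int used[] = { 0x10000000u, 0x10001000u };

static int token_in_use(unsigned int t)
{
	unsigned int i;
	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++)
		if (used[i] == t)
			return 1;
	return 0;
}

/* Probe in PAGE_SIZE steps until an unused 32-bit token is found. */
static unsigned int pick_token(void)
{
	static unsigned int next = START_RANGE;
	unsigned int hash = next;

	while (token_in_use(hash)) {
		hash += PAGE_SIZE;
		if (hash > END_RANGE)
			hash = START_RANGE;
	}
	next = hash + PAGE_SIZE;
	return hash;
}

int main(void)
{
	printf("token = 0x%08x\n", pick_token());	/* 0x10002000 here */
	return 0;
}
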
@@ -82,25 +114,23 @@ static unsigned int map32_handle = 0x10000000;
82 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where 114 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
83 * applicable and if supported by the kernel. 115 * applicable and if supported by the kernel.
84 */ 116 */
85int drm_addmap( struct inode *inode, struct file *filp, 117int drm_addmap(drm_device_t * dev, unsigned int offset,
86 unsigned int cmd, unsigned long arg ) 118 unsigned int size, drm_map_type_t type,
119 drm_map_flags_t flags, drm_local_map_t ** map_ptr)
87{ 120{
88 drm_file_t *priv = filp->private_data;
89 drm_device_t *dev = priv->head->dev;
90 drm_map_t *map; 121 drm_map_t *map;
91 drm_map_t __user *argp = (void __user *)arg;
92 drm_map_list_t *list; 122 drm_map_list_t *list;
93 123 drm_dma_handle_t *dmah;
94 if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */ 124 drm_local_map_t *found_map;
95 125
96 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS ); 126 map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
97 if ( !map ) 127 if ( !map )
98 return -ENOMEM; 128 return -ENOMEM;
99 129
100 if ( copy_from_user( map, argp, sizeof(*map) ) ) { 130 map->offset = offset;
101 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 131 map->size = size;
102 return -EFAULT; 132 map->flags = flags;
103 } 133 map->type = type;
104 134
105 /* Only allow shared memory to be removable since we only keep enough 135 /* Only allow shared memory to be removable since we only keep enough
106 * book keeping information about shared memory to allow for removal 136 * book keeping information about shared memory to allow for removal
@@ -122,7 +152,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
122 switch ( map->type ) { 152 switch ( map->type ) {
123 case _DRM_REGISTERS: 153 case _DRM_REGISTERS:
124 case _DRM_FRAME_BUFFER: 154 case _DRM_FRAME_BUFFER:
125#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) 155#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
126 if ( map->offset + map->size < map->offset || 156 if ( map->offset + map->size < map->offset ||
127 map->offset < virt_to_phys(high_memory) ) { 157 map->offset < virt_to_phys(high_memory) ) {
128 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 158 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +162,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
132#ifdef __alpha__ 162#ifdef __alpha__
133 map->offset += dev->hose->mem_space->start; 163 map->offset += dev->hose->mem_space->start;
134#endif 164#endif
165 /* Some drivers preinitialize some maps, without the X Server
166 * needing to be aware of it. Therefore, we just return success
167 * when the server tries to create a duplicate map.
168 */
169 found_map = drm_find_matching_map(dev, map);
170 if (found_map != NULL) {
171 if (found_map->size != map->size) {
172 DRM_DEBUG("Matching maps of type %d with "
173 "mismatched sizes, (%ld vs %ld)\n",
174 map->type, map->size, found_map->size);
175 found_map->size = map->size;
176 }
177
178 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
179 *map_ptr = found_map;
180 return 0;
181 }
182
135 if (drm_core_has_MTRR(dev)) { 183 if (drm_core_has_MTRR(dev)) {
136 if ( map->type == _DRM_FRAME_BUFFER || 184 if ( map->type == _DRM_FRAME_BUFFER ||
137 (map->flags & _DRM_WRITE_COMBINING) ) { 185 (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +226,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
178 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 226 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
179 return -EINVAL; 227 return -EINVAL;
180 } 228 }
181 map->offset += dev->sg->handle; 229 map->offset += (unsigned long)dev->sg->virtual;
230 break;
231 case _DRM_CONSISTENT:
232 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
233 * As we're limiting the address to 2^32-1 (or less),
234 * casting it down to 32 bits is no problem, but we
235 * need to point to a 64bit variable first. */
236 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
237 if (!dmah) {
238 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
239 return -ENOMEM;
240 }
241 map->handle = dmah->vaddr;
242 map->offset = (unsigned long)dmah->busaddr;
243 kfree(dmah);
182 break; 244 break;
183
184 default: 245 default:
185 drm_free( map, sizeof(*map), DRM_MEM_MAPS ); 246 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
186 return -EINVAL; 247 return -EINVAL;
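
_DRM_CONSISTENT maps are backed by the new drm_pci_alloc(), which returns a drm_dma_handle_t bundling the kernel virtual address, the bus address, and the size. A plausible shape for that allocator, assuming it wraps pci_alloc_consistent; the real drm_pci.c may handle the alignment and maxaddr arguments differently, and demo_pci_alloc is a hypothetical name:

drm_dma_handle_t *demo_pci_alloc(drm_device_t *dev, size_t size)
{
	drm_dma_handle_t *dmah;

	dmah = kmalloc(sizeof(*dmah), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
	if (!dmah->vaddr) {		/* no consistent memory available */
		kfree(dmah);
		return NULL;
	}
	return dmah;
}
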
@@ -196,17 +257,56 @@ int drm_addmap( struct inode *inode, struct file *filp,
196 257
197 down(&dev->struct_sem); 258 down(&dev->struct_sem);
198 list_add(&list->head, &dev->maplist->head); 259 list_add(&list->head, &dev->maplist->head);
199#ifdef CONFIG_COMPAT 260 /* Assign a 32-bit handle */
200 /* Assign a 32-bit handle for _DRM_SHM mappings */
201 /* We do it here so that dev->struct_sem protects the increment */ 261 /* We do it here so that dev->struct_sem protects the increment */
202 if (map->type == _DRM_SHM) 262 list->user_token = HandleID(map->type==_DRM_SHM
203 map->offset = map32_handle += PAGE_SIZE; 263 ? (unsigned long)map->handle
204#endif 264 : map->offset, dev);
205 up(&dev->struct_sem); 265 up(&dev->struct_sem);
206 266
207 if ( copy_to_user( argp, map, sizeof(*map) ) ) 267 *map_ptr = map;
268 return 0;
269}
270EXPORT_SYMBOL(drm_addmap);
271
272int drm_addmap_ioctl(struct inode *inode, struct file *filp,
273 unsigned int cmd, unsigned long arg)
274{
275 drm_file_t *priv = filp->private_data;
276 drm_device_t *dev = priv->head->dev;
277 drm_map_t map;
278 drm_map_t *map_ptr;
279 drm_map_t __user *argp = (void __user *)arg;
280 int err;
281 unsigned long handle = 0;
282
283 if (!(filp->f_mode & 3))
284 return -EACCES; /* Require read/write */
285
286 if (copy_from_user(& map, argp, sizeof(map))) {
287 return -EFAULT;
288 }
289
290 err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
291 &map_ptr);
292
293 if (err) {
294 return err;
295 }
296
297 {
298 drm_map_list_t *_entry;
299 list_for_each_entry(_entry, &dev->maplist->head, head) {
300 if (_entry->map == map_ptr)
301 handle = _entry->user_token;
302 }
303 if (!handle)
304 return -EFAULT;
305 }
306
307 if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
208 return -EFAULT; 308 return -EFAULT;
209 if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset))) 309 if (put_user(handle, &argp->handle))
210 return -EFAULT; 310 return -EFAULT;
211 return 0; 311 return 0;
212} 312}
@@ -226,81 +326,138 @@ int drm_addmap( struct inode *inode, struct file *filp,
226 * it's being used, and free any associated resource (such as MTRRs) if it's not 326 * it's being used, and free any associated resource (such as MTRRs) if it's not
227 * in use. 327 * in use.
228 * 328 *
229 * \sa addmap(). 329 * \sa drm_addmap
230 */ 330 */
231int drm_rmmap(struct inode *inode, struct file *filp, 331int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
232 unsigned int cmd, unsigned long arg)
233{ 332{
234 drm_file_t *priv = filp->private_data;
235 drm_device_t *dev = priv->head->dev;
236 struct list_head *list; 333 struct list_head *list;
237 drm_map_list_t *r_list = NULL; 334 drm_map_list_t *r_list = NULL;
238 drm_vma_entry_t *pt, *prev; 335 drm_dma_handle_t dmah;
239 drm_map_t *map; 336
337 /* Find the list entry for the map and remove it */
338 list_for_each(list, &dev->maplist->head) {
339 r_list = list_entry(list, drm_map_list_t, head);
340
341 if (r_list->map == map) {
342 list_del(list);
343 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
344 break;
345 }
346 }
347
348 /* List has wrapped around to the head pointer, or it's empty and we
349 * didn't find anything.
350 */
351 if (list == (&dev->maplist->head)) {
352 return -EINVAL;
353 }
354
355 switch (map->type) {
356 case _DRM_REGISTERS:
357 drm_ioremapfree(map->handle, map->size, dev);
358 /* FALLTHROUGH */
359 case _DRM_FRAME_BUFFER:
360 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
361 int retcode;
362 retcode = mtrr_del(map->mtrr, map->offset,
363 map->size);
364 DRM_DEBUG ("mtrr_del=%d\n", retcode);
365 }
366 break;
367 case _DRM_SHM:
368 vfree(map->handle);
369 break;
370 case _DRM_AGP:
371 case _DRM_SCATTER_GATHER:
372 break;
373 case _DRM_CONSISTENT:
374 dmah.vaddr = map->handle;
375 dmah.busaddr = map->offset;
376 dmah.size = map->size;
377 __drm_pci_free(dev, &dmah);
378 break;
379 }
380 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
381
382 return 0;
383}
384EXPORT_SYMBOL(drm_rmmap_locked);
385
386int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
387{
388 int ret;
389
390 down(&dev->struct_sem);
391 ret = drm_rmmap_locked(dev, map);
392 up(&dev->struct_sem);
393
394 return ret;
395}
396EXPORT_SYMBOL(drm_rmmap);
397
398/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
399 * the last close of the device, and this is necessary for cleanup when things
400 * exit uncleanly. Therefore, having userland manually remove mappings seems
401 * like a pointless exercise since they're going away anyway.
402 *
403 * One use case might be after addmap is allowed for normal users for SHM and
404 * gets used by drivers that the server doesn't need to care about. This seems
405 * unlikely.
406 */
407int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
408 unsigned int cmd, unsigned long arg)
409{
410 drm_file_t *priv = filp->private_data;
411 drm_device_t *dev = priv->head->dev;
240 drm_map_t request; 412 drm_map_t request;
241 int found_maps = 0; 413 drm_local_map_t *map = NULL;
414 struct list_head *list;
415 int ret;
242 416
243 if (copy_from_user(&request, (drm_map_t __user *)arg, 417 if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
244 sizeof(request))) {
245 return -EFAULT; 418 return -EFAULT;
246 } 419 }
247 420
248 down(&dev->struct_sem); 421 down(&dev->struct_sem);
249 list = &dev->maplist->head;
250 list_for_each(list, &dev->maplist->head) { 422 list_for_each(list, &dev->maplist->head) {
251 r_list = list_entry(list, drm_map_list_t, head); 423 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
252 424
253 if(r_list->map && 425 if (r_list->map &&
254 r_list->map->offset == (unsigned long) request.handle && 426 r_list->user_token == (unsigned long) request.handle &&
255 r_list->map->flags & _DRM_REMOVABLE) break; 427 r_list->map->flags & _DRM_REMOVABLE) {
428 map = r_list->map;
429 break;
430 }
256 } 431 }
257 432
258 /* List has wrapped around to the head pointer, or it's empty and we didn't 433 /* List has wrapped around to the head pointer, or it's empty and we didn't
259 * find anything. 434 * find anything.
260 */ 435 */
261 if(list == (&dev->maplist->head)) { 436 if (list == (&dev->maplist->head)) {
262 up(&dev->struct_sem); 437 up(&dev->struct_sem);
263 return -EINVAL; 438 return -EINVAL;
264 } 439 }
265 map = r_list->map;
266 list_del(list);
267 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
268 440
269 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) { 441 if (!map)
270 if (pt->vma->vm_private_data == map) found_maps++; 442 return -EINVAL;
271 }
272 443
273 if(!found_maps) { 444 /* Register and framebuffer maps are permanent */
274 switch (map->type) { 445 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
275 case _DRM_REGISTERS: 446 up(&dev->struct_sem);
276 case _DRM_FRAME_BUFFER: 447 return 0;
277 if (drm_core_has_MTRR(dev)) {
278 if (map->mtrr >= 0) {
279 int retcode;
280 retcode = mtrr_del(map->mtrr,
281 map->offset,
282 map->size);
283 DRM_DEBUG("mtrr_del = %d\n", retcode);
284 }
285 }
286 drm_ioremapfree(map->handle, map->size, dev);
287 break;
288 case _DRM_SHM:
289 vfree(map->handle);
290 break;
291 case _DRM_AGP:
292 case _DRM_SCATTER_GATHER:
293 break;
294 }
295 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
296 } 448 }
449
450 ret = drm_rmmap_locked(dev, map);
451
297 up(&dev->struct_sem); 452 up(&dev->struct_sem);
298 return 0; 453
454 return ret;
299} 455}
300 456
301/** 457/**
302 * Cleanup after an error on one of the addbufs() functions. 458 * Cleanup after an error on one of the addbufs() functions.
303 * 459 *
460 * \param dev DRM device.
304 * \param entry buffer entry where the error occurred. 461 * \param entry buffer entry where the error occurred.
305 * 462 *
306 * Frees any pages and buffers associated with the given entry. 463 * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
344 501
345#if __OS_HAS_AGP 502#if __OS_HAS_AGP
346/** 503/**
347 * Add AGP buffers for DMA transfers (ioctl). 504 * Add AGP buffers for DMA transfers.
348 * 505 *
349 * \param inode device inode. 506 * \param dev drm_device_t to which the buffers are to be added.
350 * \param filp file pointer. 507 * \param request pointer to a drm_buf_desc_t describing the request.
351 * \param cmd command.
352 * \param arg pointer to a drm_buf_desc_t request.
353 * \return zero on success or a negative number on failure. 508 * \return zero on success or a negative number on failure.
354 * 509 *
355 * After some sanity checks creates a drm_buf structure for each buffer and 510 * After some sanity checks creates a drm_buf structure for each buffer and
356 * reallocates the buffer list of the same size order to accommodate the new 511 * reallocates the buffer list of the same size order to accommodate the new
357 * buffers. 512 * buffers.
358 */ 513 */
359static int drm_addbufs_agp( struct inode *inode, struct file *filp, 514int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
360 unsigned int cmd, unsigned long arg )
361{ 515{
362 drm_file_t *priv = filp->private_data;
363 drm_device_t *dev = priv->head->dev;
364 drm_device_dma_t *dma = dev->dma; 516 drm_device_dma_t *dma = dev->dma;
365 drm_buf_desc_t request;
366 drm_buf_entry_t *entry; 517 drm_buf_entry_t *entry;
367 drm_buf_t *buf; 518 drm_buf_t *buf;
368 unsigned long offset; 519 unsigned long offset;
@@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
376 int byte_count; 527 int byte_count;
377 int i; 528 int i;
378 drm_buf_t **temp_buflist; 529 drm_buf_t **temp_buflist;
379 drm_buf_desc_t __user *argp = (void __user *)arg;
380 530
381 if ( !dma ) return -EINVAL; 531 if ( !dma ) return -EINVAL;
382 532
383 if ( copy_from_user( &request, argp, 533 count = request->count;
384 sizeof(request) ) ) 534 order = drm_order(request->size);
385 return -EFAULT;
386
387 count = request.count;
388 order = drm_order( request.size );
389 size = 1 << order; 535 size = 1 << order;
390 536
391 alignment = (request.flags & _DRM_PAGE_ALIGN) 537 alignment = (request->flags & _DRM_PAGE_ALIGN)
392 ? PAGE_ALIGN(size) : size; 538 ? PAGE_ALIGN(size) : size;
393 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; 539 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
394 total = PAGE_SIZE << page_order; 540 total = PAGE_SIZE << page_order;
395 541
396 byte_count = 0; 542 byte_count = 0;
397 agp_offset = dev->agp->base + request.agp_start; 543 agp_offset = dev->agp->base + request->agp_start;
398 544
399 DRM_DEBUG( "count: %d\n", count ); 545 DRM_DEBUG( "count: %d\n", count );
400 DRM_DEBUG( "order: %d\n", order ); 546 DRM_DEBUG( "order: %d\n", order );
@@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
508 654
509 up( &dev->struct_sem ); 655 up( &dev->struct_sem );
510 656
511 request.count = entry->buf_count; 657 request->count = entry->buf_count;
512 request.size = size; 658 request->size = size;
513
514 if ( copy_to_user( argp, &request, sizeof(request) ) )
515 return -EFAULT;
516 659
517 dma->flags = _DRM_DMA_USE_AGP; 660 dma->flags = _DRM_DMA_USE_AGP;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	int count;
 	int order;
 	int size;
@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 	int page_count;
 	unsigned long *temp_pagelist;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		   request.count, request.size, size,
+		   request->count, request->size, size,
 		   order, dev->queue_count );
 
 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
 	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 
 }
+EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg( struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg )
+static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = request.agp_start;
+	agp_offset = request->agp_start;
 
 	DRM_DEBUG( "count: %d\n", count );
 	DRM_DEBUG( "order: %d\n", order );
@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 		buf->offset = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+		buf->address = (void *)(agp_offset + offset
+				+ (unsigned long)dev->sg->virtual);
 		buf->next = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_SG;
 
@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 	return 0;
 }
 
+int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
+{
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_entry_t *entry;
+	drm_buf_t *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	drm_buf_t **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	down(&dev->struct_sem);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->filp = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			up(&dev->struct_sem);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	up(&dev->struct_sem);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp,
 	drm_buf_desc_t request;
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp,
 
 #if __OS_HAS_AGP
 	if ( request.flags & _DRM_AGP_BUFFER )
-		return drm_addbufs_agp( inode, filp, cmd, arg );
+		ret=drm_addbufs_agp(dev, &request);
 	else
 #endif
 	if ( request.flags & _DRM_SG_BUFFER )
-		return drm_addbufs_sg( inode, filp, cmd, arg );
+		ret=drm_addbufs_sg(dev, &request);
+	else if ( request.flags & _DRM_FB_BUFFER)
+		ret=drm_addbufs_fb(dev, &request);
 	else
-		return drm_addbufs_pci( inode, filp, cmd, arg );
+		ret=drm_addbufs_pci(dev, &request);
+
+	if (ret==0) {
+		if (copy_to_user((void __user *)arg, &request,
+				 sizeof(request))) {
+			ret = -EFAULT;
+		}
+	}
+	return ret;
 }
 
 
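[Annotation] The hunk above moves all user-space copying into the drm_addbufs() ioctl wrapper, so the per-backend helpers now take plain kernel pointers and the AGP/PCI helpers are exported. A minimal sketch of an in-kernel caller under that assumption; the function name and request values below are illustrative, not part of this patch:

	/* Illustrative only: drive the exported helper directly, with no
	 * user pointers involved. */
	static int example_populate_dma(drm_device_t *dev)
	{
		drm_buf_desc_t request;

		memset(&request, 0, sizeof(request));
		request.count = 32;              /* ask for 32 buffers */
		request.size = 4096;             /* rounded via drm_order() */
		request.flags = _DRM_PAGE_ALIGN; /* page-align each buffer */

		/* On success, request.count/request.size report what was
		 * actually allocated. */
		return drm_addbufs_pci(dev, &request);
	}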
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
 		return -EFAULT;
 
 	if ( request.count >= dma->buf_count ) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-		    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
 			drm_map_t *map = dev->agp_buffer_map;
+			unsigned long token = dev->agp_buffer_token;
 
 			if ( !map ) {
 				retcode = -EINVAL;
 				goto done;
 			}
 
-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
 			down_write( &current->mm->mmap_sem );
-#endif
 			virtual = do_mmap( filp, 0, map->size,
 					   PROT_READ | PROT_WRITE,
 					   MAP_SHARED,
-					   (unsigned long)map->offset );
+					   token );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
 			up_write( &current->mm->mmap_sem );
-#endif
 		} else {
-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
 			down_write( &current->mm->mmap_sem );
-#endif
 			virtual = do_mmap( filp, 0, dma->byte_count,
 					   PROT_READ | PROT_WRITE,
 					   MAP_SHARED, 0 );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
 			up_write( &current->mm->mmap_sem );
-#endif
 		}
 		if ( virtual > -1024UL ) {
 			/* Real error */
@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
 	return retcode;
 }
 
+/**
+ * Compute size order. Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order( unsigned long size )
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+		;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
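[Annotation] The loop in drm_order() computes floor(log2(size)) and the final power-of-two test rounds up, so the result is a ceiling log2. A hedged illustration with made-up values (assumes the DRM core headers are in scope):

	static void example_orders(void)
	{
		/* drm_order() returns the smallest n with (1 << n) >= size */
		int a = drm_order(4096); /* 12: already a power of two */
		int b = drm_order(4097); /* 13: rounded up */
		int c = drm_order(1);    /*  0 */
		DRM_DEBUG("orders: %d %d %d\n", a, b, c);
	}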
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index a7cfabd1ca2e..f515567e5b6f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
 	drm_ctx_priv_map_t request;
 	drm_map_t *map;
+	drm_map_list_t *_entry;
 
 	if (copy_from_user(&request, argp, sizeof(request)))
 		return -EFAULT;
@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 	map = dev->context_sareas[request.ctx_id];
 	up(&dev->struct_sem);
 
-	request.handle = (void *) map->offset;
+	request.handle = 0;
+	list_for_each_entry(_entry, &dev->maplist->head,head) {
+		if (_entry->map == map) {
+			request.handle = (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request.handle == 0)
+		return -EINVAL;
+
+
 	if (copy_to_user(argp, &request, sizeof(request)))
 		return -EFAULT;
 	return 0;
@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		if (r_list->map
-		    && r_list->map->offset == (unsigned long) request.handle)
+		    && r_list->user_token == (unsigned long) request.handle)
 			goto found;
 	}
 bad:
@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
 	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
 		ctx.handle = i;
 		if ( copy_to_user( &res.contexts[i],
-				   &i, sizeof(i) ) )
+				   &ctx, sizeof(ctx) ) )
 			return -EFAULT;
 		}
 	}
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 3333c250c4d9..6ba48f346fcf 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { drm_noop, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { drm_authmagic, 1, 1 },
 
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap_ioctl,1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap_ioctl, 1, 0 },
 
 	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { drm_control, 1, 1 },
 
 #if __OS_HAS_AGP
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info_ioctl, 1, 0 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { drm_agp_alloc, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { drm_agp_free, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { drm_agp_bind, 1, 1 },
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = {
  *
  * Frees every resource in \p dev.
  *
- * \sa drm_device and setup().
+ * \sa drm_device
  */
 int drm_takedown( drm_device_t *dev )
 {
 	drm_magic_entry_t *pt, *next;
-	drm_map_t *map;
 	drm_map_list_t *r_list;
-	struct list_head *list, *list_next;
 	drm_vma_entry_t *vma, *vma_next;
 	int i;
 
@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev )
 
 	if (dev->driver->pretakedown)
 		dev->driver->pretakedown(dev);
+	DRM_DEBUG("driver pretakedown completed\n");
 
 	if (dev->unique) {
 		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev )
 		}
 		dev->agp->memory = NULL;
 
-		if ( dev->agp->acquired ) drm_agp_do_release(dev);
+		if (dev->agp->acquired)
+			drm_agp_release(dev);
 
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
 
 	/* Clear vma list (only built for debugging) */
 	if ( dev->vmalist ) {
@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev )
 	}
 
 	if( dev->maplist ) {
-		list_for_each_safe( list, list_next, &dev->maplist->head ) {
-			r_list = (drm_map_list_t *)list;
-
-			if ( ( map = r_list->map ) ) {
-				switch ( map->type ) {
-				case _DRM_REGISTERS:
-				case _DRM_FRAME_BUFFER:
-					if (drm_core_has_MTRR(dev)) {
-						if ( map->mtrr >= 0 ) {
-							int retcode;
-							retcode = mtrr_del( map->mtrr,
-									    map->offset,
-									    map->size );
-							DRM_DEBUG( "mtrr_del=%d\n", retcode );
-						}
-					}
-					drm_ioremapfree( map->handle, map->size, dev );
-					break;
-				case _DRM_SHM:
-					vfree(map->handle);
-					break;
-
-				case _DRM_AGP:
-					/* Do nothing here, because this is all
-					 * handled in the AGP/GART driver.
-					 */
-					break;
-				case _DRM_SCATTER_GATHER:
-					/* Handle it */
-					if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
-						drm_sg_cleanup(dev->sg);
-						dev->sg = NULL;
-					}
-					break;
-				}
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-			}
-			list_del( list );
-			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
-		}
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		dev->maplist = NULL;
+		while (!list_empty(&dev->maplist->head)) {
+			struct list_head *list = dev->maplist->head.next;
+			r_list = list_entry(list, drm_map_list_t, head);
+			drm_rmmap_locked(dev, r_list->map);
+		}
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev )
 	}
 	up( &dev->struct_sem );
 
+	DRM_DEBUG("takedown completed\n");
 	return 0;
 }
 
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init);
  *
  * Cleans up all DRM device, calling takedown().
  *
- * \sa drm_init().
+ * \sa drm_init
 */
 static void drm_cleanup( drm_device_t *dev )
 {
@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev )
 
 	drm_takedown( dev );
 
+	if (dev->maplist) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		dev->maplist = NULL;
+	}
+
 	drm_ctxbitmap_cleanup( dev );
 
 	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 10e64fde8d78..a1f4e9cd64ed 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev )
 		dev->magiclist[i].tail = NULL;
 	}
 
-	dev->maplist = drm_alloc(sizeof(*dev->maplist),
-				 DRM_MEM_MAPS);
-	if(dev->maplist == NULL) return -ENOMEM;
-	memset(dev->maplist, 0, sizeof(*dev->maplist));
-	INIT_LIST_HEAD(&dev->maplist->head);
-
 	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
 				 DRM_MEM_CTXLIST);
 	if(dev->ctxlist == NULL) return -ENOMEM;
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 39afda0ccabe..d2ed3ba5aca9 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp,
 		map.size = r_list->map->size;
 		map.type = r_list->map->type;
 		map.flags = r_list->map->flags;
-		map.handle = r_list->map->handle;
+		map.handle = (void *)(unsigned long) r_list->user_token;
 		map.mtrr = r_list->map->mtrr;
 		up(&dev->struct_sem);
 
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index ace3d42f4407..ff483fb418aa 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area)
 
 #if __OS_HAS_AGP
 /** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
 {
-	return drm_agp_allocate_memory(bridge, pages, type);
+	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
 }
+EXPORT_SYMBOL(drm_alloc_agp);
 
 /** Wrapper around agp_free_memory() */
 int drm_free_agp(DRM_AGP_MEM *handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
 {
 	return drm_agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_bind_agp);
 
 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM *handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 #endif /* agp */
 #endif /* debug_memory */
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 192e8762571c..09ed712c1a7f 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -46,11 +46,11 @@
 /**
  * \brief Allocate a PCI consistent memory block, for DMA.
  */
-void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
-		    dma_addr_t maxaddr, dma_addr_t * busaddr)
+drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+				dma_addr_t maxaddr)
 {
-	void *address;
-#if DRM_DEBUG_MEMORY
+	drm_dma_handle_t *dmah;
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;
 
 	spin_lock(&drm_mem_lock);
@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
 		return NULL;
 	}
 
-	address = pci_alloc_consistent(dev->pdev, size, busaddr);
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+
+	dmah->size = size;
+	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
 
-#if DRM_DEBUG_MEMORY
-	if (address == NULL) {
+#ifdef DRM_DEBUG_MEMORY
+	if (dmah->vaddr == NULL) {
 		spin_lock(&drm_mem_lock);
 		++drm_mem_stats[area].fail_count;
 		spin_unlock(&drm_mem_lock);
+		kfree(dmah);
 		return NULL;
 	}
 
@@ -90,37 +96,42 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
 	drm_ram_used += size;
 	spin_unlock(&drm_mem_lock);
 #else
-	if (address == NULL)
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
 		return NULL;
+	}
 #endif
 
-	memset(address, 0, size);
+	memset(dmah->vaddr, 0, size);
 
-	return address;
+	return dmah;
 }
 EXPORT_SYMBOL(drm_pci_alloc);
 
 /**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
  */
 void
-drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
+__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
 {
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;
 	int alloc_count;
 	int free_count;
 #endif
 
-	if (!vaddr) {
-#if DRM_DEBUG_MEMORY
+	if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
 		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
 #endif
 	} else {
-		pci_free_consistent(dev->pdev, size, vaddr, busaddr);
+		pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
+				    dmah->busaddr);
 	}
 
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	spin_lock(&drm_mem_lock);
 	free_count = ++drm_mem_stats[area].free_count;
 	alloc_count = drm_mem_stats[area].succeed_count;
@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
 #endif
 
 }
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void
+drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
 EXPORT_SYMBOL(drm_pci_free);
 
 /*@}*/
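[Annotation] A hedged sketch of the new handle-based API introduced above: drm_pci_alloc() now returns a drm_dma_handle_t instead of filling in a caller-supplied bus-address pointer, and drm_pci_free() releases both the block and the handle. The function name, sizes, and maxaddr below are illustrative only:

	static int example_consistent_block(drm_device_t *dev)
	{
		drm_dma_handle_t *dmah;

		dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
		if (!dmah)
			return -ENOMEM;

		DRM_DEBUG("cpu %p bus 0x%lx\n", dmah->vaddr,
			  (unsigned long)dmah->busaddr);

		drm_pci_free(dev, dmah);	/* frees block and handle */
		return 0;
	}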
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 70ca4fa55c9d..58b1747cd440 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -25,6 +25,8 @@
 	{0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
+	{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+	{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
 	{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
@@ -33,7 +35,17 @@
 	{0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
 	{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -56,6 +68,7 @@
 	{0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
 	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
 	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -116,9 +129,10 @@
 	{0, 0, 0}
 
 #define mga_PCI_IDS \
-	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
 	{0, 0, 0}
 
 #define mach64_PCI_IDS \
@@ -162,9 +176,10 @@
 
 #define viadrv_PCI_IDS \
 	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
 #define i810_PCI_IDS \
@@ -181,33 +196,30 @@
 	{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
-#define gamma_PCI_IDS \
-	{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
 #define savage_PCI_IDS \
-	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
 	{0, 0, 0}
 
 #define ffb_PCI_IDS \
@@ -223,10 +235,3 @@
 	{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
-#define viadrv_PCI_IDS \
-	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
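[Annotation] The *_PCI_IDS macros above expand to struct pci_device_id initializers in field order {vendor, device, subvendor, subdevice, class, class_mask, driver_data}; this patch starts using the driver_data slot to carry chip flags such as CHIP_R420. A hedged sketch of how a driver might consume that slot; the table name, helper, and use of pci_match_id() are illustrative assumptions, not from this patch:

	static struct pci_device_id example_pciidlist[] = {
		radeon_PCI_IDS
	};

	static unsigned long example_chip_flags(struct pci_dev *pdev)
	{
		const struct pci_device_id *id;

		/* generic PCI core lookup; returns NULL if no entry matches */
		id = pci_match_id(example_pciidlist, pdev);
		return id ? id->driver_data : 0;	/* e.g. CHIP_R420 */
	}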
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 4774087d2e9e..32d2bb99462c 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 
 	/* Hardcoded from _DRM_FRAME_BUFFER,
 	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER. */
-	const char   *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char   *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
 	const char   *type;
 	int	     i;
 
@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 	if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
-		if(!map) continue;
-		if (map->type < 0 || map->type > 4) type = "??";
-		else type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+		if(!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08x ",
 			       i,
 			       map->offset,
 			       map->size,
 			       type,
 			       map->flags,
-			       (unsigned long)map->handle);
+			       r_list->user_token);
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");
 		} else {
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 54fddb6ea2d1..ed267d49bc6a 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
 		 DRM_MEM_SGLISTS );
 }
 
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
 int drm_sg_alloc( struct inode *inode, struct file *filp,
 		  unsigned int cmd, unsigned long arg )
 {
@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
 	 */
 	memset( entry->virtual, 0, pages << PAGE_SHIFT );
 
-	entry->handle = (unsigned long)entry->virtual;
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);
 
 	DRM_DEBUG( "sg alloc handle  = %08lx\n", entry->handle );
 	DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
 
-	for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+	     i += PAGE_SIZE, j++) {
 		entry->pagelist[j] = vmalloc_to_page((void *)i);
 		if (!entry->pagelist[j])
 			goto failed;
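[Annotation] On 64-bit builds (_LP64) a vmalloc address no longer fits in the 32-bit map handle user space sees, so ScatterHandle() folds the high and low halves together to form a token; the page loop therefore now walks the real virtual address instead of the handle. An illustration of the folding arithmetic, with a made-up address:

	static unsigned int example_sg_handle(void)
	{
		/* value is made up; shows the folding only */
		unsigned long vaddr = 0xffffc20000010000UL;

		/* high half 0xffffc200 + low half 0x00010000 == 0xffffd200 */
		return ScatterHandle(vaddr);
	}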
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 48829a1a086a..95a976c96eb8 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
 	dev->pci_func = PCI_FUNC(pdev->devfn);
 	dev->irq = pdev->irq;
 
+	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+	if (dev->maplist == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev->maplist->head);
+
 	/* the DRM has 6 basic counters */
 	dev->counters = 6;
 	dev->types[0] = _DRM_STAT_LOCK;
@@ -91,7 +96,8 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
 		goto error_out_unreg;
 
 	if (drm_core_has_AGP(dev)) {
-		dev->agp = drm_agp_init(dev);
+		if (drm_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
 			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
 			retcode = -EINVAL;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 621220f3f372..ced4215e2275 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		if (map->offset == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}
 
 	if (map && map->type == _DRM_AGP) {
 		unsigned long offset = address - vma->vm_start;
-		unsigned long baddr = VM_OFFSET(vma) + offset;
+		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
 
@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 	}
 
 	if(!found_maps) {
+		drm_dma_handle_t dmah;
+
 		switch (map->type) {
 		case _DRM_REGISTERS:
 		case _DRM_FRAME_BUFFER:
@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		case _DRM_AGP:
 		case _DRM_SCATTER_GATHER:
 			break;
+		case _DRM_CONSISTENT:
+			dmah.vaddr = map->handle;
+			dmah.busaddr = map->offset;
+			dmah.size = map->size;
+			__drm_pci_free(dev, &dmah);
+			break;
 		}
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 	}
@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 
 
 	offset = address - vma->vm_start;
-	map_offset = map->offset - dev->sg->handle;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 }
 
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
-
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
 				  unsigned long address,
 				  int *type) {
@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
-#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address,
-				  int unused) {
-	return drm_do_vm_nopage(vma, address);
-}
-
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_shm_nopage(vma, address);
-}
-
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_dma_nopage(vma, address);
-}
-
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int unused) {
-	return drm_do_vm_sg_nopage(vma, address);
-}
-
-#endif
-
-
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
 	.nopage = drm_vm_nopage,
@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
 	vma->vm_ops = &drm_vm_dma_ops;
 
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	   for performance, even if the list was a
 	   bit longer. */
 	list_for_each(list, &dev->maplist->head) {
-		unsigned long off;
 
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		off = dev->driver->get_map_ofs(map);
-		if (off == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}
 
 	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-		if (VM_OFFSET(vma) >= __pa(high_memory)) {
 #if defined(__i386__) || defined(__x86_64__)
 		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
 			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 		}
 #elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		if (map->type == _DRM_REGISTERS)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
 #endif
 		vma->vm_flags |= VM_IO;	/* not in core dump */
-		}
 #if defined(__ia64__)
 		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 				    vma->vm_start))
@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
 		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
-					(VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+					(map->offset + offset) >> PAGE_SHIFT,
 					vma->vm_end - vma->vm_start,
 					vma->vm_page_prot))
 #else
 		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 #endif
@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
 			  map->type,
-			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
+			  vma->vm_start, vma->vm_end, map->offset + offset);
 		vma->vm_ops = &drm_vm_ops;
 		break;
 	case _DRM_SHM:
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. It's
+		 * only allocated in a different way. */
 		vma->vm_ops = &drm_vm_shm_ops;
 		vma->vm_private_data = (void *)map;
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
 		vma->vm_private_data = (void *)map;
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
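[Annotation] Across drm_vm.c, drm_context.c, drm_proc.c and ffb_drv.c this patch converges on one lookup convention: user space mmap()s an opaque token published by the DRM core (r_list->user_token) rather than a raw kernel offset, and the core resolves the token back to the map. A hedged restatement of that pattern; the function name is illustrative:

	static drm_map_t *example_find_map(drm_device_t *dev,
					   unsigned long token)
	{
		struct list_head *list;
		drm_map_list_t *r_list;

		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map && r_list->user_token == token)
				return r_list->map;
		}
		return NULL;	/* no map published under this token */
	}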
diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c
index ec614fff8f04..1bd0d55ee0f0 100644
--- a/drivers/char/drm/ffb_drv.c
+++ b/drivers/char/drm/ffb_drv.c
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
 		return NULL;
 
 	list_for_each(list, &dev->maplist->head) {
-		unsigned long uoff;
-
 		r_list = (drm_map_list_t *)list;
 		map = r_list->map;
 		if (!map)
 			continue;
-		uoff = (map->offset & 0xffffffff);
-		if (uoff == off)
+		if (r_list->user_token == off)
 			return map;
 	}
 
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h
deleted file mode 100644
index d11b507f87ee..000000000000
--- a/drivers/char/drm/gamma_context.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- * ChangeLog:
- *  2001-11-16	Torsten Duwe <duwe@caldera.de>
- *		added context constructor/destructor hooks,
- *		needed by SiS driver's memory management.
- */
-
-/* ================================================================
- * Old-style context support -- only used by gamma.
- */
-
-
-/* The drm_read and drm_write_string code (especially that which manages
-   the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
-   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
-
-ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
-{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->dev;
-	int left;
-	int avail;
-	int send;
-	int cur;
-
-	DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
-
-	while (dev->buf_rp == dev->buf_wp) {
-		DRM_DEBUG("  sleeping\n");
-		if (filp->f_flags & O_NONBLOCK) {
-			return -EAGAIN;
-		}
-		interruptible_sleep_on(&dev->buf_readers);
-		if (signal_pending(current)) {
-			DRM_DEBUG("  interrupted\n");
-			return -ERESTARTSYS;
-		}
-		DRM_DEBUG("  awake\n");
-	}
-
-	left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
-	avail = DRM_BSZ - left;
-	send  = DRM_MIN(avail, count);
-
-	while (send) {
-		if (dev->buf_wp > dev->buf_rp) {
-			cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
-		} else {
-			cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
-		}
-		if (copy_to_user(buf, dev->buf_rp, cur))
-			return -EFAULT;
-		dev->buf_rp += cur;
-		if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
-		send -= cur;
-	}
-
-	wake_up_interruptible(&dev->buf_writers);
-	return DRM_MIN(avail, count);
-}
-
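[Annotation] For readers tracing the deleted gamma code above: the free-space expression in gamma_fops_read() is classic ring-buffer arithmetic, wrapping the distance between the read and write pointers modulo the buffer size. An illustrative restatement (the function name and numbers are made up, and this is being removed, not added):

	static int example_ring_left(char *rp, char *wp, int bsz)
	{
		/* e.g. bsz = 1024, wp 100 bytes ahead of rp -> 924 left */
		return (int)(((rp + bsz) - wp) % bsz);
	}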
90
91/* In an incredibly convoluted setup, the kernel module actually calls
92 * back into the X server to perform context switches on behalf of the
93 * 3d clients.
94 */
95int DRM(write_string)(drm_device_t *dev, const char *s)
96{
97 int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
98 int send = strlen(s);
99 int count;
100
101 DRM_DEBUG("%d left, %d to send (%p, %p)\n",
102 left, send, dev->buf_rp, dev->buf_wp);
103
104 if (left == 1 || dev->buf_wp != dev->buf_rp) {
105 DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
106 left,
107 dev->buf_wp,
108 dev->buf_rp);
109 }
110
111 while (send) {
112 if (dev->buf_wp >= dev->buf_rp) {
113 count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
114 if (count == left) --count; /* Leave a hole */
115 } else {
116 count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
117 }
118 strncpy(dev->buf_wp, s, count);
119 dev->buf_wp += count;
120 if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
121 send -= count;
122 }
123
124 if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
125
126 DRM_DEBUG("waking\n");
127 wake_up_interruptible(&dev->buf_readers);
128 return 0;
129}
130
131unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
132{
133 drm_file_t *priv = filp->private_data;
134 drm_device_t *dev = priv->dev;
135
136 poll_wait(filp, &dev->buf_readers, wait);
137 if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
138 return 0;
139}
140
141int DRM(context_switch)(drm_device_t *dev, int old, int new)
142{
143 char buf[64];
144 drm_queue_t *q;
145
146 if (test_and_set_bit(0, &dev->context_flag)) {
147 DRM_ERROR("Reentering -- FIXME\n");
148 return -EBUSY;
149 }
150
151 DRM_DEBUG("Context switch from %d to %d\n", old, new);
152
153 if (new >= dev->queue_count) {
154 clear_bit(0, &dev->context_flag);
155 return -EINVAL;
156 }
157
158 if (new == dev->last_context) {
159 clear_bit(0, &dev->context_flag);
160 return 0;
161 }
162
163 q = dev->queuelist[new];
164 atomic_inc(&q->use_count);
165 if (atomic_read(&q->use_count) == 1) {
166 atomic_dec(&q->use_count);
167 clear_bit(0, &dev->context_flag);
168 return -EINVAL;
169 }
170
171 /* This causes the X server to wake up & do a bunch of hardware
172 * interaction to actually effect the context switch.
173 */
174 sprintf(buf, "C %d %d\n", old, new);
175 DRM(write_string)(dev, buf);
176
177 atomic_dec(&q->use_count);
178
179 return 0;
180}
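
The request format is the "C %d %d" string built by the sprintf() above.  A
hypothetical receiver-side helper (not from the original source) would decode
it like this:

	#include <stdio.h>

	/* Decode one context-switch request; returns 0 on success. */
	static int parse_switch_request(const char *msg, int *from, int *to)
	{
		return sscanf(msg, "C %d %d", from, to) == 2 ? 0 : -1;
	}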
181
182int DRM(context_switch_complete)(drm_device_t *dev, int new)
183{
184 drm_device_dma_t *dma = dev->dma;
185
186 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
187 dev->last_switch = jiffies;
188
189 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
190 DRM_ERROR("Lock isn't held after context switch\n");
191 }
192
193 if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
194 if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
195 DRM_KERNEL_CONTEXT)) {
196 DRM_ERROR("Cannot free lock\n");
197 }
198 }
199
200 clear_bit(0, &dev->context_flag);
201 wake_up_interruptible(&dev->context_wait);
202
203 return 0;
204}
205
206static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
207{
208 DRM_DEBUG("\n");
209
210 if (atomic_read(&q->use_count) != 1
211 || atomic_read(&q->finalization)
212 || atomic_read(&q->block_count)) {
213 DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
214 atomic_read(&q->use_count),
215 atomic_read(&q->finalization),
216 atomic_read(&q->block_count));
217 }
218
219 atomic_set(&q->finalization, 0);
220 atomic_set(&q->block_count, 0);
221 atomic_set(&q->block_read, 0);
222 atomic_set(&q->block_write, 0);
223 atomic_set(&q->total_queued, 0);
224 atomic_set(&q->total_flushed, 0);
225 atomic_set(&q->total_locks, 0);
226
227 init_waitqueue_head(&q->write_queue);
228 init_waitqueue_head(&q->read_queue);
229 init_waitqueue_head(&q->flush_queue);
230
231 q->flags = ctx->flags;
232
233 DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
234
235 return 0;
236}
237
238
239/* drm_alloc_queue:
240PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
241 disappear (so all deallocation must be done after IOCTLs are off)
242 2) dev->queue_count < dev->queue_slots
243 3) dev->queuelist[i].use_count == 0 and
244 dev->queuelist[i].finalization == 0 if i not in use
245POST: 1) dev->queuelist[i].use_count == 1
246 2) dev->queue_count < dev->queue_slots */
247
248static int DRM(alloc_queue)(drm_device_t *dev)
249{
250 int i;
251 drm_queue_t *queue;
252 int oldslots;
253 int newslots;
254 /* Check for a free queue */
255 for (i = 0; i < dev->queue_count; i++) {
256 atomic_inc(&dev->queuelist[i]->use_count);
257 if (atomic_read(&dev->queuelist[i]->use_count) == 1
258 && !atomic_read(&dev->queuelist[i]->finalization)) {
259 DRM_DEBUG("%d (free)\n", i);
260 return i;
261 }
262 atomic_dec(&dev->queuelist[i]->use_count);
263 }
264 /* Allocate a new queue */
265 down(&dev->struct_sem);
266
267 queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
268	if (!queue) { up(&dev->struct_sem); return -ENOMEM; } memset(queue, 0, sizeof(*queue));
269 atomic_set(&queue->use_count, 1);
270
271 ++dev->queue_count;
272 if (dev->queue_count >= dev->queue_slots) {
273 oldslots = dev->queue_slots * sizeof(*dev->queuelist);
274 if (!dev->queue_slots) dev->queue_slots = 1;
275 dev->queue_slots *= 2;
276 newslots = dev->queue_slots * sizeof(*dev->queuelist);
277
278 dev->queuelist = DRM(realloc)(dev->queuelist,
279 oldslots,
280 newslots,
281 DRM_MEM_QUEUES);
282 if (!dev->queuelist) {
283 up(&dev->struct_sem);
284 DRM_DEBUG("out of memory\n");
285 return -ENOMEM;
286 }
287 }
288 dev->queuelist[dev->queue_count-1] = queue;
289
290 up(&dev->struct_sem);
291 DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
292 return dev->queue_count - 1;
293}
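
The queue table above grows by doubling.  A minimal sketch of the same pattern
with plain realloc() (editorial; DRM(realloc) additionally takes the old size
and a memory-area tag, and the original doubles exactly once per overflow):

	#include <stdlib.h>

	static void **grow_slots(void **list, int *slots, int needed)
	{
		int newslots = *slots ? *slots : 1;
		void **tmp;

		while (newslots < needed)
			newslots *= 2;
		tmp = realloc(list, newslots * sizeof(*list));
		if (!tmp)
			return NULL;	/* caller keeps the old, smaller list */
		*slots = newslots;
		return tmp;
	}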
294
295int DRM(resctx)(struct inode *inode, struct file *filp,
296 unsigned int cmd, unsigned long arg)
297{
298 drm_ctx_res_t __user *argp = (void __user *)arg;
299 drm_ctx_res_t res;
300 drm_ctx_t ctx;
301 int i;
302
303 DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
304 if (copy_from_user(&res, argp, sizeof(res)))
305 return -EFAULT;
306 if (res.count >= DRM_RESERVED_CONTEXTS) {
307 memset(&ctx, 0, sizeof(ctx));
308 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
309 ctx.handle = i;
310 if (copy_to_user(&res.contexts[i],
311 &i,
312 sizeof(i)))
313 return -EFAULT;
314 }
315 }
316 res.count = DRM_RESERVED_CONTEXTS;
317 if (copy_to_user(argp, &res, sizeof(res)))
318 return -EFAULT;
319 return 0;
320}
321
322int DRM(addctx)(struct inode *inode, struct file *filp,
323 unsigned int cmd, unsigned long arg)
324{
325 drm_file_t *priv = filp->private_data;
326 drm_device_t *dev = priv->dev;
327 drm_ctx_t ctx;
328 drm_ctx_t __user *argp = (void __user *)arg;
329
330 if (copy_from_user(&ctx, argp, sizeof(ctx)))
331 return -EFAULT;
332 if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
333 /* Init kernel's context and get a new one. */
334 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
335 ctx.handle = DRM(alloc_queue)(dev);
336 }
337 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
338 DRM_DEBUG("%d\n", ctx.handle);
339 if (copy_to_user(argp, &ctx, sizeof(ctx)))
340 return -EFAULT;
341 return 0;
342}
343
344int DRM(modctx)(struct inode *inode, struct file *filp,
345 unsigned int cmd, unsigned long arg)
346{
347 drm_file_t *priv = filp->private_data;
348 drm_device_t *dev = priv->dev;
349 drm_ctx_t ctx;
350 drm_queue_t *q;
351
352 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
353 return -EFAULT;
354
355 DRM_DEBUG("%d\n", ctx.handle);
356
357 if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
358 q = dev->queuelist[ctx.handle];
359
360 atomic_inc(&q->use_count);
361 if (atomic_read(&q->use_count) == 1) {
362 /* No longer in use */
363 atomic_dec(&q->use_count);
364 return -EINVAL;
365 }
366
367 if (DRM_BUFCOUNT(&q->waitlist)) {
368 atomic_dec(&q->use_count);
369 return -EBUSY;
370 }
371
372 q->flags = ctx.flags;
373
374 atomic_dec(&q->use_count);
375 return 0;
376}
377
378int DRM(getctx)(struct inode *inode, struct file *filp,
379 unsigned int cmd, unsigned long arg)
380{
381 drm_file_t *priv = filp->private_data;
382 drm_device_t *dev = priv->dev;
383 drm_ctx_t __user *argp = (void __user *)arg;
384 drm_ctx_t ctx;
385 drm_queue_t *q;
386
387 if (copy_from_user(&ctx, argp, sizeof(ctx)))
388 return -EFAULT;
389
390 DRM_DEBUG("%d\n", ctx.handle);
391
392 if (ctx.handle >= dev->queue_count) return -EINVAL;
393 q = dev->queuelist[ctx.handle];
394
395 atomic_inc(&q->use_count);
396 if (atomic_read(&q->use_count) == 1) {
397 /* No longer in use */
398 atomic_dec(&q->use_count);
399 return -EINVAL;
400 }
401
402 ctx.flags = q->flags;
403 atomic_dec(&q->use_count);
404
405 if (copy_to_user(argp, &ctx, sizeof(ctx)))
406 return -EFAULT;
407
408 return 0;
409}
410
411int DRM(switchctx)(struct inode *inode, struct file *filp,
412 unsigned int cmd, unsigned long arg)
413{
414 drm_file_t *priv = filp->private_data;
415 drm_device_t *dev = priv->dev;
416 drm_ctx_t ctx;
417
418 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
419 return -EFAULT;
420 DRM_DEBUG("%d\n", ctx.handle);
421 return DRM(context_switch)(dev, dev->last_context, ctx.handle);
422}
423
424int DRM(newctx)(struct inode *inode, struct file *filp,
425 unsigned int cmd, unsigned long arg)
426{
427 drm_file_t *priv = filp->private_data;
428 drm_device_t *dev = priv->dev;
429 drm_ctx_t ctx;
430
431 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
432 return -EFAULT;
433 DRM_DEBUG("%d\n", ctx.handle);
434 DRM(context_switch_complete)(dev, ctx.handle);
435
436 return 0;
437}
438
439int DRM(rmctx)(struct inode *inode, struct file *filp,
440 unsigned int cmd, unsigned long arg)
441{
442 drm_file_t *priv = filp->private_data;
443 drm_device_t *dev = priv->dev;
444 drm_ctx_t ctx;
445 drm_queue_t *q;
446 drm_buf_t *buf;
447
448 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
449 return -EFAULT;
450 DRM_DEBUG("%d\n", ctx.handle);
451
452 if (ctx.handle >= dev->queue_count) return -EINVAL;
453 q = dev->queuelist[ctx.handle];
454
455 atomic_inc(&q->use_count);
456 if (atomic_read(&q->use_count) == 1) {
457 /* No longer in use */
458 atomic_dec(&q->use_count);
459 return -EINVAL;
460 }
461
462 atomic_inc(&q->finalization); /* Mark queue in finalization state */
463 atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
464 finalization) */
465
466 while (test_and_set_bit(0, &dev->interrupt_flag)) {
467 schedule();
468 if (signal_pending(current)) {
469 clear_bit(0, &dev->interrupt_flag);
470 return -EINTR;
471 }
472 }
473 /* Remove queued buffers */
474 while ((buf = DRM(waitlist_get)(&q->waitlist))) {
475 DRM(free_buffer)(dev, buf);
476 }
477 clear_bit(0, &dev->interrupt_flag);
478
479 /* Wakeup blocked processes */
480 wake_up_interruptible(&q->read_queue);
481 wake_up_interruptible(&q->write_queue);
482 wake_up_interruptible(&q->flush_queue);
483
484 /* Finalization over. Queue is made
485 available when both use_count and
486 finalization become 0, which won't
487 happen until all the waiting processes
488 stop waiting. */
489 atomic_dec(&q->finalization);
490 return 0;
491}
492
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
deleted file mode 100644
index e486fb8d31e9..000000000000
--- a/drivers/char/drm/gamma_dma.c
+++ /dev/null
@@ -1,946 +0,0 @@
1/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#include "gamma.h"
33#include "drmP.h"
34#include "drm.h"
35#include "gamma_drm.h"
36#include "gamma_drv.h"
37
38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/delay.h>
40
41static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
42 unsigned long length)
43{
44 drm_gamma_private_t *dev_priv =
45 (drm_gamma_private_t *)dev->dev_private;
46 mb();
47 while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
48 cpu_relax();
49
50 GAMMA_WRITE(GAMMA_DMAADDRESS, address);
51
52 while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
53 cpu_relax();
54
55	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);	/* the count register is in 32-bit words */
56}
57
58void gamma_dma_quiescent_single(drm_device_t *dev)
59{
60 drm_gamma_private_t *dev_priv =
61 (drm_gamma_private_t *)dev->dev_private;
62 while (GAMMA_READ(GAMMA_DMACOUNT))
63 cpu_relax();
64
65 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
66 cpu_relax();
67
68 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
69 GAMMA_WRITE(GAMMA_SYNC, 0);
70
71 do {
72 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
73 cpu_relax();
74 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
75}
76
77void gamma_dma_quiescent_dual(drm_device_t *dev)
78{
79 drm_gamma_private_t *dev_priv =
80 (drm_gamma_private_t *)dev->dev_private;
81 while (GAMMA_READ(GAMMA_DMACOUNT))
82 cpu_relax();
83
84 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
85 cpu_relax();
86
87 GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 GAMMA_WRITE(GAMMA_SYNC, 0);
90
91 /* Read from first MX */
92 do {
93 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
94 cpu_relax();
95 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
96
97 /* Read from second MX */
98 do {
99 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
100 cpu_relax();
101 } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
102}
103
104void gamma_dma_ready(drm_device_t *dev)
105{
106 drm_gamma_private_t *dev_priv =
107 (drm_gamma_private_t *)dev->dev_private;
108 while (GAMMA_READ(GAMMA_DMACOUNT))
109 cpu_relax();
110}
111
112static inline int gamma_dma_is_ready(drm_device_t *dev)
113{
114 drm_gamma_private_t *dev_priv =
115 (drm_gamma_private_t *)dev->dev_private;
116 return (!GAMMA_READ(GAMMA_DMACOUNT));
117}
118
119irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
120{
121 drm_device_t *dev = (drm_device_t *)arg;
122 drm_device_dma_t *dma = dev->dma;
123 drm_gamma_private_t *dev_priv =
124 (drm_gamma_private_t *)dev->dev_private;
125
126 /* FIXME: should check whether we're actually interested in the interrupt? */
127 atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
128
129 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
130 cpu_relax();
131
132	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2);	/* ~0.05s delay */
133 GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
134 GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
135 if (gamma_dma_is_ready(dev)) {
136 /* Free previous buffer */
137 if (test_and_set_bit(0, &dev->dma_flag))
138 return IRQ_HANDLED;
139 if (dma->this_buffer) {
140 gamma_free_buffer(dev, dma->this_buffer);
141 dma->this_buffer = NULL;
142 }
143 clear_bit(0, &dev->dma_flag);
144
145 /* Dispatch new buffer */
146 schedule_work(&dev->work);
147 }
148 return IRQ_HANDLED;
149}
150
151/* Only called by gamma_dma_schedule. */
152static int gamma_do_dma(drm_device_t *dev, int locked)
153{
154 unsigned long address;
155 unsigned long length;
156 drm_buf_t *buf;
157 int retcode = 0;
158 drm_device_dma_t *dma = dev->dma;
159
160 if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
161
162
163 if (!dma->next_buffer) {
164 DRM_ERROR("No next_buffer\n");
165 clear_bit(0, &dev->dma_flag);
166 return -EINVAL;
167 }
168
169 buf = dma->next_buffer;
170 /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
171 /* So we pass the buffer index value into the physical page offset */
172	address = buf->idx << 12;	/* one 4 KB logical page per buffer */
173 length = buf->used;
174
175 DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
176 buf->context, buf->idx, length);
177
178 if (buf->list == DRM_LIST_RECLAIM) {
179 gamma_clear_next_buffer(dev);
180 gamma_free_buffer(dev, buf);
181 clear_bit(0, &dev->dma_flag);
182 return -EINVAL;
183 }
184
185 if (!length) {
186 DRM_ERROR("0 length buffer\n");
187 gamma_clear_next_buffer(dev);
188 gamma_free_buffer(dev, buf);
189 clear_bit(0, &dev->dma_flag);
190 return 0;
191 }
192
193 if (!gamma_dma_is_ready(dev)) {
194 clear_bit(0, &dev->dma_flag);
195 return -EBUSY;
196 }
197
198 if (buf->while_locked) {
199 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
200 DRM_ERROR("Dispatching buffer %d from pid %d"
201 " \"while locked\", but no lock held\n",
202 buf->idx, current->pid);
203 }
204 } else {
205 if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
206 DRM_KERNEL_CONTEXT)) {
207 clear_bit(0, &dev->dma_flag);
208 return -EBUSY;
209 }
210 }
211
212 if (dev->last_context != buf->context
213 && !(dev->queuelist[buf->context]->flags
214 & _DRM_CONTEXT_PRESERVED)) {
215 /* PRE: dev->last_context != buf->context */
216 if (DRM(context_switch)(dev, dev->last_context,
217 buf->context)) {
218 DRM(clear_next_buffer)(dev);
219 DRM(free_buffer)(dev, buf);
220 }
221 retcode = -EBUSY;
222 goto cleanup;
223
224 /* POST: we will wait for the context
225 switch and will dispatch on a later call
226 when dev->last_context == buf->context.
227 NOTE WE HOLD THE LOCK THROUGHOUT THIS
228 TIME! */
229 }
230
231 gamma_clear_next_buffer(dev);
232 buf->pending = 1;
233 buf->waiting = 0;
234 buf->list = DRM_LIST_PEND;
235
236 /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
237 address = buf->idx << 12;
238
239 gamma_dma_dispatch(dev, address, length);
240 gamma_free_buffer(dev, dma->this_buffer);
241 dma->this_buffer = buf;
242
243 atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
244 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
245
246 if (!buf->while_locked && !dev->context_flag && !locked) {
247 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
248 DRM_KERNEL_CONTEXT)) {
249 DRM_ERROR("\n");
250 }
251 }
252cleanup:
253
254 clear_bit(0, &dev->dma_flag);
255
256
257 return retcode;
258}
259
260static void gamma_dma_timer_bh(unsigned long dev)
261{
262 gamma_dma_schedule((drm_device_t *)dev, 0);
263}
264
265void gamma_irq_immediate_bh(void *dev)
266{
267 gamma_dma_schedule(dev, 0);
268}
269
270int gamma_dma_schedule(drm_device_t *dev, int locked)
271{
272 int next;
273 drm_queue_t *q;
274 drm_buf_t *buf;
275 int retcode = 0;
276 int processed = 0;
277 int missed;
278 int expire = 20;
279 drm_device_dma_t *dma = dev->dma;
280
281 if (test_and_set_bit(0, &dev->interrupt_flag)) {
282 /* Not reentrant */
283 atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
284 return -EBUSY;
285 }
286 missed = atomic_read(&dev->counts[10]);
287
288
289again:
290 if (dev->context_flag) {
291 clear_bit(0, &dev->interrupt_flag);
292 return -EBUSY;
293 }
294 if (dma->next_buffer) {
295 /* Unsent buffer that was previously
296 selected, but that couldn't be sent
297 because the lock could not be obtained
298 or the DMA engine wasn't ready. Try
299 again. */
300 if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
301 } else {
302 do {
303 next = gamma_select_queue(dev, gamma_dma_timer_bh);
304 if (next >= 0) {
305 q = dev->queuelist[next];
306 buf = gamma_waitlist_get(&q->waitlist);
307 dma->next_buffer = buf;
308 dma->next_queue = q;
309 if (buf && buf->list == DRM_LIST_RECLAIM) {
310 gamma_clear_next_buffer(dev);
311 gamma_free_buffer(dev, buf);
312 }
313 }
314 } while (next >= 0 && !dma->next_buffer);
315 if (dma->next_buffer) {
316 if (!(retcode = gamma_do_dma(dev, locked))) {
317 ++processed;
318 }
319 }
320 }
321
322 if (--expire) {
323 if (missed != atomic_read(&dev->counts[10])) {
324 if (gamma_dma_is_ready(dev)) goto again;
325 }
326 if (processed && gamma_dma_is_ready(dev)) {
327 processed = 0;
328 goto again;
329 }
330 }
331
332 clear_bit(0, &dev->interrupt_flag);
333
334 return retcode;
335}
336
337static int gamma_dma_priority(struct file *filp,
338 drm_device_t *dev, drm_dma_t *d)
339{
340 unsigned long address;
341 unsigned long length;
342 int must_free = 0;
343 int retcode = 0;
344 int i;
345 int idx;
346 drm_buf_t *buf;
347 drm_buf_t *last_buf = NULL;
348 drm_device_dma_t *dma = dev->dma;
349 int *send_indices = NULL;
350 int *send_sizes = NULL;
351
352 DECLARE_WAITQUEUE(entry, current);
353
354 /* Turn off interrupt handling */
355 while (test_and_set_bit(0, &dev->interrupt_flag)) {
356 schedule();
357 if (signal_pending(current)) return -EINTR;
358 }
359 if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
360 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
361 DRM_KERNEL_CONTEXT)) {
362 schedule();
363 if (signal_pending(current)) {
364 clear_bit(0, &dev->interrupt_flag);
365 return -EINTR;
366 }
367 }
368 ++must_free;
369 }
370
371 send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
372 DRM_MEM_DRIVER);
373 if (send_indices == NULL)
374		{ retcode = -ENOMEM; goto cleanup; }	/* unwind lock + interrupt_flag */
375 if (copy_from_user(send_indices, d->send_indices,
376 d->send_count * sizeof(*send_indices))) {
377 retcode = -EFAULT;
378 goto cleanup;
379 }
380
381 send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
382 DRM_MEM_DRIVER);
383 if (send_sizes == NULL)
384		{ retcode = -ENOMEM; goto cleanup; }	/* also frees send_indices */
385 if (copy_from_user(send_sizes, d->send_sizes,
386 d->send_count * sizeof(*send_sizes))) {
387 retcode = -EFAULT;
388 goto cleanup;
389 }
390
391 for (i = 0; i < d->send_count; i++) {
392 idx = send_indices[i];
393 if (idx < 0 || idx >= dma->buf_count) {
394 DRM_ERROR("Index %d (of %d max)\n",
395 send_indices[i], dma->buf_count - 1);
396 continue;
397 }
398 buf = dma->buflist[ idx ];
399 if (buf->filp != filp) {
400 DRM_ERROR("Process %d using buffer not owned\n",
401 current->pid);
402 retcode = -EINVAL;
403 goto cleanup;
404 }
405 if (buf->list != DRM_LIST_NONE) {
406 DRM_ERROR("Process %d using buffer on list %d\n",
407 current->pid, buf->list);
408 retcode = -EINVAL;
409 goto cleanup;
410 }
411		/* This isn't a race condition on
412		   buf->list: the only concern is buffer
413		   reclaim while the process closes its
414		   /dev/drm? handle, and at that point it
415		   cannot also be doing DMA. */
416 buf->list = DRM_LIST_PRIO;
417 buf->used = send_sizes[i];
418 buf->context = d->context;
419 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
420 address = (unsigned long)buf->address;
421 length = buf->used;
422 if (!length) {
423 DRM_ERROR("0 length buffer\n");
424 }
425 if (buf->pending) {
426 DRM_ERROR("Sending pending buffer:"
427 " buffer %d, offset %d\n",
428 send_indices[i], i);
429 retcode = -EINVAL;
430 goto cleanup;
431 }
432 if (buf->waiting) {
433 DRM_ERROR("Sending waiting buffer:"
434 " buffer %d, offset %d\n",
435 send_indices[i], i);
436 retcode = -EINVAL;
437 goto cleanup;
438 }
439 buf->pending = 1;
440
441 if (dev->last_context != buf->context
442 && !(dev->queuelist[buf->context]->flags
443 & _DRM_CONTEXT_PRESERVED)) {
444 add_wait_queue(&dev->context_wait, &entry);
445 current->state = TASK_INTERRUPTIBLE;
446 /* PRE: dev->last_context != buf->context */
447 DRM(context_switch)(dev, dev->last_context,
448 buf->context);
449 /* POST: we will wait for the context
450 switch and will dispatch on a later call
451 when dev->last_context == buf->context.
452 NOTE WE HOLD THE LOCK THROUGHOUT THIS
453 TIME! */
454 schedule();
455 current->state = TASK_RUNNING;
456 remove_wait_queue(&dev->context_wait, &entry);
457 if (signal_pending(current)) {
458 retcode = -EINTR;
459 goto cleanup;
460 }
461 if (dev->last_context != buf->context) {
462 DRM_ERROR("Context mismatch: %d %d\n",
463 dev->last_context,
464 buf->context);
465 }
466 }
467
468 gamma_dma_dispatch(dev, address, length);
469 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
470 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
471
472 if (last_buf) {
473 gamma_free_buffer(dev, last_buf);
474 }
475 last_buf = buf;
476 }
477
478
479cleanup:
480 if (last_buf) {
481 gamma_dma_ready(dev);
482 gamma_free_buffer(dev, last_buf);
483 }
484 if (send_indices)
485 DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
486 DRM_MEM_DRIVER);
487 if (send_sizes)
488 DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
489 DRM_MEM_DRIVER);
490
491 if (must_free && !dev->context_flag) {
492 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
493 DRM_KERNEL_CONTEXT)) {
494 DRM_ERROR("\n");
495 }
496 }
497 clear_bit(0, &dev->interrupt_flag);
498 return retcode;
499}
500
501static int gamma_dma_send_buffers(struct file *filp,
502 drm_device_t *dev, drm_dma_t *d)
503{
504 DECLARE_WAITQUEUE(entry, current);
505 drm_buf_t *last_buf = NULL;
506 int retcode = 0;
507 drm_device_dma_t *dma = dev->dma;
508 int send_index;
509
510 if (get_user(send_index, &d->send_indices[d->send_count-1]))
511 return -EFAULT;
512
513 if (d->flags & _DRM_DMA_BLOCK) {
514 last_buf = dma->buflist[send_index];
515 add_wait_queue(&last_buf->dma_wait, &entry);
516 }
517
518 if ((retcode = gamma_dma_enqueue(filp, d))) {
519 if (d->flags & _DRM_DMA_BLOCK)
520 remove_wait_queue(&last_buf->dma_wait, &entry);
521 return retcode;
522 }
523
524 gamma_dma_schedule(dev, 0);
525
526 if (d->flags & _DRM_DMA_BLOCK) {
527 DRM_DEBUG("%d waiting\n", current->pid);
528 for (;;) {
529 current->state = TASK_INTERRUPTIBLE;
530 if (!last_buf->waiting && !last_buf->pending)
531 break; /* finished */
532 schedule();
533 if (signal_pending(current)) {
534 retcode = -EINTR; /* Can't restart */
535 break;
536 }
537 }
538 current->state = TASK_RUNNING;
539 DRM_DEBUG("%d running\n", current->pid);
540 remove_wait_queue(&last_buf->dma_wait, &entry);
541 if (!retcode
542 || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
543 if (!waitqueue_active(&last_buf->dma_wait)) {
544 gamma_free_buffer(dev, last_buf);
545 }
546 }
547 if (retcode) {
548 DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
549 d->context,
550 last_buf->waiting,
551 last_buf->pending,
552 (long)DRM_WAITCOUNT(dev, d->context),
553 last_buf->idx,
554 last_buf->list,
555 current->pid);
556 }
557 }
558 return retcode;
559}
560
561int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
562 unsigned long arg)
563{
564 drm_file_t *priv = filp->private_data;
565 drm_device_t *dev = priv->dev;
566 drm_device_dma_t *dma = dev->dma;
567 int retcode = 0;
568 drm_dma_t __user *argp = (void __user *)arg;
569 drm_dma_t d;
570
571 if (copy_from_user(&d, argp, sizeof(d)))
572 return -EFAULT;
573
574 if (d.send_count < 0 || d.send_count > dma->buf_count) {
575 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
576 current->pid, d.send_count, dma->buf_count);
577 return -EINVAL;
578 }
579
580 if (d.request_count < 0 || d.request_count > dma->buf_count) {
581 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
582 current->pid, d.request_count, dma->buf_count);
583 return -EINVAL;
584 }
585
586 if (d.send_count) {
587 if (d.flags & _DRM_DMA_PRIORITY)
588 retcode = gamma_dma_priority(filp, dev, &d);
589 else
590 retcode = gamma_dma_send_buffers(filp, dev, &d);
591 }
592
593 d.granted_count = 0;
594
595 if (!retcode && d.request_count) {
596 retcode = gamma_dma_get_buffers(filp, &d);
597 }
598
599 DRM_DEBUG("%d returning, granted = %d\n",
600 current->pid, d.granted_count);
601 if (copy_to_user(argp, &d, sizeof(d)))
602 return -EFAULT;
603
604 return retcode;
605}
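
A hypothetical userspace submission through the gamma_dma() ioctl path above,
assuming the drm_dma_t layout and DRM_IOCTL_DMA number from that era's drm.h;
the descriptor, context handle and buffer index are made up:

	#include <sys/ioctl.h>
	#include "drm.h"	/* drm_dma_t, DRM_IOCTL_DMA (era-appropriate header) */

	static int submit_one_buffer(int drm_fd, int ctx, int idx, int used)
	{
		drm_dma_t d = {
			.context      = ctx,		/* handle from DRM(addctx) */
			.send_count   = 1,
			.send_indices = &idx,
			.send_sizes   = &used,
			.flags        = _DRM_DMA_BLOCK,	/* wait for completion */
		};

		return ioctl(drm_fd, DRM_IOCTL_DMA, &d);
	}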
606
607/* =============================================================
608 * DMA initialization, cleanup
609 */
610
611static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
612{
613 drm_gamma_private_t *dev_priv;
614 drm_device_dma_t *dma = dev->dma;
615 drm_buf_t *buf;
616 int i;
617 struct list_head *list;
618 unsigned long *pgt;
619
620 DRM_DEBUG( "%s\n", __FUNCTION__ );
621
622 dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
623 DRM_MEM_DRIVER );
624 if ( !dev_priv )
625 return -ENOMEM;
626
627 dev->dev_private = (void *)dev_priv;
628
629 memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
630
631 dev_priv->num_rast = init->num_rast;
632
633 list_for_each(list, &dev->maplist->head) {
634 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
635 if( r_list->map &&
636 r_list->map->type == _DRM_SHM &&
637 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
638 dev_priv->sarea = r_list->map;
639 break;
640 }
641 }
642
643 dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
644 dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
645 dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
646 dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
647
648 dev_priv->sarea_priv = (drm_gamma_sarea_t *)
649 ((u8 *)dev_priv->sarea->handle +
650 init->sarea_priv_offset);
651
652 if (init->pcimode) {
653 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
654 pgt = buf->address;
655
656 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
657 buf = dma->buflist[i];
658 *pgt = virt_to_phys((void*)buf->address) | 0x07;
659 pgt++;
660 }
661
662 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
663 } else {
664 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
665 drm_core_ioremap( dev->agp_buffer_map, dev);
666
667 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
668 pgt = buf->address;
669
670 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
671 buf = dma->buflist[i];
672 *pgt = (unsigned long)buf->address + 0x07;
673 pgt++;
674 }
675
676 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
677
678		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax();
679 GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
680 }
681	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();
682 GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
683 GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
684
685 return 0;
686}
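
One detail worth noting in the two page-table loops above: the PCI path ORs
the low bits in (virt_to_phys(...) | 0x07) while the AGP path adds them
(... + 0x07).  The exact meaning of 0x07 is not documented here (presumably
valid/flag bits), but for 4 KB-aligned buffer addresses the two expressions
coincide:

	#include <assert.h>

	static void pte_flag_demo(unsigned long page)
	{
		assert((page & 0xfffUL) == 0);		/* 4 KB aligned */
		assert((page | 0x07) == (page + 0x07));	/* OR == ADD here */
	}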
687
688int gamma_do_cleanup_dma( drm_device_t *dev )
689{
690 DRM_DEBUG( "%s\n", __FUNCTION__ );
691
692 /* Make sure interrupts are disabled here because the uninstall ioctl
693 * may not have been called from userspace and after dev_private
694 * is freed, it's too late.
695 */
696 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
697 if ( dev->irq_enabled )
698 DRM(irq_uninstall)(dev);
699
700 if ( dev->dev_private ) {
701
702 if ( dev->agp_buffer_map != NULL )
703 drm_core_ioremapfree( dev->agp_buffer_map, dev );
704
705 DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
706 DRM_MEM_DRIVER );
707 dev->dev_private = NULL;
708 }
709
710 return 0;
711}
712
713int gamma_dma_init( struct inode *inode, struct file *filp,
714 unsigned int cmd, unsigned long arg )
715{
716 drm_file_t *priv = filp->private_data;
717 drm_device_t *dev = priv->dev;
718 drm_gamma_init_t init;
719
720 LOCK_TEST_WITH_RETURN( dev, filp );
721
722 if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
723 return -EFAULT;
724
725 switch ( init.func ) {
726 case GAMMA_INIT_DMA:
727 return gamma_do_init_dma( dev, &init );
728 case GAMMA_CLEANUP_DMA:
729 return gamma_do_cleanup_dma( dev );
730 }
731
732 return -EINVAL;
733}
734
735static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
736{
737 drm_device_dma_t *dma = dev->dma;
738 unsigned int *screenbuf;
739
740 DRM_DEBUG( "%s\n", __FUNCTION__ );
741
742 /* We've DRM_RESTRICTED this DMA buffer */
743
744 screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
745
746#if 0	/* disabled and incomplete: 'buffer' is never declared or dispatched */
747 *buffer++ = 0x180; /* Tag (FilterMode) */
748 *buffer++ = 0x200; /* Allow FBColor through */
749 *buffer++ = 0x53B; /* Tag */
750 *buffer++ = copy->Pitch;
751 *buffer++ = 0x53A; /* Tag */
752 *buffer++ = copy->SrcAddress;
753 *buffer++ = 0x539; /* Tag */
754 *buffer++ = copy->WidthHeight; /* Initiates transfer */
755 *buffer++ = 0x53C; /* Tag - DMAOutputAddress */
756 *buffer++ = virt_to_phys((void*)screenbuf);
757 *buffer++ = 0x53D; /* Tag - DMAOutputCount */
758 *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
759
760 /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
761 /* Now put it back to the screen */
762
763 *buffer++ = 0x180; /* Tag (FilterMode) */
764 *buffer++ = 0x400; /* Allow Sync through */
765 *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
766 *buffer++ = 0x155; /* FBSourceData | count */
767 *buffer++ = 0x537; /* Tag */
768 *buffer++ = copy->Pitch;
769 *buffer++ = 0x536; /* Tag */
770 *buffer++ = copy->DstAddress;
771 *buffer++ = 0x535; /* Tag */
772 *buffer++ = copy->WidthHeight; /* Initiates transfer */
773 *buffer++ = 0x530; /* Tag - DMAAddr */
774 *buffer++ = virt_to_phys((void*)screenbuf);
775 *buffer++ = 0x531;
776 *buffer++ = copy->Count; /* initiates DMA transfer of color data */
777#endif
778
779 /* need to dispatch it now */
780
781 return 0;
782}
783
784int gamma_dma_copy( struct inode *inode, struct file *filp,
785 unsigned int cmd, unsigned long arg )
786{
787 drm_file_t *priv = filp->private_data;
788 drm_device_t *dev = priv->dev;
789 drm_gamma_copy_t copy;
790
791 if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
792 return -EFAULT;
793
794 return gamma_do_copy_dma( dev, &copy );
795}
796
797/* =============================================================
798 * Per Context SAREA Support
799 */
800
801int gamma_getsareactx(struct inode *inode, struct file *filp,
802 unsigned int cmd, unsigned long arg)
803{
804 drm_file_t *priv = filp->private_data;
805 drm_device_t *dev = priv->dev;
806 drm_ctx_priv_map_t __user *argp = (void __user *)arg;
807 drm_ctx_priv_map_t request;
808 drm_map_t *map;
809
810 if (copy_from_user(&request, argp, sizeof(request)))
811 return -EFAULT;
812
813 down(&dev->struct_sem);
814 if ((int)request.ctx_id >= dev->max_context) {
815 up(&dev->struct_sem);
816 return -EINVAL;
817 }
818
819 map = dev->context_sareas[request.ctx_id];
820 up(&dev->struct_sem);
821
822 request.handle = map->handle;
823 if (copy_to_user(argp, &request, sizeof(request)))
824 return -EFAULT;
825 return 0;
826}
827
828int gamma_setsareactx(struct inode *inode, struct file *filp,
829 unsigned int cmd, unsigned long arg)
830{
831 drm_file_t *priv = filp->private_data;
832 drm_device_t *dev = priv->dev;
833 drm_ctx_priv_map_t request;
834 drm_map_t *map = NULL;
835 drm_map_list_t *r_list;
836 struct list_head *list;
837
838 if (copy_from_user(&request,
839 (drm_ctx_priv_map_t __user *)arg,
840 sizeof(request)))
841 return -EFAULT;
842
843 down(&dev->struct_sem);
844 r_list = NULL;
845 list_for_each(list, &dev->maplist->head) {
846 r_list = list_entry(list, drm_map_list_t, head);
847 if(r_list->map &&
848 r_list->map->handle == request.handle) break;
849 }
850 if (list == &(dev->maplist->head)) {
851 up(&dev->struct_sem);
852 return -EINVAL;
853 }
854 map = r_list->map;
855 up(&dev->struct_sem);
856
857 if (!map) return -EINVAL;
858
859 down(&dev->struct_sem);
860 if ((int)request.ctx_id >= dev->max_context) {
861 up(&dev->struct_sem);
862 return -EINVAL;
863 }
864 dev->context_sareas[request.ctx_id] = map;
865 up(&dev->struct_sem);
866 return 0;
867}
868
869void gamma_driver_irq_preinstall( drm_device_t *dev ) {
870 drm_gamma_private_t *dev_priv =
871 (drm_gamma_private_t *)dev->dev_private;
872
873 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
874 cpu_relax();
875
876 GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
877 GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
878}
879
880void gamma_driver_irq_postinstall( drm_device_t *dev ) {
881 drm_gamma_private_t *dev_priv =
882 (drm_gamma_private_t *)dev->dev_private;
883
884 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
885 cpu_relax();
886
887 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
888 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
889 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
890}
891
892void gamma_driver_irq_uninstall( drm_device_t *dev ) {
893 drm_gamma_private_t *dev_priv =
894 (drm_gamma_private_t *)dev->dev_private;
895 if (!dev_priv)
896 return;
897
898 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
899 cpu_relax();
900
901 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
902 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
903 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
904}
905
906extern drm_ioctl_desc_t DRM(ioctls)[];
907
908static int gamma_driver_preinit(drm_device_t *dev)
909{
910 /* reset the finish ioctl */
911 DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
912 return 0;
913}
914
915static void gamma_driver_pretakedown(drm_device_t *dev)
916{
917 gamma_do_cleanup_dma(dev);
918}
919
920static void gamma_driver_dma_ready(drm_device_t *dev)
921{
922 gamma_dma_ready(dev);
923}
924
925static int gamma_driver_dma_quiescent(drm_device_t *dev)
926{
927	drm_gamma_private_t *dev_priv =
928		(drm_gamma_private_t *)dev->dev_private;
929 if (dev_priv->num_rast == 2)
930 gamma_dma_quiescent_dual(dev);
931 else gamma_dma_quiescent_single(dev);
932 return 0;
933}
934
935void gamma_driver_register_fns(drm_device_t *dev)
936{
937 dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
938 DRM(fops).read = gamma_fops_read;
939 DRM(fops).poll = gamma_fops_poll;
940 dev->driver.preinit = gamma_driver_preinit;
941 dev->driver.pretakedown = gamma_driver_pretakedown;
942 dev->driver.dma_ready = gamma_driver_dma_ready;
943 dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
944 dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
945 dev->driver.dma_flush_unblock = gamma_flush_unblock;
946}
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h
deleted file mode 100644
index 20819ded0e15..000000000000
--- a/drivers/char/drm/gamma_drm.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _GAMMA_DRM_H_
2#define _GAMMA_DRM_H_
3
4typedef struct _drm_gamma_tex_region {
5 unsigned char next, prev; /* indices to form a circular LRU */
6 unsigned char in_use; /* owned by a client, or free? */
7 int age; /* tracked by clients to update local LRU's */
8} drm_gamma_tex_region_t;
9
10typedef struct {
11 unsigned int GDeltaMode;
12 unsigned int GDepthMode;
13 unsigned int GGeometryMode;
14 unsigned int GTransformMode;
15} drm_gamma_context_regs_t;
16
17typedef struct _drm_gamma_sarea {
18 drm_gamma_context_regs_t context_state;
19
20 unsigned int dirty;
21
22
23 /* Maintain an LRU of contiguous regions of texture space. If
24 * you think you own a region of texture memory, and it has an
25 * age different to the one you set, then you are mistaken and
26 * it has been stolen by another client. If global texAge
27 * hasn't changed, there is no need to walk the list.
28 *
29 * These regions can be used as a proxy for the fine-grained
30 * texture information of other clients - by maintaining them
31 * in the same lru which is used to age their own textures,
32 * clients have an approximate lru for the whole of global
33 * texture space, and can make informed decisions as to which
34 * areas to kick out. There is no need to choose whether to
35 * kick out your own texture or someone else's - simply eject
36 * them all in LRU order.
37 */
38
39#define GAMMA_NR_TEX_REGIONS 64
40 drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
41	/* Last element is a sentinel */
42 int texAge; /* last time texture was uploaded */
43 int last_enqueue; /* last time a buffer was enqueued */
44 int last_dispatch; /* age of the most recently dispatched buffer */
45 int last_quiescent; /* */
46 int ctxOwner; /* last context to upload state */
47
48 int vertex_prim;
49} drm_gamma_sarea_t;
50
51/* WARNING: If you change any of these defines, make sure to change the
52 * defines in the Xserver file (xf86drmGamma.h)
53 */
54
55/* Gamma specific ioctls
56 * The device specific ioctl range is 0x40 to 0x79.
57 */
58#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
59#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
60
61typedef struct drm_gamma_copy {
62 unsigned int DMAOutputAddress;
63 unsigned int DMAOutputCount;
64 unsigned int DMAReadGLINTSource;
65 unsigned int DMARectangleWriteAddress;
66 unsigned int DMARectangleWriteLinePitch;
67 unsigned int DMARectangleWrite;
68 unsigned int DMARectangleReadAddress;
69 unsigned int DMARectangleReadLinePitch;
70 unsigned int DMARectangleRead;
71 unsigned int DMARectangleReadTarget;
72} drm_gamma_copy_t;
73
74typedef struct drm_gamma_init {
75 enum {
76 GAMMA_INIT_DMA = 0x01,
77 GAMMA_CLEANUP_DMA = 0x02
78 } func;
79
80 int sarea_priv_offset;
81 int pcimode;
82 unsigned int mmio0;
83 unsigned int mmio1;
84 unsigned int mmio2;
85 unsigned int mmio3;
86 unsigned int buffers_offset;
87 int num_rast;
88} drm_gamma_init_t;
89
90#endif /* _GAMMA_DRM_H_ */
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c
deleted file mode 100644
index e7e64b62792a..000000000000
--- a/drivers/char/drm/gamma_drv.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include <linux/config.h>
33#include "gamma.h"
34#include "drmP.h"
35#include "drm.h"
36#include "gamma_drm.h"
37#include "gamma_drv.h"
38
39#include "drm_auth.h"
40#include "drm_agpsupport.h"
41#include "drm_bufs.h"
42#include "gamma_context.h" /* NOTE! */
43#include "drm_dma.h"
44#include "gamma_old_dma.h" /* NOTE */
45#include "drm_drawable.h"
46#include "drm_drv.h"
47
48#include "drm_fops.h"
49#include "drm_init.h"
50#include "drm_ioctl.h"
51#include "drm_irq.h"
52#include "gamma_lists.h" /* NOTE */
53#include "drm_lock.h"
54#include "gamma_lock.h" /* NOTE */
55#include "drm_memory.h"
56#include "drm_proc.h"
57#include "drm_vm.h"
58#include "drm_stub.h"
59#include "drm_scatter.h"
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h
deleted file mode 100644
index 146fcc6253cd..000000000000
--- a/drivers/char/drm/gamma_drv.h
+++ /dev/null
@@ -1,147 +0,0 @@
1/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#ifndef _GAMMA_DRV_H_
33#define _GAMMA_DRV_H_
34
35typedef struct drm_gamma_private {
36 drm_gamma_sarea_t *sarea_priv;
37 drm_map_t *sarea;
38 drm_map_t *mmio0;
39 drm_map_t *mmio1;
40 drm_map_t *mmio2;
41 drm_map_t *mmio3;
42 int num_rast;
43} drm_gamma_private_t;
44
45 /* gamma_dma.c */
46extern int gamma_dma_init( struct inode *inode, struct file *filp,
47 unsigned int cmd, unsigned long arg );
48extern int gamma_dma_copy( struct inode *inode, struct file *filp,
49 unsigned int cmd, unsigned long arg );
50
51extern int gamma_do_cleanup_dma( drm_device_t *dev );
52extern void gamma_dma_ready(drm_device_t *dev);
53extern void gamma_dma_quiescent_single(drm_device_t *dev);
54extern void gamma_dma_quiescent_dual(drm_device_t *dev);
55
56 /* gamma_dma.c */
57extern int gamma_dma_schedule(drm_device_t *dev, int locked);
58extern int gamma_dma(struct inode *inode, struct file *filp,
59 unsigned int cmd, unsigned long arg);
60extern int gamma_find_devices(void);
61extern int gamma_found(void);
62
63/* Gamma-specific code pulled from drm_fops.h:
64 */
65extern int DRM(finish)(struct inode *inode, struct file *filp,
66 unsigned int cmd, unsigned long arg);
67extern int DRM(flush_unblock)(drm_device_t *dev, int context,
68 drm_lock_flags_t flags);
69extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
70 drm_lock_flags_t flags);
71
72/* Gamma-specific code pulled from drm_dma.h:
73 */
74extern void DRM(clear_next_buffer)(drm_device_t *dev);
75extern int DRM(select_queue)(drm_device_t *dev,
76 void (*wrapper)(unsigned long));
77extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
78extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
79
80
81/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
82 */
83extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
84extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
85extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
86extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
87extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
88extern int DRM(freelist_destroy)(drm_freelist_t *bl);
89extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
90 drm_buf_t *buf);
91extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
92
93/* externs for gamma changes to the ops */
94extern struct file_operations DRM(fops);
95extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
96extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
97
98
99#define GLINT_DRI_BUF_COUNT 256
100
101#define GAMMA_OFF(reg) \
102 ((reg < 0x1000) \
103 ? reg \
104 : ((reg < 0x10000) \
105 ? (reg - 0x1000) \
106 : ((reg < 0x11000) \
107 ? (reg - 0x10000) \
108 : (reg - 0x11000))))
109
110#define GAMMA_BASE(reg) ((unsigned long) \
111 ((reg < 0x1000) ? dev_priv->mmio0->handle : \
112 ((reg < 0x10000) ? dev_priv->mmio1->handle : \
113 ((reg < 0x11000) ? dev_priv->mmio2->handle : \
114 dev_priv->mmio3->handle))))
115#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
116#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
117#define GAMMA_READ(reg) GAMMA_DEREF(reg)
118#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
119
120#define GAMMA_BROADCASTMASK 0x9378
121#define GAMMA_COMMANDINTENABLE 0x0c48
122#define GAMMA_DMAADDRESS 0x0028
123#define GAMMA_DMACOUNT 0x0030
124#define GAMMA_FILTERMODE 0x8c00
125#define GAMMA_GCOMMANDINTFLAGS 0x0c50
126#define GAMMA_GCOMMANDMODE 0x0c40
127#define GAMMA_QUEUED_DMA_MODE (1<<1)
128#define GAMMA_GCOMMANDSTATUS 0x0c60
129#define GAMMA_GDELAYTIMER 0x0c38
130#define GAMMA_GDMACONTROL 0x0060
131#define GAMMA_USE_AGP (1<<1)
132#define GAMMA_GINTENABLE 0x0808
133#define GAMMA_GINTFLAGS 0x0810
134#define GAMMA_INFIFOSPACE 0x0018
135#define GAMMA_OUTFIFOWORDS 0x0020
136#define GAMMA_OUTPUTFIFO 0x2000
137#define GAMMA_SYNC 0x8c40
138#define GAMMA_SYNC_TAG 0x0188
139#define GAMMA_PAGETABLEADDR 0x0C00
140#define GAMMA_PAGETABLELENGTH 0x0C08
141
142#define GAMMA_PASSTHROUGH 0x1FE
143#define GAMMA_DMAADDRTAG 0x530
144#define GAMMA_DMACOUNTTAG 0x531
145#define GAMMA_COMMANDINTTAG 0x532
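
Worked examples of the GAMMA_OFF/GAMMA_BASE mapping above (editorial), using
the register values just defined:

	/*   GAMMA_INFIFOSPACE           = 0x0018  -> mmio0, offset 0x0018
	 *   GAMMA_OUTPUTFIFO            = 0x2000  -> mmio1, offset 0x1000
	 *   GAMMA_OUTFIFOWORDS + 0x10000 = 0x10020 -> mmio2, offset 0x0020
	 *   GAMMA_OUTPUTFIFO  + 0x10000  = 0x12000 -> mmio3, offset 0x1000
	 *
	 * The last two are exactly the "+ 0x10000" reads that
	 * gamma_dma_quiescent_dual() uses to reach the second rasterizer. */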
146
147#endif
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h
deleted file mode 100644
index 2d93f412b96b..000000000000
--- a/drivers/char/drm/gamma_lists.h
+++ /dev/null
@@ -1,215 +0,0 @@
1/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
2 * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33
34
35int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
36{
37 if (bl->count) return -EINVAL;
38
39	bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs),
40 DRM_MEM_BUFLISTS);
41
42 if(!bl->bufs) return -ENOMEM;
43	memset(bl->bufs, 0, (count + 2) * sizeof(*bl->bufs));
44 bl->count = count;
45 bl->rp = bl->bufs;
46 bl->wp = bl->bufs;
47 bl->end = &bl->bufs[bl->count+1];
48 spin_lock_init(&bl->write_lock);
49 spin_lock_init(&bl->read_lock);
50 return 0;
51}
52
53int DRM(waitlist_destroy)(drm_waitlist_t *bl)
54{
55 if (bl->rp != bl->wp) return -EINVAL;
56 if (bl->bufs) DRM(free)(bl->bufs,
57 (bl->count + 2) * sizeof(*bl->bufs),
58 DRM_MEM_BUFLISTS);
59 bl->count = 0;
60 bl->bufs = NULL;
61 bl->rp = NULL;
62 bl->wp = NULL;
63 bl->end = NULL;
64 return 0;
65}
66
67int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
68{
69 int left;
70 unsigned long flags;
71
72 left = DRM_LEFTCOUNT(bl);
73 if (!left) {
74 DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
75 buf->idx, buf->filp);
76 return -EINVAL;
77 }
78 buf->list = DRM_LIST_WAIT;
79
80 spin_lock_irqsave(&bl->write_lock, flags);
81 *bl->wp = buf;
82 if (++bl->wp >= bl->end) bl->wp = bl->bufs;
83 spin_unlock_irqrestore(&bl->write_lock, flags);
84
85 return 0;
86}
87
88drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
89{
90 drm_buf_t *buf;
91 unsigned long flags;
92
93 spin_lock_irqsave(&bl->read_lock, flags);
94 buf = *bl->rp;
95 if (bl->rp == bl->wp) {
96 spin_unlock_irqrestore(&bl->read_lock, flags);
97 return NULL;
98 }
99 if (++bl->rp >= bl->end) bl->rp = bl->bufs;
100 spin_unlock_irqrestore(&bl->read_lock, flags);
101
102 return buf;
103}
104
105int DRM(freelist_create)(drm_freelist_t *bl, int count)
106{
107 atomic_set(&bl->count, 0);
108 bl->next = NULL;
109 init_waitqueue_head(&bl->waiting);
110 bl->low_mark = 0;
111 bl->high_mark = 0;
112 atomic_set(&bl->wfh, 0);
113 spin_lock_init(&bl->lock);
114 ++bl->initialized;
115 return 0;
116}
117
118int DRM(freelist_destroy)(drm_freelist_t *bl)
119{
120 atomic_set(&bl->count, 0);
121 bl->next = NULL;
122 return 0;
123}
124
125int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
126{
127 drm_device_dma_t *dma = dev->dma;
128
129 if (!dma) {
130 DRM_ERROR("No DMA support\n");
131 return 1;
132 }
133
134 if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
135 DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
136 buf->idx, buf->waiting, buf->pending, buf->list);
137 }
138 if (!bl) return 1;
139 buf->list = DRM_LIST_FREE;
140
141 spin_lock(&bl->lock);
142 buf->next = bl->next;
143 bl->next = buf;
144 spin_unlock(&bl->lock);
145
146 atomic_inc(&bl->count);
147 if (atomic_read(&bl->count) > dma->buf_count) {
148 DRM_ERROR("%d of %d buffers free after addition of %d\n",
149 atomic_read(&bl->count), dma->buf_count, buf->idx);
150 return 1;
151 }
152 /* Check for high water mark */
153 if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
154 atomic_set(&bl->wfh, 0);
155 wake_up_interruptible(&bl->waiting);
156 }
157 return 0;
158}
159
160static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
161{
162 drm_buf_t *buf;
163
164 if (!bl) return NULL;
165
166 /* Get buffer */
167 spin_lock(&bl->lock);
168 if (!bl->next) {
169 spin_unlock(&bl->lock);
170 return NULL;
171 }
172 buf = bl->next;
173 bl->next = bl->next->next;
174 spin_unlock(&bl->lock);
175
176 atomic_dec(&bl->count);
177 buf->next = NULL;
178 buf->list = DRM_LIST_NONE;
179 if (buf->waiting || buf->pending) {
180 DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
181 buf->idx, buf->waiting, buf->pending, buf->list);
182 }
183
184 return buf;
185}
186
187drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
188{
189 drm_buf_t *buf = NULL;
190 DECLARE_WAITQUEUE(entry, current);
191
192 if (!bl || !bl->initialized) return NULL;
193
194 /* Check for low water mark */
195 if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
196 atomic_set(&bl->wfh, 1);
197 if (atomic_read(&bl->wfh)) {
198 if (block) {
199 add_wait_queue(&bl->waiting, &entry);
200 for (;;) {
201 current->state = TASK_INTERRUPTIBLE;
202 if (!atomic_read(&bl->wfh)
203 && (buf = DRM(freelist_try)(bl))) break;
204 schedule();
205 if (signal_pending(current)) break;
206 }
207 current->state = TASK_RUNNING;
208 remove_wait_queue(&bl->waiting, &entry);
209 }
210 return buf;
211 }
212
213 return DRM(freelist_try)(bl);
214}
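
The wfh field used above is never spelled out; reading it as "wait for high
(water mark)" fits the code, though that is an assumption.  The hysteresis the
two tests implement, reduced to a sketch (names stand in for bl->count,
bl->wfh and the two marks):

	static int low_mark, high_mark, wfh;

	static void update_marks(int count)
	{
		if (count <= low_mark)
			wfh = 1;	/* freelist_get(): start blocking */
		if (wfh && count >= high_mark)
			wfh = 0;	/* freelist_put(): wake waiters   */
	}

Blocking at one threshold and releasing at a higher one keeps callers from
thrashing around a single mark as buffers trickle in and out.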
215
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h
deleted file mode 100644
index ddec67e4ed16..000000000000
--- a/drivers/char/drm/gamma_lock.h
+++ /dev/null
@@ -1,140 +0,0 @@
1/* lock.c -- IOCTLs for locking -*- linux-c -*-
2 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code extracted from drm_lock.h:
34 */
35static int DRM(flush_queue)(drm_device_t *dev, int context)
36{
37 DECLARE_WAITQUEUE(entry, current);
38 int ret = 0;
39 drm_queue_t *q = dev->queuelist[context];
40
41 DRM_DEBUG("\n");
42
43 atomic_inc(&q->use_count);
44 if (atomic_read(&q->use_count) > 1) {
45 atomic_inc(&q->block_write);
46 add_wait_queue(&q->flush_queue, &entry);
47 atomic_inc(&q->block_count);
48 for (;;) {
49 current->state = TASK_INTERRUPTIBLE;
50 if (!DRM_BUFCOUNT(&q->waitlist)) break;
51 schedule();
52 if (signal_pending(current)) {
53 ret = -EINTR; /* Can't restart */
54 break;
55 }
56 }
57 atomic_dec(&q->block_count);
58 current->state = TASK_RUNNING;
59 remove_wait_queue(&q->flush_queue, &entry);
60 }
61 atomic_dec(&q->use_count);
62
63 /* NOTE: block_write is still incremented!
64 Use drm_flush_unlock_queue to decrement. */
65 return ret;
66}
67
68static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
69{
70 drm_queue_t *q = dev->queuelist[context];
71
72 DRM_DEBUG("\n");
73
74 atomic_inc(&q->use_count);
75 if (atomic_read(&q->use_count) > 1) {
76 if (atomic_read(&q->block_write)) {
77 atomic_dec(&q->block_write);
78 wake_up_interruptible(&q->write_queue);
79 }
80 }
81 atomic_dec(&q->use_count);
82 return 0;
83}
84
85int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
86 drm_lock_flags_t flags)
87{
88 int ret = 0;
89 int i;
90
91 DRM_DEBUG("\n");
92
93 if (flags & _DRM_LOCK_FLUSH) {
94 ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
95 if (!ret) ret = DRM(flush_queue)(dev, context);
96 }
97 if (flags & _DRM_LOCK_FLUSH_ALL) {
98 for (i = 0; !ret && i < dev->queue_count; i++) {
99 ret = DRM(flush_queue)(dev, i);
100 }
101 }
102 return ret;
103}
104
105int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
106{
107 int ret = 0;
108 int i;
109
110 DRM_DEBUG("\n");
111
112 if (flags & _DRM_LOCK_FLUSH) {
113 ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
114 if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
115 }
116 if (flags & _DRM_LOCK_FLUSH_ALL) {
117 for (i = 0; !ret && i < dev->queue_count; i++) {
118 ret = DRM(flush_unblock_queue)(dev, i);
119 }
120 }
121
122 return ret;
123}
124
125int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
126 unsigned long arg)
127{
128 drm_file_t *priv = filp->private_data;
129 drm_device_t *dev = priv->dev;
130 int ret = 0;
131 drm_lock_t lock;
132
133 DRM_DEBUG("\n");
134
135 if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
136 return -EFAULT;
137 ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
138 DRM(flush_unblock)(dev, lock.context, lock.flags);
139 return ret;
140}
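The deleted lock code above relies on the manual wait-queue sleep idiom that predates wait_event_interruptible(). A minimal sketch of that idiom in kernel-style C, with condition() standing in for a caller-supplied predicate such as the DRM_BUFCOUNT test in DRM(flush_queue):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static int sleep_until(wait_queue_head_t *wq,
	                       int (*condition)(void *), void *arg)
	{
		DECLARE_WAITQUEUE(entry, current);
		int ret = 0;

		add_wait_queue(wq, &entry);
		for (;;) {
			/* Mark the task sleeping before testing the condition
			 * so a wake-up between the test and schedule() is not
			 * lost. */
			current->state = TASK_INTERRUPTIBLE;
			if (condition(arg))
				break;
			schedule();
			if (signal_pending(current)) {
				ret = -EINTR;	/* interrupted; cannot restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(wq, &entry);
		return ret;
	}

Modern kernels express the same loop as wait_event_interruptible(*wq, condition(arg)).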
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h
deleted file mode 100644
index abdd454aab9f..000000000000
--- a/drivers/char/drm/gamma_old_dma.h
+++ /dev/null
@@ -1,313 +0,0 @@
1/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code pulled from drm_dma.h:
34 */
35
36void DRM(clear_next_buffer)(drm_device_t *dev)
37{
38 drm_device_dma_t *dma = dev->dma;
39
40 dma->next_buffer = NULL;
41 if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
42 wake_up_interruptible(&dma->next_queue->flush_queue);
43 }
44 dma->next_queue = NULL;
45}
46
47int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
48{
49 int i;
50 int candidate = -1;
51 int j = jiffies;
52
53 if (!dev) {
54 DRM_ERROR("No device\n");
55 return -1;
56 }
57 if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
58 /* This only happens between the time the
59 interrupt is initialized and the time
60 the queues are initialized. */
61 return -1;
62 }
63
64 /* Doing "while locked" DMA? */
65 if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
66 return DRM_KERNEL_CONTEXT;
67 }
68
69 /* If there are buffers on the last_context
70 queue, and we have not been executing
71 this context very long, continue to
72 execute this context. */
73 if (dev->last_switch <= j
74 && dev->last_switch + DRM_TIME_SLICE > j
75 && DRM_WAITCOUNT(dev, dev->last_context)) {
76 return dev->last_context;
77 }
78
79 /* Otherwise, find a candidate */
80 for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
81 if (DRM_WAITCOUNT(dev, i)) {
82 candidate = dev->last_checked = i;
83 break;
84 }
85 }
86
87 if (candidate < 0) {
88 for (i = 0; i < dev->queue_count; i++) {
89 if (DRM_WAITCOUNT(dev, i)) {
90 candidate = dev->last_checked = i;
91 break;
92 }
93 }
94 }
95
96 if (wrapper
97 && candidate >= 0
98 && candidate != dev->last_context
99 && dev->last_switch <= j
100 && dev->last_switch + DRM_TIME_SLICE > j) {
101 if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
102 del_timer(&dev->timer);
103 dev->timer.function = wrapper;
104 dev->timer.data = (unsigned long)dev;
105 dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
106 add_timer(&dev->timer);
107 }
108 return -1;
109 }
110
111 return candidate;
112}
113
114
115int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
116{
117 drm_file_t *priv = filp->private_data;
118 drm_device_t *dev = priv->dev;
119 int i;
120 drm_queue_t *q;
121 drm_buf_t *buf;
122 int idx;
123 int while_locked = 0;
124 drm_device_dma_t *dma = dev->dma;
125 int *ind;
126 int err;
127 DECLARE_WAITQUEUE(entry, current);
128
129 DRM_DEBUG("%d\n", d->send_count);
130
131 if (d->flags & _DRM_DMA_WHILE_LOCKED) {
132 int context = dev->lock.hw_lock->lock;
133
134 if (!_DRM_LOCK_IS_HELD(context)) {
135 DRM_ERROR("No lock held during \"while locked\""
136 " request\n");
137 return -EINVAL;
138 }
139 if (d->context != _DRM_LOCKING_CONTEXT(context)
140 && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
141 DRM_ERROR("Lock held by %d while %d makes"
142 " \"while locked\" request\n",
143 _DRM_LOCKING_CONTEXT(context),
144 d->context);
145 return -EINVAL;
146 }
147 q = dev->queuelist[DRM_KERNEL_CONTEXT];
148 while_locked = 1;
149 } else {
150 q = dev->queuelist[d->context];
151 }
152
153
154 atomic_inc(&q->use_count);
155 if (atomic_read(&q->block_write)) {
156 add_wait_queue(&q->write_queue, &entry);
157 atomic_inc(&q->block_count);
158 for (;;) {
159 current->state = TASK_INTERRUPTIBLE;
160 if (!atomic_read(&q->block_write)) break;
161 schedule();
162 if (signal_pending(current)) {
163 atomic_dec(&q->use_count);
164 remove_wait_queue(&q->write_queue, &entry);
165 return -EINTR;
166 }
167 }
168 atomic_dec(&q->block_count);
169 current->state = TASK_RUNNING;
170 remove_wait_queue(&q->write_queue, &entry);
171 }
172
173 ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
174 if (!ind)
175 return -ENOMEM;
176
177 if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
178 err = -EFAULT;
179 goto out;
180 }
181
182 err = -EINVAL;
183 for (i = 0; i < d->send_count; i++) {
184 idx = ind[i];
185 if (idx < 0 || idx >= dma->buf_count) {
186 DRM_ERROR("Index %d (of %d max)\n",
187 ind[i], dma->buf_count - 1);
188 goto out;
189 }
190 buf = dma->buflist[ idx ];
191 if (buf->filp != filp) {
192 DRM_ERROR("Process %d using buffer not owned\n",
193 current->pid);
194 goto out;
195 }
196 if (buf->list != DRM_LIST_NONE) {
197 DRM_ERROR("Process %d using buffer %d on list %d\n",
198 current->pid, buf->idx, buf->list);
199 goto out;
200 }
201 buf->used = ind[i];
202 buf->while_locked = while_locked;
203 buf->context = d->context;
204 if (!buf->used) {
205 DRM_ERROR("Queueing 0 length buffer\n");
206 }
207 if (buf->pending) {
208 DRM_ERROR("Queueing pending buffer:"
209 " buffer %d, offset %d\n",
210 ind[i], i);
211 goto out;
212 }
213 if (buf->waiting) {
214 DRM_ERROR("Queueing waiting buffer:"
215 " buffer %d, offset %d\n",
216 ind[i], i);
217 goto out;
218 }
219 buf->waiting = 1;
220 if (atomic_read(&q->use_count) == 1
221 || atomic_read(&q->finalization)) {
222 DRM(free_buffer)(dev, buf);
223 } else {
224 DRM(waitlist_put)(&q->waitlist, buf);
225 atomic_inc(&q->total_queued);
226 }
227 }
228 atomic_dec(&q->use_count);
229
230 return 0;
231
232out:
233 DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
234 atomic_dec(&q->use_count);
235 return err;
236}
237
238static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
239 int order)
240{
241 drm_file_t *priv = filp->private_data;
242 drm_device_t *dev = priv->dev;
243 int i;
244 drm_buf_t *buf;
245 drm_device_dma_t *dma = dev->dma;
246
247 for (i = d->granted_count; i < d->request_count; i++) {
248 buf = DRM(freelist_get)(&dma->bufs[order].freelist,
249 d->flags & _DRM_DMA_WAIT);
250 if (!buf) break;
251 if (buf->pending || buf->waiting) {
252 DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
253 buf->idx,
254 buf->filp,
255 buf->waiting,
256 buf->pending);
257 }
258 buf->filp = filp;
259 if (copy_to_user(&d->request_indices[i],
260 &buf->idx,
261 sizeof(buf->idx)))
262 return -EFAULT;
263
264 if (copy_to_user(&d->request_sizes[i],
265 &buf->total,
266 sizeof(buf->total)))
267 return -EFAULT;
268
269 ++d->granted_count;
270 }
271 return 0;
272}
273
274
275int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
276{
277 int order;
278 int retcode = 0;
279 int tmp_order;
280
281 order = DRM(order)(dma->request_size);
282
283 dma->granted_count = 0;
284 retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
285
286 if (dma->granted_count < dma->request_count
287 && (dma->flags & _DRM_DMA_SMALLER_OK)) {
288 for (tmp_order = order - 1;
289 !retcode
290 && dma->granted_count < dma->request_count
291 && tmp_order >= DRM_MIN_ORDER;
292 --tmp_order) {
293
294 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
295 tmp_order);
296 }
297 }
298
299 if (dma->granted_count < dma->request_count
300 && (dma->flags & _DRM_DMA_LARGER_OK)) {
301 for (tmp_order = order + 1;
302 !retcode
303 && dma->granted_count < dma->request_count
304 && tmp_order <= DRM_MAX_ORDER;
305 ++tmp_order) {
306
307 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
308 tmp_order);
309 }
310 }
311 return 0;
312}
313
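DRM(dma_get_buffers) above hands out buffers by power-of-two size classes: the request size is converted to an "order", and the _DRM_DMA_SMALLER_OK / _DRM_DMA_LARGER_OK flags let the loops fall back to neighbouring orders between DRM_MIN_ORDER and DRM_MAX_ORDER. A hedged sketch of the order computation, using a plain helper in place of the kernel's DRM(order):

	static int buf_order(unsigned long size)
	{
		int order = 0;
		unsigned long s = 1;

		/* Smallest order with (1UL << order) >= size. */
		while (s < size) {
			s <<= 1;
			order++;
		}
		return order;
	}

So a 3000-byte request maps to order 12 (4096 bytes), and with _DRM_DMA_SMALLER_OK set the code retries order 11, 10, ... down to DRM_MIN_ORDER if the 4 KiB freelist is exhausted.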
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 18e0b7622893..2f1659b96fd1 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,11 +45,6 @@
45#define I810_BUF_UNMAPPED 0 45#define I810_BUF_UNMAPPED 0
46#define I810_BUF_MAPPED 1 46#define I810_BUF_MAPPED 1
47 47
48#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
49#define down_write down
50#define up_write up
51#endif
52
53static drm_buf_t *i810_freelist_get(drm_device_t *dev) 48static drm_buf_t *i810_freelist_get(drm_device_t *dev)
54{ 49{
55 drm_device_dma_t *dma = dev->dma; 50 drm_device_dma_t *dma = dev->dma;
@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev,
351 DRM_ERROR("can not find mmio map!\n"); 346 DRM_ERROR("can not find mmio map!\n");
352 return -EINVAL; 347 return -EINVAL;
353 } 348 }
349 dev->agp_buffer_token = init->buffers_offset;
354 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 350 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
355 if (!dev->agp_buffer_map) { 351 if (!dev->agp_buffer_map) {
356 dev->dev_private = (void *)dev_priv; 352 dev->dev_private = (void *)dev_priv;
@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = {
1383}; 1379};
1384 1380
1385int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1381int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1382
1383/**
1384 * Determine if the device really is AGP or not.
1385 *
1386 * All Intel graphics chipsets are treated as AGP, even if they are really
1387 * PCI-e.
1388 *
1389 * \param dev The device to be tested.
1390 *
1391 * \returns
 1392 * A value of 1 is always returned to indicate every i810 is AGP.
1393 */
1394int i810_driver_device_is_agp(drm_device_t * dev)
1395{
1396 return 1;
1397}
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index ff51b3259af9..00609329d578 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -84,6 +84,7 @@ static struct drm_driver driver = {
84 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 84 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
85 .pretakedown = i810_driver_pretakedown, 85 .pretakedown = i810_driver_pretakedown,
86 .prerelease = i810_driver_prerelease, 86 .prerelease = i810_driver_prerelease,
87 .device_is_agp = i810_driver_device_is_agp,
87 .release = i810_driver_release, 88 .release = i810_driver_release,
88 .dma_quiescent = i810_driver_dma_quiescent, 89 .dma_quiescent = i810_driver_dma_quiescent,
89 .reclaim_buffers = i810_reclaim_buffers, 90 .reclaim_buffers = i810_reclaim_buffers,
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 1b40538d1725..62ee4f58c59a 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev);
120extern void i810_driver_release(drm_device_t *dev, struct file *filp); 120extern void i810_driver_release(drm_device_t *dev, struct file *filp);
121extern void i810_driver_pretakedown(drm_device_t *dev); 121extern void i810_driver_pretakedown(drm_device_t *dev);
122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); 122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
123extern int i810_driver_device_is_agp(drm_device_t * dev);
123 124
124#define I810_BASE(reg) ((unsigned long) \ 125#define I810_BASE(reg) ((unsigned long) \
125 dev_priv->mmio_map->handle) 126 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc7733035864..6f89d5796ef3 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,11 +47,6 @@
47#define I830_BUF_UNMAPPED 0 47#define I830_BUF_UNMAPPED 0
48#define I830_BUF_MAPPED 1 48#define I830_BUF_MAPPED 1
49 49
50#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
51#define down_write down
52#define up_write up
53#endif
54
55static drm_buf_t *i830_freelist_get(drm_device_t *dev) 50static drm_buf_t *i830_freelist_get(drm_device_t *dev)
56{ 51{
57 drm_device_dma_t *dma = dev->dma; 52 drm_device_dma_t *dma = dev->dma;
@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev,
358 DRM_ERROR("can not find mmio map!\n"); 353 DRM_ERROR("can not find mmio map!\n");
359 return -EINVAL; 354 return -EINVAL;
360 } 355 }
356 dev->agp_buffer_token = init->buffers_offset;
361 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 357 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
362 if(!dev->agp_buffer_map) { 358 if(!dev->agp_buffer_map) {
363 dev->dev_private = (void *)dev_priv; 359 dev->dev_private = (void *)dev_priv;
@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = {
1586}; 1582};
1587 1583
1588int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1584int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1585
1586/**
1587 * Determine if the device really is AGP or not.
1588 *
1589 * All Intel graphics chipsets are treated as AGP, even if they are really
1590 * PCI-e.
1591 *
1592 * \param dev The device to be tested.
1593 *
1594 * \returns
 1595 * A value of 1 is always returned to indicate every i8xx is AGP.
1596 */
1597int i830_driver_device_is_agp(drm_device_t * dev)
1598{
1599 return 1;
1600}
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index bc36be76b8b2..0da9cd19919e 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -88,6 +88,7 @@ static struct drm_driver driver = {
88 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 88 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
89 .pretakedown = i830_driver_pretakedown, 89 .pretakedown = i830_driver_pretakedown,
90 .prerelease = i830_driver_prerelease, 90 .prerelease = i830_driver_prerelease,
91 .device_is_agp = i830_driver_device_is_agp,
91 .release = i830_driver_release, 92 .release = i830_driver_release,
92 .dma_quiescent = i830_driver_dma_quiescent, 93 .dma_quiescent = i830_driver_dma_quiescent,
93 .reclaim_buffers = i830_reclaim_buffers, 94 .reclaim_buffers = i830_reclaim_buffers,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index df7746131dea..63f96a8b6a4a 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev);
137extern void i830_driver_release(drm_device_t *dev, struct file *filp); 137extern void i830_driver_release(drm_device_t *dev, struct file *filp);
138extern int i830_driver_dma_quiescent(drm_device_t *dev); 138extern int i830_driver_dma_quiescent(drm_device_t *dev);
139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); 139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
140extern int i830_driver_device_is_agp(drm_device_t * dev);
140 141
141#define I830_BASE(reg) ((unsigned long) \ 142#define I830_BASE(reg) ((unsigned long) \
142 dev_priv->mmio_map->handle) 143 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index acf9e52a9507..34f552f90c4a 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev)
95 drm_core_ioremapfree( &dev_priv->ring.map, dev); 95 drm_core_ioremapfree( &dev_priv->ring.map, dev);
96 } 96 }
97 97
98 if (dev_priv->hw_status_page) { 98 if (dev_priv->status_page_dmah) {
99 drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page, 99 drm_pci_free(dev, dev_priv->status_page_dmah);
100 dev_priv->dma_status_page);
101 /* Need to rewrite hardware status page */ 100 /* Need to rewrite hardware status page */
102 I915_WRITE(0x02080, 0x1ffff000); 101 I915_WRITE(0x02080, 0x1ffff000);
103 } 102 }
@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev,
174 dev_priv->allow_batchbuffer = 1; 173 dev_priv->allow_batchbuffer = 1;
175 174
176 /* Program Hardware Status Page */ 175 /* Program Hardware Status Page */
177 dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 176 dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
178 0xffffffff, 177 0xffffffff);
179 &dev_priv->dma_status_page);
180 178
181 if (!dev_priv->hw_status_page) { 179 if (!dev_priv->status_page_dmah) {
182 dev->dev_private = (void *)dev_priv; 180 dev->dev_private = (void *)dev_priv;
183 i915_dma_cleanup(dev); 181 i915_dma_cleanup(dev);
184 DRM_ERROR("Can not allocate hardware status page\n"); 182 DRM_ERROR("Can not allocate hardware status page\n");
185 return DRM_ERR(ENOMEM); 183 return DRM_ERR(ENOMEM);
186 } 184 }
185 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
186 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
187
187 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 188 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
188 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 189 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
189 190
@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = {
731}; 732};
732 733
733int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 734int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
735
736/**
737 * Determine if the device really is AGP or not.
738 *
739 * All Intel graphics chipsets are treated as AGP, even if they are really
740 * PCI-e.
741 *
742 * \param dev The device to be tested.
743 *
744 * \returns
 745 * A value of 1 is always returned to indicate every i9x5 is AGP.
746 */
747int i915_driver_device_is_agp(drm_device_t * dev)
748{
749 return 1;
750}
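The i915 hunk above tracks a DRM core API change: drm_pci_alloc() now returns an opaque drm_dma_handle_t instead of a bare virtual address plus a dma_addr_t out-parameter, and drm_pci_free() takes only the handle. A sketch of the new usage, with the field names taken directly from the hunk:

	drm_dma_handle_t *dmah;

	/* One page of PCI-consistent memory, page-aligned, below 4 GiB. */
	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
	if (!dmah)
		return DRM_ERR(ENOMEM);

	dev_priv->hw_status_page = dmah->vaddr;		/* CPU mapping */
	dev_priv->dma_status_page = dmah->busaddr;	/* bus address for the device */

	/* ... later, on cleanup: */
	drm_pci_free(dev, dmah);	/* the size is carried by the handle */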
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 1f59d3fc79bc..106b9ec02213 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
80 .pretakedown = i915_driver_pretakedown, 80 .pretakedown = i915_driver_pretakedown,
81 .prerelease = i915_driver_prerelease, 81 .prerelease = i915_driver_prerelease,
82 .device_is_agp = i915_driver_device_is_agp,
82 .irq_preinstall = i915_driver_irq_preinstall, 83 .irq_preinstall = i915_driver_irq_preinstall,
83 .irq_postinstall = i915_driver_irq_postinstall, 84 .irq_postinstall = i915_driver_irq_postinstall,
84 .irq_uninstall = i915_driver_irq_uninstall, 85 .irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 9c37d2367dd5..70ed4e68eac8 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -79,9 +79,10 @@ typedef struct drm_i915_private {
79 drm_i915_sarea_t *sarea_priv; 79 drm_i915_sarea_t *sarea_priv;
80 drm_i915_ring_buffer_t ring; 80 drm_i915_ring_buffer_t ring;
81 81
82 drm_dma_handle_t *status_page_dmah;
82 void *hw_status_page; 83 void *hw_status_page;
83 unsigned long counter;
84 dma_addr_t dma_status_page; 84 dma_addr_t dma_status_page;
85 unsigned long counter;
85 86
86 int back_offset; 87 int back_offset;
87 int front_offset; 88 int front_offset;
@@ -102,6 +103,7 @@ typedef struct drm_i915_private {
102extern void i915_kernel_lost_context(drm_device_t * dev); 103extern void i915_kernel_lost_context(drm_device_t * dev);
103extern void i915_driver_pretakedown(drm_device_t *dev); 104extern void i915_driver_pretakedown(drm_device_t *dev);
104extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); 105extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
106extern int i915_driver_device_is_agp(drm_device_t *dev);
105 107
106/* i915_irq.c */ 108/* i915_irq.c */
107extern int i915_irq_emit(DRM_IOCTL_ARGS); 109extern int i915_irq_emit(DRM_IOCTL_ARGS);
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 832eaf8a5068..567b425b784f 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -23,18 +23,21 @@
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE. 25 * DEALINGS IN THE SOFTWARE.
26 * 26 */
27 * Authors: 27
28 * Rickard E. (Rik) Faith <faith@valinux.com> 28/**
29 * Jeff Hartmann <jhartmann@valinux.com> 29 * \file mga_dma.c
30 * Keith Whitwell <keith@tungstengraphics.com> 30 * DMA support for MGA G200 / G400.
31 * 31 *
32 * Rewritten by: 32 * \author Rickard E. (Rik) Faith <faith@valinux.com>
33 * Gareth Hughes <gareth@valinux.com> 33 * \author Jeff Hartmann <jhartmann@valinux.com>
34 * \author Keith Whitwell <keith@tungstengraphics.com>
35 * \author Gareth Hughes <gareth@valinux.com>
34 */ 36 */
35 37
36#include "drmP.h" 38#include "drmP.h"
37#include "drm.h" 39#include "drm.h"
40#include "drm_sarea.h"
38#include "mga_drm.h" 41#include "mga_drm.h"
39#include "mga_drv.h" 42#include "mga_drv.h"
40 43
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
148 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 151 DRM_DEBUG( " space = 0x%06x\n", primary->space );
149 152
150 mga_flush_write_combine(); 153 mga_flush_write_combine();
151 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 154 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
152 155
153 DRM_DEBUG( "done.\n" ); 156 DRM_DEBUG( "done.\n" );
154} 157}
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
190 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 193 DRM_DEBUG( " space = 0x%06x\n", primary->space );
191 194
192 mga_flush_write_combine(); 195 mga_flush_write_combine();
193 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 196 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
194 197
195 set_bit( 0, &primary->wrapped ); 198 set_bit( 0, &primary->wrapped );
196 DRM_DEBUG( "done.\n" ); 199 DRM_DEBUG( "done.\n" );
@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
396 * DMA initialization, cleanup 399 * DMA initialization, cleanup
397 */ 400 */
398 401
402
403int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
404{
405 drm_mga_private_t * dev_priv;
406
407 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
408 if (!dev_priv)
409 return DRM_ERR(ENOMEM);
410
411 dev->dev_private = (void *)dev_priv;
412 memset(dev_priv, 0, sizeof(drm_mga_private_t));
413
414 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
415 dev_priv->chipset = flags;
416
417 return 0;
418}
419
420/**
421 * Bootstrap the driver for AGP DMA.
422 *
423 * \todo
 424 * Investigate whether there is any benefit to storing the WARP microcode in
425 * AGP memory. If not, the microcode may as well always be put in PCI
426 * memory.
427 *
428 * \todo
429 * This routine needs to set dma_bs->agp_mode to the mode actually configured
430 * in the hardware. Looking just at the Linux AGP driver code, I don't see
431 * an easy way to determine this.
432 *
433 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
434 */
435static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
436 drm_mga_dma_bootstrap_t * dma_bs)
437{
438 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
439 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
440 int err;
441 unsigned offset;
442 const unsigned secondary_size = dma_bs->secondary_bin_count
443 * dma_bs->secondary_bin_size;
444 const unsigned agp_size = (dma_bs->agp_size << 20);
445 drm_buf_desc_t req;
446 drm_agp_mode_t mode;
447 drm_agp_info_t info;
448
449
450 /* Acquire AGP. */
451 err = drm_agp_acquire(dev);
452 if (err) {
453 DRM_ERROR("Unable to acquire AGP\n");
454 return err;
455 }
456
457 err = drm_agp_info(dev, &info);
458 if (err) {
459 DRM_ERROR("Unable to get AGP info\n");
460 return err;
461 }
462
463 mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
464 err = drm_agp_enable(dev, mode);
465 if (err) {
466 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
467 return err;
468 }
469
470
471 /* In addition to the usual AGP mode configuration, the G200 AGP cards
472 * need to have the AGP mode "manually" set.
473 */
474
475 if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
476 if (mode.mode & 0x02) {
477 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
478 }
479 else {
480 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
481 }
482 }
483
484
485 /* Allocate and bind AGP memory. */
486 dev_priv->agp_pages = agp_size / PAGE_SIZE;
487 dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
488 if (dev_priv->agp_mem == NULL) {
489 dev_priv->agp_pages = 0;
490 DRM_ERROR("Unable to allocate %uMB AGP memory\n",
491 dma_bs->agp_size);
492 return DRM_ERR(ENOMEM);
493 }
494
495 err = drm_bind_agp( dev_priv->agp_mem, 0 );
496 if (err) {
497 DRM_ERROR("Unable to bind AGP memory\n");
498 return err;
499 }
500
501 offset = 0;
502 err = drm_addmap( dev, offset, warp_size,
503 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
504 if (err) {
505 DRM_ERROR("Unable to map WARP microcode\n");
506 return err;
507 }
508
509 offset += warp_size;
510 err = drm_addmap( dev, offset, dma_bs->primary_size,
511 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
512 if (err) {
513 DRM_ERROR("Unable to map primary DMA region\n");
514 return err;
515 }
516
517 offset += dma_bs->primary_size;
518 err = drm_addmap( dev, offset, secondary_size,
519 _DRM_AGP, 0, & dev->agp_buffer_map );
520 if (err) {
521 DRM_ERROR("Unable to map secondary DMA region\n");
522 return err;
523 }
524
525 (void) memset( &req, 0, sizeof(req) );
526 req.count = dma_bs->secondary_bin_count;
527 req.size = dma_bs->secondary_bin_size;
528 req.flags = _DRM_AGP_BUFFER;
529 req.agp_start = offset;
530
531 err = drm_addbufs_agp( dev, & req );
532 if (err) {
533 DRM_ERROR("Unable to add secondary DMA buffers\n");
534 return err;
535 }
536
537 offset += secondary_size;
538 err = drm_addmap( dev, offset, agp_size - offset,
539 _DRM_AGP, 0, & dev_priv->agp_textures );
540 if (err) {
541 DRM_ERROR("Unable to map AGP texture region\n");
542 return err;
543 }
544
545 drm_core_ioremap(dev_priv->warp, dev);
546 drm_core_ioremap(dev_priv->primary, dev);
547 drm_core_ioremap(dev->agp_buffer_map, dev);
548
549 if (!dev_priv->warp->handle ||
550 !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
551 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
552 dev_priv->warp->handle, dev_priv->primary->handle,
553 dev->agp_buffer_map->handle);
554 return DRM_ERR(ENOMEM);
555 }
556
557 dev_priv->dma_access = MGA_PAGPXFER;
558 dev_priv->wagp_enable = MGA_WAGP_ENABLE;
559
560 DRM_INFO("Initialized card for AGP DMA.\n");
561 return 0;
562}
563
564/**
565 * Bootstrap the driver for PCI DMA.
566 *
567 * \todo
568 * The algorithm for decreasing the size of the primary DMA buffer could be
569 * better. The size should be rounded up to the nearest page size, then
570 * decrease the request size by a single page each pass through the loop.
571 *
572 * \todo
573 * Determine whether the maximum address passed to drm_pci_alloc is correct.
574 * The same goes for drm_addbufs_pci.
575 *
576 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
577 */
578static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
579 drm_mga_dma_bootstrap_t * dma_bs)
580{
581 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
582 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
583 unsigned int primary_size;
584 unsigned int bin_count;
585 int err;
586 drm_buf_desc_t req;
587
588
589 if (dev->dma == NULL) {
590 DRM_ERROR("dev->dma is NULL\n");
591 return DRM_ERR(EFAULT);
592 }
593
594 /* The proper alignment is 0x100 for this mapping */
595 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
596 _DRM_READ_ONLY, &dev_priv->warp);
597 if (err != 0) {
598 DRM_ERROR("Unable to create mapping for WARP microcode\n");
599 return err;
600 }
601
602 /* Other than the bottom two bits being used to encode other
603 * information, there don't appear to be any restrictions on the
604 * alignment of the primary or secondary DMA buffers.
605 */
606
607 for ( primary_size = dma_bs->primary_size
608 ; primary_size != 0
609 ; primary_size >>= 1 ) {
610 /* The proper alignment for this mapping is 0x04 */
611 err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
612 _DRM_READ_ONLY, &dev_priv->primary);
613 if (!err)
614 break;
615 }
616
617 if (err != 0) {
618 DRM_ERROR("Unable to allocate primary DMA region\n");
619 return DRM_ERR(ENOMEM);
620 }
621
622 if (dev_priv->primary->size != dma_bs->primary_size) {
623 DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
624 dma_bs->primary_size,
625 (unsigned) dev_priv->primary->size);
626 dma_bs->primary_size = dev_priv->primary->size;
627 }
628
629 for ( bin_count = dma_bs->secondary_bin_count
630 ; bin_count > 0
631 ; bin_count-- ) {
632 (void) memset( &req, 0, sizeof(req) );
633 req.count = bin_count;
634 req.size = dma_bs->secondary_bin_size;
635
636 err = drm_addbufs_pci( dev, & req );
637 if (!err) {
638 break;
639 }
640 }
641
642 if (bin_count == 0) {
643 DRM_ERROR("Unable to add secondary DMA buffers\n");
644 return err;
645 }
646
647 if (bin_count != dma_bs->secondary_bin_count) {
648 DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
649 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
650
651 dma_bs->secondary_bin_count = bin_count;
652 }
653
654 dev_priv->dma_access = 0;
655 dev_priv->wagp_enable = 0;
656
657 dma_bs->agp_mode = 0;
658
659 DRM_INFO("Initialized card for PCI DMA.\n");
660 return 0;
661}
662
663
664static int mga_do_dma_bootstrap(drm_device_t * dev,
665 drm_mga_dma_bootstrap_t * dma_bs)
666{
667 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
668 int err;
669 drm_mga_private_t * const dev_priv =
670 (drm_mga_private_t *) dev->dev_private;
671
672
673 dev_priv->used_new_dma_init = 1;
674
 675 /* The first steps are the same for both PCI- and AGP-based DMA. Map
 676 * the card's MMIO registers and map a status page.
677 */
678 err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
679 _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
680 if (err) {
681 DRM_ERROR("Unable to map MMIO region\n");
682 return err;
683 }
684
685
686 err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
687 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
688 & dev_priv->status );
689 if (err) {
690 DRM_ERROR("Unable to map status region\n");
691 return err;
692 }
693
694
695 /* The DMA initialization procedure is slightly different for PCI and
696 * AGP cards. AGP cards just allocate a large block of AGP memory and
697 * carve off portions of it for internal uses. The remaining memory
698 * is returned to user-mode to be used for AGP textures.
699 */
700
701 if (is_agp) {
702 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
703 }
704
705 /* If we attempted to initialize the card for AGP DMA but failed,
706 * clean-up any mess that may have been created.
707 */
708
709 if (err) {
710 mga_do_cleanup_dma(dev);
711 }
712
713
 714 /* Not only do we want to try to initialize PCI cards for PCI DMA,
 715 * but we also try to initialize AGP cards that could not be
716 * initialized for AGP DMA. This covers the case where we have an AGP
717 * card in a system with an unsupported AGP chipset. In that case the
718 * card will be detected as AGP, but we won't be able to allocate any
719 * AGP memory, etc.
720 */
721
722 if (!is_agp || err) {
723 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
724 }
725
726
727 return err;
728}
729
730int mga_dma_bootstrap(DRM_IOCTL_ARGS)
731{
732 DRM_DEVICE;
733 drm_mga_dma_bootstrap_t bootstrap;
734 int err;
735
736
737 DRM_COPY_FROM_USER_IOCTL(bootstrap,
738 (drm_mga_dma_bootstrap_t __user *) data,
739 sizeof(bootstrap));
740
741 err = mga_do_dma_bootstrap(dev, & bootstrap);
742 if (! err) {
743 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
744 const drm_mga_private_t * const dev_priv =
745 (drm_mga_private_t *) dev->dev_private;
746
747 if (dev_priv->agp_textures != NULL) {
748 bootstrap.texture_handle = dev_priv->agp_textures->offset;
749 bootstrap.texture_size = dev_priv->agp_textures->size;
750 }
751 else {
752 bootstrap.texture_handle = 0;
753 bootstrap.texture_size = 0;
754 }
755
756 bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
757 if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
758 sizeof(bootstrap))) {
759 err = DRM_ERR(EFAULT);
760 }
761 }
762 else {
763 mga_do_cleanup_dma(dev);
764 }
765
766 return err;
767}
768
399static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) 769static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
400{ 770{
401 drm_mga_private_t *dev_priv; 771 drm_mga_private_t *dev_priv;
402 int ret; 772 int ret;
403 DRM_DEBUG( "\n" ); 773 DRM_DEBUG( "\n" );
404 774
405 dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
406 if ( !dev_priv )
407 return DRM_ERR(ENOMEM);
408
409 memset( dev_priv, 0, sizeof(drm_mga_private_t) );
410 775
411 dev_priv->chipset = init->chipset; 776 dev_priv = dev->dev_private;
412 777
413 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 778 if (init->sgram) {
414
415 if ( init->sgram ) {
416 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; 779 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
417 } else { 780 } else {
418 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; 781 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
436 799
437 DRM_GETSAREA(); 800 DRM_GETSAREA();
438 801
439 if(!dev_priv->sarea) { 802 if (!dev_priv->sarea) {
440 DRM_ERROR( "failed to find sarea!\n" ); 803 DRM_ERROR("failed to find sarea!\n");
441 /* Assign dev_private so we can do cleanup. */
442 dev->dev_private = (void *)dev_priv;
443 mga_do_cleanup_dma( dev );
444 return DRM_ERR(EINVAL); 804 return DRM_ERR(EINVAL);
445 } 805 }
446 806
447 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 807 if (! dev_priv->used_new_dma_init) {
448 if(!dev_priv->mmio) { 808 dev_priv->status = drm_core_findmap(dev, init->status_offset);
449 DRM_ERROR( "failed to find mmio region!\n" ); 809 if (!dev_priv->status) {
450 /* Assign dev_private so we can do cleanup. */ 810 DRM_ERROR("failed to find status page!\n");
451 dev->dev_private = (void *)dev_priv; 811 return DRM_ERR(EINVAL);
452 mga_do_cleanup_dma( dev ); 812 }
453 return DRM_ERR(EINVAL); 813 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
454 } 814 if (!dev_priv->mmio) {
455 dev_priv->status = drm_core_findmap(dev, init->status_offset); 815 DRM_ERROR("failed to find mmio region!\n");
456 if(!dev_priv->status) { 816 return DRM_ERR(EINVAL);
457 DRM_ERROR( "failed to find status page!\n" ); 817 }
458 /* Assign dev_private so we can do cleanup. */ 818 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
459 dev->dev_private = (void *)dev_priv; 819 if (!dev_priv->warp) {
460 mga_do_cleanup_dma( dev ); 820 DRM_ERROR("failed to find warp microcode region!\n");
461 return DRM_ERR(EINVAL); 821 return DRM_ERR(EINVAL);
462 } 822 }
463 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 823 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
464 if(!dev_priv->warp) { 824 if (!dev_priv->primary) {
465 DRM_ERROR( "failed to find warp microcode region!\n" ); 825 DRM_ERROR("failed to find primary dma region!\n");
466 /* Assign dev_private so we can do cleanup. */ 826 return DRM_ERR(EINVAL);
467 dev->dev_private = (void *)dev_priv; 827 }
468 mga_do_cleanup_dma( dev ); 828 dev->agp_buffer_token = init->buffers_offset;
469 return DRM_ERR(EINVAL); 829 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
470 } 830 if (!dev->agp_buffer_map) {
471 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 831 DRM_ERROR("failed to find dma buffer region!\n");
472 if(!dev_priv->primary) { 832 return DRM_ERR(EINVAL);
473 DRM_ERROR( "failed to find primary dma region!\n" ); 833 }
474 /* Assign dev_private so we can do cleanup. */ 834
475 dev->dev_private = (void *)dev_priv; 835 drm_core_ioremap(dev_priv->warp, dev);
476 mga_do_cleanup_dma( dev ); 836 drm_core_ioremap(dev_priv->primary, dev);
477 return DRM_ERR(EINVAL); 837 drm_core_ioremap(dev->agp_buffer_map, dev);
478 }
479 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
480 if(!dev->agp_buffer_map) {
481 DRM_ERROR( "failed to find dma buffer region!\n" );
482 /* Assign dev_private so we can do cleanup. */
483 dev->dev_private = (void *)dev_priv;
484 mga_do_cleanup_dma( dev );
485 return DRM_ERR(EINVAL);
486 } 838 }
487 839
488 dev_priv->sarea_priv = 840 dev_priv->sarea_priv =
489 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + 841 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
490 init->sarea_priv_offset); 842 init->sarea_priv_offset);
491 843
492 drm_core_ioremap( dev_priv->warp, dev ); 844 if (!dev_priv->warp->handle ||
493 drm_core_ioremap( dev_priv->primary, dev ); 845 !dev_priv->primary->handle ||
494 drm_core_ioremap( dev->agp_buffer_map, dev ); 846 ((dev_priv->dma_access != 0) &&
495 847 ((dev->agp_buffer_map == NULL) ||
496 if(!dev_priv->warp->handle || 848 (dev->agp_buffer_map->handle == NULL)))) {
497 !dev_priv->primary->handle || 849 DRM_ERROR("failed to ioremap agp regions!\n");
498 !dev->agp_buffer_map->handle ) {
499 DRM_ERROR( "failed to ioremap agp regions!\n" );
500 /* Assign dev_private so we can do cleanup. */
501 dev->dev_private = (void *)dev_priv;
502 mga_do_cleanup_dma( dev );
503 return DRM_ERR(ENOMEM); 850 return DRM_ERR(ENOMEM);
504 } 851 }
505 852
506 ret = mga_warp_install_microcode( dev_priv ); 853 ret = mga_warp_install_microcode(dev_priv);
507 if ( ret < 0 ) { 854 if (ret < 0) {
508 DRM_ERROR( "failed to install WARP ucode!\n" ); 855 DRM_ERROR("failed to install WARP ucode!\n");
509 /* Assign dev_private so we can do cleanup. */
510 dev->dev_private = (void *)dev_priv;
511 mga_do_cleanup_dma( dev );
512 return ret; 856 return ret;
513 } 857 }
514 858
515 ret = mga_warp_init( dev_priv ); 859 ret = mga_warp_init(dev_priv);
516 if ( ret < 0 ) { 860 if (ret < 0) {
517 DRM_ERROR( "failed to init WARP engine!\n" ); 861 DRM_ERROR("failed to init WARP engine!\n");
518 /* Assign dev_private so we can do cleanup. */
519 dev->dev_private = (void *)dev_priv;
520 mga_do_cleanup_dma( dev );
521 return ret; 862 return ret;
522 } 863 }
523 864
@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
557 dev_priv->sarea_priv->last_frame.head = 0; 898 dev_priv->sarea_priv->last_frame.head = 0;
558 dev_priv->sarea_priv->last_frame.wrap = 0; 899 dev_priv->sarea_priv->last_frame.wrap = 0;
559 900
560 if ( mga_freelist_init( dev, dev_priv ) < 0 ) { 901 if (mga_freelist_init(dev, dev_priv) < 0) {
561 DRM_ERROR( "could not initialize freelist\n" ); 902 DRM_ERROR("could not initialize freelist\n");
562 /* Assign dev_private so we can do cleanup. */
563 dev->dev_private = (void *)dev_priv;
564 mga_do_cleanup_dma( dev );
565 return DRM_ERR(ENOMEM); 903 return DRM_ERR(ENOMEM);
566 } 904 }
567 905
568 /* Make dev_private visable to others. */
569 dev->dev_private = (void *)dev_priv;
570 return 0; 906 return 0;
571} 907}
572 908
573static int mga_do_cleanup_dma( drm_device_t *dev ) 909static int mga_do_cleanup_dma( drm_device_t *dev )
574{ 910{
575 DRM_DEBUG( "\n" ); 911 int err = 0;
912 DRM_DEBUG("\n");
576 913
577 /* Make sure interrupts are disabled here because the uninstall ioctl 914 /* Make sure interrupts are disabled here because the uninstall ioctl
578 * may not have been called from userspace and after dev_private 915 * may not have been called from userspace and after dev_private
@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev )
583 if ( dev->dev_private ) { 920 if ( dev->dev_private ) {
584 drm_mga_private_t *dev_priv = dev->dev_private; 921 drm_mga_private_t *dev_priv = dev->dev_private;
585 922
586 if ( dev_priv->warp != NULL ) 923 if ((dev_priv->warp != NULL)
587 drm_core_ioremapfree( dev_priv->warp, dev ); 924 && (dev_priv->mmio->type != _DRM_CONSISTENT))
588 if ( dev_priv->primary != NULL ) 925 drm_core_ioremapfree(dev_priv->warp, dev);
589 drm_core_ioremapfree( dev_priv->primary, dev ); 926
590 if ( dev->agp_buffer_map != NULL ) 927 if ((dev_priv->primary != NULL)
591 drm_core_ioremapfree( dev->agp_buffer_map, dev ); 928 && (dev_priv->primary->type != _DRM_CONSISTENT))
929 drm_core_ioremapfree(dev_priv->primary, dev);
592 930
593 if ( dev_priv->head != NULL ) { 931 if (dev->agp_buffer_map != NULL)
594 mga_freelist_cleanup( dev ); 932 drm_core_ioremapfree(dev->agp_buffer_map, dev);
933
934 if (dev_priv->used_new_dma_init) {
935 if (dev_priv->agp_mem != NULL) {
936 dev_priv->agp_textures = NULL;
937 drm_unbind_agp(dev_priv->agp_mem);
938
939 drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
940 dev_priv->agp_pages = 0;
941 dev_priv->agp_mem = NULL;
942 }
943
944 if ((dev->agp != NULL) && dev->agp->acquired) {
945 err = drm_agp_release(dev);
946 }
947
948 dev_priv->used_new_dma_init = 0;
595 } 949 }
596 950
597 drm_free( dev->dev_private, sizeof(drm_mga_private_t), 951 dev_priv->warp = NULL;
598 DRM_MEM_DRIVER ); 952 dev_priv->primary = NULL;
599 dev->dev_private = NULL; 953 dev_priv->mmio = NULL;
954 dev_priv->status = NULL;
955 dev_priv->sarea = NULL;
956 dev_priv->sarea_priv = NULL;
957 dev->agp_buffer_map = NULL;
958
959 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
960 dev_priv->warp_pipe = 0;
961 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
962
963 if (dev_priv->head != NULL) {
964 mga_freelist_cleanup(dev);
965 }
600 } 966 }
601 967
602 return 0; 968 return 0;
@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS )
606{ 972{
607 DRM_DEVICE; 973 DRM_DEVICE;
608 drm_mga_init_t init; 974 drm_mga_init_t init;
975 int err;
609 976
610 LOCK_TEST_WITH_RETURN( dev, filp ); 977 LOCK_TEST_WITH_RETURN( dev, filp );
611 978
612 DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); 979 DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
980 sizeof(init));
613 981
614 switch ( init.func ) { 982 switch ( init.func ) {
615 case MGA_INIT_DMA: 983 case MGA_INIT_DMA:
616 return mga_do_init_dma( dev, &init ); 984 err = mga_do_init_dma(dev, &init);
985 if (err) {
986 (void) mga_do_cleanup_dma(dev);
987 }
988 return err;
617 case MGA_CLEANUP_DMA: 989 case MGA_CLEANUP_DMA:
618 return mga_do_cleanup_dma( dev ); 990 return mga_do_cleanup_dma( dev );
619 } 991 }
@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
742 return ret; 1114 return ret;
743} 1115}
744 1116
745void mga_driver_pretakedown(drm_device_t *dev) 1117/**
1118 * Called just before the module is unloaded.
1119 */
1120int mga_driver_postcleanup(drm_device_t * dev)
1121{
1122 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1123 dev->dev_private = NULL;
1124
1125 return 0;
1126}
1127
1128/**
1129 * Called when the last opener of the device is closed.
1130 */
1131void mga_driver_pretakedown(drm_device_t * dev)
746{ 1132{
747 mga_do_cleanup_dma( dev ); 1133 mga_do_cleanup_dma( dev );
748} 1134}
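Taken together, the mga changes above split device-private management across the new driver hooks. A hedged summary of the ordering (the core call sites are paraphrased; only the mga_* names come from this diff):

	/*
	 * module load:    .preinit      -> mga_driver_preinit()      allocates and
	 *                                  zeroes dev_private, records the chipset
	 * runtime:        DRM_MGA_DMA_BOOTSTRAP / MGA_INIT_DMA ioctls set up DMA;
	 *                                  on failure mga_do_cleanup_dma() undoes it
	 * last close:     .pretakedown  -> mga_driver_pretakedown()   cleans up DMA
	 * module unload:  .postcleanup  -> mga_driver_postcleanup()   frees dev_private
	 */

This is why mga_do_cleanup_dma() now only tears down DMA state and resets pointers instead of freeing dev_private: the structure has to survive until postcleanup.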
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 521d4451d012..d20aab3bd57b 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -73,7 +73,8 @@
73 73
74#define MGA_CARD_TYPE_G200 1 74#define MGA_CARD_TYPE_G200 1
75#define MGA_CARD_TYPE_G400 2 75#define MGA_CARD_TYPE_G400 2
76 76#define MGA_CARD_TYPE_G450 3 /* not currently used */
77#define MGA_CARD_TYPE_G550 4
77 78
78#define MGA_FRONT 0x1 79#define MGA_FRONT 0x1
79#define MGA_BACK 0x2 80#define MGA_BACK 0x2
@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea {
225} drm_mga_sarea_t; 226} drm_mga_sarea_t;
226 227
227 228
228/* WARNING: If you change any of these defines, make sure to change the
229 * defines in the Xserver file (xf86drmMga.h)
230 */
231
232/* MGA specific ioctls 229/* MGA specific ioctls
233 * The device specific ioctl range is 0x40 to 0x79. 230 * The device specific ioctl range is 0x40 to 0x79.
234 */ 231 */
@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea {
243#define DRM_MGA_BLIT 0x08 240#define DRM_MGA_BLIT 0x08
244#define DRM_MGA_GETPARAM 0x09 241#define DRM_MGA_GETPARAM 0x09
245 242
243/* 3.2:
244 * ioctls for operating on fences.
245 */
246#define DRM_MGA_SET_FENCE 0x0a
247#define DRM_MGA_WAIT_FENCE 0x0b
248#define DRM_MGA_DMA_BOOTSTRAP 0x0c
249
250
246#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) 251#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
247#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) 252#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
248#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) 253#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea {
253#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) 258#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
254#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) 259#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
255#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) 260#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
261#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
262#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
263#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
256 264
257typedef struct _drm_mga_warp_index { 265typedef struct _drm_mga_warp_index {
258 int installed; 266 int installed;
@@ -291,12 +299,72 @@ typedef struct drm_mga_init {
291 unsigned long buffers_offset; 299 unsigned long buffers_offset;
292} drm_mga_init_t; 300} drm_mga_init_t;
293 301
294typedef struct drm_mga_fullscreen { 302typedef struct drm_mga_dma_bootstrap {
295 enum { 303 /**
296 MGA_INIT_FULLSCREEN = 0x01, 304 * \name AGP texture region
297 MGA_CLEANUP_FULLSCREEN = 0x02 305 *
298 } func; 306 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
299} drm_mga_fullscreen_t; 307 * be filled in with the actual AGP texture settings.
308 *
309 * \warning
 310 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
311 * is zero, it means that PCI memory (most likely through the use of
312 * an IOMMU) is being used for "AGP" textures.
313 */
314 /*@{*/
315 unsigned long texture_handle; /**< Handle used to map AGP textures. */
316 uint32_t texture_size; /**< Size of the AGP texture region. */
317 /*@}*/
318
319
320 /**
321 * Requested size of the primary DMA region.
322 *
 323 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
 324 * filled in with the actual size of the primary DMA region allocated.
325 */
326 uint32_t primary_size;
327
328
329 /**
330 * Requested number of secondary DMA buffers.
331 *
332 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
333 * filled in with the actual number of secondary DMA buffers
334 * allocated. Particularly when PCI DMA is used, this may be
 335 * (substantially) less than the number requested.
336 */
337 uint32_t secondary_bin_count;
338
339
340 /**
341 * Requested size of each secondary DMA buffer.
342 *
343 * While the kernel \b is free to reduce
 344 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
 345 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
346 */
347 uint32_t secondary_bin_size;
348
349
350 /**
351 * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
352 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
353 * zero, it means that PCI DMA should be used, even if AGP is
354 * possible.
355 *
356 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
357 * filled in with the actual AGP mode. If AGP was not available
358 * (i.e., PCI DMA was used), this value will be zero.
359 */
360 uint32_t agp_mode;
361
362
363 /**
364 * Desired AGP GART size, measured in megabytes.
365 */
366 uint8_t agp_size;
367} drm_mga_dma_bootstrap_t;
300 368
301typedef struct drm_mga_clear { 369typedef struct drm_mga_clear {
302 unsigned int flags; 370 unsigned int flags;
@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit {
341 */ 409 */
342#define MGA_PARAM_IRQ_NR 1 410#define MGA_PARAM_IRQ_NR 1
343 411
412/* 3.2: Query the actual card type. The DDX only distinguishes between
413 * G200 chips and non-G200 chips, which it calls G400. It turns out that
 414 * there are some very subtle differences between the G4x0 chips and the G550
415 * chips. Using this parameter query, a client-side driver can detect the
416 * difference between a G4x0 and a G550.
417 */
418#define MGA_PARAM_CARD_TYPE 2
419
344typedef struct drm_mga_getparam { 420typedef struct drm_mga_getparam {
345 int param; 421 int param;
346 void __user *value; 422 void __user *value;
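For context, user space drives the new bootstrap interface roughly as follows. This is a hedged sketch only: the fd is assumed to be an open DRM device node and the request values are illustrative; the struct and ioctl number come from the header above:

	#include <string.h>
	#include <sys/ioctl.h>
	#include "mga_drm.h"

	drm_mga_dma_bootstrap_t bs;

	memset(&bs, 0, sizeof(bs));
	bs.primary_size = 1024 * 1024;		/* requested primary DMA size */
	bs.secondary_bin_count = 128;		/* the kernel may reduce this */
	bs.secondary_bin_size = 64 * 1024;	/* the kernel may NOT reduce this */
	bs.agp_mode = 4;			/* request AGP 4x; 0 forces PCI DMA */
	bs.agp_size = 32;			/* desired GART size in MB */

	if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) == 0) {
		/* bs.agp_mode now holds the mode actually configured (0 means
		 * PCI DMA was used); bs.texture_handle / bs.texture_size
		 * describe the left-over AGP texture region, if any. */
	}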
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 844cca9cb29d..daabbba3b297 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,8 +38,15 @@
38 38
39#include "drm_pciids.h" 39#include "drm_pciids.h"
40 40
41static int mga_driver_device_is_agp(drm_device_t * dev);
41static int postinit( struct drm_device *dev, unsigned long flags ) 42static int postinit( struct drm_device *dev, unsigned long flags )
42{ 43{
44 drm_mga_private_t * const dev_priv =
45 (drm_mga_private_t *) dev->dev_private;
46
47 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
48 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
49
43 dev->counters += 3; 50 dev->counters += 3;
44 dev->types[6] = _DRM_STAT_IRQ; 51 dev->types[6] = _DRM_STAT_IRQ;
45 dev->types[7] = _DRM_STAT_PRIMARY; 52 dev->types[7] = _DRM_STAT_PRIMARY;
@@ -79,8 +86,11 @@ extern int mga_max_ioctl;
79 86
80static struct drm_driver driver = { 87static struct drm_driver driver = {
81 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 88 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
89 .preinit = mga_driver_preinit,
90 .postcleanup = mga_driver_postcleanup,
82 .pretakedown = mga_driver_pretakedown, 91 .pretakedown = mga_driver_pretakedown,
83 .dma_quiescent = mga_driver_dma_quiescent, 92 .dma_quiescent = mga_driver_dma_quiescent,
93 .device_is_agp = mga_driver_device_is_agp,
84 .vblank_wait = mga_driver_vblank_wait, 94 .vblank_wait = mga_driver_vblank_wait,
85 .irq_preinstall = mga_driver_irq_preinstall, 95 .irq_preinstall = mga_driver_irq_preinstall,
86 .irq_postinstall = mga_driver_irq_postinstall, 96 .irq_postinstall = mga_driver_irq_postinstall,
@@ -128,3 +138,38 @@ module_exit(mga_exit);
128MODULE_AUTHOR( DRIVER_AUTHOR ); 138MODULE_AUTHOR( DRIVER_AUTHOR );
129MODULE_DESCRIPTION( DRIVER_DESC ); 139MODULE_DESCRIPTION( DRIVER_DESC );
130MODULE_LICENSE("GPL and additional rights"); 140MODULE_LICENSE("GPL and additional rights");
141
142/**
143 * Determine if the device really is AGP or not.
144 *
145 * In addition to the usual tests performed by \c drm_device_is_agp, this
146 * function detects PCI G450 cards that appear to the system exactly like
147 * AGP G450 cards.
148 *
149 * \param dev The device to be tested.
150 *
151 * \returns
152 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
153 */
154int mga_driver_device_is_agp(drm_device_t * dev)
155{
156 const struct pci_dev * const pdev = dev->pdev;
157
158
159 /* There are PCI versions of the G450. These cards have the
160 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
161 * bridge chip. We detect these cards, which are not currently
162 * supported by this driver, by looking at the device ID of the
163 * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
164 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
165 * device.
166 */
167
168 if ( (pdev->device == 0x0525)
169 && (pdev->bus->self->vendor == 0x3388)
170 && (pdev->bus->self->device == 0x0021) ) {
171 return 0;
172 }
173
174 return 2;
175}
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 9412e2816eb7..b22fdbd4f830 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,10 +38,10 @@
38 38
39#define DRIVER_NAME "mga" 39#define DRIVER_NAME "mga"
40#define DRIVER_DESC "Matrox G200/G400" 40#define DRIVER_DESC "Matrox G200/G400"
41#define DRIVER_DATE "20021029" 41#define DRIVER_DATE "20050607"
42 42
43#define DRIVER_MAJOR 3 43#define DRIVER_MAJOR 3
44#define DRIVER_MINOR 1 44#define DRIVER_MINOR 2
45#define DRIVER_PATCHLEVEL 0 45#define DRIVER_PATCHLEVEL 0
46 46
47typedef struct drm_mga_primary_buffer { 47typedef struct drm_mga_primary_buffer {
@@ -87,9 +87,43 @@ typedef struct drm_mga_private {
87 int chipset; 87 int chipset;
88 int usec_timeout; 88 int usec_timeout;
89 89
90 /**
91 * If set, the new DMA initialization sequence was used. This is
 92 * primarily used to select how the driver should uninitialize its
93 * internal DMA structures.
94 */
95 int used_new_dma_init;
96
97 /**
98 * If AGP memory is used for DMA buffers, this will be the value
99 * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
100 */
101 u32 dma_access;
102
103 /**
104 * If AGP memory is used for DMA buffers, this will be the value
105 * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
106 * transfer).
107 */
108 u32 wagp_enable;
109
110 /**
111 * \name MMIO region parameters.
112 *
113 * \sa drm_mga_private_t::mmio
114 */
115 /*@{*/
116 u32 mmio_base; /**< Bus address of base of MMIO. */
117 u32 mmio_size; /**< Size of the MMIO region. */
118 /*@}*/
119
90 u32 clear_cmd; 120 u32 clear_cmd;
91 u32 maccess; 121 u32 maccess;
92 122
123 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired;
125 u32 next_fence_to_post;
126
93 unsigned int fb_cpp; 127 unsigned int fb_cpp;
94 unsigned int front_offset; 128 unsigned int front_offset;
95 unsigned int front_pitch; 129 unsigned int front_pitch;
@@ -108,35 +142,43 @@ typedef struct drm_mga_private {
108 drm_local_map_t *status; 142 drm_local_map_t *status;
109 drm_local_map_t *warp; 143 drm_local_map_t *warp;
110 drm_local_map_t *primary; 144 drm_local_map_t *primary;
111 drm_local_map_t *buffers;
112 drm_local_map_t *agp_textures; 145 drm_local_map_t *agp_textures;
146
147 DRM_AGP_MEM *agp_mem;
148 unsigned int agp_pages;
113} drm_mga_private_t; 149} drm_mga_private_t;
114 150
115 /* mga_dma.c */ 151 /* mga_dma.c */
116extern int mga_dma_init( DRM_IOCTL_ARGS ); 152extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
117extern int mga_dma_flush( DRM_IOCTL_ARGS ); 153extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
118extern int mga_dma_reset( DRM_IOCTL_ARGS ); 154extern int mga_dma_init(DRM_IOCTL_ARGS);
119extern int mga_dma_buffers( DRM_IOCTL_ARGS ); 155extern int mga_dma_flush(DRM_IOCTL_ARGS);
120extern void mga_driver_pretakedown(drm_device_t *dev); 156extern int mga_dma_reset(DRM_IOCTL_ARGS);
121extern int mga_driver_dma_quiescent(drm_device_t *dev); 157extern int mga_dma_buffers(DRM_IOCTL_ARGS);
122 158extern int mga_driver_postcleanup(drm_device_t * dev);
123extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv ); 159extern void mga_driver_pretakedown(drm_device_t * dev);
124 160extern int mga_driver_dma_quiescent(drm_device_t * dev);
125extern void mga_do_dma_flush( drm_mga_private_t *dev_priv ); 161
126extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ); 162extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
127extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ); 163
164extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
165extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
166extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
128 167
129extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); 168extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
130 169
131 /* mga_warp.c */ 170 /* mga_warp.c */
132extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); 171extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
133extern int mga_warp_init( drm_mga_private_t *dev_priv ); 172extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
134 173extern int mga_warp_init(drm_mga_private_t * dev_priv);
135extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); 174
136extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ); 175 /* mga_irq.c */
137extern void mga_driver_irq_preinstall( drm_device_t *dev ); 176extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
138extern void mga_driver_irq_postinstall( drm_device_t *dev ); 177extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
139extern void mga_driver_irq_uninstall( drm_device_t *dev ); 178extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
179extern void mga_driver_irq_preinstall(drm_device_t * dev);
180extern void mga_driver_irq_postinstall(drm_device_t * dev);
181extern void mga_driver_irq_uninstall(drm_device_t * dev);
140extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 182extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
141 unsigned long arg); 183 unsigned long arg);
142 184
@@ -527,6 +569,12 @@ do { \
527 */ 569 */
528#define MGA_EXEC 0x0100 570#define MGA_EXEC 0x0100
529 571
572/* AGP PLL encoding (for G200 only).
573 */
574#define MGA_AGP_PLL 0x1e4c
575# define MGA_AGP2XPLL_DISABLE (0 << 0)
576# define MGA_AGP2XPLL_ENABLE (1 << 0)
577
530/* Warp registers 578/* Warp registers
531 */ 579 */
532#define MGA_WR0 0x2d00 580#define MGA_WR0 0x2d00
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c
index bc745cfa2095..77d738e75a4d 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/char/drm/mga_ioc32.c
@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); 129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
130} 130}
131 131
 132typedef struct drm_mga_dma_bootstrap32 {
133 u32 texture_handle;
134 u32 texture_size;
135 u32 primary_size;
136 u32 secondary_bin_count;
137 u32 secondary_bin_size;
138 u32 agp_mode;
139 u8 agp_size;
140} drm_mga_dma_bootstrap32_t;
141
142static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
143 unsigned long arg)
144{
145 drm_mga_dma_bootstrap32_t dma_bootstrap32;
146 drm_mga_dma_bootstrap_t __user *dma_bootstrap;
147 int err;
148
149 if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
150 sizeof(dma_bootstrap32)))
151 return -EFAULT;
152
153 dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
154 if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
155 || __put_user(dma_bootstrap32.texture_handle,
156 &dma_bootstrap->texture_handle)
157 || __put_user(dma_bootstrap32.texture_size,
158 &dma_bootstrap->texture_size)
159 || __put_user(dma_bootstrap32.primary_size,
160 &dma_bootstrap->primary_size)
161 || __put_user(dma_bootstrap32.secondary_bin_count,
162 &dma_bootstrap->secondary_bin_count)
163 || __put_user(dma_bootstrap32.secondary_bin_size,
164 &dma_bootstrap->secondary_bin_size)
165 || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
166 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
167 return -EFAULT;
168
169 err = drm_ioctl(file->f_dentry->d_inode, file,
170 DRM_IOCTL_MGA_DMA_BOOTSTRAP,
171 (unsigned long)dma_bootstrap);
172 if (err)
173 return err;
174
175 if (__get_user(dma_bootstrap32.texture_handle,
176 &dma_bootstrap->texture_handle)
177 || __get_user(dma_bootstrap32.texture_size,
178 &dma_bootstrap->texture_size)
179 || __get_user(dma_bootstrap32.primary_size,
180 &dma_bootstrap->primary_size)
181 || __get_user(dma_bootstrap32.secondary_bin_count,
182 &dma_bootstrap->secondary_bin_count)
183 || __get_user(dma_bootstrap32.secondary_bin_size,
184 &dma_bootstrap->secondary_bin_size)
185 || __get_user(dma_bootstrap32.agp_mode,
186 &dma_bootstrap->agp_mode)
187 || __get_user(dma_bootstrap32.agp_size,
188 &dma_bootstrap->agp_size))
189 return -EFAULT;
190
191 if (copy_to_user((void __user *)arg, &dma_bootstrap32,
192 sizeof(dma_bootstrap32)))
193 return -EFAULT;
194
195 return 0;
196}
197
132drm_ioctl_compat_t *mga_compat_ioctls[] = { 198drm_ioctl_compat_t *mga_compat_ioctls[] = {
133 [DRM_MGA_INIT] = compat_mga_init, 199 [DRM_MGA_INIT] = compat_mga_init,
134 [DRM_MGA_GETPARAM] = compat_mga_getparam, 200 [DRM_MGA_GETPARAM] = compat_mga_getparam,
201 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
135}; 202};
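
The table above is consumed by mga_compat_ioctl() (declared in mga_drv.h). A hedged sketch of the usual dispatcher shape in this era's DRM drivers, paraphrased rather than copied from mga_ioc32.c:

/* Hedged sketch of a 32-bit ioctl dispatcher over mga_compat_ioctls[].
 * Core ioctls go to the generic compat layer; driver ioctls with no
 * wrapper fall through to the native handler.
 */
long mga_compat_ioctl_sketch(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];

	return (fn != NULL) ? (*fn) (filp, cmd, arg)
			    : drm_ioctl(filp->f_dentry->d_inode, filp,
					cmd, arg);
}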
136 203
137/** 204/**
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index bc0b6b5d43a6..52eaa4e788f9 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
41 drm_mga_private_t *dev_priv = 41 drm_mga_private_t *dev_priv =
42 (drm_mga_private_t *)dev->dev_private; 42 (drm_mga_private_t *)dev->dev_private;
43 int status; 43 int status;
44 int handled = 0;
45
46 status = MGA_READ(MGA_STATUS);
44 47
45 status = MGA_READ( MGA_STATUS );
46
47 /* VBLANK interrupt */ 48 /* VBLANK interrupt */
48 if ( status & MGA_VLINEPEN ) { 49 if ( status & MGA_VLINEPEN ) {
49 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); 50 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
50 atomic_inc(&dev->vbl_received); 51 atomic_inc(&dev->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue); 52 DRM_WAKEUP(&dev->vbl_queue);
52 drm_vbl_send_signals( dev ); 53 drm_vbl_send_signals(dev);
54 handled = 1;
55 }
56
57 /* SOFTRAP interrupt */
58 if (status & MGA_SOFTRAPEN) {
59 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
60 const u32 prim_end = MGA_READ(MGA_PRIMEND);
61
62
63 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
64
65 /* In addition to clearing the interrupt-pending bit, we
66 * have to write to MGA_PRIMEND to re-start the DMA operation.
67 */
68 if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
69 MGA_WRITE(MGA_PRIMEND, prim_end);
70 }
71
72 atomic_inc(&dev_priv->last_fence_retired);
73 DRM_WAKEUP(&dev_priv->fence_queue);
74 handled = 1;
75 }
76
77 if ( handled ) {
53 return IRQ_HANDLED; 78 return IRQ_HANDLED;
54 } 79 }
55 return IRQ_NONE; 80 return IRQ_NONE;
@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
73 return ret; 98 return ret;
74} 99}
75 100
76void mga_driver_irq_preinstall( drm_device_t *dev ) { 101int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
77 drm_mga_private_t *dev_priv = 102{
78 (drm_mga_private_t *)dev->dev_private; 103 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
104 unsigned int cur_fence;
105 int ret = 0;
106
107 /* Assume that the user has missed the current sequence number
 108 * by about a day, rather than that they want to wait for
 109 * years using fences.
110 */
111 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
112 (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
113 - *sequence) <= (1 << 23)));
114
115 *sequence = cur_fence;
116
117 return ret;
118}
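
The (cur_fence - *sequence) <= (1 << 23) test above relies on unsigned wrap-around: once the retired counter has passed the requested fence, the unsigned difference is small even across a 32-bit rollover. A minimal standalone illustration (hypothetical values, userspace C):

/* Wrap-safe fence comparison, same arithmetic as the DRM_WAIT_ON
 * condition above. "retired - wanted" stays small once the fence has
 * been retired, even if the counter wrapped past zero.
 */
#include <assert.h>
#include <stdint.h>

static int fence_retired(uint32_t retired, uint32_t wanted)
{
	return (retired - wanted) <= (1u << 23);
}

int main(void)
{
	assert(fence_retired(100, 90));		/* plainly retired */
	assert(fence_retired(5, 0xfffffff0u));	/* retired across the wrap */
	assert(!fence_retired(90, 100));	/* not yet retired */
	return 0;
}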
119
120void mga_driver_irq_preinstall(drm_device_t * dev)
121{
122 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
79 123
80 /* Disable *all* interrupts */ 124 /* Disable *all* interrupts */
81 MGA_WRITE( MGA_IEN, 0 ); 125 MGA_WRITE( MGA_IEN, 0 );
@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
83 MGA_WRITE( MGA_ICLEAR, ~0 ); 127 MGA_WRITE( MGA_ICLEAR, ~0 );
84} 128}
85 129
86void mga_driver_irq_postinstall( drm_device_t *dev ) { 130void mga_driver_irq_postinstall(drm_device_t * dev)
87 drm_mga_private_t *dev_priv = 131{
88 (drm_mga_private_t *)dev->dev_private; 132 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
133
134 DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
89 135
90 /* Turn on VBL interrupt */ 136 /* Turn on vertical blank interrupt and soft trap interrupt. */
91 MGA_WRITE( MGA_IEN, MGA_VLINEIEN ); 137 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
92} 138}
93 139
94void mga_driver_irq_uninstall( drm_device_t *dev ) { 140void mga_driver_irq_uninstall( drm_device_t *dev ) {
@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) {
98 return; 144 return;
99 145
100 /* Disable *all* interrupts */ 146 /* Disable *all* interrupts */
101 MGA_WRITE( MGA_IEN, 0 ); 147 MGA_WRITE(MGA_IEN, 0);
148
149 dev->irq_enabled = 0;
102} 150}
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 3c7a8f5ba501..05bbb4719376 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
53 53
54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit). 54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
55 */ 55 */
56 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 56 if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
57 DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl, 57 DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
58 MGA_LEN + MGA_EXEC, 0x80000000, 58 MGA_LEN + MGA_EXEC, 0x80000000,
59 MGA_DWGCTL, ctx->dwgctl, 59 MGA_DWGCTL, ctx->dwgctl,
60 MGA_LEN + MGA_EXEC, 0x80000000 ); 60 MGA_LEN + MGA_EXEC, 0x80000000);
61 } 61 }
62 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 62 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
63 MGA_CXBNDRY, (box->x2 << 16) | box->x1, 63 MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
64 MGA_YTOP, box->y1 * pitch, 64 MGA_YTOP, box->y1 * pitch,
65 MGA_YBOT, box->y2 * pitch ); 65 MGA_YBOT, (box->y2 - 1) * pitch);
66 66
67 ADVANCE_DMA(); 67 ADVANCE_DMA();
68} 68}
@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
260 260
 261 /* Padding required due to hardware bug. 261 /* Padding required due to hardware bug.
 262 */ 262 */
263 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 263 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
264 MGA_DMAPAD, 0xffffffff, 264 MGA_DMAPAD, 0xffffffff,
265 MGA_DMAPAD, 0xffffffff, 265 MGA_DMAPAD, 0xffffffff,
266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | 266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
267 MGA_WMODE_START | 267 MGA_WMODE_START | dev_priv->wagp_enable));
268 MGA_WAGP_ENABLE) );
269 268
270 ADVANCE_DMA(); 269 ADVANCE_DMA();
271} 270}
@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
342 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ 341 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */
343 342
 344 /* Padding required due to hardware bug */ 343 /* Padding required due to hardware bug */
345 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 344 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
346 MGA_DMAPAD, 0xffffffff, 345 MGA_DMAPAD, 0xffffffff,
347 MGA_DMAPAD, 0xffffffff, 346 MGA_DMAPAD, 0xffffffff,
348 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | 347 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
349 MGA_WMODE_START | 348 MGA_WMODE_START | dev_priv->wagp_enable));
350 MGA_WAGP_ENABLE) );
351 349
352 ADVANCE_DMA(); 350 ADVANCE_DMA();
353} 351}
@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv )
459 if ( dirty & MGA_UPLOAD_TEX0 ) 457 if ( dirty & MGA_UPLOAD_TEX0 )
460 ret |= mga_verify_tex( dev_priv, 0 ); 458 ret |= mga_verify_tex( dev_priv, 0 );
461 459
462 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 460 if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
463 if ( dirty & MGA_UPLOAD_TEX1 ) 461 if (dirty & MGA_UPLOAD_TEX1)
464 ret |= mga_verify_tex( dev_priv, 1 ); 462 ret |= mga_verify_tex(dev_priv, 1);
465 463
466 if ( dirty & MGA_UPLOAD_PIPE ) 464 if ( dirty & MGA_UPLOAD_PIPE )
467 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); 465 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
686 684
687 BEGIN_DMA( 1 ); 685 BEGIN_DMA( 1 );
688 686
689 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 687 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
690 MGA_DMAPAD, 0x00000000, 688 MGA_DMAPAD, 0x00000000,
691 MGA_SECADDRESS, (address | 689 MGA_SECADDRESS, (address |
692 MGA_DMA_VERTEX), 690 MGA_DMA_VERTEX),
693 MGA_SECEND, ((address + length) | 691 MGA_SECEND, ((address + length) |
694 MGA_PAGPXFER) ); 692 dev_priv->dma_access));
695 693
696 ADVANCE_DMA(); 694 ADVANCE_DMA();
697 } while ( ++i < sarea_priv->nbox ); 695 } while ( ++i < sarea_priv->nbox );
@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
733 731
734 BEGIN_DMA( 1 ); 732 BEGIN_DMA( 1 );
735 733
736 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 734 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
737 MGA_DMAPAD, 0x00000000, 735 MGA_DMAPAD, 0x00000000,
738 MGA_SETUPADDRESS, address + start, 736 MGA_SETUPADDRESS, address + start,
739 MGA_SETUPEND, ((address + end) | 737 MGA_SETUPEND, ((address + end) |
740 MGA_PAGPXFER) ); 738 dev_priv->dma_access));
741 739
742 ADVANCE_DMA(); 740 ADVANCE_DMA();
743 } while ( ++i < sarea_priv->nbox ); 741 } while ( ++i < sarea_priv->nbox );
@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
764 drm_mga_private_t *dev_priv = dev->dev_private; 762 drm_mga_private_t *dev_priv = dev->dev_private;
765 drm_mga_buf_priv_t *buf_priv = buf->dev_private; 763 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
766 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; 764 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
767 u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; 765 u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
768 u32 y2; 766 u32 y2;
769 DMA_LOCALS; 767 DMA_LOCALS;
770 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); 768 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1095 case MGA_PARAM_IRQ_NR: 1093 case MGA_PARAM_IRQ_NR:
1096 value = dev->irq; 1094 value = dev->irq;
1097 break; 1095 break;
1096 case MGA_PARAM_CARD_TYPE:
1097 value = dev_priv->chipset;
1098 break;
1098 default: 1099 default:
1099 return DRM_ERR(EINVAL); 1100 return DRM_ERR(EINVAL);
1100 } 1101 }
@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1107 return 0; 1108 return 0;
1108} 1109}
1109 1110
1111static int mga_set_fence(DRM_IOCTL_ARGS)
1112{
1113 DRM_DEVICE;
1114 drm_mga_private_t *dev_priv = dev->dev_private;
1115 u32 temp;
1116 DMA_LOCALS;
1117
1118 if (!dev_priv) {
1119 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1120 return DRM_ERR(EINVAL);
1121 }
1122
1123 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1124
 1125 /* I would normally do this assignment in the declaration of temp,
1126 * but dev_priv may be NULL.
1127 */
1128
1129 temp = dev_priv->next_fence_to_post;
1130 dev_priv->next_fence_to_post++;
1131
1132 BEGIN_DMA(1);
1133 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
1134 MGA_DMAPAD, 0x00000000,
1135 MGA_DMAPAD, 0x00000000,
1136 MGA_SOFTRAP, 0x00000000);
1137 ADVANCE_DMA();
1138
1139 if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
1140 DRM_ERROR("copy_to_user\n");
1141 return DRM_ERR(EFAULT);
1142 }
1143
1144 return 0;
1145}
1146
1147static int mga_wait_fence(DRM_IOCTL_ARGS)
1148{
1149 DRM_DEVICE;
1150 drm_mga_private_t *dev_priv = dev->dev_private;
1151 u32 fence;
1152
1153 if (!dev_priv) {
1154 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1155 return DRM_ERR(EINVAL);
1156 }
1157
1158 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
1159
1160 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1161
1162 mga_driver_fence_wait(dev, & fence);
1163
1164 if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
1165 DRM_ERROR("copy_to_user\n");
1166 return DRM_ERR(EFAULT);
1167 }
1168
1169 return 0;
1170}
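
Taken together, the two handlers give userspace a simple synchronization point: SET_FENCE queues a SOFTRAP and returns its number, WAIT_FENCE sleeps until the interrupt handler retires it. A hedged userspace sketch, assuming mga_drm.h exposes DRM_IOCTL_MGA_SET_FENCE and DRM_IOCTL_MGA_WAIT_FENCE for the two new commands:

/* Hedged userspace sketch: emit a fence, then block until the
 * hardware has retired it. Assumes the DRM_IOCTL_MGA_* macros from
 * mga_drm.h; error handling trimmed for brevity.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include "mga_drm.h"

int mga_sync_point(int fd)
{
	uint32_t fence;

	if (ioctl(fd, DRM_IOCTL_MGA_SET_FENCE, &fence) != 0)
		return -1;	/* kernel queues a SOFTRAP, returns its number */
	return ioctl(fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence);
}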
1171
1110drm_ioctl_desc_t mga_ioctls[] = { 1172drm_ioctl_desc_t mga_ioctls[] = {
1111 [DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 }, 1173 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
1112 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, 1174 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
1113 [DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 }, 1175 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
1114 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, 1176 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
1115 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, 1177 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
1116 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, 1178 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
1117 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, 1179 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
1118 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, 1180 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
1119 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, 1181 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
1120 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 }, 1182 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
1183 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
1184 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
1185 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
1186
1121}; 1187};
1122 1188
1123int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1189int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index 0a3a0cc700dc..55ccc8a0ac29 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -48,65 +48,52 @@ do { \
48 vcbase += WARP_UCODE_SIZE( which ); \ 48 vcbase += WARP_UCODE_SIZE( which ); \
49} while (0) 49} while (0)
50 50
51 51static const unsigned int mga_warp_g400_microcode_size =
52static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv ) 52 (WARP_UCODE_SIZE(warp_g400_tgz) +
53{ 53 WARP_UCODE_SIZE(warp_g400_tgza) +
54 unsigned int size; 54 WARP_UCODE_SIZE(warp_g400_tgzaf) +
55 55 WARP_UCODE_SIZE(warp_g400_tgzf) +
56 size = ( WARP_UCODE_SIZE( warp_g400_tgz ) + 56 WARP_UCODE_SIZE(warp_g400_tgzs) +
57 WARP_UCODE_SIZE( warp_g400_tgza ) + 57 WARP_UCODE_SIZE(warp_g400_tgzsa) +
58 WARP_UCODE_SIZE( warp_g400_tgzaf ) + 58 WARP_UCODE_SIZE(warp_g400_tgzsaf) +
59 WARP_UCODE_SIZE( warp_g400_tgzf ) + 59 WARP_UCODE_SIZE(warp_g400_tgzsf) +
60 WARP_UCODE_SIZE( warp_g400_tgzs ) + 60 WARP_UCODE_SIZE(warp_g400_t2gz) +
61 WARP_UCODE_SIZE( warp_g400_tgzsa ) + 61 WARP_UCODE_SIZE(warp_g400_t2gza) +
62 WARP_UCODE_SIZE( warp_g400_tgzsaf ) + 62 WARP_UCODE_SIZE(warp_g400_t2gzaf) +
63 WARP_UCODE_SIZE( warp_g400_tgzsf ) + 63 WARP_UCODE_SIZE(warp_g400_t2gzf) +
64 WARP_UCODE_SIZE( warp_g400_t2gz ) + 64 WARP_UCODE_SIZE(warp_g400_t2gzs) +
65 WARP_UCODE_SIZE( warp_g400_t2gza ) + 65 WARP_UCODE_SIZE(warp_g400_t2gzsa) +
66 WARP_UCODE_SIZE( warp_g400_t2gzaf ) + 66 WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
67 WARP_UCODE_SIZE( warp_g400_t2gzf ) + 67 WARP_UCODE_SIZE(warp_g400_t2gzsf));
68 WARP_UCODE_SIZE( warp_g400_t2gzs ) + 68
69 WARP_UCODE_SIZE( warp_g400_t2gzsa ) + 69static const unsigned int mga_warp_g200_microcode_size =
70 WARP_UCODE_SIZE( warp_g400_t2gzsaf ) + 70 (WARP_UCODE_SIZE(warp_g200_tgz) +
71 WARP_UCODE_SIZE( warp_g400_t2gzsf ) ); 71 WARP_UCODE_SIZE(warp_g200_tgza) +
72 72 WARP_UCODE_SIZE(warp_g200_tgzaf) +
73 size = PAGE_ALIGN( size ); 73 WARP_UCODE_SIZE(warp_g200_tgzf) +
74 74 WARP_UCODE_SIZE(warp_g200_tgzs) +
75 DRM_DEBUG( "G400 ucode size = %d bytes\n", size ); 75 WARP_UCODE_SIZE(warp_g200_tgzsa) +
76 return size; 76 WARP_UCODE_SIZE(warp_g200_tgzsaf) +
77} 77 WARP_UCODE_SIZE(warp_g200_tgzsf));
78 78
79static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv ) 79
80unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
80{ 81{
81 unsigned int size; 82 switch (dev_priv->chipset) {
82 83 case MGA_CARD_TYPE_G400:
83 size = ( WARP_UCODE_SIZE( warp_g200_tgz ) + 84 case MGA_CARD_TYPE_G550:
84 WARP_UCODE_SIZE( warp_g200_tgza ) + 85 return PAGE_ALIGN(mga_warp_g400_microcode_size);
85 WARP_UCODE_SIZE( warp_g200_tgzaf ) + 86 case MGA_CARD_TYPE_G200:
86 WARP_UCODE_SIZE( warp_g200_tgzf ) + 87 return PAGE_ALIGN(mga_warp_g200_microcode_size);
87 WARP_UCODE_SIZE( warp_g200_tgzs ) + 88 default:
88 WARP_UCODE_SIZE( warp_g200_tgzsa ) + 89 return 0;
89 WARP_UCODE_SIZE( warp_g200_tgzsaf ) + 90 }
90 WARP_UCODE_SIZE( warp_g200_tgzsf ) );
91
92 size = PAGE_ALIGN( size );
93
94 DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
95 return size;
96} 91}
97 92
98static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) 93static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
99{ 94{
100 unsigned char *vcbase = dev_priv->warp->handle; 95 unsigned char *vcbase = dev_priv->warp->handle;
101 unsigned long pcbase = dev_priv->warp->offset; 96 unsigned long pcbase = dev_priv->warp->offset;
102 unsigned int size;
103
104 size = mga_warp_g400_microcode_size( dev_priv );
105 if ( size > dev_priv->warp->size ) {
106 DRM_ERROR( "microcode too large! (%u > %lu)\n",
107 size, dev_priv->warp->size );
108 return DRM_ERR(ENOMEM);
109 }
110 97
111 memset( dev_priv->warp_pipe_phys, 0, 98 memset( dev_priv->warp_pipe_phys, 0,
112 sizeof(dev_priv->warp_pipe_phys) ); 99 sizeof(dev_priv->warp_pipe_phys) );
@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
136{ 123{
137 unsigned char *vcbase = dev_priv->warp->handle; 124 unsigned char *vcbase = dev_priv->warp->handle;
138 unsigned long pcbase = dev_priv->warp->offset; 125 unsigned long pcbase = dev_priv->warp->offset;
139 unsigned int size;
140
141 size = mga_warp_g200_microcode_size( dev_priv );
142 if ( size > dev_priv->warp->size ) {
143 DRM_ERROR( "microcode too large! (%u > %lu)\n",
144 size, dev_priv->warp->size );
145 return DRM_ERR(ENOMEM);
146 }
147 126
148 memset( dev_priv->warp_pipe_phys, 0, 127 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
149 sizeof(dev_priv->warp_pipe_phys) );
150 128
151 WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ ); 129 WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
152 WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF ); 130 WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
153 WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA ); 131 WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
154 WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF ); 132 WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
155 WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS ); 133 WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
156 WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF ); 134 WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
157 WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA ); 135 WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
158 WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF ); 136 WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
159 137
160 return 0; 138 return 0;
161} 139}
162 140
163int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) 141int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
164{ 142{
165 switch ( dev_priv->chipset ) { 143 const unsigned int size = mga_warp_microcode_size(dev_priv);
144
145 DRM_DEBUG("MGA ucode size = %d bytes\n", size);
146 if (size > dev_priv->warp->size) {
147 DRM_ERROR("microcode too large! (%u > %lu)\n",
148 size, dev_priv->warp->size);
149 return DRM_ERR(ENOMEM);
150 }
151
152 switch (dev_priv->chipset) {
166 case MGA_CARD_TYPE_G400: 153 case MGA_CARD_TYPE_G400:
167 return mga_warp_install_g400_microcode( dev_priv ); 154 case MGA_CARD_TYPE_G550:
155 return mga_warp_install_g400_microcode(dev_priv);
168 case MGA_CARD_TYPE_G200: 156 case MGA_CARD_TYPE_G200:
169 return mga_warp_install_g200_microcode( dev_priv ); 157 return mga_warp_install_g200_microcode( dev_priv );
170 default: 158 default:
@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
182 */ 170 */
183 switch ( dev_priv->chipset ) { 171 switch ( dev_priv->chipset ) {
184 case MGA_CARD_TYPE_G400: 172 case MGA_CARD_TYPE_G400:
185 MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND ); 173 case MGA_CARD_TYPE_G550:
186 MGA_WRITE( MGA_WGETMSB, 0x00000E00 ); 174 MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
187 MGA_WRITE( MGA_WVRTXSZ, 0x00001807 ); 175 MGA_WRITE(MGA_WGETMSB, 0x00000E00);
188 MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 ); 176 MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
177 MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
189 break; 178 break;
190 case MGA_CARD_TYPE_G200: 179 case MGA_CARD_TYPE_G200:
191 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); 180 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 08ed8d01d9d9..895152206b31 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
326 ring_start = dev_priv->cce_ring->offset - dev->agp->base; 326 ring_start = dev_priv->cce_ring->offset - dev->agp->base;
327 else 327 else
328#endif 328#endif
329 ring_start = dev_priv->cce_ring->offset - dev->sg->handle; 329 ring_start = dev_priv->cce_ring->offset -
330 (unsigned long)dev->sg->virtual;
330 331
331 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); 332 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
332 333
@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
487 r128_do_cleanup_cce( dev ); 488 r128_do_cleanup_cce( dev );
488 return DRM_ERR(EINVAL); 489 return DRM_ERR(EINVAL);
489 } 490 }
491 dev->agp_buffer_token = init->buffers_offset;
490 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 492 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
491 if(!dev->agp_buffer_map) { 493 if(!dev->agp_buffer_map) {
492 DRM_ERROR("could not find dma buffer region!\n"); 494 DRM_ERROR("could not find dma buffer region!\n");
@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
537 dev_priv->cce_buffers_offset = dev->agp->base; 539 dev_priv->cce_buffers_offset = dev->agp->base;
538 else 540 else
539#endif 541#endif
540 dev_priv->cce_buffers_offset = dev->sg->handle; 542 dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
541 543
542 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; 544 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
543 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle 545 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 0cba17d1e0ff..b616cd3ed2cd 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea {
215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) 215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) 216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) 217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
218#define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) 218#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) 219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
220 220
221typedef struct drm_r128_init { 221typedef struct drm_r128_init {
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
new file mode 100644
index 000000000000..623f1f460cb5
--- /dev/null
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -0,0 +1,801 @@
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "radeon_drm.h"
37#include "radeon_drv.h"
38#include "r300_reg.h"
39
40
41#define R300_SIMULTANEOUS_CLIPRECTS 4
42
43/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
44 */
45static const int r300_cliprect_cntl[4] = {
46 0xAAAA,
47 0xEEEE,
48 0xFEFE,
49 0xFFFE
50};
51
52
53/**
54 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
55 * buffer, starting with index n.
56 */
57static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
58 drm_radeon_cmd_buffer_t* cmdbuf,
59 int n)
60{
61 drm_clip_rect_t box;
62 int nr;
63 int i;
64 RING_LOCALS;
65
66 nr = cmdbuf->nbox - n;
67 if (nr > R300_SIMULTANEOUS_CLIPRECTS)
68 nr = R300_SIMULTANEOUS_CLIPRECTS;
69
70 DRM_DEBUG("%i cliprects\n", nr);
71
72 if (nr) {
73 BEGIN_RING(6 + nr*2);
74 OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
75
76 for(i = 0; i < nr; ++i) {
77 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
78 DRM_ERROR("copy cliprect faulted\n");
79 return DRM_ERR(EFAULT);
80 }
81
82 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
83 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
84 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
85 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
86
87 OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
88 (box.y1 << R300_CLIPRECT_Y_SHIFT));
89 OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
90 (box.y2 << R300_CLIPRECT_Y_SHIFT));
91 }
92
93 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
94
95 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
96 * client might be able to trample over memory.
97 * The impact should be very limited, but I'd rather be safe than
98 * sorry.
99 */
100 OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
101 OUT_RING( 0 );
102 OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
103 ADVANCE_RING();
104 } else {
105 /* Why we allow zero cliprect rendering:
106 * There are some commands in a command buffer that must be submitted
107 * even when there are no cliprects, e.g. DMA buffer discard
108 * or state setting (though state setting could be avoided by
109 * simulating a loss of context).
110 *
 111 * Since the cmdbuf interface is so chaotic right now (and is
112 * bound to remain that way for a bit until things settle down),
113 * it is basically impossible to filter out the commands that are
114 * necessary and those that aren't.
115 *
116 * So I choose the safe way and don't do any filtering at all;
117 * instead, I simply set up the engine so that all rendering
118 * can't produce any fragments.
119 */
120 BEGIN_RING(2);
121 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
122 ADVANCE_RING();
123 }
124
125 return 0;
126}
127
128u8 r300_reg_flags[0x10000>>2];
129
130
131void r300_init_reg_flags(void)
132{
133 int i;
134 memset(r300_reg_flags, 0, 0x10000>>2);
135 #define ADD_RANGE_MARK(reg, count,mark) \
136 for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
137 r300_reg_flags[i]|=(mark);
138
139 #define MARK_SAFE 1
140 #define MARK_CHECK_OFFSET 2
141
142 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
143
144 /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
145 ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
146 ADD_RANGE(0x2080, 1);
147 ADD_RANGE(R300_SE_VTE_CNTL, 2);
148 ADD_RANGE(0x2134, 2);
149 ADD_RANGE(0x2140, 1);
150 ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
151 ADD_RANGE(0x21DC, 1);
152 ADD_RANGE(0x221C, 1);
153 ADD_RANGE(0x2220, 4);
154 ADD_RANGE(0x2288, 1);
155 ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
156 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
157 ADD_RANGE(R300_GB_ENABLE, 1);
158 ADD_RANGE(R300_GB_MSPOS0, 5);
159 ADD_RANGE(R300_TX_ENABLE, 1);
160 ADD_RANGE(0x4200, 4);
161 ADD_RANGE(0x4214, 1);
162 ADD_RANGE(R300_RE_POINTSIZE, 1);
163 ADD_RANGE(0x4230, 3);
164 ADD_RANGE(R300_RE_LINE_CNT, 1);
165 ADD_RANGE(0x4238, 1);
166 ADD_RANGE(0x4260, 3);
167 ADD_RANGE(0x4274, 4);
168 ADD_RANGE(0x4288, 5);
169 ADD_RANGE(0x42A0, 1);
170 ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
171 ADD_RANGE(0x42B4, 1);
172 ADD_RANGE(R300_RE_CULL_CNTL, 1);
173 ADD_RANGE(0x42C0, 2);
174 ADD_RANGE(R300_RS_CNTL_0, 2);
175 ADD_RANGE(R300_RS_INTERP_0, 8);
176 ADD_RANGE(R300_RS_ROUTE_0, 8);
177 ADD_RANGE(0x43A4, 2);
178 ADD_RANGE(0x43E8, 1);
179 ADD_RANGE(R300_PFS_CNTL_0, 3);
180 ADD_RANGE(R300_PFS_NODE_0, 4);
181 ADD_RANGE(R300_PFS_TEXI_0, 64);
182 ADD_RANGE(0x46A4, 5);
183 ADD_RANGE(R300_PFS_INSTR0_0, 64);
184 ADD_RANGE(R300_PFS_INSTR1_0, 64);
185 ADD_RANGE(R300_PFS_INSTR2_0, 64);
186 ADD_RANGE(R300_PFS_INSTR3_0, 64);
187 ADD_RANGE(0x4BC0, 1);
188 ADD_RANGE(0x4BC8, 3);
189 ADD_RANGE(R300_PP_ALPHA_TEST, 2);
190 ADD_RANGE(0x4BD8, 1);
191 ADD_RANGE(R300_PFS_PARAM_0_X, 64);
192 ADD_RANGE(0x4E00, 1);
193 ADD_RANGE(R300_RB3D_CBLEND, 2);
194 ADD_RANGE(R300_RB3D_COLORMASK, 1);
195 ADD_RANGE(0x4E10, 3);
196 ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
197 ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
198 ADD_RANGE(0x4E50, 9);
199 ADD_RANGE(0x4E88, 1);
200 ADD_RANGE(0x4EA0, 2);
201 ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
202 ADD_RANGE(0x4F10, 4);
203 ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
204 ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
205 ADD_RANGE(0x4F28, 1);
206 ADD_RANGE(0x4F30, 2);
207 ADD_RANGE(0x4F44, 1);
208 ADD_RANGE(0x4F54, 1);
209
210 ADD_RANGE(R300_TX_FILTER_0, 16);
211 ADD_RANGE(R300_TX_UNK1_0, 16);
212 ADD_RANGE(R300_TX_SIZE_0, 16);
213 ADD_RANGE(R300_TX_FORMAT_0, 16);
214 /* Texture offset is dangerous and needs more checking */
215 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
216 ADD_RANGE(R300_TX_UNK4_0, 16);
217 ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
218
219 /* Sporadic registers used as primitives are emitted */
220 ADD_RANGE(0x4f18, 1);
221 ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
222 ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
223 ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
224
225}
226
227static __inline__ int r300_check_range(unsigned reg, int count)
228{
229 int i;
230 if(reg & ~0xffff)return -1;
231 for(i=(reg>>2);i<(reg>>2)+count;i++)
232 if(r300_reg_flags[i]!=MARK_SAFE)return 1;
233 return 0;
234}
235
 236 /* We expect offsets passed to the hardware to lie either within video memory or
 237 within AGP space */
238static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
239{
 240 /* We really want to check against the end of the video aperture,
 241 but that value is not being kept.
 242 This code is correct for now (it does the same thing as the
 243 code that sets MC_FB_LOCATION in radeon_cp.c). */
244 if((offset>=dev_priv->fb_location) &&
245 (offset<dev_priv->gart_vm_start))return 0;
246 if((offset>=dev_priv->gart_vm_start) &&
247 (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
248 return 1;
249}
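
To make the two accepted windows concrete, a hedged standalone illustration with made-up aperture values (the kernel function reads them from dev_priv):

/* Same interval logic as r300_check_offset(), with hypothetical
 * values: video memory at 0xe0000000 up to the GART window, and a
 * 64 MiB GART aperture directly behind it. 0 = acceptable offset.
 */
#include <assert.h>
#include <stdint.h>

static int check_offset(uint32_t fb_location, uint32_t gart_vm_start,
			uint32_t gart_size, uint32_t offset)
{
	if (offset >= fb_location && offset < gart_vm_start)
		return 0;	/* inside video memory */
	if (offset >= gart_vm_start && offset < gart_vm_start + gart_size)
		return 0;	/* inside the GART aperture */
	return 1;
}

int main(void)
{
	assert(check_offset(0xe0000000u, 0xf0000000u, 0x04000000u,
			    0xe8000000u) == 0);	/* VRAM: accepted */
	assert(check_offset(0xe0000000u, 0xf0000000u, 0x04000000u,
			    0x10000000u) == 1);	/* elsewhere: rejected */
	return 0;
}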
250
251static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
252 drm_radeon_cmd_buffer_t* cmdbuf,
253 drm_r300_cmd_header_t header)
254{
255 int reg;
256 int sz;
257 int i;
258 int values[64];
259 RING_LOCALS;
260
261 sz = header.packet0.count;
262 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
263
264 if((sz>64)||(sz<0)){
265 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
266 return DRM_ERR(EINVAL);
267 }
268 for(i=0;i<sz;i++){
269 values[i]=((int __user*)cmdbuf->buf)[i];
270 switch(r300_reg_flags[(reg>>2)+i]){
271 case MARK_SAFE:
272 break;
273 case MARK_CHECK_OFFSET:
274 if(r300_check_offset(dev_priv, (u32)values[i])){
275 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
276 return DRM_ERR(EINVAL);
277 }
278 break;
279 default:
280 DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
281 return DRM_ERR(EINVAL);
282 }
283 }
284
285 BEGIN_RING(1+sz);
286 OUT_RING( CP_PACKET0( reg, sz-1 ) );
287 OUT_RING_TABLE( values, sz );
288 ADVANCE_RING();
289
290 cmdbuf->buf += sz*4;
291 cmdbuf->bufsz -= sz*4;
292
293 return 0;
294}
295
296/**
297 * Emits a packet0 setting arbitrary registers.
298 * Called by r300_do_cp_cmdbuf.
299 *
300 * Note that checks are performed on contents and addresses of the registers
301 */
302static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
303 drm_radeon_cmd_buffer_t* cmdbuf,
304 drm_r300_cmd_header_t header)
305{
306 int reg;
307 int sz;
308 RING_LOCALS;
309
310 sz = header.packet0.count;
311 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
312
313 if (!sz)
314 return 0;
315
316 if (sz*4 > cmdbuf->bufsz)
317 return DRM_ERR(EINVAL);
318
319 if (reg+sz*4 >= 0x10000){
320 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
321 return DRM_ERR(EINVAL);
322 }
323
324 if(r300_check_range(reg, sz)){
325 /* go and check everything */
326 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
327 }
328 /* the rest of the data is safe to emit, whatever the values the user passed */
329
330 BEGIN_RING(1+sz);
331 OUT_RING( CP_PACKET0( reg, sz-1 ) );
332 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
333 ADVANCE_RING();
334
335 cmdbuf->buf += sz*4;
336 cmdbuf->bufsz -= sz*4;
337
338 return 0;
339}
340
341
342/**
343 * Uploads user-supplied vertex program instructions or parameters onto
344 * the graphics card.
345 * Called by r300_do_cp_cmdbuf.
346 */
347static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
348 drm_radeon_cmd_buffer_t* cmdbuf,
349 drm_r300_cmd_header_t header)
350{
351 int sz;
352 int addr;
353 RING_LOCALS;
354
355 sz = header.vpu.count;
356 addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
357
358 if (!sz)
359 return 0;
360 if (sz*16 > cmdbuf->bufsz)
361 return DRM_ERR(EINVAL);
362
363 BEGIN_RING(5+sz*4);
 364 /* Wait for the VAP to come to its senses. */
 365 /* There is no need to emit this more than once (only before the VAP
 366 is programmed), but that optimization is for later. */
367 OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
368 OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
369 OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
370 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
371
372 ADVANCE_RING();
373
374 cmdbuf->buf += sz*16;
375 cmdbuf->bufsz -= sz*16;
376
377 return 0;
378}
379
380
381/**
382 * Emit a clear packet from userspace.
383 * Called by r300_emit_packet3.
384 */
385static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
386 drm_radeon_cmd_buffer_t* cmdbuf)
387{
388 RING_LOCALS;
389
390 if (8*4 > cmdbuf->bufsz)
391 return DRM_ERR(EINVAL);
392
393 BEGIN_RING(10);
394 OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
395 OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
396 (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
397 OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
398 ADVANCE_RING();
399
400 cmdbuf->buf += 8*4;
401 cmdbuf->bufsz -= 8*4;
402
403 return 0;
404}
405
406static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
407 drm_radeon_cmd_buffer_t* cmdbuf,
408 u32 header)
409{
410 int count, i,k;
411 #define MAX_ARRAY_PACKET 64
412 u32 payload[MAX_ARRAY_PACKET];
413 u32 narrays;
414 RING_LOCALS;
415
416 count=(header>>16) & 0x3fff;
417
418 if((count+1)>MAX_ARRAY_PACKET){
419 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
420 return DRM_ERR(EINVAL);
421 }
422 memset(payload, 0, MAX_ARRAY_PACKET*4);
423 memcpy(payload, cmdbuf->buf+4, (count+1)*4);
424
425 /* carefully check packet contents */
426
427 narrays=payload[0];
428 k=0;
429 i=1;
430 while((k<narrays) && (i<(count+1))){
431 i++; /* skip attribute field */
432 if(r300_check_offset(dev_priv, payload[i])){
433 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
434 return DRM_ERR(EINVAL);
435 }
436 k++;
437 i++;
438 if(k==narrays)break;
439 /* have one more to process, they come in pairs */
440 if(r300_check_offset(dev_priv, payload[i])){
441 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
442 return DRM_ERR(EINVAL);
443 }
444 k++;
445 i++;
446 }
447 /* do the counts match what we expect ? */
448 if((k!=narrays) || (i!=(count+1))){
449 DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
450 return DRM_ERR(EINVAL);
451 }
452
453 /* all clear, output packet */
454
455 BEGIN_RING(count+2);
456 OUT_RING(header);
457 OUT_RING_TABLE(payload, count+1);
458 ADVANCE_RING();
459
460 cmdbuf->buf += (count+2)*4;
461 cmdbuf->bufsz -= (count+2)*4;
462
463 return 0;
464}
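
As a reading aid, the payload layout implied by the walk above (derived from the loop itself, not from hardware documentation):

/* 3D_LOAD_VBPNTR payload as the checker above walks it (dword
 * indices into payload[]; layout inferred from the loop):
 *
 *   payload[0]  number of vertex arrays (narrays)
 *   payload[1]  attribute dword covering arrays 0 and 1
 *   payload[2]  offset of array 0   <- range-checked
 *   payload[3]  offset of array 1   <- range-checked
 *   payload[4]  attribute dword covering arrays 2 and 3
 *   ...one attribute dword per pair of offsets, with a final
 *   unpaired offset when narrays is odd.
 */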
465
466static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
467 drm_radeon_cmd_buffer_t* cmdbuf)
468{
469 u32 header;
470 int count;
471 RING_LOCALS;
472
473 if (4 > cmdbuf->bufsz)
474 return DRM_ERR(EINVAL);
475
476 /* Fixme !! This simply emits a packet without much checking.
477 We need to be smarter. */
478
479 /* obtain first word - actual packet3 header */
480 header = *(u32 __user*)cmdbuf->buf;
481
482 /* Is it packet 3 ? */
483 if( (header>>30)!=0x3 ) {
484 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
485 return DRM_ERR(EINVAL);
486 }
487
488 count=(header>>16) & 0x3fff;
489
490 /* Check again now that we know how much data to expect */
491 if ((count+2)*4 > cmdbuf->bufsz){
492 DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
493 (count+2)*4, cmdbuf->bufsz);
494 return DRM_ERR(EINVAL);
495 }
496
497 /* Is it a packet type we know about ? */
498 switch(header & 0xff00){
499 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
500 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
501
502 case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
503 case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
504 case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
505 case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
506 case RADEON_WAIT_FOR_IDLE:
507 case RADEON_CP_NOP:
508 /* these packets are safe */
509 break;
510 default:
511 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
512 return DRM_ERR(EINVAL);
513 }
514
515
516 BEGIN_RING(count+2);
517 OUT_RING(header);
518 OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
519 ADVANCE_RING();
520
521 cmdbuf->buf += (count+2)*4;
522 cmdbuf->bufsz -= (count+2)*4;
523
524 return 0;
525}
526
527
528/**
529 * Emit a rendering packet3 from userspace.
530 * Called by r300_do_cp_cmdbuf.
531 */
532static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
533 drm_radeon_cmd_buffer_t* cmdbuf,
534 drm_r300_cmd_header_t header)
535{
536 int n;
537 int ret;
538 char __user* orig_buf = cmdbuf->buf;
539 int orig_bufsz = cmdbuf->bufsz;
540
541 /* This is a do-while-loop so that we run the interior at least once,
542 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
543 */
544 n = 0;
545 do {
546 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
547 ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
548 if (ret)
549 return ret;
550
551 cmdbuf->buf = orig_buf;
552 cmdbuf->bufsz = orig_bufsz;
553 }
554
555 switch(header.packet3.packet) {
556 case R300_CMD_PACKET3_CLEAR:
557 DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
558 ret = r300_emit_clear(dev_priv, cmdbuf);
559 if (ret) {
560 DRM_ERROR("r300_emit_clear failed\n");
561 return ret;
562 }
563 break;
564
565 case R300_CMD_PACKET3_RAW:
566 DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
567 ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
568 if (ret) {
569 DRM_ERROR("r300_emit_raw_packet3 failed\n");
570 return ret;
571 }
572 break;
573
574 default:
575 DRM_ERROR("bad packet3 type %i at %p\n",
576 header.packet3.packet,
577 cmdbuf->buf - sizeof(header));
578 return DRM_ERR(EINVAL);
579 }
580
581 n += R300_SIMULTANEOUS_CLIPRECTS;
582 } while(n < cmdbuf->nbox);
583
584 return 0;
585}
586
587/* Some of the R300 chips seem to be extremely touchy about the two registers
588 * that are configured in r300_pacify.
589 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
590 * sends a command buffer that contains only state setting commands and a
591 * vertex program/parameter upload sequence, this will eventually lead to a
592 * lockup, unless the sequence is bracketed by calls to r300_pacify.
593 * So we should take great care to *always* call r300_pacify before
594 * *anything* 3D related, and again afterwards. This is what the
595 * call bracket in r300_do_cp_cmdbuf is for.
596 */
597
598/**
599 * Emit the sequence to pacify R300.
600 */
601static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
602{
603 RING_LOCALS;
604
605 BEGIN_RING(6);
606 OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
607 OUT_RING( 0xa );
608 OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
609 OUT_RING( 0x3 );
610 OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
611 OUT_RING( 0x0 );
612 ADVANCE_RING();
613}
614
615
616/**
617 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
618 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
619 * be careful about how this function is called.
620 */
621static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
622{
623 drm_radeon_private_t *dev_priv = dev->dev_private;
624 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
625
626 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
627 buf->pending = 1;
628 buf->used = 0;
629}
630
631
632/**
633 * Parses and validates a user-supplied command buffer and emits appropriate
634 * commands on the DMA ring buffer.
635 * Called by the ioctl handler function radeon_cp_cmdbuf.
636 */
637int r300_do_cp_cmdbuf(drm_device_t* dev,
638 DRMFILE filp,
639 drm_file_t* filp_priv,
640 drm_radeon_cmd_buffer_t* cmdbuf)
641{
642 drm_radeon_private_t *dev_priv = dev->dev_private;
643 drm_device_dma_t *dma = dev->dma;
644 drm_buf_t *buf = NULL;
645 int emit_dispatch_age = 0;
646 int ret = 0;
647
648 DRM_DEBUG("\n");
649
 650 /* See the comment above r300_pacify for why this call must be here,
 651 * and what the cleanup gotos are for. */
652 r300_pacify(dev_priv);
653
654 if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
655 ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
656 if (ret)
657 goto cleanup;
658 }
659
660 while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
661 int idx;
662 drm_r300_cmd_header_t header;
663
664 header.u = *(unsigned int *)cmdbuf->buf;
665
666 cmdbuf->buf += sizeof(header);
667 cmdbuf->bufsz -= sizeof(header);
668
669 switch(header.header.cmd_type) {
670 case R300_CMD_PACKET0:
671 DRM_DEBUG("R300_CMD_PACKET0\n");
672 ret = r300_emit_packet0(dev_priv, cmdbuf, header);
673 if (ret) {
674 DRM_ERROR("r300_emit_packet0 failed\n");
675 goto cleanup;
676 }
677 break;
678
679 case R300_CMD_VPU:
680 DRM_DEBUG("R300_CMD_VPU\n");
681 ret = r300_emit_vpu(dev_priv, cmdbuf, header);
682 if (ret) {
683 DRM_ERROR("r300_emit_vpu failed\n");
684 goto cleanup;
685 }
686 break;
687
688 case R300_CMD_PACKET3:
689 DRM_DEBUG("R300_CMD_PACKET3\n");
690 ret = r300_emit_packet3(dev_priv, cmdbuf, header);
691 if (ret) {
692 DRM_ERROR("r300_emit_packet3 failed\n");
693 goto cleanup;
694 }
695 break;
696
697 case R300_CMD_END3D:
698 DRM_DEBUG("R300_CMD_END3D\n");
 699 /* TODO:
 700 Ideally the userspace driver should not need to issue this call;
 701 the drm driver should issue it automatically and prevent
 702 lockups.
 703
 704 In practice, we do not understand why this call is needed and what
 705 it does (beyond some vague guesses that it has to do with cache
 706 coherence), and so the userspace driver issues it.
 707
 708 Once we are sure which uses prevent lockups, the code could be moved
 709 into the kernel and the userspace driver would no longer
 710 need to use this command.
 711
 712 Note that issuing this command does not hurt anything
 713 except, possibly, performance. */
714 r300_pacify(dev_priv);
715 break;
716
717 case R300_CMD_CP_DELAY:
718 /* simple enough, we can do it here */
719 DRM_DEBUG("R300_CMD_CP_DELAY\n");
720 {
721 int i;
722 RING_LOCALS;
723
724 BEGIN_RING(header.delay.count);
725 for(i=0;i<header.delay.count;i++)
726 OUT_RING(RADEON_CP_PACKET2);
727 ADVANCE_RING();
728 }
729 break;
730
731 case R300_CMD_DMA_DISCARD:
732 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
733 idx = header.dma.buf_idx;
734 if (idx < 0 || idx >= dma->buf_count) {
735 DRM_ERROR("buffer index %d (of %d max)\n",
736 idx, dma->buf_count - 1);
737 ret = DRM_ERR(EINVAL);
738 goto cleanup;
739 }
740
741 buf = dma->buflist[idx];
742 if (buf->filp != filp || buf->pending) {
743 DRM_ERROR("bad buffer %p %p %d\n",
744 buf->filp, filp, buf->pending);
745 ret = DRM_ERR(EINVAL);
746 goto cleanup;
747 }
748
749 emit_dispatch_age = 1;
750 r300_discard_buffer(dev, buf);
751 break;
752
753 case R300_CMD_WAIT:
754 /* simple enough, we can do it here */
755 DRM_DEBUG("R300_CMD_WAIT\n");
756 if(header.wait.flags==0)break; /* nothing to do */
757
758 {
759 RING_LOCALS;
760
761 BEGIN_RING(2);
762 OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
763 OUT_RING( (header.wait.flags & 0xf)<<14 );
764 ADVANCE_RING();
765 }
766 break;
767
768 default:
769 DRM_ERROR("bad cmd_type %i at %p\n",
770 header.header.cmd_type,
771 cmdbuf->buf - sizeof(header));
772 ret = DRM_ERR(EINVAL);
773 goto cleanup;
774 }
775 }
776
777 DRM_DEBUG("END\n");
778
779cleanup:
780 r300_pacify(dev_priv);
781
782 /* We emit the vertex buffer age here, outside the pacifier "brackets"
783 * for two reasons:
784 * (1) This may coalesce multiple age emissions into a single one and
785 * (2) more importantly, some chips lock up hard when scratch registers
786 * are written inside the pacifier bracket.
787 */
788 if (emit_dispatch_age) {
789 RING_LOCALS;
790
791 /* Emit the vertex buffer age */
792 BEGIN_RING(2);
793 RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
794 ADVANCE_RING();
795 }
796
797 COMMIT_RING();
798
799 return ret;
800}
801
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
new file mode 100644
index 000000000000..c3e7ca3dbe3d
--- /dev/null
+++ b/drivers/char/drm/r300_reg.h
@@ -0,0 +1,1412 @@
1/**************************************************************************
2
3Copyright (C) 2004-2005 Nicolai Haehnle et al.
4
5Permission is hereby granted, free of charge, to any person obtaining a
6copy of this software and associated documentation files (the "Software"),
7to deal in the Software without restriction, including without limitation
8on the rights to use, copy, modify, merge, publish, distribute, sub
9license, and/or sell copies of the Software, and to permit persons to whom
10the Software is furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice (including the next
13paragraph) shall be included in all copies or substantial portions of the
14Software.
15
16THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24**************************************************************************/
25
26#ifndef _R300_REG_H
27#define _R300_REG_H
28
29#define R300_MC_INIT_MISC_LAT_TIMER 0x180
30# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
31# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
32# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
33# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
34# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
35# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
36# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
37# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
38
39
40#define R300_MC_INIT_GFX_LAT_TIMER 0x154
41# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
42# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
43# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
44# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
45# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
46# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
47# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
48# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
49
50/*
51This file contains registers and constants for the R300. They have been
52found mostly by examining command buffers captured using glxtest, as well
53as by extrapolating some known registers and constants from the R200.
54
55I am fairly certain that they are correct unless stated otherwise in comments.
56*/
57
58#define R300_SE_VPORT_XSCALE 0x1D98
59#define R300_SE_VPORT_XOFFSET 0x1D9C
60#define R300_SE_VPORT_YSCALE 0x1DA0
61#define R300_SE_VPORT_YOFFSET 0x1DA4
62#define R300_SE_VPORT_ZSCALE 0x1DA8
63#define R300_SE_VPORT_ZOFFSET 0x1DAC
64
65
66/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
67#define R300_VAP_VF_CNTL 0x2084
68
69# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
70# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
71# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
72# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
73# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
74# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
75# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
76# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
77# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
78# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
79# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
80# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
81
82# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
83 /* State based - direct writes to registers trigger vertex generation */
84# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
85# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
86# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
87# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
88
89 /* I don't think I saw these three used.. */
90# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
91# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
92# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
93
94 /* index size - when not set the indices are assumed to be 16 bit */
95# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
96 /* number of vertices */
97# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
98
99/* BEGIN: Wild guesses */
100#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
101# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
102# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
103# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
104# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
105# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
106# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
107
108#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
109# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
110# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
111# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
112# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
113# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
114# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
115# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
116# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
117/* END */
118
119#define R300_SE_VTE_CNTL 0x20b0
120# define R300_VPORT_X_SCALE_ENA 0x00000001
121# define R300_VPORT_X_OFFSET_ENA 0x00000002
122# define R300_VPORT_Y_SCALE_ENA 0x00000004
123# define R300_VPORT_Y_OFFSET_ENA 0x00000008
124# define R300_VPORT_Z_SCALE_ENA 0x00000010
125# define R300_VPORT_Z_OFFSET_ENA 0x00000020
126# define R300_VTX_XY_FMT 0x00000100
127# define R300_VTX_Z_FMT 0x00000200
128# define R300_VTX_W0_FMT 0x00000400
129# define R300_VTX_W0_NORMALIZE 0x00000800
130# define R300_VTX_ST_DENORMALIZED 0x00001000
131
132/* BEGIN: Vertex data assembly - lots of uncertainties */
133/* gap */
134/* Where do we get our vertex data?
135//
136// Vertex data comes either from immediate mode registers or from
137// vertex arrays.
138// There appears to be no mixed mode (though we can force the pitch of
139// vertex arrays to 0, effectively reusing the same element over and over
140// again).
141//
142// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
143// if these registers influence vertex array processing.
144//
145// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
146//
147// In both cases, vertex attributes are then passed through INPUT_ROUTE.
148
149// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
150// into the vertex processor's input registers.
151// The first word routes the first input, the second word the second, etc.
152// The corresponding input is routed into the register with the given index.
153// The list is ended by a word with INPUT_ROUTE_END set.
154//
155// Always set COMPONENTS_4 in immediate mode. */
156
157#define R300_VAP_INPUT_ROUTE_0_0 0x2150
158# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
159# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
160# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
161# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
162# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
163# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
164# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
165# define R300_VAP_INPUT_ROUTE_END (1 << 13)
166# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
167# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
168# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
169# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
170#define R300_VAP_INPUT_ROUTE_0_1 0x2154
171#define R300_VAP_INPUT_ROUTE_0_2 0x2158
172#define R300_VAP_INPUT_ROUTE_0_3 0x215C
173#define R300_VAP_INPUT_ROUTE_0_4 0x2160
174#define R300_VAP_INPUT_ROUTE_0_5 0x2164
175#define R300_VAP_INPUT_ROUTE_0_6 0x2168
176#define R300_VAP_INPUT_ROUTE_0_7 0x216C
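
/* Illustrative sketch (our addition, not from the captured command buffers):
   composing one INPUT_ROUTE_0 word as described above.  This routes a
   four-component float input into vertex program input register 2 and ends
   the route list; note that R300_INPUT_ROUTE_FLOAT itself is marked GUESS. */
static __inline__ unsigned int r300_example_input_route0_word(void)
{
	return R300_INPUT_ROUTE_COMPONENTS_4
	     | (2 << R300_VAP_INPUT_ROUTE_IDX_SHIFT)
	     | R300_INPUT_ROUTE_FLOAT
	     | R300_VAP_INPUT_ROUTE_END;
}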
177
178/* gap */
179/* Notes:
180// - always set up to produce at least two attributes:
181// if the vertex program uses only position, fglrx will set normal, too
182// - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
183#define R300_VAP_INPUT_CNTL_0 0x2180
184# define R300_INPUT_CNTL_0_COLOR 0x00000001
185#define R300_VAP_INPUT_CNTL_1 0x2184
186# define R300_INPUT_CNTL_POS 0x00000001
187# define R300_INPUT_CNTL_NORMAL 0x00000002
188# define R300_INPUT_CNTL_COLOR 0x00000004
189# define R300_INPUT_CNTL_TC0 0x00000400
190# define R300_INPUT_CNTL_TC1 0x00000800
191# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
192# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
193# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
194# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
195# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
196# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
197
198/* gap */
199/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
200// are set to a swizzling bit pattern, other words are 0.
201//
202// In immediate mode, the pattern is always set to xyzw. In vertex array
203// mode, the swizzling pattern is e.g. used to set zw components in texture
204// coordinates with only two components. */
205#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
206# define R300_INPUT_ROUTE_SELECT_X 0
207# define R300_INPUT_ROUTE_SELECT_Y 1
208# define R300_INPUT_ROUTE_SELECT_Z 2
209# define R300_INPUT_ROUTE_SELECT_W 3
210# define R300_INPUT_ROUTE_SELECT_ZERO 4
211# define R300_INPUT_ROUTE_SELECT_ONE 5
212# define R300_INPUT_ROUTE_SELECT_MASK 7
213# define R300_INPUT_ROUTE_X_SHIFT 0
214# define R300_INPUT_ROUTE_Y_SHIFT 3
215# define R300_INPUT_ROUTE_Z_SHIFT 6
216# define R300_INPUT_ROUTE_W_SHIFT 9
217# define R300_INPUT_ROUTE_ENABLE (15 << 12)
218#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
219#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
220#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
221#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
222#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
223#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
224#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
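
/* Example value (our addition): the identity swizzle xyzw for an
   INPUT_ROUTE_1 word, which is what immediate mode always uses according
   to the comment above. */
#define R300_EXAMPLE_INPUT_ROUTE1_XYZW \
	(R300_INPUT_ROUTE_ENABLE \
	 | (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT) \
	 | (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) \
	 | (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT) \
	 | (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT))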
225
226/* END */
227
228/* gap */
229/* BEGIN: Upload vertex program and data
230// The programmable vertex shader unit has a memory bank of unknown size
231// that can be written to in 16 byte units by writing the address into
232// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
233//
234// Pointers into the memory bank are always in multiples of 16 bytes.
235//
236// The memory bank is divided into areas with fixed meaning.
237//
238// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
239// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
240// whereas the difference between known addresses suggests size 512.
241//
242// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
243// Native reported limits and the VPI layout suggest size 256, whereas
244// difference between known addresses suggests size 512.
245//
246// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
247// floating point pointsize. The exact purpose of this state is uncertain,
248// as there is also the R300_RE_POINTSIZE register.
249//
250// Multiple vertex programs and parameter sets can be loaded at once,
251// which could explain the size discrepancy. */
252#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
253# define R300_PVS_UPLOAD_PROGRAM 0x00000000
254# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
255# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
256/* gap */
257#define R300_VAP_PVS_UPLOAD_DATA 0x2208
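
/* Sketch of the upload protocol described above (our addition; the write32
   callback is a hypothetical stand-in for the driver's register write
   primitive): set UPLOAD_ADDRESS to the program area, then stream the
   instructions through UPLOAD_DATA in multiples of 4 DWORDs. */
static __inline__ void r300_example_upload_vp(void (*write32)(int reg, unsigned int val),
					      const unsigned int *instr, int ndwords)
{
	int i;

	write32(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM);
	for (i = 0; i < ndwords; i++)	/* ndwords must be a multiple of 4 */
		write32(R300_VAP_PVS_UPLOAD_DATA, instr[i]);
}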
258/* END */
259
260/* gap */
261/* I do not know the purpose of this register. However, I do know that
262// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
263// for normal rendering. */
264#define R300_VAP_UNKNOWN_221C 0x221C
265# define R300_221C_NORMAL 0x00000000
266# define R300_221C_CLEAR 0x0001C000
267
268/* gap */
269/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
270// rendering commands and overwriting vertex program parameters.
271// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
272// avoids bugs caused by still running shaders reading bad data from memory. */
273#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
274
275/* Absolutely no clue what this register is about. */
276#define R300_VAP_UNKNOWN_2288 0x2288
277# define R300_2288_R300 0x00750000 /* -- nh */
278# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
279
280/* gap */
281/* Addresses are relative to the vertex program instruction area of the
282// memory bank. PROGRAM_END points to the last instruction of the active
283// program
284//
285// The meaning of the two UNKNOWN fields is obviously not known. However,
286// experiments so far have shown that both *must* point to an instruction
287// inside the vertex program, otherwise the GPU locks up.
288// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
289// CNTL_1_UNKNOWN to the instruction where the last write to position takes place.
290// Most likely this is used to skip the rest of the program in cases where a group of vertices is not visible.
291// For some reason this "section" sometimes also accepts other instructions that have
292// no relationship with position calculations.
293*/
294#define R300_VAP_PVS_CNTL_1 0x22D0
295# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
296# define R300_PVS_CNTL_1_POS_END_SHIFT 10
297# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
298/* Addresses are relative to the vertex program parameters area. */
299#define R300_VAP_PVS_CNTL_2 0x22D4
300# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
301# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
302#define R300_VAP_PVS_CNTL_3 0x22D8
303# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
304# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
305
306/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
307// immediate vertices */
308#define R300_VAP_VTX_COLOR_R 0x2464
309#define R300_VAP_VTX_COLOR_G 0x2468
310#define R300_VAP_VTX_COLOR_B 0x246C
311#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
312#define R300_VAP_VTX_POS_0_Y_1 0x2494
313#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
314#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
315#define R300_VAP_VTX_POS_0_Y_2 0x24A4
316#define R300_VAP_VTX_POS_0_Z_2 0x24A8
317#define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */
318
319/* gap */
320
321/* These are values from r300_reg/r300_reg.h - they are known to be correct
322 and are here so we can use one register file instead of several
323 - Vladimir */
324#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
325# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
326# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
327# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
328# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
329# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
330# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
331# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
332
333#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
334 /* each of the following is 3 bits wide, specifies number
335 of components */
336# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
337# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
338# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
339# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
340# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
341# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
342# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
343# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
344
345/* UNK30 seems to enable point-to-quad transformation on textures
346 (or something closely related to that).
347 This bit is rather fatal at the moment due to shortcomings on the pixel shader side */
348#define R300_GB_ENABLE 0x4008
349# define R300_GB_POINT_STUFF_ENABLE (1<<0)
350# define R300_GB_LINE_STUFF_ENABLE (1<<1)
351# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
352# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
353# define R300_GB_UNK30 (1<<30)
354 /* each of the following is 2 bits wide */
355#define R300_GB_TEX_REPLICATE 0
356#define R300_GB_TEX_ST 1
357#define R300_GB_TEX_STR 2
358# define R300_GB_TEX0_SOURCE_SHIFT 16
359# define R300_GB_TEX1_SOURCE_SHIFT 18
360# define R300_GB_TEX2_SOURCE_SHIFT 20
361# define R300_GB_TEX3_SOURCE_SHIFT 22
362# define R300_GB_TEX4_SOURCE_SHIFT 24
363# define R300_GB_TEX5_SOURCE_SHIFT 26
364# define R300_GB_TEX6_SOURCE_SHIFT 28
365# define R300_GB_TEX7_SOURCE_SHIFT 30
366
367/* MSPOS - positions for multisample antialiasing (?) */
368#define R300_GB_MSPOS0 0x4010
369 /* shifts - each of the fields is 4 bits */
370# define R300_GB_MSPOS0__MS_X0_SHIFT 0
371# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
372# define R300_GB_MSPOS0__MS_X1_SHIFT 8
373# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
374# define R300_GB_MSPOS0__MS_X2_SHIFT 16
375# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
376# define R300_GB_MSPOS0__MSBD0_Y 24
377# define R300_GB_MSPOS0__MSBD0_X 28
378
379#define R300_GB_MSPOS1 0x4014
380# define R300_GB_MSPOS1__MS_X3_SHIFT 0
381# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
382# define R300_GB_MSPOS1__MS_X4_SHIFT 8
383# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
384# define R300_GB_MSPOS1__MS_X5_SHIFT 16
385# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
386# define R300_GB_MSPOS1__MSBD1 24
387
388
389#define R300_GB_TILE_CONFIG 0x4018
390# define R300_GB_TILE_ENABLE (1<<0)
391# define R300_GB_TILE_PIPE_COUNT_RV300 0
392# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
393# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
394# define R300_GB_TILE_SIZE_8 0
395# define R300_GB_TILE_SIZE_16 (1<<4)
396# define R300_GB_TILE_SIZE_32 (2<<4)
397# define R300_GB_SUPER_SIZE_1 (0<<6)
398# define R300_GB_SUPER_SIZE_2 (1<<6)
399# define R300_GB_SUPER_SIZE_4 (2<<6)
400# define R300_GB_SUPER_SIZE_8 (3<<6)
401# define R300_GB_SUPER_SIZE_16 (4<<6)
402# define R300_GB_SUPER_SIZE_32 (5<<6)
403# define R300_GB_SUPER_SIZE_64 (6<<6)
404# define R300_GB_SUPER_SIZE_128 (7<<6)
405# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
406# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
407# define R300_GB_SUPER_TILE_A 0
408# define R300_GB_SUPER_TILE_B (1<<15)
409# define R300_GB_SUBPIXEL_1_12 0
410# define R300_GB_SUBPIXEL_1_16 (1<<16)
411
412#define R300_GB_FIFO_SIZE 0x4024
413 /* each of the following is 2 bits wide */
414#define R300_GB_FIFO_SIZE_32 0
415#define R300_GB_FIFO_SIZE_64 1
416#define R300_GB_FIFO_SIZE_128 2
417#define R300_GB_FIFO_SIZE_256 3
418# define R300_SC_IFIFO_SIZE_SHIFT 0
419# define R300_SC_TZFIFO_SIZE_SHIFT 2
420# define R300_SC_BFIFO_SIZE_SHIFT 4
421
422# define R300_US_OFIFO_SIZE_SHIFT 12
423# define R300_US_WFIFO_SIZE_SHIFT 14
424 /* the following use the same constants as above, but the meaning
425 is times 2 (i.e. instead of 32 words it means 64) */
426# define R300_RS_TFIFO_SIZE_SHIFT 6
427# define R300_RS_CFIFO_SIZE_SHIFT 8
428# define R300_US_RAM_SIZE_SHIFT 10
429 /* watermarks, 3 bits wide */
430# define R300_RS_HIGHWATER_COL_SHIFT 16
431# define R300_RS_HIGHWATER_TEX_SHIFT 19
432# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
433# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
434
435#define R300_GB_SELECT 0x401C
436# define R300_GB_FOG_SELECT_C0A 0
437# define R300_GB_FOG_SELECT_C1A 1
438# define R300_GB_FOG_SELECT_C2A 2
439# define R300_GB_FOG_SELECT_C3A 3
440# define R300_GB_FOG_SELECT_1_1_W 4
441# define R300_GB_FOG_SELECT_Z 5
442# define R300_GB_DEPTH_SELECT_Z 0
443# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
444# define R300_GB_W_SELECT_1_W 0
445# define R300_GB_W_SELECT_1 (1<<4)
446
447#define R300_GB_AA_CONFIG 0x4020
448# define R300_AA_ENABLE 0x01
449# define R300_AA_SUBSAMPLES_2 0
450# define R300_AA_SUBSAMPLES_3 (1<<1)
451# define R300_AA_SUBSAMPLES_4 (2<<1)
452# define R300_AA_SUBSAMPLES_6 (3<<1)
453
454/* END */
455
456/* gap */
457/* The upper enable bits are guessed, based on fglrx reported limits. */
458#define R300_TX_ENABLE 0x4104
459# define R300_TX_ENABLE_0 (1 << 0)
460# define R300_TX_ENABLE_1 (1 << 1)
461# define R300_TX_ENABLE_2 (1 << 2)
462# define R300_TX_ENABLE_3 (1 << 3)
463# define R300_TX_ENABLE_4 (1 << 4)
464# define R300_TX_ENABLE_5 (1 << 5)
465# define R300_TX_ENABLE_6 (1 << 6)
466# define R300_TX_ENABLE_7 (1 << 7)
467# define R300_TX_ENABLE_8 (1 << 8)
468# define R300_TX_ENABLE_9 (1 << 9)
469# define R300_TX_ENABLE_10 (1 << 10)
470# define R300_TX_ENABLE_11 (1 << 11)
471# define R300_TX_ENABLE_12 (1 << 12)
472# define R300_TX_ENABLE_13 (1 << 13)
473# define R300_TX_ENABLE_14 (1 << 14)
474# define R300_TX_ENABLE_15 (1 << 15)
475
476/* The pointsize is given in multiples of 6. The pointsize can be
477// enormous: Clear() renders a single point that fills the entire
478// framebuffer. */
479#define R300_RE_POINTSIZE 0x421C
480# define R300_POINTSIZE_Y_SHIFT 0
481# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
482# define R300_POINTSIZE_X_SHIFT 16
483# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
484# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
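
/* Illustrative helper (our addition): encoding a point size of n pixels;
   per the comment above, both fields are given in multiples of 6. */
static __inline__ unsigned int r300_example_pointsize(unsigned int n)
{
	return ((n * 6) << R300_POINTSIZE_X_SHIFT)
	     | ((n * 6) << R300_POINTSIZE_Y_SHIFT);
}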
485
486/* The line width is given in multiples of 6.
487 In default mode lines are classified as vertical lines.
488 HO: horizontal
489 VE: vertical or horizontal
490 HO & VE: no classification
491*/
492#define R300_RE_LINE_CNT 0x4234
493# define R300_LINESIZE_SHIFT 0
494# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
495# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
496# define R300_LINE_CNT_HO (1 << 16)
497# define R300_LINE_CNT_VE (1 << 17)
498
499/* Some sort of scale or clamp value for texcoordless textures. */
500#define R300_RE_UNK4238 0x4238
501
502#define R300_RE_SHADE_MODEL 0x4278
503# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
504# define R300_RE_SHADE_MODEL_FLAT 0x39595
505
506/* Dangerous */
507#define R300_RE_POLYGON_MODE 0x4288
508# define R300_PM_ENABLED (1 << 0)
509# define R300_PM_FRONT_POINT (0 << 0)
510# define R300_PM_BACK_POINT (0 << 0)
511# define R300_PM_FRONT_LINE (1 << 4)
512# define R300_PM_FRONT_FILL (1 << 5)
513# define R300_PM_BACK_LINE (1 << 7)
514# define R300_PM_BACK_FILL (1 << 8)
515
516/* Not sure why there are duplicates of the factor and constant values.
517 My best guess so far is that there are separate zbiases for test and write.
518 Ordering might be wrong.
519 Some of the tests indicate that fgl has a fallback implementation of zbias
520 via pixel shaders. */
521#define R300_RE_ZBIAS_T_FACTOR 0x42A4
522#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
523#define R300_RE_ZBIAS_W_FACTOR 0x42AC
524#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
525
526/* This register needs to be set to (1<<1) for RV350 to correctly
527 perform depth test (see --vb-triangles in r300_demo)
528 Don't know about other chips. - Vladimir
529 This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
530 My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
531 One to enable depth test and one for depth write.
532 Yet this doesn't explain why depth writes work ...
533 */
534#define R300_RE_OCCLUSION_CNTL 0x42B4
535# define R300_OCCLUSION_ON (1<<1)
536
537#define R300_RE_CULL_CNTL 0x42B8
538# define R300_CULL_FRONT (1 << 0)
539# define R300_CULL_BACK (1 << 1)
540# define R300_FRONT_FACE_CCW (0 << 2)
541# define R300_FRONT_FACE_CW (1 << 2)
542
543
544/* BEGIN: Rasterization / Interpolators - many guesses
545// 0_UNKNOWN_18 has always been set except for clear operations.
546// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
547// on the vertex program, *not* the fragment program) */
548#define R300_RS_CNTL_0 0x4300
549# define R300_RS_CNTL_TC_CNT_SHIFT 2
550# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
551# define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */
552# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
553/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
554#define R300_RS_CNTL_1 0x4304
555
556/* gap */
557/* Only used for texture coordinates.
558// Use the source field to route texture coordinate input from the vertex program
559// to the desired interpolator. Note that the source field is relative to the
560// outputs the vertex program *actually* writes. If a vertex program only writes
561// texcoord[1], this will be source index 0.
562// Set INTERP_USED on all interpolators that produce data used by the
563// fragment program. INTERP_USED looks like a swizzling mask, but
564// I haven't seen it used that way.
565//
566// Note: The _UNKNOWN constants are always set in their respective register.
567// I don't know if this is necessary. */
568#define R300_RS_INTERP_0 0x4310
569#define R300_RS_INTERP_1 0x4314
570# define R300_RS_INTERP_1_UNKNOWN 0x40
571#define R300_RS_INTERP_2 0x4318
572# define R300_RS_INTERP_2_UNKNOWN 0x80
573#define R300_RS_INTERP_3 0x431C
574# define R300_RS_INTERP_3_UNKNOWN 0xC0
575#define R300_RS_INTERP_4 0x4320
576#define R300_RS_INTERP_5 0x4324
577#define R300_RS_INTERP_6 0x4328
578#define R300_RS_INTERP_7 0x432C
579# define R300_RS_INTERP_SRC_SHIFT 2
580# define R300_RS_INTERP_SRC_MASK (7 << 2)
581# define R300_RS_INTERP_USED 0x00D10000
582
583/* These DWORDs control how vertex data is routed into fragment program
584// registers, after interpolators. */
585#define R300_RS_ROUTE_0 0x4330
586#define R300_RS_ROUTE_1 0x4334
587#define R300_RS_ROUTE_2 0x4338
588#define R300_RS_ROUTE_3 0x433C /* GUESS */
589#define R300_RS_ROUTE_4 0x4340 /* GUESS */
590#define R300_RS_ROUTE_5 0x4344 /* GUESS */
591#define R300_RS_ROUTE_6 0x4348 /* GUESS */
592#define R300_RS_ROUTE_7 0x434C /* GUESS */
593# define R300_RS_ROUTE_SOURCE_INTERP_0 0
594# define R300_RS_ROUTE_SOURCE_INTERP_1 1
595# define R300_RS_ROUTE_SOURCE_INTERP_2 2
596# define R300_RS_ROUTE_SOURCE_INTERP_3 3
597# define R300_RS_ROUTE_SOURCE_INTERP_4 4
598# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
599# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
600# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
601# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
602# define R300_RS_ROUTE_DEST_SHIFT 6
603# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
604
605/* Special handling for color: When the fragment program uses color,
606// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
607// color register index. */
608# define R300_RS_ROUTE_0_COLOR (1 << 14)
609# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
610# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
611/* As above, but for secondary color */
612# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
613# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
614# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
615# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
616/* END */
617
618/* BEGIN: Scissors and cliprects
619// There are four clipping rectangles. Their corner coordinates are inclusive.
620// Every pixel is assigned a number between 0 and 15 by setting bits 0-3 depending
621// on whether the pixel is inside cliprects 0-3, respectively. For example,
622// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
623// the number 3 (binary 0011).
624// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
625// the pixel is rasterized.
626//
627// In addition to this, there is a scissors rectangle. Only pixels inside the
628// scissors rectangle are drawn. (coordinates are inclusive)
629//
630// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
631// for the purpose of clipping and scissors. */
632#define R300_RE_CLIPRECT_TL_0 0x43B0
633#define R300_RE_CLIPRECT_BR_0 0x43B4
634#define R300_RE_CLIPRECT_TL_1 0x43B8
635#define R300_RE_CLIPRECT_BR_1 0x43BC
636#define R300_RE_CLIPRECT_TL_2 0x43C0
637#define R300_RE_CLIPRECT_BR_2 0x43C4
638#define R300_RE_CLIPRECT_TL_3 0x43C8
639#define R300_RE_CLIPRECT_BR_3 0x43CC
640# define R300_CLIPRECT_OFFSET 1440
641# define R300_CLIPRECT_MASK 0x1FFF
642# define R300_CLIPRECT_X_SHIFT 0
643# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
644# define R300_CLIPRECT_Y_SHIFT 13
645# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
646#define R300_RE_CLIPRECT_CNTL 0x43D0
647# define R300_CLIP_OUT (1 << 0)
648# define R300_CLIP_0 (1 << 1)
649# define R300_CLIP_1 (1 << 2)
650# define R300_CLIP_10 (1 << 3)
651# define R300_CLIP_2 (1 << 4)
652# define R300_CLIP_20 (1 << 5)
653# define R300_CLIP_21 (1 << 6)
654# define R300_CLIP_210 (1 << 7)
655# define R300_CLIP_3 (1 << 8)
656# define R300_CLIP_30 (1 << 9)
657# define R300_CLIP_31 (1 << 10)
658# define R300_CLIP_310 (1 << 11)
659# define R300_CLIP_32 (1 << 12)
660# define R300_CLIP_320 (1 << 13)
661# define R300_CLIP_321 (1 << 14)
662# define R300_CLIP_3210 (1 << 15)
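
/* Sketch (our reading of the comment above): encoding one cliprect corner
   at framebuffer coordinates (x, y), biased by the 1440 offset. */
static __inline__ unsigned int r300_example_cliprect_coord(int x, int y)
{
	return (((x + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)
		<< R300_CLIPRECT_X_SHIFT)
	     | (((y + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)
		<< R300_CLIPRECT_Y_SHIFT);
}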
663
664/* gap */
665#define R300_RE_SCISSORS_TL 0x43E0
666#define R300_RE_SCISSORS_BR 0x43E4
667# define R300_SCISSORS_OFFSET 1440
668# define R300_SCISSORS_X_SHIFT 0
669# define R300_SCISSORS_X_MASK (0x1FFF << 0)
670# define R300_SCISSORS_Y_SHIFT 13
671# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
672/* END */
673
674/* BEGIN: Texture specification
675// The texture specification dwords are grouped by meaning and not by texture unit.
676// This means that e.g. the offset for texture image unit N is found in register
677// TX_OFFSET_0 + (4*N) */
678#define R300_TX_FILTER_0 0x4400
679# define R300_TX_REPEAT 0
680# define R300_TX_MIRRORED 1
681# define R300_TX_CLAMP 4
682# define R300_TX_CLAMP_TO_EDGE 2
683# define R300_TX_CLAMP_TO_BORDER 6
684# define R300_TX_WRAP_S_SHIFT 0
685# define R300_TX_WRAP_S_MASK (7 << 0)
686# define R300_TX_WRAP_T_SHIFT 3
687# define R300_TX_WRAP_T_MASK (7 << 3)
688# define R300_TX_WRAP_Q_SHIFT 6
689# define R300_TX_WRAP_Q_MASK (7 << 6)
690# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
691# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
692# define R300_TX_MAG_FILTER_MASK (3 << 9)
693# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
694# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
695# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
696# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
697# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
698# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
699
700/* NOTE: NEAREST doesn't seem to exist.
701 I'm not setting MAG_FILTER_MASK and (3 << 11) for all
702 anisotropy modes because that would override the selected mag filter */
703# define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
704# define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
705# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
706# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
707# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
708# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
709# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
710# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
711# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
712# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
713# define R300_TX_MAX_ANISO_MASK (14 << 21)
714
715#define R300_TX_UNK1_0 0x4440
716# define R300_LOD_BIAS_MASK 0x1fff
717
718#define R300_TX_SIZE_0 0x4480
719# define R300_TX_WIDTHMASK_SHIFT 0
720# define R300_TX_WIDTHMASK_MASK (2047 << 0)
721# define R300_TX_HEIGHTMASK_SHIFT 11
722# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
723# define R300_TX_UNK23 (1 << 23)
724# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
725# define R300_TX_SIZE_MASK (15 << 26)
726#define R300_TX_FORMAT_0 0x44C0
727 /* The interpretation of the format word by Wladimir van der Laan */
728 /* The X, Y, Z and W refer to the layout of the components.
729 They are given meanings as R, G, B and Alpha by the swizzle
730 specification */
731# define R300_TX_FORMAT_X8 0x0
732# define R300_TX_FORMAT_X16 0x1
733# define R300_TX_FORMAT_Y4X4 0x2
734# define R300_TX_FORMAT_Y8X8 0x3
735# define R300_TX_FORMAT_Y16X16 0x4
736# define R300_TX_FORMAT_Z3Y3X2 0x5
737# define R300_TX_FORMAT_Z5Y6X5 0x6
738# define R300_TX_FORMAT_Z6Y5X5 0x7
739# define R300_TX_FORMAT_Z11Y11X10 0x8
740# define R300_TX_FORMAT_Z10Y11X11 0x9
741# define R300_TX_FORMAT_W4Z4Y4X4 0xA
742# define R300_TX_FORMAT_W1Z5Y5X5 0xB
743# define R300_TX_FORMAT_W8Z8Y8X8 0xC
744# define R300_TX_FORMAT_W2Z10Y10X10 0xD
745# define R300_TX_FORMAT_W16Z16Y16X16 0xE
746# define R300_TX_FORMAT_DXT1 0xF
747# define R300_TX_FORMAT_DXT3 0x10
748# define R300_TX_FORMAT_DXT5 0x11
749# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
750# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
751# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
752# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
753 /* 0x16 - some 16-bit green format? */
754# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
755
756 /* gap */
757 /* Floating point formats */
758 /* Note - hardware supports both 16 and 32 bit floating point */
759# define R300_TX_FORMAT_FL_I16 0x18
760# define R300_TX_FORMAT_FL_I16A16 0x19
761# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
762# define R300_TX_FORMAT_FL_I32 0x1B
763# define R300_TX_FORMAT_FL_I32A32 0x1C
764# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
765 /* alpha modes, convenience mostly */
766 /* if you have alpha, pick constant appropriate to the
767 number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
768# define R300_TX_FORMAT_ALPHA_1CH 0x000
769# define R300_TX_FORMAT_ALPHA_2CH 0x200
770# define R300_TX_FORMAT_ALPHA_4CH 0x600
771# define R300_TX_FORMAT_ALPHA_NONE 0xA00
772 /* Swizzling */
773 /* constants */
774# define R300_TX_FORMAT_X 0
775# define R300_TX_FORMAT_Y 1
776# define R300_TX_FORMAT_Z 2
777# define R300_TX_FORMAT_W 3
778# define R300_TX_FORMAT_ZERO 4
779# define R300_TX_FORMAT_ONE 5
780# define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */
781# define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */
782
783# define R300_TX_FORMAT_B_SHIFT 18
784# define R300_TX_FORMAT_G_SHIFT 15
785# define R300_TX_FORMAT_R_SHIFT 12
786# define R300_TX_FORMAT_A_SHIFT 9
787 /* Convenience macro to take care of layout and swizzling */
788# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
789 ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
790 | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
791 | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
792 | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
793 | (R300_TX_FORMAT_##FMT) \
794 )
795 /* These can be ORed with result of R300_EASY_TX_FORMAT() */
796 /* We don't really know what they do. Take values from a constant color ? */
797# define R300_TX_FORMAT_CONST_X (1<<5)
798# define R300_TX_FORMAT_CONST_Y (2<<5)
799# define R300_TX_FORMAT_CONST_Z (4<<5)
800# define R300_TX_FORMAT_CONST_W (8<<5)
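
 /* Usage sketch (our addition): a W8Z8Y8X8 texture read with the identity
    swizzle (X->R, Y->G, Z->B, W->A) and alpha taken from the 4th channel.
    This particular combination is illustrative, not taken from fglrx. */
# define R300_EXAMPLE_TX_FORMAT_RGBA8888 \
	(R300_EASY_TX_FORMAT(Z, Y, X, W, W8Z8Y8X8) | R300_TX_FORMAT_ALPHA_4CH)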
801
802# define R300_TX_FORMAT_YUV_MODE 0x00800000
803
804#define R300_TX_OFFSET_0 0x4540
805/* BEGIN: Guess from R200 */
806# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
807# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
808# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
809# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
810# define R300_TXO_OFFSET_MASK 0xffffffe0
811# define R300_TXO_OFFSET_SHIFT 5
812/* END */
813#define R300_TX_UNK4_0 0x4580
814#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
815
816/* END */
817
818/* BEGIN: Fragment program instruction set
819// Fragment programs are written directly into register space.
820// There are separate instruction streams for texture instructions and ALU
821// instructions.
822// In order to synchronize these streams, the program is divided into up
823// to 4 nodes. Each node begins with a number of TEX operations, followed
824// by a number of ALU operations.
825// The first node can have zero TEX ops; all subsequent nodes must have at least
826// one TEX op.
827// All nodes must have at least one ALU op.
828//
829// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
830// 1 node, a value of 3 means 4 nodes.
831// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
832// offsets into the respective instruction streams, while *_END points to the
833// last instruction relative to this offset. */
834#define R300_PFS_CNTL_0 0x4600
835# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
836# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
837# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
838#define R300_PFS_CNTL_1 0x4604
839/* There is an unshifted value here which has so far always been equal to the
840// index of the highest used temporary register. */
841#define R300_PFS_CNTL_2 0x4608
842# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
843# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
844# define R300_PFS_CNTL_ALU_END_SHIFT 6
845# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
846# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
847# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
848# define R300_PFS_CNTL_TEX_END_SHIFT 18
849# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
850
851/* gap */
852/* Nodes are stored backwards. The last active node is always stored in
853// PFS_NODE_3.
854// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
855// first node is stored in NODE_2, the second node is stored in NODE_3.
856//
857// Offsets are relative to the master offset from PFS_CNTL_2.
858// LAST_NODE is set for the last node, and only for the last node. */
859#define R300_PFS_NODE_0 0x4610
860#define R300_PFS_NODE_1 0x4614
861#define R300_PFS_NODE_2 0x4618
862#define R300_PFS_NODE_3 0x461C
863# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
864# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
865# define R300_PFS_NODE_ALU_END_SHIFT 6
866# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
867# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
868# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
869# define R300_PFS_NODE_TEX_END_SHIFT 17
870# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
871# define R300_PFS_NODE_LAST_NODE (1 << 22)
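
/* Sketch (our interpretation of the comments above): a single-node program
   with n_tex TEX and n_alu ALU instructions lives entirely in PFS_NODE_3,
   with NODE_0..NODE_2 left at zero and the *_END fields set to count - 1. */
static __inline__ unsigned int r300_example_single_node(int n_alu, int n_tex)
{
	return (0 << R300_PFS_NODE_ALU_OFFSET_SHIFT)
	     | ((n_alu - 1) << R300_PFS_NODE_ALU_END_SHIFT)
	     | (0 << R300_PFS_NODE_TEX_OFFSET_SHIFT)
	     | ((n_tex - 1) << R300_PFS_NODE_TEX_END_SHIFT)
	     | R300_PFS_NODE_LAST_NODE;
}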
872
873/* TEX
874// As far as I can tell, texture instructions cannot write into output
875// registers directly. A subsequent ALU instruction is always necessary,
876// even if it's just MAD o0, r0, 1, 0 */
877#define R300_PFS_TEXI_0 0x4620
878# define R300_FPITX_SRC_SHIFT 0
879# define R300_FPITX_SRC_MASK (31 << 0)
880# define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
881# define R300_FPITX_DST_SHIFT 6
882# define R300_FPITX_DST_MASK (31 << 6)
883# define R300_FPITX_IMAGE_SHIFT 11
884# define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */
885/* Unsure if these are opcodes, or some kind of bitfield, but this is how
886 * they were set when I checked
887 */
888# define R300_FPITX_OPCODE_SHIFT 15
889# define R300_FPITX_OP_TEX 1
890# define R300_FPITX_OP_TXP 3
891# define R300_FPITX_OP_TXB 4
892
893/* ALU
894// The ALU instruction register blocks are enumerated according to the order
895// in which fglrx writes them. I assume there is space for 64 instructions, since
896// each block has space for a maximum of 64 DWORDs, and this matches reported
897// native limits.
898//
899// The basic functional block seems to be one MAD for each color and alpha,
900// and an adder that adds all components after the MUL.
901// - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
902// - DP4: Use OUTC_DP4, OUTA_DP4
903// - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
904// - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
905// - CMP: If ARG2 < 0, return ARG1, else return ARG0
906// - FLR: use FRC+MAD
907// - XPD: use MAD+MAD
908// - SGE, SLT: use MAD+CMP
909// - RSQ: use ABS modifier for argument
910// - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
911// into color register
912// - apparently, there's no quick DST operation
913// - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
914// - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
915// - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
916//
917// Operand selection
918// First stage selects three sources from the available registers and
919// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
920// fglrx sorts the three source fields: Registers before constants,
921// lower indices before higher indices; I do not know whether this is necessary.
922// fglrx fills unused sources with "read constant 0"
923// According to specs, you cannot select more than two different constants.
924//
925// Second stage selects the operands from the sources. This is defined in
926// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
927// zero and one.
928// Swizzling and negation happens in this stage, as well.
929//
930// Important: Color and alpha seem to be mostly separate, i.e. their sources
931// selection appears to be fully independent (the register storage is probably
932// physically split into a color and an alpha section).
933// However (because of the apparent physical split), there is some interaction
934// WRT swizzling. If, for example, you want to load an R component into an
935// Alpha operand, this R component is taken from a *color* source, not from
936// an alpha source. The corresponding register doesn't even have to appear in
937// the alpha sources list. (I hope this all makes sense to you)
938//
939// Destination selection
940// The destination register index is in FPI1 (color) and FPI3 (alpha) together
941// with enable bits.
942// There are separate enable bits for writing into temporary registers
943// (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
944// You can write to both at once, or not write at all (the same index
945// must be used for both).
946//
947// Note: There is a special form for LRP
948// - Argument order is the same as in ARB_fragment_program.
949// - Operation is MAD
950// - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
951// - Set FPI0/FPI2_SPECIAL_LRP
952// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
953#define R300_PFS_INSTR1_0 0x46C0
954# define R300_FPI1_SRC0C_SHIFT 0
955# define R300_FPI1_SRC0C_MASK (31 << 0)
956# define R300_FPI1_SRC0C_CONST (1 << 5)
957# define R300_FPI1_SRC1C_SHIFT 6
958# define R300_FPI1_SRC1C_MASK (31 << 6)
959# define R300_FPI1_SRC1C_CONST (1 << 11)
960# define R300_FPI1_SRC2C_SHIFT 12
961# define R300_FPI1_SRC2C_MASK (31 << 12)
962# define R300_FPI1_SRC2C_CONST (1 << 17)
963# define R300_FPI1_DSTC_SHIFT 18
964# define R300_FPI1_DSTC_MASK (31 << 18)
965# define R300_FPI1_DSTC_REG_X (1 << 23)
966# define R300_FPI1_DSTC_REG_Y (1 << 24)
967# define R300_FPI1_DSTC_REG_Z (1 << 25)
968# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
969# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
970# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
971
972#define R300_PFS_INSTR3_0 0x47C0
973# define R300_FPI3_SRC0A_SHIFT 0
974# define R300_FPI3_SRC0A_MASK (31 << 0)
975# define R300_FPI3_SRC0A_CONST (1 << 5)
976# define R300_FPI3_SRC1A_SHIFT 6
977# define R300_FPI3_SRC1A_MASK (31 << 6)
978# define R300_FPI3_SRC1A_CONST (1 << 11)
979# define R300_FPI3_SRC2A_SHIFT 12
980# define R300_FPI3_SRC2A_MASK (31 << 12)
981# define R300_FPI3_SRC2A_CONST (1 << 17)
982# define R300_FPI3_DSTA_SHIFT 18
983# define R300_FPI3_DSTA_MASK (31 << 18)
984# define R300_FPI3_DSTA_REG (1 << 23)
985# define R300_FPI3_DSTA_OUTPUT (1 << 24)
986
987#define R300_PFS_INSTR0_0 0x48C0
988# define R300_FPI0_ARGC_SRC0C_XYZ 0
989# define R300_FPI0_ARGC_SRC0C_XXX 1
990# define R300_FPI0_ARGC_SRC0C_YYY 2
991# define R300_FPI0_ARGC_SRC0C_ZZZ 3
992# define R300_FPI0_ARGC_SRC1C_XYZ 4
993# define R300_FPI0_ARGC_SRC1C_XXX 5
994# define R300_FPI0_ARGC_SRC1C_YYY 6
995# define R300_FPI0_ARGC_SRC1C_ZZZ 7
996# define R300_FPI0_ARGC_SRC2C_XYZ 8
997# define R300_FPI0_ARGC_SRC2C_XXX 9
998# define R300_FPI0_ARGC_SRC2C_YYY 10
999# define R300_FPI0_ARGC_SRC2C_ZZZ 11
1000# define R300_FPI0_ARGC_SRC0A 12
1001# define R300_FPI0_ARGC_SRC1A 13
1002# define R300_FPI0_ARGC_SRC2A 14
1003# define R300_FPI0_ARGC_SRC1C_LRP 15
1004# define R300_FPI0_ARGC_ZERO 20
1005# define R300_FPI0_ARGC_ONE 21
1006# define R300_FPI0_ARGC_HALF 22 /* GUESS */
1007# define R300_FPI0_ARGC_SRC0C_YZX 23
1008# define R300_FPI0_ARGC_SRC1C_YZX 24
1009# define R300_FPI0_ARGC_SRC2C_YZX 25
1010# define R300_FPI0_ARGC_SRC0C_ZXY 26
1011# define R300_FPI0_ARGC_SRC1C_ZXY 27
1012# define R300_FPI0_ARGC_SRC2C_ZXY 28
1013# define R300_FPI0_ARGC_SRC0CA_WZY 29
1014# define R300_FPI0_ARGC_SRC1CA_WZY 30
1015# define R300_FPI0_ARGC_SRC2CA_WZY 31
1016
1017# define R300_FPI0_ARG0C_SHIFT 0
1018# define R300_FPI0_ARG0C_MASK (31 << 0)
1019# define R300_FPI0_ARG0C_NEG (1 << 5)
1020# define R300_FPI0_ARG0C_ABS (1 << 6)
1021# define R300_FPI0_ARG1C_SHIFT 7
1022# define R300_FPI0_ARG1C_MASK (31 << 7)
1023# define R300_FPI0_ARG1C_NEG (1 << 12)
1024# define R300_FPI0_ARG1C_ABS (1 << 13)
1025# define R300_FPI0_ARG2C_SHIFT 14
1026# define R300_FPI0_ARG2C_MASK (31 << 14)
1027# define R300_FPI0_ARG2C_NEG (1 << 19)
1028# define R300_FPI0_ARG2C_ABS (1 << 20)
1029# define R300_FPI0_SPECIAL_LRP (1 << 21)
1030# define R300_FPI0_OUTC_MAD (0 << 23)
1031# define R300_FPI0_OUTC_DP3 (1 << 23)
1032# define R300_FPI0_OUTC_DP4 (2 << 23)
1033# define R300_FPI0_OUTC_MIN (4 << 23)
1034# define R300_FPI0_OUTC_MAX (5 << 23)
1035# define R300_FPI0_OUTC_CMP (8 << 23)
1036# define R300_FPI0_OUTC_FRC (9 << 23)
1037# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
1038# define R300_FPI0_OUTC_SAT (1 << 30)
1039# define R300_FPI0_UNKNOWN_31 (1 << 31)
1040
1041#define R300_PFS_INSTR2_0 0x49C0
1042# define R300_FPI2_ARGA_SRC0C_X 0
1043# define R300_FPI2_ARGA_SRC0C_Y 1
1044# define R300_FPI2_ARGA_SRC0C_Z 2
1045# define R300_FPI2_ARGA_SRC1C_X 3
1046# define R300_FPI2_ARGA_SRC1C_Y 4
1047# define R300_FPI2_ARGA_SRC1C_Z 5
1048# define R300_FPI2_ARGA_SRC2C_X 6
1049# define R300_FPI2_ARGA_SRC2C_Y 7
1050# define R300_FPI2_ARGA_SRC2C_Z 8
1051# define R300_FPI2_ARGA_SRC0A 9
1052# define R300_FPI2_ARGA_SRC1A 10
1053# define R300_FPI2_ARGA_SRC2A 11
1054# define R300_FPI2_ARGA_SRC1A_LRP 15
1055# define R300_FPI2_ARGA_ZERO 16
1056# define R300_FPI2_ARGA_ONE 17
1057# define R300_FPI2_ARGA_HALF 18 /* GUESS */
1058
1059# define R300_FPI2_ARG0A_SHIFT 0
1060# define R300_FPI2_ARG0A_MASK (31 << 0)
1061# define R300_FPI2_ARG0A_NEG (1 << 5)
1062# define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
1063# define R300_FPI2_ARG1A_SHIFT 7
1064# define R300_FPI2_ARG1A_MASK (31 << 7)
1065# define R300_FPI2_ARG1A_NEG (1 << 12)
1066# define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
1067# define R300_FPI2_ARG2A_SHIFT 14
1068# define R300_FPI2_ARG2A_MASK (31 << 14)
1069# define R300_FPI2_ARG2A_NEG (1 << 19)
1070# define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
1071# define R300_FPI2_SPECIAL_LRP (1 << 21)
1072# define R300_FPI2_OUTA_MAD (0 << 23)
1073# define R300_FPI2_OUTA_DP4 (1 << 23)
1074# define R300_FPI2_OUTA_MIN (2 << 23)
1075# define R300_FPI2_OUTA_MAX (3 << 23)
1076# define R300_FPI2_OUTA_CMP (6 << 23)
1077# define R300_FPI2_OUTA_FRC (7 << 23)
1078# define R300_FPI2_OUTA_EX2 (8 << 23)
1079# define R300_FPI2_OUTA_LG2 (9 << 23)
1080# define R300_FPI2_OUTA_RCP (10 << 23)
1081# define R300_FPI2_OUTA_RSQ (11 << 23)
1082# define R300_FPI2_OUTA_SAT (1 << 30)
1083# define R300_FPI2_UNKNOWN_31 (1 << 31)
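
/* Worked example (our addition, assembled from the fields above, not taken
   from a captured command buffer): the color half of
   "MAD tmp0.xyz, tmp0, tmp1, tmp2".  INSTR1 selects temporaries 0, 1, 2 as
   sources and temporary 0 as destination; INSTR0 feeds all three sources
   through unswizzled and picks the MAD opcode. */
# define R300_EXAMPLE_MAD_INSTR1 \
	((0 << R300_FPI1_SRC0C_SHIFT) | (1 << R300_FPI1_SRC1C_SHIFT) \
	 | (2 << R300_FPI1_SRC2C_SHIFT) | (0 << R300_FPI1_DSTC_SHIFT) \
	 | R300_FPI1_DSTC_REG_X | R300_FPI1_DSTC_REG_Y | R300_FPI1_DSTC_REG_Z)
# define R300_EXAMPLE_MAD_INSTR0 \
	((R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) \
	 | (R300_FPI0_ARGC_SRC1C_XYZ << R300_FPI0_ARG1C_SHIFT) \
	 | (R300_FPI0_ARGC_SRC2C_XYZ << R300_FPI0_ARG2C_SHIFT) \
	 | R300_FPI0_OUTC_MAD)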
1084/* END */
1085
1086/* gap */
1087#define R300_PP_ALPHA_TEST 0x4BD4
1088# define R300_REF_ALPHA_MASK 0x000000ff
1089# define R300_ALPHA_TEST_FAIL (0 << 8)
1090# define R300_ALPHA_TEST_LESS (1 << 8)
1091# define R300_ALPHA_TEST_LEQUAL (3 << 8)
1092# define R300_ALPHA_TEST_EQUAL (2 << 8)
1093# define R300_ALPHA_TEST_GEQUAL (6 << 8)
1094# define R300_ALPHA_TEST_GREATER (4 << 8)
1095# define R300_ALPHA_TEST_NEQUAL (5 << 8)
1096# define R300_ALPHA_TEST_PASS (7 << 8)
1097# define R300_ALPHA_TEST_OP_MASK (7 << 8)
1098# define R300_ALPHA_TEST_ENABLE (1 << 11)
1099
1100/* gap */
1101/* Fragment program parameters in 7.16 floating point */
1102#define R300_PFS_PARAM_0_X 0x4C00
1103#define R300_PFS_PARAM_0_Y 0x4C04
1104#define R300_PFS_PARAM_0_Z 0x4C08
1105#define R300_PFS_PARAM_0_W 0x4C0C
1106/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
1107#define R300_PFS_PARAM_31_X 0x4DF0
1108#define R300_PFS_PARAM_31_Y 0x4DF4
1109#define R300_PFS_PARAM_31_Z 0x4DF8
1110#define R300_PFS_PARAM_31_W 0x4DFC
1111
1112/* Notes:
1113// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
1114// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
1115// function (both registers are always set up completely in any case)
1116// - Most blend flags are simply copied from R200 and not tested yet */
1117#define R300_RB3D_CBLEND 0x4E04
1118#define R300_RB3D_ABLEND 0x4E08
1119 /* the following only appear in CBLEND */
1120# define R300_BLEND_ENABLE (1 << 0)
1121# define R300_BLEND_UNKNOWN (3 << 1)
1122# define R300_BLEND_NO_SEPARATE (1 << 3)
1123 /* the following are shared between CBLEND and ABLEND */
1124# define R300_FCN_MASK (3 << 12)
1125# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
1126# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
1127# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
1128# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
1129# define R300_SRC_BLEND_GL_ZERO (32 << 16)
1130# define R300_SRC_BLEND_GL_ONE (33 << 16)
1131# define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
1132# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
1133# define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
1134# define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
1135# define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
1136# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
1137# define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
1138# define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
1139# define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
1140# define R300_SRC_BLEND_MASK (63 << 16)
1141# define R300_DST_BLEND_GL_ZERO (32 << 24)
1142# define R300_DST_BLEND_GL_ONE (33 << 24)
1143# define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
1144# define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
1145# define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
1146# define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
1147# define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
1148# define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
1149# define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
1150# define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
1151# define R300_DST_BLEND_MASK (63 << 24)
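
/* Example value (our addition): a standard GL-style alpha blend,
   i.e. glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA), for CBLEND. */
# define R300_EXAMPLE_CBLEND_ALPHA \
	(R300_BLEND_ENABLE | R300_COMB_FCN_ADD_CLAMP \
	 | R300_SRC_BLEND_GL_SRC_ALPHA | R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA)
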
1152#define R300_RB3D_COLORMASK 0x4E0C
1153# define R300_COLORMASK0_B (1<<0)
1154# define R300_COLORMASK0_G (1<<1)
1155# define R300_COLORMASK0_R (1<<2)
1156# define R300_COLORMASK0_A (1<<3)
1157
1158/* gap */
1159#define R300_RB3D_COLOROFFSET0 0x4E28
1160# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
1161#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
1162#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
1163#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
1164/* gap */
1165/* Bit 16: Larger tiles
1166// Bit 17: 4x2 tiles
1167// Bit 18: Extremely weird tiling, but some pixels duplicated? */
1168#define R300_RB3D_COLORPITCH0 0x4E38
1169# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
1170# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
1171# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
1172# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1173# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1174# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1175# define R300_COLOR_FORMAT_RGB565 (2 << 22)
1176# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
1177#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
1178#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1179#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1180
1181/* gap */
1182/* Guess by Vladimir.
1183// Set to 0A before 3D operations, set to 02 afterwards. */
1184#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
1185# define R300_RB3D_DSTCACHE_02 0x00000002
1186# define R300_RB3D_DSTCACHE_0A 0x0000000A
1187
1188/* gap */
1189/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
1190/* Bit (1<<8) is the "test" bit. So a plain write is 6 - vd */
1191#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
1192# define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
1193# define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
1194# define R300_RB3D_Z_TEST 0x00000012
1195# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
1196# define R300_RB3D_Z_WRITE_ONLY 0x00000006
1201# define R300_RB3D_STENCIL_ENABLE 0x00000001
1202
1203#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
1204 /* functions */
1205# define R300_ZS_NEVER 0
1206# define R300_ZS_LESS 1
1207# define R300_ZS_LEQUAL 2
1208# define R300_ZS_EQUAL 3
1209# define R300_ZS_GEQUAL 4
1210# define R300_ZS_GREATER 5
1211# define R300_ZS_NOTEQUAL 6
1212# define R300_ZS_ALWAYS 7
1213# define R300_ZS_MASK 7
1214 /* operations */
1215# define R300_ZS_KEEP 0
1216# define R300_ZS_ZERO 1
1217# define R300_ZS_REPLACE 2
1218# define R300_ZS_INCR 3
1219# define R300_ZS_DECR 4
1220# define R300_ZS_INVERT 5
1221# define R300_ZS_INCR_WRAP 6
1222# define R300_ZS_DECR_WRAP 7
1223
1224 /* front and back refer to operations done for front
1225 and back faces, i.e. separate stencil function support */
1226# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
1227# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
1228# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
1229# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
1230# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
1231# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
1232# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
1233# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
1234# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
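
/* Example value (our addition): plain LESS depth test with both stencil
   functions set to ALWAYS; all stencil operation fields are left at 0,
   which is KEEP. */
# define R300_EXAMPLE_ZS1_DEPTH_LESS \
	((R300_ZS_LESS << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT) \
	 | (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT) \
	 | (R300_ZS_ALWAYS << R300_RB3D_ZS1_BACK_FUNC_SHIFT))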
1235
1236
1237
1238#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
1239# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
1240# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
1241# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
1242# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
1243
1244/* gap */
1245
1246#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
1247# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
1248# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
1249
1250/* gap */
1251#define R300_RB3D_DEPTHOFFSET 0x4F20
1252#define R300_RB3D_DEPTHPITCH 0x4F24
1253# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
1254# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
1255# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
1256# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1257# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1258# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1259
1260/* BEGIN: Vertex program instruction set
1261// Every instruction is four dwords long:
1262// DWORD 0: output and opcode
1263// DWORD 1: first argument
1264// DWORD 2: second argument
1265// DWORD 3: third argument
1266//
1267// Notes:
1268// - ABS r, a is implemented as MAX r, a, -a
1269// - MOV is implemented as ADD to zero
1270// - XPD is implemented as MUL + MAD
1271// - FLR is implemented as FRC + ADD
1272// - apparently, fglrx tries to schedule instructions so that there is at least
1273// one instruction between the write to a temporary and the first read
1274// from said temporary; however, violations of this scheduling are allowed
1275// - register indices seem to be unrelated to OpenGL aliasing of conventional state
1276// - only one attribute and one parameter can be loaded at a time; however, the
1277// same attribute/parameter can be used for more than one argument
1278// - the second software argument for POW is the third hardware argument (no idea why)
1279// - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
1280//
1281// There is some magic surrounding LIT:
1282// The single argument is replicated across all three inputs, but swizzled:
1283// First argument: xyzy
1284// Second argument: xyzx
1285// Third argument: xyzw
1286// Whenever the result is used later in the fragment program, fglrx forces x and w
1287// to be 1.0 in the input selection; I don't know whether this is strictly necessary */
1288#define R300_VPI_OUT_OP_DOT (1 << 0)
1289#define R300_VPI_OUT_OP_MUL (2 << 0)
1290#define R300_VPI_OUT_OP_ADD (3 << 0)
1291#define R300_VPI_OUT_OP_MAD (4 << 0)
1292#define R300_VPI_OUT_OP_DST (5 << 0)
1293#define R300_VPI_OUT_OP_FRC (6 << 0)
1294#define R300_VPI_OUT_OP_MAX (7 << 0)
1295#define R300_VPI_OUT_OP_MIN (8 << 0)
1296#define R300_VPI_OUT_OP_SGE (9 << 0)
1297#define R300_VPI_OUT_OP_SLT (10 << 0)
1298#define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
1299#define R300_VPI_OUT_OP_EXP (65 << 0)
1300#define R300_VPI_OUT_OP_LOG (66 << 0)
1301#define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */
1302#define R300_VPI_OUT_OP_LIT (68 << 0)
1303#define R300_VPI_OUT_OP_POW (69 << 0)
1304#define R300_VPI_OUT_OP_RCP (70 << 0)
1305#define R300_VPI_OUT_OP_RSQ (72 << 0)
1306#define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
1307#define R300_VPI_OUT_OP_EX2 (75 << 0)
1308#define R300_VPI_OUT_OP_LG2 (76 << 0)
1309#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
1310#define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */
1311
1312#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
1313#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
1314#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
1315
1316#define R300_VPI_OUT_REG_INDEX_SHIFT 13
1317#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */
1318
1319#define R300_VPI_OUT_WRITE_X (1 << 20)
1320#define R300_VPI_OUT_WRITE_Y (1 << 21)
1321#define R300_VPI_OUT_WRITE_Z (1 << 22)
1322#define R300_VPI_OUT_WRITE_W (1 << 23)
1323
1324#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
1325#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
1326#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
1327#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
1328#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
1329
1330#define R300_VPI_IN_REG_INDEX_SHIFT 5
1331#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */
1332
1333/* The R300 can select components from the input register arbitrarily.
1334// Use the following constants, shifted by the component shift you
1335// want to select */
1336#define R300_VPI_IN_SELECT_X 0
1337#define R300_VPI_IN_SELECT_Y 1
1338#define R300_VPI_IN_SELECT_Z 2
1339#define R300_VPI_IN_SELECT_W 3
1340#define R300_VPI_IN_SELECT_ZERO 4
1341#define R300_VPI_IN_SELECT_ONE 5
1342#define R300_VPI_IN_SELECT_MASK 7
1343
1344#define R300_VPI_IN_X_SHIFT 13
1345#define R300_VPI_IN_Y_SHIFT 16
1346#define R300_VPI_IN_Z_SHIFT 19
1347#define R300_VPI_IN_W_SHIFT 22
1348
1349#define R300_VPI_IN_NEG_X (1 << 25)
1350#define R300_VPI_IN_NEG_Y (1 << 26)
1351#define R300_VPI_IN_NEG_Z (1 << 27)
1352#define R300_VPI_IN_NEG_W (1 << 28)
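
/* Worked example (our addition, following the notes above): MOV is ADD to
   zero, so "MOV temp1, temp0" becomes an ADD whose first argument is temp0
   with the identity swizzle and whose second argument selects ZERO on all
   four channels. */
#define R300_EXAMPLE_VPI_MOV_OUT \
	(R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY \
	 | (1 << R300_VPI_OUT_REG_INDEX_SHIFT) \
	 | R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y \
	 | R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W)
#define R300_EXAMPLE_VPI_MOV_ARG0 \
	(R300_VPI_IN_REG_CLASS_TEMPORARY | (0 << R300_VPI_IN_REG_INDEX_SHIFT) \
	 | (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) \
	 | (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) \
	 | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) \
	 | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT))
#define R300_EXAMPLE_VPI_MOV_ARG1 \
	(R300_VPI_IN_REG_CLASS_TEMPORARY \
	 | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT) \
	 | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT) \
	 | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT) \
	 | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT))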
1353/* END */
1354
1355//BEGIN: Packet 3 commands
1356
1357// A primitive emission dword.
1358#define R300_PRIM_TYPE_NONE (0 << 0)
1359#define R300_PRIM_TYPE_POINT (1 << 0)
1360#define R300_PRIM_TYPE_LINE (2 << 0)
1361#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
1362#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
1363#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
1364#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
1365#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
1366#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
1367#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
1368#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
1369#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) // GUESS (based on r200)
1370#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
1371#define R300_PRIM_TYPE_QUADS (13 << 0)
1372#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
1373#define R300_PRIM_TYPE_POLYGON (15 << 0)
1374#define R300_PRIM_TYPE_MASK 0xF
1375#define R300_PRIM_WALK_IND (1 << 4)
1376#define R300_PRIM_WALK_LIST (2 << 4)
1377#define R300_PRIM_WALK_RING (3 << 4)
1378#define R300_PRIM_WALK_MASK (3 << 4)
1379#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) // GUESS (based on r200)
1380#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) // GUESS
1381#define R300_PRIM_NUM_VERTICES_SHIFT 16
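
// Illustrative example (our addition): an emission dword for an indexed
// triangle-list draw of n vertices.
#define R300_EXAMPLE_PRIM_TRI_LIST(n) \
	(R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND | \
	 ((n) << R300_PRIM_NUM_VERTICES_SHIFT))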
1382
1383// Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
1384// Two parameter dwords:
1385// 0. The first parameter always appears to be 0.
1386// 1. The second parameter is a standard primitive emission dword.
1387#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
1388
1389// Specify the full set of vertex arrays as (address, stride).
1390// The first parameter is the number of vertex arrays specified.
1391// The rest of the command is a variable length list of blocks, where
1392// each block is three dwords long and specifies two arrays.
1393// The first dword of a block is split into two words: the less significant
1394// word refers to the first array, the more significant word to the second
1395// array in the block.
1396// The low byte of each word contains the size of an array entry in dwords,
1397// the high byte contains the stride of the array.
1398// The second dword of a block contains the pointer to the first array,
1399// the third dword of a block contains the pointer to the second array.
1400// Note that if the total number of arrays is odd, the third dword of
1401// the last block is omitted.
1402#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
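
A minimal sketch of the block layout just described (hypothetical helper; sizes and strides are in dwords, as stated above):

static unsigned int r300_vbpntr_word0(unsigned int size0, unsigned int stride0,
				      unsigned int size1, unsigned int stride1)
{
	/* low word describes the first array, high word the second;
	 * low byte of each word is the entry size, high byte the stride */
	unsigned int lo = (size0 & 0xff) | ((stride0 & 0xff) << 8);
	unsigned int hi = (size1 & 0xff) | ((stride1 & 0xff) << 8);

	return lo | (hi << 16);
}

The two dwords that follow this word in each block carry the two array pointers; per the note above, the last block drops its third dword when the total array count is odd.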
1403
1404#define R300_PACKET3_INDX_BUFFER 0x00003300
1405# define R300_EB_UNK1_SHIFT 24
1406# define R300_EB_UNK1 (0x80<<24)
1407# define R300_EB_UNK2 0x0810
1408#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
1409
1410//END
1411
1412#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 20bcf872b348..6d9080a3ca7e 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35#include "r300_reg.h"
35 36
36#define RADEON_FIFO_DEBUG 0 37#define RADEON_FIFO_DEBUG 0
37 38
@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
1151 1152
1152#if __OS_HAS_AGP 1153#if __OS_HAS_AGP
1153 if ( !dev_priv->is_pci ) { 1154 if ( !dev_priv->is_pci ) {
1155 /* set RADEON_AGP_BASE here instead of relying on X from user space */
1156 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1154 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, 1157 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
1155 dev_priv->ring_rptr->offset 1158 dev_priv->ring_rptr->offset
1156 - dev->agp->base 1159 - dev->agp->base
@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
1407 radeon_do_cleanup_cp(dev); 1410 radeon_do_cleanup_cp(dev);
1408 return DRM_ERR(EINVAL); 1411 return DRM_ERR(EINVAL);
1409 } 1412 }
1413 dev->agp_buffer_token = init->buffers_offset;
1410 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1414 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1411 if(!dev->agp_buffer_map) { 1415 if(!dev->agp_buffer_map) {
1412 DRM_ERROR("could not find dma buffer region!\n"); 1416 DRM_ERROR("could not find dma buffer region!\n");
@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
1625 1629
1626 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); 1630 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
1627 1631
1632 if(init.func == RADEON_INIT_R300_CP)
1633 r300_init_reg_flags();
1634
1628 switch ( init.func ) { 1635 switch ( init.func ) {
1629 case RADEON_INIT_CP: 1636 case RADEON_INIT_CP:
1630 case RADEON_INIT_R200_CP: 1637 case RADEON_INIT_R200_CP:
@@ -2039,15 +2046,43 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
2039 case CHIP_RV200: 2046 case CHIP_RV200:
2040 case CHIP_R200: 2047 case CHIP_R200:
2041 case CHIP_R300: 2048 case CHIP_R300:
2049 case CHIP_R420:
2042 dev_priv->flags |= CHIP_HAS_HIERZ; 2050 dev_priv->flags |= CHIP_HAS_HIERZ;
2043 break; 2051 break;
2044 default: 2052 default:
2045 /* all other chips have no hierarchical z buffer */ 2053 /* all other chips have no hierarchical z buffer */
2046 break; 2054 break;
2047 } 2055 }
2056
2057 if (drm_device_is_agp(dev))
2058 dev_priv->flags |= CHIP_IS_AGP;
2059
2060 DRM_DEBUG("%s card detected\n",
2061 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
2048 return ret; 2062 return ret;
2049} 2063}
2050 2064
2065int radeon_presetup(struct drm_device *dev)
2066{
2067 int ret;
2068 drm_local_map_t *map;
2069 drm_radeon_private_t *dev_priv = dev->dev_private;
2070
2071 ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
2072 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
2073 _DRM_READ_ONLY, &dev_priv->mmio);
2074 if (ret != 0)
2075 return ret;
2076
2077 ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
2078 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
2079 _DRM_WRITE_COMBINING, &map);
2080 if (ret != 0)
2081 return ret;
2082
2083 return 0;
2084}
2085
2051int radeon_driver_postcleanup(struct drm_device *dev) 2086int radeon_driver_postcleanup(struct drm_device *dev)
2052{ 2087{
2053 drm_radeon_private_t *dev_priv = dev->dev_private; 2088 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index c1e62d047989..3792798270a4 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -195,6 +195,52 @@ typedef union {
195#define RADEON_WAIT_2D 0x1 195#define RADEON_WAIT_2D 0x1
196#define RADEON_WAIT_3D 0x2 196#define RADEON_WAIT_3D 0x2
197 197
198/* Allowed parameters for R300_CMD_PACKET3
199 */
200#define R300_CMD_PACKET3_CLEAR 0
201#define R300_CMD_PACKET3_RAW 1
202
203/* Commands understood by cmd_buffer ioctl for R300.
204 * The interface has not been stabilized, so some of these may be removed
205 * and eventually reordered before stabilization.
206 */
207#define R300_CMD_PACKET0 1
208#define R300_CMD_VPU 2 /* emit vertex program upload */
209#define R300_CMD_PACKET3 3 /* emit a packet3 */
210#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
211#define R300_CMD_CP_DELAY 5
212#define R300_CMD_DMA_DISCARD 6
213#define R300_CMD_WAIT 7
214# define R300_WAIT_2D 0x1
215# define R300_WAIT_3D 0x2
216# define R300_WAIT_2D_CLEAN 0x3
217# define R300_WAIT_3D_CLEAN 0x4
218
219typedef union {
220 unsigned int u;
221 struct {
222 unsigned char cmd_type, pad0, pad1, pad2;
223 } header;
224 struct {
225 unsigned char cmd_type, count, reglo, reghi;
226 } packet0;
227 struct {
228 unsigned char cmd_type, count, adrlo, adrhi;
229 } vpu;
230 struct {
231 unsigned char cmd_type, packet, pad0, pad1;
232 } packet3;
233 struct {
234 unsigned char cmd_type, packet;
235 unsigned short count; /* amount of packet2 to emit */
236 } delay;
237 struct {
238 unsigned char cmd_type, buf_idx, pad0, pad1;
239 } dma;
240 struct {
241 unsigned char cmd_type, flags, pad0, pad1;
242 } wait;
243} drm_r300_cmd_header_t;
198 244
199#define RADEON_FRONT 0x1 245#define RADEON_FRONT 0x1
200#define RADEON_BACK 0x2 246#define RADEON_BACK 0x2
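
For illustration, a sketch of filling this header for an R300_CMD_PACKET0. The helper is invented; that the register offset splits byte-wise across reglo/reghi is inferred from the struct layout, not stated in the patch:

static drm_r300_cmd_header_t r300_packet0_header(unsigned int reg,
						 unsigned char count)
{
	drm_r300_cmd_header_t header;

	header.packet0.cmd_type = R300_CMD_PACKET0;
	header.packet0.count = count;			/* registers to write */
	header.packet0.reglo = reg & 0xff;		/* low byte of offset */
	header.packet0.reghi = (reg >> 8) & 0xff;	/* high byte of offset */
	return header;
}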
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 18e4e5b0952f..e0682f64b400 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -76,6 +76,7 @@ static struct drm_driver driver = {
76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
78 .preinit = radeon_driver_preinit, 78 .preinit = radeon_driver_preinit,
79 .presetup = radeon_presetup,
79 .postcleanup = radeon_driver_postcleanup, 80 .postcleanup = radeon_driver_postcleanup,
80 .prerelease = radeon_driver_prerelease, 81 .prerelease = radeon_driver_prerelease,
81 .pretakedown = radeon_driver_pretakedown, 82 .pretakedown = radeon_driver_pretakedown,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 771aa80a5e8c..f12a963ede18 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -82,9 +82,10 @@
82 * - Add support for r100 cube maps 82 * - Add support for r100 cube maps
83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear 83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
84 * texture filtering on r200 84 * texture filtering on r200
85 * 1.17- Add initial support for R300 (3D).
85 */ 86 */
86#define DRIVER_MAJOR 1 87#define DRIVER_MAJOR 1
87#define DRIVER_MINOR 16 88#define DRIVER_MINOR 17
88#define DRIVER_PATCHLEVEL 0 89#define DRIVER_PATCHLEVEL 0
89 90
90#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) 91#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
@@ -106,7 +107,9 @@ enum radeon_family {
106 CHIP_RV280, 107 CHIP_RV280,
107 CHIP_R300, 108 CHIP_R300,
108 CHIP_RS300, 109 CHIP_RS300,
110 CHIP_R350,
109 CHIP_RV350, 111 CHIP_RV350,
112 CHIP_R420,
110 CHIP_LAST, 113 CHIP_LAST,
111}; 114};
112 115
@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
290extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); 293extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
291 294
292extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); 295extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
296extern int radeon_presetup(struct drm_device *dev);
293extern int radeon_driver_postcleanup(struct drm_device *dev); 297extern int radeon_driver_postcleanup(struct drm_device *dev);
294 298
295extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); 299extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev );
320extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 324extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
321 unsigned long arg); 325 unsigned long arg);
322 326
327
328/* r300_cmdbuf.c */
329extern void r300_init_reg_flags(void);
330
331extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
332 drm_file_t* filp_priv,
333 drm_radeon_cmd_buffer_t* cmdbuf);
334
323/* Flags for stats.boxes 335/* Flags for stats.boxes
324 */ 336 */
325#define RADEON_BOX_DMA_IDLE 0x1 337#define RADEON_BOX_DMA_IDLE 0x1
@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
357#define RADEON_CRTC2_OFFSET 0x0324 369#define RADEON_CRTC2_OFFSET 0x0324
358#define RADEON_CRTC2_OFFSET_CNTL 0x0328 370#define RADEON_CRTC2_OFFSET_CNTL 0x0328
359 371
372#define RADEON_MPP_TB_CONFIG 0x01c0
373#define RADEON_MEM_CNTL 0x0140
374#define RADEON_MEM_SDRAM_MODE_REG 0x0158
375#define RADEON_AGP_BASE 0x0170
376
360#define RADEON_RB3D_COLOROFFSET 0x1c40 377#define RADEON_RB3D_COLOROFFSET 0x1c40
361#define RADEON_RB3D_COLORPITCH 0x1c48 378#define RADEON_RB3D_COLORPITCH 0x1c48
362 379
@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
651#define RADEON_CP_PACKET1 0x40000000 668#define RADEON_CP_PACKET1 0x40000000
652#define RADEON_CP_PACKET2 0x80000000 669#define RADEON_CP_PACKET2 0x80000000
653#define RADEON_CP_PACKET3 0xC0000000 670#define RADEON_CP_PACKET3 0xC0000000
671# define RADEON_CP_NOP 0x00001000
672# define RADEON_CP_NEXT_CHAR 0x00001900
673# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
674# define RADEON_CP_SET_SCISSORS 0x00001E00
675 /* GEN_INDX_PRIM is unsupported starting with R300 */
654# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 676# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
655# define RADEON_WAIT_FOR_IDLE 0x00002600 677# define RADEON_WAIT_FOR_IDLE 0x00002600
656# define RADEON_3D_DRAW_VBUF 0x00002800 678# define RADEON_3D_DRAW_VBUF 0x00002800
657# define RADEON_3D_DRAW_IMMD 0x00002900 679# define RADEON_3D_DRAW_IMMD 0x00002900
658# define RADEON_3D_DRAW_INDX 0x00002A00 680# define RADEON_3D_DRAW_INDX 0x00002A00
681# define RADEON_CP_LOAD_PALETTE 0x00002C00
659# define RADEON_3D_LOAD_VBPNTR 0x00002F00 682# define RADEON_3D_LOAD_VBPNTR 0x00002F00
660# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 683# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
661# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 684# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
662# define RADEON_3D_CLEAR_ZMASK 0x00003200 685# define RADEON_3D_CLEAR_ZMASK 0x00003200
686# define RADEON_CP_INDX_BUFFER 0x00003300
687# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
688# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
689# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
663# define RADEON_3D_CLEAR_HIZ 0x00003700 690# define RADEON_3D_CLEAR_HIZ 0x00003700
691# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
664# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 692# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
665# define RADEON_CNTL_PAINT_MULTI 0x00009A00 693# define RADEON_CNTL_PAINT_MULTI 0x00009A00
666# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 694# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 1f79e249146c..64a3e3a406ef 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
1493 1493
1494} 1494}
1495 1495
1496#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) 1496#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1497 1497
1498static int radeon_cp_dispatch_texture( DRMFILE filp, 1498static int radeon_cp_dispatch_texture( DRMFILE filp,
1499 drm_device_t *dev, 1499 drm_device_t *dev,
@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1506 u32 format; 1506 u32 format;
1507 u32 *buffer; 1507 u32 *buffer;
1508 const u8 __user *data; 1508 const u8 __user *data;
1509 int size, dwords, tex_width, blit_width; 1509 int size, dwords, tex_width, blit_width, spitch;
1510 u32 height; 1510 u32 height;
1511 int i; 1511 int i;
1512 u32 texpitch, microtile; 1512 u32 texpitch, microtile;
1513 u32 offset;
1513 RING_LOCALS; 1514 RING_LOCALS;
1514 1515
1515 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); 1516 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1530 RADEON_WAIT_UNTIL_IDLE(); 1531 RADEON_WAIT_UNTIL_IDLE();
1531 ADVANCE_RING(); 1532 ADVANCE_RING();
1532 1533
1533#ifdef __BIG_ENDIAN
1534 /* The Mesa texture functions provide the data in little endian as the
1535 * chip wants it, but we need to compensate for the fact that the CP
1536 * ring gets byte-swapped
1537 */
1538 BEGIN_RING( 2 );
1539 OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1540 ADVANCE_RING();
1541#endif
1542
1543
1544 /* The compiler won't optimize away a division by a variable, 1534 /* The compiler won't optimize away a division by a variable,
1545 * even if the only legal values are powers of two. Thus, we'll 1535 * even if the only legal values are powers of two. Thus, we'll
1546 * use a shift instead. 1536 * use a shift instead.
@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1572 DRM_ERROR( "invalid texture format %d\n", tex->format ); 1562 DRM_ERROR( "invalid texture format %d\n", tex->format );
1573 return DRM_ERR(EINVAL); 1563 return DRM_ERR(EINVAL);
1574 } 1564 }
1565 spitch = blit_width >> 6;
1566 if (spitch == 0 && image->height > 1)
1567 return DRM_ERR(EINVAL);
1568
1575 texpitch = tex->pitch; 1569 texpitch = tex->pitch;
1576 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { 1570 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
1577 microtile = 1; 1571 microtile = 1;
@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1624 */ 1618 */
1625 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); 1619 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1626 dwords = size / 4; 1620 dwords = size / 4;
1627 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1628 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1629 RADEON_GMC_BRUSH_NONE |
1630 (format << 8) |
1631 RADEON_GMC_SRC_DATATYPE_COLOR |
1632 RADEON_ROP3_S |
1633 RADEON_DP_SRC_SOURCE_HOST_DATA |
1634 RADEON_GMC_CLR_CMP_CNTL_DIS |
1635 RADEON_GMC_WR_MSK_DIS);
1636
1637 buffer[2] = (texpitch << 22) | (tex->offset >> 10);
1638 buffer[3] = 0xffffffff;
1639 buffer[4] = 0xffffffff;
1640 buffer[5] = (image->y << 16) | image->x;
1641 buffer[6] = (height << 16) | image->width;
1642 buffer[7] = dwords;
1643 buffer += 8;
1644
1645
1646 1621
1647 if (microtile) { 1622 if (microtile) {
1648 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1623 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1750 } 1725 }
1751 1726
1752 buf->filp = filp; 1727 buf->filp = filp;
1753 buf->used = (dwords + 8) * sizeof(u32); 1728 buf->used = size;
1754 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); 1729 offset = dev_priv->gart_buffers_offset + buf->offset;
1755 radeon_cp_discard_buffer( dev, buf ); 1730 BEGIN_RING(9);
1731 OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
1732 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1733 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1734 RADEON_GMC_BRUSH_NONE |
1735 (format << 8) |
1736 RADEON_GMC_SRC_DATATYPE_COLOR |
1737 RADEON_ROP3_S |
1738 RADEON_DP_SRC_SOURCE_MEMORY |
1739 RADEON_GMC_CLR_CMP_CNTL_DIS |
1740 RADEON_GMC_WR_MSK_DIS );
1741 OUT_RING((spitch << 22) | (offset >> 10));
1742 OUT_RING((texpitch << 22) | (tex->offset >> 10));
1743 OUT_RING(0);
1744 OUT_RING((image->x << 16) | image->y);
1745 OUT_RING((image->width << 16) | height);
1746 RADEON_WAIT_UNTIL_2D_IDLE();
1747 ADVANCE_RING();
1748
1749 radeon_cp_discard_buffer(dev, buf);
1756 1750
1757 /* Update the input parameters for next time */ 1751 /* Update the input parameters for next time */
1758 image->y += height; 1752 image->y += height;
@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
2797 2791
2798 orig_nbox = cmdbuf.nbox; 2792 orig_nbox = cmdbuf.nbox;
2799 2793
2794 if(dev_priv->microcode_version == UCODE_R300) {
2795 int temp;
2796 temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
2797
2798 if (orig_bufsz != 0)
2799 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2800
2801 return temp;
2802 }
2803
2804 /* microcode_version != r300 */
2800 while ( cmdbuf.bufsz >= sizeof(header) ) { 2805 while ( cmdbuf.bufsz >= sizeof(header) ) {
2801 2806
2802 header.i = *(int *)cmdbuf.buf; 2807 header.i = *(int *)cmdbuf.buf;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
new file mode 100644
index 000000000000..2fd40bac7c97
--- /dev/null
+++ b/drivers/char/drm/savage_bci.c
@@ -0,0 +1,1096 @@
1/* savage_bci.c -- BCI support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29/* Need a long timeout because shadow status updates can take a while,
30 * and so can waiting for events when the queue is full. */
31#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
33#define SAVAGE_FREELIST_DEBUG 0
34
35static int
36savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
37{
38 uint32_t mask = dev_priv->status_used_mask;
39 uint32_t threshold = dev_priv->bci_threshold_hi;
40 uint32_t status;
41 int i;
42
43#if SAVAGE_BCI_DEBUG
44 if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
45 DRM_ERROR("Trying to emit %d words "
46 "(more than guaranteed space in COB)\n", n);
47#endif
48
49 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
50 DRM_MEMORYBARRIER();
51 status = dev_priv->status_ptr[0];
52 if ((status & mask) < threshold)
53 return 0;
54 DRM_UDELAY(1);
55 }
56
57#if SAVAGE_BCI_DEBUG
58 DRM_ERROR("failed!\n");
59 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
60#endif
61 return DRM_ERR(EBUSY);
62}
63
64static int
65savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
66{
67 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
68 uint32_t status;
69 int i;
70
71 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
72 status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
73 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
74 return 0;
75 DRM_UDELAY(1);
76 }
77
78#if SAVAGE_BCI_DEBUG
79 DRM_ERROR("failed!\n");
80 DRM_INFO(" status=0x%08x\n", status);
81#endif
82 return DRM_ERR(EBUSY);
83}
84
85static int
86savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
87{
88 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
89 uint32_t status;
90 int i;
91
92 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
93 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
94 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
95 return 0;
96 DRM_UDELAY(1);
97 }
98
99#if SAVAGE_BCI_DEBUG
100 DRM_ERROR("failed!\n");
101 DRM_INFO(" status=0x%08x\n", status);
102#endif
103 return DRM_ERR(EBUSY);
104}
105
106/*
107 * Waiting for events.
108 *
109 * The BIOS resets the event tag to 0 on mode changes. Therefore we
110 * never emit 0 to the event tag. If we find a 0 event tag we know the
111 * BIOS stomped on it and return success, assuming that the BIOS waited
112 * for engine idle.
113 *
114 * Note: if the Xserver uses the event tag it has to follow the same
115 * rule. Otherwise there may be glitches every 2^16 events.
116 */
117static int
118savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
119{
120 uint32_t status;
121 int i;
122
123 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
124 DRM_MEMORYBARRIER();
125 status = dev_priv->status_ptr[1];
126 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
127 (status & 0xffff) == 0)
128 return 0;
129 DRM_UDELAY(1);
130 }
131
132#if SAVAGE_BCI_DEBUG
133 DRM_ERROR("failed!\n");
134 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
135#endif
136
137 return DRM_ERR(EBUSY);
138}
139
140static int
141savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
142{
143 uint32_t status;
144 int i;
145
146 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
147 status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
148 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
149 (status & 0xffff) == 0)
150 return 0;
151 DRM_UDELAY(1);
152 }
153
154#if SAVAGE_BCI_DEBUG
155 DRM_ERROR("failed!\n");
156 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
157#endif
158
159 return DRM_ERR(EBUSY);
160}
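
The tag comparison used by the two wait functions above is wrap-safe: subtracting in 16 bits and testing against half the range treats the tag space as circular. A standalone sketch, assuming uint16_t wrap semantics:

static int tag_reached(uint16_t hw, uint16_t wanted)
{
	/* e.g. hw = 0x0002, wanted = 0xfffe:
	 * (0x0002 - 0xfffe) & 0xffff == 4, which is <= 0x7fff,
	 * so the event counts as reached across the wrap */
	return (uint16_t)(hw - wanted) <= 0x7fff;
}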
161
162uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
163 unsigned int flags)
164{
165 uint16_t count;
166 BCI_LOCALS;
167
168 if (dev_priv->status_ptr) {
169 /* coordinate with Xserver */
170 count = dev_priv->status_ptr[1023];
171 if (count < dev_priv->event_counter)
172 dev_priv->event_wrap++;
173 } else {
174 count = dev_priv->event_counter;
175 }
176 count = (count + 1) & 0xffff;
177 if (count == 0) {
178 count++; /* See the comment above savage_bci_wait_event_*. */
179 dev_priv->event_wrap++;
180 }
181 dev_priv->event_counter = count;
182 if (dev_priv->status_ptr)
183 dev_priv->status_ptr[1023] = (uint32_t)count;
184
185 if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
186 unsigned int wait_cmd = BCI_CMD_WAIT;
187 if ((flags & SAVAGE_WAIT_2D))
188 wait_cmd |= BCI_CMD_WAIT_2D;
189 if ((flags & SAVAGE_WAIT_3D))
190 wait_cmd |= BCI_CMD_WAIT_3D;
191 BEGIN_BCI(2);
192 BCI_WRITE(wait_cmd);
193 } else {
194 BEGIN_BCI(1);
195 }
196 BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
197
198 return count;
199}
200
201/*
202 * Freelist management
203 */
204static int savage_freelist_init(drm_device_t *dev)
205{
206 drm_savage_private_t *dev_priv = dev->dev_private;
207 drm_device_dma_t *dma = dev->dma;
208 drm_buf_t *buf;
209 drm_savage_buf_priv_t *entry;
210 int i;
211 DRM_DEBUG("count=%d\n", dma->buf_count);
212
213 dev_priv->head.next = &dev_priv->tail;
214 dev_priv->head.prev = NULL;
215 dev_priv->head.buf = NULL;
216
217 dev_priv->tail.next = NULL;
218 dev_priv->tail.prev = &dev_priv->head;
219 dev_priv->tail.buf = NULL;
220
221 for (i = 0; i < dma->buf_count; i++) {
222 buf = dma->buflist[i];
223 entry = buf->dev_private;
224
225 SET_AGE(&entry->age, 0, 0);
226 entry->buf = buf;
227
228 entry->next = dev_priv->head.next;
229 entry->prev = &dev_priv->head;
230 dev_priv->head.next->prev = entry;
231 dev_priv->head.next = entry;
232 }
233
234 return 0;
235}
236
237static drm_buf_t *savage_freelist_get(drm_device_t *dev)
238{
239 drm_savage_private_t *dev_priv = dev->dev_private;
240 drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
241 uint16_t event;
242 unsigned int wrap;
243 DRM_DEBUG("\n");
244
245 UPDATE_EVENT_COUNTER();
246 if (dev_priv->status_ptr)
247 event = dev_priv->status_ptr[1] & 0xffff;
248 else
249 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
250 wrap = dev_priv->event_wrap;
251 if (event > dev_priv->event_counter)
252 wrap--; /* hardware hasn't passed the last wrap yet */
253
254 DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
255 DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
256
257 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
258 drm_savage_buf_priv_t *next = tail->next;
259 drm_savage_buf_priv_t *prev = tail->prev;
260 prev->next = next;
261 next->prev = prev;
262 tail->next = tail->prev = NULL;
263 return tail->buf;
264 }
265
266 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
267 return NULL;
268}
269
270void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
271{
272 drm_savage_private_t *dev_priv = dev->dev_private;
273 drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
274
275 DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
276
277 if (entry->next != NULL || entry->prev != NULL) {
278 DRM_ERROR("entry already on freelist.\n");
279 return;
280 }
281
282 prev = &dev_priv->head;
283 next = prev->next;
284 prev->next = entry;
285 next->prev = entry;
286 entry->prev = prev;
287 entry->next = next;
288}
289
290/*
291 * Command DMA
292 */
293static int savage_dma_init(drm_savage_private_t *dev_priv)
294{
295 unsigned int i;
296
297 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
298 (SAVAGE_DMA_PAGE_SIZE*4);
299 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
300 dev_priv->nr_dma_pages,
301 DRM_MEM_DRIVER);
302 if (dev_priv->dma_pages == NULL)
303 return DRM_ERR(ENOMEM);
304
305 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
306 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
307 dev_priv->dma_pages[i].used = 0;
308 dev_priv->dma_pages[i].flushed = 0;
309 }
310 SET_AGE(&dev_priv->last_dma_age, 0, 0);
311
312 dev_priv->first_dma_page = 0;
313 dev_priv->current_dma_page = 0;
314
315 return 0;
316}
317
318void savage_dma_reset(drm_savage_private_t *dev_priv)
319{
320 uint16_t event;
321 unsigned int wrap, i;
322 event = savage_bci_emit_event(dev_priv, 0);
323 wrap = dev_priv->event_wrap;
324 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
325 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
326 dev_priv->dma_pages[i].used = 0;
327 dev_priv->dma_pages[i].flushed = 0;
328 }
329 SET_AGE(&dev_priv->last_dma_age, event, wrap);
330 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
331}
332
333void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
334{
335 uint16_t event;
336 unsigned int wrap;
337
338 /* Faked DMA buffer pages don't age. */
339 if (dev_priv->cmd_dma == &dev_priv->fake_dma)
340 return;
341
342 UPDATE_EVENT_COUNTER();
343 if (dev_priv->status_ptr)
344 event = dev_priv->status_ptr[1] & 0xffff;
345 else
346 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
347 wrap = dev_priv->event_wrap;
348 if (event > dev_priv->event_counter)
349 wrap--; /* hardware hasn't passed the last wrap yet */
350
351 if (dev_priv->dma_pages[page].age.wrap > wrap ||
352 (dev_priv->dma_pages[page].age.wrap == wrap &&
353 dev_priv->dma_pages[page].age.event > event)) {
354 if (dev_priv->wait_evnt(dev_priv,
355 dev_priv->dma_pages[page].age.event)
356 < 0)
357 DRM_ERROR("wait_evnt failed!\n");
358 }
359}
360
361uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
362{
363 unsigned int cur = dev_priv->current_dma_page;
364 unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
365 dev_priv->dma_pages[cur].used;
366 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
367 SAVAGE_DMA_PAGE_SIZE;
368 uint32_t *dma_ptr;
369 unsigned int i;
370
371 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
372 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
373
374 if (cur + nr_pages < dev_priv->nr_dma_pages) {
375 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
376 cur*SAVAGE_DMA_PAGE_SIZE +
377 dev_priv->dma_pages[cur].used;
378 if (n < rest)
379 rest = n;
380 dev_priv->dma_pages[cur].used += rest;
381 n -= rest;
382 cur++;
383 } else {
384 dev_priv->dma_flush(dev_priv);
385 nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
386 for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
387 dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
388 dev_priv->dma_pages[i].used = 0;
389 dev_priv->dma_pages[i].flushed = 0;
390 }
391 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
392 dev_priv->first_dma_page = cur = 0;
393 }
394 for (i = cur; nr_pages > 0; ++i, --nr_pages) {
395#if SAVAGE_DMA_DEBUG
396 if (dev_priv->dma_pages[i].used) {
397 DRM_ERROR("unflushed page %u: used=%u\n",
398 i, dev_priv->dma_pages[i].used);
399 }
400#endif
401 if (n > SAVAGE_DMA_PAGE_SIZE)
402 dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
403 else
404 dev_priv->dma_pages[i].used = n;
405 n -= SAVAGE_DMA_PAGE_SIZE;
406 }
407 dev_priv->current_dma_page = --i;
408
409 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
410 i, dev_priv->dma_pages[i].used, n);
411
412 savage_dma_wait(dev_priv, dev_priv->current_dma_page);
413
414 return dma_ptr;
415}
416
417static void savage_dma_flush(drm_savage_private_t *dev_priv)
418{
419 unsigned int first = dev_priv->first_dma_page;
420 unsigned int cur = dev_priv->current_dma_page;
421 uint16_t event;
422 unsigned int wrap, pad, align, len, i;
423 unsigned long phys_addr;
424 BCI_LOCALS;
425
426 if (first == cur &&
427 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
428 return;
429
430 /* pad length to multiples of 2 entries
431 * align start of next DMA block to multiples of 8 entries */
432 pad = -dev_priv->dma_pages[cur].used & 1;
433 align = -(dev_priv->dma_pages[cur].used + pad) & 7;
434
435 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
436 "pad=%u, align=%u\n",
437 first, cur, dev_priv->dma_pages[first].flushed,
438 dev_priv->dma_pages[cur].used, pad, align);
439
440 /* pad with noops */
441 if (pad) {
442 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
443 cur * SAVAGE_DMA_PAGE_SIZE +
444 dev_priv->dma_pages[cur].used;
445 dev_priv->dma_pages[cur].used += pad;
446 while(pad != 0) {
447 *dma_ptr++ = BCI_CMD_WAIT;
448 pad--;
449 }
450 }
451
452 DRM_MEMORYBARRIER();
453
454 /* do flush ... */
455 phys_addr = dev_priv->cmd_dma->offset +
456 (first * SAVAGE_DMA_PAGE_SIZE +
457 dev_priv->dma_pages[first].flushed) * 4;
458 len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
459 dev_priv->dma_pages[cur].used -
460 dev_priv->dma_pages[first].flushed;
461
462 DRM_DEBUG("phys_addr=%lx, len=%u\n",
463 phys_addr | dev_priv->dma_type, len);
464
465 BEGIN_BCI(3);
466 BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
467 BCI_WRITE(phys_addr | dev_priv->dma_type);
468 BCI_DMA(len);
469
470 /* fix alignment of the start of the next block */
471 dev_priv->dma_pages[cur].used += align;
472
473 /* age DMA pages */
474 event = savage_bci_emit_event(dev_priv, 0);
475 wrap = dev_priv->event_wrap;
476 for (i = first; i < cur; ++i) {
477 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
478 dev_priv->dma_pages[i].used = 0;
479 dev_priv->dma_pages[i].flushed = 0;
480 }
481 /* age the current page only when it's full */
482 if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
483 SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
484 dev_priv->dma_pages[cur].used = 0;
485 dev_priv->dma_pages[cur].flushed = 0;
486 /* advance to next page */
487 cur++;
488 if (cur == dev_priv->nr_dma_pages)
489 cur = 0;
490 dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
491 } else {
492 dev_priv->first_dma_page = cur;
493 dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
494 }
495 SET_AGE(&dev_priv->last_dma_age, event, wrap);
496
497 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
498 dev_priv->dma_pages[cur].used,
499 dev_priv->dma_pages[cur].flushed);
500}
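
The pad/align arithmetic in savage_dma_flush above uses the (-x) & (n-1) idiom: for a power-of-two n it yields the distance from x up to the next multiple of n. A standalone sketch:

static unsigned int pad_to(unsigned int used, unsigned int n)
{
	/* 0 when used is already a multiple of n */
	return -used & (n - 1);
}

For example, pad_to(13, 2) == 1 (pad 13 entries to 14) and pad_to(14, 8) == 2 (the next block then starts at entry 16, a multiple of 8).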
501
502static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
503{
504 unsigned int i, j;
505 BCI_LOCALS;
506
507 if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
508 dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
509 return;
510
511 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
512 dev_priv->first_dma_page, dev_priv->current_dma_page,
513 dev_priv->dma_pages[dev_priv->current_dma_page].used);
514
515 for (i = dev_priv->first_dma_page;
516 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
517 ++i) {
518 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
519 i * SAVAGE_DMA_PAGE_SIZE;
520#if SAVAGE_DMA_DEBUG
521 /* Sanity check: all pages except the last one must be full. */
522 if (i < dev_priv->current_dma_page &&
523 dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
524 DRM_ERROR("partial DMA page %u: used=%u",
525 i, dev_priv->dma_pages[i].used);
526 }
527#endif
528 BEGIN_BCI(dev_priv->dma_pages[i].used);
529 for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
530 BCI_WRITE(dma_ptr[j]);
531 }
532 dev_priv->dma_pages[i].used = 0;
533 }
534
535 /* reset to first page */
536 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
537}
538
539/*
540 * Initialize mappings. On Savage4 and SavageIX the alignment
541 * and size of the aperture is not suitable for automatic MTRR setup
542 * in drm_addmap. Therefore we do it manually before the maps are
543 * initialized. We also need to take care of deleting the MTRRs in
544 * postcleanup.
545 */
546int savage_preinit(drm_device_t *dev, unsigned long chipset)
547{
548 drm_savage_private_t *dev_priv;
549 unsigned long mmio_base, fb_base, fb_size, aperture_base;
550 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
551 * in case we decide we need information on the BAR for BSD in the
552 * future.
553 */
554 unsigned int fb_rsrc, aper_rsrc;
555 int ret = 0;
556
557 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
558 if (dev_priv == NULL)
559 return DRM_ERR(ENOMEM);
560
561 memset(dev_priv, 0, sizeof(drm_savage_private_t));
562 dev->dev_private = (void *)dev_priv;
563 dev_priv->chipset = (enum savage_family)chipset;
564
565 dev_priv->mtrr[0].handle = -1;
566 dev_priv->mtrr[1].handle = -1;
567 dev_priv->mtrr[2].handle = -1;
568 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
569 fb_rsrc = 0;
570 fb_base = drm_get_resource_start(dev, 0);
571 fb_size = SAVAGE_FB_SIZE_S3;
572 mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
573 aper_rsrc = 0;
574 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
575 /* this should always be true */
576 if (drm_get_resource_len(dev, 0) == 0x08000000) {
577 /* Don't make MMIO write-combining! We need 3
578 * MTRRs. */
579 dev_priv->mtrr[0].base = fb_base;
580 dev_priv->mtrr[0].size = 0x01000000;
581 dev_priv->mtrr[0].handle = mtrr_add(
582 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
583 MTRR_TYPE_WRCOMB, 1);
584 dev_priv->mtrr[1].base = fb_base+0x02000000;
585 dev_priv->mtrr[1].size = 0x02000000;
586 dev_priv->mtrr[1].handle = mtrr_add(
587 dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
588 MTRR_TYPE_WRCOMB, 1);
589 dev_priv->mtrr[2].base = fb_base+0x04000000;
590 dev_priv->mtrr[2].size = 0x04000000;
591 dev_priv->mtrr[2].handle = mtrr_add(
592 dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
593 MTRR_TYPE_WRCOMB, 1);
594 } else {
595 DRM_ERROR("strange pci_resource_len %08lx\n",
596 drm_get_resource_len(dev, 0));
597 }
598 } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
599 mmio_base = drm_get_resource_start(dev, 0);
600 fb_rsrc = 1;
601 fb_base = drm_get_resource_start(dev, 1);
602 fb_size = SAVAGE_FB_SIZE_S4;
603 aper_rsrc = 1;
604 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
605 /* this should always be true */
606 if (drm_get_resource_len(dev, 1) == 0x08000000) {
607 /* Can use one MTRR to cover both fb and
608 * aperture. */
609 dev_priv->mtrr[0].base = fb_base;
610 dev_priv->mtrr[0].size = 0x08000000;
611 dev_priv->mtrr[0].handle = mtrr_add(
612 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
613 MTRR_TYPE_WRCOMB, 1);
614 } else {
615 DRM_ERROR("strange pci_resource_len %08lx\n",
616 drm_get_resource_len(dev, 1));
617 }
618 } else {
619 mmio_base = drm_get_resource_start(dev, 0);
620 fb_rsrc = 1;
621 fb_base = drm_get_resource_start(dev, 1);
622 fb_size = drm_get_resource_len(dev, 1);
623 aper_rsrc = 2;
624 aperture_base = drm_get_resource_start(dev, 2);
625 /* Automatic MTRR setup will do the right thing. */
626 }
627
628 ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
629 _DRM_READ_ONLY, &dev_priv->mmio);
630 if (ret)
631 return ret;
632
633 ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
634 _DRM_WRITE_COMBINING, &dev_priv->fb);
635 if (ret)
636 return ret;
637
638 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
639 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
640 &dev_priv->aperture);
641 if (ret)
642 return ret;
643
644 return ret;
645}
646
647/*
648 * Delete MTRRs and free device-private data.
649 */
650int savage_postcleanup(drm_device_t *dev)
651{
652 drm_savage_private_t *dev_priv = dev->dev_private;
653 int i;
654
655 for (i = 0; i < 3; ++i)
656 if (dev_priv->mtrr[i].handle >= 0)
657 mtrr_del(dev_priv->mtrr[i].handle,
658 dev_priv->mtrr[i].base,
659 dev_priv->mtrr[i].size);
660
661 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
662
663 return 0;
664}
665
666static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
667{
668 drm_savage_private_t *dev_priv = dev->dev_private;
669
670 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
671 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
672 return DRM_ERR(EINVAL);
673 }
674 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
675 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
676 return DRM_ERR(EINVAL);
677 }
678 if (init->dma_type != SAVAGE_DMA_AGP &&
679 init->dma_type != SAVAGE_DMA_PCI) {
680 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
681 return DRM_ERR(EINVAL);
682 }
683
684 dev_priv->cob_size = init->cob_size;
685 dev_priv->bci_threshold_lo = init->bci_threshold_lo;
686 dev_priv->bci_threshold_hi = init->bci_threshold_hi;
687 dev_priv->dma_type = init->dma_type;
688
689 dev_priv->fb_bpp = init->fb_bpp;
690 dev_priv->front_offset = init->front_offset;
691 dev_priv->front_pitch = init->front_pitch;
692 dev_priv->back_offset = init->back_offset;
693 dev_priv->back_pitch = init->back_pitch;
694 dev_priv->depth_bpp = init->depth_bpp;
695 dev_priv->depth_offset = init->depth_offset;
696 dev_priv->depth_pitch = init->depth_pitch;
697
698 dev_priv->texture_offset = init->texture_offset;
699 dev_priv->texture_size = init->texture_size;
700
701 DRM_GETSAREA();
702 if (!dev_priv->sarea) {
703 DRM_ERROR("could not find sarea!\n");
704 savage_do_cleanup_bci(dev);
705 return DRM_ERR(EINVAL);
706 }
707 if (init->status_offset != 0) {
708 dev_priv->status = drm_core_findmap(dev, init->status_offset);
709 if (!dev_priv->status) {
710 DRM_ERROR("could not find shadow status region!\n");
711 savage_do_cleanup_bci(dev);
712 return DRM_ERR(EINVAL);
713 }
714 } else {
715 dev_priv->status = NULL;
716 }
717 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
718 dev->agp_buffer_map = drm_core_findmap(dev,
719 init->buffers_offset);
720 if (!dev->agp_buffer_map) {
721 DRM_ERROR("could not find DMA buffer region!\n");
722 savage_do_cleanup_bci(dev);
723 return DRM_ERR(EINVAL);
724 }
725 drm_core_ioremap(dev->agp_buffer_map, dev);
726 if (!dev->agp_buffer_map) {
727 DRM_ERROR("failed to ioremap DMA buffer region!\n");
728 savage_do_cleanup_bci(dev);
729 return DRM_ERR(ENOMEM);
730 }
731 }
732 if (init->agp_textures_offset) {
733 dev_priv->agp_textures =
734 drm_core_findmap(dev, init->agp_textures_offset);
735 if (!dev_priv->agp_textures) {
736 DRM_ERROR("could not find agp texture region!\n");
737 savage_do_cleanup_bci(dev);
738 return DRM_ERR(EINVAL);
739 }
740 } else {
741 dev_priv->agp_textures = NULL;
742 }
743
744 if (init->cmd_dma_offset) {
745 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
746 DRM_ERROR("command DMA not supported on "
747 "Savage3D/MX/IX.\n");
748 savage_do_cleanup_bci(dev);
749 return DRM_ERR(EINVAL);
750 }
751 if (dev->dma && dev->dma->buflist) {
752 DRM_ERROR("command and vertex DMA not supported "
753 "at the same time.\n");
754 savage_do_cleanup_bci(dev);
755 return DRM_ERR(EINVAL);
756 }
757 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
758 if (!dev_priv->cmd_dma) {
759 DRM_ERROR("could not find command DMA region!\n");
760 savage_do_cleanup_bci(dev);
761 return DRM_ERR(EINVAL);
762 }
763 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
764 if (dev_priv->cmd_dma->type != _DRM_AGP) {
765 DRM_ERROR("AGP command DMA region is not a "
766 "_DRM_AGP map!\n");
767 savage_do_cleanup_bci(dev);
768 return DRM_ERR(EINVAL);
769 }
770 drm_core_ioremap(dev_priv->cmd_dma, dev);
771 if (!dev_priv->cmd_dma->handle) {
772 DRM_ERROR("failed to ioremap command "
773 "DMA region!\n");
774 savage_do_cleanup_bci(dev);
775 return DRM_ERR(ENOMEM);
776 }
777 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
778 DRM_ERROR("PCI command DMA region is not a "
779 "_DRM_CONSISTENT map!\n");
780 savage_do_cleanup_bci(dev);
781 return DRM_ERR(EINVAL);
782 }
783 } else {
784 dev_priv->cmd_dma = NULL;
785 }
786
787 dev_priv->dma_flush = savage_dma_flush;
788 if (!dev_priv->cmd_dma) {
789 DRM_DEBUG("falling back to faked command DMA.\n");
790 dev_priv->fake_dma.offset = 0;
791 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
792 dev_priv->fake_dma.type = _DRM_SHM;
793 dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
794 DRM_MEM_DRIVER);
795 if (!dev_priv->fake_dma.handle) {
796 DRM_ERROR("could not allocate faked DMA buffer!\n");
797 savage_do_cleanup_bci(dev);
798 return DRM_ERR(ENOMEM);
799 }
800 dev_priv->cmd_dma = &dev_priv->fake_dma;
801 dev_priv->dma_flush = savage_fake_dma_flush;
802 }
803
804 dev_priv->sarea_priv =
805 (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
806 init->sarea_priv_offset);
807
808 /* setup bitmap descriptors */
809 {
810 unsigned int color_tile_format;
811 unsigned int depth_tile_format;
812 unsigned int front_stride, back_stride, depth_stride;
813 if (dev_priv->chipset <= S3_SAVAGE4) {
814 color_tile_format = dev_priv->fb_bpp == 16 ?
815 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
816 depth_tile_format = dev_priv->depth_bpp == 16 ?
817 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
818 } else {
819 color_tile_format = SAVAGE_BD_TILE_DEST;
820 depth_tile_format = SAVAGE_BD_TILE_DEST;
821 }
822 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
823 back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
824 depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
825
826 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
827 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
828 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
829
830 dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
831 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
832 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
833
834 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
835 (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
836 (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
837 }
838
839 /* setup status and bci ptr */
840 dev_priv->event_counter = 0;
841 dev_priv->event_wrap = 0;
842 dev_priv->bci_ptr = (volatile uint32_t *)
843 ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
844 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
845 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
846 } else {
847 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
848 }
849 if (dev_priv->status != NULL) {
850 dev_priv->status_ptr =
851 (volatile uint32_t *)dev_priv->status->handle;
852 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
853 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
854 dev_priv->status_ptr[1023] = dev_priv->event_counter;
855 } else {
856 dev_priv->status_ptr = NULL;
857 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
858 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
859 } else {
860 dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
861 }
862 dev_priv->wait_evnt = savage_bci_wait_event_reg;
863 }
864
865 /* cliprect functions */
866 if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
867 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
868 else
869 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
870
871 if (savage_freelist_init(dev) < 0) {
872 DRM_ERROR("could not initialize freelist\n");
873 savage_do_cleanup_bci(dev);
874 return DRM_ERR(ENOMEM);
875 }
876
877 if (savage_dma_init(dev_priv) < 0) {
878 DRM_ERROR("could not initialize command DMA\n");
879 savage_do_cleanup_bci(dev);
880 return DRM_ERR(ENOMEM);
881 }
882
883 return 0;
884}
885
886int savage_do_cleanup_bci(drm_device_t *dev)
887{
888 drm_savage_private_t *dev_priv = dev->dev_private;
889
890 if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
891 if (dev_priv->fake_dma.handle)
892 drm_free(dev_priv->fake_dma.handle,
893 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
894 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
895 dev_priv->cmd_dma->type == _DRM_AGP &&
896 dev_priv->dma_type == SAVAGE_DMA_AGP)
897 drm_core_ioremapfree(dev_priv->cmd_dma, dev);
898
899 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
900 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
901 drm_core_ioremapfree(dev->agp_buffer_map, dev);
902 /* make sure the next instance (which may be running
903 * in PCI mode) doesn't try to use an old
904 * agp_buffer_map. */
905 dev->agp_buffer_map = NULL;
906 }
907
908 if (dev_priv->dma_pages)
909 drm_free(dev_priv->dma_pages,
910 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
911 DRM_MEM_DRIVER);
912
913 return 0;
914}
915
916static int savage_bci_init(DRM_IOCTL_ARGS)
917{
918 DRM_DEVICE;
919 drm_savage_init_t init;
920
921 LOCK_TEST_WITH_RETURN(dev, filp);
922
923 DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
924 sizeof(init));
925
926 switch (init.func) {
927 case SAVAGE_INIT_BCI:
928 return savage_do_init_bci(dev, &init);
929 case SAVAGE_CLEANUP_BCI:
930 return savage_do_cleanup_bci(dev);
931 }
932
933 return DRM_ERR(EINVAL);
934}
935
936static int savage_bci_event_emit(DRM_IOCTL_ARGS)
937{
938 DRM_DEVICE;
939 drm_savage_private_t *dev_priv = dev->dev_private;
940 drm_savage_event_emit_t event;
941
942 DRM_DEBUG("\n");
943
944 LOCK_TEST_WITH_RETURN(dev, filp);
945
946 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
947 sizeof(event));
948
949 event.count = savage_bci_emit_event(dev_priv, event.flags);
950 event.count |= dev_priv->event_wrap << 16;
951 DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
952 event.count, sizeof(event.count));
953 return 0;
954}
955
956static int savage_bci_event_wait(DRM_IOCTL_ARGS)
957{
958 DRM_DEVICE;
959 drm_savage_private_t *dev_priv = dev->dev_private;
960 drm_savage_event_wait_t event;
961 unsigned int event_e, hw_e;
962 unsigned int event_w, hw_w;
963
964 DRM_DEBUG("\n");
965
966 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
967 sizeof(event));
968
969 UPDATE_EVENT_COUNTER();
970 if (dev_priv->status_ptr)
971 hw_e = dev_priv->status_ptr[1] & 0xffff;
972 else
973 hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
974 hw_w = dev_priv->event_wrap;
975 if (hw_e > dev_priv->event_counter)
976 hw_w--; /* hardware hasn't passed the last wrap yet */
977
978 event_e = event.count & 0xffff;
979 event_w = event.count >> 16;
980
981 /* Don't need to wait if
982 * - event counter wrapped since the event was emitted or
983 * - the hardware has advanced up to or over the event to wait for.
984 */
985 if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
986 return 0;
987 else
988 return dev_priv->wait_evnt(dev_priv, event_e);
989}
990
991/*
992 * DMA buffer management
993 */
994
995static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
996{
997 drm_buf_t *buf;
998 int i;
999
1000 for (i = d->granted_count; i < d->request_count; i++) {
1001 buf = savage_freelist_get(dev);
1002 if (!buf)
1003 return DRM_ERR(EAGAIN);
1004
1005 buf->filp = filp;
1006
1007 if (DRM_COPY_TO_USER(&d->request_indices[i],
1008 &buf->idx, sizeof(buf->idx)))
1009 return DRM_ERR(EFAULT);
1010 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1011 &buf->total, sizeof(buf->total)))
1012 return DRM_ERR(EFAULT);
1013
1014 d->granted_count++;
1015 }
1016 return 0;
1017}
1018
1019int savage_bci_buffers(DRM_IOCTL_ARGS)
1020{
1021 DRM_DEVICE;
1022 drm_device_dma_t *dma = dev->dma;
1023 drm_dma_t d;
1024 int ret = 0;
1025
1026 LOCK_TEST_WITH_RETURN(dev, filp);
1027
1028 DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
1029
1030 /* Please don't send us buffers.
1031 */
1032 if (d.send_count != 0) {
1033 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1034 DRM_CURRENTPID, d.send_count);
1035 return DRM_ERR(EINVAL);
1036 }
1037
1038 /* We'll send you buffers.
1039 */
1040 if (d.request_count < 0 || d.request_count > dma->buf_count) {
1041 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1042 DRM_CURRENTPID, d.request_count, dma->buf_count);
1043 return DRM_ERR(EINVAL);
1044 }
1045
1046 d.granted_count = 0;
1047
1048 if (d.request_count) {
1049 ret = savage_bci_get_buffers(filp, dev, &d);
1050 }
1051
1052 DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
1053
1054 return ret;
1055}
1056
1057void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
1058 drm_device_dma_t *dma = dev->dma;
1059 drm_savage_private_t *dev_priv = dev->dev_private;
1060 int i;
1061
1062 if (!dma)
1063 return;
1064 if (!dev_priv)
1065 return;
1066 if (!dma->buflist)
1067 return;
1068
1069 /*i830_flush_queue(dev);*/
1070
1071 for (i = 0; i < dma->buf_count; i++) {
1072 drm_buf_t *buf = dma->buflist[i];
1073 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1074
1075 if (buf->filp == filp && buf_priv &&
1076 buf_priv->next == NULL && buf_priv->prev == NULL) {
1077 uint16_t event;
1078 DRM_DEBUG("reclaimed from client\n");
1079 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1080 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1081 savage_freelist_put(dev, buf);
1082 }
1083 }
1084
1085 drm_core_reclaim_buffers(dev, filp);
1086}
1087
1088
1089drm_ioctl_desc_t savage_ioctls[] = {
1090 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
1091 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
1092 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
1093 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
1094};
1095
1096int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
new file mode 100644
index 000000000000..6526c9aa7589
--- /dev/null
+++ b/drivers/char/drm/savage_drm.h
@@ -0,0 +1,209 @@
1/* savage_drm.h -- Public header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__
28
29#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__
31
32/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k.
34 *
35 * Clients may subdivide regions internally, but when sharing between
36 * clients, the region size is the minimum granularity.
37 */
38
39#define SAVAGE_CARD_HEAP 0
40#define SAVAGE_AGP_HEAP 1
41#define SAVAGE_NR_TEX_HEAPS 2
42#define SAVAGE_NR_TEX_REGIONS 16
43#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
44
45#endif /* __SAVAGE_SAREA_DEFINES__ */
46
47typedef struct _drm_savage_sarea {
48 /* LRU lists for texture memory in agp space and on the card.
49 */
50 drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
51 unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
52
53 /* Mechanism to validate card state.
54 */
55 int ctxOwner;
56} drm_savage_sarea_t, *drm_savage_sarea_ptr;
57
58/* Savage-specific ioctls
59 */
60#define DRM_SAVAGE_BCI_INIT 0x00
61#define DRM_SAVAGE_BCI_CMDBUF 0x01
62#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
63#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
64
65#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
66#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
67#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
68#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
69
70#define SAVAGE_DMA_PCI 1
71#define SAVAGE_DMA_AGP 3
72typedef struct drm_savage_init {
73 enum {
74 SAVAGE_INIT_BCI = 1,
75 SAVAGE_CLEANUP_BCI = 2
76 } func;
77 unsigned int sarea_priv_offset;
78
79 /* some parameters */
80 unsigned int cob_size;
81 unsigned int bci_threshold_lo, bci_threshold_hi;
82 unsigned int dma_type;
83
84 /* frame buffer layout */
85 unsigned int fb_bpp;
86 unsigned int front_offset, front_pitch;
87 unsigned int back_offset, back_pitch;
88 unsigned int depth_bpp;
89 unsigned int depth_offset, depth_pitch;
90
91 /* local textures */
92 unsigned int texture_offset;
93 unsigned int texture_size;
94
95 /* physical locations of non-permanent maps */
96 unsigned long status_offset;
97 unsigned long buffers_offset;
98 unsigned long agp_textures_offset;
99 unsigned long cmd_dma_offset;
100} drm_savage_init_t;
101
102typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
103typedef struct drm_savage_cmdbuf {
104 /* command buffer in client's address space */
105 drm_savage_cmd_header_t __user *cmd_addr;
106 unsigned int size; /* size of the command buffer in 64bit units */
107
108 unsigned int dma_idx; /* DMA buffer index to use */
109 int discard; /* discard DMA buffer when done */
110 /* vertex buffer in client's address space */
111 unsigned int __user *vb_addr;
112 unsigned int vb_size; /* size of client vertex buffer in bytes */
113 unsigned int vb_stride; /* stride of vertices in 32bit words */
114 /* boxes in client's address space */
115 drm_clip_rect_t __user *box_addr;
116 unsigned int nbox; /* number of clipping boxes */
117} drm_savage_cmdbuf_t;
118
119#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
120#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
121#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
122typedef struct drm_savage_event {
123 unsigned int count;
124 unsigned int flags;
125} drm_savage_event_emit_t, drm_savage_event_wait_t;
126
127/* Commands for the cmdbuf ioctl
128 */
129#define SAVAGE_CMD_STATE 0 /* a range of state registers */
130#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
131#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
132#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
133#define SAVAGE_CMD_VB_IDX	4	/* indexed vertices from client vertex buffer */
134#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
135#define SAVAGE_CMD_SWAP 6 /* swap buffers */
136
137/* Primitive types
138*/
139#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
140#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
141#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
142#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
143 * shading on s3d */
144
145/* Skip flags (vertex format)
146 */
147#define SAVAGE_SKIP_Z 0x01
148#define SAVAGE_SKIP_W 0x02
149#define SAVAGE_SKIP_C0 0x04
150#define SAVAGE_SKIP_C1 0x08
151#define SAVAGE_SKIP_S0 0x10
152#define SAVAGE_SKIP_T0 0x20
153#define SAVAGE_SKIP_ST0 0x30
154#define SAVAGE_SKIP_S1 0x40
155#define SAVAGE_SKIP_T1 0x80
156#define SAVAGE_SKIP_ST1 0xc0
157#define SAVAGE_SKIP_ALL_S3D 0x3f
158#define SAVAGE_SKIP_ALL_S4 0xff
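/* Example (illustrative): a full Savage4 vertex is 10 dwords, while
 * vertex DMA requires exactly 8-dword vertices, so a valid DMA skip
 * mask omits exactly two components, e.g. SAVAGE_SKIP_Z|SAVAGE_SKIP_W.
 * Savage3D vertex DMA accepts only skip == 0 (see savage_state.c).
 */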
159
160/* Buffer names for clear command
161 */
162#define SAVAGE_FRONT 0x1
163#define SAVAGE_BACK 0x2
164#define SAVAGE_DEPTH 0x4
165
166/* 64-bit command header
167 */
168union drm_savage_cmd_header {
169 struct {
170 unsigned char cmd; /* command */
171 unsigned char pad0;
172 unsigned short pad1;
173 unsigned short pad2;
174 unsigned short pad3;
175 } cmd; /* generic */
176 struct {
177 unsigned char cmd;
178 unsigned char global; /* need idle engine? */
179 unsigned short count; /* number of consecutive registers */
180 unsigned short start; /* first register */
181 unsigned short pad3;
182 } state; /* SAVAGE_CMD_STATE */
183 struct {
184 unsigned char cmd;
185 unsigned char prim; /* primitive type */
186 unsigned short skip; /* vertex format (skip flags) */
187 unsigned short count; /* number of vertices */
188 unsigned short start; /* first vertex in DMA/vertex buffer */
189 } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
190 struct {
191 unsigned char cmd;
192 unsigned char prim;
193 unsigned short skip;
194 unsigned short count; /* number of indices that follow */
195 unsigned short pad3;
196 } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
197 struct {
198 unsigned char cmd;
199 unsigned char pad0;
200 unsigned short pad1;
201 unsigned int flags;
202 } clear0; /* SAVAGE_CMD_CLEAR */
203 struct {
204 unsigned int mask;
205 unsigned int value;
206 } clear1; /* SAVAGE_CMD_CLEAR data */
207};
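/* Illustrative encoding (a sketch, not itself part of the ABI): a
 * client filling in a SAVAGE_CMD_STATE header that updates two
 * consecutive registers; the register values follow the header as
 * 32-bit data words in the command buffer.
 *
 *	union drm_savage_cmd_header h = {{0}};
 *	h.state.cmd    = SAVAGE_CMD_STATE;
 *	h.state.global = 0;	// no engine idle required
 *	h.state.count  = 2;	// two consecutive registers
 *	h.state.start  = 0x35;	// e.g. SAVAGE_SCSTART_S3D from savage_drv.h
 */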
208
209#endif
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
new file mode 100644
index 000000000000..ac8d270427ca
--- /dev/null
+++ b/drivers/char/drm/savage_drv.c
@@ -0,0 +1,112 @@
1/* savage_drv.c -- Savage driver for Linux
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/config.h>
27#include "drmP.h"
28#include "savage_drm.h"
29#include "savage_drv.h"
30
31#include "drm_pciids.h"
32
33static int postinit( struct drm_device *dev, unsigned long flags )
34{
35 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
36 DRIVER_NAME,
37 DRIVER_MAJOR,
38 DRIVER_MINOR,
39 DRIVER_PATCHLEVEL,
40 DRIVER_DATE,
41 dev->primary.minor,
42 pci_pretty_name(dev->pdev)
43 );
44 return 0;
45}
46
47static int version( drm_version_t *version )
48{
49 int len;
50
51 version->version_major = DRIVER_MAJOR;
52 version->version_minor = DRIVER_MINOR;
53 version->version_patchlevel = DRIVER_PATCHLEVEL;
54 DRM_COPY( version->name, DRIVER_NAME );
55 DRM_COPY( version->date, DRIVER_DATE );
56 DRM_COPY( version->desc, DRIVER_DESC );
57 return 0;
58}
59
60static struct pci_device_id pciidlist[] = {
61 savage_PCI_IDS
62};
63
64extern drm_ioctl_desc_t savage_ioctls[];
65extern int savage_max_ioctl;
66
67static struct drm_driver driver = {
68 .driver_features =
69 DRIVER_USE_AGP | DRIVER_USE_MTRR |
70 DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
71 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
72 .preinit = savage_preinit,
73 .postinit = postinit,
74 .postcleanup = savage_postcleanup,
75 .reclaim_buffers = savage_reclaim_buffers,
76 .get_map_ofs = drm_core_get_map_ofs,
77 .get_reg_ofs = drm_core_get_reg_ofs,
78 .version = version,
79 .ioctls = savage_ioctls,
80 .dma_ioctl = savage_bci_buffers,
81 .fops = {
82 .owner = THIS_MODULE,
83 .open = drm_open,
84 .release = drm_release,
85 .ioctl = drm_ioctl,
86 .mmap = drm_mmap,
87 .poll = drm_poll,
88 .fasync = drm_fasync,
89 },
90 .pci_driver = {
91 .name = DRIVER_NAME,
92 .id_table = pciidlist,
93 }
94};
95
96static int __init savage_init(void)
97{
98 driver.num_ioctls = savage_max_ioctl;
99 return drm_init(&driver);
100}
101
102static void __exit savage_exit(void)
103{
104 drm_exit(&driver);
105}
106
107module_init(savage_init);
108module_exit(savage_exit);
109
110MODULE_AUTHOR( DRIVER_AUTHOR );
111MODULE_DESCRIPTION( DRIVER_DESC );
112MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
new file mode 100644
index 000000000000..a45434944658
--- /dev/null
+++ b/drivers/char/drm/savage_drv.h
@@ -0,0 +1,579 @@
1/* savage_drv.h -- Private header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRV_H__
27#define __SAVAGE_DRV_H__
28
29#define DRIVER_AUTHOR "Felix Kuehling"
30
31#define DRIVER_NAME "savage"
32#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
33#define DRIVER_DATE "20050313"
34
35#define DRIVER_MAJOR 2
36#define DRIVER_MINOR 4
37#define DRIVER_PATCHLEVEL 1
38/* Interface history:
39 *
40 * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
41 * 2.0 The first real DRM
42 * 2.1 Scissors registers managed by the DRM, 3D operations clipped by
43 * cliprects of the cmdbuf ioctl
44 * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
45 * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
46 * wide and thus very long lived (unlikely to ever wrap). The size
47 * in the struct was 32 bits before, but only 16 bits were used
48 * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
49 * actually used
50 */
51
52typedef struct drm_savage_age {
53 uint16_t event;
54 unsigned int wrap;
55} drm_savage_age_t;
56
57typedef struct drm_savage_buf_priv {
58 struct drm_savage_buf_priv *next;
59 struct drm_savage_buf_priv *prev;
60 drm_savage_age_t age;
61 drm_buf_t *buf;
62} drm_savage_buf_priv_t;
63
64typedef struct drm_savage_dma_page {
65 drm_savage_age_t age;
66 unsigned int used, flushed;
67} drm_savage_dma_page_t;
68#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
69/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
70 * size of 16kbytes or 4k entries. Minimum requirement would be
71 * 10kbytes for 255 40-byte vertices in one drawing command. */
72#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
73
74/* interesting bits of hardware state that are saved in dev_priv */
75typedef union {
76 struct drm_savage_common_state {
77 uint32_t vbaddr;
78 } common;
79 struct {
80 unsigned char pad[sizeof(struct drm_savage_common_state)];
81 uint32_t texctrl, texaddr;
82 uint32_t scstart, new_scstart;
83 uint32_t scend, new_scend;
84 } s3d;
85 struct {
86 unsigned char pad[sizeof(struct drm_savage_common_state)];
87 uint32_t texdescr, texaddr0, texaddr1;
88 uint32_t drawctrl0, new_drawctrl0;
89 uint32_t drawctrl1, new_drawctrl1;
90 } s4;
91} drm_savage_state_t;
92
93/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
94enum savage_family {
95 S3_UNKNOWN = 0,
96 S3_SAVAGE3D,
97 S3_SAVAGE_MX,
98 S3_SAVAGE4,
99 S3_PROSAVAGE,
100 S3_TWISTER,
101 S3_PROSAVAGEDDR,
102 S3_SUPERSAVAGE,
103 S3_SAVAGE2000,
104 S3_LAST
105};
106
107#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
108
109#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
110 || (chip==S3_PROSAVAGE) \
111 || (chip==S3_TWISTER) \
112 || (chip==S3_PROSAVAGEDDR))
113
114#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
115
116#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
117
118#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
119 ||(chip==S3_PROSAVAGEDDR))
120
121/* flags */
122#define SAVAGE_IS_AGP 1
123
124typedef struct drm_savage_private {
125 drm_savage_sarea_t *sarea_priv;
126
127 drm_savage_buf_priv_t head, tail;
128
129 /* who am I? */
130 enum savage_family chipset;
131
132 unsigned int cob_size;
133 unsigned int bci_threshold_lo, bci_threshold_hi;
134 unsigned int dma_type;
135
136 /* frame buffer layout */
137 unsigned int fb_bpp;
138 unsigned int front_offset, front_pitch;
139 unsigned int back_offset, back_pitch;
140 unsigned int depth_bpp;
141 unsigned int depth_offset, depth_pitch;
142
143 /* bitmap descriptors for swap and clear */
144 unsigned int front_bd, back_bd, depth_bd;
145
146 /* local textures */
147 unsigned int texture_offset;
148 unsigned int texture_size;
149
150 /* memory regions in physical memory */
151 drm_local_map_t *sarea;
152 drm_local_map_t *mmio;
153 drm_local_map_t *fb;
154 drm_local_map_t *aperture;
155 drm_local_map_t *status;
156 drm_local_map_t *agp_textures;
157 drm_local_map_t *cmd_dma;
158 drm_local_map_t fake_dma;
159
160 struct {
161 int handle;
162 unsigned long base, size;
163 } mtrr[3];
164
165 /* BCI and status-related stuff */
166 volatile uint32_t *status_ptr, *bci_ptr;
167 uint32_t status_used_mask;
168 uint16_t event_counter;
169 unsigned int event_wrap;
170
171 /* Savage4 command DMA */
172 drm_savage_dma_page_t *dma_pages;
173 unsigned int nr_dma_pages, first_dma_page, current_dma_page;
174 drm_savage_age_t last_dma_age;
175
176 /* saved hw state for global/local check on S3D */
177 uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
178 /* and for scissors (global, so don't emit if not changed) */
179 uint32_t hw_scissors_start, hw_scissors_end;
180
181 drm_savage_state_t state;
182
183 /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
184 unsigned int waiting;
185
186 /* config/hardware-dependent function pointers */
187 int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
188 int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
189 /* Err, there is a macro wait_event in include/linux/wait.h.
190 * Avoid unwanted macro expansion. */
191 void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
192 drm_clip_rect_t *pbox);
193 void (*dma_flush)(struct drm_savage_private *dev_priv);
194} drm_savage_private_t;
195
196/* ioctls */
197extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
198extern int savage_bci_buffers(DRM_IOCTL_ARGS);
199
200/* BCI functions */
201extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
202 unsigned int flags);
203extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
204extern void savage_dma_reset(drm_savage_private_t *dev_priv);
205extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
206extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
207 unsigned int n);
208extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
209extern int savage_postcleanup(drm_device_t *dev);
210extern int savage_do_cleanup_bci(drm_device_t *dev);
211extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
212
213/* state functions */
214extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
215 drm_clip_rect_t *pbox);
216extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
217 drm_clip_rect_t *pbox);
218
219#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
220#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
221#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
222#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
223#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
224
225#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
226 * inside the MMIO region */
227#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
228 * BCI FIFO */
229
230/*
231 * MMIO registers
232 */
233#define SAVAGE_STATUS_WORD0 0x48C00
234#define SAVAGE_STATUS_WORD1 0x48C04
235#define SAVAGE_ALT_STATUS_WORD0 0x48C60
236
237#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
238#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
239
240/* Copied from savage_bci.h in the 2D driver with some renaming. */
241
242/* Bitmap descriptors */
243#define SAVAGE_BD_STRIDE_SHIFT 0
244#define SAVAGE_BD_BPP_SHIFT 16
245#define SAVAGE_BD_TILE_SHIFT 24
246#define SAVAGE_BD_BW_DISABLE (1<<28)
247/* common: */
248#define SAVAGE_BD_TILE_LINEAR 0
249/* savage4, MX, IX, 3D */
250#define SAVAGE_BD_TILE_16BPP 2
251#define SAVAGE_BD_TILE_32BPP 3
252/* twister, prosavage, DDR, supersavage, 2000 */
253#define SAVAGE_BD_TILE_DEST 1
254#define SAVAGE_BD_TILE_TEXTURE 2
255/* GBD - BCI enable */
256/* savage4, MX, IX, 3D */
257#define SAVAGE_GBD_BCI_ENABLE 8
258/* twister, prosavage, DDR, supersavage, 2000 */
259#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
260
261#define SAVAGE_GBD_BIG_ENDIAN 4
262#define SAVAGE_GBD_LITTLE_ENDIAN 0
263#define SAVAGE_GBD_64 1
264
265/* Global Bitmap Descriptor */
266#define SAVAGE_BCI_GLB_BD_LOW 0x8168
267#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
268
269/*
270 * BCI registers
271 */
272/* Savage4/Twister/ProSavage 3D registers */
273#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
274#define SAVAGE_TEXPALADDR_S4 0x1f
275#define SAVAGE_TEXCTRL0_S4 0x20
276#define SAVAGE_TEXCTRL1_S4 0x21
277#define SAVAGE_TEXADDR0_S4 0x22
278#define SAVAGE_TEXADDR1_S4 0x23
279#define SAVAGE_TEXBLEND0_S4 0x24
280#define SAVAGE_TEXBLEND1_S4 0x25
281#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
282#define SAVAGE_TEXDESCR_S4 0x27
283#define SAVAGE_FOGTABLE_S4 0x28
284#define SAVAGE_FOGCTRL_S4 0x30
285#define SAVAGE_STENCILCTRL_S4 0x31
286#define SAVAGE_ZBUFCTRL_S4 0x32
287#define SAVAGE_ZBUFOFF_S4 0x33
288#define SAVAGE_DESTCTRL_S4 0x34
289#define SAVAGE_DRAWCTRL0_S4 0x35
290#define SAVAGE_DRAWCTRL1_S4 0x36
291#define SAVAGE_ZWATERMARK_S4 0x37
292#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
293#define SAVAGE_TEXBLENDCOLOR_S4 0x39
294/* Savage3D/MX/IX 3D registers */
295#define SAVAGE_TEXPALADDR_S3D 0x18
296#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
297#define SAVAGE_TEXADDR_S3D 0x1A
298#define SAVAGE_TEXDESCR_S3D 0x1B
299#define SAVAGE_TEXCTRL_S3D 0x1C
300#define SAVAGE_FOGTABLE_S3D 0x20
301#define SAVAGE_FOGCTRL_S3D 0x30
302#define SAVAGE_DRAWCTRL_S3D 0x31
303#define SAVAGE_ZBUFCTRL_S3D 0x32
304#define SAVAGE_ZBUFOFF_S3D 0x33
305#define SAVAGE_DESTCTRL_S3D 0x34
306#define SAVAGE_SCSTART_S3D 0x35
307#define SAVAGE_SCEND_S3D 0x36
308#define SAVAGE_ZWATERMARK_S3D 0x37
309#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
310/* common stuff */
311#define SAVAGE_VERTBUFADDR 0x3e
312#define SAVAGE_BITPLANEWTMASK 0xd7
313#define SAVAGE_DMABUFADDR 0x51
314
315/* texture enable bits (needed for tex addr checking) */
316#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
317#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
318#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
319
320/* Global fields in Savage4/Twister/ProSavage 3D registers:
321 *
322 * All texture registers and DrawLocalCtrl are local. All other
323 * registers are global. */
324
325/* Global fields in Savage3D/MX/IX 3D registers:
326 *
327 * All texture registers are local. DrawCtrl and ZBufCtrl are
328 * partially local. All other registers are global.
329 *
330 * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
331 * ZBufCtrl global fields: zCmpFunc, zBufEn
332 */
333#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
334#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
335
336/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
337 */
338#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
339#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
340
341/*
342 * BCI commands
343 */
344#define BCI_CMD_NOP 0x40000000
345#define BCI_CMD_RECT 0x48000000
346#define BCI_CMD_RECT_XP 0x01000000
347#define BCI_CMD_RECT_YP 0x02000000
348#define BCI_CMD_SCANLINE 0x50000000
349#define BCI_CMD_LINE 0x5C000000
350#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
351#define BCI_CMD_BYTE_TEXT 0x63000000
352#define BCI_CMD_NT_BYTE_TEXT 0x67000000
353#define BCI_CMD_BIT_TEXT 0x6C000000
354#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
355#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
356#define BCI_CMD_SEND_COLOR 0x00008000
357
358#define BCI_CMD_CLIP_NONE 0x00000000
359#define BCI_CMD_CLIP_CURRENT 0x00002000
360#define BCI_CMD_CLIP_LR 0x00004000
361#define BCI_CMD_CLIP_NEW 0x00006000
362
363#define BCI_CMD_DEST_GBD 0x00000000
364#define BCI_CMD_DEST_PBD 0x00000800
365#define BCI_CMD_DEST_PBD_NEW 0x00000C00
366#define BCI_CMD_DEST_SBD 0x00001000
367#define BCI_CMD_DEST_SBD_NEW 0x00001400
368
369#define BCI_CMD_SRC_TRANSPARENT 0x00000200
370#define BCI_CMD_SRC_SOLID 0x00000000
371#define BCI_CMD_SRC_GBD 0x00000020
372#define BCI_CMD_SRC_COLOR 0x00000040
373#define BCI_CMD_SRC_MONO 0x00000060
374#define BCI_CMD_SRC_PBD_COLOR 0x00000080
375#define BCI_CMD_SRC_PBD_MONO 0x000000A0
376#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
377#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
378#define BCI_CMD_SRC_SBD_COLOR 0x00000100
379#define BCI_CMD_SRC_SBD_MONO 0x00000120
380#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
381#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
382
383#define BCI_CMD_PAT_TRANSPARENT 0x00000010
384#define BCI_CMD_PAT_NONE 0x00000000
385#define BCI_CMD_PAT_COLOR 0x00000002
386#define BCI_CMD_PAT_MONO 0x00000003
387#define BCI_CMD_PAT_PBD_COLOR 0x00000004
388#define BCI_CMD_PAT_PBD_MONO 0x00000005
389#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
390#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
391#define BCI_CMD_PAT_SBD_COLOR 0x00000008
392#define BCI_CMD_PAT_SBD_MONO 0x00000009
393#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
394#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
395
396#define BCI_BD_BW_DISABLE 0x10000000
397#define BCI_BD_TILE_MASK 0x03000000
398#define BCI_BD_TILE_NONE 0x00000000
399#define BCI_BD_TILE_16 0x02000000
400#define BCI_BD_TILE_32 0x03000000
401#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
402#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
403#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
404#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
405
406#define BCI_CMD_SET_REGISTER 0x96000000
407
408#define BCI_CMD_WAIT 0xC0000000
409#define BCI_CMD_WAIT_3D 0x00010000
410#define BCI_CMD_WAIT_2D 0x00020000
411
412#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
413
414#define BCI_CMD_DRAW_PRIM 0x80000000
415#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
416#define BCI_CMD_DRAW_CONT 0x01000000
417#define BCI_CMD_DRAW_TRILIST 0x00000000
418#define BCI_CMD_DRAW_TRISTRIP 0x02000000
419#define BCI_CMD_DRAW_TRIFAN 0x04000000
420#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
421#define BCI_CMD_DRAW_NO_Z 0x00000001
422#define BCI_CMD_DRAW_NO_W 0x00000002
423#define BCI_CMD_DRAW_NO_CD 0x00000004
424#define BCI_CMD_DRAW_NO_CS 0x00000008
425#define BCI_CMD_DRAW_NO_U0 0x00000010
426#define BCI_CMD_DRAW_NO_V0 0x00000020
427#define BCI_CMD_DRAW_NO_UV0 0x00000030
428#define BCI_CMD_DRAW_NO_U1 0x00000040
429#define BCI_CMD_DRAW_NO_V1 0x00000080
430#define BCI_CMD_DRAW_NO_UV1 0x000000c0
431
432#define BCI_CMD_DMA 0xa8000000
433
434#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
435#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
436#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
437#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
438#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
439#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
440
441#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
442#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
443#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
444 (((maj) & 0x1FFF) | \
445 ((ym) ? 1<<13 : 0) | \
446 ((xp) ? 1<<14 : 0) | \
447 ((yp) ? 1<<15 : 0) | \
448 ((err) << 16))
449
450/*
451 * common commands
452 */
453#define BCI_SET_REGISTERS( first, n ) \
454 BCI_WRITE(BCI_CMD_SET_REGISTER | \
455 ((uint32_t)(n) & 0xff) << 16 | \
456 ((uint32_t)(first) & 0xffff))
457#define DMA_SET_REGISTERS( first, n ) \
458 DMA_WRITE(BCI_CMD_SET_REGISTER | \
459 ((uint32_t)(n) & 0xff) << 16 | \
460 ((uint32_t)(first) & 0xffff))
461
462#define BCI_DRAW_PRIMITIVE(n, type, skip) \
463 BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
464 ((n) << 16))
465#define DMA_DRAW_PRIMITIVE(n, type, skip) \
466 DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
467 ((n) << 16))
468
469#define BCI_DRAW_INDICES_S3D(n, type, i0) \
470 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
471 ((n) << 16) | (i0))
472
473#define BCI_DRAW_INDICES_S4(n, type, skip) \
474 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
475 (skip) | ((n) << 16))
476
477#define BCI_DMA(n) \
478 BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
479
480/*
481 * access to MMIO
482 */
483#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
484#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
485
486/*
487 * access to the burst command interface (BCI)
488 */
489#define SAVAGE_BCI_DEBUG 1
490
491#define BCI_LOCALS volatile uint32_t *bci_ptr;
492
493#define BEGIN_BCI( n ) do { \
494 dev_priv->wait_fifo(dev_priv, (n)); \
495 bci_ptr = dev_priv->bci_ptr; \
496} while(0)
497
498#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
499
500#define BCI_COPY_FROM_USER(src,n) do { \
501 unsigned int i; \
502 for (i = 0; i < n; ++i) { \
503 uint32_t val; \
504 DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
505 BCI_WRITE(val); \
506 } \
507} while(0)
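/* Typical BCI usage, as in savage_dispatch_dma_prim in savage_state.c
 * (sketch):
 *
 *	BCI_LOCALS;
 *	BEGIN_BCI(2);
 *	BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
 *	BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
 */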
508
509/*
510 * command DMA support
511 */
512#define SAVAGE_DMA_DEBUG 1
513
514#define DMA_LOCALS uint32_t *dma_ptr;
515
516#define BEGIN_DMA( n ) do { \
517 unsigned int cur = dev_priv->current_dma_page; \
518 unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
519 dev_priv->dma_pages[cur].used; \
520 if ((n) > rest) { \
521 dma_ptr = savage_dma_alloc(dev_priv, (n)); \
522 } else { /* fast path for small allocations */ \
523 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
524 cur * SAVAGE_DMA_PAGE_SIZE + \
525 dev_priv->dma_pages[cur].used; \
526 if (dev_priv->dma_pages[cur].used == 0) \
527 savage_dma_wait(dev_priv, cur); \
528 dev_priv->dma_pages[cur].used += (n); \
529 } \
530} while(0)
531
532#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
533
534#define DMA_COPY_FROM_USER(src,n) do { \
535 DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
536 dma_ptr += n; \
537} while(0)
538
539#if SAVAGE_DMA_DEBUG
540#define DMA_COMMIT() do { \
541 unsigned int cur = dev_priv->current_dma_page; \
542 uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
543 cur * SAVAGE_DMA_PAGE_SIZE + \
544 dev_priv->dma_pages[cur].used; \
545 if (dma_ptr != expected) { \
546 DRM_ERROR("DMA allocation and use don't match: " \
547 "%p != %p\n", expected, dma_ptr); \
548 savage_dma_reset(dev_priv); \
549 } \
550} while(0)
551#else
552#define DMA_COMMIT() do {/* nothing */} while(0)
553#endif
554
555#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
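/* Typical command DMA usage, as in savage_emit_clip_rect_s3d in
 * savage_state.c (sketch); DMA_FLUSH() runs later, when the buffered
 * commands must actually reach the hardware:
 *
 *	DMA_LOCALS;
 *	BEGIN_DMA(4);
 *	DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
 *	DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
 *	DMA_WRITE(scstart);
 *	DMA_WRITE(scend);
 *	DMA_COMMIT();
 */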
556
557/* Buffer aging via event tag
558 */
559
560#define UPDATE_EVENT_COUNTER( ) do { \
561 if (dev_priv->status_ptr) { \
562 uint16_t count; \
563 /* coordinate with Xserver */ \
564 count = dev_priv->status_ptr[1023]; \
565 if (count < dev_priv->event_counter) \
566 dev_priv->event_wrap++; \
567 dev_priv->event_counter = count; \
568 } \
569} while(0)
570
571#define SET_AGE( age, e, w ) do { \
572 (age)->event = e; \
573 (age)->wrap = w; \
574} while(0)
575
576#define TEST_AGE( age, e, w ) \
577 ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
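/* Usage sketch (cf. the discard path in savage_bci_cmdbuf): tag a DMA
 * buffer with the current event so the freelist can tell when the
 * hardware has finished with it:
 *
 *	event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
 *	SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
 *	...
 *	UPDATE_EVENT_COUNTER();
 *	if (TEST_AGE(&buf_priv->age, dev_priv->event_counter,
 *	             dev_priv->event_wrap))
 *		... the buffer is idle and may be reused ...
 */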
578
579#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
new file mode 100644
index 000000000000..475695a00083
--- /dev/null
+++ b/drivers/char/drm/savage_state.c
@@ -0,0 +1,1146 @@
1/* savage_state.c -- State and drawing support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
30 drm_clip_rect_t *pbox)
31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t)pbox->x1 & 0x000007ff) |
36 (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t)pbox->x2-1) & 0x000007ff) |
39 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) {
42 DMA_LOCALS;
43 BEGIN_DMA(4);
44 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
46 DMA_WRITE(scstart);
47 DMA_WRITE(scend);
48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1;
51 DMA_COMMIT();
52 }
53}
54
55void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
56 drm_clip_rect_t *pbox)
57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t)pbox->x1 & 0x000007ff) |
62 (((uint32_t)pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t)pbox->x2-1) & 0x000007ff) |
65 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
68 DMA_LOCALS;
69 BEGIN_DMA(4);
70 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
72 DMA_WRITE(drawctrl0);
73 DMA_WRITE(drawctrl1);
74 dev_priv->state.s4.drawctrl0 = drawctrl0;
75 dev_priv->state.s4.drawctrl1 = drawctrl1;
76 dev_priv->waiting = 1;
77 DMA_COMMIT();
78 }
79}
80
81static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
82 uint32_t addr)
83{
84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL);
87 }
88 if (!(addr & 1)) { /* local */
89 addr &= ~7;
90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset+dev_priv->texture_size) {
92 DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
93 unit, addr);
94 return DRM_ERR(EINVAL);
95 }
96 } else { /* AGP */
97 if (!dev_priv->agp_textures) {
98 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
99 unit, addr);
100 return DRM_ERR(EINVAL);
101 }
102 addr &= ~7;
103 if (addr < dev_priv->agp_textures->offset ||
104 addr >= (dev_priv->agp_textures->offset +
105 dev_priv->agp_textures->size)) {
106 DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
107 unit, addr);
108 return DRM_ERR(EINVAL);
109 }
110 }
111 return 0;
112}
113
114#define SAVE_STATE(reg,where) \
115 if(start <= reg && start+count > reg) \
116 DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
117#define SAVE_STATE_MASK(reg,where,mask) do { \
118 if(start <= reg && start+count > reg) { \
119 uint32_t tmp; \
120 DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
121 dev_priv->state.where = (tmp & (mask)) | \
122 (dev_priv->state.where & ~(mask)); \
123 } \
124} while (0)
125static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
126 unsigned int start, unsigned int count,
127 const uint32_t __user *regs)
128{
129 if (start < SAVAGE_TEXPALADDR_S3D ||
130 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
131 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
132 start, start+count-1);
133 return DRM_ERR(EINVAL);
134 }
135
136 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
137 ~SAVAGE_SCISSOR_MASK_S3D);
138 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
139 ~SAVAGE_SCISSOR_MASK_S3D);
140
141 /* if any texture regs were changed ... */
142 if (start <= SAVAGE_TEXCTRL_S3D &&
143 start+count > SAVAGE_TEXPALADDR_S3D) {
144 /* ... check texture state */
145 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
146 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
147 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
148 return savage_verify_texaddr(
149 dev_priv, 0, dev_priv->state.s3d.texaddr);
150 }
151
152 return 0;
153}
154
155static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
156 unsigned int start, unsigned int count,
157 const uint32_t __user *regs)
158{
159 int ret = 0;
160
161 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
162 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
163 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
164 start, start+count-1);
165 return DRM_ERR(EINVAL);
166 }
167
168 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
169 ~SAVAGE_SCISSOR_MASK_S4);
170 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
171 ~SAVAGE_SCISSOR_MASK_S4);
172
173 /* if any texture regs were changed ... */
174 if (start <= SAVAGE_TEXDESCR_S4 &&
175 start+count > SAVAGE_TEXPALADDR_S4) {
176 /* ... check texture state */
177 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
178 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
179 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
180 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
181 ret |= savage_verify_texaddr(
182 dev_priv, 0, dev_priv->state.s4.texaddr0);
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
184 ret |= savage_verify_texaddr(
185 dev_priv, 1, dev_priv->state.s4.texaddr1);
186 }
187
188 return ret;
189}
190#undef SAVE_STATE
191#undef SAVE_STATE_MASK
192
193static int savage_dispatch_state(drm_savage_private_t *dev_priv,
194 const drm_savage_cmd_header_t *cmd_header,
195 const uint32_t __user *regs)
196{
197 unsigned int count = cmd_header->state.count;
198 unsigned int start = cmd_header->state.start;
199 unsigned int count2 = 0;
200 unsigned int bci_size;
201 int ret;
202 DMA_LOCALS;
203
204 if (!count)
205 return 0;
206
207 if (DRM_VERIFYAREA_READ(regs, count*4))
208 return DRM_ERR(EFAULT);
209
210 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
211 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
212 if (ret != 0)
213 return ret;
214 /* scissor regs are emitted in savage_dispatch_draw */
215 if (start < SAVAGE_SCSTART_S3D) {
216 if (start+count > SAVAGE_SCEND_S3D+1)
217 count2 = count - (SAVAGE_SCEND_S3D+1 - start);
218 if (start+count > SAVAGE_SCSTART_S3D)
219 count = SAVAGE_SCSTART_S3D - start;
220 } else if (start <= SAVAGE_SCEND_S3D) {
221 if (start+count > SAVAGE_SCEND_S3D+1) {
222 count -= SAVAGE_SCEND_S3D+1 - start;
223 start = SAVAGE_SCEND_S3D+1;
224 } else
225 return 0;
226 }
227 } else {
228 ret = savage_verify_state_s4(dev_priv, start, count, regs);
229 if (ret != 0)
230 return ret;
231 /* scissor regs are emitted in savage_dispatch_draw */
232 if (start < SAVAGE_DRAWCTRL0_S4) {
233 if (start+count > SAVAGE_DRAWCTRL1_S4+1)
234 count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
235 if (start+count > SAVAGE_DRAWCTRL0_S4)
236 count = SAVAGE_DRAWCTRL0_S4 - start;
237 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
238 if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
239 count -= SAVAGE_DRAWCTRL1_S4+1 - start;
240 start = SAVAGE_DRAWCTRL1_S4+1;
241 } else
242 return 0;
243 }
244 }
245
246 bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
247
248 if (cmd_header->state.global) {
249 BEGIN_DMA(bci_size+1);
250 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
251 dev_priv->waiting = 1;
252 } else {
253 BEGIN_DMA(bci_size);
254 }
255
256 do {
257 while (count > 0) {
258 unsigned int n = count < 255 ? count : 255;
259 DMA_SET_REGISTERS(start, n);
260 DMA_COPY_FROM_USER(regs, n);
261 count -= n;
262 start += n;
263 regs += n;
264 }
265 start += 2;
266 regs += 2;
267 count = count2;
268 count2 = 0;
269 } while (count);
270
271 DMA_COMMIT();
272
273 return 0;
274}
275
276static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
277 const drm_savage_cmd_header_t *cmd_header,
278 const drm_buf_t *dmabuf)
279{
280 unsigned char reorder = 0;
281 unsigned int prim = cmd_header->prim.prim;
282 unsigned int skip = cmd_header->prim.skip;
283 unsigned int n = cmd_header->prim.count;
284 unsigned int start = cmd_header->prim.start;
285 unsigned int i;
286 BCI_LOCALS;
287
288 if (!dmabuf) {
289 DRM_ERROR("called without dma buffers!\n");
290 return DRM_ERR(EINVAL);
291 }
292
293 if (!n)
294 return 0;
295
296 switch (prim) {
297 case SAVAGE_PRIM_TRILIST_201:
298 reorder = 1;
299 prim = SAVAGE_PRIM_TRILIST;
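		/* fall through */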
300 case SAVAGE_PRIM_TRILIST:
301 if (n % 3 != 0) {
302 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
303 n);
304 return DRM_ERR(EINVAL);
305 }
306 break;
307 case SAVAGE_PRIM_TRISTRIP:
308 case SAVAGE_PRIM_TRIFAN:
309 if (n < 3) {
310 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
311 n);
312 return DRM_ERR(EINVAL);
313 }
314 break;
315 default:
316 DRM_ERROR("invalid primitive type %u\n", prim);
317 return DRM_ERR(EINVAL);
318 }
319
320 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
321 if (skip != 0) {
322 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
323 skip);
324 return DRM_ERR(EINVAL);
325 }
326 } else {
327 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
328 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
329 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
330 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
331 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
332 skip);
333 return DRM_ERR(EINVAL);
334 }
335 if (reorder) {
336 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
337 return DRM_ERR(EINVAL);
338 }
339 }
340
341 if (start + n > dmabuf->total/32) {
342 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
343			  start, start + n - 1, dmabuf->total/32 - 1);
344 return DRM_ERR(EINVAL);
345 }
346
347 /* Vertex DMA doesn't work with command DMA at the same time,
348 * so we use BCI_... to submit commands here. Flush buffered
349 * faked DMA first. */
350 DMA_FLUSH();
351
352 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
353 BEGIN_BCI(2);
354 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
355 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
356 dev_priv->state.common.vbaddr = dmabuf->bus_address;
357 }
358 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
359 /* Workaround for what looks like a hardware bug. If a
360 * WAIT_3D_IDLE was emitted some time before the
361 * indexed drawing command then the engine will lock
362 * up. There are two known workarounds:
363 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
364 BEGIN_BCI(63);
365 for (i = 0; i < 63; ++i)
366 BCI_WRITE(BCI_CMD_WAIT);
367 dev_priv->waiting = 0;
368 }
369
370 prim <<= 25;
371 while (n != 0) {
372 /* Can emit up to 255 indices (85 triangles) at once. */
373 unsigned int count = n > 255 ? 255 : n;
374 if (reorder) {
375 /* Need to reorder indices for correct flat
376 * shading while preserving the clock sense
377 * for correct culling. Only on Savage3D. */
378 int reorder[3] = {-1, -1, -1};
379 reorder[start%3] = 2;
380
381 BEGIN_BCI((count+1+1)/2);
382 BCI_DRAW_INDICES_S3D(count, prim, start+2);
383
384 for (i = start+1; i+1 < start+count; i += 2)
385 BCI_WRITE((i + reorder[i % 3]) |
386 ((i+1 + reorder[(i+1) % 3]) << 16));
387 if (i < start+count)
388 BCI_WRITE(i + reorder[i%3]);
389 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
390 BEGIN_BCI((count+1+1)/2);
391 BCI_DRAW_INDICES_S3D(count, prim, start);
392
393 for (i = start+1; i+1 < start+count; i += 2)
394 BCI_WRITE(i | ((i+1) << 16));
395 if (i < start+count)
396 BCI_WRITE(i);
397 } else {
398 BEGIN_BCI((count+2+1)/2);
399 BCI_DRAW_INDICES_S4(count, prim, skip);
400
401 for (i = start; i+1 < start+count; i += 2)
402 BCI_WRITE(i | ((i+1) << 16));
403 if (i < start+count)
404 BCI_WRITE(i);
405 }
406
407 start += count;
408 n -= count;
409
410 prim |= BCI_CMD_DRAW_CONT;
411 }
412
413 return 0;
414}
415
416static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
417 const drm_savage_cmd_header_t *cmd_header,
418 const uint32_t __user *vtxbuf,
419 unsigned int vb_size,
420 unsigned int vb_stride)
421{
422 unsigned char reorder = 0;
423 unsigned int prim = cmd_header->prim.prim;
424 unsigned int skip = cmd_header->prim.skip;
425 unsigned int n = cmd_header->prim.count;
426 unsigned int start = cmd_header->prim.start;
427 unsigned int vtx_size;
428 unsigned int i;
429 DMA_LOCALS;
430
431 if (!n)
432 return 0;
433
434 switch (prim) {
435 case SAVAGE_PRIM_TRILIST_201:
436 reorder = 1;
437 prim = SAVAGE_PRIM_TRILIST;
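		/* fall through */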
438 case SAVAGE_PRIM_TRILIST:
439 if (n % 3 != 0) {
440 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
441 n);
442 return DRM_ERR(EINVAL);
443 }
444 break;
445 case SAVAGE_PRIM_TRISTRIP:
446 case SAVAGE_PRIM_TRIFAN:
447 if (n < 3) {
448 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
449 n);
450 return DRM_ERR(EINVAL);
451 }
452 break;
453 default:
454 DRM_ERROR("invalid primitive type %u\n", prim);
455 return DRM_ERR(EINVAL);
456 }
457
458 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
459 if (skip > SAVAGE_SKIP_ALL_S3D) {
460 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
461 return DRM_ERR(EINVAL);
462 }
463 vtx_size = 8; /* full vertex */
464 } else {
465 if (skip > SAVAGE_SKIP_ALL_S4) {
466 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
467 return DRM_ERR(EINVAL);
468 }
469 vtx_size = 10; /* full vertex */
470 }
471
472 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
473 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
474 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
475
476 if (vtx_size > vb_stride) {
477 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
478 vtx_size, vb_stride);
479 return DRM_ERR(EINVAL);
480 }
481
482 if (start + n > vb_size / (vb_stride*4)) {
483 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
484			  start, start + n - 1, vb_size / (vb_stride*4) - 1);
485 return DRM_ERR(EINVAL);
486 }
487
488 prim <<= 25;
489 while (n != 0) {
490 /* Can emit up to 255 vertices (85 triangles) at once. */
491 unsigned int count = n > 255 ? 255 : n;
492 if (reorder) {
493 /* Need to reorder vertices for correct flat
494 * shading while preserving the clock sense
495 * for correct culling. Only on Savage3D. */
496 int reorder[3] = {-1, -1, -1};
497 reorder[start%3] = 2;
498
499 BEGIN_DMA(count*vtx_size+1);
500 DMA_DRAW_PRIMITIVE(count, prim, skip);
501
502 for (i = start; i < start+count; ++i) {
503 unsigned int j = i + reorder[i % 3];
504 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
505 vtx_size);
506 }
507
508 DMA_COMMIT();
509 } else {
510 BEGIN_DMA(count*vtx_size+1);
511 DMA_DRAW_PRIMITIVE(count, prim, skip);
512
513 if (vb_stride == vtx_size) {
514 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
515 vtx_size*count);
516 } else {
517 for (i = start; i < start+count; ++i) {
518 DMA_COPY_FROM_USER(
519 &vtxbuf[vb_stride*i],
520 vtx_size);
521 }
522 }
523
524 DMA_COMMIT();
525 }
526
527 start += count;
528 n -= count;
529
530 prim |= BCI_CMD_DRAW_CONT;
531 }
532
533 return 0;
534}
535
536static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
537 const drm_savage_cmd_header_t *cmd_header,
538 const uint16_t __user *usr_idx,
539 const drm_buf_t *dmabuf)
540{
541 unsigned char reorder = 0;
542 unsigned int prim = cmd_header->idx.prim;
543 unsigned int skip = cmd_header->idx.skip;
544 unsigned int n = cmd_header->idx.count;
545 unsigned int i;
546 BCI_LOCALS;
547
548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL);
551 }
552
553 if (!n)
554 return 0;
555
556 switch (prim) {
557 case SAVAGE_PRIM_TRILIST_201:
558 reorder = 1;
559 prim = SAVAGE_PRIM_TRILIST;
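		/* fall through */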
560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n",
563 n);
564 return DRM_ERR(EINVAL);
565 }
566 break;
567 case SAVAGE_PRIM_TRISTRIP:
568 case SAVAGE_PRIM_TRIFAN:
569 if (n < 3) {
570 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
571 n);
572 return DRM_ERR(EINVAL);
573 }
574 break;
575 default:
576 DRM_ERROR("invalid primitive type %u\n", prim);
577 return DRM_ERR(EINVAL);
578 }
579
580 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
581 if (skip != 0) {
582 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
583 skip);
584 return DRM_ERR(EINVAL);
585 }
586 } else {
587 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
588 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
589 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
590 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
591 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
592 skip);
593 return DRM_ERR(EINVAL);
594 }
595 if (reorder) {
596 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
597 return DRM_ERR(EINVAL);
598 }
599 }
600
601 /* Vertex DMA doesn't work with command DMA at the same time,
602 * so we use BCI_... to submit commands here. Flush buffered
603 * faked DMA first. */
604 DMA_FLUSH();
605
606 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
607 BEGIN_BCI(2);
608 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
609 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
610 dev_priv->state.common.vbaddr = dmabuf->bus_address;
611 }
612 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
613 /* Workaround for what looks like a hardware bug. If a
614 * WAIT_3D_IDLE was emitted some time before the
615 * indexed drawing command then the engine will lock
616 * up. There are two known workarounds:
617 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
618 BEGIN_BCI(63);
619 for (i = 0; i < 63; ++i)
620 BCI_WRITE(BCI_CMD_WAIT);
621 dev_priv->waiting = 0;
622 }
623
624 prim <<= 25;
625 while (n != 0) {
626 /* Can emit up to 255 indices (85 triangles) at once. */
627 unsigned int count = n > 255 ? 255 : n;
628 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
629 uint16_t idx[255];
630
631 /* Copy and check indices */
632 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
633 for (i = 0; i < count; ++i) {
634			if (idx[i] >= dmabuf->total/32) {
635				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
636					  i, idx[i], dmabuf->total/32 - 1);
637 return DRM_ERR(EINVAL);
638 }
639 }
640
641 if (reorder) {
642 /* Need to reorder indices for correct flat
643 * shading while preserving the clock sense
644 * for correct culling. Only on Savage3D. */
645 int reorder[3] = {2, -1, -1};
646
647 BEGIN_BCI((count+1+1)/2);
648 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
649
650 for (i = 1; i+1 < count; i += 2)
651 BCI_WRITE(idx[i + reorder[i % 3]] |
652 (idx[i+1 + reorder[(i+1) % 3]] << 16));
653 if (i < count)
654 BCI_WRITE(idx[i + reorder[i%3]]);
655 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
656 BEGIN_BCI((count+1+1)/2);
657 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
658
659 for (i = 1; i+1 < count; i += 2)
660 BCI_WRITE(idx[i] | (idx[i+1] << 16));
661 if (i < count)
662 BCI_WRITE(idx[i]);
663 } else {
664 BEGIN_BCI((count+2+1)/2);
665 BCI_DRAW_INDICES_S4(count, prim, skip);
666
667 for (i = 0; i+1 < count; i += 2)
668 BCI_WRITE(idx[i] | (idx[i+1] << 16));
669 if (i < count)
670 BCI_WRITE(idx[i]);
671 }
672
673 usr_idx += count;
674 n -= count;
675
676 prim |= BCI_CMD_DRAW_CONT;
677 }
678
679 return 0;
680}
681
682static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
683 const drm_savage_cmd_header_t *cmd_header,
684 const uint16_t __user *usr_idx,
685 const uint32_t __user *vtxbuf,
686 unsigned int vb_size,
687 unsigned int vb_stride)
688{
689 unsigned char reorder = 0;
690 unsigned int prim = cmd_header->idx.prim;
691 unsigned int skip = cmd_header->idx.skip;
692 unsigned int n = cmd_header->idx.count;
693 unsigned int vtx_size;
694 unsigned int i;
695 DMA_LOCALS;
696
697 if (!n)
698 return 0;
699
700 switch (prim) {
701 case SAVAGE_PRIM_TRILIST_201:
702 reorder = 1;
703 prim = SAVAGE_PRIM_TRILIST;
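		/* fall through */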
704 case SAVAGE_PRIM_TRILIST:
705 if (n % 3 != 0) {
706 DRM_ERROR("wrong number of indices %u in TRILIST\n",
707 n);
708 return DRM_ERR(EINVAL);
709 }
710 break;
711 case SAVAGE_PRIM_TRISTRIP:
712 case SAVAGE_PRIM_TRIFAN:
713 if (n < 3) {
714 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
715 n);
716 return DRM_ERR(EINVAL);
717 }
718 break;
719 default:
720 DRM_ERROR("invalid primitive type %u\n", prim);
721 return DRM_ERR(EINVAL);
722 }
723
724 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
725 if (skip > SAVAGE_SKIP_ALL_S3D) {
726 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
727 return DRM_ERR(EINVAL);
728 }
729 vtx_size = 8; /* full vertex */
730 } else {
731 if (skip > SAVAGE_SKIP_ALL_S4) {
732 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
733 return DRM_ERR(EINVAL);
734 }
735 vtx_size = 10; /* full vertex */
736 }
737
738 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
739 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
740 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
741
742 if (vtx_size > vb_stride) {
743 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
744 vtx_size, vb_stride);
745 return DRM_ERR(EINVAL);
746 }
747
748 prim <<= 25;
749 while (n != 0) {
750 /* Can emit up to 255 vertices (85 triangles) at once. */
751 unsigned int count = n > 255 ? 255 : n;
752 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
753 uint16_t idx[255];
754
755 /* Copy and check indices */
756 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
757 for (i = 0; i < count; ++i) {
758			if (idx[i] >= vb_size / (vb_stride*4)) {
759				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
760					  i, idx[i], vb_size / (vb_stride*4) - 1);
761 return DRM_ERR(EINVAL);
762 }
763 }
764
765 if (reorder) {
766 /* Need to reorder vertices for correct flat
767 * shading while preserving the clock sense
768 * for correct culling. Only on Savage3D. */
769 int reorder[3] = {2, -1, -1};
770
771 BEGIN_DMA(count*vtx_size+1);
772 DMA_DRAW_PRIMITIVE(count, prim, skip);
773
774 for (i = 0; i < count; ++i) {
775 unsigned int j = idx[i + reorder[i % 3]];
776 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
777 vtx_size);
778 }
779
780 DMA_COMMIT();
781 } else {
782 BEGIN_DMA(count*vtx_size+1);
783 DMA_DRAW_PRIMITIVE(count, prim, skip);
784
785 for (i = 0; i < count; ++i) {
786 unsigned int j = idx[i];
787 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
788 vtx_size);
789 }
790
791 DMA_COMMIT();
792 }
793
794 usr_idx += count;
795 n -= count;
796
797 prim |= BCI_CMD_DRAW_CONT;
798 }
799
800 return 0;
801}
802
803static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
804 const drm_savage_cmd_header_t *cmd_header,
805 const drm_savage_cmd_header_t __user *data,
806 unsigned int nbox,
807 const drm_clip_rect_t __user *usr_boxes)
808{
809 unsigned int flags = cmd_header->clear0.flags, mask, value;
810 unsigned int clear_cmd;
811 unsigned int i, nbufs;
812 DMA_LOCALS;
813
814 if (nbox == 0)
815 return 0;
816
817 DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
818 ->clear1.mask);
819 DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
820 ->clear1.value);
821
822 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
823 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
824 BCI_CMD_SET_ROP(clear_cmd,0xCC);
825
826 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
827 ((flags & SAVAGE_BACK) ? 1 : 0) +
828 ((flags & SAVAGE_DEPTH) ? 1 : 0);
829 if (nbufs == 0)
830 return 0;
831
832 if (mask != 0xffffffff) {
833 /* set mask */
834 BEGIN_DMA(2);
835 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
836 DMA_WRITE(mask);
837 DMA_COMMIT();
838 }
839 for (i = 0; i < nbox; ++i) {
840 drm_clip_rect_t box;
841 unsigned int x, y, w, h;
842 unsigned int buf;
843 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
844 x = box.x1, y = box.y1;
845 w = box.x2 - box.x1;
846 h = box.y2 - box.y1;
847 BEGIN_DMA(nbufs*6);
848 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
849 if (!(flags & buf))
850 continue;
851 DMA_WRITE(clear_cmd);
852 switch(buf) {
853 case SAVAGE_FRONT:
854 DMA_WRITE(dev_priv->front_offset);
855 DMA_WRITE(dev_priv->front_bd);
856 break;
857 case SAVAGE_BACK:
858 DMA_WRITE(dev_priv->back_offset);
859 DMA_WRITE(dev_priv->back_bd);
860 break;
861 case SAVAGE_DEPTH:
862 DMA_WRITE(dev_priv->depth_offset);
863 DMA_WRITE(dev_priv->depth_bd);
864 break;
865 }
866 DMA_WRITE(value);
867 DMA_WRITE(BCI_X_Y(x, y));
868 DMA_WRITE(BCI_W_H(w, h));
869 }
870 DMA_COMMIT();
871 }
872 if (mask != 0xffffffff) {
873 /* reset mask */
874 BEGIN_DMA(2);
875 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
876 DMA_WRITE(0xffffffff);
877 DMA_COMMIT();
878 }
879
880 return 0;
881}
882
883static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
884 unsigned int nbox,
885 const drm_clip_rect_t __user *usr_boxes)
886{
887 unsigned int swap_cmd;
888 unsigned int i;
889 DMA_LOCALS;
890
891 if (nbox == 0)
892 return 0;
893
894 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
895 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
896 BCI_CMD_SET_ROP(swap_cmd,0xCC);
897
898 for (i = 0; i < nbox; ++i) {
899 drm_clip_rect_t box;
900 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
901
902 BEGIN_DMA(6);
903 DMA_WRITE(swap_cmd);
904 DMA_WRITE(dev_priv->back_offset);
905 DMA_WRITE(dev_priv->back_bd);
906 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
907		DMA_WRITE(BCI_X_Y(box.x1, box.y1));	/* intentional: src and dst x/y coincide */
908 DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
909 DMA_COMMIT();
910 }
911
912 return 0;
913}
914
915static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
916 const drm_savage_cmd_header_t __user *start,
917 const drm_savage_cmd_header_t __user *end,
918 const drm_buf_t *dmabuf,
919 const unsigned int __user *usr_vtxbuf,
920 unsigned int vb_size, unsigned int vb_stride,
921 unsigned int nbox,
922 const drm_clip_rect_t __user *usr_boxes)
923{
924 unsigned int i, j;
925 int ret;
926
927 for (i = 0; i < nbox; ++i) {
928 drm_clip_rect_t box;
929 const drm_savage_cmd_header_t __user *usr_cmdbuf;
930 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
931 dev_priv->emit_clip_rect(dev_priv, &box);
932
933 usr_cmdbuf = start;
934 while (usr_cmdbuf < end) {
935 drm_savage_cmd_header_t cmd_header;
936 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
937 sizeof(cmd_header));
938 usr_cmdbuf++;
939 switch (cmd_header.cmd.cmd) {
940 case SAVAGE_CMD_DMA_PRIM:
941 ret = savage_dispatch_dma_prim(
942 dev_priv, &cmd_header, dmabuf);
943 break;
944 case SAVAGE_CMD_VB_PRIM:
945 ret = savage_dispatch_vb_prim(
946 dev_priv, &cmd_header,
947 (const uint32_t __user *)usr_vtxbuf,
948 vb_size, vb_stride);
949 break;
950 case SAVAGE_CMD_DMA_IDX:
951 j = (cmd_header.idx.count + 3) / 4;
952				/* j was checked in savage_bci_cmdbuf */
953 ret = savage_dispatch_dma_idx(
954 dev_priv, &cmd_header,
955 (const uint16_t __user *)usr_cmdbuf,
956 dmabuf);
957 usr_cmdbuf += j;
958 break;
959 case SAVAGE_CMD_VB_IDX:
960 j = (cmd_header.idx.count + 3) / 4;
961				/* j was checked in savage_bci_cmdbuf */
962 ret = savage_dispatch_vb_idx(
963 dev_priv, &cmd_header,
964 (const uint16_t __user *)usr_cmdbuf,
965 (const uint32_t __user *)usr_vtxbuf,
966 vb_size, vb_stride);
967 usr_cmdbuf += j;
968 break;
969 default:
970 /* What's the best return code? EFAULT? */
971 DRM_ERROR("IMPLEMENTATION ERROR: "
972 "non-drawing-command %d\n",
973 cmd_header.cmd.cmd);
974 return DRM_ERR(EINVAL);
975 }
976
977 if (ret != 0)
978 return ret;
979 }
980 }
981
982 return 0;
983}
984
985int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
986{
987 DRM_DEVICE;
988 drm_savage_private_t *dev_priv = dev->dev_private;
989 drm_device_dma_t *dma = dev->dma;
990 drm_buf_t *dmabuf;
991 drm_savage_cmdbuf_t cmdbuf;
992 drm_savage_cmd_header_t __user *usr_cmdbuf;
993 drm_savage_cmd_header_t __user *first_draw_cmd;
994 unsigned int __user *usr_vtxbuf;
995 drm_clip_rect_t __user *usr_boxes;
996 unsigned int i, j;
997 int ret = 0;
998
999 DRM_DEBUG("\n");
1000
1001 LOCK_TEST_WITH_RETURN(dev, filp);
1002
1003 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
1004 sizeof(cmdbuf));
1005
1006 if (dma && dma->buflist) {
1007		if (cmdbuf.dma_idx >= dma->buf_count) {
1008 DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
1009 cmdbuf.dma_idx, dma->buf_count-1);
1010 return DRM_ERR(EINVAL);
1011 }
1012 dmabuf = dma->buflist[cmdbuf.dma_idx];
1013 } else {
1014 dmabuf = NULL;
1015 }
1016
1017 usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
1018 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
1019 usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
1020 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
1021 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
1022 usr_vtxbuf, cmdbuf.vb_size)) ||
1023 (cmdbuf.nbox && DRM_VERIFYAREA_READ(
1024 usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
1025 return DRM_ERR(EFAULT);
1026
1027 /* Make sure writes to DMA buffers are finished before sending
1028 * DMA commands to the graphics hardware. */
1029 DRM_MEMORYBARRIER();
1030
1031 /* Coming from user space. Don't know if the Xserver has
1032 * emitted wait commands. Assuming the worst. */
1033 dev_priv->waiting = 1;
1034
1035 i = 0;
1036 first_draw_cmd = NULL;
1037 while (i < cmdbuf.size) {
1038 drm_savage_cmd_header_t cmd_header;
1039 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
1040 sizeof(cmd_header));
1041 usr_cmdbuf++;
1042 i++;
1043
1044 /* Group drawing commands with same state to minimize
1045 * iterations over clip rects. */
1046 j = 0;
1047 switch (cmd_header.cmd.cmd) {
1048 case SAVAGE_CMD_DMA_IDX:
1049 case SAVAGE_CMD_VB_IDX:
1050 j = (cmd_header.idx.count + 3) / 4;
1051 if (i + j > cmdbuf.size) {
1052 DRM_ERROR("indexed drawing command extends "
1053 "beyond end of command buffer\n");
1054 DMA_FLUSH();
1055 return DRM_ERR(EINVAL);
1056 }
1057 /* fall through */
1058 case SAVAGE_CMD_DMA_PRIM:
1059 case SAVAGE_CMD_VB_PRIM:
1060 if (!first_draw_cmd)
1061 first_draw_cmd = usr_cmdbuf-1;
1062 usr_cmdbuf += j;
1063 i += j;
1064 break;
1065 default:
1066 if (first_draw_cmd) {
1067				ret = savage_dispatch_draw(
1068 dev_priv, first_draw_cmd, usr_cmdbuf-1,
1069 dmabuf, usr_vtxbuf, cmdbuf.vb_size,
1070 cmdbuf.vb_stride,
1071 cmdbuf.nbox, usr_boxes);
1072 if (ret != 0)
1073 return ret;
1074 first_draw_cmd = NULL;
1075 }
1076 }
1077 if (first_draw_cmd)
1078 continue;
1079
1080 switch (cmd_header.cmd.cmd) {
1081 case SAVAGE_CMD_STATE:
1082 j = (cmd_header.state.count + 1) / 2;
1083 if (i + j > cmdbuf.size) {
1084 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1085 "beyond end of command buffer\n");
1086 DMA_FLUSH();
1087 return DRM_ERR(EINVAL);
1088 }
1089 ret = savage_dispatch_state(
1090 dev_priv, &cmd_header,
1091 (uint32_t __user *)usr_cmdbuf);
1092 usr_cmdbuf += j;
1093 i += j;
1094 break;
1095 case SAVAGE_CMD_CLEAR:
1096 if (i + 1 > cmdbuf.size) {
1097 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1098 "beyond end of command buffer\n");
1099 DMA_FLUSH();
1100 return DRM_ERR(EINVAL);
1101 }
1102 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1103 usr_cmdbuf,
1104 cmdbuf.nbox, usr_boxes);
1105 usr_cmdbuf++;
1106 i++;
1107 break;
1108 case SAVAGE_CMD_SWAP:
1109 ret = savage_dispatch_swap(dev_priv,
1110 cmdbuf.nbox, usr_boxes);
1111 break;
1112 default:
1113 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1114 DMA_FLUSH();
1115 return DRM_ERR(EINVAL);
1116 }
1117
1118 if (ret != 0) {
1119 DMA_FLUSH();
1120 return ret;
1121 }
1122 }
1123
1124 if (first_draw_cmd) {
1125		ret = savage_dispatch_draw(
1126 dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
1127 usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
1128 cmdbuf.nbox, usr_boxes);
1129 if (ret != 0) {
1130 DMA_FLUSH();
1131 return ret;
1132 }
1133 }
1134
1135 DMA_FLUSH();
1136
1137 if (dmabuf && cmdbuf.discard) {
1138 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1139 uint16_t event;
1140 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1141 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1142 savage_freelist_put(dev, dmabuf);
1143 }
1144
1145 return 0;
1146}
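
The bounds check repeated in the code above deserves a note: an indexed drawing command carries cmd_header.idx.count 16-bit vertex indices, packed four to each 64-bit slot of the command stream, so the index list occupies (count + 3) / 4 additional slots. savage_bci_cmdbuf verifies i + j against cmdbuf.size before dispatching, which is why the dispatch path can trust j. A minimal userspace sketch of that arithmetic follows; idx_slots and idx_cmd_fits are illustrative names, not driver functions.

#include <stdio.h>

/* Each command slot is 64 bits; 16-bit indices pack four per slot,
 * so n indices occupy ceil(n / 4) slots. */
static unsigned int idx_slots(unsigned int idx_count)
{
	return (idx_count + 3) / 4;
}

/* Non-zero if an indexed command at slot i, with idx_count indices,
 * fits in a command buffer of cmd_size slots (i already points past
 * the command header, as in the dispatch loop). */
static int idx_cmd_fits(unsigned int i, unsigned int idx_count,
			unsigned int cmd_size)
{
	return i + idx_slots(idx_count) <= cmd_size;
}

int main(void)
{
	/* 10 indices -> 3 slots; at i = 5 in an 8-slot buffer,
	 * 5 + 3 = 8 <= 8, so the command just fits. */
	printf("slots=%u fits=%d\n", idx_slots(10), idx_cmd_fits(5, 10, 8));
	return 0;
}
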
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 60bb9152b832..78d681dc35a8 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@ char hvc_driver_name[] = "hvc_console";
39 39
40static struct vio_device_id hvc_driver_table[] __devinitdata = { 40static struct vio_device_id hvc_driver_table[] __devinitdata = {
41 {"serial", "hvterm1"}, 41 {"serial", "hvterm1"},
42 { NULL, } 42 { "", "" }
43}; 43};
44MODULE_DEVICE_TABLE(vio, hvc_driver_table); 44MODULE_DEVICE_TABLE(vio, hvc_driver_table);
45 45
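
The sentinel change above (and the identical ones in hvcs.c and viotape.c below) follows from struct vio_device_id holding fixed-size char arrays rather than pointers, so { NULL, } initializes an array with a pointer while { "", "" } zero-fills it, which is what the bus match loop tests for. A hedged userspace sketch of the pattern; the struct layout here is an assumption modeled on 2.6-era code, not quoted from it.

#include <stdio.h>
#include <string.h>

/* Assumed shape of the ID table entry: two fixed-size strings. */
struct vio_id_sketch {
	char type[32];
	char compat[32];
};

static const struct vio_id_sketch table[] = {
	{ "serial", "hvterm1" },
	{ "", "" }	/* terminator: empty strings, not a NULL pointer */
};

/* A match loop stops at the first entry whose type is empty. */
static const struct vio_id_sketch *vio_match_sketch(const char *type)
{
	const struct vio_id_sketch *id;

	for (id = table; id->type[0] != '\0'; id++)
		if (strcmp(id->type, type) == 0)
			return id;
	return NULL;
}

int main(void)
{
	printf("match: %s\n", vio_match_sketch("serial") ? "yes" : "no");
	return 0;
}
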
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 3236d2404905..f47f009f9259 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -527,7 +527,7 @@ static int khvcsd(void *unused)
527 527
528static struct vio_device_id hvcs_driver_table[] __devinitdata= { 528static struct vio_device_id hvcs_driver_table[] __devinitdata= {
529 {"serial-server", "hvterm2"}, 529 {"serial-server", "hvterm2"},
530 { NULL, } 530 { "", "" }
531}; 531};
532MODULE_DEVICE_TABLE(vio, hvcs_driver_table); 532MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
533 533
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b11d6b2129f..7999da25fe40 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1589,6 +1589,40 @@ u32 secure_tcpv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dp
1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral); 1589EXPORT_SYMBOL(secure_tcpv6_port_ephemeral);
1590#endif 1590#endif
1591 1591
1592#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
1593/* Similar to secure_tcp_sequence_number, but generates a 48-bit value:
1594 * bits 32-47 increase on every key exchange,
1595 * bits 0-31 are hash(source, dest).
1596 */
1597u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
1598 __u16 sport, __u16 dport)
1599{
1600 struct timeval tv;
1601 u64 seq;
1602 __u32 hash[4];
1603 struct keydata *keyptr = get_keyptr();
1604
1605 hash[0] = saddr;
1606 hash[1] = daddr;
1607 hash[2] = (sport << 16) + dport;
1608 hash[3] = keyptr->secret[11];
1609
1610 seq = half_md4_transform(hash, keyptr->secret);
1611 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1612
1613 do_gettimeofday(&tv);
1614 seq += tv.tv_usec + tv.tv_sec * 1000000;
1615 seq &= (1ull << 48) - 1;
1616#if 0
1617	printk("dccp init_seq(%x, %x, %d, %d) = %llx\n",
1618	       saddr, daddr, sport, dport, (unsigned long long) seq);
1619#endif
1620 return seq;
1621}
1622
1623EXPORT_SYMBOL(secure_dccp_sequence_number);
1624#endif
1625
1592#endif /* CONFIG_INET */ 1626#endif /* CONFIG_INET */
1593 1627
1594 1628
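
The construction added above is worth spelling out: bits 0-31 come from half_md4_transform over the addresses, ports, and a secret, bits 32-47 carry the rekey counter, a microsecond timestamp is added, and the result is masked to 48 bits. A hedged sketch of the bit layout only; dccp_isn_sketch is an illustrative name, the counter is pinned to bits 32-47 as the comment describes, and the real hash and key handling stay in random.c.

#include <stdint.h>
#include <stdio.h>

static uint64_t dccp_isn_sketch(uint32_t hash, uint16_t rekey_count,
				uint64_t usec_now)
{
	uint64_t seq = hash;			/* bits 0-31: hash(source, dest) */

	seq |= (uint64_t)rekey_count << 32;	/* bits 32-47: rekey counter */
	seq += usec_now;			/* time offset, as in the patch */
	return seq & ((1ULL << 48) - 1);	/* clamp to 48 bits */
}

int main(void)
{
	printf("isn=%012llx\n",
	       (unsigned long long)dccp_isn_sketch(0xdeadbeef, 7, 123456));
	return 0;
}
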
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 4764b4f9555d..0aff45fac2e6 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -991,7 +991,7 @@ static int viotape_remove(struct vio_dev *vdev)
991 */ 991 */
992static struct vio_device_id viotape_device_table[] __devinitdata = { 992static struct vio_device_id viotape_device_table[] __devinitdata = {
993 { "viotape", "" }, 993 { "viotape", "" },
994 { 0, } 994 { "", "" }
995}; 995};
996 996
997MODULE_DEVICE_TABLE(vio, viotape_device_table); 997MODULE_DEVICE_TABLE(vio, viotape_device_table);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 30d96739fb23..665103ccaee8 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2433,7 +2433,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2433 int ret = 0; 2433 int ret = 0;
2434 2434
2435 acquire_console_sem(); 2435 acquire_console_sem();
2436 if (tty->count == 1) { 2436 if (tty->driver_data == NULL) {
2437 ret = vc_allocate(currcons); 2437 ret = vc_allocate(currcons);
2438 if (ret == 0) { 2438 if (ret == 0) {
2439 struct vc_data *vc = vc_cons[currcons].d; 2439 struct vc_data *vc = vc_cons[currcons].d;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 4fa17c76eea2..c8a7f47911f9 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -325,7 +325,7 @@ int adm1026_attach_adapter(struct i2c_adapter *adapter)
325int adm1026_detach_client(struct i2c_client *client) 325int adm1026_detach_client(struct i2c_client *client)
326{ 326{
327 i2c_detach_client(client); 327 i2c_detach_client(client);
328 kfree(client); 328 kfree(i2c_get_clientdata(client));
329 return 0; 329 return 0;
330} 330}
331 331
@@ -1691,7 +1691,7 @@ int adm1026_detect(struct i2c_adapter *adapter, int address,
1691 1691
1692 /* Error out and cleanup code */ 1692 /* Error out and cleanup code */
1693exitfree: 1693exitfree:
1694 kfree(new_client); 1694 kfree(data);
1695exit: 1695exit:
1696 return err; 1696 return err;
1697} 1697}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 9168e983ca1d..936250957270 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -834,7 +834,7 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
834 return 0; 834 return 0;
835 835
836exit_free: 836exit_free:
837 kfree(new_client); 837 kfree(data);
838exit: 838exit:
839 return err; 839 return err;
840} 840}
@@ -845,7 +845,7 @@ static int adm1031_detach_client(struct i2c_client *client)
845 if ((ret = i2c_detach_client(client)) != 0) { 845 if ((ret = i2c_detach_client(client)) != 0) {
846 return ret; 846 return ret;
847 } 847 }
848 kfree(client); 848 kfree(i2c_get_clientdata(client));
849 return 0; 849 return 0;
850} 850}
851 851
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 5c68e9c311aa..ce2a6eb93f6e 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -616,7 +616,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
616 616
617 return 0; 617 return 0;
618exit_free: 618exit_free:
619 kfree(new_client); 619 kfree(data);
620exit: 620exit:
621 return err; 621 return err;
622} 622}
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
index 270015b626ad..301ae98bd0ad 100644
--- a/drivers/hwmon/fscpos.c
+++ b/drivers/hwmon/fscpos.c
@@ -167,7 +167,7 @@ static ssize_t set_temp_reset(struct i2c_client *client, struct fscpos_data
167 "experience to the module author.\n"); 167 "experience to the module author.\n");
168 168
169 /* Supported value: 2 (clears the status) */ 169 /* Supported value: 2 (clears the status) */
170 fscpos_write_value(client, FSCPOS_REG_TEMP_STATE[nr], 2); 170 fscpos_write_value(client, FSCPOS_REG_TEMP_STATE[nr - 1], 2);
171 return count; 171 return count;
172} 172}
173 173
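
The fscpos fix above is the classic 1-based/0-based mismatch: sysfs exposes temperature channels numbered from 1, while FSCPOS_REG_TEMP_STATE is a 0-based array, so channel nr must index element nr - 1. A tiny sketch of the mapping; the array contents and names here are invented for illustration.

#include <stdio.h>

/* Assumed: registers for channels 1..3 live at indices 0..2. */
static const unsigned char temp_state_reg[3] = { 0x71, 0x81, 0x91 };

/* Map a 1-based user-visible channel number to its register. */
static unsigned char temp_state_reg_for(unsigned int nr)
{
	return temp_state_reg[nr - 1];	/* not temp_state_reg[nr] */
}

int main(void)
{
	printf("channel 1 -> reg 0x%02x\n", temp_state_reg_for(1));
	return 0;
}
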
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 251ac2659554..fdeeb3ab6f2f 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -298,7 +298,7 @@ static int smsc47b397_detect(struct i2c_adapter *adapter, int addr, int kind)
298 return 0; 298 return 0;
299 299
300error_free: 300error_free:
301 kfree(new_client); 301 kfree(data);
302error_release: 302error_release:
303 release_region(addr, SMSC_EXTENT); 303 release_region(addr, SMSC_EXTENT);
304 return err; 304 return err;
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 897117a7213f..7166ad0b2fda 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -495,7 +495,7 @@ static int smsc47m1_detect(struct i2c_adapter *adapter, int address, int kind)
495 return 0; 495 return 0;
496 496
497error_free: 497error_free:
498 kfree(new_client); 498 kfree(data);
499error_release: 499error_release:
500 release_region(address, SMSC_EXTENT); 500 release_region(address, SMSC_EXTENT);
501 return err; 501 return err;
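
The kfree() changes in the hwmon drivers above all fix one ownership bug: the i2c_client is embedded inside the driver's private data structure, so the data structure is the allocation unit, and kfree(client) would free from the middle of it. The fix frees the containing block, reached through i2c_get_clientdata(). A userspace sketch of the pattern with stand-in types; this is not the i2c API.

#include <stdio.h>
#include <stdlib.h>

struct client_sketch {
	void *driver_data;		/* stands in for i2c_get_clientdata() */
};

struct data_sketch {
	int last_reading;
	struct client_sketch client;	/* embedded, not separately allocated */
};

int main(void)
{
	struct data_sketch *data = calloc(1, sizeof(*data));
	struct client_sketch *client = &data->client;

	data->client.driver_data = data;	/* i2c_set_clientdata() analogue */

	/* Wrong: free(client) -- it points into the middle of data.
	 * Right: free the containing allocation via the back-pointer. */
	free(client->driver_data);
	printf("freed containing block\n");
	return 0;
}
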
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4..d633770fac8e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
681 return; 681 return;
682 } 682 }
683 683
684 __skb_unlink(skb, skb->list); 684 __skb_unlink(skb, &host->pending_packet_queue);
685 685
686 if (packet->state == hpsb_queued) { 686 if (packet->state == hpsb_queued) {
687 packet->sendtime = jiffies; 687 packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
989 packet = (struct hpsb_packet *)skb->data; 989 packet = (struct hpsb_packet *)skb->data;
990 990
991 if (time_before(packet->sendtime + expire, jiffies)) { 991 if (time_before(packet->sendtime + expire, jiffies)) {
992 __skb_unlink(skb, skb->list); 992 __skb_unlink(skb, &host->pending_packet_queue);
993 packet->state = hpsb_complete; 993 packet->state = hpsb_complete;
994 packet->ack_code = ACKX_TIMEOUT; 994 packet->ack_code = ACKX_TIMEOUT;
995 queue_packet_complete(packet); 995 queue_packet_complete(packet);
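
The __skb_unlink() calls above change because struct sk_buff lost its list back-pointer in this kernel series; the owning queue must now be passed explicitly, since the queue keeps accounting that the node alone cannot update. A doubly-linked-list sketch of why the head parameter is needed, with illustrative types.

#include <stdio.h>

struct node {
	struct node *next, *prev;
};

struct queue {
	struct node head;	/* circular sentinel */
	unsigned int qlen;	/* must be decremented on unlink */
};

static void queue_init(struct queue *q)
{
	q->head.next = q->head.prev = &q->head;
	q->qlen = 0;
}

static void enqueue(struct queue *q, struct node *n)
{
	n->prev = q->head.prev;
	n->next = &q->head;
	q->head.prev->next = n;
	q->head.prev = n;
	q->qlen++;
}

/* The node's neighbours suffice to splice it out, but q is still
 * required to keep qlen correct -- hence the explicit parameter. */
static void unlink_node(struct queue *q, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	q->qlen--;
}

int main(void)
{
	struct queue q;
	struct node a;

	queue_init(&q);
	enqueue(&q, &a);
	unlink_node(&q, &a);
	printf("qlen=%u\n", q.qlen);
	return 0;
}
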
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index b12a970cc9a3..27018c8efc24 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -478,7 +478,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
478 int num_ports, i; 478 int num_ports, i;
479 479
480 spin_lock_init(&ohci->phy_reg_lock); 480 spin_lock_init(&ohci->phy_reg_lock);
481 spin_lock_init(&ohci->event_lock);
482 481
483 /* Put some defaults to these undefined bus options */ 482 /* Put some defaults to these undefined bus options */
484 buf = reg_read(ohci, OHCI1394_BusOptions); 483 buf = reg_read(ohci, OHCI1394_BusOptions);
@@ -3402,7 +3401,14 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3402 /* We hopefully don't have to pre-allocate IT DMA like we did 3401 /* We hopefully don't have to pre-allocate IT DMA like we did
3403 * for IR DMA above. Allocate it on-demand and mark inactive. */ 3402 * for IR DMA above. Allocate it on-demand and mark inactive. */
3404 ohci->it_legacy_context.ohci = NULL; 3403 ohci->it_legacy_context.ohci = NULL;
3404 spin_lock_init(&ohci->event_lock);
3405 3405
3406 /*
3407	 * Interrupts are disabled, but because the IRQ line is shared
3408	 * (SA_SHIRQ) the handler may be invoked anyway. It will find no
3409	 * event, but enough state must already be initialized for it to
3410	 * reach that "no event" conclusion safely.
3411 */
3406 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ, 3412 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3407 OHCI1394_DRIVER_NAME, ohci)) 3413 OHCI1394_DRIVER_NAME, ohci))
3408 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq); 3414 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
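
Moving spin_lock_init(&ohci->event_lock) from ohci_initialize() to just before request_irq() enforces a general rule: with a shared IRQ line, the handler can run the moment it is registered, so every lock and field it touches must be initialized first. A userspace sketch of the ordering, using a pthread mutex as a stand-in for the spinlock; the handler call below simulates an early invocation, none of this is kernel API.

#include <pthread.h>
#include <stdio.h>

struct device_sketch {
	pthread_mutex_t event_lock;	/* taken by the handler */
	int ready;
};

/* Stands in for the interrupt handler: may run at any moment after
 * registration, even if the device has raised no event yet. */
static void handler_sketch(struct device_sketch *dev)
{
	pthread_mutex_lock(&dev->event_lock);
	/* ... would find "no event" and return ... */
	pthread_mutex_unlock(&dev->event_lock);
}

int main(void)
{
	struct device_sketch dev = { .ready = 0 };

	/* 1. Initialize everything the handler can reach ... */
	pthread_mutex_init(&dev.event_lock, NULL);

	/* 2. ... only then register it (request_irq() analogue);
	 * here we simulate an immediate invocation. */
	handler_sketch(&dev);
	printf("ok\n");
	return 0;
}
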
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 10be36731ed7..678a7e097f32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,5 +1,3 @@
1EXTRA_CFLAGS += -Idrivers/infiniband/include
2
3obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 1obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
4 ib_cm.o ib_umad.o ib_ucm.o 2 ib_cm.o ib_umad.o ib_ucm.o
5obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o 3obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 729f0b0d983a..5ac86f566dc0 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 2 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * 8 *
8 * This software is available to you under a choice of one of two 9 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU 10 * licenses. You may choose to be licensed under the terms of the GNU
@@ -40,7 +41,7 @@
40 41
41#include <asm/bug.h> 42#include <asm/bug.h>
42 43
43#include <ib_smi.h> 44#include <rdma/ib_smi.h>
44 45
45#include "smi.h" 46#include "smi.h"
46#include "agent_priv.h" 47#include "agent_priv.h"
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17435af1e914..2ec6d7f1b7d0 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 2 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
7 * 7 *
8 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3042360c97e1..f014e639088c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1,5 +1,8 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
3 * 6 *
4 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,12 +35,11 @@
32 * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $ 35 * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
33 */ 36 */
34 37
35#include <linux/version.h>
36#include <linux/module.h> 38#include <linux/module.h>
37#include <linux/errno.h> 39#include <linux/errno.h>
38#include <linux/slab.h> 40#include <linux/slab.h>
39 41
40#include <ib_cache.h> 42#include <rdma/ib_cache.h>
41 43
42#include "core_priv.h" 44#include "core_priv.h"
43 45
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 403ed125d8f4..4de93ba274a6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -43,8 +43,8 @@
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/workqueue.h> 44#include <linux/workqueue.h>
45 45
46#include <ib_cache.h> 46#include <rdma/ib_cache.h>
47#include <ib_cm.h> 47#include <rdma/ib_cm.h>
48#include "cm_msgs.h" 48#include "cm_msgs.h"
49 49
50MODULE_AUTHOR("Sean Hefty"); 50MODULE_AUTHOR("Sean Hefty");
@@ -83,7 +83,7 @@ struct cm_port {
83struct cm_device { 83struct cm_device {
84 struct list_head list; 84 struct list_head list;
85 struct ib_device *device; 85 struct ib_device *device;
86 u64 ca_guid; 86 __be64 ca_guid;
87 struct cm_port port[0]; 87 struct cm_port port[0];
88}; 88};
89 89
@@ -100,8 +100,8 @@ struct cm_work {
100 struct list_head list; 100 struct list_head list;
101 struct cm_port *port; 101 struct cm_port *port;
102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
103 u32 local_id; /* Established / timewait */ 103 __be32 local_id; /* Established / timewait */
104 u32 remote_id; 104 __be32 remote_id;
105 struct ib_cm_event cm_event; 105 struct ib_cm_event cm_event;
106 struct ib_sa_path_rec path[0]; 106 struct ib_sa_path_rec path[0];
107}; 107};
@@ -110,8 +110,8 @@ struct cm_timewait_info {
110 struct cm_work work; /* Must be first. */ 110 struct cm_work work; /* Must be first. */
111 struct rb_node remote_qp_node; 111 struct rb_node remote_qp_node;
112 struct rb_node remote_id_node; 112 struct rb_node remote_id_node;
113 u64 remote_ca_guid; 113 __be64 remote_ca_guid;
114 u32 remote_qpn; 114 __be32 remote_qpn;
115 u8 inserted_remote_qp; 115 u8 inserted_remote_qp;
116 u8 inserted_remote_id; 116 u8 inserted_remote_id;
117}; 117};
@@ -132,11 +132,11 @@ struct cm_id_private {
132 struct cm_av alt_av; 132 struct cm_av alt_av;
133 133
134 void *private_data; 134 void *private_data;
135 u64 tid; 135 __be64 tid;
136 u32 local_qpn; 136 __be32 local_qpn;
137 u32 remote_qpn; 137 __be32 remote_qpn;
138 u32 sq_psn; 138 __be32 sq_psn;
139 u32 rq_psn; 139 __be32 rq_psn;
140 int timeout_ms; 140 int timeout_ms;
141 enum ib_mtu path_mtu; 141 enum ib_mtu path_mtu;
142 u8 private_data_len; 142 u8 private_data_len;
@@ -253,7 +253,7 @@ static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
253 u16 dlid, u8 sl, u16 src_path_bits) 253 u16 dlid, u8 sl, u16 src_path_bits)
254{ 254{
255 memset(ah_attr, 0, sizeof ah_attr); 255 memset(ah_attr, 0, sizeof ah_attr);
256 ah_attr->dlid = be16_to_cpu(dlid); 256 ah_attr->dlid = dlid;
257 ah_attr->sl = sl; 257 ah_attr->sl = sl;
258 ah_attr->src_path_bits = src_path_bits; 258 ah_attr->src_path_bits = src_path_bits;
259 ah_attr->port_num = port_num; 259 ah_attr->port_num = port_num;
@@ -264,7 +264,7 @@ static void cm_init_av_for_response(struct cm_port *port,
264{ 264{
265 av->port = port; 265 av->port = port;
266 av->pkey_index = wc->pkey_index; 266 av->pkey_index = wc->pkey_index;
267 cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid), 267 cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
268 wc->sl, wc->dlid_path_bits); 268 wc->sl, wc->dlid_path_bits);
269} 269}
270 270
@@ -295,8 +295,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
295 return ret; 295 return ret;
296 296
297 av->port = port; 297 av->port = port;
298 cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid, 298 cm_set_ah_attr(&av->ah_attr, av->port->port_num,
299 path->sl, path->slid & 0x7F); 299 be16_to_cpu(path->dlid), path->sl,
300 be16_to_cpu(path->slid) & 0x7F);
300 av->packet_life_time = path->packet_life_time; 301 av->packet_life_time = path->packet_life_time;
301 return 0; 302 return 0;
302} 303}
@@ -309,26 +310,26 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
309 do { 310 do {
310 spin_lock_irqsave(&cm.lock, flags); 311 spin_lock_irqsave(&cm.lock, flags);
311 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1, 312 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
312 (int *) &cm_id_priv->id.local_id); 313 (__force int *) &cm_id_priv->id.local_id);
313 spin_unlock_irqrestore(&cm.lock, flags); 314 spin_unlock_irqrestore(&cm.lock, flags);
314 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 315 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
315 return ret; 316 return ret;
316} 317}
317 318
318static void cm_free_id(u32 local_id) 319static void cm_free_id(__be32 local_id)
319{ 320{
320 unsigned long flags; 321 unsigned long flags;
321 322
322 spin_lock_irqsave(&cm.lock, flags); 323 spin_lock_irqsave(&cm.lock, flags);
323 idr_remove(&cm.local_id_table, (int) local_id); 324 idr_remove(&cm.local_id_table, (__force int) local_id);
324 spin_unlock_irqrestore(&cm.lock, flags); 325 spin_unlock_irqrestore(&cm.lock, flags);
325} 326}
326 327
327static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id) 328static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
328{ 329{
329 struct cm_id_private *cm_id_priv; 330 struct cm_id_private *cm_id_priv;
330 331
331 cm_id_priv = idr_find(&cm.local_id_table, (int) local_id); 332 cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
332 if (cm_id_priv) { 333 if (cm_id_priv) {
333 if (cm_id_priv->id.remote_id == remote_id) 334 if (cm_id_priv->id.remote_id == remote_id)
334 atomic_inc(&cm_id_priv->refcount); 335 atomic_inc(&cm_id_priv->refcount);
@@ -339,7 +340,7 @@ static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
339 return cm_id_priv; 340 return cm_id_priv;
340} 341}
341 342
342static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id) 343static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
343{ 344{
344 struct cm_id_private *cm_id_priv; 345 struct cm_id_private *cm_id_priv;
345 unsigned long flags; 346 unsigned long flags;
@@ -356,8 +357,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
356 struct rb_node **link = &cm.listen_service_table.rb_node; 357 struct rb_node **link = &cm.listen_service_table.rb_node;
357 struct rb_node *parent = NULL; 358 struct rb_node *parent = NULL;
358 struct cm_id_private *cur_cm_id_priv; 359 struct cm_id_private *cur_cm_id_priv;
359 u64 service_id = cm_id_priv->id.service_id; 360 __be64 service_id = cm_id_priv->id.service_id;
360 u64 service_mask = cm_id_priv->id.service_mask; 361 __be64 service_mask = cm_id_priv->id.service_mask;
361 362
362 while (*link) { 363 while (*link) {
363 parent = *link; 364 parent = *link;
@@ -376,7 +377,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
376 return NULL; 377 return NULL;
377} 378}
378 379
379static struct cm_id_private * cm_find_listen(u64 service_id) 380static struct cm_id_private * cm_find_listen(__be64 service_id)
380{ 381{
381 struct rb_node *node = cm.listen_service_table.rb_node; 382 struct rb_node *node = cm.listen_service_table.rb_node;
382 struct cm_id_private *cm_id_priv; 383 struct cm_id_private *cm_id_priv;
@@ -400,8 +401,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
400 struct rb_node **link = &cm.remote_id_table.rb_node; 401 struct rb_node **link = &cm.remote_id_table.rb_node;
401 struct rb_node *parent = NULL; 402 struct rb_node *parent = NULL;
402 struct cm_timewait_info *cur_timewait_info; 403 struct cm_timewait_info *cur_timewait_info;
403 u64 remote_ca_guid = timewait_info->remote_ca_guid; 404 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
404 u32 remote_id = timewait_info->work.remote_id; 405 __be32 remote_id = timewait_info->work.remote_id;
405 406
406 while (*link) { 407 while (*link) {
407 parent = *link; 408 parent = *link;
@@ -424,8 +425,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
424 return NULL; 425 return NULL;
425} 426}
426 427
427static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid, 428static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
428 u32 remote_id) 429 __be32 remote_id)
429{ 430{
430 struct rb_node *node = cm.remote_id_table.rb_node; 431 struct rb_node *node = cm.remote_id_table.rb_node;
431 struct cm_timewait_info *timewait_info; 432 struct cm_timewait_info *timewait_info;
@@ -453,8 +454,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
453 struct rb_node **link = &cm.remote_qp_table.rb_node; 454 struct rb_node **link = &cm.remote_qp_table.rb_node;
454 struct rb_node *parent = NULL; 455 struct rb_node *parent = NULL;
455 struct cm_timewait_info *cur_timewait_info; 456 struct cm_timewait_info *cur_timewait_info;
456 u64 remote_ca_guid = timewait_info->remote_ca_guid; 457 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
457 u32 remote_qpn = timewait_info->remote_qpn; 458 __be32 remote_qpn = timewait_info->remote_qpn;
458 459
459 while (*link) { 460 while (*link) {
460 parent = *link; 461 parent = *link;
@@ -484,7 +485,7 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
484 struct rb_node *parent = NULL; 485 struct rb_node *parent = NULL;
485 struct cm_id_private *cur_cm_id_priv; 486 struct cm_id_private *cur_cm_id_priv;
486 union ib_gid *port_gid = &cm_id_priv->av.dgid; 487 union ib_gid *port_gid = &cm_id_priv->av.dgid;
487 u32 remote_id = cm_id_priv->id.remote_id; 488 __be32 remote_id = cm_id_priv->id.remote_id;
488 489
489 while (*link) { 490 while (*link) {
490 parent = *link; 491 parent = *link;
@@ -598,7 +599,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
598 spin_unlock_irqrestore(&cm.lock, flags); 599 spin_unlock_irqrestore(&cm.lock, flags);
599} 600}
600 601
601static struct cm_timewait_info * cm_create_timewait_info(u32 local_id) 602static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
602{ 603{
603 struct cm_timewait_info *timewait_info; 604 struct cm_timewait_info *timewait_info;
604 605
@@ -715,14 +716,15 @@ retest:
715EXPORT_SYMBOL(ib_destroy_cm_id); 716EXPORT_SYMBOL(ib_destroy_cm_id);
716 717
717int ib_cm_listen(struct ib_cm_id *cm_id, 718int ib_cm_listen(struct ib_cm_id *cm_id,
718 u64 service_id, 719 __be64 service_id,
719 u64 service_mask) 720 __be64 service_mask)
720{ 721{
721 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 722 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
722 unsigned long flags; 723 unsigned long flags;
723 int ret = 0; 724 int ret = 0;
724 725
725 service_mask = service_mask ? service_mask : ~0ULL; 726 service_mask = service_mask ? service_mask :
727 __constant_cpu_to_be64(~0ULL);
726 service_id &= service_mask; 728 service_id &= service_mask;
727 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 729 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
728 (service_id != IB_CM_ASSIGN_SERVICE_ID)) 730 (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -735,8 +737,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
735 737
736 spin_lock_irqsave(&cm.lock, flags); 738 spin_lock_irqsave(&cm.lock, flags);
737 if (service_id == IB_CM_ASSIGN_SERVICE_ID) { 739 if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
738 cm_id->service_id = __cpu_to_be64(cm.listen_service_id++); 740 cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
739 cm_id->service_mask = ~0ULL; 741 cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
740 } else { 742 } else {
741 cm_id->service_id = service_id; 743 cm_id->service_id = service_id;
742 cm_id->service_mask = service_mask; 744 cm_id->service_mask = service_mask;
@@ -752,18 +754,19 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
752} 754}
753EXPORT_SYMBOL(ib_cm_listen); 755EXPORT_SYMBOL(ib_cm_listen);
754 756
755static u64 cm_form_tid(struct cm_id_private *cm_id_priv, 757static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
756 enum cm_msg_sequence msg_seq) 758 enum cm_msg_sequence msg_seq)
757{ 759{
758 u64 hi_tid, low_tid; 760 u64 hi_tid, low_tid;
759 761
760 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; 762 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
761 low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30)); 763 low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
764 (msg_seq << 30));
762 return cpu_to_be64(hi_tid | low_tid); 765 return cpu_to_be64(hi_tid | low_tid);
763} 766}
764 767
765static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 768static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
766 enum cm_msg_attr_id attr_id, u64 tid) 769 __be16 attr_id, __be64 tid)
767{ 770{
768 hdr->base_version = IB_MGMT_BASE_VERSION; 771 hdr->base_version = IB_MGMT_BASE_VERSION;
769 hdr->mgmt_class = IB_MGMT_CLASS_CM; 772 hdr->mgmt_class = IB_MGMT_CLASS_CM;
@@ -896,7 +899,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
896 goto error1; 899 goto error1;
897 } 900 }
898 cm_id->service_id = param->service_id; 901 cm_id->service_id = param->service_id;
899 cm_id->service_mask = ~0ULL; 902 cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
900 cm_id_priv->timeout_ms = cm_convert_to_ms( 903 cm_id_priv->timeout_ms = cm_convert_to_ms(
901 param->primary_path->packet_life_time) * 2 + 904 param->primary_path->packet_life_time) * 2 +
902 cm_convert_to_ms( 905 cm_convert_to_ms(
@@ -963,7 +966,7 @@ static int cm_issue_rej(struct cm_port *port,
963 rej_msg->remote_comm_id = rcv_msg->local_comm_id; 966 rej_msg->remote_comm_id = rcv_msg->local_comm_id;
964 rej_msg->local_comm_id = rcv_msg->remote_comm_id; 967 rej_msg->local_comm_id = rcv_msg->remote_comm_id;
965 cm_rej_set_msg_rejected(rej_msg, msg_rejected); 968 cm_rej_set_msg_rejected(rej_msg, msg_rejected);
966 rej_msg->reason = reason; 969 rej_msg->reason = cpu_to_be16(reason);
967 970
968 if (ari && ari_length) { 971 if (ari && ari_length) {
969 cm_rej_set_reject_info_len(rej_msg, ari_length); 972 cm_rej_set_reject_info_len(rej_msg, ari_length);
@@ -977,8 +980,8 @@ static int cm_issue_rej(struct cm_port *port,
977 return ret; 980 return ret;
978} 981}
979 982
980static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid, 983static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
981 u32 local_qpn, u32 remote_qpn) 984 __be32 local_qpn, __be32 remote_qpn)
982{ 985{
983 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || 986 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
984 ((local_ca_guid == remote_ca_guid) && 987 ((local_ca_guid == remote_ca_guid) &&
@@ -1137,7 +1140,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
1137 break; 1140 break;
1138 } 1141 }
1139 1142
1140 rej_msg->reason = reason; 1143 rej_msg->reason = cpu_to_be16(reason);
1141 if (ari && ari_length) { 1144 if (ari && ari_length) {
1142 cm_rej_set_reject_info_len(rej_msg, ari_length); 1145 cm_rej_set_reject_info_len(rej_msg, ari_length);
1143 memcpy(rej_msg->ari, ari, ari_length); 1146 memcpy(rej_msg->ari, ari, ari_length);
@@ -1276,7 +1279,7 @@ static int cm_req_handler(struct cm_work *work)
1276 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1279 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1277 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1280 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1278 cm_id_priv->id.service_id = req_msg->service_id; 1281 cm_id_priv->id.service_id = req_msg->service_id;
1279 cm_id_priv->id.service_mask = ~0ULL; 1282 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1280 1283
1281 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1284 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1282 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1285 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
@@ -1969,7 +1972,7 @@ static void cm_format_rej_event(struct cm_work *work)
1969 param = &work->cm_event.param.rej_rcvd; 1972 param = &work->cm_event.param.rej_rcvd;
1970 param->ari = rej_msg->ari; 1973 param->ari = rej_msg->ari;
1971 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 1974 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1972 param->reason = rej_msg->reason; 1975 param->reason = __be16_to_cpu(rej_msg->reason);
1973 work->cm_event.private_data = &rej_msg->private_data; 1976 work->cm_event.private_data = &rej_msg->private_data;
1974} 1977}
1975 1978
@@ -1978,20 +1981,20 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1978 struct cm_timewait_info *timewait_info; 1981 struct cm_timewait_info *timewait_info;
1979 struct cm_id_private *cm_id_priv; 1982 struct cm_id_private *cm_id_priv;
1980 unsigned long flags; 1983 unsigned long flags;
1981 u32 remote_id; 1984 __be32 remote_id;
1982 1985
1983 remote_id = rej_msg->local_comm_id; 1986 remote_id = rej_msg->local_comm_id;
1984 1987
1985 if (rej_msg->reason == IB_CM_REJ_TIMEOUT) { 1988 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
1986 spin_lock_irqsave(&cm.lock, flags); 1989 spin_lock_irqsave(&cm.lock, flags);
1987 timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari), 1990 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
1988 remote_id); 1991 remote_id);
1989 if (!timewait_info) { 1992 if (!timewait_info) {
1990 spin_unlock_irqrestore(&cm.lock, flags); 1993 spin_unlock_irqrestore(&cm.lock, flags);
1991 return NULL; 1994 return NULL;
1992 } 1995 }
1993 cm_id_priv = idr_find(&cm.local_id_table, 1996 cm_id_priv = idr_find(&cm.local_id_table,
1994 (int) timewait_info->work.local_id); 1997 (__force int) timewait_info->work.local_id);
1995 if (cm_id_priv) { 1998 if (cm_id_priv) {
1996 if (cm_id_priv->id.remote_id == remote_id) 1999 if (cm_id_priv->id.remote_id == remote_id)
1997 atomic_inc(&cm_id_priv->refcount); 2000 atomic_inc(&cm_id_priv->refcount);
@@ -2032,7 +2035,7 @@ static int cm_rej_handler(struct cm_work *work)
2032 /* fall through */ 2035 /* fall through */
2033 case IB_CM_REQ_RCVD: 2036 case IB_CM_REQ_RCVD:
2034 case IB_CM_MRA_REQ_SENT: 2037 case IB_CM_MRA_REQ_SENT:
2035 if (rej_msg->reason == IB_CM_REJ_STALE_CONN) 2038 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2036 cm_enter_timewait(cm_id_priv); 2039 cm_enter_timewait(cm_id_priv);
2037 else 2040 else
2038 cm_reset_to_idle(cm_id_priv); 2041 cm_reset_to_idle(cm_id_priv);
@@ -2553,7 +2556,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2553 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2556 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2554 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2557 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2555 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2558 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2556 sidr_req_msg->pkey = param->pkey; 2559 sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2557 sidr_req_msg->service_id = param->service_id; 2560 sidr_req_msg->service_id = param->service_id;
2558 2561
2559 if (param->private_data && param->private_data_len) 2562 if (param->private_data && param->private_data_len)
@@ -2580,7 +2583,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2580 goto out; 2583 goto out;
2581 2584
2582 cm_id->service_id = param->service_id; 2585 cm_id->service_id = param->service_id;
2583 cm_id->service_mask = ~0ULL; 2586 cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2584 cm_id_priv->timeout_ms = param->timeout_ms; 2587 cm_id_priv->timeout_ms = param->timeout_ms;
2585 cm_id_priv->max_cm_retries = param->max_cm_retries; 2588 cm_id_priv->max_cm_retries = param->max_cm_retries;
2586 ret = cm_alloc_msg(cm_id_priv, &msg); 2589 ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2621,7 +2624,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
2621 sidr_req_msg = (struct cm_sidr_req_msg *) 2624 sidr_req_msg = (struct cm_sidr_req_msg *)
2622 work->mad_recv_wc->recv_buf.mad; 2625 work->mad_recv_wc->recv_buf.mad;
2623 param = &work->cm_event.param.sidr_req_rcvd; 2626 param = &work->cm_event.param.sidr_req_rcvd;
2624 param->pkey = sidr_req_msg->pkey; 2627 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2625 param->listen_id = listen_id; 2628 param->listen_id = listen_id;
2626 param->device = work->port->mad_agent->device; 2629 param->device = work->port->mad_agent->device;
2627 param->port = work->port->port_num; 2630 param->port = work->port->port_num;
@@ -2645,7 +2648,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
2645 sidr_req_msg = (struct cm_sidr_req_msg *) 2648 sidr_req_msg = (struct cm_sidr_req_msg *)
2646 work->mad_recv_wc->recv_buf.mad; 2649 work->mad_recv_wc->recv_buf.mad;
2647 wc = work->mad_recv_wc->wc; 2650 wc = work->mad_recv_wc->wc;
2648 cm_id_priv->av.dgid.global.subnet_prefix = wc->slid; 2651 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2649 cm_id_priv->av.dgid.global.interface_id = 0; 2652 cm_id_priv->av.dgid.global.interface_id = 0;
2650 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2653 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2651 &cm_id_priv->av); 2654 &cm_id_priv->av);
@@ -2673,7 +2676,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
2673 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2676 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2674 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2677 cm_id_priv->id.context = cur_cm_id_priv->id.context;
2675 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2678 cm_id_priv->id.service_id = sidr_req_msg->service_id;
2676 cm_id_priv->id.service_mask = ~0ULL; 2679 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2677 2680
2678 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2681 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2679 cm_process_work(cm_id_priv, work); 2682 cm_process_work(cm_id_priv, work);
@@ -3175,10 +3178,10 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3175} 3178}
3176EXPORT_SYMBOL(ib_cm_init_qp_attr); 3179EXPORT_SYMBOL(ib_cm_init_qp_attr);
3177 3180
3178static u64 cm_get_ca_guid(struct ib_device *device) 3181static __be64 cm_get_ca_guid(struct ib_device *device)
3179{ 3182{
3180 struct ib_device_attr *device_attr; 3183 struct ib_device_attr *device_attr;
3181 u64 guid; 3184 __be64 guid;
3182 int ret; 3185 int ret;
3183 3186
3184 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); 3187 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
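
The long run of u32/u64 to __be32/__be64 changes above is a sparse-driven endianness audit: values that live in network byte order keep a distinct type for their whole lifetime and cross the boundary exactly once through cpu_to_be*()/be*_to_cpu(), with __force casts where an opaque integer, such as the idr key, is genuinely byte-order-free. A userspace sketch of the discipline; the swap helpers below implement the little-endian case and are stand-ins, not kernel API.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32_sketch;	/* stands in for __be32 */

static be32_sketch cpu_to_be32_sketch(uint32_t x)
{
	return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
	       ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
}

static uint32_t be32_to_cpu_sketch(be32_sketch x)
{
	return cpu_to_be32_sketch(x);	/* the byte swap is its own inverse */
}

int main(void)
{
	uint32_t host_qpn = 0x00ABCDEF;
	be32_sketch wire = cpu_to_be32_sketch(host_qpn);

	/* Compare and store values in wire order; convert only to do math. */
	printf("round trip ok: %d\n",
	       be32_to_cpu_sketch(wire) == host_qpn);
	return 0;
}
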
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 15a309a77b2b..813ab70bf6d5 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -34,7 +34,7 @@
34#if !defined(CM_MSGS_H) 34#if !defined(CM_MSGS_H)
35#define CM_MSGS_H 35#define CM_MSGS_H
36 36
37#include <ib_mad.h> 37#include <rdma/ib_mad.h>
38 38
39/* 39/*
40 * Parameters to routines below should be in network-byte order, and values 40 * Parameters to routines below should be in network-byte order, and values
@@ -43,19 +43,17 @@
43 43
44#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ 44#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
45 45
46enum cm_msg_attr_id { 46#define CM_REQ_ATTR_ID __constant_htons(0x0010)
47 CM_REQ_ATTR_ID = __constant_htons(0x0010), 47#define CM_MRA_ATTR_ID __constant_htons(0x0011)
48 CM_MRA_ATTR_ID = __constant_htons(0x0011), 48#define CM_REJ_ATTR_ID __constant_htons(0x0012)
49 CM_REJ_ATTR_ID = __constant_htons(0x0012), 49#define CM_REP_ATTR_ID __constant_htons(0x0013)
50 CM_REP_ATTR_ID = __constant_htons(0x0013), 50#define CM_RTU_ATTR_ID __constant_htons(0x0014)
51 CM_RTU_ATTR_ID = __constant_htons(0x0014), 51#define CM_DREQ_ATTR_ID __constant_htons(0x0015)
52 CM_DREQ_ATTR_ID = __constant_htons(0x0015), 52#define CM_DREP_ATTR_ID __constant_htons(0x0016)
53 CM_DREP_ATTR_ID = __constant_htons(0x0016), 53#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
54 CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017), 54#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
55 CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018), 55#define CM_LAP_ATTR_ID __constant_htons(0x0019)
56 CM_LAP_ATTR_ID = __constant_htons(0x0019), 56#define CM_APR_ATTR_ID __constant_htons(0x001A)
57 CM_APR_ATTR_ID = __constant_htons(0x001A)
58};
59 57
60enum cm_msg_sequence { 58enum cm_msg_sequence {
61 CM_MSG_SEQUENCE_REQ, 59 CM_MSG_SEQUENCE_REQ,
@@ -67,35 +65,35 @@ enum cm_msg_sequence {
67struct cm_req_msg { 65struct cm_req_msg {
68 struct ib_mad_hdr hdr; 66 struct ib_mad_hdr hdr;
69 67
70 u32 local_comm_id; 68 __be32 local_comm_id;
71 u32 rsvd4; 69 __be32 rsvd4;
72 u64 service_id; 70 __be64 service_id;
73 u64 local_ca_guid; 71 __be64 local_ca_guid;
74 u32 rsvd24; 72 __be32 rsvd24;
75 u32 local_qkey; 73 __be32 local_qkey;
76 /* local QPN:24, responder resources:8 */ 74 /* local QPN:24, responder resources:8 */
77 u32 offset32; 75 __be32 offset32;
78 /* local EECN:24, initiator depth:8 */ 76 /* local EECN:24, initiator depth:8 */
79 u32 offset36; 77 __be32 offset36;
80 /* 78 /*
81 * remote EECN:24, remote CM response timeout:5, 79 * remote EECN:24, remote CM response timeout:5,
82 * transport service type:2, end-to-end flow control:1 80 * transport service type:2, end-to-end flow control:1
83 */ 81 */
84 u32 offset40; 82 __be32 offset40;
85 /* starting PSN:24, local CM response timeout:5, retry count:3 */ 83 /* starting PSN:24, local CM response timeout:5, retry count:3 */
86 u32 offset44; 84 __be32 offset44;
87 u16 pkey; 85 __be16 pkey;
88 /* path MTU:4, RDC exists:1, RNR retry count:3. */ 86 /* path MTU:4, RDC exists:1, RNR retry count:3. */
89 u8 offset50; 87 u8 offset50;
90 /* max CM Retries:4, SRQ:1, rsvd:3 */ 88 /* max CM Retries:4, SRQ:1, rsvd:3 */
91 u8 offset51; 89 u8 offset51;
92 90
93 u16 primary_local_lid; 91 __be16 primary_local_lid;
94 u16 primary_remote_lid; 92 __be16 primary_remote_lid;
95 union ib_gid primary_local_gid; 93 union ib_gid primary_local_gid;
96 union ib_gid primary_remote_gid; 94 union ib_gid primary_remote_gid;
97 /* flow label:20, rsvd:6, packet rate:6 */ 95 /* flow label:20, rsvd:6, packet rate:6 */
98 u32 primary_offset88; 96 __be32 primary_offset88;
99 u8 primary_traffic_class; 97 u8 primary_traffic_class;
100 u8 primary_hop_limit; 98 u8 primary_hop_limit;
101 /* SL:4, subnet local:1, rsvd:3 */ 99 /* SL:4, subnet local:1, rsvd:3 */
@@ -103,12 +101,12 @@ struct cm_req_msg {
103 /* local ACK timeout:5, rsvd:3 */ 101 /* local ACK timeout:5, rsvd:3 */
104 u8 primary_offset95; 102 u8 primary_offset95;
105 103
106 u16 alt_local_lid; 104 __be16 alt_local_lid;
107 u16 alt_remote_lid; 105 __be16 alt_remote_lid;
108 union ib_gid alt_local_gid; 106 union ib_gid alt_local_gid;
109 union ib_gid alt_remote_gid; 107 union ib_gid alt_remote_gid;
110 /* flow label:20, rsvd:6, packet rate:6 */ 108 /* flow label:20, rsvd:6, packet rate:6 */
111 u32 alt_offset132; 109 __be32 alt_offset132;
112 u8 alt_traffic_class; 110 u8 alt_traffic_class;
113 u8 alt_hop_limit; 111 u8 alt_hop_limit;
114 /* SL:4, subnet local:1, rsvd:3 */ 112 /* SL:4, subnet local:1, rsvd:3 */
@@ -120,12 +118,12 @@ struct cm_req_msg {
120 118
121} __attribute__ ((packed)); 119} __attribute__ ((packed));
122 120
123static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) 121static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
124{ 122{
125 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8); 123 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
126} 124}
127 125
128static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn) 126static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
129{ 127{
130 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 128 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
131 (be32_to_cpu(req_msg->offset32) & 129 (be32_to_cpu(req_msg->offset32) &
@@ -208,13 +206,13 @@ static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
208 0xFFFFFFFE)); 206 0xFFFFFFFE));
209} 207}
210 208
211static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg) 209static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
212{ 210{
213 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8); 211 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
214} 212}
215 213
216static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg, 214static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
217 u32 starting_psn) 215 __be32 starting_psn)
218{ 216{
219 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | 217 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
220 (be32_to_cpu(req_msg->offset44) & 0x000000FF)); 218 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
@@ -288,13 +286,13 @@ static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
288 ((srq & 0x1) << 3)); 286 ((srq & 0x1) << 3));
289} 287}
290 288
291static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg) 289static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
292{ 290{
293 return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12)); 291 return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
294} 292}
295 293
296static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg, 294static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
297 u32 flow_label) 295 __be32 flow_label)
298{ 296{
299 req_msg->primary_offset88 = cpu_to_be32( 297 req_msg->primary_offset88 = cpu_to_be32(
300 (be32_to_cpu(req_msg->primary_offset88) & 298 (be32_to_cpu(req_msg->primary_offset88) &
@@ -350,13 +348,13 @@ static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_m
350 (local_ack_timeout << 3)); 348 (local_ack_timeout << 3));
351} 349}
352 350
353static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg) 351static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
354{ 352{
355 return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12)); 353 return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
356} 354}
357 355
358static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg, 356static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
359 u32 flow_label) 357 __be32 flow_label)
360{ 358{
361 req_msg->alt_offset132 = cpu_to_be32( 359 req_msg->alt_offset132 = cpu_to_be32(
362 (be32_to_cpu(req_msg->alt_offset132) & 360 (be32_to_cpu(req_msg->alt_offset132) &
@@ -422,8 +420,8 @@ enum cm_msg_response {
422 struct cm_mra_msg { 420 struct cm_mra_msg {
423 struct ib_mad_hdr hdr; 421 struct ib_mad_hdr hdr;
424 422
425 u32 local_comm_id; 423 __be32 local_comm_id;
426 u32 remote_comm_id; 424 __be32 remote_comm_id;
427 /* message MRAed:2, rsvd:6 */ 425 /* message MRAed:2, rsvd:6 */
428 u8 offset8; 426 u8 offset8;
429 /* service timeout:5, rsvd:3 */ 427 /* service timeout:5, rsvd:3 */
@@ -458,13 +456,13 @@ static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
458struct cm_rej_msg { 456struct cm_rej_msg {
459 struct ib_mad_hdr hdr; 457 struct ib_mad_hdr hdr;
460 458
461 u32 local_comm_id; 459 __be32 local_comm_id;
462 u32 remote_comm_id; 460 __be32 remote_comm_id;
463 /* message REJected:2, rsvd:6 */ 461 /* message REJected:2, rsvd:6 */
464 u8 offset8; 462 u8 offset8;
465 /* reject info length:7, rsvd:1. */ 463 /* reject info length:7, rsvd:1. */
466 u8 offset9; 464 u8 offset9;
467 u16 reason; 465 __be16 reason;
468 u8 ari[IB_CM_REJ_ARI_LENGTH]; 466 u8 ari[IB_CM_REJ_ARI_LENGTH];
469 467
470 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE]; 468 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
@@ -495,45 +493,45 @@ static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
495struct cm_rep_msg { 493struct cm_rep_msg {
496 struct ib_mad_hdr hdr; 494 struct ib_mad_hdr hdr;
497 495
498 u32 local_comm_id; 496 __be32 local_comm_id;
499 u32 remote_comm_id; 497 __be32 remote_comm_id;
500 u32 local_qkey; 498 __be32 local_qkey;
501 /* local QPN:24, rsvd:8 */ 499 /* local QPN:24, rsvd:8 */
502 u32 offset12; 500 __be32 offset12;
503 /* local EECN:24, rsvd:8 */ 501 /* local EECN:24, rsvd:8 */
504 u32 offset16; 502 __be32 offset16;
505 /* starting PSN:24 rsvd:8 */ 503 /* starting PSN:24 rsvd:8 */
506 u32 offset20; 504 __be32 offset20;
507 u8 resp_resources; 505 u8 resp_resources;
508 u8 initiator_depth; 506 u8 initiator_depth;
509 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */ 507 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
510 u8 offset26; 508 u8 offset26;
511 /* RNR retry count:3, SRQ:1, rsvd:5 */ 509 /* RNR retry count:3, SRQ:1, rsvd:5 */
512 u8 offset27; 510 u8 offset27;
513 u64 local_ca_guid; 511 __be64 local_ca_guid;
514 512
515 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE]; 513 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
516 514
517} __attribute__ ((packed)); 515} __attribute__ ((packed));
518 516
519static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) 517static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
520{ 518{
521 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8); 519 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
522} 520}
523 521
524static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn) 522static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
525{ 523{
526 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 524 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
527 (be32_to_cpu(rep_msg->offset12) & 0x000000FF)); 525 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
528} 526}
529 527
530static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg) 528static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
531{ 529{
532 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8); 530 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
533} 531}
534 532
535static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg, 533static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
536 u32 starting_psn) 534 __be32 starting_psn)
537{ 535{
538 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | 536 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
539 (be32_to_cpu(rep_msg->offset20) & 0x000000FF)); 537 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
@@ -600,8 +598,8 @@ static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
600struct cm_rtu_msg { 598struct cm_rtu_msg {
601 struct ib_mad_hdr hdr; 599 struct ib_mad_hdr hdr;
602 600
603 u32 local_comm_id; 601 __be32 local_comm_id;
604 u32 remote_comm_id; 602 __be32 remote_comm_id;
605 603
606 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE]; 604 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
607 605
@@ -610,21 +608,21 @@ struct cm_rtu_msg {
610struct cm_dreq_msg { 608struct cm_dreq_msg {
611 struct ib_mad_hdr hdr; 609 struct ib_mad_hdr hdr;
612 610
613 u32 local_comm_id; 611 __be32 local_comm_id;
614 u32 remote_comm_id; 612 __be32 remote_comm_id;
615 /* remote QPN/EECN:24, rsvd:8 */ 613 /* remote QPN/EECN:24, rsvd:8 */
616 u32 offset8; 614 __be32 offset8;
617 615
618 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE]; 616 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
619 617
620} __attribute__ ((packed)); 618} __attribute__ ((packed));
621 619
622static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) 620static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
623{ 621{
624 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8); 622 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
625} 623}
626 624
627static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn) 625static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
628{ 626{
629 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 627 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
630 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF)); 628 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
@@ -633,8 +631,8 @@ static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
633struct cm_drep_msg { 631struct cm_drep_msg {
634 struct ib_mad_hdr hdr; 632 struct ib_mad_hdr hdr;
635 633
636 u32 local_comm_id; 634 __be32 local_comm_id;
637 u32 remote_comm_id; 635 __be32 remote_comm_id;
638 636
639 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE]; 637 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
640 638
@@ -643,37 +641,37 @@ struct cm_drep_msg {
643struct cm_lap_msg { 641struct cm_lap_msg {
644 struct ib_mad_hdr hdr; 642 struct ib_mad_hdr hdr;
645 643
646 u32 local_comm_id; 644 __be32 local_comm_id;
647 u32 remote_comm_id; 645 __be32 remote_comm_id;
648 646
649 u32 rsvd8; 647 __be32 rsvd8;
650 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */ 648 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
651 u32 offset12; 649 __be32 offset12;
652 u32 rsvd16; 650 __be32 rsvd16;
653 651
654 u16 alt_local_lid; 652 __be16 alt_local_lid;
655 u16 alt_remote_lid; 653 __be16 alt_remote_lid;
656 union ib_gid alt_local_gid; 654 union ib_gid alt_local_gid;
657 union ib_gid alt_remote_gid; 655 union ib_gid alt_remote_gid;
658 /* flow label:20, rsvd:4, traffic class:8 */ 656 /* flow label:20, rsvd:4, traffic class:8 */
659 u32 offset56; 657 __be32 offset56;
660 u8 alt_hop_limit; 658 u8 alt_hop_limit;
661 /* rsvd:2, packet rate:6 */ 659 /* rsvd:2, packet rate:6 */
662 uint8_t offset61; 660 u8 offset61;
663 /* SL:4, subnet local:1, rsvd:3 */ 661 /* SL:4, subnet local:1, rsvd:3 */
664 uint8_t offset62; 662 u8 offset62;
665 /* local ACK timeout:5, rsvd:3 */ 663 /* local ACK timeout:5, rsvd:3 */
666 uint8_t offset63; 664 u8 offset63;
667 665
668 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE]; 666 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
669} __attribute__ ((packed)); 667} __attribute__ ((packed));
670 668
671static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) 669static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
672{ 670{
673 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8); 671 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
674} 672}
675 673
676static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn) 674static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
677{ 675{
678 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 676 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
679 (be32_to_cpu(lap_msg->offset12) & 677 (be32_to_cpu(lap_msg->offset12) &
@@ -693,17 +691,17 @@ static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
693 0xFFFFFF07)); 691 0xFFFFFF07));
694} 692}
695 693
696static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg) 694static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
697{ 695{
698 return be32_to_cpu(lap_msg->offset56) >> 12; 696 return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
699} 697}
700 698
701static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg, 699static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
702 u32 flow_label) 700 __be32 flow_label)
703{ 701{
704 lap_msg->offset56 = cpu_to_be32((flow_label << 12) | 702 lap_msg->offset56 = cpu_to_be32(
705 (be32_to_cpu(lap_msg->offset56) & 703 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
706 0x00000FFF)); 704 (be32_to_cpu(flow_label) << 12));
707} 705}
708 706
709static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg) 707static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
@@ -766,8 +764,8 @@ static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
766struct cm_apr_msg { 764struct cm_apr_msg {
767 struct ib_mad_hdr hdr; 765 struct ib_mad_hdr hdr;
768 766
769 u32 local_comm_id; 767 __be32 local_comm_id;
770 u32 remote_comm_id; 768 __be32 remote_comm_id;
771 769
772 u8 info_length; 770 u8 info_length;
773 u8 ap_status; 771 u8 ap_status;
@@ -779,10 +777,10 @@ struct cm_apr_msg {
779struct cm_sidr_req_msg { 777struct cm_sidr_req_msg {
780 struct ib_mad_hdr hdr; 778 struct ib_mad_hdr hdr;
781 779
782 u32 request_id; 780 __be32 request_id;
783 u16 pkey; 781 __be16 pkey;
784 u16 rsvd; 782 __be16 rsvd;
785 u64 service_id; 783 __be64 service_id;
786 784
787 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; 785 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
788} __attribute__ ((packed)); 786} __attribute__ ((packed));
@@ -790,26 +788,26 @@ struct cm_sidr_req_msg {
790struct cm_sidr_rep_msg { 788struct cm_sidr_rep_msg {
791 struct ib_mad_hdr hdr; 789 struct ib_mad_hdr hdr;
792 790
793 u32 request_id; 791 __be32 request_id;
794 u8 status; 792 u8 status;
795 u8 info_length; 793 u8 info_length;
796 u16 rsvd; 794 __be16 rsvd;
797 /* QPN:24, rsvd:8 */ 795 /* QPN:24, rsvd:8 */
798 u32 offset8; 796 __be32 offset8;
799 u64 service_id; 797 __be64 service_id;
800 u32 qkey; 798 __be32 qkey;
801 u8 info[IB_CM_SIDR_REP_INFO_LENGTH]; 799 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
802 800
803 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE]; 801 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
804} __attribute__ ((packed)); 802} __attribute__ ((packed));
805 803
806static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) 804static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
807{ 805{
808 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8); 806 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
809} 807}
810 808
811static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg, 809static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
812 u32 qpn) 810 __be32 qpn)
813{ 811{
814 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 812 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
815 (be32_to_cpu(sidr_rep_msg->offset8) & 813 (be32_to_cpu(sidr_rep_msg->offset8) &
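
The point of swapping u32/u16/u64 for __be32/__be16/__be64 throughout these wire-format structs is not behavioral: the generated code is identical. The bitwise types exist so that sparse ("make C=1 CHECKFLAGS=-D__CHECK_ENDIAN__") flags any use of a big-endian field as a host-order integer. A kernel-style sketch of the effect:

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__be32 comm_id;			/* big-endian on the wire */
};

static u32 read_comm_id(const struct wire_hdr *hdr)
{
	/* "return hdr->comm_id;" would draw a sparse warning about
	 * restricted types; the conversion below is the annotated,
	 * byte-order-correct form. */
	return be32_to_cpu(hdr->comm_id);
}

On big-endian machines both spellings happen to work, which is why such bugs survive until the code runs on a little-endian host; the annotations let the checker catch them everywhere.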
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 797049626ff6..7ad47a4b166b 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,7 @@
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40 40
41#include <ib_verbs.h> 41#include <rdma/ib_verbs.h>
42 42
43int ib_device_register_sysfs(struct ib_device *device); 43int ib_device_register_sysfs(struct ib_device *device);
44void ib_device_unregister_sysfs(struct ib_device *device); 44void ib_device_unregister_sysfs(struct ib_device *device);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9197e92d708a..d3cf84e01587 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7763b31abba7..d34a6f1c4f4c 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -39,7 +39,7 @@
39#include <linux/jhash.h> 39#include <linux/jhash.h>
40#include <linux/kthread.h> 40#include <linux/kthread.h>
41 41
42#include <ib_fmr_pool.h> 42#include <rdma/ib_fmr_pool.h>
43 43
44#include "core_priv.h" 44#include "core_priv.h"
45 45
@@ -334,6 +334,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
334{ 334{
335 struct ib_pool_fmr *fmr; 335 struct ib_pool_fmr *fmr;
336 struct ib_pool_fmr *tmp; 336 struct ib_pool_fmr *tmp;
337 LIST_HEAD(fmr_list);
337 int i; 338 int i;
338 339
339 kthread_stop(pool->thread); 340 kthread_stop(pool->thread);
@@ -341,6 +342,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
341 342
342 i = 0; 343 i = 0;
343 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { 344 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
345 if (fmr->remap_count) {
346 INIT_LIST_HEAD(&fmr_list);
347 list_add_tail(&fmr->fmr->list, &fmr_list);
348 ib_unmap_fmr(&fmr_list);
349 }
344 ib_dealloc_fmr(fmr->fmr); 350 ib_dealloc_fmr(fmr->fmr);
345 list_del(&fmr->list); 351 list_del(&fmr->list);
346 kfree(fmr); 352 kfree(fmr);
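
The fmr_pool hunk encodes an ordering rule: an FMR with outstanding mappings (remap_count != 0) must go through ib_unmap_fmr() before ib_dealloc_fmr(). ib_unmap_fmr() takes a list of FMRs rather than a single one, hence the temporary one-entry list. A condensed, commented sketch of the destroy path (illustrative, not a drop-in; it assumes the ib_fmr_pool internals shown above):

#include <linux/list.h>
#include <rdma/ib_verbs.h>

/* inside ib_destroy_fmr_pool(), once the flush thread is stopped: */
	LIST_HEAD(fmr_list);

	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			/* re-init per iteration: list_add leaves the
			 * entry chained after the previous unmap */
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);  /* batch API: takes a list */
		}
		ib_dealloc_fmr(fmr->fmr);	  /* only legal once unmapped */
		list_del(&fmr->list);
		kfree(fmr);
	}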
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b97e210ce9c8..a4a4d9c1eef3 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -693,7 +693,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
693 goto out; 693 goto out;
694 } 694 }
695 695
696 build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index, 696 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
697 send_wr->wr.ud.pkey_index,
697 send_wr->wr.ud.port_num, &mad_wc); 698 send_wr->wr.ud.port_num, &mad_wc);
698 699
699 /* No GRH for DR SMP */ 700 /* No GRH for DR SMP */
@@ -1554,7 +1555,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1554} 1555}
1555 1556
1556struct ib_mad_send_wr_private* 1557struct ib_mad_send_wr_private*
1557ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid) 1558ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
1558{ 1559{
1559 struct ib_mad_send_wr_private *mad_send_wr; 1560 struct ib_mad_send_wr_private *mad_send_wr;
1560 1561
@@ -1597,7 +1598,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1597 struct ib_mad_send_wr_private *mad_send_wr; 1598 struct ib_mad_send_wr_private *mad_send_wr;
1598 struct ib_mad_send_wc mad_send_wc; 1599 struct ib_mad_send_wc mad_send_wc;
1599 unsigned long flags; 1600 unsigned long flags;
1600 u64 tid; 1601 __be64 tid;
1601 1602
1602 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1603 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1603 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1604 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2165,7 +2166,8 @@ static void local_completions(void *data)
2165 * Defined behavior is to complete response 2166 * Defined behavior is to complete response
2166 * before request 2167 * before request
2167 */ 2168 */
2168 build_smp_wc(local->wr_id, IB_LID_PERMISSIVE, 2169 build_smp_wc(local->wr_id,
2170 be16_to_cpu(IB_LID_PERMISSIVE),
2169 0 /* pkey index */, 2171 0 /* pkey index */,
2170 recv_mad_agent->agent.port_num, &wc); 2172 recv_mad_agent->agent.port_num, &wc);
2171 2173
@@ -2294,7 +2296,7 @@ static void timeout_sends(void *data)
2294 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2296 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2295} 2297}
2296 2298
2297static void ib_mad_thread_completion_handler(struct ib_cq *cq) 2299static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2298{ 2300{
2299 struct ib_mad_port_private *port_priv = cq->cq_context; 2301 struct ib_mad_port_private *port_priv = cq->cq_context;
2300 2302
@@ -2574,8 +2576,7 @@ static int ib_mad_port_open(struct ib_device *device,
2574 2576
2575 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; 2577 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2576 port_priv->cq = ib_create_cq(port_priv->device, 2578 port_priv->cq = ib_create_cq(port_priv->device,
2577 (ib_comp_handler) 2579 ib_mad_thread_completion_handler,
2578 ib_mad_thread_completion_handler,
2579 NULL, port_priv, cq_size); 2580 NULL, port_priv, cq_size);
2580 if (IS_ERR(port_priv->cq)) { 2581 if (IS_ERR(port_priv->cq)) {
2581 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); 2582 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
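
The mad.c change above is a prototype fix rather than a behavior change: ib_comp_handler is "void (*)(struct ib_cq *cq, void *cq_context)", and the old code papered over the mismatch with a function-pointer cast at ib_create_cq() time. Matching the typedef exactly lets the compiler check the call. A minimal sketch of the registration pattern (struct my_port and its work item are hypothetical):

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct my_port {
	struct work_struct work;	/* bottom-half processing */
};

static void my_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct my_port *port = cq_context;	/* what we registered */

	schedule_work(&port->work);	/* defer to thread context */
}

/* registration: no cast needed, the prototype already matches
 *	cq = ib_create_cq(device, my_comp_handler, NULL, port, cq_size);
 */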
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 568da10b05ab..f1ba794e0daa 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -40,8 +40,8 @@
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/kthread.h> 41#include <linux/kthread.h>
42#include <linux/workqueue.h> 42#include <linux/workqueue.h>
43#include <ib_mad.h> 43#include <rdma/ib_mad.h>
44#include <ib_smi.h> 44#include <rdma/ib_smi.h>
45 45
46 46
47#define PFX "ib_mad: " 47#define PFX "ib_mad: "
@@ -121,7 +121,7 @@ struct ib_mad_send_wr_private {
121 struct ib_send_wr send_wr; 121 struct ib_send_wr send_wr;
122 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 122 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
123 u64 wr_id; /* client WR ID */ 123 u64 wr_id; /* client WR ID */
124 u64 tid; 124 __be64 tid;
125 unsigned long timeout; 125 unsigned long timeout;
126 int retries; 126 int retries;
127 int retry; 127 int retry;
@@ -144,7 +144,7 @@ struct ib_mad_local_private {
144 struct ib_send_wr send_wr; 144 struct ib_send_wr send_wr;
145 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 145 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
146 u64 wr_id; /* client WR ID */ 146 u64 wr_id; /* client WR ID */
147 u64 tid; 147 __be64 tid;
148}; 148};
149 149
150struct ib_mad_mgmt_method_table { 150struct ib_mad_mgmt_method_table {
@@ -210,7 +210,7 @@ extern kmem_cache_t *ib_mad_cache;
210int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); 210int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
211 211
212struct ib_mad_send_wr_private * 212struct ib_mad_send_wr_private *
213ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid); 213ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
214 214
215void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 215void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
216 struct ib_mad_send_wc *mad_send_wc); 216 struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8f1eb80e421f..43fd805e0265 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -61,7 +61,7 @@ struct mad_rmpp_recv {
61 int seg_num; 61 int seg_num;
62 int newwin; 62 int newwin;
63 63
64 u64 tid; 64 __be64 tid;
65 u32 src_qp; 65 u32 src_qp;
66 u16 slid; 66 u16 slid;
67 u8 mgmt_class; 67 u8 mgmt_class;
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
100 } 100 }
101} 101}
102 102
103static int data_offset(u8 mgmt_class)
104{
105 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
106 return offsetof(struct ib_sa_mad, data);
107 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
108 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
109 return offsetof(struct ib_vendor_mad, data);
110 else
111 return offsetof(struct ib_rmpp_mad, data);
112}
113
114static void format_ack(struct ib_rmpp_mad *ack,
115 struct ib_rmpp_mad *data,
116 struct mad_rmpp_recv *rmpp_recv)
117{
118 unsigned long flags;
119
120 memcpy(&ack->mad_hdr, &data->mad_hdr,
121 data_offset(data->mad_hdr.mgmt_class));
122
123 ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
124 ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
125 ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
126
127 spin_lock_irqsave(&rmpp_recv->lock, flags);
128 rmpp_recv->last_ack = rmpp_recv->seg_num;
129 ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
130 ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
131 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
132}
133
134static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
135 struct ib_mad_recv_wc *recv_wc)
136{
137 struct ib_mad_send_buf *msg;
138 struct ib_send_wr *bad_send_wr;
139 int hdr_len, ret;
140
141 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
142 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
143 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
144 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
145 GFP_KERNEL);
146 if (!msg)
147 return;
148
149 format_ack((struct ib_rmpp_mad *) msg->mad,
150 (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
151 ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
152 &bad_send_wr);
153 if (ret)
154 ib_free_send_mad(msg);
155}
156
157static int alloc_response_msg(struct ib_mad_agent *agent,
158 struct ib_mad_recv_wc *recv_wc,
159 struct ib_mad_send_buf **msg)
160{
161 struct ib_mad_send_buf *m;
162 struct ib_ah *ah;
163 int hdr_len;
164
165 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
166 recv_wc->recv_buf.grh, agent->port_num);
167 if (IS_ERR(ah))
168 return PTR_ERR(ah);
169
170 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
171 m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
172 recv_wc->wc->pkey_index, ah, 1, hdr_len,
173 sizeof(struct ib_rmpp_mad) - hdr_len,
174 GFP_KERNEL);
175 if (IS_ERR(m)) {
176 ib_destroy_ah(ah);
177 return PTR_ERR(m);
178 }
179 *msg = m;
180 return 0;
181}
182
183static void free_msg(struct ib_mad_send_buf *msg)
184{
185 ib_destroy_ah(msg->send_wr.wr.ud.ah);
186 ib_free_send_mad(msg);
187}
188
189static void nack_recv(struct ib_mad_agent_private *agent,
190 struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
191{
192 struct ib_mad_send_buf *msg;
193 struct ib_rmpp_mad *rmpp_mad;
194 struct ib_send_wr *bad_send_wr;
195 int ret;
196
197 ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
198 if (ret)
199 return;
200
201 rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
202 memcpy(rmpp_mad, recv_wc->recv_buf.mad,
203 data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
204
205 rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
206 rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
207 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
208 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
209 rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
210 rmpp_mad->rmpp_hdr.seg_num = 0;
211 rmpp_mad->rmpp_hdr.paylen_newwin = 0;
212
213 ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
214 if (ret)
215 free_msg(msg);
216}
217
103static void recv_timeout_handler(void *data) 218static void recv_timeout_handler(void *data)
104{ 219{
105 struct mad_rmpp_recv *rmpp_recv = data; 220 struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
115 list_del(&rmpp_recv->list); 230 list_del(&rmpp_recv->list);
116 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); 231 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
117 232
118 /* TODO: send abort. */
119 rmpp_wc = rmpp_recv->rmpp_wc; 233 rmpp_wc = rmpp_recv->rmpp_wc;
234 nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
120 destroy_rmpp_recv(rmpp_recv); 235 destroy_rmpp_recv(rmpp_recv);
121 ib_free_recv_mad(rmpp_wc); 236 ib_free_recv_mad(rmpp_wc);
122} 237}
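
data_offset() moves up unchanged because the new NACK path needs it earlier in the file, but the logic deserves a gloss: RMPP segments carry their payload after a class-specific header, so reassembly and header copies must skip a different prefix for SA MADs and for vendor-range-2 MADs. The same function, recommented (offsets per the rdma/ib_mad.h and rdma/ib_sa.h layouts):

#include <stddef.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

static int data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		/* SA: MAD hdr + RMPP hdr + SA-specific hdr */
		return offsetof(struct ib_sa_mad, data);
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		/* vendor range 2: MAD hdr + RMPP hdr + OUI */
		return offsetof(struct ib_vendor_mad, data);
	else
		/* everything else: MAD hdr + RMPP hdr only */
		return offsetof(struct ib_rmpp_mad, data);
}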
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
230 return cur_rmpp_recv; 345 return cur_rmpp_recv;
231} 346}
232 347
233static int data_offset(u8 mgmt_class)
234{
235 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
236 return offsetof(struct ib_sa_mad, data);
237 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
238 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
239 return offsetof(struct ib_vendor_mad, data);
240 else
241 return offsetof(struct ib_rmpp_mad, data);
242}
243
244static void format_ack(struct ib_rmpp_mad *ack,
245 struct ib_rmpp_mad *data,
246 struct mad_rmpp_recv *rmpp_recv)
247{
248 unsigned long flags;
249
250 memcpy(&ack->mad_hdr, &data->mad_hdr,
251 data_offset(data->mad_hdr.mgmt_class));
252
253 ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
254 ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
255 ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
256
257 spin_lock_irqsave(&rmpp_recv->lock, flags);
258 rmpp_recv->last_ack = rmpp_recv->seg_num;
259 ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
260 ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
261 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
262}
263
264static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
265 struct ib_mad_recv_wc *recv_wc)
266{
267 struct ib_mad_send_buf *msg;
268 struct ib_send_wr *bad_send_wr;
269 int hdr_len, ret;
270
271 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
272 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
273 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
274 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
275 GFP_KERNEL);
276 if (!msg)
277 return;
278
279 format_ack((struct ib_rmpp_mad *) msg->mad,
280 (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
281 ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
282 &bad_send_wr);
283 if (ret)
284 ib_free_send_mad(msg);
285}
286
287static inline int get_last_flag(struct ib_mad_recv_buf *seg) 348static inline int get_last_flag(struct ib_mad_recv_buf *seg)
288{ 349{
289 struct ib_rmpp_mad *rmpp_mad; 350 struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
559 return ib_send_mad(mad_send_wr); 620 return ib_send_mad(mad_send_wr);
560} 621}
561 622
623static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
624 u8 rmpp_status)
625{
626 struct ib_mad_send_wr_private *mad_send_wr;
627 struct ib_mad_send_wc wc;
628 unsigned long flags;
629
630 spin_lock_irqsave(&agent->lock, flags);
631 mad_send_wr = ib_find_send_mad(agent, tid);
632 if (!mad_send_wr)
633 goto out; /* Unmatched send */
634
635 if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
636 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
637 goto out; /* Send is already done */
638
639 ib_mark_mad_done(mad_send_wr);
640 spin_unlock_irqrestore(&agent->lock, flags);
641
642 wc.status = IB_WC_REM_ABORT_ERR;
643 wc.vendor_err = rmpp_status;
644 wc.wr_id = mad_send_wr->wr_id;
645 ib_mad_complete_send_wr(mad_send_wr, &wc);
646 return;
647out:
648 spin_unlock_irqrestore(&agent->lock, flags);
649}
650
562static void process_rmpp_ack(struct ib_mad_agent_private *agent, 651static void process_rmpp_ack(struct ib_mad_agent_private *agent,
563 struct ib_mad_recv_wc *mad_recv_wc) 652 struct ib_mad_recv_wc *mad_recv_wc)
564{ 653{
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
568 int seg_num, newwin, ret; 657 int seg_num, newwin, ret;
569 658
570 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 659 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
571 if (rmpp_mad->rmpp_hdr.rmpp_status) 660 if (rmpp_mad->rmpp_hdr.rmpp_status) {
661 abort_send(agent, rmpp_mad->mad_hdr.tid,
662 IB_MGMT_RMPP_STATUS_BAD_STATUS);
663 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
572 return; 664 return;
665 }
573 666
574 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 667 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
575 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 668 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
669 if (newwin < seg_num) {
670 abort_send(agent, rmpp_mad->mad_hdr.tid,
671 IB_MGMT_RMPP_STATUS_W2S);
672 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
673 return;
674 }
576 675
577 spin_lock_irqsave(&agent->lock, flags); 676 spin_lock_irqsave(&agent->lock, flags);
578 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); 677 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
583 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) 682 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
584 goto out; /* Send is already done */ 683 goto out; /* Send is already done */
585 684
586 if (seg_num > mad_send_wr->total_seg) 685 if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
587 goto out; /* Bad ACK */ 686 spin_unlock_irqrestore(&agent->lock, flags);
687 abort_send(agent, rmpp_mad->mad_hdr.tid,
688 IB_MGMT_RMPP_STATUS_S2B);
689 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
690 return;
691 }
588 692
589 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) 693 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
590 goto out; /* Old ACK */ 694 goto out; /* Old ACK */
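
The two new rejection cases bracket the existing "old ACK" check, and the ordering matters: an ACK whose NewWindow is smaller than its own SegNum is self-contradictory (W2S), an acknowledged segment beyond what was sent or beyond the advertised window indicates a confused peer (S2B), and only then can a merely stale ACK be dropped silently. A lock-free sketch of that decision order, roughly following the IBA RMPP rules (the real code holds agent->lock around the last two checks):

#include <linux/types.h>

/* Returns <0 to abort the transfer, 0 to ignore, 1 to advance. */
static int classify_ack(u32 seg_num, u32 newwin,	/* from the ACK */
			u32 last_ack, u32 total_seg,	/* sender state */
			u32 cur_win)
{
	if (newwin < seg_num)
		return -1;	/* IB_MGMT_RMPP_STATUS_W2S: window too small */
	if (seg_num > total_seg || seg_num > cur_win)
		return -2;	/* IB_MGMT_RMPP_STATUS_S2B: segment too big */
	if (newwin < cur_win || seg_num < last_ack)
		return 0;	/* old ACK: drop silently */
	return 1;		/* accept: update last_ack and window */
}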
@@ -628,6 +732,72 @@ out:
628 spin_unlock_irqrestore(&agent->lock, flags); 732 spin_unlock_irqrestore(&agent->lock, flags);
629} 733}
630 734
735static struct ib_mad_recv_wc *
736process_rmpp_data(struct ib_mad_agent_private *agent,
737 struct ib_mad_recv_wc *mad_recv_wc)
738{
739 struct ib_rmpp_hdr *rmpp_hdr;
740 u8 rmpp_status;
741
742 rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
743
744 if (rmpp_hdr->rmpp_status) {
745 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
746 goto bad;
747 }
748
749 if (rmpp_hdr->seg_num == __constant_htonl(1)) {
750 if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
751 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
752 goto bad;
753 }
754 return start_rmpp(agent, mad_recv_wc);
755 } else {
756 if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
757 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
758 goto bad;
759 }
760 return continue_rmpp(agent, mad_recv_wc);
761 }
762bad:
763 nack_recv(agent, mad_recv_wc, rmpp_status);
764 ib_free_recv_mad(mad_recv_wc);
765 return NULL;
766}
767
768static void process_rmpp_stop(struct ib_mad_agent_private *agent,
769 struct ib_mad_recv_wc *mad_recv_wc)
770{
771 struct ib_rmpp_mad *rmpp_mad;
772
773 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
774
775 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
776 abort_send(agent, rmpp_mad->mad_hdr.tid,
777 IB_MGMT_RMPP_STATUS_BAD_STATUS);
778 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
779 } else
780 abort_send(agent, rmpp_mad->mad_hdr.tid,
781 rmpp_mad->rmpp_hdr.rmpp_status);
782}
783
784static void process_rmpp_abort(struct ib_mad_agent_private *agent,
785 struct ib_mad_recv_wc *mad_recv_wc)
786{
787 struct ib_rmpp_mad *rmpp_mad;
788
789 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
790
791 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
792 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
793 abort_send(agent, rmpp_mad->mad_hdr.tid,
794 IB_MGMT_RMPP_STATUS_BAD_STATUS);
795 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
796 } else
797 abort_send(agent, rmpp_mad->mad_hdr.tid,
798 rmpp_mad->rmpp_hdr.rmpp_status);
799}
800
631struct ib_mad_recv_wc * 801struct ib_mad_recv_wc *
632ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, 802ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
633 struct ib_mad_recv_wc *mad_recv_wc) 803 struct ib_mad_recv_wc *mad_recv_wc)
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
638 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) 808 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
639 return mad_recv_wc; 809 return mad_recv_wc;
640 810
641 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) 811 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
812 abort_send(agent, rmpp_mad->mad_hdr.tid,
813 IB_MGMT_RMPP_STATUS_UNV);
814 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
642 goto out; 815 goto out;
816 }
643 817
644 switch (rmpp_mad->rmpp_hdr.rmpp_type) { 818 switch (rmpp_mad->rmpp_hdr.rmpp_type) {
645 case IB_MGMT_RMPP_TYPE_DATA: 819 case IB_MGMT_RMPP_TYPE_DATA:
646 if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1)) 820 return process_rmpp_data(agent, mad_recv_wc);
647 return start_rmpp(agent, mad_recv_wc);
648 else
649 return continue_rmpp(agent, mad_recv_wc);
650 case IB_MGMT_RMPP_TYPE_ACK: 821 case IB_MGMT_RMPP_TYPE_ACK:
651 process_rmpp_ack(agent, mad_recv_wc); 822 process_rmpp_ack(agent, mad_recv_wc);
652 break; 823 break;
653 case IB_MGMT_RMPP_TYPE_STOP: 824 case IB_MGMT_RMPP_TYPE_STOP:
825 process_rmpp_stop(agent, mad_recv_wc);
826 break;
654 case IB_MGMT_RMPP_TYPE_ABORT: 827 case IB_MGMT_RMPP_TYPE_ABORT:
655 /* TODO: process_rmpp_nack(agent, mad_recv_wc); */ 828 process_rmpp_abort(agent, mad_recv_wc);
656 break; 829 break;
657 default: 830 default:
831 abort_send(agent, rmpp_mad->mad_hdr.tid,
832 IB_MGMT_RMPP_STATUS_BADT);
833 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
658 break; 834 break;
659 } 835 }
660out: 836out:
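
A pattern repeats throughout these handlers: every protocol violation triggers both abort_send() and nack_recv() with the same status. That is deliberate; the offending TID may correspond to one of our outstanding sends, to an in-progress receive, or to both, and the handler cannot tell which, so it fails both directions. The pair could plausibly be factored into a helper along these lines (hypothetical, not part of the patch):

static void fail_both(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_rmpp_mad *rmpp_mad =
		(struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	abort_send(agent, rmpp_mad->mad_hdr.tid, rmpp_status);	/* our send */
	nack_recv(agent, mad_recv_wc, rmpp_status);		/* their send */
}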
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
714 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { 890 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
715 msg = (struct ib_mad_send_buf *) (unsigned long) 891 msg = (struct ib_mad_send_buf *) (unsigned long)
716 mad_send_wc->wr_id; 892 mad_send_wc->wr_id;
717 ib_free_send_mad(msg); 893 if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
894 ib_free_send_mad(msg);
895 else
896 free_msg(msg);
718 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ 897 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
719 } 898 }
720 899
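
The send-completion hunk distinguishes two kinds of locally generated RMPP responses by ownership of the address handle: ACKs are posted with rmpp_recv->ah, which outlives the send, so only the MAD buffer is freed; STOP/ABORT NACKs each created their own AH in alloc_response_msg() and must tear it down again, which is what free_msg() does. In sketch form:

	if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
		ib_free_send_mad(msg);	/* AH borrowed from rmpp_recv */
	else {
		/* NACK: the AH was created per-message, destroy it too */
		ib_destroy_ah(msg->send_wr.wr.ud.ah);
		ib_free_send_mad(msg);
	}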
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index eb5ff54c10d7..35df5010e723 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
32 * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $ 33 * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
33 */ 34 */
34 35
35#include <ib_pack.h> 36#include <rdma/ib_pack.h>
36 37
37static u64 value_read(int offset, int size, void *structure) 38static u64 value_read(int offset, int size, void *structure)
38{ 39{
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 795184931c83..126ac80db7b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -44,8 +44,8 @@
44#include <linux/kref.h> 44#include <linux/kref.h>
45#include <linux/idr.h> 45#include <linux/idr.h>
46 46
47#include <ib_pack.h> 47#include <rdma/ib_pack.h>
48#include <ib_sa.h> 48#include <rdma/ib_sa.h>
49 49
50MODULE_AUTHOR("Roland Dreier"); 50MODULE_AUTHOR("Roland Dreier");
51MODULE_DESCRIPTION("InfiniBand subnet administration query support"); 51MODULE_DESCRIPTION("InfiniBand subnet administration query support");
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index b4b284324a33..35852e794e26 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 2 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * 8 *
8 * This software is available to you under a choice of one of two 9 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU 10 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
36 * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $ 37 * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
37 */ 38 */
38 39
39#include <ib_smi.h> 40#include <rdma/ib_smi.h>
40#include "smi.h" 41#include "smi.h"
41 42
42/* 43/*
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 90d51b179abe..fae1c2dcee51 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
34 36
35#include "core_priv.h" 37#include "core_priv.h"
36 38
37#include <ib_mad.h> 39#include <rdma/ib_mad.h>
38 40
39struct ib_port { 41struct ib_port {
40 struct kobject kobj; 42 struct kobject kobj;
@@ -253,14 +255,14 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
253 return ret; 255 return ret;
254 256
255 return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", 257 return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
256 be16_to_cpu(((u16 *) gid.raw)[0]), 258 be16_to_cpu(((__be16 *) gid.raw)[0]),
257 be16_to_cpu(((u16 *) gid.raw)[1]), 259 be16_to_cpu(((__be16 *) gid.raw)[1]),
258 be16_to_cpu(((u16 *) gid.raw)[2]), 260 be16_to_cpu(((__be16 *) gid.raw)[2]),
259 be16_to_cpu(((u16 *) gid.raw)[3]), 261 be16_to_cpu(((__be16 *) gid.raw)[3]),
260 be16_to_cpu(((u16 *) gid.raw)[4]), 262 be16_to_cpu(((__be16 *) gid.raw)[4]),
261 be16_to_cpu(((u16 *) gid.raw)[5]), 263 be16_to_cpu(((__be16 *) gid.raw)[5]),
262 be16_to_cpu(((u16 *) gid.raw)[6]), 264 be16_to_cpu(((__be16 *) gid.raw)[6]),
263 be16_to_cpu(((u16 *) gid.raw)[7])); 265 be16_to_cpu(((__be16 *) gid.raw)[7]));
264} 266}
265 267
266static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, 268static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
@@ -332,11 +334,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
332 break; 334 break;
333 case 16: 335 case 16:
334 ret = sprintf(buf, "%u\n", 336 ret = sprintf(buf, "%u\n",
335 be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8))); 337 be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
336 break; 338 break;
337 case 32: 339 case 32:
338 ret = sprintf(buf, "%u\n", 340 ret = sprintf(buf, "%u\n",
339 be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8))); 341 be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
340 break; 342 break;
341 default: 343 default:
342 ret = 0; 344 ret = 0;
@@ -598,10 +600,10 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
598 return ret; 600 return ret;
599 601
600 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 602 return sprintf(buf, "%04x:%04x:%04x:%04x\n",
601 be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]), 603 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
602 be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]), 604 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
603 be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]), 605 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
604 be16_to_cpu(((u16 *) &attr.sys_image_guid)[3])); 606 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
605} 607}
606 608
607static ssize_t show_node_guid(struct class_device *cdev, char *buf) 609static ssize_t show_node_guid(struct class_device *cdev, char *buf)
@@ -615,10 +617,10 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
615 return ret; 617 return ret;
616 618
617 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 619 return sprintf(buf, "%04x:%04x:%04x:%04x\n",
618 be16_to_cpu(((u16 *) &attr.node_guid)[0]), 620 be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
619 be16_to_cpu(((u16 *) &attr.node_guid)[1]), 621 be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
620 be16_to_cpu(((u16 *) &attr.node_guid)[2]), 622 be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
621 be16_to_cpu(((u16 *) &attr.node_guid)[3])); 623 be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
622} 624}
623 625
624static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); 626static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
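
The sysfs.c casts are again sparse-only changes: gid.raw is 16 untyped bytes, and printing it as eight colon-separated groups means reading it as eight big-endian 16-bit words. A runnable userspace sketch of the same idiom, with ntohs() standing in for be16_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void print_gid(const unsigned char raw[16])
{
	uint16_t w[8];
	int i;

	memcpy(w, raw, sizeof w);	/* sidestep alignment/aliasing */
	for (i = 0; i < 8; i++)
		printf("%04x%c", ntohs(w[i]), i < 7 ? ':' : '\n');
}

int main(void)
{
	unsigned char gid[16] = { 0xfe, 0x80 };	/* link-local prefix */

	print_gid(gid);
	return 0;
}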
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 61d07c732f49..79595826ccc7 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -73,14 +74,18 @@ static struct semaphore ctx_id_mutex;
73static struct idr ctx_id_table; 74static struct idr ctx_id_table;
74static int ctx_id_rover = 0; 75static int ctx_id_rover = 0;
75 76
76static struct ib_ucm_context *ib_ucm_ctx_get(int id) 77static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
77{ 78{
78 struct ib_ucm_context *ctx; 79 struct ib_ucm_context *ctx;
79 80
80 down(&ctx_id_mutex); 81 down(&ctx_id_mutex);
81 ctx = idr_find(&ctx_id_table, id); 82 ctx = idr_find(&ctx_id_table, id);
82 if (ctx) 83 if (!ctx)
83 ctx->ref++; 84 ctx = ERR_PTR(-ENOENT);
85 else if (ctx->file != file)
86 ctx = ERR_PTR(-EINVAL);
87 else
88 atomic_inc(&ctx->ref);
84 up(&ctx_id_mutex); 89 up(&ctx_id_mutex);
85 90
86 return ctx; 91 return ctx;
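
ib_ucm_ctx_get() now reports failure through ERR_PTR() instead of returning NULL, so a single pointer distinguishes "no such id" (-ENOENT) from "id owned by another file" (-EINVAL), and the lookup absorbs the ctx->file ownership check that every caller used to repeat under the file mutex. Callers shrink to the usual pattern:

#include <linux/err.h>

	ctx = ib_ucm_ctx_get(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* ... operate on ctx->cm_id ... */

	ib_ucm_ctx_put(ctx);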
@@ -88,21 +93,37 @@ static struct ib_ucm_context *ib_ucm_ctx_get(int id)
88 93
89static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) 94static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
90{ 95{
96 if (atomic_dec_and_test(&ctx->ref))
97 wake_up(&ctx->wait);
98}
99
100static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
101{
102 struct ib_ucm_context *ctx;
91 struct ib_ucm_event *uevent; 103 struct ib_ucm_event *uevent;
92 104
93 down(&ctx_id_mutex); 105 down(&ctx_id_mutex);
94 106 ctx = idr_find(&ctx_id_table, id);
95 ctx->ref--; 107 if (!ctx)
96 if (!ctx->ref) 108 ctx = ERR_PTR(-ENOENT);
109 else if (ctx->file != file)
110 ctx = ERR_PTR(-EINVAL);
111 else
97 idr_remove(&ctx_id_table, ctx->id); 112 idr_remove(&ctx_id_table, ctx->id);
98
99 up(&ctx_id_mutex); 113 up(&ctx_id_mutex);
100 114
101 if (ctx->ref) 115 if (IS_ERR(ctx))
102 return; 116 return PTR_ERR(ctx);
103 117
104 down(&ctx->file->mutex); 118 atomic_dec(&ctx->ref);
119 wait_event(ctx->wait, !atomic_read(&ctx->ref));
120
121 /* No new events will be generated after destroying the cm_id. */
122 if (!IS_ERR(ctx->cm_id))
123 ib_destroy_cm_id(ctx->cm_id);
105 124
125 /* Cleanup events not yet reported to the user. */
126 down(&file->mutex);
106 list_del(&ctx->file_list); 127 list_del(&ctx->file_list);
107 while (!list_empty(&ctx->events)) { 128 while (!list_empty(&ctx->events)) {
108 129
@@ -117,13 +138,10 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
117 138
118 kfree(uevent); 139 kfree(uevent);
119 } 140 }
141 up(&file->mutex);
120 142
121 up(&ctx->file->mutex);
122
123 ucm_dbg("Destroyed CM ID <%d>\n", ctx->id);
124
125 ib_destroy_cm_id(ctx->cm_id);
126 kfree(ctx); 143 kfree(ctx);
144 return 0;
127} 145}
128 146
129static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) 147static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
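
ucm.c replaces the mutex-protected integer refcount with an atomic_t plus a waitqueue, giving a small blocking-destroy pattern: lookups take a reference under ctx_id_mutex, ib_ucm_ctx_put() wakes any destroyer when the count reaches zero, and ib_ucm_destroy_ctx() first unlinks the id (so no new lookups can succeed), drops its own reference, and sleeps until the in-flight users drain. The skeleton, using the same 2.6-era primitives (struct obj is a stand-in for ib_ucm_context):

#include <asm/atomic.h>
#include <linux/wait.h>

struct obj {
	atomic_t ref;
	wait_queue_head_t wait;
};

static void obj_init(struct obj *o)
{
	atomic_set(&o->ref, 1);		/* the owner's reference */
	init_waitqueue_head(&o->wait);
}

static void obj_put(struct obj *o)	/* called by each lookup user */
{
	if (atomic_dec_and_test(&o->ref))
		wake_up(&o->wait);
}

static void obj_destroy(struct obj *o)	/* called once, by the owner */
{
	/* ... first make the object unreachable to new lookups ... */
	atomic_dec(&o->ref);		/* drop the owner's reference */
	wait_event(o->wait, !atomic_read(&o->ref));
	/* no users remain; safe to free */
}

Note that wait_event() re-checks its condition before sleeping, so no wakeup is lost even when the owner's own decrement is the last reference.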
@@ -135,11 +153,11 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
135 if (!ctx) 153 if (!ctx)
136 return NULL; 154 return NULL;
137 155
138 ctx->ref = 1; /* user reference */ 156 atomic_set(&ctx->ref, 1);
157 init_waitqueue_head(&ctx->wait);
139 ctx->file = file; 158 ctx->file = file;
140 159
141 INIT_LIST_HEAD(&ctx->events); 160 INIT_LIST_HEAD(&ctx->events);
142 init_MUTEX(&ctx->mutex);
143 161
144 list_add_tail(&ctx->file_list, &file->ctxs); 162 list_add_tail(&ctx->file_list, &file->ctxs);
145 163
@@ -177,8 +195,8 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
177 if (!kpath || !upath) 195 if (!kpath || !upath)
178 return; 196 return;
179 197
180 memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid)); 198 memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid);
181 memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid)); 199 memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid);
182 200
183 upath->dlid = kpath->dlid; 201 upath->dlid = kpath->dlid;
184 upath->slid = kpath->slid; 202 upath->slid = kpath->slid;
@@ -201,10 +219,11 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
201 kpath->packet_life_time_selector; 219 kpath->packet_life_time_selector;
202} 220}
203 221
204static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, 222static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
223 struct ib_ucm_req_event_resp *ureq,
205 struct ib_cm_req_event_param *kreq) 224 struct ib_cm_req_event_param *kreq)
206{ 225{
207 ureq->listen_id = (long)kreq->listen_id->context; 226 ureq->listen_id = ctx->id;
208 227
209 ureq->remote_ca_guid = kreq->remote_ca_guid; 228 ureq->remote_ca_guid = kreq->remote_ca_guid;
210 ureq->remote_qkey = kreq->remote_qkey; 229 ureq->remote_qkey = kreq->remote_qkey;
@@ -240,34 +259,11 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
240 urep->srq = krep->srq; 259 urep->srq = krep->srq;
241} 260}
242 261
243static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej, 262static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
244 struct ib_cm_rej_event_param *krej) 263 struct ib_ucm_sidr_req_event_resp *ureq,
245{
246 urej->reason = krej->reason;
247}
248
249static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
250 struct ib_cm_mra_event_param *kmra)
251{
252 umra->timeout = kmra->service_timeout;
253}
254
255static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
256 struct ib_cm_lap_event_param *klap)
257{
258 ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
259}
260
261static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
262 struct ib_cm_apr_event_param *kapr)
263{
264 uapr->status = kapr->ap_status;
265}
266
267static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
268 struct ib_cm_sidr_req_event_param *kreq) 264 struct ib_cm_sidr_req_event_param *kreq)
269{ 265{
270 ureq->listen_id = (long)kreq->listen_id->context; 266 ureq->listen_id = ctx->id;
271 ureq->pkey = kreq->pkey; 267 ureq->pkey = kreq->pkey;
272} 268}
273 269
@@ -279,19 +275,18 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
279 urep->qpn = krep->qpn; 275 urep->qpn = krep->qpn;
280}; 276};
281 277
282static int ib_ucm_event_process(struct ib_cm_event *evt, 278static int ib_ucm_event_process(struct ib_ucm_context *ctx,
279 struct ib_cm_event *evt,
283 struct ib_ucm_event *uvt) 280 struct ib_ucm_event *uvt)
284{ 281{
285 void *info = NULL; 282 void *info = NULL;
286 int result;
287 283
288 switch (evt->event) { 284 switch (evt->event) {
289 case IB_CM_REQ_RECEIVED: 285 case IB_CM_REQ_RECEIVED:
290 ib_ucm_event_req_get(&uvt->resp.u.req_resp, 286 ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
291 &evt->param.req_rcvd); 287 &evt->param.req_rcvd);
292 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; 288 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
293 uvt->resp.present |= (evt->param.req_rcvd.primary_path ? 289 uvt->resp.present = IB_UCM_PRES_PRIMARY;
294 IB_UCM_PRES_PRIMARY : 0);
295 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ? 290 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
296 IB_UCM_PRES_ALTERNATE : 0); 291 IB_UCM_PRES_ALTERNATE : 0);
297 break; 292 break;
@@ -299,57 +294,46 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
299 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, 294 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
300 &evt->param.rep_rcvd); 295 &evt->param.rep_rcvd);
301 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 296 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
302
303 break; 297 break;
304 case IB_CM_RTU_RECEIVED: 298 case IB_CM_RTU_RECEIVED:
305 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; 299 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
306 uvt->resp.u.send_status = evt->param.send_status; 300 uvt->resp.u.send_status = evt->param.send_status;
307
308 break; 301 break;
309 case IB_CM_DREQ_RECEIVED: 302 case IB_CM_DREQ_RECEIVED:
310 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; 303 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
311 uvt->resp.u.send_status = evt->param.send_status; 304 uvt->resp.u.send_status = evt->param.send_status;
312
313 break; 305 break;
314 case IB_CM_DREP_RECEIVED: 306 case IB_CM_DREP_RECEIVED:
315 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; 307 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
316 uvt->resp.u.send_status = evt->param.send_status; 308 uvt->resp.u.send_status = evt->param.send_status;
317
318 break; 309 break;
319 case IB_CM_MRA_RECEIVED: 310 case IB_CM_MRA_RECEIVED:
320 ib_ucm_event_mra_get(&uvt->resp.u.mra_resp, 311 uvt->resp.u.mra_resp.timeout =
321 &evt->param.mra_rcvd); 312 evt->param.mra_rcvd.service_timeout;
322 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; 313 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
323
324 break; 314 break;
325 case IB_CM_REJ_RECEIVED: 315 case IB_CM_REJ_RECEIVED:
326 ib_ucm_event_rej_get(&uvt->resp.u.rej_resp, 316 uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
327 &evt->param.rej_rcvd);
328 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 317 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
329 uvt->info_len = evt->param.rej_rcvd.ari_length; 318 uvt->info_len = evt->param.rej_rcvd.ari_length;
330 info = evt->param.rej_rcvd.ari; 319 info = evt->param.rej_rcvd.ari;
331
332 break; 320 break;
333 case IB_CM_LAP_RECEIVED: 321 case IB_CM_LAP_RECEIVED:
334 ib_ucm_event_lap_get(&uvt->resp.u.lap_resp, 322 ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path,
335 &evt->param.lap_rcvd); 323 evt->param.lap_rcvd.alternate_path);
336 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; 324 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
337 uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ? 325 uvt->resp.present = IB_UCM_PRES_ALTERNATE;
338 IB_UCM_PRES_ALTERNATE : 0);
339 break; 326 break;
340 case IB_CM_APR_RECEIVED: 327 case IB_CM_APR_RECEIVED:
341 ib_ucm_event_apr_get(&uvt->resp.u.apr_resp, 328 uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
342 &evt->param.apr_rcvd);
343 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; 329 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
344 uvt->info_len = evt->param.apr_rcvd.info_len; 330 uvt->info_len = evt->param.apr_rcvd.info_len;
345 info = evt->param.apr_rcvd.apr_info; 331 info = evt->param.apr_rcvd.apr_info;
346
347 break; 332 break;
348 case IB_CM_SIDR_REQ_RECEIVED: 333 case IB_CM_SIDR_REQ_RECEIVED:
349 ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp, 334 ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
350 &evt->param.sidr_req_rcvd); 335 &evt->param.sidr_req_rcvd);
351 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 336 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
352
353 break; 337 break;
354 case IB_CM_SIDR_REP_RECEIVED: 338 case IB_CM_SIDR_REP_RECEIVED:
355 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, 339 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
@@ -357,43 +341,35 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
357 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 341 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
358 uvt->info_len = evt->param.sidr_rep_rcvd.info_len; 342 uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
359 info = evt->param.sidr_rep_rcvd.info; 343 info = evt->param.sidr_rep_rcvd.info;
360
361 break; 344 break;
362 default: 345 default:
363 uvt->resp.u.send_status = evt->param.send_status; 346 uvt->resp.u.send_status = evt->param.send_status;
364
365 break; 347 break;
366 } 348 }
367 349
368 if (uvt->data_len && evt->private_data) { 350 if (uvt->data_len) {
369
370 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); 351 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
371 if (!uvt->data) { 352 if (!uvt->data)
372 result = -ENOMEM; 353 goto err1;
373 goto error;
374 }
375 354
376 memcpy(uvt->data, evt->private_data, uvt->data_len); 355 memcpy(uvt->data, evt->private_data, uvt->data_len);
377 uvt->resp.present |= IB_UCM_PRES_DATA; 356 uvt->resp.present |= IB_UCM_PRES_DATA;
378 } 357 }
379 358
380 if (uvt->info_len && info) { 359 if (uvt->info_len) {
381
382 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); 360 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
383 if (!uvt->info) { 361 if (!uvt->info)
384 result = -ENOMEM; 362 goto err2;
385 goto error;
386 }
387 363
388 memcpy(uvt->info, info, uvt->info_len); 364 memcpy(uvt->info, info, uvt->info_len);
389 uvt->resp.present |= IB_UCM_PRES_INFO; 365 uvt->resp.present |= IB_UCM_PRES_INFO;
390 } 366 }
391
392 return 0; 367 return 0;
393error: 368
394 kfree(uvt->info); 369err2:
395 kfree(uvt->data); 370 kfree(uvt->data);
396 return result; 371err1:
372 return -ENOMEM;
397} 373}
398 374
399static int ib_ucm_event_handler(struct ib_cm_id *cm_id, 375static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
@@ -403,63 +379,42 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
403 struct ib_ucm_context *ctx; 379 struct ib_ucm_context *ctx;
404 int result = 0; 380 int result = 0;
405 int id; 381 int id;
406 /*
407 * lookup correct context based on event type.
408 */
409 switch (event->event) {
410 case IB_CM_REQ_RECEIVED:
411 id = (long)event->param.req_rcvd.listen_id->context;
412 break;
413 case IB_CM_SIDR_REQ_RECEIVED:
414 id = (long)event->param.sidr_req_rcvd.listen_id->context;
415 break;
416 default:
417 id = (long)cm_id->context;
418 break;
419 }
420 382
421 ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event); 383 ctx = cm_id->context;
422
423 ctx = ib_ucm_ctx_get(id);
424 if (!ctx)
425 return -ENOENT;
426 384
427 if (event->event == IB_CM_REQ_RECEIVED || 385 if (event->event == IB_CM_REQ_RECEIVED ||
428 event->event == IB_CM_SIDR_REQ_RECEIVED) 386 event->event == IB_CM_SIDR_REQ_RECEIVED)
429 id = IB_UCM_CM_ID_INVALID; 387 id = IB_UCM_CM_ID_INVALID;
388 else
389 id = ctx->id;
430 390
431 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); 391 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
432 if (!uevent) { 392 if (!uevent)
433 result = -ENOMEM; 393 goto err1;
434 goto done;
435 }
436 394
437 memset(uevent, 0, sizeof(*uevent)); 395 memset(uevent, 0, sizeof(*uevent));
438
439 uevent->resp.id = id; 396 uevent->resp.id = id;
440 uevent->resp.event = event->event; 397 uevent->resp.event = event->event;
441 398
442 result = ib_ucm_event_process(event, uevent); 399 result = ib_ucm_event_process(ctx, event, uevent);
443 if (result) 400 if (result)
444 goto done; 401 goto err2;
445 402
446 uevent->ctx = ctx; 403 uevent->ctx = ctx;
447 uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED || 404 uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
448 event->event == IB_CM_SIDR_REQ_RECEIVED ) ?
449 cm_id : NULL);
450 405
451 down(&ctx->file->mutex); 406 down(&ctx->file->mutex);
452
453 list_add_tail(&uevent->file_list, &ctx->file->events); 407 list_add_tail(&uevent->file_list, &ctx->file->events);
454 list_add_tail(&uevent->ctx_list, &ctx->events); 408 list_add_tail(&uevent->ctx_list, &ctx->events);
455
456 wake_up_interruptible(&ctx->file->poll_wait); 409 wake_up_interruptible(&ctx->file->poll_wait);
457
458 up(&ctx->file->mutex); 410 up(&ctx->file->mutex);
459done: 411 return 0;
460 ctx->error = result; 412
461 ib_ucm_ctx_put(ctx); /* func reference */ 413err2:
462 return result; 414 kfree(uevent);
415err1:
416 /* Destroy new cm_id's */
417 return (id == IB_UCM_CM_ID_INVALID);
463} 418}
464 419
465static ssize_t ib_ucm_event(struct ib_ucm_file *file, 420static ssize_t ib_ucm_event(struct ib_ucm_file *file,
@@ -517,9 +472,8 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
517 goto done; 472 goto done;
518 } 473 }
519 474
520 ctx->cm_id = uevent->cm_id; 475 ctx->cm_id = uevent->cm_id;
521 ctx->cm_id->cm_handler = ib_ucm_event_handler; 476 ctx->cm_id->context = ctx;
522 ctx->cm_id->context = (void *)(unsigned long)ctx->id;
523 477
524 uevent->resp.id = ctx->id; 478 uevent->resp.id = ctx->id;
525 479
@@ -585,30 +539,29 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
585 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 539 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
586 return -EFAULT; 540 return -EFAULT;
587 541
542 down(&file->mutex);
588 ctx = ib_ucm_ctx_alloc(file); 543 ctx = ib_ucm_ctx_alloc(file);
544 up(&file->mutex);
589 if (!ctx) 545 if (!ctx)
590 return -ENOMEM; 546 return -ENOMEM;
591 547
592 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, 548 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
593 (void *)(unsigned long)ctx->id); 549 if (IS_ERR(ctx->cm_id)) {
594 if (!ctx->cm_id) { 550 result = PTR_ERR(ctx->cm_id);
595 result = -ENOMEM; 551 goto err;
596 goto err_cm;
597 } 552 }
598 553
599 resp.id = ctx->id; 554 resp.id = ctx->id;
600 if (copy_to_user((void __user *)(unsigned long)cmd.response, 555 if (copy_to_user((void __user *)(unsigned long)cmd.response,
601 &resp, sizeof(resp))) { 556 &resp, sizeof(resp))) {
602 result = -EFAULT; 557 result = -EFAULT;
603 goto err_ret; 558 goto err;
604 } 559 }
605 560
606 return 0; 561 return 0;
607err_ret:
608 ib_destroy_cm_id(ctx->cm_id);
609err_cm:
610 ib_ucm_ctx_put(ctx); /* user reference */
611 562
563err:
564 ib_ucm_destroy_ctx(file, ctx->id);
612 return result; 565 return result;
613} 566}
614 567
@@ -617,19 +570,11 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
617 int in_len, int out_len) 570 int in_len, int out_len)
618{ 571{
619 struct ib_ucm_destroy_id cmd; 572 struct ib_ucm_destroy_id cmd;
620 struct ib_ucm_context *ctx;
621 573
622 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 574 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
623 return -EFAULT; 575 return -EFAULT;
624 576
625 ctx = ib_ucm_ctx_get(cmd.id); 577 return ib_ucm_destroy_ctx(file, cmd.id);
626 if (!ctx)
627 return -ENOENT;
628
629 ib_ucm_ctx_put(ctx); /* user reference */
630 ib_ucm_ctx_put(ctx); /* func reference */
631
632 return 0;
633} 578}
634 579
635static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, 580static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
@@ -647,15 +592,9 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
647 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 592 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
648 return -EFAULT; 593 return -EFAULT;
649 594
650 ctx = ib_ucm_ctx_get(cmd.id); 595 ctx = ib_ucm_ctx_get(file, cmd.id);
651 if (!ctx) 596 if (IS_ERR(ctx))
652 return -ENOENT; 597 return PTR_ERR(ctx);
653
654 down(&ctx->file->mutex);
655 if (ctx->file != file) {
656 result = -EINVAL;
657 goto done;
658 }
659 598
660 resp.service_id = ctx->cm_id->service_id; 599 resp.service_id = ctx->cm_id->service_id;
661 resp.service_mask = ctx->cm_id->service_mask; 600 resp.service_mask = ctx->cm_id->service_mask;
@@ -666,9 +605,7 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
666 &resp, sizeof(resp))) 605 &resp, sizeof(resp)))
667 result = -EFAULT; 606 result = -EFAULT;
668 607
669done: 608 ib_ucm_ctx_put(ctx);
670 up(&ctx->file->mutex);
671 ib_ucm_ctx_put(ctx); /* func reference */
672 return result; 609 return result;
673} 610}
674 611
@@ -683,19 +620,12 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
683 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 620 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
684 return -EFAULT; 621 return -EFAULT;
685 622
686 ctx = ib_ucm_ctx_get(cmd.id); 623 ctx = ib_ucm_ctx_get(file, cmd.id);
687 if (!ctx) 624 if (IS_ERR(ctx))
688 return -ENOENT; 625 return PTR_ERR(ctx);
689 626
690 down(&ctx->file->mutex); 627 result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
691 if (ctx->file != file) 628 ib_ucm_ctx_put(ctx);
692 result = -EINVAL;
693 else
694 result = ib_cm_listen(ctx->cm_id, cmd.service_id,
695 cmd.service_mask);
696
697 up(&ctx->file->mutex);
698 ib_ucm_ctx_put(ctx); /* func reference */
699 return result; 629 return result;
700} 630}
701 631
@@ -710,18 +640,12 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
710 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 640 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
711 return -EFAULT; 641 return -EFAULT;
712 642
713 ctx = ib_ucm_ctx_get(cmd.id); 643 ctx = ib_ucm_ctx_get(file, cmd.id);
714 if (!ctx) 644 if (IS_ERR(ctx))
715 return -ENOENT; 645 return PTR_ERR(ctx);
716
717 down(&ctx->file->mutex);
718 if (ctx->file != file)
719 result = -EINVAL;
720 else
721 result = ib_cm_establish(ctx->cm_id);
722 646
723 up(&ctx->file->mutex); 647 result = ib_cm_establish(ctx->cm_id);
724 ib_ucm_ctx_put(ctx); /* func reference */ 648 ib_ucm_ctx_put(ctx);
725 return result; 649 return result;
726} 650}
727 651
@@ -768,8 +692,8 @@ static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
768 return -EFAULT; 692 return -EFAULT;
769 } 693 }
770 694
771 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid)); 695 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid);
772 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid)); 696 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid);
773 697
774 sa_path->dlid = ucm_path.dlid; 698 sa_path->dlid = ucm_path.dlid;
775 sa_path->slid = ucm_path.slid; 699 sa_path->slid = ucm_path.slid;
@@ -839,25 +763,17 @@ static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
839 param.max_cm_retries = cmd.max_cm_retries; 763 param.max_cm_retries = cmd.max_cm_retries;
840 param.srq = cmd.srq; 764 param.srq = cmd.srq;
841 765
842 ctx = ib_ucm_ctx_get(cmd.id); 766 ctx = ib_ucm_ctx_get(file, cmd.id);
843 if (!ctx) { 767 if (!IS_ERR(ctx)) {
844 result = -ENOENT;
845 goto done;
846 }
847
848 down(&ctx->file->mutex);
849 if (ctx->file != file)
850 result = -EINVAL;
851 else
852 result = ib_send_cm_req(ctx->cm_id, &param); 768 result = ib_send_cm_req(ctx->cm_id, &param);
769 ib_ucm_ctx_put(ctx);
770 } else
771 result = PTR_ERR(ctx);
853 772
854 up(&ctx->file->mutex);
855 ib_ucm_ctx_put(ctx); /* func reference */
856done: 773done:
857 kfree(param.private_data); 774 kfree(param.private_data);
858 kfree(param.primary_path); 775 kfree(param.primary_path);
859 kfree(param.alternate_path); 776 kfree(param.alternate_path);
860
861 return result; 777 return result;
862} 778}
863 779
@@ -890,23 +806,14 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
890 param.rnr_retry_count = cmd.rnr_retry_count; 806 param.rnr_retry_count = cmd.rnr_retry_count;
891 param.srq = cmd.srq; 807 param.srq = cmd.srq;
892 808
893 ctx = ib_ucm_ctx_get(cmd.id); 809 ctx = ib_ucm_ctx_get(file, cmd.id);
894 if (!ctx) { 810 if (!IS_ERR(ctx)) {
895 result = -ENOENT;
896 goto done;
897 }
898
899 down(&ctx->file->mutex);
900 if (ctx->file != file)
901 result = -EINVAL;
902 else
903 result = ib_send_cm_rep(ctx->cm_id, &param); 811 result = ib_send_cm_rep(ctx->cm_id, &param);
812 ib_ucm_ctx_put(ctx);
813 } else
814 result = PTR_ERR(ctx);
904 815
905 up(&ctx->file->mutex);
906 ib_ucm_ctx_put(ctx); /* func reference */
907done:
908 kfree(param.private_data); 816 kfree(param.private_data);
909
910 return result; 817 return result;
911} 818}
912 819
@@ -928,23 +835,14 @@ static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
928 if (result) 835 if (result)
929 return result; 836 return result;
930 837
931 ctx = ib_ucm_ctx_get(cmd.id); 838 ctx = ib_ucm_ctx_get(file, cmd.id);
932 if (!ctx) { 839 if (!IS_ERR(ctx)) {
933 result = -ENOENT;
934 goto done;
935 }
936
937 down(&ctx->file->mutex);
938 if (ctx->file != file)
939 result = -EINVAL;
940 else
941 result = func(ctx->cm_id, private_data, cmd.len); 840 result = func(ctx->cm_id, private_data, cmd.len);
841 ib_ucm_ctx_put(ctx);
842 } else
843 result = PTR_ERR(ctx);
942 844
943 up(&ctx->file->mutex);
944 ib_ucm_ctx_put(ctx); /* func reference */
945done:
946 kfree(private_data); 845 kfree(private_data);
947
948 return result; 846 return result;
949} 847}
950 848
@@ -995,26 +893,17 @@ static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
995 if (result) 893 if (result)
996 goto done; 894 goto done;
997 895
998 ctx = ib_ucm_ctx_get(cmd.id); 896 ctx = ib_ucm_ctx_get(file, cmd.id);
999 if (!ctx) { 897 if (!IS_ERR(ctx)) {
1000 result = -ENOENT; 898 result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
1001 goto done;
1002 }
1003
1004 down(&ctx->file->mutex);
1005 if (ctx->file != file)
1006 result = -EINVAL;
1007 else
1008 result = func(ctx->cm_id, cmd.status,
1009 info, cmd.info_len,
1010 data, cmd.data_len); 899 data, cmd.data_len);
900 ib_ucm_ctx_put(ctx);
901 } else
902 result = PTR_ERR(ctx);
1011 903
1012 up(&ctx->file->mutex);
1013 ib_ucm_ctx_put(ctx); /* func reference */
1014done: 904done:
1015 kfree(data); 905 kfree(data);
1016 kfree(info); 906 kfree(info);
1017
1018 return result; 907 return result;
1019} 908}
1020 909
@@ -1048,24 +937,14 @@ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
1048 if (result) 937 if (result)
1049 return result; 938 return result;
1050 939
1051 ctx = ib_ucm_ctx_get(cmd.id); 940 ctx = ib_ucm_ctx_get(file, cmd.id);
1052 if (!ctx) { 941 if (!IS_ERR(ctx)) {
1053 result = -ENOENT; 942 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
1054 goto done; 943 ib_ucm_ctx_put(ctx);
1055 } 944 } else
945 result = PTR_ERR(ctx);
1056 946
1057 down(&ctx->file->mutex);
1058 if (ctx->file != file)
1059 result = -EINVAL;
1060 else
1061 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
1062 data, cmd.len);
1063
1064 up(&ctx->file->mutex);
1065 ib_ucm_ctx_put(ctx); /* func reference */
1066done:
1067 kfree(data); 947 kfree(data);
1068
1069 return result; 948 return result;
1070} 949}
1071 950
@@ -1090,24 +969,16 @@ static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
1090 if (result) 969 if (result)
1091 goto done; 970 goto done;
1092 971
1093 ctx = ib_ucm_ctx_get(cmd.id); 972 ctx = ib_ucm_ctx_get(file, cmd.id);
1094 if (!ctx) { 973 if (!IS_ERR(ctx)) {
1095 result = -ENOENT;
1096 goto done;
1097 }
1098
1099 down(&ctx->file->mutex);
1100 if (ctx->file != file)
1101 result = -EINVAL;
1102 else
1103 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); 974 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
975 ib_ucm_ctx_put(ctx);
976 } else
977 result = PTR_ERR(ctx);
1104 978
1105 up(&ctx->file->mutex);
1106 ib_ucm_ctx_put(ctx); /* func reference */
1107done: 979done:
1108 kfree(data); 980 kfree(data);
1109 kfree(path); 981 kfree(path);
1110
1111 return result; 982 return result;
1112} 983}
1113 984
@@ -1140,24 +1011,16 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1140 param.max_cm_retries = cmd.max_cm_retries; 1011 param.max_cm_retries = cmd.max_cm_retries;
1141 param.pkey = cmd.pkey; 1012 param.pkey = cmd.pkey;
1142 1013
1143 ctx = ib_ucm_ctx_get(cmd.id); 1014 ctx = ib_ucm_ctx_get(file, cmd.id);
1144 if (!ctx) { 1015 if (!IS_ERR(ctx)) {
1145 result = -ENOENT;
1146 goto done;
1147 }
1148
1149 down(&ctx->file->mutex);
1150 if (ctx->file != file)
1151 result = -EINVAL;
1152 else
1153 result = ib_send_cm_sidr_req(ctx->cm_id, &param); 1016 result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1017 ib_ucm_ctx_put(ctx);
1018 } else
1019 result = PTR_ERR(ctx);
1154 1020
1155 up(&ctx->file->mutex);
1156 ib_ucm_ctx_put(ctx); /* func reference */
1157done: 1021done:
1158 kfree(param.private_data); 1022 kfree(param.private_data);
1159 kfree(param.path); 1023 kfree(param.path);
1160
1161 return result; 1024 return result;
1162} 1025}
1163 1026
@@ -1184,30 +1047,22 @@ static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1184 if (result) 1047 if (result)
1185 goto done; 1048 goto done;
1186 1049
1187 param.qp_num = cmd.qpn; 1050 param.qp_num = cmd.qpn;
1188 param.qkey = cmd.qkey; 1051 param.qkey = cmd.qkey;
1189 param.status = cmd.status; 1052 param.status = cmd.status;
1190 param.info_length = cmd.info_len; 1053 param.info_length = cmd.info_len;
1191 param.private_data_len = cmd.data_len; 1054 param.private_data_len = cmd.data_len;
1192
1193 ctx = ib_ucm_ctx_get(cmd.id);
1194 if (!ctx) {
1195 result = -ENOENT;
1196 goto done;
1197 }
1198 1055
1199 down(&ctx->file->mutex); 1056 ctx = ib_ucm_ctx_get(file, cmd.id);
1200 if (ctx->file != file) 1057 if (!IS_ERR(ctx)) {
1201 result = -EINVAL;
1202 else
1203 result = ib_send_cm_sidr_rep(ctx->cm_id, &param); 1058 result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1059 ib_ucm_ctx_put(ctx);
1060 } else
1061 result = PTR_ERR(ctx);
1204 1062
1205 up(&ctx->file->mutex);
1206 ib_ucm_ctx_put(ctx); /* func reference */
1207done: 1063done:
1208 kfree(param.private_data); 1064 kfree(param.private_data);
1209 kfree(param.info); 1065 kfree(param.info);
1210
1211 return result; 1066 return result;
1212} 1067}
1213 1068
@@ -1305,22 +1160,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
1305 struct ib_ucm_context *ctx; 1160 struct ib_ucm_context *ctx;
1306 1161
1307 down(&file->mutex); 1162 down(&file->mutex);
1308
1309 while (!list_empty(&file->ctxs)) { 1163 while (!list_empty(&file->ctxs)) {
1310 1164
1311 ctx = list_entry(file->ctxs.next, 1165 ctx = list_entry(file->ctxs.next,
1312 struct ib_ucm_context, file_list); 1166 struct ib_ucm_context, file_list);
1313 1167
1314 up(&ctx->file->mutex); 1168 up(&file->mutex);
1315 ib_ucm_ctx_put(ctx); /* user reference */ 1169 ib_ucm_destroy_ctx(file, ctx->id);
1316 down(&file->mutex); 1170 down(&file->mutex);
1317 } 1171 }
1318
1319 up(&file->mutex); 1172 up(&file->mutex);
1320
1321 kfree(file); 1173 kfree(file);
1322
1323 ucm_dbg("Deleted struct\n");
1324 return 0; 1174 return 0;
1325} 1175}
1326 1176
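The ucm.c hunks above all apply one pattern: ib_ucm_ctx_get() now takes the file, performs the ownership check itself, and reports failure as an error value encoded in the returned pointer, so every caller collapses to a single IS_ERR() test instead of a NULL check plus a locked ctx->file comparison. A minimal userspace sketch of that idiom (in the kernel the helpers come from <linux/err.h>; the lookup here is hypothetical):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *) error;			/* encode -errno in the pointer bits */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;			/* decode it back */
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

static int the_ctx = 42;			/* hypothetical context object */

static void *ctx_get(int id)
{
	if (id != 42)
		return ERR_PTR(-ENOENT);	/* was: return NULL */
	return &the_ctx;
}

int main(void)
{
	void *ctx = ctx_get(7);

	if (IS_ERR(ctx))			/* one test replaces NULL + ownership checks */
		printf("lookup failed: %ld\n", PTR_ERR(ctx));
	return 0;
}

The payoff in the hunks above is that -ENOENT and -EINVAL no longer need separate call-site handling; whatever error the lookup encoded simply propagates via PTR_ERR().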
diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h
index 6d36606151b2..c8819b928a1b 100644
--- a/drivers/infiniband/core/ucm.h
+++ b/drivers/infiniband/core/ucm.h
@@ -40,17 +40,15 @@
40#include <linux/cdev.h> 40#include <linux/cdev.h>
41#include <linux/idr.h> 41#include <linux/idr.h>
42 42
43#include <ib_cm.h> 43#include <rdma/ib_cm.h>
44#include <ib_user_cm.h> 44#include <rdma/ib_user_cm.h>
45 45
46#define IB_UCM_CM_ID_INVALID 0xffffffff 46#define IB_UCM_CM_ID_INVALID 0xffffffff
47 47
48struct ib_ucm_file { 48struct ib_ucm_file {
49 struct semaphore mutex; 49 struct semaphore mutex;
50 struct file *filp; 50 struct file *filp;
51 /* 51
52 * list of pending events
53 */
54 struct list_head ctxs; /* list of active connections */ 52 struct list_head ctxs; /* list of active connections */
55 struct list_head events; /* list of pending events */ 53 struct list_head events; /* list of pending events */
56 wait_queue_head_t poll_wait; 54 wait_queue_head_t poll_wait;
@@ -58,12 +56,11 @@ struct ib_ucm_file {
58 56
59struct ib_ucm_context { 57struct ib_ucm_context {
60 int id; 58 int id;
61 int ref; 59 wait_queue_head_t wait;
62 int error; 60 atomic_t ref;
63 61
64 struct ib_ucm_file *file; 62 struct ib_ucm_file *file;
65 struct ib_cm_id *cm_id; 63 struct ib_cm_id *cm_id;
66 struct semaphore mutex;
67 64
68 struct list_head events; /* list of pending events. */ 65 struct list_head events; /* list of pending events. */
69 struct list_head file_list; /* member in file ctx list */ 66 struct list_head file_list; /* member in file ctx list */
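In the context structure, the plain int ref (which needed the per-context semaphore that is being deleted) becomes an atomic_t paired with a wait queue head, suggesting a wait-for-last-put teardown. A hedged userspace analogue of that pattern, with a mutex/condvar standing in for wait_event()/wake_up():

#include <pthread.h>
#include <stdatomic.h>

struct ctx {
	atomic_int	ref;
	pthread_mutex_t	lock;
	pthread_cond_t	wait;			/* plays the role of wait_queue_head_t */
};

static void ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->ref, 1);
}

static void ctx_put(struct ctx *c)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		pthread_mutex_lock(&c->lock);
		pthread_cond_broadcast(&c->wait);	/* wake the destroyer */
		pthread_mutex_unlock(&c->lock);
	}
}

static void ctx_destroy_wait(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->ref) > 0)	/* wait_event(ctx->wait, ...) analogue */
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}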
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index dc4eb1db5e96..527b23450ab3 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,7 @@
34 35
35#include <linux/errno.h> 36#include <linux/errno.h>
36 37
37#include <ib_pack.h> 38#include <rdma/ib_pack.h>
38 39
39#define STRUCT_FIELD(header, field) \ 40#define STRUCT_FIELD(header, field) \
40 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ 41 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
@@ -194,6 +195,7 @@ void ib_ud_header_init(int payload_bytes,
194 struct ib_ud_header *header) 195 struct ib_ud_header *header)
195{ 196{
196 int header_len; 197 int header_len;
198 u16 packet_length;
197 199
198 memset(header, 0, sizeof *header); 200 memset(header, 0, sizeof *header);
199 201
@@ -208,7 +210,7 @@ void ib_ud_header_init(int payload_bytes,
208 header->lrh.link_version = 0; 210 header->lrh.link_version = 0;
209 header->lrh.link_next_header = 211 header->lrh.link_next_header =
210 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; 212 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
211 header->lrh.packet_length = (IB_LRH_BYTES + 213 packet_length = (IB_LRH_BYTES +
212 IB_BTH_BYTES + 214 IB_BTH_BYTES +
213 IB_DETH_BYTES + 215 IB_DETH_BYTES +
214 payload_bytes + 216 payload_bytes +
@@ -217,8 +219,7 @@ void ib_ud_header_init(int payload_bytes,
217 219
218 header->grh_present = grh_present; 220 header->grh_present = grh_present;
219 if (grh_present) { 221 if (grh_present) {
220 header->lrh.packet_length += IB_GRH_BYTES / 4; 222 packet_length += IB_GRH_BYTES / 4;
221
222 header->grh.ip_version = 6; 223 header->grh.ip_version = 6;
223 header->grh.payload_length = 224 header->grh.payload_length =
224 cpu_to_be16((IB_BTH_BYTES + 225 cpu_to_be16((IB_BTH_BYTES +
@@ -229,7 +230,7 @@ void ib_ud_header_init(int payload_bytes,
229 header->grh.next_header = 0x1b; 230 header->grh.next_header = 0x1b;
230 } 231 }
231 232
232 cpu_to_be16s(&header->lrh.packet_length); 233 header->lrh.packet_length = cpu_to_be16(packet_length);
233 234
234 if (header->immediate_present) 235 if (header->immediate_present)
235 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 236 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
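The ud_header change is a byte-order cleanup: instead of doing arithmetic on the big-endian packet_length field and patching it in place with cpu_to_be16s(), the length is accumulated in a host-order u16 and converted exactly once when stored. The same shape in standalone C, with htons() standing in for cpu_to_be16() and illustrative constants in place of the IB_*_BYTES macros:

#include <stdint.h>
#include <arpa/inet.h>			/* htons() as a userspace cpu_to_be16() */

#define LRH_BYTES	8		/* illustrative, cf. IB_LRH_BYTES etc. */
#define BTH_BYTES	12
#define DETH_BYTES	8
#define GRH_BYTES	40
#define ICRC_BYTES	4

struct lrh {
	uint16_t packet_length;		/* big-endian on the wire */
};

void set_packet_length(struct lrh *lrh, int payload_bytes, int grh_present)
{
	uint16_t packet_length;		/* host byte order throughout */

	packet_length = (LRH_BYTES + BTH_BYTES + DETH_BYTES +
			 payload_bytes + ICRC_BYTES + 3) / 4;	/* in 4-byte words */
	if (grh_present)
		packet_length += GRH_BYTES / 4;

	lrh->packet_length = htons(packet_length);	/* convert once, at the end */
}

Keeping the intermediate value in host order also lets sparse type the field as __be16 without a cast at every increment.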
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 2e38792df533..7c2f03057ddb 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
@@ -49,8 +49,8 @@
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <asm/semaphore.h> 50#include <asm/semaphore.h>
51 51
52#include <ib_mad.h> 52#include <rdma/ib_mad.h>
53#include <ib_user_mad.h> 53#include <rdma/ib_user_mad.h>
54 54
55MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
56MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); 56MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
@@ -271,7 +271,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
271 struct ib_send_wr *bad_wr; 271 struct ib_send_wr *bad_wr;
272 struct ib_rmpp_mad *rmpp_mad; 272 struct ib_rmpp_mad *rmpp_mad;
273 u8 method; 273 u8 method;
274 u64 *tid; 274 __be64 *tid;
275 int ret, length, hdr_len, data_len, rmpp_hdr_size; 275 int ret, length, hdr_len, data_len, rmpp_hdr_size;
276 int rmpp_active = 0; 276 int rmpp_active = 0;
277 277
@@ -316,7 +316,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
316 if (packet->mad.hdr.grh_present) { 316 if (packet->mad.hdr.grh_present) {
317 ah_attr.ah_flags = IB_AH_GRH; 317 ah_attr.ah_flags = IB_AH_GRH;
318 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 318 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
319 ah_attr.grh.flow_label = packet->mad.hdr.flow_label; 319 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
320 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 320 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
322 } 322 }
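The user_mad changes (u64 tid becoming __be64, flow_label gaining a be32_to_cpu()) are part of the same sweep: on-the-wire fields take sparse's endian-checked types so a missing conversion becomes a static warning rather than a silent byte-order bug. A minimal kernel-context sketch of what the annotation buys (assumes the __bitwise machinery from <linux/types.h>; not a standalone program):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__be32 flow_label;		/* declared in network byte order */
};

static void set_flow_label(struct wire_hdr *hdr, u32 host_val)
{
	hdr->flow_label = cpu_to_be32(host_val);	/* OK: typed conversion */
	/*
	 * Writing hdr->flow_label = host_val; here would draw
	 * "incorrect type in assignment" from sparse -- which is
	 * exactly how mismatches like the flow_label fix above
	 * get caught.
	 */
}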
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7696022f9a4e..180b3d4765e4 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -43,8 +45,8 @@
43#include <linux/kref.h> 45#include <linux/kref.h>
44#include <linux/idr.h> 46#include <linux/idr.h>
45 47
46#include <ib_verbs.h> 48#include <rdma/ib_verbs.h>
47#include <ib_user_verbs.h> 49#include <rdma/ib_user_verbs.h>
48 50
49struct ib_uverbs_device { 51struct ib_uverbs_device {
50 int devnum; 52 int devnum;
@@ -97,10 +99,12 @@ extern struct idr ib_uverbs_mw_idr;
97extern struct idr ib_uverbs_ah_idr; 99extern struct idr ib_uverbs_ah_idr;
98extern struct idr ib_uverbs_cq_idr; 100extern struct idr ib_uverbs_cq_idr;
99extern struct idr ib_uverbs_qp_idr; 101extern struct idr ib_uverbs_qp_idr;
102extern struct idr ib_uverbs_srq_idr;
100 103
101void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); 104void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
102void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); 105void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
103void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); 106void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
107void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
104 108
105int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, 109int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
106 void *addr, size_t size, int write); 110 void *addr, size_t size, int write);
@@ -129,5 +133,8 @@ IB_UVERBS_DECLARE_CMD(modify_qp);
129IB_UVERBS_DECLARE_CMD(destroy_qp); 133IB_UVERBS_DECLARE_CMD(destroy_qp);
130IB_UVERBS_DECLARE_CMD(attach_mcast); 134IB_UVERBS_DECLARE_CMD(attach_mcast);
131IB_UVERBS_DECLARE_CMD(detach_mcast); 135IB_UVERBS_DECLARE_CMD(detach_mcast);
136IB_UVERBS_DECLARE_CMD(create_srq);
137IB_UVERBS_DECLARE_CMD(modify_srq);
138IB_UVERBS_DECLARE_CMD(destroy_srq);
132 139
133#endif /* UVERBS_H */ 140#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5f2bbcda4c73..ebccf9f38af9 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -724,6 +724,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
724 struct ib_uobject *uobj; 724 struct ib_uobject *uobj;
725 struct ib_pd *pd; 725 struct ib_pd *pd;
726 struct ib_cq *scq, *rcq; 726 struct ib_cq *scq, *rcq;
727 struct ib_srq *srq;
727 struct ib_qp *qp; 728 struct ib_qp *qp;
728 struct ib_qp_init_attr attr; 729 struct ib_qp_init_attr attr;
729 int ret; 730 int ret;
@@ -747,10 +748,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
747 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 748 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
748 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); 749 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
749 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); 750 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
751 srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;
750 752
751 if (!pd || pd->uobject->context != file->ucontext || 753 if (!pd || pd->uobject->context != file->ucontext ||
752 !scq || scq->uobject->context != file->ucontext || 754 !scq || scq->uobject->context != file->ucontext ||
753 !rcq || rcq->uobject->context != file->ucontext) { 755 !rcq || rcq->uobject->context != file->ucontext ||
756 (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
754 ret = -EINVAL; 757 ret = -EINVAL;
755 goto err_up; 758 goto err_up;
756 } 759 }
@@ -759,7 +762,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
759 attr.qp_context = file; 762 attr.qp_context = file;
760 attr.send_cq = scq; 763 attr.send_cq = scq;
761 attr.recv_cq = rcq; 764 attr.recv_cq = rcq;
762 attr.srq = NULL; 765 attr.srq = srq;
763 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 766 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
764 attr.qp_type = cmd.qp_type; 767 attr.qp_type = cmd.qp_type;
765 768
@@ -1004,3 +1007,178 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1004 1007
1005 return ret ? ret : in_len; 1008 return ret ? ret : in_len;
1006} 1009}
1010
1011ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1012 const char __user *buf, int in_len,
1013 int out_len)
1014{
1015 struct ib_uverbs_create_srq cmd;
1016 struct ib_uverbs_create_srq_resp resp;
1017 struct ib_udata udata;
1018 struct ib_uobject *uobj;
1019 struct ib_pd *pd;
1020 struct ib_srq *srq;
1021 struct ib_srq_init_attr attr;
1022 int ret;
1023
1024 if (out_len < sizeof resp)
1025 return -ENOSPC;
1026
1027 if (copy_from_user(&cmd, buf, sizeof cmd))
1028 return -EFAULT;
1029
1030 INIT_UDATA(&udata, buf + sizeof cmd,
1031 (unsigned long) cmd.response + sizeof resp,
1032 in_len - sizeof cmd, out_len - sizeof resp);
1033
1034 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1035 if (!uobj)
1036 return -ENOMEM;
1037
1038 down(&ib_uverbs_idr_mutex);
1039
1040 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
1041
1042 if (!pd || pd->uobject->context != file->ucontext) {
1043 ret = -EINVAL;
1044 goto err_up;
1045 }
1046
1047 attr.event_handler = ib_uverbs_srq_event_handler;
1048 attr.srq_context = file;
1049 attr.attr.max_wr = cmd.max_wr;
1050 attr.attr.max_sge = cmd.max_sge;
1051 attr.attr.srq_limit = cmd.srq_limit;
1052
1053 uobj->user_handle = cmd.user_handle;
1054 uobj->context = file->ucontext;
1055
1056 srq = pd->device->create_srq(pd, &attr, &udata);
1057 if (IS_ERR(srq)) {
1058 ret = PTR_ERR(srq);
1059 goto err_up;
1060 }
1061
1062 srq->device = pd->device;
1063 srq->pd = pd;
1064 srq->uobject = uobj;
1065 srq->event_handler = attr.event_handler;
1066 srq->srq_context = attr.srq_context;
1067 atomic_inc(&pd->usecnt);
1068 atomic_set(&srq->usecnt, 0);
1069
1070 memset(&resp, 0, sizeof resp);
1071
1072retry:
1073 if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
1074 ret = -ENOMEM;
1075 goto err_destroy;
1076 }
1077
1078 ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
1079
1080 if (ret == -EAGAIN)
1081 goto retry;
1082 if (ret)
1083 goto err_destroy;
1084
1085 resp.srq_handle = uobj->id;
1086
1087 spin_lock_irq(&file->ucontext->lock);
1088 list_add_tail(&uobj->list, &file->ucontext->srq_list);
1089 spin_unlock_irq(&file->ucontext->lock);
1090
1091 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1092 &resp, sizeof resp)) {
1093 ret = -EFAULT;
1094 goto err_list;
1095 }
1096
1097 up(&ib_uverbs_idr_mutex);
1098
1099 return in_len;
1100
1101err_list:
1102 spin_lock_irq(&file->ucontext->lock);
1103 list_del(&uobj->list);
1104 spin_unlock_irq(&file->ucontext->lock);
1105
1106err_destroy:
1107 ib_destroy_srq(srq);
1108
1109err_up:
1110 up(&ib_uverbs_idr_mutex);
1111
1112 kfree(uobj);
1113 return ret;
1114}
1115
1116ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
1117 const char __user *buf, int in_len,
1118 int out_len)
1119{
1120 struct ib_uverbs_modify_srq cmd;
1121 struct ib_srq *srq;
1122 struct ib_srq_attr attr;
1123 int ret;
1124
1125 if (copy_from_user(&cmd, buf, sizeof cmd))
1126 return -EFAULT;
1127
1128 down(&ib_uverbs_idr_mutex);
1129
1130 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1131 if (!srq || srq->uobject->context != file->ucontext) {
1132 ret = -EINVAL;
1133 goto out;
1134 }
1135
1136 attr.max_wr = cmd.max_wr;
1137 attr.max_sge = cmd.max_sge;
1138 attr.srq_limit = cmd.srq_limit;
1139
1140 ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
1141
1142out:
1143 up(&ib_uverbs_idr_mutex);
1144
1145 return ret ? ret : in_len;
1146}
1147
1148ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1149 const char __user *buf, int in_len,
1150 int out_len)
1151{
1152 struct ib_uverbs_destroy_srq cmd;
1153 struct ib_srq *srq;
1154 struct ib_uobject *uobj;
1155 int ret = -EINVAL;
1156
1157 if (copy_from_user(&cmd, buf, sizeof cmd))
1158 return -EFAULT;
1159
1160 down(&ib_uverbs_idr_mutex);
1161
1162 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1163 if (!srq || srq->uobject->context != file->ucontext)
1164 goto out;
1165
1166 uobj = srq->uobject;
1167
1168 ret = ib_destroy_srq(srq);
1169 if (ret)
1170 goto out;
1171
1172 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1173
1174 spin_lock_irq(&file->ucontext->lock);
1175 list_del(&uobj->list);
1176 spin_unlock_irq(&file->ucontext->lock);
1177
1178 kfree(uobj);
1179
1180out:
1181 up(&ib_uverbs_idr_mutex);
1182
1183 return ret ? ret : in_len;
1184}
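ib_uverbs_create_srq() above uses the two-step IDR allocation idiom of this kernel generation: idr_pre_get() preallocates layer memory, idr_get_new() performs the insert, and -EAGAIN means another caller consumed the preallocation first, so the pair must be retried. Isolated into a helper (kernel-context sketch, not a standalone program):

static int uobj_idr_alloc(struct idr *idr, void *ptr, int *id)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))	/* preallocate idr layers */
		return -ENOMEM;

	ret = idr_get_new(idr, ptr, id);	/* insert ptr, assign a new id */
	if (ret == -EAGAIN)			/* preload raced away; try again */
		goto retry;

	return ret;
}

Later kernels folded this loop into a single idr_alloc() call, but in this era every uverbs object-creation path carries the retry label verbatim.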
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb99e693dec2..09caf5b1ef36 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,7 @@ DEFINE_IDR(ib_uverbs_mw_idr);
67DEFINE_IDR(ib_uverbs_ah_idr); 69DEFINE_IDR(ib_uverbs_ah_idr);
68DEFINE_IDR(ib_uverbs_cq_idr); 70DEFINE_IDR(ib_uverbs_cq_idr);
69DEFINE_IDR(ib_uverbs_qp_idr); 71DEFINE_IDR(ib_uverbs_qp_idr);
72DEFINE_IDR(ib_uverbs_srq_idr);
70 73
71static spinlock_t map_lock; 74static spinlock_t map_lock;
72static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 75static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -91,6 +94,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
91 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, 94 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
92 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, 95 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
93 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, 96 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
97 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
98 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
99 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
94}; 100};
95 101
96static struct vfsmount *uverbs_event_mnt; 102static struct vfsmount *uverbs_event_mnt;
@@ -125,18 +131,26 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
125 kfree(uobj); 131 kfree(uobj);
126 } 132 }
127 133
128 /* XXX Free SRQs */ 134 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
135 struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id);
136 idr_remove(&ib_uverbs_srq_idr, uobj->id);
137 ib_destroy_srq(srq);
138 list_del(&uobj->list);
139 kfree(uobj);
140 }
141
129 /* XXX Free MWs */ 142 /* XXX Free MWs */
130 143
131 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { 144 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
132 struct ib_mr *mr = idr_find(&ib_uverbs_mr_idr, uobj->id); 145 struct ib_mr *mr = idr_find(&ib_uverbs_mr_idr, uobj->id);
146 struct ib_device *mrdev = mr->device;
133 struct ib_umem_object *memobj; 147 struct ib_umem_object *memobj;
134 148
135 idr_remove(&ib_uverbs_mr_idr, uobj->id); 149 idr_remove(&ib_uverbs_mr_idr, uobj->id);
136 ib_dereg_mr(mr); 150 ib_dereg_mr(mr);
137 151
138 memobj = container_of(uobj, struct ib_umem_object, uobject); 152 memobj = container_of(uobj, struct ib_umem_object, uobject);
139 ib_umem_release_on_close(mr->device, &memobj->umem); 153 ib_umem_release_on_close(mrdev, &memobj->umem);
140 154
141 list_del(&uobj->list); 155 list_del(&uobj->list);
142 kfree(memobj); 156 kfree(memobj);
@@ -343,6 +357,13 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
343 event->event); 357 event->event);
344} 358}
345 359
360void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
361{
362 ib_uverbs_async_handler(context_ptr,
363 event->element.srq->uobject->user_handle,
364 event->event);
365}
366
346static void ib_uverbs_event_handler(struct ib_event_handler *handler, 367static void ib_uverbs_event_handler(struct ib_event_handler *handler,
347 struct ib_event *event) 368 struct ib_event *event)
348{ 369{
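One hunk in ib_dealloc_ucontext() is an ordering fix rather than SRQ plumbing: ib_dereg_mr() frees the mr, so mr->device is now cached in mrdev beforehand and the cached pointer is what ib_umem_release_on_close() receives. The generic shape of the fix, with hypothetical destroy()/cleanup() helpers:

struct dev;
struct obj {
	struct dev *device;
};

extern void destroy(struct obj *obj);	/* frees obj, like ib_dereg_mr() */
extern void cleanup(struct dev *dev);	/* like ib_umem_release_on_close() */

static void release(struct obj *obj)
{
	struct dev *dev = obj->device;	/* read before the object dies */

	destroy(obj);			/* obj may be freed here */
	cleanup(dev);			/* safe: uses the cached pointer */
}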
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index ed550f6595bd..36a32c315668 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 506fdf1f2a26..5081d903e561 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 * Copyright (c) 2005 Cisco Systems. All rights reserved.
8 * 9 *
9 * This software is available to you under a choice of one of two 10 * This software is available to you under a choice of one of two
@@ -40,8 +41,8 @@
40#include <linux/errno.h> 41#include <linux/errno.h>
41#include <linux/err.h> 42#include <linux/err.h>
42 43
43#include <ib_verbs.h> 44#include <rdma/ib_verbs.h>
44#include <ib_cache.h> 45#include <rdma/ib_cache.h>
45 46
46/* Protection domains */ 47/* Protection domains */
47 48
@@ -153,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah)
153} 154}
154EXPORT_SYMBOL(ib_destroy_ah); 155EXPORT_SYMBOL(ib_destroy_ah);
155 156
157/* Shared receive queues */
158
159struct ib_srq *ib_create_srq(struct ib_pd *pd,
160 struct ib_srq_init_attr *srq_init_attr)
161{
162 struct ib_srq *srq;
163
164 if (!pd->device->create_srq)
165 return ERR_PTR(-ENOSYS);
166
167 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
168
169 if (!IS_ERR(srq)) {
170 srq->device = pd->device;
171 srq->pd = pd;
172 srq->uobject = NULL;
173 srq->event_handler = srq_init_attr->event_handler;
174 srq->srq_context = srq_init_attr->srq_context;
175 atomic_inc(&pd->usecnt);
176 atomic_set(&srq->usecnt, 0);
177 }
178
179 return srq;
180}
181EXPORT_SYMBOL(ib_create_srq);
182
183int ib_modify_srq(struct ib_srq *srq,
184 struct ib_srq_attr *srq_attr,
185 enum ib_srq_attr_mask srq_attr_mask)
186{
187 return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
188}
189EXPORT_SYMBOL(ib_modify_srq);
190
191int ib_query_srq(struct ib_srq *srq,
192 struct ib_srq_attr *srq_attr)
193{
194 return srq->device->query_srq ?
195 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
196}
197EXPORT_SYMBOL(ib_query_srq);
198
199int ib_destroy_srq(struct ib_srq *srq)
200{
201 struct ib_pd *pd;
202 int ret;
203
204 if (atomic_read(&srq->usecnt))
205 return -EBUSY;
206
207 pd = srq->pd;
208
209 ret = srq->device->destroy_srq(srq);
210 if (!ret)
211 atomic_dec(&pd->usecnt);
212
213 return ret;
214}
215EXPORT_SYMBOL(ib_destroy_srq);
216
156/* Queue pairs */ 217/* Queue pairs */
157 218
158struct ib_qp *ib_create_qp(struct ib_pd *pd, 219struct ib_qp *ib_create_qp(struct ib_pd *pd,
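The SRQ verbs exported above mirror the existing QP verbs, down to ERR_PTR-encoded failure from ib_create_srq() and the -EBUSY guard in ib_destroy_srq() while the SRQ's use count is nonzero. A hedged consumer sketch (kernel context; the attribute values are illustrative only):

static struct ib_srq *make_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler	= NULL,		/* or a handler for SRQ async events */
		.srq_context	= NULL,
		.attr = {
			.max_wr		= 128,	/* illustrative queue depth */
			.max_sge	= 1,
			.srq_limit	= 0,	/* no limit event armed */
		},
	};

	return ib_create_srq(pd, &init_attr);	/* ERR_PTR-encoded on failure */
}

QPs created with attr.srq pointing at the result then draw their receive work requests from the shared queue, which is what the create_qp hunk earlier wires up.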
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index 5dcbd43073e2..c44f7bae5424 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,5 +1,3 @@
1EXTRA_CFLAGS += -Idrivers/infiniband/include
2
3ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 1ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
4EXTRA_CFLAGS += -DDEBUG 2EXTRA_CFLAGS += -DDEBUG
5endif 3endif
@@ -9,4 +7,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
9ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ 7ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
10 mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ 8 mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
11 mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ 9 mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
12 mthca_provider.o mthca_memfree.o mthca_uar.o 10 mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b1db48dd91d6..9ba3211cef7c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)
177 177
178 kfree(array->page_list); 178 kfree(array->page_list);
179} 179}
180
181/*
182 * Handling for queue buffers -- we allocate a bunch of memory and
183 * register it in a memory region at HCA virtual address 0. If the
184 * requested size is > max_direct, we split the allocation into
185 * multiple pages, so we don't require too much contiguous memory.
186 */
187
188int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
189 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
190 int hca_write, struct mthca_mr *mr)
191{
192 int err = -ENOMEM;
193 int npages, shift;
194 u64 *dma_list = NULL;
195 dma_addr_t t;
196 int i;
197
198 if (size <= max_direct) {
199 *is_direct = 1;
200 npages = 1;
201 shift = get_order(size) + PAGE_SHIFT;
202
203 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
204 size, &t, GFP_KERNEL);
205 if (!buf->direct.buf)
206 return -ENOMEM;
207
208 pci_unmap_addr_set(&buf->direct, mapping, t);
209
210 memset(buf->direct.buf, 0, size);
211
212 while (t & ((1 << shift) - 1)) {
213 --shift;
214 npages *= 2;
215 }
216
217 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
218 if (!dma_list)
219 goto err_free;
220
221 for (i = 0; i < npages; ++i)
222 dma_list[i] = t + i * (1 << shift);
223 } else {
224 *is_direct = 0;
225 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
226 shift = PAGE_SHIFT;
227
228 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
229 if (!dma_list)
230 return -ENOMEM;
231
232 buf->page_list = kmalloc(npages * sizeof *buf->page_list,
233 GFP_KERNEL);
234 if (!buf->page_list)
235 goto err_out;
236
237 for (i = 0; i < npages; ++i)
238 buf->page_list[i].buf = NULL;
239
240 for (i = 0; i < npages; ++i) {
241 buf->page_list[i].buf =
242 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
243 &t, GFP_KERNEL);
244 if (!buf->page_list[i].buf)
245 goto err_free;
246
247 dma_list[i] = t;
248 pci_unmap_addr_set(&buf->page_list[i], mapping, t);
249
250 memset(buf->page_list[i].buf, 0, PAGE_SIZE);
251 }
252 }
253
254 err = mthca_mr_alloc_phys(dev, pd->pd_num,
255 dma_list, shift, npages,
256 0, size,
257 MTHCA_MPT_FLAG_LOCAL_READ |
258 (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
259 mr);
260 if (err)
261 goto err_free;
262
263 kfree(dma_list);
264
265 return 0;
266
267err_free:
268 mthca_buf_free(dev, size, buf, *is_direct, NULL);
269
270err_out:
271 kfree(dma_list);
272
273 return err;
274}
275
276void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
277 int is_direct, struct mthca_mr *mr)
278{
279 int i;
280
281 if (mr)
282 mthca_free_mr(dev, mr);
283
284 if (is_direct)
285 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
286 pci_unmap_addr(&buf->direct, mapping));
287 else {
288 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
289 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
290 buf->page_list[i].buf,
291 pci_unmap_addr(&buf->page_list[i],
292 mapping));
293 kfree(buf->page_list);
294 }
295}
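The least obvious part of mthca_buf_alloc() is the while loop in the direct case: the buffer is one physically contiguous block, but the mapping describes equal-sized chunks that the DMA address must be aligned to, so the loop halves the chunk size and doubles the count until the alignment holds. A standalone illustration of the loop's effect:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t t = 0x12346000;	/* example DMA address, only 8 KB aligned */
	int shift = 16;			/* initially one 64 KB chunk */
	int npages = 1;

	while (t & ((1ULL << shift) - 1)) {
		--shift;		/* halve the chunk size... */
		npages *= 2;		/* ...and double the count */
	}

	printf("%d chunks of %u bytes\n", npages, 1u << shift);
	return 0;			/* prints: 8 chunks of 8192 bytes */
}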
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index d58dcbe66488..889e85096736 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -35,22 +35,22 @@
35 35
36#include <linux/init.h> 36#include <linux/init.h>
37 37
38#include <ib_verbs.h> 38#include <rdma/ib_verbs.h>
39#include <ib_cache.h> 39#include <rdma/ib_cache.h>
40 40
41#include "mthca_dev.h" 41#include "mthca_dev.h"
42 42
43struct mthca_av { 43struct mthca_av {
44 u32 port_pd; 44 __be32 port_pd;
45 u8 reserved1; 45 u8 reserved1;
46 u8 g_slid; 46 u8 g_slid;
47 u16 dlid; 47 __be16 dlid;
48 u8 reserved2; 48 u8 reserved2;
49 u8 gid_index; 49 u8 gid_index;
50 u8 msg_sr; 50 u8 msg_sr;
51 u8 hop_limit; 51 u8 hop_limit;
52 u32 sl_tclass_flowlabel; 52 __be32 sl_tclass_flowlabel;
53 u32 dgid[4]; 53 __be32 dgid[4];
54}; 54};
55 55
56int mthca_create_ah(struct mthca_dev *dev, 56int mthca_create_ah(struct mthca_dev *dev,
@@ -128,7 +128,7 @@ on_hca_fail:
128 av, (unsigned long) ah->avdma); 128 av, (unsigned long) ah->avdma);
129 for (j = 0; j < 8; ++j) 129 for (j = 0; j < 8; ++j)
130 printk(KERN_DEBUG " [%2x] %08x\n", 130 printk(KERN_DEBUG " [%2x] %08x\n",
131 j * 4, be32_to_cpu(((u32 *) av)[j])); 131 j * 4, be32_to_cpu(((__be32 *) av)[j]));
132 } 132 }
133 133
134 if (ah->type == MTHCA_AH_ON_HCA) { 134 if (ah->type == MTHCA_AH_ON_HCA) {
@@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
169 169
170 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; 170 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
171 header->lrh.destination_lid = ah->av->dlid; 171 header->lrh.destination_lid = ah->av->dlid;
172 header->lrh.source_lid = ah->av->g_slid & 0x7f; 172 header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
173 if (ah->av->g_slid & 0x80) { 173 if (ah->av->g_slid & 0x80) {
174 header->grh_present = 1; 174 header->grh_present = 1;
175 header->grh.traffic_class = 175 header->grh.traffic_class =
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1557a522d831..cc758a2d2bc6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <linux/errno.h> 38#include <linux/errno.h>
38#include <asm/io.h> 39#include <asm/io.h>
39#include <ib_mad.h> 40#include <rdma/ib_mad.h>
40 41
41#include "mthca_dev.h" 42#include "mthca_dev.h"
42#include "mthca_config_reg.h" 43#include "mthca_config_reg.h"
@@ -108,6 +109,7 @@ enum {
108 CMD_SW2HW_SRQ = 0x35, 109 CMD_SW2HW_SRQ = 0x35,
109 CMD_HW2SW_SRQ = 0x36, 110 CMD_HW2SW_SRQ = 0x36,
110 CMD_QUERY_SRQ = 0x37, 111 CMD_QUERY_SRQ = 0x37,
112 CMD_ARM_SRQ = 0x40,
111 113
112 /* QP/EE commands */ 114 /* QP/EE commands */
113 CMD_RST2INIT_QPEE = 0x19, 115 CMD_RST2INIT_QPEE = 0x19,
@@ -219,20 +221,20 @@ static int mthca_cmd_post(struct mthca_dev *dev,
219 * (and some architectures such as ia64 implement memcpy_toio 221 * (and some architectures such as ia64 implement memcpy_toio
220 * in terms of writeb). 222 * in terms of writeb).
221 */ 223 */
222 __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); 224 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
223 __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); 225 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
224 __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); 226 __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
225 __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); 227 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
226 __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); 228 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
227 __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); 229 __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4);
228 230
229 /* __raw_writel may not order writes. */ 231 /* __raw_writel may not order writes. */
230 wmb(); 232 wmb();
231 233
232 __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | 234 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
233 (event ? (1 << HCA_E_BIT) : 0) | 235 (event ? (1 << HCA_E_BIT) : 0) |
234 (op_modifier << HCR_OPMOD_SHIFT) | 236 (op_modifier << HCR_OPMOD_SHIFT) |
235 op), dev->hcr + 6 * 4); 237 op), dev->hcr + 6 * 4);
236 238
237out: 239out:
238 up(&dev->cmd.hcr_sem); 240 up(&dev->cmd.hcr_sem);
@@ -273,12 +275,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
273 goto out; 275 goto out;
274 } 276 }
275 277
276 if (out_is_imm) { 278 if (out_is_imm)
277 memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); 279 *out_param =
278 be64_to_cpus(out_param); 280 (u64) be32_to_cpu((__force __be32)
279 } 281 __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
282 (u64) be32_to_cpu((__force __be32)
283 __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
280 284
281 *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 285 *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
282 286
283out: 287out:
284 up(&dev->cmd.poll_sem); 288 up(&dev->cmd.poll_sem);
@@ -1029,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
1029 1033
1030 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 1034 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1031 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); 1035 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
1036 mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1037 dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
1032 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 1038 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1033 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); 1039 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
1034 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", 1040 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
@@ -1082,6 +1088,34 @@ out:
1082 return err; 1088 return err;
1083} 1089}
1084 1090
1091static void get_board_id(void *vsd, char *board_id)
1092{
1093 int i;
1094
1095#define VSD_OFFSET_SIG1 0x00
1096#define VSD_OFFSET_SIG2 0xde
1097#define VSD_OFFSET_MLX_BOARD_ID 0xd0
1098#define VSD_OFFSET_TS_BOARD_ID 0x20
1099
1100#define VSD_SIGNATURE_TOPSPIN 0x5ad
1101
1102 memset(board_id, 0, MTHCA_BOARD_ID_LEN);
1103
1104 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1105 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1106 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
1107 } else {
1108 /*
1109 * The board ID is a string but the firmware byte
1110 * swaps each 4-byte word before passing it back to
1111 * us. Therefore we need to swab it before printing.
1112 */
1113 for (i = 0; i < 4; ++i)
1114 ((u32 *) board_id)[i] =
1115 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1116 }
1117}
1118
1085int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1119int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1086 struct mthca_adapter *adapter, u8 *status) 1120 struct mthca_adapter *adapter, u8 *status)
1087{ 1121{
@@ -1094,6 +1128,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1094#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 1128#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
1095#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 1129#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
1096#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1130#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1131#define QUERY_ADAPTER_VSD_OFFSET 0x20
1097 1132
1098 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1133 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1099 if (IS_ERR(mailbox)) 1134 if (IS_ERR(mailbox))
@@ -1111,6 +1146,9 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1111 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1146 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1112 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1147 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1113 1148
1149 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1150 adapter->board_id);
1151
1114out: 1152out:
1115 mthca_free_mailbox(dev, mailbox); 1153 mthca_free_mailbox(dev, mailbox);
1116 return err; 1154 return err;
@@ -1121,7 +1159,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1121 u8 *status) 1159 u8 *status)
1122{ 1160{
1123 struct mthca_mailbox *mailbox; 1161 struct mthca_mailbox *mailbox;
1124 u32 *inbox; 1162 __be32 *inbox;
1125 int err; 1163 int err;
1126 1164
1127#define INIT_HCA_IN_SIZE 0x200 1165#define INIT_HCA_IN_SIZE 0x200
@@ -1247,10 +1285,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1247#define INIT_IB_FLAG_SIG (1 << 18) 1285#define INIT_IB_FLAG_SIG (1 << 18)
1248#define INIT_IB_FLAG_NG (1 << 17) 1286#define INIT_IB_FLAG_NG (1 << 17)
1249#define INIT_IB_FLAG_G0 (1 << 16) 1287#define INIT_IB_FLAG_G0 (1 << 16)
1250#define INIT_IB_FLAG_1X (1 << 8)
1251#define INIT_IB_FLAG_4X (1 << 9)
1252#define INIT_IB_FLAG_12X (1 << 11)
1253#define INIT_IB_VL_SHIFT 4 1288#define INIT_IB_VL_SHIFT 4
1289#define INIT_IB_PORT_WIDTH_SHIFT 8
1254#define INIT_IB_MTU_SHIFT 12 1290#define INIT_IB_MTU_SHIFT 12
1255#define INIT_IB_MAX_GID_OFFSET 0x06 1291#define INIT_IB_MAX_GID_OFFSET 0x06
1256#define INIT_IB_MAX_PKEY_OFFSET 0x0a 1292#define INIT_IB_MAX_PKEY_OFFSET 0x0a
@@ -1266,12 +1302,11 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1266 memset(inbox, 0, INIT_IB_IN_SIZE); 1302 memset(inbox, 0, INIT_IB_IN_SIZE);
1267 1303
1268 flags = 0; 1304 flags = 0;
1269 flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0;
1270 flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0;
1271 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; 1305 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0;
1272 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; 1306 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0;
1273 flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; 1307 flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0;
1274 flags |= param->vl_cap << INIT_IB_VL_SHIFT; 1308 flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1309 flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1275 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; 1310 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1276 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); 1311 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1277 1312
@@ -1342,7 +1377,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
1342int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1377int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1343{ 1378{
1344 struct mthca_mailbox *mailbox; 1379 struct mthca_mailbox *mailbox;
1345 u64 *inbox; 1380 __be64 *inbox;
1346 int err; 1381 int err;
1347 1382
1348 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1383 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1468,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1468 CMD_TIME_CLASS_A, status); 1503 CMD_TIME_CLASS_A, status);
1469} 1504}
1470 1505
1506int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1507 int srq_num, u8 *status)
1508{
1509 return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1510 CMD_TIME_CLASS_A, status);
1511}
1512
1513int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1514 int srq_num, u8 *status)
1515{
1516 return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1517 CMD_HW2SW_SRQ,
1518 CMD_TIME_CLASS_A, status);
1519}
1520
1521int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
1522{
1523 return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1524 CMD_TIME_CLASS_B, status);
1525}
1526
1471int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 1527int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1472 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 1528 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
1473 u8 *status) 1529 u8 *status)
@@ -1513,7 +1569,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1513 if (i % 8 == 0) 1569 if (i % 8 == 0)
1514 printk(" [%02x] ", i * 4); 1570 printk(" [%02x] ", i * 4);
1515 printk(" %08x", 1571 printk(" %08x",
1516 be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1572 be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
1517 if ((i + 1) % 8 == 0) 1573 if ((i + 1) % 8 == 0)
1518 printk("\n"); 1574 printk("\n");
1519 } 1575 }
@@ -1533,7 +1589,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1533 if (i % 8 == 0) 1589 if (i % 8 == 0)
1534 printk("[%02x] ", i * 4); 1590 printk("[%02x] ", i * 4);
1535 printk(" %08x", 1591 printk(" %08x",
1536 be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1592 be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
1537 if ((i + 1) % 8 == 0) 1593 if ((i + 1) % 8 == 0)
1538 printk("\n"); 1594 printk("\n");
1539 } 1595 }
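get_board_id() above documents that non-Topspin firmware hands back the board ID with every 4-byte word byte-swapped, hence the swab32() loop. A standalone demonstration of the recovery (the board name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t swab32(uint32_t x)	/* reverse the four bytes of x */
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	char vsd[8], board_id[9] = { 0 };
	int i;

	memcpy(vsd, "32TM 801", 8);	/* "MT23108 " as firmware returns it */

	for (i = 0; i < 2; ++i) {
		uint32_t w;

		memcpy(&w, vsd + i * 4, 4);
		w = swab32(w);			/* undo the per-word swap */
		memcpy(board_id + i * 4, &w, 4);
	}

	printf("%s\n", board_id);	/* prints "MT23108 " */
	return 0;
}

The load/swab/store round trip reverses bytes in memory regardless of host endianness, which is why the kernel loop can use swab32() unconditionally.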
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index ed517f175dd6..65f976a13e02 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +36,7 @@
35#ifndef MTHCA_CMD_H 36#ifndef MTHCA_CMD_H
36#define MTHCA_CMD_H 37#define MTHCA_CMD_H
37 38
38#include <ib_verbs.h> 39#include <rdma/ib_verbs.h>
39 40
40#define MTHCA_MAILBOX_SIZE 4096 41#define MTHCA_MAILBOX_SIZE 4096
41 42
@@ -183,10 +184,11 @@ struct mthca_dev_lim {
183}; 184};
184 185
185struct mthca_adapter { 186struct mthca_adapter {
186 u32 vendor_id; 187 u32 vendor_id;
187 u32 device_id; 188 u32 device_id;
188 u32 revision_id; 189 u32 revision_id;
189 u8 inta_pin; 190 char board_id[MTHCA_BOARD_ID_LEN];
191 u8 inta_pin;
190}; 192};
191 193
192struct mthca_init_hca_param { 194struct mthca_init_hca_param {
@@ -218,8 +220,7 @@ struct mthca_init_hca_param {
218}; 220};
219 221
220struct mthca_init_ib_param { 222struct mthca_init_ib_param {
221 int enable_1x; 223 int port_width;
222 int enable_4x;
223 int vl_cap; 224 int vl_cap;
224 int mtu_cap; 225 int mtu_cap;
225 u16 gid_cap; 226 u16 gid_cap;
@@ -297,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
297 int cq_num, u8 *status); 298 int cq_num, u8 *status);
298int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 299int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
299 int cq_num, u8 *status); 300 int cq_num, u8 *status);
301int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
302 int srq_num, u8 *status);
303int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
304 int srq_num, u8 *status);
305int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
300int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 306int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
301 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 307 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
302 u8 *status); 308 u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index b4bfbbfe2c3d..afa56bfaab2e 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 5687c3014522..8600b6c3e0c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -37,7 +39,7 @@
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/hardirq.h> 40#include <linux/hardirq.h>
39 41
40#include <ib_pack.h> 42#include <rdma/ib_pack.h>
41 43
42#include "mthca_dev.h" 44#include "mthca_dev.h"
43#include "mthca_cmd.h" 45#include "mthca_cmd.h"
@@ -55,21 +57,21 @@ enum {
55 * Must be packed because start is 64 bits but only aligned to 32 bits. 57 * Must be packed because start is 64 bits but only aligned to 32 bits.
56 */ 58 */
57struct mthca_cq_context { 59struct mthca_cq_context {
58 u32 flags; 60 __be32 flags;
59 u64 start; 61 __be64 start;
60 u32 logsize_usrpage; 62 __be32 logsize_usrpage;
61 u32 error_eqn; /* Tavor only */ 63 __be32 error_eqn; /* Tavor only */
62 u32 comp_eqn; 64 __be32 comp_eqn;
63 u32 pd; 65 __be32 pd;
64 u32 lkey; 66 __be32 lkey;
65 u32 last_notified_index; 67 __be32 last_notified_index;
66 u32 solicit_producer_index; 68 __be32 solicit_producer_index;
67 u32 consumer_index; 69 __be32 consumer_index;
68 u32 producer_index; 70 __be32 producer_index;
69 u32 cqn; 71 __be32 cqn;
70 u32 ci_db; /* Arbel only */ 72 __be32 ci_db; /* Arbel only */
71 u32 state_db; /* Arbel only */ 73 __be32 state_db; /* Arbel only */
72 u32 reserved; 74 u32 reserved;
73} __attribute__((packed)); 75} __attribute__((packed));
74 76
75#define MTHCA_CQ_STATUS_OK ( 0 << 28) 77#define MTHCA_CQ_STATUS_OK ( 0 << 28)
@@ -108,31 +110,31 @@ enum {
108}; 110};
109 111
110struct mthca_cqe { 112struct mthca_cqe {
111 u32 my_qpn; 113 __be32 my_qpn;
112 u32 my_ee; 114 __be32 my_ee;
113 u32 rqpn; 115 __be32 rqpn;
114 u16 sl_g_mlpath; 116 __be16 sl_g_mlpath;
115 u16 rlid; 117 __be16 rlid;
116 u32 imm_etype_pkey_eec; 118 __be32 imm_etype_pkey_eec;
117 u32 byte_cnt; 119 __be32 byte_cnt;
118 u32 wqe; 120 __be32 wqe;
119 u8 opcode; 121 u8 opcode;
120 u8 is_send; 122 u8 is_send;
121 u8 reserved; 123 u8 reserved;
122 u8 owner; 124 u8 owner;
123}; 125};
124 126
125struct mthca_err_cqe { 127struct mthca_err_cqe {
126 u32 my_qpn; 128 __be32 my_qpn;
127 u32 reserved1[3]; 129 u32 reserved1[3];
128 u8 syndrome; 130 u8 syndrome;
129 u8 reserved2; 131 u8 reserved2;
130 u16 db_cnt; 132 __be16 db_cnt;
131 u32 reserved3; 133 u32 reserved3;
132 u32 wqe; 134 __be32 wqe;
133 u8 opcode; 135 u8 opcode;
134 u8 reserved4[2]; 136 u8 reserved4[2];
135 u8 owner; 137 u8 owner;
136}; 138};
137 139
138#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) 140#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7)
@@ -191,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
191static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, 193static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
192 int incr) 194 int incr)
193{ 195{
194 u32 doorbell[2]; 196 __be32 doorbell[2];
195 197
196 if (mthca_is_memfree(dev)) { 198 if (mthca_is_memfree(dev)) {
197 *cq->set_ci_db = cpu_to_be32(cq->cons_index); 199 *cq->set_ci_db = cpu_to_be32(cq->cons_index);
@@ -222,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
222 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 224 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
223} 225}
224 226
225void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) 227void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
228 struct mthca_srq *srq)
226{ 229{
227 struct mthca_cq *cq; 230 struct mthca_cq *cq;
228 struct mthca_cqe *cqe; 231 struct mthca_cqe *cqe;
@@ -263,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
263 */ 266 */
264 while (prod_index > cq->cons_index) { 267 while (prod_index > cq->cons_index) {
265 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); 268 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
266 if (cqe->my_qpn == cpu_to_be32(qpn)) 269 if (cqe->my_qpn == cpu_to_be32(qpn)) {
270 if (srq)
271 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
267 ++nfreed; 272 ++nfreed;
273 }
268 else if (nfreed) 274 else if (nfreed)
269 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & 275 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
270 cq->ibcq.cqe), 276 cq->ibcq.cqe),
@@ -291,7 +297,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
291{ 297{
292 int err; 298 int err;
293 int dbd; 299 int dbd;
294 u32 new_wqe; 300 __be32 new_wqe;
295 301
296 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { 302 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
297 mthca_dbg(dev, "local QP operation err " 303 mthca_dbg(dev, "local QP operation err "
@@ -365,6 +371,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
365 break; 371 break;
366 } 372 }
367 373
374 /*
375 * Mem-free HCAs always generate one CQE per WQE, even in the
376 * error case, so we don't have to check the doorbell count, etc.
377 */
378 if (mthca_is_memfree(dev))
379 return 0;
380
368 err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); 381 err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
369 if (err) 382 if (err)
370 return err; 383 return err;
@@ -373,12 +386,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
373 * If we're at the end of the WQE chain, or we've used up our 386 * If we're at the end of the WQE chain, or we've used up our
374 * doorbell count, free the CQE. Otherwise just update it for 387 * doorbell count, free the CQE. Otherwise just update it for
375 * the next poll operation. 388 * the next poll operation.
376 *
377 * This does not apply to mem-free HCAs: they don't use the
378 * doorbell count field, and so we should always free the CQE.
379 */ 389 */
380 if (mthca_is_memfree(dev) || 390 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
381 !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
382 return 0; 391 return 0;
383 392
384 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); 393 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
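
For context on the hunk above: on Tavor one error CQE can account for several flushed WQEs, so the driver decrements the CQE's db_cnt by the doorbells covered (dbd) on each poll and only frees the CQE once the WQE chain ends (low six bits of new_wqe clear) or the count is exhausted; mem-free HCAs now return early because they emit one CQE per WQE. A toy model of that bookkeeping, with invented values and host byte order for readability:

#include <stdint.h>
#include <stdio.h>

/* Tavor-style test: an error CQE may be freed once the WQE chain ends
 * (size bits of new_wqe clear) or its doorbell count is used up. */
static int cqe_done(uint32_t new_wqe, int db_cnt, int dbd)
{
	return !(new_wqe & 0x3f) || (!db_cnt && dbd);
}

int main(void)
{
	int db_cnt = 3, dbd = 1;          /* invented values */
	uint32_t new_wqe = 0x400 | 0x10;  /* next WQE at 0x400, size bits set */

	while (!cqe_done(new_wqe, db_cnt, dbd)) {
		db_cnt -= dbd;            /* the cqe->db_cnt update per poll */
		printf("CQE kept, db_cnt now %d\n", db_cnt);
	}
	printf("CQE freed\n");
	return 0;
}
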
@@ -450,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
450 >> wq->wqe_shift); 459 >> wq->wqe_shift);
451 entry->wr_id = (*cur_qp)->wrid[wqe_index + 460 entry->wr_id = (*cur_qp)->wrid[wqe_index +
452 (*cur_qp)->rq.max]; 461 (*cur_qp)->rq.max];
462 } else if ((*cur_qp)->ibqp.srq) {
463 struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
464 u32 wqe = be32_to_cpu(cqe->wqe);
465 wq = NULL;
466 wqe_index = wqe >> srq->wqe_shift;
467 entry->wr_id = srq->wrid[wqe_index];
468 mthca_free_srq_wqe(srq, wqe);
453 } else { 469 } else {
454 wq = &(*cur_qp)->rq; 470 wq = &(*cur_qp)->rq;
455 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; 471 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
456 entry->wr_id = (*cur_qp)->wrid[wqe_index]; 472 entry->wr_id = (*cur_qp)->wrid[wqe_index];
457 } 473 }
458 474
459 if (wq->last_comp < wqe_index) 475 if (wq) {
460 wq->tail += wqe_index - wq->last_comp; 476 if (wq->last_comp < wqe_index)
461 else 477 wq->tail += wqe_index - wq->last_comp;
462 wq->tail += wqe_index + wq->max - wq->last_comp; 478 else
463 479 wq->tail += wqe_index + wq->max - wq->last_comp;
464 wq->last_comp = wqe_index;
465 480
466 if (0) 481 wq->last_comp = wqe_index;
467 mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", 482 }
468 is_send ? "Send" : "Receive",
469 (*cur_qp)->qpn, wqe_index, wq->max);
470 483
471 if (is_error) { 484 if (is_error) {
472 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, 485 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
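
Two computations in this hunk are worth checking in isolation: the SRQ branch turns the byte offset from cqe->wqe into an index by shifting down wqe_shift, and ordinary queues advance wq->tail by the distance from last_comp, wrapping through wq->max. A standalone check with invented sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* SRQ case: WQEs are (1 << wqe_shift) bytes, so offset >> shift = index. */
	unsigned wqe_shift = 6;           /* 64-byte WQEs (made up) */
	uint32_t wqe_offset = 0x1c0;      /* as from be32_to_cpu(cqe->wqe) */
	printf("srq wqe_index = %u\n", wqe_offset >> wqe_shift);  /* 7 */

	/* RQ/SQ case: advance tail from last_comp to wqe_index, wrapping. */
	unsigned max = 16, last_comp = 14, tail = 30, wqe_index = 2;
	if (last_comp < wqe_index)
		tail += wqe_index - last_comp;
	else
		tail += wqe_index + max - last_comp;  /* wrapped past the end */
	printf("tail advanced to %u\n", tail);        /* 30 + 2 + 16 - 14 = 34 */
	return 0;
}
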
@@ -584,13 +597,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
584 597
585int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) 598int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
586{ 599{
587 u32 doorbell[2]; 600 __be32 doorbell[2];
588 601
589 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? 602 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
590 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : 603 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
591 MTHCA_TAVOR_CQ_DB_REQ_NOT) | 604 MTHCA_TAVOR_CQ_DB_REQ_NOT) |
592 to_mcq(cq)->cqn); 605 to_mcq(cq)->cqn);
593 doorbell[1] = 0xffffffff; 606 doorbell[1] = (__force __be32) 0xffffffff;
594 607
595 mthca_write64(doorbell, 608 mthca_write64(doorbell,
596 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, 609 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
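
The (__force __be32) cast exists purely for sparse: 0xffffffff has the same representation in either byte order, so no swap is wanted, but assigning a plain constant to a __be32 would otherwise trip the endianness checker. Roughly the same machinery can be reproduced outside the kernel; __bitwise and __force are sparse conventions, stubbed to nothing when the checker is not running (a sketch, not the kernel's definitions):

#include <stdint.h>

#ifdef __CHECKER__                  /* sparse defines this; gcc does not */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise be32;    /* a distinct type only under sparse */

int main(void)
{
	be32 doorbell[2];

	doorbell[0] = (__force be32) 0;           /* byte-order neutral */
	doorbell[1] = (__force be32) 0xffffffff;  /* all ones: identical either way */
	(void) doorbell;
	return 0;
}
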
@@ -602,9 +615,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
602int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 615int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
603{ 616{
604 struct mthca_cq *cq = to_mcq(ibcq); 617 struct mthca_cq *cq = to_mcq(ibcq);
605 u32 doorbell[2]; 618 __be32 doorbell[2];
606 u32 sn; 619 u32 sn;
607 u32 ci; 620 __be32 ci;
608 621
609 sn = cq->arm_sn & 3; 622 sn = cq->arm_sn & 3;
610 ci = cpu_to_be32(cq->cons_index); 623 ci = cpu_to_be32(cq->cons_index);
@@ -637,113 +650,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
637 650
638static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) 651static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
639{ 652{
640 int i; 653 mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
641 int size; 654 &cq->queue, cq->is_direct, &cq->mr);
642
643 if (cq->is_direct)
644 dma_free_coherent(&dev->pdev->dev,
645 (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
646 cq->queue.direct.buf,
647 pci_unmap_addr(&cq->queue.direct,
648 mapping));
649 else {
650 size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
651 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
652 if (cq->queue.page_list[i].buf)
653 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
654 cq->queue.page_list[i].buf,
655 pci_unmap_addr(&cq->queue.page_list[i],
656 mapping));
657
658 kfree(cq->queue.page_list);
659 }
660}
661
662static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
663 struct mthca_cq *cq)
664{
665 int err = -ENOMEM;
666 int npages, shift;
667 u64 *dma_list = NULL;
668 dma_addr_t t;
669 int i;
670
671 if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
672 cq->is_direct = 1;
673 npages = 1;
674 shift = get_order(size) + PAGE_SHIFT;
675
676 cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
677 size, &t, GFP_KERNEL);
678 if (!cq->queue.direct.buf)
679 return -ENOMEM;
680
681 pci_unmap_addr_set(&cq->queue.direct, mapping, t);
682
683 memset(cq->queue.direct.buf, 0, size);
684
685 while (t & ((1 << shift) - 1)) {
686 --shift;
687 npages *= 2;
688 }
689
690 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
691 if (!dma_list)
692 goto err_free;
693
694 for (i = 0; i < npages; ++i)
695 dma_list[i] = t + i * (1 << shift);
696 } else {
697 cq->is_direct = 0;
698 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
699 shift = PAGE_SHIFT;
700
701 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
702 if (!dma_list)
703 return -ENOMEM;
704
705 cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
706 GFP_KERNEL);
707 if (!cq->queue.page_list)
708 goto err_out;
709
710 for (i = 0; i < npages; ++i)
711 cq->queue.page_list[i].buf = NULL;
712
713 for (i = 0; i < npages; ++i) {
714 cq->queue.page_list[i].buf =
715 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
716 &t, GFP_KERNEL);
717 if (!cq->queue.page_list[i].buf)
718 goto err_free;
719
720 dma_list[i] = t;
721 pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
722
723 memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
724 }
725 }
726
727 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
728 dma_list, shift, npages,
729 0, size,
730 MTHCA_MPT_FLAG_LOCAL_WRITE |
731 MTHCA_MPT_FLAG_LOCAL_READ,
732 &cq->mr);
733 if (err)
734 goto err_free;
735
736 kfree(dma_list);
737
738 return 0;
739
740err_free:
741 mthca_free_cq_buf(dev, cq);
742
743err_out:
744 kfree(dma_list);
745
746 return err;
747} 655}
748 656
749int mthca_init_cq(struct mthca_dev *dev, int nent, 657int mthca_init_cq(struct mthca_dev *dev, int nent,
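
The large deletion above removes the per-CQ copy of a pattern every queue type needs: one physically contiguous DMA buffer when the queue is small, a page list otherwise, now centralized in mthca_buf_alloc()/mthca_buf_free(). A malloc-based caricature of that direct-versus-paged decision (no DMA or MR registration, purely the shape of the shared helper; all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE       4096
#define MAX_DIRECT_SIZE 8192    /* stand-in for MTHCA_MAX_DIRECT_CQ_SIZE */

struct buf {
	int    is_direct;
	void  *direct;              /* used when the whole buffer fits */
	void **page_list;           /* otherwise one pointer per page */
	int    npages;
};

static void buf_free(struct buf *b)
{
	if (b->is_direct) {
		free(b->direct);
		return;
	}
	for (int i = 0; i < b->npages; ++i)
		free(b->page_list[i]);  /* free(NULL) is safe */
	free(b->page_list);
}

static int buf_alloc(struct buf *b, size_t size)
{
	memset(b, 0, sizeof *b);
	if (size <= MAX_DIRECT_SIZE) {
		b->is_direct = 1;
		b->direct = calloc(1, size);
		return b->direct ? 0 : -1;
	}
	b->npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	b->page_list = calloc(b->npages, sizeof *b->page_list);
	if (!b->page_list)
		return -1;
	for (int i = 0; i < b->npages; ++i) {
		b->page_list[i] = calloc(1, PAGE_SIZE);
		if (!b->page_list[i]) {
			buf_free(b);    /* unwind the partial allocation */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct buf b;
	if (buf_alloc(&b, 3 * PAGE_SIZE) == 0) {
		printf("allocated a %s buffer\n", b.is_direct ? "direct" : "paged");
		buf_free(&b);
	}
	return 0;
}
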
@@ -795,7 +703,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
795 cq_context = mailbox->buf; 703 cq_context = mailbox->buf;
796 704
797 if (cq->is_kernel) { 705 if (cq->is_kernel) {
798 err = mthca_alloc_cq_buf(dev, size, cq); 706 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
707 &cq->queue, &cq->is_direct,
708 &dev->driver_pd, 1, &cq->mr);
799 if (err) 709 if (err)
800 goto err_out_mailbox; 710 goto err_out_mailbox;
801 711
@@ -811,7 +721,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
811 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | 721 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
812 MTHCA_CQ_STATE_DISARMED | 722 MTHCA_CQ_STATE_DISARMED |
813 MTHCA_CQ_FLAG_TR); 723 MTHCA_CQ_FLAG_TR);
814 cq_context->start = cpu_to_be64(0);
815 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 724 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
816 if (ctx) 725 if (ctx)
817 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); 726 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
@@ -857,10 +766,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
857 return 0; 766 return 0;
858 767
859err_out_free_mr: 768err_out_free_mr:
860 if (cq->is_kernel) { 769 if (cq->is_kernel)
861 mthca_free_mr(dev, &cq->mr);
862 mthca_free_cq_buf(dev, cq); 770 mthca_free_cq_buf(dev, cq);
863 }
864 771
865err_out_mailbox: 772err_out_mailbox:
866 mthca_free_mailbox(dev, mailbox); 773 mthca_free_mailbox(dev, mailbox);
@@ -904,7 +811,7 @@ void mthca_free_cq(struct mthca_dev *dev,
904 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); 811 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
905 812
906 if (0) { 813 if (0) {
907 u32 *ctx = mailbox->buf; 814 __be32 *ctx = mailbox->buf;
908 int j; 815 int j;
909 816
910 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", 817 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@@ -928,7 +835,6 @@ void mthca_free_cq(struct mthca_dev *dev,
928 wait_event(cq->wait, !atomic_read(&cq->refcount)); 835 wait_event(cq->wait, !atomic_read(&cq->refcount));
929 836
930 if (cq->is_kernel) { 837 if (cq->is_kernel) {
931 mthca_free_mr(dev, &cq->mr);
932 mthca_free_cq_buf(dev, cq); 838 mthca_free_cq_buf(dev, cq);
933 if (mthca_is_memfree(dev)) { 839 if (mthca_is_memfree(dev)) {
934 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); 840 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 5ecdd2eeeb0f..7bff5a8425f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,10 @@ enum {
67}; 69};
68 70
69enum { 71enum {
72 MTHCA_BOARD_ID_LEN = 64
73};
74
75enum {
70 MTHCA_EQ_CONTEXT_SIZE = 0x40, 76 MTHCA_EQ_CONTEXT_SIZE = 0x40,
71 MTHCA_CQ_CONTEXT_SIZE = 0x40, 77 MTHCA_CQ_CONTEXT_SIZE = 0x40,
72 MTHCA_QP_CONTEXT_SIZE = 0x200, 78 MTHCA_QP_CONTEXT_SIZE = 0x200,
@@ -142,6 +148,7 @@ struct mthca_limits {
142 int reserved_mcgs; 148 int reserved_mcgs;
143 int num_pds; 149 int num_pds;
144 int reserved_pds; 150 int reserved_pds;
151 u8 port_width_cap;
145}; 152};
146 153
147struct mthca_alloc { 154struct mthca_alloc {
@@ -211,6 +218,13 @@ struct mthca_cq_table {
211 struct mthca_icm_table *table; 218 struct mthca_icm_table *table;
212}; 219};
213 220
221struct mthca_srq_table {
222 struct mthca_alloc alloc;
223 spinlock_t lock;
224 struct mthca_array srq;
225 struct mthca_icm_table *table;
226};
227
214struct mthca_qp_table { 228struct mthca_qp_table {
215 struct mthca_alloc alloc; 229 struct mthca_alloc alloc;
216 u32 rdb_base; 230 u32 rdb_base;
@@ -246,6 +260,7 @@ struct mthca_dev {
246 unsigned long device_cap_flags; 260 unsigned long device_cap_flags;
247 261
248 u32 rev_id; 262 u32 rev_id;
263 char board_id[MTHCA_BOARD_ID_LEN];
249 264
250 /* firmware info */ 265 /* firmware info */
251 u64 fw_ver; 266 u64 fw_ver;
@@ -291,6 +306,7 @@ struct mthca_dev {
291 struct mthca_mr_table mr_table; 306 struct mthca_mr_table mr_table;
292 struct mthca_eq_table eq_table; 307 struct mthca_eq_table eq_table;
293 struct mthca_cq_table cq_table; 308 struct mthca_cq_table cq_table;
309 struct mthca_srq_table srq_table;
294 struct mthca_qp_table qp_table; 310 struct mthca_qp_table qp_table;
295 struct mthca_av_table av_table; 311 struct mthca_av_table av_table;
296 struct mthca_mcg_table mcg_table; 312 struct mthca_mcg_table mcg_table;
@@ -331,14 +347,13 @@ extern void __buggy_use_of_MTHCA_PUT(void);
331 347
332#define MTHCA_PUT(dest, source, offset) \ 348#define MTHCA_PUT(dest, source, offset) \
333 do { \ 349 do { \
334 __typeof__(source) *__p = \ 350 void *__d = ((char *) (dest) + (offset)); \
335 (__typeof__(source) *) ((char *) (dest) + (offset)); \
336 switch (sizeof(source)) { \ 351 switch (sizeof(source)) { \
337 case 1: *__p = (source); break; \ 352 case 1: *(u8 *) __d = (source); break; \
338 case 2: *__p = cpu_to_be16(source); break; \ 353 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
339 case 4: *__p = cpu_to_be32(source); break; \ 354 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
340 case 8: *__p = cpu_to_be64(source); break; \ 355 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
341 default: __buggy_use_of_MTHCA_PUT(); \ 356 default: __buggy_use_of_MTHCA_PUT(); \
342 } \ 357 } \
343 } while (0) 358 } while (0)
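
The reworked MTHCA_PUT dispatches on sizeof(source), so one macro stores 1-, 2-, 4- or 8-byte fields into a mailbox in big-endian order; going through a void pointer instead of __typeof__(source) * is what allows each case to cast to the correctly annotated __be* type. A userspace rendition of the same switch, with htobe*() for cpu_to_be*() and memcpy() used for the stores to stay alignment-safe:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store `src` big-endian at byte offset `off` of `dst`, choosing the
 * width from sizeof(src), the same trick as the driver's MTHCA_PUT. */
#define PUT(dst, src, off)                                           \
	do {                                                         \
		void *__d = (char *) (dst) + (off);                  \
		switch (sizeof(src)) {                               \
		case 1: *(uint8_t *) __d = (uint8_t) (src);  break;  \
		case 2: { uint16_t __v = htobe16(src);               \
			  memcpy(__d, &__v, 2); }            break;  \
		case 4: { uint32_t __v = htobe32(src);               \
			  memcpy(__d, &__v, 4); }            break;  \
		case 8: { uint64_t __v = htobe64(src);               \
			  memcpy(__d, &__v, 8); }            break;  \
		}                                                    \
	} while (0)

int main(void)
{
	uint8_t mailbox[16] = { 0 };
	uint32_t qpn = 0x123456;

	PUT(mailbox, qpn, 4);            /* 4-byte big-endian store at +4 */
	for (int i = 0; i < 8; ++i)
		printf("%02x ", mailbox[i]);
	printf("\n");                    /* 00 00 00 00 00 12 34 56 */
	return 0;
}
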
344 359
@@ -354,12 +369,18 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
354void mthca_array_clear(struct mthca_array *array, int index); 369void mthca_array_clear(struct mthca_array *array, int index);
355int mthca_array_init(struct mthca_array *array, int nent); 370int mthca_array_init(struct mthca_array *array, int nent);
356void mthca_array_cleanup(struct mthca_array *array, int nent); 371void mthca_array_cleanup(struct mthca_array *array, int nent);
372int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
373 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
374 int hca_write, struct mthca_mr *mr);
375void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
376 int is_direct, struct mthca_mr *mr);
357 377
358int mthca_init_uar_table(struct mthca_dev *dev); 378int mthca_init_uar_table(struct mthca_dev *dev);
359int mthca_init_pd_table(struct mthca_dev *dev); 379int mthca_init_pd_table(struct mthca_dev *dev);
360int mthca_init_mr_table(struct mthca_dev *dev); 380int mthca_init_mr_table(struct mthca_dev *dev);
361int mthca_init_eq_table(struct mthca_dev *dev); 381int mthca_init_eq_table(struct mthca_dev *dev);
362int mthca_init_cq_table(struct mthca_dev *dev); 382int mthca_init_cq_table(struct mthca_dev *dev);
383int mthca_init_srq_table(struct mthca_dev *dev);
363int mthca_init_qp_table(struct mthca_dev *dev); 384int mthca_init_qp_table(struct mthca_dev *dev);
364int mthca_init_av_table(struct mthca_dev *dev); 385int mthca_init_av_table(struct mthca_dev *dev);
365int mthca_init_mcg_table(struct mthca_dev *dev); 386int mthca_init_mcg_table(struct mthca_dev *dev);
@@ -369,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev);
369void mthca_cleanup_mr_table(struct mthca_dev *dev); 390void mthca_cleanup_mr_table(struct mthca_dev *dev);
370void mthca_cleanup_eq_table(struct mthca_dev *dev); 391void mthca_cleanup_eq_table(struct mthca_dev *dev);
371void mthca_cleanup_cq_table(struct mthca_dev *dev); 392void mthca_cleanup_cq_table(struct mthca_dev *dev);
393void mthca_cleanup_srq_table(struct mthca_dev *dev);
372void mthca_cleanup_qp_table(struct mthca_dev *dev); 394void mthca_cleanup_qp_table(struct mthca_dev *dev);
373void mthca_cleanup_av_table(struct mthca_dev *dev); 395void mthca_cleanup_av_table(struct mthca_dev *dev);
374void mthca_cleanup_mcg_table(struct mthca_dev *dev); 396void mthca_cleanup_mcg_table(struct mthca_dev *dev);
@@ -419,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
419void mthca_free_cq(struct mthca_dev *dev, 441void mthca_free_cq(struct mthca_dev *dev,
420 struct mthca_cq *cq); 442 struct mthca_cq *cq);
421void mthca_cq_event(struct mthca_dev *dev, u32 cqn); 443void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
422void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); 444void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
445 struct mthca_srq *srq);
446
447int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
448 struct ib_srq_attr *attr, struct mthca_srq *srq);
449void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
450void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
451 enum ib_event_type event_type);
452void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
453int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
454 struct ib_recv_wr **bad_wr);
455int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
456 struct ib_recv_wr **bad_wr);
423 457
424void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 458void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
425 enum ib_event_type event_type); 459 enum ib_event_type event_type);
@@ -433,7 +467,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
433int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 467int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
434 struct ib_recv_wr **bad_wr); 468 struct ib_recv_wr **bad_wr);
435int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 469int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
436 int index, int *dbd, u32 *new_wqe); 470 int index, int *dbd, __be32 *new_wqe);
437int mthca_alloc_qp(struct mthca_dev *dev, 471int mthca_alloc_qp(struct mthca_dev *dev,
438 struct mthca_pd *pd, 472 struct mthca_pd *pd,
439 struct mthca_cq *send_cq, 473 struct mthca_cq *send_cq,
diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h
index 535fad7710fb..dd9a44d170c9 100644
--- a/drivers/infiniband/hw/mthca/mthca_doorbell.h
+++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -57,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
57 __raw_writeq((__force u64) val, dest); 58 __raw_writeq((__force u64) val, dest);
58} 59}
59 60
60static inline void mthca_write64(u32 val[2], void __iomem *dest, 61static inline void mthca_write64(__be32 val[2], void __iomem *dest,
61 spinlock_t *doorbell_lock) 62 spinlock_t *doorbell_lock)
62{ 63{
63 __raw_writeq(*(u64 *) val, dest); 64 __raw_writeq(*(u64 *) val, dest);
64} 65}
65 66
66static inline void mthca_write_db_rec(u32 val[2], u32 *db) 67static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
67{ 68{
68 *(u64 *) db = *(u64 *) val; 69 *(u64 *) db = *(u64 *) val;
69} 70}
@@ -86,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
86 __raw_writel(((__force u32 *) &val)[1], dest + 4); 87 __raw_writel(((__force u32 *) &val)[1], dest + 4);
87} 88}
88 89
89static inline void mthca_write64(u32 val[2], void __iomem *dest, 90static inline void mthca_write64(__be32 val[2], void __iomem *dest,
90 spinlock_t *doorbell_lock) 91 spinlock_t *doorbell_lock)
91{ 92{
92 unsigned long flags; 93 unsigned long flags;
93 94
94 spin_lock_irqsave(doorbell_lock, flags); 95 spin_lock_irqsave(doorbell_lock, flags);
95 __raw_writel(val[0], dest); 96 __raw_writel((__force u32) val[0], dest);
96 __raw_writel(val[1], dest + 4); 97 __raw_writel((__force u32) val[1], dest + 4);
97 spin_unlock_irqrestore(doorbell_lock, flags); 98 spin_unlock_irqrestore(doorbell_lock, flags);
98} 99}
99 100
100static inline void mthca_write_db_rec(u32 val[2], u32 *db) 101static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
101{ 102{
102 db[0] = val[0]; 103 db[0] = val[0];
103 wmb(); 104 wmb();
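
On 32-bit machines mthca_write64() cannot issue one 64-bit MMIO store, so the fallback above writes the two halves under a spinlock to keep doorbells from different CPUs from interleaving, and mthca_write_db_rec() separates its halves with wmb() so the hardware never sees a half-updated record. A pthread sketch of the locked two-half write, with ordinary memory in place of MMIO and a release fence standing in for wmb():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t doorbell_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint32_t dest[2];   /* stand-in for the 64-bit doorbell register */

/* Two 32-bit stores made atomic with respect to other writers by the
 * lock, as in the 32-bit mthca_write64() fallback; the release fence
 * plays the role wmb() plays between the halves of a doorbell record. */
static void write64(const uint32_t val[2])
{
	pthread_mutex_lock(&doorbell_lock);
	dest[0] = val[0];
	__atomic_thread_fence(__ATOMIC_RELEASE);
	dest[1] = val[1];
	pthread_mutex_unlock(&doorbell_lock);
}

int main(void)
{
	uint32_t db[2] = { 0xdeadbeef, 0x00c0ffee };

	write64(db);
	printf("%08x %08x\n", dest[0], dest[1]);
	return 0;
}
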
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cbcf2b4722e4..18f0981eb0c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -51,18 +52,18 @@ enum {
51 * Must be packed because start is 64 bits but only aligned to 32 bits. 52 * Must be packed because start is 64 bits but only aligned to 32 bits.
52 */ 53 */
53struct mthca_eq_context { 54struct mthca_eq_context {
54 u32 flags; 55 __be32 flags;
55 u64 start; 56 __be64 start;
56 u32 logsize_usrpage; 57 __be32 logsize_usrpage;
57 u32 tavor_pd; /* reserved for Arbel */ 58 __be32 tavor_pd; /* reserved for Arbel */
58 u8 reserved1[3]; 59 u8 reserved1[3];
59 u8 intr; 60 u8 intr;
60 u32 arbel_pd; /* lost_count for Tavor */ 61 __be32 arbel_pd; /* lost_count for Tavor */
61 u32 lkey; 62 __be32 lkey;
62 u32 reserved2[2]; 63 u32 reserved2[2];
63 u32 consumer_index; 64 __be32 consumer_index;
64 u32 producer_index; 65 __be32 producer_index;
65 u32 reserved3[4]; 66 u32 reserved3[4];
66} __attribute__((packed)); 67} __attribute__((packed));
67 68
68#define MTHCA_EQ_STATUS_OK ( 0 << 28) 69#define MTHCA_EQ_STATUS_OK ( 0 << 28)
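
As the retained comment says, this context layout places a 64-bit start field at a 32-bit-aligned offset, so without __attribute__((packed)) the compiler would pad before it and every later field would land four bytes off from where the hardware expects it. A quick offsetof()/sizeof check of the difference, on an illustrative three-field subset:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct eq_ctx_padded {          /* natural alignment: compiler pads */
	uint32_t flags;
	uint64_t start;
	uint32_t logsize_usrpage;
};

struct eq_ctx_packed {          /* hardware layout: no padding */
	uint32_t flags;
	uint64_t start;
	uint32_t logsize_usrpage;
} __attribute__((packed));

int main(void)
{
	printf("padded: start at %zu, size %zu\n",
	       offsetof(struct eq_ctx_padded, start),
	       sizeof(struct eq_ctx_padded));     /* typically 8 and 24 */
	printf("packed: start at %zu, size %zu\n",
	       offsetof(struct eq_ctx_packed, start),
	       sizeof(struct eq_ctx_packed));     /* 4 and 16 */
	return 0;
}
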
@@ -127,28 +128,28 @@ struct mthca_eqe {
127 union { 128 union {
128 u32 raw[6]; 129 u32 raw[6];
129 struct { 130 struct {
130 u32 cqn; 131 __be32 cqn;
131 } __attribute__((packed)) comp; 132 } __attribute__((packed)) comp;
132 struct { 133 struct {
133 u16 reserved1; 134 u16 reserved1;
134 u16 token; 135 __be16 token;
135 u32 reserved2; 136 u32 reserved2;
136 u8 reserved3[3]; 137 u8 reserved3[3];
137 u8 status; 138 u8 status;
138 u64 out_param; 139 __be64 out_param;
139 } __attribute__((packed)) cmd; 140 } __attribute__((packed)) cmd;
140 struct { 141 struct {
141 u32 qpn; 142 __be32 qpn;
142 } __attribute__((packed)) qp; 143 } __attribute__((packed)) qp;
143 struct { 144 struct {
144 u32 cqn; 145 __be32 cqn;
145 u32 reserved1; 146 u32 reserved1;
146 u8 reserved2[3]; 147 u8 reserved2[3];
147 u8 syndrome; 148 u8 syndrome;
148 } __attribute__((packed)) cq_err; 149 } __attribute__((packed)) cq_err;
149 struct { 150 struct {
150 u32 reserved1[2]; 151 u32 reserved1[2];
151 u32 port; 152 __be32 port;
152 } __attribute__((packed)) port_change; 153 } __attribute__((packed)) port_change;
153 } event; 154 } event;
154 u8 reserved3[3]; 155 u8 reserved3[3];
@@ -167,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev)
167 168
168static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) 169static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
169{ 170{
170 u32 doorbell[2]; 171 __be32 doorbell[2];
171 172
172 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); 173 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
173 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); 174 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));
@@ -190,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u
190{ 191{
191 /* See comment in tavor_set_eq_ci() above. */ 192 /* See comment in tavor_set_eq_ci() above. */
192 wmb(); 193 wmb();
193 __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + 194 __raw_writel((__force u32) cpu_to_be32(ci),
194 eq->eqn * 8); 195 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
195 /* We still want ordering, just not swabbing, so add a barrier */ 196 /* We still want ordering, just not swabbing, so add a barrier */
196 mb(); 197 mb();
197} 198}
@@ -206,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
206 207
207static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) 208static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
208{ 209{
209 u32 doorbell[2]; 210 __be32 doorbell[2];
210 211
211 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); 212 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
212 doorbell[1] = 0; 213 doorbell[1] = 0;
@@ -224,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
224static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) 225static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
225{ 226{
226 if (!mthca_is_memfree(dev)) { 227 if (!mthca_is_memfree(dev)) {
227 u32 doorbell[2]; 228 __be32 doorbell[2];
228 229
229 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); 230 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
230 doorbell[1] = cpu_to_be32(cqn); 231 doorbell[1] = cpu_to_be32(cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7df223642015..9804174f7f3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,9 +34,9 @@
32 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ 34 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
33 */ 35 */
34 36
35#include <ib_verbs.h> 37#include <rdma/ib_verbs.h>
36#include <ib_mad.h> 38#include <rdma/ib_mad.h>
37#include <ib_smi.h> 39#include <rdma/ib_smi.h>
38 40
39#include "mthca_dev.h" 41#include "mthca_dev.h"
40#include "mthca_cmd.h" 42#include "mthca_cmd.h"
@@ -192,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev,
192{ 194{
193 int err; 195 int err;
194 u8 status; 196 u8 status;
195 u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE; 197 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
196 198
197 /* Forward locally generated traps to the SM */ 199 /* Forward locally generated traps to the SM */
198 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 200 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 2ef916859e17..3241d6c9dc11 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,6 @@
34 */ 35 */
35 36
36#include <linux/config.h> 37#include <linux/config.h>
37#include <linux/version.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
@@ -171,6 +171,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
171 mdev->limits.reserved_mrws = dev_lim->reserved_mrws; 171 mdev->limits.reserved_mrws = dev_lim->reserved_mrws;
172 mdev->limits.reserved_uars = dev_lim->reserved_uars; 172 mdev->limits.reserved_uars = dev_lim->reserved_uars;
173 mdev->limits.reserved_pds = dev_lim->reserved_pds; 173 mdev->limits.reserved_pds = dev_lim->reserved_pds;
174 mdev->limits.port_width_cap = dev_lim->max_port_width;
174 175
175 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. 176 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
176 May be doable since hardware supports it for SRQ. 177 May be doable since hardware supports it for SRQ.
@@ -212,7 +213,6 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
212 struct mthca_dev_lim dev_lim; 213 struct mthca_dev_lim dev_lim;
213 struct mthca_profile profile; 214 struct mthca_profile profile;
214 struct mthca_init_hca_param init_hca; 215 struct mthca_init_hca_param init_hca;
215 struct mthca_adapter adapter;
216 216
217 err = mthca_SYS_EN(mdev, &status); 217 err = mthca_SYS_EN(mdev, &status);
218 if (err) { 218 if (err) {
@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
253 profile = default_profile; 253 profile = default_profile;
254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
255 profile.uarc_size = 0; 255 profile.uarc_size = 0;
256 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
257 profile.num_srq = dev_lim.max_srqs;
256 258
257 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 259 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
258 if (err < 0) 260 if (err < 0)
@@ -270,26 +272,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
270 goto err_disable; 272 goto err_disable;
271 } 273 }
272 274
273 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
274 if (err) {
275 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
276 goto err_close;
277 }
278 if (status) {
279 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
280 "aborting.\n", status);
281 err = -EINVAL;
282 goto err_close;
283 }
284
285 mdev->eq_table.inta_pin = adapter.inta_pin;
286 mdev->rev_id = adapter.revision_id;
287
288 return 0; 275 return 0;
289 276
290err_close:
291 mthca_CLOSE_HCA(mdev, 0, &status);
292
293err_disable: 277err_disable:
294 mthca_SYS_DIS(mdev, &status); 278 mthca_SYS_DIS(mdev, &status);
295 279
@@ -442,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
442 } 426 }
443 427
444 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, 428 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
445 dev_lim->cqc_entry_sz, 429 dev_lim->cqc_entry_sz,
446 mdev->limits.num_cqs, 430 mdev->limits.num_cqs,
447 mdev->limits.reserved_cqs, 0); 431 mdev->limits.reserved_cqs, 0);
448 if (!mdev->cq_table.table) { 432 if (!mdev->cq_table.table) {
449 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); 433 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
450 err = -ENOMEM; 434 err = -ENOMEM;
451 goto err_unmap_rdb; 435 goto err_unmap_rdb;
452 } 436 }
453 437
438 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
439 mdev->srq_table.table =
440 mthca_alloc_icm_table(mdev, init_hca->srqc_base,
441 dev_lim->srq_entry_sz,
442 mdev->limits.num_srqs,
443 mdev->limits.reserved_srqs, 0);
444 if (!mdev->srq_table.table) {
445 mthca_err(mdev, "Failed to map SRQ context memory, "
446 "aborting.\n");
447 err = -ENOMEM;
448 goto err_unmap_cq;
449 }
450 }
451
454 /* 452 /*
455 * It's not strictly required, but for simplicity just map the 453 * It's not strictly required, but for simplicity just map the
456 * whole multicast group table now. The table isn't very big 454 * whole multicast group table now. The table isn't very big
@@ -466,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
466 if (!mdev->mcg_table.table) { 464 if (!mdev->mcg_table.table) {
467 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); 465 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
468 err = -ENOMEM; 466 err = -ENOMEM;
469 goto err_unmap_cq; 467 goto err_unmap_srq;
470 } 468 }
471 469
472 return 0; 470 return 0;
473 471
472err_unmap_srq:
473 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
474 mthca_free_icm_table(mdev, mdev->srq_table.table);
475
474err_unmap_cq: 476err_unmap_cq:
475 mthca_free_icm_table(mdev, mdev->cq_table.table); 477 mthca_free_icm_table(mdev, mdev->cq_table.table);
476 478
@@ -506,7 +508,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
506 struct mthca_dev_lim dev_lim; 508 struct mthca_dev_lim dev_lim;
507 struct mthca_profile profile; 509 struct mthca_profile profile;
508 struct mthca_init_hca_param init_hca; 510 struct mthca_init_hca_param init_hca;
509 struct mthca_adapter adapter;
510 u64 icm_size; 511 u64 icm_size;
511 u8 status; 512 u8 status;
512 int err; 513 int err;
@@ -551,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
551 profile = default_profile; 552 profile = default_profile;
552 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 553 profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
553 profile.num_udav = 0; 554 profile.num_udav = 0;
555 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
556 profile.num_srq = dev_lim.max_srqs;
554 557
555 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 558 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
556 if ((int) icm_size < 0) { 559 if ((int) icm_size < 0) {
@@ -574,24 +577,11 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
574 goto err_free_icm; 577 goto err_free_icm;
575 } 578 }
576 579
577 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
578 if (err) {
579 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
580 goto err_free_icm;
581 }
582 if (status) {
583 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
584 "aborting.\n", status);
585 err = -EINVAL;
586 goto err_free_icm;
587 }
588
589 mdev->eq_table.inta_pin = adapter.inta_pin;
590 mdev->rev_id = adapter.revision_id;
591
592 return 0; 580 return 0;
593 581
594err_free_icm: 582err_free_icm:
583 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
584 mthca_free_icm_table(mdev, mdev->srq_table.table);
595 mthca_free_icm_table(mdev, mdev->cq_table.table); 585 mthca_free_icm_table(mdev, mdev->cq_table.table);
596 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 586 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
597 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); 587 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -614,12 +604,70 @@ err_disable:
614 return err; 604 return err;
615} 605}
616 606
607static void mthca_close_hca(struct mthca_dev *mdev)
608{
609 u8 status;
610
611 mthca_CLOSE_HCA(mdev, 0, &status);
612
613 if (mthca_is_memfree(mdev)) {
614 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
615 mthca_free_icm_table(mdev, mdev->srq_table.table);
616 mthca_free_icm_table(mdev, mdev->cq_table.table);
617 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
618 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
619 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
620 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
621 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
622 mthca_unmap_eq_icm(mdev);
623
624 mthca_UNMAP_ICM_AUX(mdev, &status);
625 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
626
627 mthca_UNMAP_FA(mdev, &status);
628 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
629
630 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
631 mthca_DISABLE_LAM(mdev, &status);
632 } else
633 mthca_SYS_DIS(mdev, &status);
634}
635
617static int __devinit mthca_init_hca(struct mthca_dev *mdev) 636static int __devinit mthca_init_hca(struct mthca_dev *mdev)
618{ 637{
638 u8 status;
639 int err;
640 struct mthca_adapter adapter;
641
619 if (mthca_is_memfree(mdev)) 642 if (mthca_is_memfree(mdev))
620 return mthca_init_arbel(mdev); 643 err = mthca_init_arbel(mdev);
621 else 644 else
622 return mthca_init_tavor(mdev); 645 err = mthca_init_tavor(mdev);
646
647 if (err)
648 return err;
649
650 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
651 if (err) {
652 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
653 goto err_close;
654 }
655 if (status) {
656 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
657 "aborting.\n", status);
658 err = -EINVAL;
659 goto err_close;
660 }
661
662 mdev->eq_table.inta_pin = adapter.inta_pin;
663 mdev->rev_id = adapter.revision_id;
664 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
665
666 return 0;
667
668err_close:
669 mthca_close_hca(mdev);
670 return err;
623} 671}
624 672
625static int __devinit mthca_setup_hca(struct mthca_dev *dev) 673static int __devinit mthca_setup_hca(struct mthca_dev *dev)
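
The effect of this reorganization is that mthca_QUERY_ADAPTER() and its error handling live once in mthca_init_hca() rather than being duplicated in the Tavor and Arbel paths, with mthca_close_hca() (moved ahead of its new caller) unwinding whichever variant succeeded. The resulting control flow, reduced to a stubbed skeleton:

#include <stdio.h>

static int  init_tavor(void)    { return 0; }  /* variant-specific bring-up */
static int  init_arbel(void)    { return 0; }
static int  query_adapter(void) { return 0; }  /* shared post-init step */
static void close_hca(void)     { puts("close_hca"); }

/* One wrapper does variant init, then the common QUERY_ADAPTER step,
 * with a single error path instead of one copy per HCA family. */
static int init_hca(int is_memfree)
{
	int err = is_memfree ? init_arbel() : init_tavor();
	if (err)
		return err;

	err = query_adapter();
	if (err)
		close_hca();    /* unwind whatever the variant set up */
	return err;
}

int main(void)
{
	return init_hca(1);
}
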
@@ -709,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
709 goto err_cmd_poll; 757 goto err_cmd_poll;
710 } 758 }
711 759
760 err = mthca_init_srq_table(dev);
761 if (err) {
762 mthca_err(dev, "Failed to initialize "
763 "shared receive queue table, aborting.\n");
764 goto err_cq_table_free;
765 }
766
712 err = mthca_init_qp_table(dev); 767 err = mthca_init_qp_table(dev);
713 if (err) { 768 if (err) {
714 mthca_err(dev, "Failed to initialize " 769 mthca_err(dev, "Failed to initialize "
715 "queue pair table, aborting.\n"); 770 "queue pair table, aborting.\n");
716 goto err_cq_table_free; 771 goto err_srq_table_free;
717 } 772 }
718 773
719 err = mthca_init_av_table(dev); 774 err = mthca_init_av_table(dev);
@@ -738,6 +793,9 @@ err_av_table_free:
738err_qp_table_free: 793err_qp_table_free:
739 mthca_cleanup_qp_table(dev); 794 mthca_cleanup_qp_table(dev);
740 795
796err_srq_table_free:
797 mthca_cleanup_srq_table(dev);
798
741err_cq_table_free: 799err_cq_table_free:
742 mthca_cleanup_cq_table(dev); 800 mthca_cleanup_cq_table(dev);
743 801
@@ -844,33 +902,6 @@ static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
844 return 0; 902 return 0;
845} 903}
846 904
847static void mthca_close_hca(struct mthca_dev *mdev)
848{
849 u8 status;
850
851 mthca_CLOSE_HCA(mdev, 0, &status);
852
853 if (mthca_is_memfree(mdev)) {
854 mthca_free_icm_table(mdev, mdev->cq_table.table);
855 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
856 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
857 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
858 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
859 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
860 mthca_unmap_eq_icm(mdev);
861
862 mthca_UNMAP_ICM_AUX(mdev, &status);
863 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
864
865 mthca_UNMAP_FA(mdev, &status);
866 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
867
868 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
869 mthca_DISABLE_LAM(mdev, &status);
870 } else
871 mthca_SYS_DIS(mdev, &status);
872}
873
874/* Types of supported HCA */ 905/* Types of supported HCA */
875enum { 906enum {
876 TAVOR, /* MT23108 */ 907 TAVOR, /* MT23108 */
@@ -887,9 +918,9 @@ static struct {
887 int is_memfree; 918 int is_memfree;
888 int is_pcie; 919 int is_pcie;
889} mthca_hca_table[] = { 920} mthca_hca_table[] = {
890 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 }, 921 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 },
891 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 }, 922 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 },
892 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 }, 923 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 },
893 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 } 924 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 }
894}; 925};
895 926
@@ -1051,6 +1082,7 @@ err_cleanup:
1051 mthca_cleanup_mcg_table(mdev); 1082 mthca_cleanup_mcg_table(mdev);
1052 mthca_cleanup_av_table(mdev); 1083 mthca_cleanup_av_table(mdev);
1053 mthca_cleanup_qp_table(mdev); 1084 mthca_cleanup_qp_table(mdev);
1085 mthca_cleanup_srq_table(mdev);
1054 mthca_cleanup_cq_table(mdev); 1086 mthca_cleanup_cq_table(mdev);
1055 mthca_cmd_use_polling(mdev); 1087 mthca_cmd_use_polling(mdev);
1056 mthca_cleanup_eq_table(mdev); 1088 mthca_cleanup_eq_table(mdev);
@@ -1100,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
1100 mthca_cleanup_mcg_table(mdev); 1132 mthca_cleanup_mcg_table(mdev);
1101 mthca_cleanup_av_table(mdev); 1133 mthca_cleanup_av_table(mdev);
1102 mthca_cleanup_qp_table(mdev); 1134 mthca_cleanup_qp_table(mdev);
1135 mthca_cleanup_srq_table(mdev);
1103 mthca_cleanup_cq_table(mdev); 1136 mthca_cleanup_cq_table(mdev);
1104 mthca_cmd_use_polling(mdev); 1137 mthca_cmd_use_polling(mdev);
1105 mthca_cleanup_eq_table(mdev); 1138 mthca_cleanup_eq_table(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 5be7d949dbf6..a2707605f4c8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -42,10 +42,10 @@ enum {
42}; 42};
43 43
44struct mthca_mgm { 44struct mthca_mgm {
45 u32 next_gid_index; 45 __be32 next_gid_index;
46 u32 reserved[3]; 46 u32 reserved[3];
47 u8 gid[16]; 47 u8 gid[16];
48 u32 qp[MTHCA_QP_PER_MGM]; 48 __be32 qp[MTHCA_QP_PER_MGM];
49}; 49};
50 50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 51static const u8 zero_gid[16]; /* automatically initialized to 0 */
@@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev,
94 if (0) 94 if (0)
95 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" 95 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
96 "%04x:%04x:%04x:%04x is %04x\n", 96 "%04x:%04x:%04x:%04x is %04x\n",
97 be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]), 97 be16_to_cpu(((__be16 *) gid)[0]),
98 be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]), 98 be16_to_cpu(((__be16 *) gid)[1]),
99 be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]), 99 be16_to_cpu(((__be16 *) gid)[2]),
100 be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]), 100 be16_to_cpu(((__be16 *) gid)[3]),
101 be16_to_cpu(((__be16 *) gid)[4]),
102 be16_to_cpu(((__be16 *) gid)[5]),
103 be16_to_cpu(((__be16 *) gid)[6]),
104 be16_to_cpu(((__be16 *) gid)[7]),
101 *hash); 105 *hash);
102 106
103 *index = *hash; 107 *index = *hash;
@@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
258 if (index == -1) { 262 if (index == -1) {
259 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " 263 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
260 "not found\n", 264 "not found\n",
261 be16_to_cpu(((u16 *) gid->raw)[0]), 265 be16_to_cpu(((__be16 *) gid->raw)[0]),
262 be16_to_cpu(((u16 *) gid->raw)[1]), 266 be16_to_cpu(((__be16 *) gid->raw)[1]),
263 be16_to_cpu(((u16 *) gid->raw)[2]), 267 be16_to_cpu(((__be16 *) gid->raw)[2]),
264 be16_to_cpu(((u16 *) gid->raw)[3]), 268 be16_to_cpu(((__be16 *) gid->raw)[3]),
265 be16_to_cpu(((u16 *) gid->raw)[4]), 269 be16_to_cpu(((__be16 *) gid->raw)[4]),
266 be16_to_cpu(((u16 *) gid->raw)[5]), 270 be16_to_cpu(((__be16 *) gid->raw)[5]),
267 be16_to_cpu(((u16 *) gid->raw)[6]), 271 be16_to_cpu(((__be16 *) gid->raw)[6]),
268 be16_to_cpu(((u16 *) gid->raw)[7])); 272 be16_to_cpu(((__be16 *) gid->raw)[7]));
269 err = -EINVAL; 273 err = -EINVAL;
270 goto out; 274 goto out;
271 } 275 }
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 2a8646150355..1827400f189b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -285,6 +286,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
285{ 286{
286 struct mthca_icm_table *table; 287 struct mthca_icm_table *table;
287 int num_icm; 288 int num_icm;
289 unsigned chunk_size;
288 int i; 290 int i;
289 u8 status; 291 u8 status;
290 292
@@ -305,7 +307,11 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
305 table->icm[i] = NULL; 307 table->icm[i] = NULL;
306 308
307 for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { 309 for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
308 table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, 310 chunk_size = MTHCA_TABLE_CHUNK_SIZE;
311 if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
312 chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;
313
314 table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
309 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | 315 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
310 __GFP_NOWARN); 316 __GFP_NOWARN);
311 if (!table->icm[i]) 317 if (!table->icm[i])
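
The new chunk_size clamp keeps the reserved-entry preload from mapping a full MTHCA_TABLE_CHUNK_SIZE when the table's total size, nobj * obj_size, ends partway through the last chunk. The arithmetic can be checked standalone with small made-up numbers:

#include <stdio.h>

#define CHUNK 4096              /* stand-in for MTHCA_TABLE_CHUNK_SIZE */

int main(void)
{
	int obj_size = 64, nobj = 100, reserved = 100;
	int total = nobj * obj_size;                     /* 6400 bytes */

	for (int i = 0; i * CHUNK < reserved * obj_size; ++i) {
		int chunk_size = CHUNK;
		if ((i + 1) * CHUNK > total)
			chunk_size = total - i * CHUNK;  /* clamp the last chunk */
		printf("chunk %d: %d bytes\n", i, chunk_size);
	}
	/* prints: chunk 0: 4096 bytes, chunk 1: 2304 bytes */
	return 0;
}
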
@@ -481,7 +487,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
481 } 487 }
482} 488}
483 489
484int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) 490int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
485{ 491{
486 int group; 492 int group;
487 int start, end, dir; 493 int start, end, dir;
@@ -564,7 +570,7 @@ found:
564 570
565 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); 571 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));
566 572
567 *db = (u32 *) &page->db_rec[j]; 573 *db = (__be32 *) &page->db_rec[j];
568 574
569out: 575out:
570 up(&dev->db_tab->mutex); 576 up(&dev->db_tab->mutex);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 4761d844cb5f..bafa51544aa3 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -137,7 +138,7 @@ enum {
137 138
138struct mthca_db_page { 139struct mthca_db_page {
139 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); 140 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
140 u64 *db_rec; 141 __be64 *db_rec;
141 dma_addr_t mapping; 142 dma_addr_t mapping;
142}; 143};
143 144
@@ -172,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
172 173
173int mthca_init_db_tab(struct mthca_dev *dev); 174int mthca_init_db_tab(struct mthca_dev *dev);
174void mthca_cleanup_db_tab(struct mthca_dev *dev); 175void mthca_cleanup_db_tab(struct mthca_dev *dev);
175int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db); 176int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
176void mthca_free_db(struct mthca_dev *dev, int type, int db_index); 177void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
177 178
178#endif /* MTHCA_MEMFREE_H */ 179#endif /* MTHCA_MEMFREE_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index cbe50feaf680..1f97a44477f5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -50,18 +51,18 @@ struct mthca_mtt {
50 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 51 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
51 */ 52 */
52struct mthca_mpt_entry { 53struct mthca_mpt_entry {
53 u32 flags; 54 __be32 flags;
54 u32 page_size; 55 __be32 page_size;
55 u32 key; 56 __be32 key;
56 u32 pd; 57 __be32 pd;
57 u64 start; 58 __be64 start;
58 u64 length; 59 __be64 length;
59 u32 lkey; 60 __be32 lkey;
60 u32 window_count; 61 __be32 window_count;
61 u32 window_count_limit; 62 __be32 window_count_limit;
62 u64 mtt_seg; 63 __be64 mtt_seg;
63 u32 mtt_sz; /* Arbel only */ 64 __be32 mtt_sz; /* Arbel only */
64 u32 reserved[2]; 65 u32 reserved[2];
65} __attribute__((packed)); 66} __attribute__((packed));
66 67
67#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) 68#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
@@ -247,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
247 int start_index, u64 *buffer_list, int list_len) 248 int start_index, u64 *buffer_list, int list_len)
248{ 249{
249 struct mthca_mailbox *mailbox; 250 struct mthca_mailbox *mailbox;
250 u64 *mtt_entry; 251 __be64 *mtt_entry;
251 int err = 0; 252 int err = 0;
252 u8 status; 253 u8 status;
253 int i; 254 int i;
@@ -389,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
389 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 390 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
390 if (i % 4 == 0) 391 if (i % 4 == 0)
391 printk("[%02x] ", i * 4); 392 printk("[%02x] ", i * 4);
392 printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 393 printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
393 if ((i + 1) % 4 == 0) 394 if ((i + 1) % 4 == 0)
394 printk("\n"); 395 printk("\n");
395 } 396 }
@@ -458,7 +459,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
458static void mthca_free_region(struct mthca_dev *dev, u32 lkey) 459static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
459{ 460{
460 mthca_table_put(dev, dev->mr_table.mpt_table, 461 mthca_table_put(dev, dev->mr_table.mpt_table,
461 arbel_key_to_hw_index(lkey)); 462 key_to_hw_index(dev, lkey));
462 463
463 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); 464 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
464} 465}
@@ -562,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
562 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 563 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
563 if (i % 4 == 0) 564 if (i % 4 == 0)
564 printk("[%02x] ", i * 4); 565 printk("[%02x] ", i * 4);
565 printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 566 printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
566 if ((i + 1) % 4 == 0) 567 if ((i + 1) % 4 == 0)
567 printk("\n"); 568 printk("\n");
568 } 569 }
@@ -669,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
669 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); 670 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
670 mpt_entry.start = cpu_to_be64(iova); 671 mpt_entry.start = cpu_to_be64(iova);
671 672
672 writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key); 673 __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
673 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, 674 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
674 offsetof(struct mthca_mpt_entry, window_count) - 675 offsetof(struct mthca_mpt_entry, window_count) -
675 offsetof(struct mthca_mpt_entry, start)); 676 offsetof(struct mthca_mpt_entry, start));
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index c2c899844e98..3dbf06a6e6f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 4fedc32d5871..0576056b34f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -101,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
101 profile[MTHCA_RES_UARC].size = request->uarc_size; 102 profile[MTHCA_RES_UARC].size = request->uarc_size;
102 103
103 profile[MTHCA_RES_QP].num = request->num_qp; 104 profile[MTHCA_RES_QP].num = request->num_qp;
105 profile[MTHCA_RES_SRQ].num = request->num_srq;
104 profile[MTHCA_RES_EQP].num = request->num_qp; 106 profile[MTHCA_RES_EQP].num = request->num_qp;
105 profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; 107 profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
106 profile[MTHCA_RES_CQ].num = request->num_cq; 108 profile[MTHCA_RES_CQ].num = request->num_cq;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 17aef3357661..94641808f97f 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +42,7 @@
41struct mthca_profile { 42struct mthca_profile {
42 int num_qp; 43 int num_qp;
43 int rdb_per_qp; 44 int rdb_per_qp;
45 int num_srq;
44 int num_cq; 46 int num_cq;
45 int num_mcg; 47 int num_mcg;
46 int num_mpt; 48 int num_mpt;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 81919a7b4935..1c1c2e230871 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
34 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ 36 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
35 */ 37 */
36 38
37#include <ib_smi.h> 39#include <rdma/ib_smi.h>
38#include <linux/mm.h> 40#include <linux/mm.h>
39 41
40#include "mthca_dev.h" 42#include "mthca_dev.h"
@@ -79,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev,
79 } 81 }
80 82
81 props->device_cap_flags = mdev->device_cap_flags; 83 props->device_cap_flags = mdev->device_cap_flags;
82 props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) & 84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
83 0xffffff; 85 0xffffff;
84 props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30)); 86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
85 props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32)); 87 props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
86 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
87 memcpy(&props->node_guid, out_mad->data + 12, 8); 89 memcpy(&props->node_guid, out_mad->data + 12, 8);
88 90
@@ -118,6 +120,8 @@ static int mthca_query_port(struct ib_device *ibdev,
118 if (!in_mad || !out_mad) 120 if (!in_mad || !out_mad)
119 goto out; 121 goto out;
120 122
123 memset(props, 0, sizeof *props);
124
121 memset(in_mad, 0, sizeof *in_mad); 125 memset(in_mad, 0, sizeof *in_mad);
122 in_mad->base_version = 1; 126 in_mad->base_version = 1;
123 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 127 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -136,16 +140,17 @@ static int mthca_query_port(struct ib_device *ibdev,
136 goto out; 140 goto out;
137 } 141 }
138 142
139 props->lid = be16_to_cpup((u16 *) (out_mad->data + 16)); 143 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
140 props->lmc = out_mad->data[34] & 0x7; 144 props->lmc = out_mad->data[34] & 0x7;
141 props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18)); 145 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
142 props->sm_sl = out_mad->data[36] & 0xf; 146 props->sm_sl = out_mad->data[36] & 0xf;
143 props->state = out_mad->data[32] & 0xf; 147 props->state = out_mad->data[32] & 0xf;
144 props->phys_state = out_mad->data[33] >> 4; 148 props->phys_state = out_mad->data[33] >> 4;
145 props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20)); 149 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
146 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; 150 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
151 props->max_msg_sz = 0x80000000;
147 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; 152 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
148 props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48)); 153 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
149 props->active_width = out_mad->data[31] & 0xf; 154 props->active_width = out_mad->data[31] & 0xf;
150 props->active_speed = out_mad->data[35] >> 4; 155 props->active_speed = out_mad->data[35] >> 4;
151 156
@@ -221,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
221 goto out; 226 goto out;
222 } 227 }
223 228
224 *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]); 229 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
225 230
226 out: 231 out:
227 kfree(in_mad); 232 kfree(in_mad);
@@ -420,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah)
420 return 0; 425 return 0;
421} 426}
422 427
428static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
429 struct ib_srq_init_attr *init_attr,
430 struct ib_udata *udata)
431{
432 struct mthca_create_srq ucmd;
433 struct mthca_ucontext *context = NULL;
434 struct mthca_srq *srq;
435 int err;
436
437 srq = kmalloc(sizeof *srq, GFP_KERNEL);
438 if (!srq)
439 return ERR_PTR(-ENOMEM);
440
441 if (pd->uobject) {
442 context = to_mucontext(pd->uobject->context);
443
444 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
445 err = -EFAULT;
446 goto err_free;
447 }
446
447 err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
448 context->db_tab, ucmd.db_index,
449 ucmd.db_page);
450
451 if (err)
452 goto err_free;
453
454 srq->mr.ibmr.lkey = ucmd.lkey;
455 srq->db_index = ucmd.db_index;
456 }
457
458 err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
459 &init_attr->attr, srq);
460
461 if (err && pd->uobject)
462 mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
463 context->db_tab, ucmd.db_index);
464
465 if (err)
466 goto err_free;
467
468 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
469 mthca_free_srq(to_mdev(pd->device), srq);
470 err = -EFAULT;
471 goto err_free;
472 }
473
474 return &srq->ibsrq;
475
476err_free:
477 kfree(srq);
478
479 return ERR_PTR(err);
480}
481
482static int mthca_destroy_srq(struct ib_srq *srq)
483{
484 struct mthca_ucontext *context;
485
486 if (srq->uobject) {
487 context = to_mucontext(srq->uobject->context);
488
489 mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
490 context->db_tab, to_msrq(srq)->db_index);
491 }
492
493 mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
494 kfree(srq);
495
496 return 0;
497}
498
423static struct ib_qp *mthca_create_qp(struct ib_pd *pd, 499static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
424 struct ib_qp_init_attr *init_attr, 500 struct ib_qp_init_attr *init_attr,
425 struct ib_udata *udata) 501 struct ib_udata *udata)
@@ -956,14 +1032,22 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
956 } 1032 }
957} 1033}
958 1034
1035static ssize_t show_board(struct class_device *cdev, char *buf)
1036{
1037 struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
1038 return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
1039}
1040
959static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1041static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
960static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1042static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
961static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 1043static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1044static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
962 1045
963static struct class_device_attribute *mthca_class_attributes[] = { 1046static struct class_device_attribute *mthca_class_attributes[] = {
964 &class_device_attr_hw_rev, 1047 &class_device_attr_hw_rev,
965 &class_device_attr_fw_ver, 1048 &class_device_attr_fw_ver,
966 &class_device_attr_hca_type 1049 &class_device_attr_hca_type,
1050 &class_device_attr_board_id
967}; 1051};
968 1052
969int mthca_register_device(struct mthca_dev *dev) 1053int mthca_register_device(struct mthca_dev *dev)
@@ -990,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev)
990 dev->ib_dev.dealloc_pd = mthca_dealloc_pd; 1074 dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
991 dev->ib_dev.create_ah = mthca_ah_create; 1075 dev->ib_dev.create_ah = mthca_ah_create;
992 dev->ib_dev.destroy_ah = mthca_ah_destroy; 1076 dev->ib_dev.destroy_ah = mthca_ah_destroy;
1077
1078 if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
1079 dev->ib_dev.create_srq = mthca_create_srq;
1080 dev->ib_dev.destroy_srq = mthca_destroy_srq;
1081
1082 if (mthca_is_memfree(dev))
1083 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
1084 else
1085 dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
1086 }
1087
993 dev->ib_dev.create_qp = mthca_create_qp; 1088 dev->ib_dev.create_qp = mthca_create_qp;
994 dev->ib_dev.modify_qp = mthca_modify_qp; 1089 dev->ib_dev.modify_qp = mthca_modify_qp;
995 dev->ib_dev.destroy_qp = mthca_destroy_qp; 1090 dev->ib_dev.destroy_qp = mthca_destroy_qp;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1d032791cc8b..bcd4b01a339c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,8 +37,8 @@
36#ifndef MTHCA_PROVIDER_H 37#ifndef MTHCA_PROVIDER_H
37#define MTHCA_PROVIDER_H 38#define MTHCA_PROVIDER_H
38 39
39#include <ib_verbs.h> 40#include <rdma/ib_verbs.h>
40#include <ib_pack.h> 41#include <rdma/ib_pack.h>
41 42
42#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) 43#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
43#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) 44#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)
@@ -50,6 +51,11 @@ struct mthca_buf_list {
50 DECLARE_PCI_UNMAP_ADDR(mapping) 51 DECLARE_PCI_UNMAP_ADDR(mapping)
51}; 52};
52 53
54union mthca_buf {
55 struct mthca_buf_list direct;
56 struct mthca_buf_list *page_list;
57};
58
53struct mthca_uar { 59struct mthca_uar {
54 unsigned long pfn; 60 unsigned long pfn;
55 int index; 61 int index;
@@ -181,19 +187,39 @@ struct mthca_cq {
181 187
182 /* Next fields are Arbel only */ 188 /* Next fields are Arbel only */
183 int set_ci_db_index; 189 int set_ci_db_index;
184 u32 *set_ci_db; 190 __be32 *set_ci_db;
185 int arm_db_index; 191 int arm_db_index;
186 u32 *arm_db; 192 __be32 *arm_db;
187 int arm_sn; 193 int arm_sn;
188 194
189 union { 195 union mthca_buf queue;
190 struct mthca_buf_list direct;
191 struct mthca_buf_list *page_list;
192 } queue;
193 struct mthca_mr mr; 196 struct mthca_mr mr;
194 wait_queue_head_t wait; 197 wait_queue_head_t wait;
195}; 198};
196 199
200struct mthca_srq {
201 struct ib_srq ibsrq;
202 spinlock_t lock;
203 atomic_t refcount;
204 int srqn;
205 int max;
206 int max_gs;
207 int wqe_shift;
208 int first_free;
209 int last_free;
210 u16 counter; /* Arbel only */
211 int db_index; /* Arbel only */
212 __be32 *db; /* Arbel only */
213 void *last;
214
215 int is_direct;
216 u64 *wrid;
217 union mthca_buf queue;
218 struct mthca_mr mr;
219
220 wait_queue_head_t wait;
221};
222
197struct mthca_wq { 223struct mthca_wq {
198 spinlock_t lock; 224 spinlock_t lock;
199 int max; 225 int max;
@@ -206,7 +232,7 @@ struct mthca_wq {
206 int wqe_shift; 232 int wqe_shift;
207 233
208 int db_index; /* Arbel only */ 234 int db_index; /* Arbel only */
209 u32 *db; 235 __be32 *db;
210}; 236};
211 237
212struct mthca_qp { 238struct mthca_qp {
@@ -227,10 +253,7 @@ struct mthca_qp {
227 int send_wqe_offset; 253 int send_wqe_offset;
228 254
229 u64 *wrid; 255 u64 *wrid;
230 union { 256 union mthca_buf queue;
231 struct mthca_buf_list direct;
232 struct mthca_buf_list *page_list;
233 } queue;
234 257
235 wait_queue_head_t wait; 258 wait_queue_head_t wait;
236}; 259};
@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
277 return container_of(ibcq, struct mthca_cq, ibcq); 300 return container_of(ibcq, struct mthca_cq, ibcq);
278} 301}
279 302
303static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
304{
305 return container_of(ibsrq, struct mthca_srq, ibsrq);
306}
307
280static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) 308static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
281{ 309{
282 return container_of(ibqp, struct mthca_qp, ibqp); 310 return container_of(ibqp, struct mthca_qp, ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f7126b14d5ae..0164b84d4ec6 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,13 +37,14 @@
35 37
36#include <linux/init.h> 38#include <linux/init.h>
37 39
38#include <ib_verbs.h> 40#include <rdma/ib_verbs.h>
39#include <ib_cache.h> 41#include <rdma/ib_cache.h>
40#include <ib_pack.h> 42#include <rdma/ib_pack.h>
41 43
42#include "mthca_dev.h" 44#include "mthca_dev.h"
43#include "mthca_cmd.h" 45#include "mthca_cmd.h"
44#include "mthca_memfree.h" 46#include "mthca_memfree.h"
47#include "mthca_wqe.h"
45 48
46enum { 49enum {
47 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, 50 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
@@ -95,62 +98,62 @@ enum {
95}; 98};
96 99
97struct mthca_qp_path { 100struct mthca_qp_path {
98 u32 port_pkey; 101 __be32 port_pkey;
99 u8 rnr_retry; 102 u8 rnr_retry;
100 u8 g_mylmc; 103 u8 g_mylmc;
101 u16 rlid; 104 __be16 rlid;
102 u8 ackto; 105 u8 ackto;
103 u8 mgid_index; 106 u8 mgid_index;
104 u8 static_rate; 107 u8 static_rate;
105 u8 hop_limit; 108 u8 hop_limit;
106 u32 sl_tclass_flowlabel; 109 __be32 sl_tclass_flowlabel;
107 u8 rgid[16]; 110 u8 rgid[16];
108} __attribute__((packed)); 111} __attribute__((packed));
109 112
110struct mthca_qp_context { 113struct mthca_qp_context {
111 u32 flags; 114 __be32 flags;
112 u32 tavor_sched_queue; /* Reserved on Arbel */ 115 __be32 tavor_sched_queue; /* Reserved on Arbel */
113 u8 mtu_msgmax; 116 u8 mtu_msgmax;
114 u8 rq_size_stride; /* Reserved on Tavor */ 117 u8 rq_size_stride; /* Reserved on Tavor */
115 u8 sq_size_stride; /* Reserved on Tavor */ 118 u8 sq_size_stride; /* Reserved on Tavor */
116 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 119 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
117 u32 usr_page; 120 __be32 usr_page;
118 u32 local_qpn; 121 __be32 local_qpn;
119 u32 remote_qpn; 122 __be32 remote_qpn;
120 u32 reserved1[2]; 123 u32 reserved1[2];
121 struct mthca_qp_path pri_path; 124 struct mthca_qp_path pri_path;
122 struct mthca_qp_path alt_path; 125 struct mthca_qp_path alt_path;
123 u32 rdd; 126 __be32 rdd;
124 u32 pd; 127 __be32 pd;
125 u32 wqe_base; 128 __be32 wqe_base;
126 u32 wqe_lkey; 129 __be32 wqe_lkey;
127 u32 params1; 130 __be32 params1;
128 u32 reserved2; 131 __be32 reserved2;
129 u32 next_send_psn; 132 __be32 next_send_psn;
130 u32 cqn_snd; 133 __be32 cqn_snd;
131 u32 snd_wqe_base_l; /* Next send WQE on Tavor */ 134 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */
132 u32 snd_db_index; /* (debugging only entries) */ 135 __be32 snd_db_index; /* (debugging only entries) */
133 u32 last_acked_psn; 136 __be32 last_acked_psn;
134 u32 ssn; 137 __be32 ssn;
135 u32 params2; 138 __be32 params2;
136 u32 rnr_nextrecvpsn; 139 __be32 rnr_nextrecvpsn;
137 u32 ra_buff_indx; 140 __be32 ra_buff_indx;
138 u32 cqn_rcv; 141 __be32 cqn_rcv;
139 u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 142 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
140 u32 rcv_db_index; /* (debugging only entries) */ 143 __be32 rcv_db_index; /* (debugging only entries) */
141 u32 qkey; 144 __be32 qkey;
142 u32 srqn; 145 __be32 srqn;
143 u32 rmsn; 146 __be32 rmsn;
144 u16 rq_wqe_counter; /* reserved on Tavor */ 147 __be16 rq_wqe_counter; /* reserved on Tavor */
145 u16 sq_wqe_counter; /* reserved on Tavor */ 148 __be16 sq_wqe_counter; /* reserved on Tavor */
146 u32 reserved3[18]; 149 u32 reserved3[18];
147} __attribute__((packed)); 150} __attribute__((packed));
148 151
149struct mthca_qp_param { 152struct mthca_qp_param {
150 u32 opt_param_mask; 153 __be32 opt_param_mask;
151 u32 reserved1; 154 u32 reserved1;
152 struct mthca_qp_context context; 155 struct mthca_qp_context context;
153 u32 reserved2[62]; 156 u32 reserved2[62];
154} __attribute__((packed)); 157} __attribute__((packed));
155 158
156enum { 159enum {
@@ -173,80 +176,6 @@ enum {
173 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 176 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16
174}; 177};
175 178
176enum {
177 MTHCA_NEXT_DBD = 1 << 7,
178 MTHCA_NEXT_FENCE = 1 << 6,
179 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
180 MTHCA_NEXT_EVENT_GEN = 1 << 2,
181 MTHCA_NEXT_SOLICIT = 1 << 1,
182
183 MTHCA_MLX_VL15 = 1 << 17,
184 MTHCA_MLX_SLR = 1 << 16
185};
186
187enum {
188 MTHCA_INVAL_LKEY = 0x100
189};
190
191struct mthca_next_seg {
192 u32 nda_op; /* [31:6] next WQE [4:0] next opcode */
193 u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
194 u32 flags; /* [3] CQ [2] Event [1] Solicit */
195 u32 imm; /* immediate data */
196};
197
198struct mthca_tavor_ud_seg {
199 u32 reserved1;
200 u32 lkey;
201 u64 av_addr;
202 u32 reserved2[4];
203 u32 dqpn;
204 u32 qkey;
205 u32 reserved3[2];
206};
207
208struct mthca_arbel_ud_seg {
209 u32 av[8];
210 u32 dqpn;
211 u32 qkey;
212 u32 reserved[2];
213};
214
215struct mthca_bind_seg {
216 u32 flags; /* [31] Atomic [30] rem write [29] rem read */
217 u32 reserved;
218 u32 new_rkey;
219 u32 lkey;
220 u64 addr;
221 u64 length;
222};
223
224struct mthca_raddr_seg {
225 u64 raddr;
226 u32 rkey;
227 u32 reserved;
228};
229
230struct mthca_atomic_seg {
231 u64 swap_add;
232 u64 compare;
233};
234
235struct mthca_data_seg {
236 u32 byte_count;
237 u32 lkey;
238 u64 addr;
239};
240
241struct mthca_mlx_seg {
242 u32 nda_op;
243 u32 nds;
244 u32 flags; /* [17] VL15 [16] SLR [14:12] static rate
245 [11:8] SL [3] C [2] E */
246 u16 rlid;
247 u16 vcrc;
248};
249
250static const u8 mthca_opcode[] = { 179static const u8 mthca_opcode[] = {
251 [IB_WR_SEND] = MTHCA_OPCODE_SEND, 180 [IB_WR_SEND] = MTHCA_OPCODE_SEND,
252 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, 181 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM,
@@ -573,12 +502,11 @@ static void init_port(struct mthca_dev *dev, int port)
573 502
574 memset(&param, 0, sizeof param); 503 memset(&param, 0, sizeof param);
575 504
576 param.enable_1x = 1; 505 param.port_width = dev->limits.port_width_cap;
577 param.enable_4x = 1; 506 param.vl_cap = dev->limits.vl_cap;
578 param.vl_cap = dev->limits.vl_cap; 507 param.mtu_cap = dev->limits.mtu_cap;
579 param.mtu_cap = dev->limits.mtu_cap; 508 param.gid_cap = dev->limits.gid_table_len;
580 param.gid_cap = dev->limits.gid_table_len; 509 param.pkey_cap = dev->limits.pkey_table_len;
581 param.pkey_cap = dev->limits.pkey_table_len;
582 510
583 err = mthca_INIT_IB(dev, &param, port, &status); 511 err = mthca_INIT_IB(dev, &param, port, &status);
584 if (err) 512 if (err)
@@ -684,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
684 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 612 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
685 613
686 if (mthca_is_memfree(dev)) { 614 if (mthca_is_memfree(dev)) {
687 qp_context->rq_size_stride = 615 if (qp->rq.max)
688 ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); 616 qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
689 qp_context->sq_size_stride = 617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
690 ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); 618
619 if (qp->sq.max)
620 qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
691 } 622 }
692 623
693 /* leave arbel_sched_queue as 0 */ 624 /* leave arbel_sched_queue as 0 */
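
/*
 * Illustrative sketch (not part of the patch): the size/stride byte built
 * in the hunk above packs log2 of the queue depth into bits 7:3 and the
 * WQE stride (wqe_shift - 4) into bits 2:0.  The queue geometry is made up.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int rq_max = 256, wqe_shift = 6;	/* hypothetical queue */
	uint8_t rq_size_stride = 0;

	if (rq_max)
		rq_size_stride = 8 << 3;	/* long_log2(256) == 8 */
	rq_size_stride |= wqe_shift - 4;

	assert(rq_size_stride == 0x42);
	return 0;
}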
@@ -856,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
856 787
857 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 788 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
858 789
790 if (ibqp->srq)
791 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
792
859 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 793 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
860 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 794 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
861 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); 795 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
@@ -878,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
878 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 812 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
879 } 813 }
880 814
815 if (ibqp->srq)
816 qp_context->srqn = cpu_to_be32(1 << 24 |
817 to_msrq(ibqp->srq)->srqn);
818
881 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 819 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
882 qp->qpn, 0, mailbox, 0, &status); 820 qp->qpn, 0, mailbox, 0, &status);
883 if (status) { 821 if (status) {
@@ -925,10 +863,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
925 struct mthca_qp *qp) 863 struct mthca_qp *qp)
926{ 864{
927 int size; 865 int size;
928 int i;
929 int npages, shift;
930 dma_addr_t t;
931 u64 *dma_list = NULL;
932 int err = -ENOMEM; 866 int err = -ENOMEM;
933 867
934 size = sizeof (struct mthca_next_seg) + 868 size = sizeof (struct mthca_next_seg) +
@@ -978,116 +912,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
978 if (!qp->wrid) 912 if (!qp->wrid)
979 goto err_out; 913 goto err_out;
980 914
981 if (size <= MTHCA_MAX_DIRECT_QP_SIZE) { 915 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
982 qp->is_direct = 1; 916 &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
983 npages = 1;
984 shift = get_order(size) + PAGE_SHIFT;
985
986 if (0)
987 mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
988 size, shift);
989
990 qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
991 &t, GFP_KERNEL);
992 if (!qp->queue.direct.buf)
993 goto err_out;
994
995 pci_unmap_addr_set(&qp->queue.direct, mapping, t);
996
997 memset(qp->queue.direct.buf, 0, size);
998
999 while (t & ((1 << shift) - 1)) {
1000 --shift;
1001 npages *= 2;
1002 }
1003
1004 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
1005 if (!dma_list)
1006 goto err_out_free;
1007
1008 for (i = 0; i < npages; ++i)
1009 dma_list[i] = t + i * (1 << shift);
1010 } else {
1011 qp->is_direct = 0;
1012 npages = size / PAGE_SIZE;
1013 shift = PAGE_SHIFT;
1014
1015 if (0)
1016 mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
1017
1018 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
1019 if (!dma_list)
1020 goto err_out;
1021
1022 qp->queue.page_list = kmalloc(npages *
1023 sizeof *qp->queue.page_list,
1024 GFP_KERNEL);
1025 if (!qp->queue.page_list)
1026 goto err_out;
1027
1028 for (i = 0; i < npages; ++i) {
1029 qp->queue.page_list[i].buf =
1030 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
1031 &t, GFP_KERNEL);
1032 if (!qp->queue.page_list[i].buf)
1033 goto err_out_free;
1034
1035 memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
1036
1037 pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
1038 dma_list[i] = t;
1039 }
1040 }
1041
1042 err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
1043 npages, 0, size,
1044 MTHCA_MPT_FLAG_LOCAL_READ,
1045 &qp->mr);
1046 if (err) 917 if (err)
1047 goto err_out_free; 918 goto err_out;
1048 919
1049 kfree(dma_list);
1050 return 0; 920 return 0;
1051 921
1052 err_out_free: 922err_out:
1053 if (qp->is_direct) {
1054 dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
1055 pci_unmap_addr(&qp->queue.direct, mapping));
1056 } else
1057 for (i = 0; i < npages; ++i) {
1058 if (qp->queue.page_list[i].buf)
1059 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
1060 qp->queue.page_list[i].buf,
1061 pci_unmap_addr(&qp->queue.page_list[i],
1062 mapping));
1063
1064 }
1065
1066 err_out:
1067 kfree(qp->wrid); 923 kfree(qp->wrid);
1068 kfree(dma_list);
1069 return err; 924 return err;
1070} 925}
1071 926
1072static void mthca_free_wqe_buf(struct mthca_dev *dev, 927static void mthca_free_wqe_buf(struct mthca_dev *dev,
1073 struct mthca_qp *qp) 928 struct mthca_qp *qp)
1074{ 929{
1075 int i; 930 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
1076 int size = PAGE_ALIGN(qp->send_wqe_offset + 931 (qp->sq.max << qp->sq.wqe_shift)),
1077 (qp->sq.max << qp->sq.wqe_shift)); 932 &qp->queue, qp->is_direct, &qp->mr);
1078
1079 if (qp->is_direct) {
1080 dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
1081 pci_unmap_addr(&qp->queue.direct, mapping));
1082 } else {
1083 for (i = 0; i < size / PAGE_SIZE; ++i) {
1084 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
1085 qp->queue.page_list[i].buf,
1086 pci_unmap_addr(&qp->queue.page_list[i],
1087 mapping));
1088 }
1089 }
1090
1091 kfree(qp->wrid); 933 kfree(qp->wrid);
1092} 934}
1093 935
@@ -1428,11 +1270,12 @@ void mthca_free_qp(struct mthca_dev *dev,
1428 * unref the mem-free tables and free the QPN in our table. 1270 * unref the mem-free tables and free the QPN in our table.
1429 */ 1271 */
1430 if (!qp->ibqp.uobject) { 1272 if (!qp->ibqp.uobject) {
1431 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); 1273 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
1274 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1432 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1275 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1433 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); 1276 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
1277 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1434 1278
1435 mthca_free_mr(dev, &qp->mr);
1436 mthca_free_memfree(dev, qp); 1279 mthca_free_memfree(dev, qp);
1437 mthca_free_wqe_buf(dev, qp); 1280 mthca_free_wqe_buf(dev, qp);
1438 } 1281 }
@@ -1457,6 +1300,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1457{ 1300{
1458 int header_size; 1301 int header_size;
1459 int err; 1302 int err;
1303 u16 pkey;
1460 1304
1461 ib_ud_header_init(256, /* assume a MAD */ 1305 ib_ud_header_init(256, /* assume a MAD */
1462 sqp->ud_header.grh_present, 1306 sqp->ud_header.grh_present,
@@ -1467,8 +1311,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1467 return err; 1311 return err;
1468 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1312 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1469 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1313 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1470 (sqp->ud_header.lrh.destination_lid == 0xffff ? 1314 (sqp->ud_header.lrh.destination_lid ==
1471 MTHCA_MLX_SLR : 0) | 1315 IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1472 (sqp->ud_header.lrh.service_level << 8)); 1316 (sqp->ud_header.lrh.service_level << 8));
1473 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1317 mlx->rlid = sqp->ud_header.lrh.destination_lid;
1474 mlx->vcrc = 0; 1318 mlx->vcrc = 0;
@@ -1488,18 +1332,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1488 } 1332 }
1489 1333
1490 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1334 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
1491 if (sqp->ud_header.lrh.destination_lid == 0xffff) 1335 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1492 sqp->ud_header.lrh.source_lid = 0xffff; 1336 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1493 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1337 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1494 if (!sqp->qp.ibqp.qp_num) 1338 if (!sqp->qp.ibqp.qp_num)
1495 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1339 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1496 sqp->pkey_index, 1340 sqp->pkey_index, &pkey);
1497 &sqp->ud_header.bth.pkey);
1498 else 1341 else
1499 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1342 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1500 wr->wr.ud.pkey_index, 1343 wr->wr.ud.pkey_index, &pkey);
1501 &sqp->ud_header.bth.pkey); 1344 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1502 cpu_to_be16s(&sqp->ud_header.bth.pkey);
1503 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1345 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1504 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1346 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1505 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 1347 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
@@ -1742,7 +1584,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1742 1584
1743out: 1585out:
1744 if (likely(nreq)) { 1586 if (likely(nreq)) {
1745 u32 doorbell[2]; 1587 __be32 doorbell[2];
1746 1588
1747 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + 1589 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
1748 qp->send_wqe_offset) | f0 | op0); 1590 qp->send_wqe_offset) | f0 | op0);
@@ -1843,7 +1685,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1843 1685
1844out: 1686out:
1845 if (likely(nreq)) { 1687 if (likely(nreq)) {
1846 u32 doorbell[2]; 1688 __be32 doorbell[2];
1847 1689
1848 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1690 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
1849 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); 1691 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -2064,7 +1906,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2064 1906
2065out: 1907out:
2066 if (likely(nreq)) { 1908 if (likely(nreq)) {
2067 u32 doorbell[2]; 1909 __be32 doorbell[2];
2068 1910
2069 doorbell[0] = cpu_to_be32((nreq << 24) | 1911 doorbell[0] = cpu_to_be32((nreq << 24) |
2070 ((qp->sq.head & 0xffff) << 8) | 1912 ((qp->sq.head & 0xffff) << 8) |
@@ -2174,19 +2016,25 @@ out:
2174} 2016}
2175 2017
2176int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2018int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2177 int index, int *dbd, u32 *new_wqe) 2019 int index, int *dbd, __be32 *new_wqe)
2178{ 2020{
2179 struct mthca_next_seg *next; 2021 struct mthca_next_seg *next;
2180 2022
2023 /*
2024 * For SRQs, all WQEs generate a CQE, so we're always at the
2025 * end of the doorbell chain.
2026 */
2027 if (qp->ibqp.srq) {
2028 *new_wqe = 0;
2029 return 0;
2030 }
2031
2181 if (is_send) 2032 if (is_send)
2182 next = get_send_wqe(qp, index); 2033 next = get_send_wqe(qp, index);
2183 else 2034 else
2184 next = get_recv_wqe(qp, index); 2035 next = get_recv_wqe(qp, index);
2185 2036
2186 if (mthca_is_memfree(dev)) 2037 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2187 *dbd = 1;
2188 else
2189 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2190 if (next->ee_nds & cpu_to_be32(0x3f)) 2038 if (next->ee_nds & cpu_to_be32(0x3f))
2191 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | 2039 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2192 (next->ee_nds & cpu_to_be32(0x3f)); 2040 (next->ee_nds & cpu_to_be32(0x3f));
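
/*
 * Illustrative sketch (not part of the patch): how the non-SRQ branch of
 * mthca_free_err_wqe() above decodes the next segment, on made-up
 * host-order values (the driver works on big-endian fields).  DBD is
 * bit 7 of ee_nds; the new WQE pointer keeps nda_op's upper bits plus
 * the size bits of ee_nds.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t nda_op = 0x1240;	/* hypothetical next WQE address */
	uint32_t ee_nds = 0x8a;		/* DBD set, next WQE size 10 */
	uint32_t new_wqe = 0;
	int dbd;

	dbd = !!(ee_nds & (1 << 7));
	if (ee_nds & 0x3f)
		new_wqe = (nda_op & ~0x3f) | (ee_nds & 0x3f);

	assert(dbd == 1 && new_wqe == 0x124a);
	return 0;
}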
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
new file mode 100644
index 000000000000..75cd2d84ef12
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -0,0 +1,591 @@
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
33 */
34
35#include "mthca_dev.h"
36#include "mthca_cmd.h"
37#include "mthca_memfree.h"
38#include "mthca_wqe.h"
39
40enum {
41 MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
42};
43
44struct mthca_tavor_srq_context {
45 __be64 wqe_base_ds; /* low 6 bits is descriptor size */
46 __be32 state_pd;
47 __be32 lkey;
48 __be32 uar;
49 __be32 wqe_cnt;
50 u32 reserved[2];
51};
52
53struct mthca_arbel_srq_context {
54 __be32 state_logsize_srqn;
55 __be32 lkey;
56 __be32 db_index;
57 __be32 logstride_usrpage;
58 __be64 wqe_base;
59 __be32 eq_pd;
60 __be16 limit_watermark;
61 __be16 wqe_cnt;
62 u16 reserved1;
63 __be16 wqe_counter;
64 u32 reserved2[3];
65};
66
67static void *get_wqe(struct mthca_srq *srq, int n)
68{
69 if (srq->is_direct)
70 return srq->queue.direct.buf + (n << srq->wqe_shift);
71 else
72 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
73 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
74}
75
76/*
77 * Return a pointer to the location within a WQE that we're using as a
78 * link when the WQE is in the free list. We use an offset of 4
79 * because in the Tavor case, posting a WQE may overwrite the first
80 * four bytes of the previous WQE. The offset avoids corrupting our
81 * free list if the WQE has already completed and been put on the free
82 * list when we post the next WQE.
83 */
84static inline int *wqe_to_link(void *wqe)
85{
86 return (int *) (wqe + 4);
87}
88
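
/*
 * Illustrative sketch (not part of the patch): a user-space model of the
 * free-list link above.  Only the offset-of-4 trick comes from the driver;
 * the buffer size, WQE shift, and helper names are made up.
 */
#include <assert.h>
#include <string.h>

#define WQE_SHIFT 6				/* hypothetical 64-byte WQEs */

static char buf[4 << WQE_SHIFT] __attribute__((aligned(4)));

static int *link_of(void *wqe)			/* models wqe_to_link() */
{
	return (int *) ((char *) wqe + 4);
}

int main(void)
{
	int i;

	/* Chain WQE i to i + 1 and terminate the list with -1. */
	for (i = 0; i < 4; ++i)
		*link_of(buf + (i << WQE_SHIFT)) = i < 3 ? i + 1 : -1;

	/* Clobbering the first 4 bytes, as a Tavor post may, is harmless. */
	memset(buf, 0xff, 4);
	assert(*link_of(buf) == 1);

	return 0;
}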
89static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
90 struct mthca_pd *pd,
91 struct mthca_srq *srq,
92 struct mthca_tavor_srq_context *context)
93{
94 memset(context, 0, sizeof *context);
95
96 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
97 context->state_pd = cpu_to_be32(pd->pd_num);
98 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
99
100 if (pd->ibpd.uobject)
101 context->uar =
102 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
103 else
104 context->uar = cpu_to_be32(dev->driver_uar.index);
105}
106
107static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
108 struct mthca_pd *pd,
109 struct mthca_srq *srq,
110 struct mthca_arbel_srq_context *context)
111{
112 int logsize;
113
114 memset(context, 0, sizeof *context);
115
116 logsize = long_log2(srq->max) + srq->wqe_shift;
117 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
118 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
119 context->db_index = cpu_to_be32(srq->db_index);
120 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
121 if (pd->ibpd.uobject)
122 context->logstride_usrpage |=
123 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
124 else
125 context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
126 context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
127}
128
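
/*
 * Illustrative sketch (not part of the patch): how the two packed words
 * built by mthca_arbel_init_srq_context() above lay out their fields,
 * checked on made-up values (the cpu_to_be32() conversion is omitted).
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int wqe_shift = 6;			/* hypothetical SRQ geometry */
	int srqn = 0x12345, uar_index = 7;	/* hypothetical identifiers */
	int logsize = 6 + wqe_shift;		/* long_log2(max == 64) == 6 */
	uint32_t state_logsize_srqn = (uint32_t) logsize << 24 | srqn;
	uint32_t logstride_usrpage = (uint32_t) (wqe_shift - 4) << 29 | uar_index;

	assert(state_logsize_srqn == 0x0c012345);	/* log size in bits 31:24 */
	assert(logstride_usrpage == 0x40000007);	/* stride in bits 31:29 */
	return 0;
}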
129static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
130{
131 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
132 srq->is_direct, &srq->mr);
133 kfree(srq->wrid);
134}
135
136static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
137 struct mthca_srq *srq)
138{
139 struct mthca_data_seg *scatter;
140 void *wqe;
141 int err;
142 int i;
143
144 if (pd->ibpd.uobject)
145 return 0;
146
147 srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
148 if (!srq->wrid)
149 return -ENOMEM;
150
151 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
152 MTHCA_MAX_DIRECT_SRQ_SIZE,
153 &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
154 if (err) {
155 kfree(srq->wrid);
156 return err;
157 }
158
159 /*
160 * Now initialize the SRQ buffer so that all of the WQEs are
161 * linked into the list of free WQEs. In addition, set the
162 * scatter list L_Keys to the sentinel value of 0x100.
163 */
164 for (i = 0; i < srq->max; ++i) {
165 wqe = get_wqe(srq, i);
166
167 *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
168
169 for (scatter = wqe + sizeof (struct mthca_next_seg);
170 (void *) scatter < wqe + (1 << srq->wqe_shift);
171 ++scatter)
172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
173 }
174
175 return 0;
176}
177
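
/*
 * Illustrative sketch (not part of the patch): the free list the loop
 * above builds, modeled with a plain int array standing in for
 * *wqe_to_link(get_wqe(srq, i)).  SRQ_MAX is a made-up size.
 */
#include <assert.h>

#define SRQ_MAX 8

int main(void)
{
	int next[SRQ_MAX];
	int i, n, ind;

	/* Same linkage as the initialization loop: i -> i + 1, tail -> -1. */
	for (i = 0; i < SRQ_MAX; ++i)
		next[i] = i < SRQ_MAX - 1 ? i + 1 : -1;

	/* Walking from first_free == 0 visits every WQE exactly once. */
	for (n = 0, ind = 0; ind >= 0; ind = next[ind])
		++n;
	assert(n == SRQ_MAX);

	return 0;
}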
178int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
179 struct ib_srq_attr *attr, struct mthca_srq *srq)
180{
181 struct mthca_mailbox *mailbox;
182 u8 status;
183 int ds;
184 int err;
185
186 /* Sanity check SRQ size before proceeding */
187 if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
188 return -EINVAL;
189
190 srq->max = attr->max_wr;
191 srq->max_gs = attr->max_sge;
192 srq->last = NULL;
193 srq->counter = 0;
194
195 if (mthca_is_memfree(dev))
196 srq->max = roundup_pow_of_two(srq->max + 1);
197
198 ds = min(64UL,
199 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
200 srq->max_gs * sizeof (struct mthca_data_seg)));
201 srq->wqe_shift = long_log2(ds);
202
203 srq->srqn = mthca_alloc(&dev->srq_table.alloc);
204 if (srq->srqn == -1)
205 return -ENOMEM;
206
207 if (mthca_is_memfree(dev)) {
208 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
209 if (err)
210 goto err_out;
211
212 if (!pd->ibpd.uobject) {
213 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
214 srq->srqn, &srq->db);
215 if (srq->db_index < 0) {
216 err = -ENOMEM;
217 goto err_out_icm;
218 }
219 }
220 }
221
222 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
223 if (IS_ERR(mailbox)) {
224 err = PTR_ERR(mailbox);
225 goto err_out_db;
226 }
227
228 err = mthca_alloc_srq_buf(dev, pd, srq);
229 if (err)
230 goto err_out_mailbox;
231
232 spin_lock_init(&srq->lock);
233 atomic_set(&srq->refcount, 1);
234 init_waitqueue_head(&srq->wait);
235
236 if (mthca_is_memfree(dev))
237 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
238 else
239 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
240
241 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
242
243 if (err) {
244 mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
245 goto err_out_free_buf;
246 }
247 if (status) {
248 mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
249 status);
250 err = -EINVAL;
251 goto err_out_free_buf;
252 }
253
254 spin_lock_irq(&dev->srq_table.lock);
255 if (mthca_array_set(&dev->srq_table.srq,
256 srq->srqn & (dev->limits.num_srqs - 1),
257 srq)) {
258 spin_unlock_irq(&dev->srq_table.lock);
259 goto err_out_free_srq;
260 }
261 spin_unlock_irq(&dev->srq_table.lock);
262
263 mthca_free_mailbox(dev, mailbox);
264
265 srq->first_free = 0;
266 srq->last_free = srq->max - 1;
267
268 return 0;
269
270err_out_free_srq:
271 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
272 if (err)
273 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
274 else if (status)
275 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
276
277err_out_free_buf:
278 if (!pd->ibpd.uobject)
279 mthca_free_srq_buf(dev, srq);
280
281err_out_mailbox:
282 mthca_free_mailbox(dev, mailbox);
283
284err_out_db:
285 if (!pd->ibpd.uobject && mthca_is_memfree(dev))
286 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
287
288err_out_icm:
289 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
290
291err_out:
292 mthca_free(&dev->srq_table.alloc, srq->srqn);
293
294 return err;
295}
296
297void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
298{
299 struct mthca_mailbox *mailbox;
300 int err;
301 u8 status;
302
303 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
304 if (IS_ERR(mailbox)) {
305 mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
306 return;
307 }
308
309 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
310 if (err)
311 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
312 else if (status)
313 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
314
315 spin_lock_irq(&dev->srq_table.lock);
316 mthca_array_clear(&dev->srq_table.srq,
317 srq->srqn & (dev->limits.num_srqs - 1));
318 spin_unlock_irq(&dev->srq_table.lock);
319
320 atomic_dec(&srq->refcount);
321 wait_event(srq->wait, !atomic_read(&srq->refcount));
322
323 if (!srq->ibsrq.uobject) {
324 mthca_free_srq_buf(dev, srq);
325 if (mthca_is_memfree(dev))
326 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
327 }
328
329 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
330 mthca_free(&dev->srq_table.alloc, srq->srqn);
331 mthca_free_mailbox(dev, mailbox);
332}
333
334void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
335 enum ib_event_type event_type)
336{
337 struct mthca_srq *srq;
338 struct ib_event event;
339
340 spin_lock(&dev->srq_table.lock);
341 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
342 if (srq)
343 atomic_inc(&srq->refcount);
344 spin_unlock(&dev->srq_table.lock);
345
346 if (!srq) {
347 mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
348 return;
349 }
350
351 if (!srq->ibsrq.event_handler)
352 goto out;
353
354 event.device = &dev->ib_dev;
355 event.event = event_type;
356 event.element.srq = &srq->ibsrq;
357 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
358
359out:
360 if (atomic_dec_and_test(&srq->refcount))
361 wake_up(&srq->wait);
362}
363
364/*
365 * This function must be called with IRQs disabled.
366 */
367void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
368{
369 int ind;
370
371 ind = wqe_addr >> srq->wqe_shift;
372
373 spin_lock(&srq->lock);
374
375 if (likely(srq->first_free >= 0))
376 *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
377 else
378 srq->first_free = ind;
379
380 *wqe_to_link(get_wqe(srq, ind)) = -1;
381 srq->last_free = ind;
382
383 spin_unlock(&srq->lock);
384}
385
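
/*
 * Illustrative sketch (not part of the patch): the two cases the function
 * above handles, on a user-space model of the list.  A freed WQE is
 * appended after last_free; if the list was empty (first_free < 0) it
 * becomes the head as well.  The helper name is hypothetical.
 */
#include <assert.h>

static int next[4], first_free = -1, last_free = -1;

static void model_free_wqe(int ind)
{
	if (first_free >= 0)
		next[last_free] = ind;
	else
		first_free = ind;

	next[ind] = -1;
	last_free = ind;
}

int main(void)
{
	model_free_wqe(2);	/* empty list: WQE 2 becomes the head */
	assert(first_free == 2 && last_free == 2);

	model_free_wqe(0);	/* non-empty list: appended after WQE 2 */
	assert(next[2] == 0 && last_free == 0);

	return 0;
}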
386int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
387 struct ib_recv_wr **bad_wr)
388{
389 struct mthca_dev *dev = to_mdev(ibsrq->device);
390 struct mthca_srq *srq = to_msrq(ibsrq);
391 unsigned long flags;
392 int err = 0;
393 int first_ind;
394 int ind;
395 int next_ind;
396 int nreq;
397 int i;
398 void *wqe;
399 void *prev_wqe;
400
401 spin_lock_irqsave(&srq->lock, flags);
402
403 first_ind = srq->first_free;
404
405 for (nreq = 0; wr; ++nreq, wr = wr->next) {
406 ind = srq->first_free;
407
408 if (ind < 0) {
409 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
410 err = -ENOMEM;
411 *bad_wr = wr;
412 break;
413 }
414
415 wqe = get_wqe(srq, ind);
416 next_ind = *wqe_to_link(wqe);
417 prev_wqe = srq->last;
418 srq->last = wqe;
419
420 ((struct mthca_next_seg *) wqe)->nda_op = 0;
421 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
422 /* flags field will always remain 0 */
423
424 wqe += sizeof (struct mthca_next_seg);
425
426 if (unlikely(wr->num_sge > srq->max_gs)) {
427 err = -EINVAL;
428 *bad_wr = wr;
429 srq->last = prev_wqe;
430 break;
431 }
432
433 for (i = 0; i < wr->num_sge; ++i) {
434 ((struct mthca_data_seg *) wqe)->byte_count =
435 cpu_to_be32(wr->sg_list[i].length);
436 ((struct mthca_data_seg *) wqe)->lkey =
437 cpu_to_be32(wr->sg_list[i].lkey);
438 ((struct mthca_data_seg *) wqe)->addr =
439 cpu_to_be64(wr->sg_list[i].addr);
440 wqe += sizeof (struct mthca_data_seg);
441 }
442
443 if (i < srq->max_gs) {
444 ((struct mthca_data_seg *) wqe)->byte_count = 0;
445 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
446 ((struct mthca_data_seg *) wqe)->addr = 0;
447 }
448
449 if (likely(prev_wqe)) {
450 ((struct mthca_next_seg *) prev_wqe)->nda_op =
451 cpu_to_be32((ind << srq->wqe_shift) | 1);
452 wmb();
453 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
454 cpu_to_be32(MTHCA_NEXT_DBD);
455 }
456
457 srq->wrid[ind] = wr->wr_id;
458 srq->first_free = next_ind;
459 }
460
463 if (likely(nreq)) {
464 __be32 doorbell[2];
465
466 doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
467 doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
468
469 /*
470 * Make sure that descriptors are written before
471 * doorbell is rung.
472 */
473 wmb();
474
475 mthca_write64(doorbell,
476 dev->kar + MTHCA_RECEIVE_DOORBELL,
477 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
478 }
479
480 spin_unlock_irqrestore(&srq->lock, flags);
481 return err;
482}
483
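
/*
 * Illustrative sketch (not part of the patch): the two Tavor receive
 * doorbell words built above, on made-up values (the cpu_to_be32()
 * conversion and the MMIO write itself are omitted).
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int first_ind = 3, wqe_shift = 6;	/* hypothetical SRQ state */
	int srqn = 0x123, nreq = 2;
	uint32_t doorbell[2];

	doorbell[0] = (uint32_t) first_ind << wqe_shift;	/* byte offset */
	doorbell[1] = (uint32_t) srqn << 8 | nreq;

	assert(doorbell[0] == 0xc0);		/* 3 WQEs of 64 bytes */
	assert(doorbell[1] == 0x12302);
	return 0;
}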
484int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
485 struct ib_recv_wr **bad_wr)
486{
487 struct mthca_dev *dev = to_mdev(ibsrq->device);
488 struct mthca_srq *srq = to_msrq(ibsrq);
489 unsigned long flags;
490 int err = 0;
491 int ind;
492 int next_ind;
493 int nreq;
494 int i;
495 void *wqe;
496
497 spin_lock_irqsave(&srq->lock, flags);
498
499 for (nreq = 0; wr; ++nreq, wr = wr->next) {
500 ind = srq->first_free;
501
502 if (ind < 0) {
503 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
504 err = -ENOMEM;
505 *bad_wr = wr;
506 break;
507 }
508
509 wqe = get_wqe(srq, ind);
510 next_ind = *wqe_to_link(wqe);
511
512 ((struct mthca_next_seg *) wqe)->nda_op =
513 cpu_to_be32((next_ind << srq->wqe_shift) | 1);
514 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
515 /* flags field will always remain 0 */
516
517 wqe += sizeof (struct mthca_next_seg);
518
519 if (unlikely(wr->num_sge > srq->max_gs)) {
520 err = -EINVAL;
521 *bad_wr = wr;
522 break;
523 }
524
525 for (i = 0; i < wr->num_sge; ++i) {
526 ((struct mthca_data_seg *) wqe)->byte_count =
527 cpu_to_be32(wr->sg_list[i].length);
528 ((struct mthca_data_seg *) wqe)->lkey =
529 cpu_to_be32(wr->sg_list[i].lkey);
530 ((struct mthca_data_seg *) wqe)->addr =
531 cpu_to_be64(wr->sg_list[i].addr);
532 wqe += sizeof (struct mthca_data_seg);
533 }
534
535 if (i < srq->max_gs) {
536 ((struct mthca_data_seg *) wqe)->byte_count = 0;
537 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
538 ((struct mthca_data_seg *) wqe)->addr = 0;
539 }
540
541 srq->wrid[ind] = wr->wr_id;
542 srq->first_free = next_ind;
543 }
544
545 if (likely(nreq)) {
546 srq->counter += nreq;
547
548 /*
549 * Make sure that descriptors are written before
550 * we write doorbell record.
551 */
552 wmb();
553 *srq->db = cpu_to_be32(srq->counter);
554 }
555
556 spin_unlock_irqrestore(&srq->lock, flags);
557 return err;
558}
559
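
/*
 * Illustrative sketch (not part of the patch): unlike the Tavor path,
 * the Arbel path above rings no MMIO doorbell; it advances a 16-bit
 * counter in a doorbell record the HCA reads from memory.  Values are
 * made up and the cpu_to_be32() conversion is omitted.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t counter = 0xfffe;	/* hypothetical srq->counter */
	uint32_t db;			/* stands in for *srq->db */
	int nreq = 3;

	counter += nreq;		/* u16 arithmetic wraps, as on the wire */
	db = counter;

	assert(counter == 1 && db == 1);
	return 0;
}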
560int __devinit mthca_init_srq_table(struct mthca_dev *dev)
561{
562 int err;
563
564 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
565 return 0;
566
567 spin_lock_init(&dev->srq_table.lock);
568
569 err = mthca_alloc_init(&dev->srq_table.alloc,
570 dev->limits.num_srqs,
571 dev->limits.num_srqs - 1,
572 dev->limits.reserved_srqs);
573 if (err)
574 return err;
575
576 err = mthca_array_init(&dev->srq_table.srq,
577 dev->limits.num_srqs);
578 if (err)
579 mthca_alloc_cleanup(&dev->srq_table.alloc);
580
581 return err;
582}
583
584void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
585{
586 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
587 return;
588
589 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
590 mthca_alloc_cleanup(&dev->srq_table.alloc);
591}
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 3024c1b4547d..41613ec8a04e 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
69 __u32 reserved; 69 __u32 reserved;
70}; 70};
71 71
72struct mthca_create_srq {
73 __u32 lkey;
74 __u32 db_index;
75 __u64 db_page;
76};
77
78struct mthca_create_srq_resp {
79 __u32 srqn;
80 __u32 reserved;
81};
82
72struct mthca_create_qp { 83struct mthca_create_qp {
73 __u32 lkey; 84 __u32 lkey;
74 __u32 reserved; 85 __u32 reserved;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
new file mode 100644
index 000000000000..1f4c0ff28f79
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
33 */
34
35#ifndef MTHCA_WQE_H
36#define MTHCA_WQE_H
37
38#include <linux/types.h>
39
40enum {
41 MTHCA_NEXT_DBD = 1 << 7,
42 MTHCA_NEXT_FENCE = 1 << 6,
43 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
44 MTHCA_NEXT_EVENT_GEN = 1 << 2,
45 MTHCA_NEXT_SOLICIT = 1 << 1,
46
47 MTHCA_MLX_VL15 = 1 << 17,
48 MTHCA_MLX_SLR = 1 << 16
49};
50
51enum {
52 MTHCA_INVAL_LKEY = 0x100
53};
54
55struct mthca_next_seg {
56 __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */
57 __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
58 __be32 flags; /* [3] CQ [2] Event [1] Solicit */
59 __be32 imm; /* immediate data */
60};
61
62struct mthca_tavor_ud_seg {
63 u32 reserved1;
64 __be32 lkey;
65 __be64 av_addr;
66 u32 reserved2[4];
67 __be32 dqpn;
68 __be32 qkey;
69 u32 reserved3[2];
70};
71
72struct mthca_arbel_ud_seg {
73 __be32 av[8];
74 __be32 dqpn;
75 __be32 qkey;
76 u32 reserved[2];
77};
78
79struct mthca_bind_seg {
80 __be32 flags; /* [31] Atomic [30] rem write [29] rem read */
81 u32 reserved;
82 __be32 new_rkey;
83 __be32 lkey;
84 __be64 addr;
85 __be64 length;
86};
87
88struct mthca_raddr_seg {
89 __be64 raddr;
90 __be32 rkey;
91 u32 reserved;
92};
93
94struct mthca_atomic_seg {
95 __be64 swap_add;
96 __be64 compare;
97};
98
99struct mthca_data_seg {
100 __be32 byte_count;
101 __be32 lkey;
102 __be64 addr;
103};
104
105struct mthca_mlx_seg {
106 __be32 nda_op;
107 __be32 nds;
108 __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate
109 [11:8] SL [3] C [2] E */
110 __be16 rlid;
111 __be16 vcrc;
112};
113
114#endif /* MTHCA_WQE_H */
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 394bc08abc6f..8935e74ae3f8 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -1,5 +1,3 @@
1EXTRA_CFLAGS += -Idrivers/infiniband/include
2
3obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o 1obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
4 2
5ib_ipoib-y := ipoib_main.o \ 3ib_ipoib-y := ipoib_main.o \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 04c98f54e9c4..bea960b8191f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -49,9 +51,9 @@
49#include <asm/atomic.h> 51#include <asm/atomic.h>
50#include <asm/semaphore.h> 52#include <asm/semaphore.h>
51 53
52#include <ib_verbs.h> 54#include <rdma/ib_verbs.h>
53#include <ib_pack.h> 55#include <rdma/ib_pack.h>
54#include <ib_sa.h> 56#include <rdma/ib_sa.h>
55 57
56/* constants */ 58/* constants */
57 59
@@ -88,8 +90,8 @@ enum {
88/* structs */ 90/* structs */
89 91
90struct ipoib_header { 92struct ipoib_header {
91 u16 proto; 93 __be16 proto;
92 u16 reserved; 94 u16 reserved;
93}; 95};
94 96
95struct ipoib_pseudoheader { 97struct ipoib_pseudoheader {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index a84e5fe0f193..38b150f775e7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -97,7 +97,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
97 97
98 for (n = 0, i = 0; i < sizeof mgid / 2; ++i) { 98 for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
99 n += sprintf(gid_buf + n, "%x", 99 n += sprintf(gid_buf + n, "%x",
100 be16_to_cpu(((u16 *)mgid.raw)[i])); 100 be16_to_cpu(((__be16 *) mgid.raw)[i]));
101 if (i < sizeof mgid / 2 - 1) 101 if (i < sizeof mgid / 2 - 1)
102 gid_buf[n++] = ':'; 102 gid_buf[n++] = ':';
103 } 103 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index eee82363167d..ef0e3894863c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1,5 +1,8 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 * 6 *
4 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +38,7 @@
35#include <linux/delay.h> 38#include <linux/delay.h>
36#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
37 40
38#include <ib_cache.h> 41#include <rdma/ib_cache.h>
39 42
40#include "ipoib.h" 43#include "ipoib.h"
41 44
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fa00816a3cf7..0e8ac138e355 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,6 @@
34 36
35#include "ipoib.h" 37#include "ipoib.h"
36 38
37#include <linux/version.h>
38#include <linux/module.h> 39#include <linux/module.h>
39 40
40#include <linux/init.h> 41#include <linux/init.h>
@@ -607,8 +608,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
607 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " 608 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
608 IPOIB_GID_FMT "\n", 609 IPOIB_GID_FMT "\n",
609 skb->dst ? "neigh" : "dst", 610 skb->dst ? "neigh" : "dst",
610 be16_to_cpup((u16 *) skb->data), 611 be16_to_cpup((__be16 *) skb->data),
611 be32_to_cpup((u32 *) phdr->hwaddr), 612 be32_to_cpup((__be32 *) phdr->hwaddr),
612 IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); 613 IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
613 dev_kfree_skb_any(skb); 614 dev_kfree_skb_any(skb);
614 ++priv->stats.tx_dropped; 615 ++priv->stats.tx_dropped;
@@ -671,7 +672,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
671{ 672{
672 struct ipoib_dev_priv *priv = netdev_priv(dev); 673 struct ipoib_dev_priv *priv = netdev_priv(dev);
673 674
674 schedule_work(&priv->restart_task); 675 queue_work(ipoib_workqueue, &priv->restart_task);
675} 676}
676 677
677static void ipoib_neigh_destructor(struct neighbour *n) 678static void ipoib_neigh_destructor(struct neighbour *n)
@@ -780,15 +781,11 @@ void ipoib_dev_cleanup(struct net_device *dev)
780 781
781 ipoib_ib_dev_cleanup(dev); 782 ipoib_ib_dev_cleanup(dev);
782 783
783 if (priv->rx_ring) { 784 kfree(priv->rx_ring);
784 kfree(priv->rx_ring); 785 kfree(priv->tx_ring);
785 priv->rx_ring = NULL;
786 }
787 786
788 if (priv->tx_ring) { 787 priv->rx_ring = NULL;
789 kfree(priv->tx_ring); 788 priv->tx_ring = NULL;
790 priv->tx_ring = NULL;
791 }
792} 789}
793 790
794static void ipoib_setup(struct net_device *dev) 791static void ipoib_setup(struct net_device *dev)
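
The simplification works because kfree(NULL) is defined to be a no-op, so the surrounding NULL checks were redundant; the pointers are still cleared so a second pass through cleanup stays harmless. The same guarantee holds for userspace free(), as this minimal sketch shows:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *rx_ring = NULL;

        free(rx_ring);      /* free(NULL) is a no-op, exactly like kfree(NULL) */
        rx_ring = NULL;     /* clearing the pointer keeps cleanup idempotent */
        puts("cleanup ok");
        return 0;
    }
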
@@ -886,6 +883,12 @@ static ssize_t create_child(struct class_device *cdev,
886 if (pkey < 0 || pkey > 0xffff) 883 if (pkey < 0 || pkey > 0xffff)
887 return -EINVAL; 884 return -EINVAL;
888 885
886 /*
887 * Set the full membership bit, so that we join the right
888 * broadcast group, etc.
889 */
890 pkey |= 0x8000;
891
889 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 892 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
890 pkey); 893 pkey);
891 894
@@ -938,6 +941,12 @@ static struct net_device *ipoib_add_port(const char *format,
938 goto alloc_mem_failed; 941 goto alloc_mem_failed;
939 } 942 }
940 943
944 /*
945 * Set the full membership bit, so that we join the right
946 * broadcast group, etc.
947 */
948 priv->pkey |= 0x8000;
949
941 priv->dev->broadcast[8] = priv->pkey >> 8; 950 priv->dev->broadcast[8] = priv->pkey >> 8;
942 priv->dev->broadcast[9] = priv->pkey & 0xff; 951 priv->dev->broadcast[9] = priv->pkey & 0xff;
943 952
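
Both hunks above set bit 15 of the P_Key before it is used. In InfiniBand the top bit of a P_Key is the membership bit (1 = full member), and ipoib_add_port() then places the value in bytes 8 and 9 of the broadcast hardware address. A small sketch of what the added lines compute, using a made-up P_Key:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t pkey = 0x1234;   /* hypothetical limited-membership P_Key */

        /* the membership bit, as both hunks set it */
        pkey |= 0x8000;

        /* the bytes that end up in broadcast[8] and broadcast[9] */
        printf("pkey=0x%04x broadcast[8]=0x%02x broadcast[9]=0x%02x\n",
               pkey, pkey >> 8, pkey & 0xff);
        return 0;
    }
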
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 70208c3d21e2..aca7aea18a69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -357,7 +359,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
357 359
358 rec.mgid = mcast->mcmember.mgid; 360 rec.mgid = mcast->mcmember.mgid;
359 rec.port_gid = priv->local_gid; 361 rec.port_gid = priv->local_gid;
360 rec.pkey = be16_to_cpu(priv->pkey); 362 rec.pkey = cpu_to_be16(priv->pkey);
361 363
362 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, 364 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
363 IB_SA_MCMEMBER_REC_MGID | 365 IB_SA_MCMEMBER_REC_MGID |
@@ -457,7 +459,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
457 459
458 rec.mgid = mcast->mcmember.mgid; 460 rec.mgid = mcast->mcmember.mgid;
459 rec.port_gid = priv->local_gid; 461 rec.port_gid = priv->local_gid;
460 rec.pkey = be16_to_cpu(priv->pkey); 462 rec.pkey = cpu_to_be16(priv->pkey);
461 463
462 comp_mask = 464 comp_mask =
463 IB_SA_MCMEMBER_REC_MGID | 465 IB_SA_MCMEMBER_REC_MGID |
@@ -646,7 +648,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
646 648
647 rec.mgid = mcast->mcmember.mgid; 649 rec.mgid = mcast->mcmember.mgid;
648 rec.port_gid = priv->local_gid; 650 rec.port_gid = priv->local_gid;
649 rec.pkey = be16_to_cpu(priv->pkey); 651 rec.pkey = cpu_to_be16(priv->pkey);
650 652
651 /* Remove ourselves from the multicast group */ 653 /* Remove ourselves from the multicast group */
652 ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid), 654 ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
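
The three rec.pkey hunks replace be16_to_cpu() with cpu_to_be16(). On little-endian hosts both helpers are the same single byte swap, and on big-endian both are no-ops, so the generated code is unchanged; the fix is the declared direction of the conversion, which is what sparse's __be16 annotations check. An illustration, with swap16() standing in for both helpers on a little-endian machine:

    #include <stdio.h>
    #include <stdint.h>

    /* on little-endian, cpu_to_be16() and be16_to_cpu() both reduce to this */
    static uint16_t swap16(uint16_t v)
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        uint16_t pkey = 0x8001;   /* hypothetical full-membership P_Key */

        printf("cpu_to_be16(pkey): 0x%04x\n", swap16(pkey));
        printf("be16_to_cpu(pkey): 0x%04x\n", swap16(pkey));
        return 0;
    }

The identical output is the point: only the annotated intent of the conversion changes.
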
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 4933edf062c2..79f59d0563ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
32 * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $ 33 * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
33 */ 34 */
34 35
35#include <ib_cache.h> 36#include <rdma/ib_cache.h>
36 37
37#include "ipoib.h" 38#include "ipoib.h"
38 39
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 94b8ea812fef..332d730e60c2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -32,7 +32,6 @@
32 * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $ 32 * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $
33 */ 33 */
34 34
35#include <linux/version.h>
36#include <linux/module.h> 35#include <linux/module.h>
37 36
38#include <linux/init.h> 37#include <linux/init.h>
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f983..6ae6eb322111 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && 606 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
607 (m->msg.data_b3_req.blocknr == blocknr)) { 607 (m->msg.data_b3_req.blocknr == blocknr)) {
608 /* found corresponding DATA_B3_REQ */ 608 /* found corresponding DATA_B3_REQ */
609 skb_unlink(tmp); 609 skb_unlink(tmp, &card->ackq);
610 chan->queued -= m->msg.data_b3_req.datalen; 610 chan->queued -= m->msg.data_b3_req.datalen;
611 if (m->msg.data_b3_req.flags) 611 if (m->msg.data_b3_req.flags)
612 ret = m->msg.data_b3_req.datalen; 612 ret = m->msg.data_b3_req.datalen;
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index f8570fd9d2ab..3abd7fc6e5ef 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -191,8 +191,10 @@ static int __init capifs_init(void)
191 err = register_filesystem(&capifs_fs_type); 191 err = register_filesystem(&capifs_fs_type);
192 if (!err) { 192 if (!err) {
193 capifs_mnt = kern_mount(&capifs_fs_type); 193 capifs_mnt = kern_mount(&capifs_fs_type);
194 if (IS_ERR(capifs_mnt)) 194 if (IS_ERR(capifs_mnt)) {
195 err = PTR_ERR(capifs_mnt); 195 err = PTR_ERR(capifs_mnt);
196 unregister_filesystem(&capifs_fs_type);
197 }
196 } 198 }
197 if (!err) 199 if (!err)
198 printk(KERN_NOTICE "capifs: Rev %s\n", rev); 200 printk(KERN_NOTICE "capifs: Rev %s\n", rev);
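
The capifs fix is the usual rollback pattern: when a later initialisation step fails, undo the registration that already succeeded, otherwise the filesystem type would stay registered with no usable mount. A stand-alone sketch with stand-in functions (names and the error value are illustrative):

    #include <stdio.h>

    static int  register_fs(void)   { return 0; }
    static void unregister_fs(void) { puts("rolled back registration"); }
    static int  kern_mount_fs(void) { return -12; }   /* pretend -ENOMEM */

    static int capifs_init_sketch(void)
    {
        int err = register_fs();

        if (!err) {
            err = kern_mount_fs();
            if (err)
                unregister_fs();   /* the rollback the patch adds */
        }
        return err;
    }

    int main(void)
    {
        printf("init: %d\n", capifs_init_sketch());
        return 0;
    }
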
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f30e8e63ae0d..96c115e13389 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1786,7 +1786,6 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1786 lp->stats.rx_bytes += skb->len; 1786 lp->stats.rx_bytes += skb->len;
1787 } 1787 }
1788 skb->dev = ndev; 1788 skb->dev = ndev;
1789 skb->input_dev = ndev;
1790 skb->pkt_type = PACKET_HOST; 1789 skb->pkt_type = PACKET_HOST;
1791 skb->mac.raw = skb->data; 1790 skb->mac.raw = skb->data;
1792#ifdef ISDN_DEBUG_NET_DUMP 1791#ifdef ISDN_DEBUG_NET_DUMP
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 260a323a96d3..d97a9be5469c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1177,7 +1177,6 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1177 mlp->huptimer = 0; 1177 mlp->huptimer = 0;
1178#endif /* CONFIG_IPPP_FILTER */ 1178#endif /* CONFIG_IPPP_FILTER */
1179 skb->dev = dev; 1179 skb->dev = dev;
1180 skb->input_dev = dev;
1181 skb->mac.raw = skb->data; 1180 skb->mac.raw = skb->data;
1182 netif_rx(skb); 1181 netif_rx(skb);
1183 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ 1182 /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 486ee50cfdda..20ca80b7dc20 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1689,6 +1689,7 @@ static int do_md_run(mddev_t * mddev)
1689 mddev->pers = pers[pnum]; 1689 mddev->pers = pers[pnum];
1690 spin_unlock(&pers_lock); 1690 spin_unlock(&pers_lock);
1691 1691
1692 mddev->recovery = 0;
1692 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 1693 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
1693 1694
1694 /* before we start the array running, initialise the bitmap */ 1695 /* before we start the array running, initialise the bitmap */
@@ -4011,3 +4012,4 @@ EXPORT_SYMBOL(md_print_devices);
4011EXPORT_SYMBOL(md_check_recovery); 4012EXPORT_SYMBOL(md_check_recovery);
4012MODULE_LICENSE("GPL"); 4013MODULE_LICENSE("GPL");
4013MODULE_ALIAS("md"); 4014MODULE_ALIAS("md");
4015MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 63b626f70c81..9b9d6f8ee74e 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -70,13 +70,22 @@ EXPORT_SYMBOL(dibusb_power_ctrl);
70 70
71int dibusb2_0_streaming_ctrl(struct dvb_usb_device *d, int onoff) 71int dibusb2_0_streaming_ctrl(struct dvb_usb_device *d, int onoff)
72{ 72{
73 u8 b[2]; 73 u8 b[3] = { 0 };
74 b[0] = DIBUSB_REQ_SET_IOCTL; 74 int ret;
75 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; 75
76 if ((ret = dibusb_streaming_ctrl(d,onoff)) < 0)
77 return ret;
76 78
77 dvb_usb_generic_write(d,b,3); 79 if (onoff) {
80 b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
81 b[1] = 0x00;
82 if ((ret = dvb_usb_generic_write(d,b,2)) < 0)
83 return ret;
84 }
78 85
79 return dibusb_streaming_ctrl(d,onoff); 86 b[0] = DIBUSB_REQ_SET_IOCTL;
87 b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
88 return dvb_usb_generic_write(d,b,3);
80} 89}
81EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); 90EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
82 91
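
The rewritten dibusb2_0_streaming_ctrl() zero-fills its command buffer and propagates the return code of every step instead of discarding it. A compilable sketch of the same error-threading idiom; write_cmd() and the command bytes are stand-ins, not the real DiBcom protocol:

    #include <stdio.h>

    static int write_cmd(const unsigned char *b, int len)
    {
        printf("cmd 0x%02x, %d bytes\n", b[0], len);
        return 0;
    }

    static int streaming_ctrl(int onoff)
    {
        unsigned char b[3] = { 0 };   /* zero-filled, as in the patch */
        int ret;

        if (onoff) {
            b[0] = 0x01;              /* stand-in for the streaming-mode request */
            if ((ret = write_cmd(b, 2)) < 0)
                return ret;           /* propagate instead of ignoring */
        }
        b[0] = 0x02;                  /* stand-in for the ioctl request */
        b[1] = onoff ? 1 : 2;
        return write_cmd(b, 3);
    }

    int main(void)
    {
        return streaming_ctrl(1) < 0;
    }
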
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
index 3491ff40885c..6fa92100248b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
@@ -23,12 +23,12 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
23 */ 23 */
24 if (newfeedcount == 0) { 24 if (newfeedcount == 0) {
25 deb_ts("stop feeding\n"); 25 deb_ts("stop feeding\n");
26 dvb_usb_urb_kill(d);
26 27
27 if (d->props.streaming_ctrl != NULL) 28 if (d->props.streaming_ctrl != NULL)
28 if ((ret = d->props.streaming_ctrl(d,0))) 29 if ((ret = d->props.streaming_ctrl(d,0)))
29 err("error while stopping stream."); 30 err("error while stopping stream.");
30 31
31 dvb_usb_urb_kill(d);
32 } 32 }
33 33
34 d->feedcount = newfeedcount; 34 d->feedcount = newfeedcount;
@@ -44,6 +44,8 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
44 * for reception. 44 * for reception.
45 */ 45 */
46 if (d->feedcount == onoff && d->feedcount > 0) { 46 if (d->feedcount == onoff && d->feedcount > 0) {
47 deb_ts("submitting all URBs\n");
48 dvb_usb_urb_submit(d);
47 49
48 deb_ts("controlling pid parser\n"); 50 deb_ts("controlling pid parser\n");
49 if (d->props.caps & DVB_USB_HAS_PID_FILTER && 51 if (d->props.caps & DVB_USB_HAS_PID_FILTER &&
@@ -59,7 +61,6 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
59 return -ENODEV; 61 return -ENODEV;
60 } 62 }
61 63
62 dvb_usb_urb_submit(d);
63 } 64 }
64 return 0; 65 return 0;
65} 66}
diff --git a/drivers/media/dvb/frontends/tda80xx.c b/drivers/media/dvb/frontends/tda80xx.c
index 88e125079ca1..d1cabb6a0a13 100644
--- a/drivers/media/dvb/frontends/tda80xx.c
+++ b/drivers/media/dvb/frontends/tda80xx.c
@@ -30,6 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <asm/irq.h>
33#include <asm/div64.h> 34#include <asm/div64.h>
34 35
35#include "dvb_frontend.h" 36#include "dvb_frontend.h"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
new file mode 100644
index 000000000000..1588a59e3767
--- /dev/null
+++ b/drivers/mfd/Kconfig
@@ -0,0 +1,16 @@
1#
2# Multifunction miscellaneous devices
3#
4
5menu "Multimedia Capabilities Port drivers"
6
7config MCP
8 tristate
9
10# Interface drivers
11config MCP_SA11X0
12 tristate "Support SA11x0 MCP interface"
13 depends on ARCH_SA1100
14 select MCP
15
16endmenu
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
new file mode 100644
index 000000000000..98bdd6a42188
--- /dev/null
+++ b/drivers/mfd/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for multifunction miscellaneous devices
3#
4
5obj-$(CONFIG_MCP) += mcp-core.o
6obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
new file mode 100644
index 000000000000..c75d713c01e4
--- /dev/null
+++ b/drivers/mfd/mcp-core.c
@@ -0,0 +1,255 @@
1/*
2 * linux/drivers/mfd/mcp-core.c
3 *
4 * Copyright (C) 2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 *
10 * Generic MCP (Multimedia Communications Port) layer. All MCP locking
11 * is solely held within this file.
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/smp.h>
17#include <linux/device.h>
18
19#include <asm/dma.h>
20#include <asm/system.h>
21
22#include "mcp.h"
23
24#define to_mcp(d) container_of(d, struct mcp, attached_device)
25#define to_mcp_driver(d) container_of(d, struct mcp_driver, drv)
26
27static int mcp_bus_match(struct device *dev, struct device_driver *drv)
28{
29 return 1;
30}
31
32static int mcp_bus_probe(struct device *dev)
33{
34 struct mcp *mcp = to_mcp(dev);
35 struct mcp_driver *drv = to_mcp_driver(dev->driver);
36
37 return drv->probe(mcp);
38}
39
40static int mcp_bus_remove(struct device *dev)
41{
42 struct mcp *mcp = to_mcp(dev);
43 struct mcp_driver *drv = to_mcp_driver(dev->driver);
44
45 drv->remove(mcp);
46 return 0;
47}
48
49static int mcp_bus_suspend(struct device *dev, pm_message_t state)
50{
51 struct mcp *mcp = to_mcp(dev);
52 int ret = 0;
53
54 if (dev->driver) {
55 struct mcp_driver *drv = to_mcp_driver(dev->driver);
56
57 ret = drv->suspend(mcp, state);
58 }
59 return ret;
60}
61
62static int mcp_bus_resume(struct device *dev)
63{
64 struct mcp *mcp = to_mcp(dev);
65 int ret = 0;
66
67 if (dev->driver) {
68 struct mcp_driver *drv = to_mcp_driver(dev->driver);
69
70 ret = drv->resume(mcp);
71 }
72 return ret;
73}
74
75static struct bus_type mcp_bus_type = {
76 .name = "mcp",
77 .match = mcp_bus_match,
78 .suspend = mcp_bus_suspend,
79 .resume = mcp_bus_resume,
80};
81
82/**
83 * mcp_set_telecom_divisor - set the telecom divisor
84 * @mcp: MCP interface structure
85 * @div: SIB clock divisor
86 *
87 * Set the telecom divisor on the MCP interface. The resulting
88 * sample rate is SIBCLOCK/div.
89 */
90void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div)
91{
92 spin_lock_irq(&mcp->lock);
93 mcp->ops->set_telecom_divisor(mcp, div);
94 spin_unlock_irq(&mcp->lock);
95}
96EXPORT_SYMBOL(mcp_set_telecom_divisor);
97
98/**
99 * mcp_set_audio_divisor - set the audio divisor
100 * @mcp: MCP interface structure
101 * @div: SIB clock divisor
102 *
103 * Set the audio divisor on the MCP interface.
104 */
105void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div)
106{
107 spin_lock_irq(&mcp->lock);
108 mcp->ops->set_audio_divisor(mcp, div);
109 spin_unlock_irq(&mcp->lock);
110}
111EXPORT_SYMBOL(mcp_set_audio_divisor);
112
113/**
114 * mcp_reg_write - write a device register
115 * @mcp: MCP interface structure
116 * @reg: 4-bit register index
117 * @val: 16-bit data value
118 *
119 * Write a device register. The MCP interface must be enabled
120 * to prevent this function hanging.
121 */
122void mcp_reg_write(struct mcp *mcp, unsigned int reg, unsigned int val)
123{
124 unsigned long flags;
125
126 spin_lock_irqsave(&mcp->lock, flags);
127 mcp->ops->reg_write(mcp, reg, val);
128 spin_unlock_irqrestore(&mcp->lock, flags);
129}
130EXPORT_SYMBOL(mcp_reg_write);
131
132/**
133 * mcp_reg_read - read a device register
134 * @mcp: MCP interface structure
135 * @reg: 4-bit register index
136 *
137 * Read a device register and return its value. The MCP interface
138 * must be enabled to prevent this function hanging.
139 */
140unsigned int mcp_reg_read(struct mcp *mcp, unsigned int reg)
141{
142 unsigned long flags;
143 unsigned int val;
144
145 spin_lock_irqsave(&mcp->lock, flags);
146 val = mcp->ops->reg_read(mcp, reg);
147 spin_unlock_irqrestore(&mcp->lock, flags);
148
149 return val;
150}
151EXPORT_SYMBOL(mcp_reg_read);
152
153/**
154 * mcp_enable - enable the MCP interface
155 * @mcp: MCP interface to enable
156 *
157 * Enable the MCP interface. Each call to mcp_enable will need
158 * a corresponding call to mcp_disable to disable the interface.
159 */
160void mcp_enable(struct mcp *mcp)
161{
162 spin_lock_irq(&mcp->lock);
163 if (mcp->use_count++ == 0)
164 mcp->ops->enable(mcp);
165 spin_unlock_irq(&mcp->lock);
166}
167EXPORT_SYMBOL(mcp_enable);
168
169/**
170 * mcp_disable - disable the MCP interface
171 * @mcp: MCP interface to disable
172 *
173 * Disable the MCP interface. The MCP interface will only be
174 * disabled once the number of calls to mcp_enable matches the
175 * number of calls to mcp_disable.
176 */
177void mcp_disable(struct mcp *mcp)
178{
179 unsigned long flags;
180
181 spin_lock_irqsave(&mcp->lock, flags);
182 if (--mcp->use_count == 0)
183 mcp->ops->disable(mcp);
184 spin_unlock_irqrestore(&mcp->lock, flags);
185}
186EXPORT_SYMBOL(mcp_disable);
187
188static void mcp_release(struct device *dev)
189{
190 struct mcp *mcp = container_of(dev, struct mcp, attached_device);
191
192 kfree(mcp);
193}
194
195struct mcp *mcp_host_alloc(struct device *parent, size_t size)
196{
197 struct mcp *mcp;
198
199 mcp = kmalloc(sizeof(struct mcp) + size, GFP_KERNEL);
200 if (mcp) {
201 memset(mcp, 0, sizeof(struct mcp) + size);
202 spin_lock_init(&mcp->lock);
203 mcp->attached_device.parent = parent;
204 mcp->attached_device.bus = &mcp_bus_type;
205 mcp->attached_device.dma_mask = parent->dma_mask;
206 mcp->attached_device.release = mcp_release;
207 }
208 return mcp;
209}
210EXPORT_SYMBOL(mcp_host_alloc);
211
212int mcp_host_register(struct mcp *mcp)
213{
214 strcpy(mcp->attached_device.bus_id, "mcp0");
215 return device_register(&mcp->attached_device);
216}
217EXPORT_SYMBOL(mcp_host_register);
218
219void mcp_host_unregister(struct mcp *mcp)
220{
221 device_unregister(&mcp->attached_device);
222}
223EXPORT_SYMBOL(mcp_host_unregister);
224
225int mcp_driver_register(struct mcp_driver *mcpdrv)
226{
227 mcpdrv->drv.bus = &mcp_bus_type;
228 mcpdrv->drv.probe = mcp_bus_probe;
229 mcpdrv->drv.remove = mcp_bus_remove;
230 return driver_register(&mcpdrv->drv);
231}
232EXPORT_SYMBOL(mcp_driver_register);
233
234void mcp_driver_unregister(struct mcp_driver *mcpdrv)
235{
236 driver_unregister(&mcpdrv->drv);
237}
238EXPORT_SYMBOL(mcp_driver_unregister);
239
240static int __init mcp_init(void)
241{
242 return bus_register(&mcp_bus_type);
243}
244
245static void __exit mcp_exit(void)
246{
247 bus_unregister(&mcp_bus_type);
248}
249
250module_init(mcp_init);
251module_exit(mcp_exit);
252
253MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
254MODULE_DESCRIPTION("Core multimedia communications port driver");
255MODULE_LICENSE("GPL");
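
mcp_enable() and mcp_disable() above are plain reference counting: hardware is touched only on the 0-to-1 and 1-to-0 transitions, and every enable must be paired with a disable. A minimal userspace sketch of that discipline, with the locking omitted:

    #include <stdio.h>

    struct port { int use_count; };

    static void hw_enable(void)  { puts("hw: enabled");  }
    static void hw_disable(void) { puts("hw: disabled"); }

    static void port_enable(struct port *p)
    {
        if (p->use_count++ == 0)   /* only the first user powers it on */
            hw_enable();
    }

    static void port_disable(struct port *p)
    {
        if (--p->use_count == 0)   /* only the last user powers it off */
            hw_disable();
    }

    int main(void)
    {
        struct port p = { 0 };

        port_enable(&p);   /* hw: enabled */
        port_enable(&p);   /* nested user, no hardware access */
        port_disable(&p);  /* still one user left */
        port_disable(&p);  /* hw: disabled */
        return 0;
    }
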
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
new file mode 100644
index 000000000000..e9806fbbe696
--- /dev/null
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -0,0 +1,275 @@
1/*
2 * linux/drivers/mfd/mcp-sa11x0.c
3 *
4 * Copyright (C) 2001-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 *
10 * SA11x0 MCP (Multimedia Communications Port) driver.
11 *
12 * MCP read/write timeouts from Jordi Colomer, rehacked by rmk.
13 */
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/kernel.h>
18#include <linux/delay.h>
19#include <linux/spinlock.h>
20#include <linux/slab.h>
21#include <linux/device.h>
22
23#include <asm/dma.h>
24#include <asm/hardware.h>
25#include <asm/mach-types.h>
26#include <asm/system.h>
27#include <asm/arch/mcp.h>
28
29#include <asm/arch/assabet.h>
30
31#include "mcp.h"
32
33struct mcp_sa11x0 {
34 u32 mccr0;
35 u32 mccr1;
36};
37
38#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
39
40static void
41mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
42{
43 unsigned int mccr0;
44
45 divisor /= 32;
46
47 mccr0 = Ser4MCCR0 & ~0x00007f00;
48 mccr0 |= divisor << 8;
49 Ser4MCCR0 = mccr0;
50}
51
52static void
53mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
54{
55 unsigned int mccr0;
56
57 divisor /= 32;
58
59 mccr0 = Ser4MCCR0 & ~0x0000007f;
60 mccr0 |= divisor;
61 Ser4MCCR0 = mccr0;
62}
63
64/*
65 * Write data to the device. The bit should be set after 3 subframe
66 * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
67 * We really should try doing something more productive while we
68 * wait.
69 */
70static void
71mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
72{
73 int ret = -ETIME;
74 int i;
75
76 Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff);
77
78 for (i = 0; i < 2; i++) {
79 udelay(mcp->rw_timeout);
80 if (Ser4MCSR & MCSR_CWC) {
81 ret = 0;
82 break;
83 }
84 }
85
86 if (ret < 0)
87 printk(KERN_WARNING "mcp: write timed out\n");
88}
89
90/*
91 * Read data from the device. The bit should be set after 3 subframe
92 * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
93 * We really should try doing something more productive while we
94 * wait.
95 */
96static unsigned int
97mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
98{
99 int ret = -ETIME;
100 int i;
101
102 Ser4MCDR2 = reg << 17 | MCDR2_Rd;
103
104 for (i = 0; i < 2; i++) {
105 udelay(mcp->rw_timeout);
106 if (Ser4MCSR & MCSR_CRC) {
107 ret = Ser4MCDR2 & 0xffff;
108 break;
109 }
110 }
111
112 if (ret < 0)
113 printk(KERN_WARNING "mcp: read timed out\n");
114
115 return ret;
116}
117
118static void mcp_sa11x0_enable(struct mcp *mcp)
119{
120 Ser4MCSR = -1;
121 Ser4MCCR0 |= MCCR0_MCE;
122}
123
124static void mcp_sa11x0_disable(struct mcp *mcp)
125{
126 Ser4MCCR0 &= ~MCCR0_MCE;
127}
128
129/*
130 * Our methods.
131 */
132static struct mcp_ops mcp_sa11x0 = {
133 .set_telecom_divisor = mcp_sa11x0_set_telecom_divisor,
134 .set_audio_divisor = mcp_sa11x0_set_audio_divisor,
135 .reg_write = mcp_sa11x0_write,
136 .reg_read = mcp_sa11x0_read,
137 .enable = mcp_sa11x0_enable,
138 .disable = mcp_sa11x0_disable,
139};
140
141static int mcp_sa11x0_probe(struct device *dev)
142{
143 struct platform_device *pdev = to_platform_device(dev);
144 struct mcp_plat_data *data = pdev->dev.platform_data;
145 struct mcp *mcp;
146 int ret;
147
148 if (!data)
149 return -ENODEV;
150
151 if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp"))
152 return -EBUSY;
153
154 mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0));
155 if (!mcp) {
156 ret = -ENOMEM;
157 goto release;
158 }
159
160 mcp->owner = THIS_MODULE;
161 mcp->ops = &mcp_sa11x0;
162 mcp->sclk_rate = data->sclk_rate;
163 mcp->dma_audio_rd = DMA_Ser4MCP0Rd;
164 mcp->dma_audio_wr = DMA_Ser4MCP0Wr;
165 mcp->dma_telco_rd = DMA_Ser4MCP1Rd;
166 mcp->dma_telco_wr = DMA_Ser4MCP1Wr;
167
168 dev_set_drvdata(dev, mcp);
169
170 if (machine_is_assabet()) {
171 ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
172 }
173
174 /*
175 * Setup the PPC unit correctly.
176 */
177 PPDR &= ~PPC_RXD4;
178 PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
179 PSDR |= PPC_RXD4;
180 PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
181 PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
182
183 /*
184 * Initialise device. Note that we initially
185 * set the sampling rate to minimum.
186 */
187 Ser4MCSR = -1;
188 Ser4MCCR1 = data->mccr1;
189 Ser4MCCR0 = data->mccr0 | 0x7f7f;
190
191 /*
192 * Calculate the read/write timeout (us) from the bit clock
193 * rate. This is the period for 3 64-bit frames. Always
194 * round this time up.
195 */
196 mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
197 mcp->sclk_rate;
198
199 ret = mcp_host_register(mcp);
200 if (ret == 0)
201 goto out;
202
203 release:
204 release_mem_region(0x80060000, 0x60);
205 dev_set_drvdata(dev, NULL);
206
207 out:
208 return ret;
209}
210
211static int mcp_sa11x0_remove(struct device *dev)
212{
213 struct mcp *mcp = dev_get_drvdata(dev);
214
215 dev_set_drvdata(dev, NULL);
216 mcp_host_unregister(mcp);
217 release_mem_region(0x80060000, 0x60);
218
219 return 0;
220}
221
222static int mcp_sa11x0_suspend(struct device *dev, pm_message_t state, u32 level)
223{
224 struct mcp *mcp = dev_get_drvdata(dev);
225
226 if (level == SUSPEND_DISABLE) {
227 priv(mcp)->mccr0 = Ser4MCCR0;
228 priv(mcp)->mccr1 = Ser4MCCR1;
229 Ser4MCCR0 &= ~MCCR0_MCE;
230 }
231 return 0;
232}
233
234static int mcp_sa11x0_resume(struct device *dev, u32 level)
235{
236 struct mcp *mcp = dev_get_drvdata(dev);
237
238 if (level == RESUME_RESTORE_STATE) {
239 Ser4MCCR1 = priv(mcp)->mccr1;
240 Ser4MCCR0 = priv(mcp)->mccr0;
241 }
242 return 0;
243}
244
245/*
246 * The driver for the SA11x0 MCP port.
247 */
248static struct device_driver mcp_sa11x0_driver = {
249 .name = "sa11x0-mcp",
250 .bus = &platform_bus_type,
251 .probe = mcp_sa11x0_probe,
252 .remove = mcp_sa11x0_remove,
253 .suspend = mcp_sa11x0_suspend,
254 .resume = mcp_sa11x0_resume,
255};
256
257/*
258 * This needs re-working
259 */
260static int __init mcp_sa11x0_init(void)
261{
262 return driver_register(&mcp_sa11x0_driver);
263}
264
265static void __exit mcp_sa11x0_exit(void)
266{
267 driver_unregister(&mcp_sa11x0_driver);
268}
269
270module_init(mcp_sa11x0_init);
271module_exit(mcp_sa11x0_exit);
272
273MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
274MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
275MODULE_LICENSE("GPL");
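
The mcp->rw_timeout computation in mcp_sa11x0_probe() is a round-up division: the period of three 64-bit frames in microseconds at the given bit clock. The same formula in isolation; the 11.981 MHz rate below is only an example, not a value taken from the patch:

    #include <stdio.h>

    /* ceil(3 frames * 64 bits * 1e6 / sclk_rate) microseconds, the same
     * round-up division used for mcp->rw_timeout */
    static unsigned int rw_timeout_us(unsigned int sclk_rate)
    {
        return (64 * 3 * 1000000 + sclk_rate - 1) / sclk_rate;
    }

    int main(void)
    {
        printf("%u us\n", rw_timeout_us(11981000));
        return 0;
    }
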
diff --git a/drivers/mfd/mcp.h b/drivers/mfd/mcp.h
new file mode 100644
index 000000000000..c093a93b8808
--- /dev/null
+++ b/drivers/mfd/mcp.h
@@ -0,0 +1,66 @@
1/*
2 * linux/drivers/mfd/mcp.h
3 *
4 * Copyright (C) 2001 Russell King, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 */
10#ifndef MCP_H
11#define MCP_H
12
13struct mcp_ops;
14
15struct mcp {
16 struct module *owner;
17 struct mcp_ops *ops;
18 spinlock_t lock;
19 int use_count;
20 unsigned int sclk_rate;
21 unsigned int rw_timeout;
22 dma_device_t dma_audio_rd;
23 dma_device_t dma_audio_wr;
24 dma_device_t dma_telco_rd;
25 dma_device_t dma_telco_wr;
26 struct device attached_device;
27};
28
29struct mcp_ops {
30 void (*set_telecom_divisor)(struct mcp *, unsigned int);
31 void (*set_audio_divisor)(struct mcp *, unsigned int);
32 void (*reg_write)(struct mcp *, unsigned int, unsigned int);
33 unsigned int (*reg_read)(struct mcp *, unsigned int);
34 void (*enable)(struct mcp *);
35 void (*disable)(struct mcp *);
36};
37
38void mcp_set_telecom_divisor(struct mcp *, unsigned int);
39void mcp_set_audio_divisor(struct mcp *, unsigned int);
40void mcp_reg_write(struct mcp *, unsigned int, unsigned int);
41unsigned int mcp_reg_read(struct mcp *, unsigned int);
42void mcp_enable(struct mcp *);
43void mcp_disable(struct mcp *);
44#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate)
45
46struct mcp *mcp_host_alloc(struct device *, size_t);
47int mcp_host_register(struct mcp *);
48void mcp_host_unregister(struct mcp *);
49
50struct mcp_driver {
51 struct device_driver drv;
52 int (*probe)(struct mcp *);
53 void (*remove)(struct mcp *);
54 int (*suspend)(struct mcp *, pm_message_t);
55 int (*resume)(struct mcp *);
56};
57
58int mcp_driver_register(struct mcp_driver *);
59void mcp_driver_unregister(struct mcp_driver *);
60
61#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device)
62#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d)
63
64#define mcp_priv(mcp) ((void *)((mcp)+1))
65
66#endif
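
mcp_priv() pairs with mcp_host_alloc(): a single allocation carries struct mcp plus the caller's extra driver-private bytes, and the macro returns a pointer just past the struct. The idiom in plain C, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct host { int id; };                   /* stand-in for struct mcp */
    #define host_priv(h) ((void *)((h) + 1))   /* same shape as mcp_priv() */

    static struct host *host_alloc(size_t extra)
    {
        /* one allocation: the core struct plus driver-private bytes */
        return calloc(1, sizeof(struct host) + extra);
    }

    int main(void)
    {
        struct host *h = host_alloc(16);

        if (!h)
            return 1;
        strcpy(host_priv(h), "driver data");   /* lives just past the struct */
        printf("%s\n", (char *)host_priv(h));
        free(h);
        return 0;
    }
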
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index eeb9f6668e69..3c5904834fe8 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -361,7 +361,7 @@ static void mmc_decode_cid(struct mmc_card *card)
361 361
362 default: 362 default:
363 printk("%s: card has unknown MMCA version %d\n", 363 printk("%s: card has unknown MMCA version %d\n",
364 card->host->host_name, card->csd.mmca_vsn); 364 mmc_hostname(card->host), card->csd.mmca_vsn);
365 mmc_card_set_bad(card); 365 mmc_card_set_bad(card);
366 break; 366 break;
367 } 367 }
@@ -383,7 +383,7 @@ static void mmc_decode_csd(struct mmc_card *card)
383 csd_struct = UNSTUFF_BITS(resp, 126, 2); 383 csd_struct = UNSTUFF_BITS(resp, 126, 2);
384 if (csd_struct != 1 && csd_struct != 2) { 384 if (csd_struct != 1 && csd_struct != 2) {
385 printk("%s: unrecognised CSD structure version %d\n", 385 printk("%s: unrecognised CSD structure version %d\n",
386 card->host->host_name, csd_struct); 386 mmc_hostname(card->host), csd_struct);
387 mmc_card_set_bad(card); 387 mmc_card_set_bad(card);
388 return; 388 return;
389 } 389 }
@@ -551,7 +551,7 @@ static void mmc_discover_cards(struct mmc_host *host)
551 } 551 }
552 if (err != MMC_ERR_NONE) { 552 if (err != MMC_ERR_NONE) {
553 printk(KERN_ERR "%s: error requesting CID: %d\n", 553 printk(KERN_ERR "%s: error requesting CID: %d\n",
554 host->host_name, err); 554 mmc_hostname(host), err);
555 break; 555 break;
556 } 556 }
557 557
@@ -796,17 +796,13 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
796{ 796{
797 struct mmc_host *host; 797 struct mmc_host *host;
798 798
799 host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); 799 host = mmc_alloc_host_sysfs(extra, dev);
800 if (host) { 800 if (host) {
801 memset(host, 0, sizeof(struct mmc_host) + extra);
802
803 spin_lock_init(&host->lock); 801 spin_lock_init(&host->lock);
804 init_waitqueue_head(&host->wq); 802 init_waitqueue_head(&host->wq);
805 INIT_LIST_HEAD(&host->cards); 803 INIT_LIST_HEAD(&host->cards);
806 INIT_WORK(&host->detect, mmc_rescan, host); 804 INIT_WORK(&host->detect, mmc_rescan, host);
807 805
808 host->dev = dev;
809
810 /* 806 /*
811 * By default, hosts do not support SGIO or large requests. 807 * By default, hosts do not support SGIO or large requests.
812 * They have to set these according to their abilities. 808 * They have to set these according to their abilities.
@@ -828,15 +824,15 @@ EXPORT_SYMBOL(mmc_alloc_host);
828 */ 824 */
829int mmc_add_host(struct mmc_host *host) 825int mmc_add_host(struct mmc_host *host)
830{ 826{
831 static unsigned int host_num; 827 int ret;
832 828
833 snprintf(host->host_name, sizeof(host->host_name), 829 ret = mmc_add_host_sysfs(host);
834 "mmc%d", host_num++); 830 if (ret == 0) {
835 831 mmc_power_off(host);
836 mmc_power_off(host); 832 mmc_detect_change(host);
837 mmc_detect_change(host); 833 }
838 834
839 return 0; 835 return ret;
840} 836}
841 837
842EXPORT_SYMBOL(mmc_add_host); 838EXPORT_SYMBOL(mmc_add_host);
@@ -859,6 +855,7 @@ void mmc_remove_host(struct mmc_host *host)
859 } 855 }
860 856
861 mmc_power_off(host); 857 mmc_power_off(host);
858 mmc_remove_host_sysfs(host);
862} 859}
863 860
864EXPORT_SYMBOL(mmc_remove_host); 861EXPORT_SYMBOL(mmc_remove_host);
@@ -872,7 +869,7 @@ EXPORT_SYMBOL(mmc_remove_host);
872void mmc_free_host(struct mmc_host *host) 869void mmc_free_host(struct mmc_host *host)
873{ 870{
874 flush_scheduled_work(); 871 flush_scheduled_work();
875 kfree(host); 872 mmc_free_host_sysfs(host);
876} 873}
877 874
878EXPORT_SYMBOL(mmc_free_host); 875EXPORT_SYMBOL(mmc_free_host);
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index b498dffe0b11..97bae00292fa 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -13,4 +13,9 @@
13void mmc_init_card(struct mmc_card *card, struct mmc_host *host); 13void mmc_init_card(struct mmc_card *card, struct mmc_host *host);
14int mmc_register_card(struct mmc_card *card); 14int mmc_register_card(struct mmc_card *card);
15void mmc_remove_card(struct mmc_card *card); 15void mmc_remove_card(struct mmc_card *card);
16
17struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev);
18int mmc_add_host_sysfs(struct mmc_host *host);
19void mmc_remove_host_sysfs(struct mmc_host *host);
20void mmc_free_host_sysfs(struct mmc_host *host);
16#endif 21#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index 5556cd3b5559..ad8949810fc5 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/idr.h>
15 16
16#include <linux/mmc/card.h> 17#include <linux/mmc/card.h>
17#include <linux/mmc/host.h> 18#include <linux/mmc/host.h>
@@ -20,6 +21,7 @@
20 21
21#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev) 22#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
22#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) 23#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
24#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
23 25
24#define MMC_ATTR(name, fmt, args...) \ 26#define MMC_ATTR(name, fmt, args...) \
25static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \ 27static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
@@ -206,7 +208,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
206int mmc_register_card(struct mmc_card *card) 208int mmc_register_card(struct mmc_card *card)
207{ 209{
208 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), 210 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
209 "%s:%04x", card->host->host_name, card->rca); 211 "%s:%04x", mmc_hostname(card->host), card->rca);
210 212
211 return device_add(&card->dev); 213 return device_add(&card->dev);
212} 214}
@@ -224,13 +226,97 @@ void mmc_remove_card(struct mmc_card *card)
224} 226}
225 227
226 228
229static void mmc_host_classdev_release(struct class_device *dev)
230{
231 struct mmc_host *host = cls_dev_to_mmc_host(dev);
232 kfree(host);
233}
234
235static struct class mmc_host_class = {
236 .name = "mmc_host",
237 .release = mmc_host_classdev_release,
238};
239
240static DEFINE_IDR(mmc_host_idr);
241static DEFINE_SPINLOCK(mmc_host_lock);
242
243/*
244 * Internal function. Allocate a new MMC host.
245 */
246struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev)
247{
248 struct mmc_host *host;
249
250 host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
251 if (host) {
252 memset(host, 0, sizeof(struct mmc_host) + extra);
253
254 host->dev = dev;
255 host->class_dev.dev = host->dev;
256 host->class_dev.class = &mmc_host_class;
257 class_device_initialize(&host->class_dev);
258 }
259
260 return host;
261}
262
263/*
264 * Internal function. Register a new MMC host with the MMC class.
265 */
266int mmc_add_host_sysfs(struct mmc_host *host)
267{
268 int err;
269
270 if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
271 return -ENOMEM;
272
273 spin_lock(&mmc_host_lock);
274 err = idr_get_new(&mmc_host_idr, host, &host->index);
275 spin_unlock(&mmc_host_lock);
276 if (err)
277 return err;
278
279 snprintf(host->class_dev.class_id, BUS_ID_SIZE,
280 "mmc%d", host->index);
281
282 return class_device_add(&host->class_dev);
283}
284
285/*
286 * Internal function. Unregister a MMC host with the MMC class.
287 */
288void mmc_remove_host_sysfs(struct mmc_host *host)
289{
290 class_device_del(&host->class_dev);
291
292 spin_lock(&mmc_host_lock);
293 idr_remove(&mmc_host_idr, host->index);
294 spin_unlock(&mmc_host_lock);
295}
296
297/*
298 * Internal function. Free a MMC host.
299 */
300void mmc_free_host_sysfs(struct mmc_host *host)
301{
302 class_device_put(&host->class_dev);
303}
304
305
227static int __init mmc_init(void) 306static int __init mmc_init(void)
228{ 307{
229 return bus_register(&mmc_bus_type); 308 int ret = bus_register(&mmc_bus_type);
309 if (ret == 0) {
310 ret = class_register(&mmc_host_class);
311 if (ret)
312 bus_unregister(&mmc_bus_type);
313 }
314 return ret;
230} 315}
231 316
232static void __exit mmc_exit(void) 317static void __exit mmc_exit(void)
233{ 318{
319 class_unregister(&mmc_host_class);
234 bus_unregister(&mmc_bus_type); 320 bus_unregister(&mmc_bus_type);
235} 321}
236 322
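
mmc_add_host_sysfs() replaces the old monotonically increasing host_num counter with an IDR, so indices are recycled and "mmc%d" names stay dense across host removals. A userspace stand-in for the lowest-free-index allocation, using a fixed table instead of the kernel's idr:

    #include <stdio.h>

    #define MAX_HOSTS 8

    static void *hosts[MAX_HOSTS];   /* fixed table standing in for the idr */

    static int host_index_alloc(void *host)
    {
        for (int i = 0; i < MAX_HOSTS; i++)
            if (!hosts[i]) {
                hosts[i] = host;
                return i;            /* lowest free index, as idr_get_new() gives */
            }
        return -1;
    }

    int main(void)
    {
        int a, b;
        char name[16];

        snprintf(name, sizeof(name), "mmc%d", host_index_alloc(&a));
        puts(name);                  /* mmc0 */
        snprintf(name, sizeof(name), "mmc%d", host_index_alloc(&b));
        puts(name);                  /* mmc1 */
        return 0;
    }
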
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 7a42966d755b..716c4ef4faf6 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -34,7 +34,7 @@
34 34
35#ifdef CONFIG_MMC_DEBUG 35#ifdef CONFIG_MMC_DEBUG
36#define DBG(host,fmt,args...) \ 36#define DBG(host,fmt,args...) \
37 pr_debug("%s: %s: " fmt, host->mmc->host_name, __func__ , args) 37 pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
38#else 38#else
39#define DBG(host,fmt,args...) do { } while (0) 39#define DBG(host,fmt,args...) do { } while (0)
40#endif 40#endif
@@ -541,7 +541,7 @@ static int mmci_probe(struct amba_device *dev, void *id)
541 mmc_add_host(mmc); 541 mmc_add_host(mmc);
542 542
543 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n", 543 printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n",
544 mmc->host_name, amba_rev(dev), amba_config(dev), 544 mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
545 dev->res.start, dev->irq[0], dev->irq[1]); 545 dev->res.start, dev->irq[0], dev->irq[1]);
546 546
547 init_timer(&host->timer); 547 init_timer(&host->timer);
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 974f2f36bdbe..402c2d661fb2 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1796,7 +1796,7 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
1796 1796
1797 mmc_add_host(mmc); 1797 mmc_add_host(mmc);
1798 1798
1799 printk(KERN_INFO "%s: W83L51xD", mmc->host_name); 1799 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1800 if (host->chip_id != 0) 1800 if (host->chip_id != 0)
1801 printk(" id %x", (int)host->chip_id); 1801 printk(" id %x", (int)host->chip_id);
1802 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); 1802 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8edb6936fb9b..e0239a10d325 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
131 131
132 source "drivers/net/arcnet/Kconfig" 132 source "drivers/net/arcnet/Kconfig"
133 133
134source "drivers/net/phy/Kconfig"
135
134# 136#
135# Ethernet 137# Ethernet
136# 138#
@@ -1921,6 +1923,17 @@ config R8169_VLAN
1921 1923
1922 If in doubt, say Y. 1924 If in doubt, say Y.
1923 1925
1926config SIS190
1927 tristate "SiS190 gigabit ethernet support"
1928 depends on PCI
1929 select CRC32
1930 select MII
1931 ---help---
1932 Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
1933
1934 To compile this driver as a module, choose M here: the module
1935 will be called sis190. This is recommended.
1936
1924config SKGE 1937config SKGE
1925 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" 1938 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
1926 depends on PCI && EXPERIMENTAL 1939 depends on PCI && EXPERIMENTAL
@@ -2091,6 +2104,25 @@ endmenu
2091menu "Ethernet (10000 Mbit)" 2104menu "Ethernet (10000 Mbit)"
2092 depends on !UML 2105 depends on !UML
2093 2106
2107config CHELSIO_T1
2108 tristate "Chelsio 10Gb Ethernet support"
2109 depends on PCI
2110 help
2111	  This driver supports the Chelsio N110 and N210 10Gb Ethernet
2112 cards. More information about adapter features and performance
2113 tuning is in <file:Documentation/networking/cxgb.txt>.
2114
2115 For general information about Chelsio and our products, visit
2116 our website at <http://www.chelsio.com>.
2117
2118 For customer support, please visit our customer support page at
2119 <http://www.chelsio.com/support.htm>.
2120
2121 Please send feedback to <linux-bugs@chelsio.com>.
2122
2123 To compile this driver as a module, choose M here: the module
2124 will be called cxgb.
2125
2094config IXGB 2126config IXGB
2095 tristate "Intel(R) PRO/10GbE support" 2127 tristate "Intel(R) PRO/10GbE support"
2096 depends on PCI 2128 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d9..5baafcd55610 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ endif
9obj-$(CONFIG_E1000) += e1000/ 9obj-$(CONFIG_E1000) += e1000/
10obj-$(CONFIG_IBM_EMAC) += ibm_emac/ 10obj-$(CONFIG_IBM_EMAC) += ibm_emac/
11obj-$(CONFIG_IXGB) += ixgb/ 11obj-$(CONFIG_IXGB) += ixgb/
12obj-$(CONFIG_CHELSIO_T1) += chelsio/
12obj-$(CONFIG_BONDING) += bonding/ 13obj-$(CONFIG_BONDING) += bonding/
13obj-$(CONFIG_GIANFAR) += gianfar_driver.o 14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
14 15
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
42obj-$(CONFIG_E100) += e100.o 43obj-$(CONFIG_E100) += e100.o
43obj-$(CONFIG_TLAN) += tlan.o 44obj-$(CONFIG_TLAN) += tlan.o
44obj-$(CONFIG_EPIC100) += epic100.o 45obj-$(CONFIG_EPIC100) += epic100.o
46obj-$(CONFIG_SIS190) += sis190.o
45obj-$(CONFIG_SIS900) += sis900.o 47obj-$(CONFIG_SIS900) += sis900.o
46obj-$(CONFIG_YELLOWFIN) += yellowfin.o 48obj-$(CONFIG_YELLOWFIN) += yellowfin.o
47obj-$(CONFIG_ACENIC) += acenic.o 49obj-$(CONFIG_ACENIC) += acenic.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
65# 67#
66 68
67obj-$(CONFIG_MII) += mii.o 69obj-$(CONFIG_MII) += mii.o
70obj-$(CONFIG_PHYLIB) += phy/
68 71
69obj-$(CONFIG_SUNDANCE) += sundance.o 72obj-$(CONFIG_SUNDANCE) += sundance.o
70obj-$(CONFIG_HAMACHI) += hamachi.o 73obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3707df6b0cfa..60304f7e7e5b 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -87,7 +87,6 @@ extern struct net_device *mvme147lance_probe(int unit);
87extern struct net_device *tc515_probe(int unit); 87extern struct net_device *tc515_probe(int unit);
88extern struct net_device *lance_probe(int unit); 88extern struct net_device *lance_probe(int unit);
89extern struct net_device *mace_probe(int unit); 89extern struct net_device *mace_probe(int unit);
90extern struct net_device *macsonic_probe(int unit);
91extern struct net_device *mac8390_probe(int unit); 90extern struct net_device *mac8390_probe(int unit);
92extern struct net_device *mac89x0_probe(int unit); 91extern struct net_device *mac89x0_probe(int unit);
93extern struct net_device *mc32_probe(int unit); 92extern struct net_device *mc32_probe(int unit);
@@ -284,9 +283,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
284#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */ 283#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */
285 {mace_probe, 0}, 284 {mace_probe, 0},
286#endif 285#endif
287#ifdef CONFIG_MACSONIC /* Mac SONIC-based Ethernet of all sorts */
288 {macsonic_probe, 0},
289#endif
290#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */ 286#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */
291 {mac8390_probe, 0}, 287 {mac8390_probe, 0},
292#endif 288#endif
@@ -318,17 +314,9 @@ static void __init ethif_probe2(int unit)
318#ifdef CONFIG_TR 314#ifdef CONFIG_TR
319/* Token-ring device probe */ 315/* Token-ring device probe */
320extern int ibmtr_probe_card(struct net_device *); 316extern int ibmtr_probe_card(struct net_device *);
321extern struct net_device *sk_isa_probe(int unit);
322extern struct net_device *proteon_probe(int unit);
323extern struct net_device *smctr_probe(int unit); 317extern struct net_device *smctr_probe(int unit);
324 318
325static struct devprobe2 tr_probes2[] __initdata = { 319static struct devprobe2 tr_probes2[] __initdata = {
326#ifdef CONFIG_SKISA
327 {sk_isa_probe, 0},
328#endif
329#ifdef CONFIG_PROTEON
330 {proteon_probe, 0},
331#endif
332#ifdef CONFIG_SMCTR 320#ifdef CONFIG_SMCTR
333 {smctr_probe, 0}, 321 {smctr_probe, 0},
334#endif 322#endif
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8acc655ec1e8..7babf6af4e28 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -14,8 +14,8 @@
14 14
15#define DRV_MODULE_NAME "bnx2" 15#define DRV_MODULE_NAME "bnx2"
16#define PFX DRV_MODULE_NAME ": " 16#define PFX DRV_MODULE_NAME ": "
17#define DRV_MODULE_VERSION "1.2.19" 17#define DRV_MODULE_VERSION "1.2.20"
18#define DRV_MODULE_RELDATE "May 23, 2005" 18#define DRV_MODULE_RELDATE "August 22, 2005"
19 19
20#define RUN_AT(x) (jiffies + (x)) 20#define RUN_AT(x) (jiffies + (x))
21 21
@@ -52,7 +52,6 @@ static struct {
52 { "HP NC370i Multifunction Gigabit Server Adapter" }, 52 { "HP NC370i Multifunction Gigabit Server Adapter" },
53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 53 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
54 { "HP NC370F Multifunction Gigabit Server Adapter" }, 54 { "HP NC370F Multifunction Gigabit Server Adapter" },
55 { 0 },
56 }; 55 };
57 56
58static struct pci_device_id bnx2_pci_tbl[] = { 57static struct pci_device_id bnx2_pci_tbl[] = {
@@ -108,6 +107,15 @@ static struct flash_spec flash_table[] =
108 107
109MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 108MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
110 109
110static inline u32 bnx2_tx_avail(struct bnx2 *bp)
111{
112 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
113
114 if (diff > MAX_TX_DESC_CNT)
115 diff = (diff & MAX_TX_DESC_CNT) - 1;
116 return (bp->tx_ring_size - diff);
117}
118
111static u32 119static u32
112bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) 120bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
113{ 121{
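
The new bnx2_tx_avail() derives the free-descriptor count from the producer and consumer indices alone, which is what lets the later hunks delete the tx_avail_bd atomic. Its arithmetic, exercised in userspace; the 256-entry ring and 255 mask are assumptions matching the driver's descriptor-ring layout:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_TX_DESC_CNT 255u             /* assumed: 256-entry ring, mask 255 */
    #define TX_RING_IDX(x)  ((x) & MAX_TX_DESC_CNT)

    /* mirrors bnx2_tx_avail(): in-flight descriptors follow from prod/cons */
    static uint32_t tx_avail(uint32_t prod, uint32_t cons, uint32_t ring_size)
    {
        uint32_t diff = TX_RING_IDX(prod) - TX_RING_IDX(cons);

        if (diff > MAX_TX_DESC_CNT)          /* unsigned wrap: cons index ahead */
            diff = (diff & MAX_TX_DESC_CNT) - 1;
        return ring_size - diff;
    }

    int main(void)
    {
        printf("%u\n", tx_avail(10, 5, 256));  /* 5 in flight -> 251 free */
        printf("%u\n", tx_avail(3, 250, 256)); /* wrapped producer -> 248 free */
        return 0;
    }
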
@@ -807,7 +815,19 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
807 bnx2_write_phy(bp, MII_ADVERTISE, new_adv); 815 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
808 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | 816 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
809 BMCR_ANENABLE); 817 BMCR_ANENABLE);
810 bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval; 818 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
819 /* Speed up link-up time when the link partner
 820 * does not autonegotiate, which is very common
821 * in blade servers. Some blade servers use
 822 * IPMI for keyboard input and it's important
823 * to minimize link disruptions. Autoneg. involves
824 * exchanging base pages plus 3 next pages and
825 * normally completes in about 120 msec.
826 */
827 bp->current_interval = SERDES_AN_TIMEOUT;
828 bp->serdes_an_pending = 1;
829 mod_timer(&bp->timer, jiffies + bp->current_interval);
830 }
811 } 831 }
812 832
813 return 0; 833 return 0;
@@ -1327,22 +1347,17 @@ bnx2_tx_int(struct bnx2 *bp)
1327 } 1347 }
1328 } 1348 }
1329 1349
1330 atomic_add(tx_free_bd, &bp->tx_avail_bd); 1350 bp->tx_cons = sw_cons;
1331 1351
1332 if (unlikely(netif_queue_stopped(bp->dev))) { 1352 if (unlikely(netif_queue_stopped(bp->dev))) {
1333 unsigned long flags; 1353 spin_lock(&bp->tx_lock);
1334
1335 spin_lock_irqsave(&bp->tx_lock, flags);
1336 if ((netif_queue_stopped(bp->dev)) && 1354 if ((netif_queue_stopped(bp->dev)) &&
1337 (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) { 1355 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1338 1356
1339 netif_wake_queue(bp->dev); 1357 netif_wake_queue(bp->dev);
1340 } 1358 }
1341 spin_unlock_irqrestore(&bp->tx_lock, flags); 1359 spin_unlock(&bp->tx_lock);
1342 } 1360 }
1343
1344 bp->tx_cons = sw_cons;
1345
1346} 1361}
1347 1362
1348static inline void 1363static inline void
@@ -1523,15 +1538,12 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1523 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1538 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1524 1539
1525 /* Return here if interrupt is disabled. */ 1540 /* Return here if interrupt is disabled. */
1526 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1541 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1527 return IRQ_RETVAL(1); 1542 return IRQ_HANDLED;
1528 }
1529 1543
1530 if (netif_rx_schedule_prep(dev)) { 1544 netif_rx_schedule(dev);
1531 __netif_rx_schedule(dev);
1532 }
1533 1545
1534 return IRQ_RETVAL(1); 1546 return IRQ_HANDLED;
1535} 1547}
1536 1548
1537static irqreturn_t 1549static irqreturn_t
@@ -1549,22 +1561,19 @@ bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1549 if ((bp->status_blk->status_idx == bp->last_status_idx) || 1561 if ((bp->status_blk->status_idx == bp->last_status_idx) ||
1550 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 1562 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1551 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 1563 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1552 return IRQ_RETVAL(0); 1564 return IRQ_NONE;
1553 1565
1554 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 1566 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1555 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 1567 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1556 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 1568 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1557 1569
1558 /* Return here if interrupt is shared and is disabled. */ 1570 /* Return here if interrupt is shared and is disabled. */
1559 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1571 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1560 return IRQ_RETVAL(1); 1572 return IRQ_HANDLED;
1561 }
1562 1573
1563 if (netif_rx_schedule_prep(dev)) { 1574 netif_rx_schedule(dev);
1564 __netif_rx_schedule(dev);
1565 }
1566 1575
1567 return IRQ_RETVAL(1); 1576 return IRQ_HANDLED;
1568} 1577}
1569 1578
1570static int 1579static int
@@ -1581,11 +1590,9 @@ bnx2_poll(struct net_device *dev, int *budget)
1581 (bp->status_blk->status_attn_bits_ack & 1590 (bp->status_blk->status_attn_bits_ack &
1582 STATUS_ATTN_BITS_LINK_STATE)) { 1591 STATUS_ATTN_BITS_LINK_STATE)) {
1583 1592
1584 unsigned long flags; 1593 spin_lock(&bp->phy_lock);
1585
1586 spin_lock_irqsave(&bp->phy_lock, flags);
1587 bnx2_phy_int(bp); 1594 bnx2_phy_int(bp);
1588 spin_unlock_irqrestore(&bp->phy_lock, flags); 1595 spin_unlock(&bp->phy_lock);
1589 } 1596 }
1590 1597
1591 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) { 1598 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
@@ -1628,9 +1635,8 @@ bnx2_set_rx_mode(struct net_device *dev)
1628 struct bnx2 *bp = dev->priv; 1635 struct bnx2 *bp = dev->priv;
1629 u32 rx_mode, sort_mode; 1636 u32 rx_mode, sort_mode;
1630 int i; 1637 int i;
1631 unsigned long flags;
1632 1638
1633 spin_lock_irqsave(&bp->phy_lock, flags); 1639 spin_lock_bh(&bp->phy_lock);
1634 1640
1635 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | 1641 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1636 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 1642 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
@@ -1691,7 +1697,7 @@ bnx2_set_rx_mode(struct net_device *dev)
1691 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); 1697 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
1692 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); 1698 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
1693 1699
1694 spin_unlock_irqrestore(&bp->phy_lock, flags); 1700 spin_unlock_bh(&bp->phy_lock);
1695} 1701}
1696 1702
1697static void 1703static void
@@ -2960,7 +2966,6 @@ bnx2_init_tx_ring(struct bnx2 *bp)
2960 bp->tx_prod = 0; 2966 bp->tx_prod = 0;
2961 bp->tx_cons = 0; 2967 bp->tx_cons = 0;
2962 bp->tx_prod_bseq = 0; 2968 bp->tx_prod_bseq = 0;
2963 atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
2964 2969
2965 val = BNX2_L2CTX_TYPE_TYPE_L2; 2970 val = BNX2_L2CTX_TYPE_TYPE_L2;
2966 val |= BNX2_L2CTX_TYPE_SIZE_L2; 2971 val |= BNX2_L2CTX_TYPE_SIZE_L2;
@@ -3507,11 +3512,11 @@ bnx2_test_registers(struct bnx2 *bp)
3507 rw_mask = reg_tbl[i].rw_mask; 3512 rw_mask = reg_tbl[i].rw_mask;
3508 ro_mask = reg_tbl[i].ro_mask; 3513 ro_mask = reg_tbl[i].ro_mask;
3509 3514
3510 save_val = readl((u8 *) bp->regview + offset); 3515 save_val = readl(bp->regview + offset);
3511 3516
3512 writel(0, (u8 *) bp->regview + offset); 3517 writel(0, bp->regview + offset);
3513 3518
3514 val = readl((u8 *) bp->regview + offset); 3519 val = readl(bp->regview + offset);
3515 if ((val & rw_mask) != 0) { 3520 if ((val & rw_mask) != 0) {
3516 goto reg_test_err; 3521 goto reg_test_err;
3517 } 3522 }
@@ -3520,9 +3525,9 @@ bnx2_test_registers(struct bnx2 *bp)
3520 goto reg_test_err; 3525 goto reg_test_err;
3521 } 3526 }
3522 3527
3523 writel(0xffffffff, (u8 *) bp->regview + offset); 3528 writel(0xffffffff, bp->regview + offset);
3524 3529
3525 val = readl((u8 *) bp->regview + offset); 3530 val = readl(bp->regview + offset);
3526 if ((val & rw_mask) != rw_mask) { 3531 if ((val & rw_mask) != rw_mask) {
3527 goto reg_test_err; 3532 goto reg_test_err;
3528 } 3533 }
@@ -3531,11 +3536,11 @@ bnx2_test_registers(struct bnx2 *bp)
3531 goto reg_test_err; 3536 goto reg_test_err;
3532 } 3537 }
3533 3538
3534 writel(save_val, (u8 *) bp->regview + offset); 3539 writel(save_val, bp->regview + offset);
3535 continue; 3540 continue;
3536 3541
3537reg_test_err: 3542reg_test_err:
3538 writel(save_val, (u8 *) bp->regview + offset); 3543 writel(save_val, bp->regview + offset);
3539 ret = -ENODEV; 3544 ret = -ENODEV;
3540 break; 3545 break;
3541 } 3546 }
@@ -3752,10 +3757,10 @@ bnx2_test_link(struct bnx2 *bp)
3752{ 3757{
3753 u32 bmsr; 3758 u32 bmsr;
3754 3759
3755 spin_lock_irq(&bp->phy_lock); 3760 spin_lock_bh(&bp->phy_lock);
3756 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3761 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3757 bnx2_read_phy(bp, MII_BMSR, &bmsr); 3762 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3758 spin_unlock_irq(&bp->phy_lock); 3763 spin_unlock_bh(&bp->phy_lock);
3759 3764
3760 if (bmsr & BMSR_LSTATUS) { 3765 if (bmsr & BMSR_LSTATUS) {
3761 return 0; 3766 return 0;
@@ -3801,6 +3806,9 @@ bnx2_timer(unsigned long data)
3801 struct bnx2 *bp = (struct bnx2 *) data; 3806 struct bnx2 *bp = (struct bnx2 *) data;
3802 u32 msg; 3807 u32 msg;
3803 3808
3809 if (!netif_running(bp->dev))
3810 return;
3811
3804 if (atomic_read(&bp->intr_sem) != 0) 3812 if (atomic_read(&bp->intr_sem) != 0)
3805 goto bnx2_restart_timer; 3813 goto bnx2_restart_timer;
3806 3814
@@ -3809,15 +3817,16 @@ bnx2_timer(unsigned long data)
3809 3817
3810 if ((bp->phy_flags & PHY_SERDES_FLAG) && 3818 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3811 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 3819 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3812 unsigned long flags;
3813 3820
3814 spin_lock_irqsave(&bp->phy_lock, flags); 3821 spin_lock(&bp->phy_lock);
3815 if (bp->serdes_an_pending) { 3822 if (bp->serdes_an_pending) {
3816 bp->serdes_an_pending--; 3823 bp->serdes_an_pending--;
3817 } 3824 }
3818 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { 3825 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3819 u32 bmcr; 3826 u32 bmcr;
3820 3827
3828 bp->current_interval = bp->timer_interval;
3829
3821 bnx2_read_phy(bp, MII_BMCR, &bmcr); 3830 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3822 3831
3823 if (bmcr & BMCR_ANENABLE) { 3832 if (bmcr & BMCR_ANENABLE) {
@@ -3860,14 +3869,14 @@ bnx2_timer(unsigned long data)
 
 		}
 		}
+		else
+			bp->current_interval = bp->timer_interval;
 
-		spin_unlock_irqrestore(&bp->phy_lock, flags);
+		spin_unlock(&bp->phy_lock);
 	}
 
 bnx2_restart_timer:
-	bp->timer.expires = RUN_AT(bp->timer_interval);
-
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
 /* Called with rtnl_lock */
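mod_timer() safely (re)arms a timer whether or not it is already pending, which is why the expires/add_timer pairs above collapse to a single call and the rearm interval can vary through bp->current_interval. A self-contained sketch of the pattern against the timer API of this kernel era (hypothetical names):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list poll_timer;

	static void poll_timer_fn(unsigned long data)
	{
		/* ... periodic work ... */
		mod_timer(&poll_timer, jiffies + HZ);	/* rearm 1s out */
	}

	static void poll_timer_start(void)
	{
		init_timer(&poll_timer);		/* one-time setup */
		poll_timer.function = poll_timer_fn;
		poll_timer.data = 0;
		mod_timer(&poll_timer, jiffies + HZ);	/* first arm */
	}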
@@ -3920,12 +3929,7 @@ bnx2_open(struct net_device *dev)
 		return rc;
 	}
 
-	init_timer(&bp->timer);
-
-	bp->timer.expires = RUN_AT(bp->timer_interval);
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2_timer;
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 	atomic_set(&bp->intr_sem, 0);
 
@@ -3976,12 +3980,17 @@ bnx2_reset_task(void *data)
 {
 	struct bnx2 *bp = data;
 
+	if (!netif_running(bp->dev))
+		return;
+
+	bp->in_reset_task = 1;
 	bnx2_netif_stop(bp);
 
 	bnx2_init_nic(bp);
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
+	bp->in_reset_task = 0;
 }
 
 static void
@@ -4041,9 +4050,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u16 prod, ring_prod;
 	int i;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <
-		(skb_shinfo(skb)->nr_frags + 1))) {
-
+	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
 			dev->name);
@@ -4140,8 +4147,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	prod = NEXT_TX_BD(prod);
 	bp->tx_prod_bseq += skb->len;
 
-	atomic_sub(last_frag + 1, &bp->tx_avail_bd);
-
 	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
 	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
 
@@ -4150,17 +4155,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bp->tx_prod = prod;
 	dev->trans_start = jiffies;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&bp->tx_lock, flags);
-		if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) {
-			netif_stop_queue(dev);
-
-			if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
-				netif_wake_queue(dev);
-		}
-		spin_unlock_irqrestore(&bp->tx_lock, flags);
+	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
+		spin_lock(&bp->tx_lock);
+		netif_stop_queue(dev);
+
+		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+			netif_wake_queue(dev);
+		spin_unlock(&bp->tx_lock);
 	}
 
 	return NETDEV_TX_OK;
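bnx2_tx_avail() replaces the tx_avail_bd atomic; its body is not shown in this hunk, but the usual trick is to derive the number of free descriptors from the producer/consumer indices so the hot path needs no extra atomic bookkeeping. A hypothetical sketch of that style of helper (the real bnx2 helper may differ in detail, e.g. reserved slots):

	static inline u32 tx_ring_avail(u16 prod, u16 cons, u32 ring_size)
	{
		u16 used = prod - cons;	/* u16 arithmetic wraps correctly
					 * across index rollover */

		return ring_size - used;
	}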
@@ -4173,7 +4174,13 @@ bnx2_close(struct net_device *dev)
 	struct bnx2 *bp = dev->priv;
 	u32 reset_code;
 
-	flush_scheduled_work();
+	/* Calling flush_scheduled_work() may deadlock because
+	 * linkwatch_event() may be on the workqueue and it will try to get
+	 * the rtnl_lock which we are holding.
+	 */
+	while (bp->in_reset_task)
+		msleep(1);
+
 	bnx2_netif_stop(bp);
 	del_timer_sync(&bp->timer);
 	if (bp->wol)
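The in_reset_task flag added in bnx2_reset_task() is what makes this polling loop work: close waits only for the driver's own work item instead of flushing the shared workqueue, which may be holding rtnl_lock inside linkwatch_event(). A sketch of the handshake, using the names from the patch:

	/* worker (runs on the shared workqueue) */
	bp->in_reset_task = 1;
	/* ... stop traffic, reinit the NIC, restart traffic ... */
	bp->in_reset_task = 0;

	/* closer (holds rtnl_lock): sleep until the worker is done,
	 * without waiting on anything else queued on the workqueue */
	while (bp->in_reset_task)
		msleep(1);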
@@ -4390,11 +4397,11 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	bp->req_line_speed = req_line_speed;
 	bp->req_duplex = req_duplex;
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	bnx2_setup_phy(bp);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4464,19 +4471,20 @@ bnx2_nway_reset(struct net_device *dev)
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	/* Force a link down visible on the other side */
 	if (bp->phy_flags & PHY_SERDES_FLAG) {
 		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		msleep(20);
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-			bp->serdes_an_pending = SERDES_AN_TIMEOUT /
-				bp->timer_interval;
+			bp->current_interval = SERDES_AN_TIMEOUT;
+			bp->serdes_an_pending = 1;
+			mod_timer(&bp->timer, jiffies + bp->current_interval);
 		}
 	}
 
@@ -4484,7 +4492,7 @@ bnx2_nway_reset(struct net_device *dev)
 	bmcr &= ~BMCR_LOOPBACK;
 	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4670,11 +4678,11 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
 	}
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	bnx2_setup_phy(bp);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4698,7 +4706,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
 
 #define BNX2_NUM_STATS 45
 
-struct {
+static struct {
 	char string[ETH_GSTRING_LEN];
 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
 	{ "rx_bytes" },
@@ -4750,7 +4758,7 @@ struct {
 
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
 
-unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
+static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
 	STATS_OFFSET32(stat_IfHCInOctets_hi),
 	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
 	STATS_OFFSET32(stat_IfHCOutOctets_hi),
@@ -4801,7 +4809,7 @@ unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
  * skipped because of errata.
  */
-u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
+static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
 	8,0,8,8,8,8,8,8,8,8,
 	4,0,4,4,4,4,4,4,4,4,
 	4,4,4,4,4,4,4,4,4,4,
@@ -4811,7 +4819,7 @@ u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
 
 #define BNX2_NUM_TESTS 6
 
-struct {
+static struct {
 	char string[ETH_GSTRING_LEN];
 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
 	{ "register_test (offline)" },
@@ -4910,7 +4918,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
 	struct bnx2 *bp = dev->priv;
 	int i;
 	u32 *hw_stats = (u32 *) bp->stats_blk;
-	u8 *stats_len_arr = 0;
+	u8 *stats_len_arr = NULL;
 
 	if (hw_stats == NULL) {
 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
@@ -5012,7 +5020,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
 static int
 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
+	struct mii_ioctl_data *data = if_mii(ifr);
 	struct bnx2 *bp = dev->priv;
 	int err;
 
@@ -5024,9 +5032,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SIOCGMIIREG: {
 		u32 mii_regval;
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		data->val_out = mii_regval;
 
@@ -5037,9 +5045,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		return err;
 
@@ -5057,6 +5065,9 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
 	struct sockaddr *addr = p;
 	struct bnx2 *bp = dev->priv;
 
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev))
 		bnx2_set_mac_addr(bp);
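is_valid_ether_addr() (from <linux/etherdevice.h>) rejects group (multicast/broadcast) and all-zero addresses, so a bad SIOCSIFHWADDR request now fails before it reaches the MAC registers. Roughly equivalent open-coded check, for illustration only:

	static int ether_addr_ok(const u8 *a)
	{
		int i, nonzero = 0;

		for (i = 0; i < 6; i++)
			nonzero |= a[i];
		return !(a[0] & 1) && nonzero;	/* unicast, not 00:...:00 */
	}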
@@ -5305,6 +5316,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->stats_ticks = 1000000 & 0xffff00;
 
 	bp->timer_interval = HZ;
+	bp->current_interval = HZ;
 
 	/* Disable WOL support if we are running on a SERDES chip. */
 	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
@@ -5328,6 +5340,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->req_line_speed = 0;
 	if (bp->phy_flags & PHY_SERDES_FLAG) {
 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+		reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE +
+				 BNX2_PORT_HW_CFG_CONFIG);
+		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+			bp->autoneg = 0;
+			bp->req_line_speed = bp->line_speed = SPEED_1000;
+			bp->req_duplex = DUPLEX_FULL;
+		}
 	}
 	else {
 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
@@ -5335,11 +5356,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
+	init_timer(&bp->timer);
+	bp->timer.expires = RUN_AT(bp->timer_interval);
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2_timer;
+
 	return 0;
 
 err_out_unmap:
 	if (bp->regview) {
 		iounmap(bp->regview);
+		bp->regview = NULL;
 	}
 
 err_out_release:
@@ -5454,6 +5481,8 @@ bnx2_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = dev->priv;
 
+	flush_scheduled_work();
+
 	unregister_netdev(dev);
 
 	if (bp->regview)
@@ -5505,12 +5534,12 @@ bnx2_resume(struct pci_dev *pdev)
 }
 
 static struct pci_driver bnx2_pci_driver = {
-	name:		DRV_MODULE_NAME,
-	id_table:	bnx2_pci_tbl,
-	probe:		bnx2_init_one,
-	remove:		__devexit_p(bnx2_remove_one),
-	suspend:	bnx2_suspend,
-	resume:		bnx2_resume,
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnx2_pci_tbl,
+	.probe		= bnx2_init_one,
+	.remove		= __devexit_p(bnx2_remove_one),
+	.suspend	= bnx2_suspend,
+	.resume		= bnx2_resume,
 };
 
 static int __init bnx2_init(void)
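The last hunk converts the old GNU "label:" initializer extension to C99 designated initializers, which are portable and stay correct if the struct's fields are ever reordered. A minimal standalone example:

	struct ops {
		int (*open)(void);
		int (*close)(void);
	};

	static int my_open(void)  { return 0; }
	static int my_close(void) { return 0; }

	/* GNU extension (deprecated): { open: my_open, close: my_close } */
	static struct ops my_ops = {
		.close	= my_close,	/* order no longer matters */
		.open	= my_open,
	};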
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 8214a2853d0d..9ad3f5740cd8 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3841,12 +3841,12 @@ struct bnx2 {
 	struct status_block	*status_blk;
 	u32			last_status_idx;
 
-	atomic_t		tx_avail_bd;
 	struct tx_bd		*tx_desc_ring;
 	struct sw_bd		*tx_buf_ring;
 	u32			tx_prod_bseq;
 	u16			tx_prod;
 	u16			tx_cons;
+	int			tx_ring_size;
 
 #ifdef BCM_VLAN
 	struct			vlan_group *vlgrp;
@@ -3872,8 +3872,10 @@ struct bnx2 {
 	char			*name;
 
 	int			timer_interval;
+	int			current_interval;
 	struct			timer_list timer;
 	struct work_struct	reset_task;
+	int			in_reset_task;
 
 	/* Used to synchronize phy accesses. */
 	spinlock_t		phy_lock;
@@ -3927,7 +3929,6 @@ struct bnx2 {
 	u16			fw_wr_seq;
 	u16			fw_drv_pulse_wr_seq;
 
-	int			tx_ring_size;
 	dma_addr_t		tx_desc_mapping;
 
 
@@ -3985,7 +3986,7 @@ struct bnx2 {
 #define PHY_LOOPBACK		2
 
 	u8			serdes_an_pending;
-#define SERDES_AN_TIMEOUT	(2 * HZ)
+#define SERDES_AN_TIMEOUT	(HZ / 3)
 
 	u8			mac_addr[8];
 
@@ -4171,6 +4172,9 @@ struct fw_info {
 
 #define BNX2_PORT_HW_CFG_MAC_LOWER		0x00000054
 #define BNX2_PORT_HW_CFG_CONFIG			0x00000058
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK	0x001f0000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN	0x00000000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G	0x00030000
 
 #define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER	0x00000068
 #define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER	0x0000006c
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a2e8dda5afac..d2f34d5a8083 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2419,22 +2419,19 @@ out:
 	return 0;
 }
 
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype)
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
 {
 	struct bonding *bond = dev->priv;
 	struct slave *slave = NULL;
 	int ret = NET_RX_DROP;
 
-	if (!(dev->flags & IFF_MASTER)) {
+	if (!(dev->flags & IFF_MASTER))
 		goto out;
-	}
 
 	read_lock(&bond->lock);
-	slave = bond_get_slave_by_dev((struct bonding *)dev->priv,
-				      skb->real_dev);
-	if (slave == NULL) {
+	slave = bond_get_slave_by_dev((struct bonding *)dev->priv, orig_dev);
+	if (!slave)
 		goto out_unlock;
-	}
 
 	bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
 
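Both bonding receive hooks change because the packet_type callback now receives the original ingress device as a fourth argument instead of the handler digging it out of skb->real_dev. A minimal sketch of a hook with the new signature (hypothetical names; registration via dev_add_pack()):

	static int my_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
	{
		/* orig_dev: the device the frame physically arrived on */
		kfree_skb(skb);
		return 0;
	}

	static struct packet_type my_ptype = {
		.type	= __constant_htons(ETH_P_ALL),
		.func	= my_recv,
	};
	/* dev_add_pack(&my_ptype); */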
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f46823894187..673a30af5660 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -295,6 +295,6 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int  bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype);
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev);
 #endif //__BOND_3AD_H__
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5ce606d9dc03..f8fce3961197 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -354,15 +354,14 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 	_unlock_rx_hashtbl(bond);
 }
 
-static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype)
+static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 	struct arp_pkt *arp = (struct arp_pkt *)skb->data;
 	int res = NET_RX_DROP;
 
-	if (!(bond_dev->flags & IFF_MASTER)) {
+	if (!(bond_dev->flags & IFF_MASTER))
 		goto out;
-	}
 
 	if (!arp) {
 		dprintk("Packet has no ARP data\n");
@@ -1106,18 +1105,13 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 		}
 	}
 
-	if (found) {
-		/* a slave was found that is using the mac address
-		 * of the new slave
-		 */
-		printk(KERN_ERR DRV_NAME
-		       ": Error: the hw address of slave %s is not "
-		       "unique - cannot enslave it!",
-		       slave->dev->name);
-		return -EINVAL;
-	}
+	if (!found)
+		return 0;
 
-	return 0;
+	/* Try setting slave mac to bond address and fall-through
+	   to code handling that situation below... */
+	alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+			       bond->alb_info.rlb_enabled);
 	}
 
 	/* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c930da90a85..94c9f68dd16b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1604,6 +1604,44 @@ static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_
 	return 0;
 }
 
+#define BOND_INTERSECT_FEATURES \
+	(NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
+
+/*
+ * Compute the features available to the bonding device by
+ * intersection of all of the slave devices' BOND_INTERSECT_FEATURES.
+ * Call this after attaching or detaching a slave to update the
+ * bond's features.
+ */
+static int bond_compute_features(struct bonding *bond)
+{
+	int i;
+	struct slave *slave;
+	struct net_device *bond_dev = bond->dev;
+	int features = bond->bond_features;
+
+	bond_for_each_slave(bond, slave, i) {
+		struct net_device * slave_dev = slave->dev;
+		if (i == 0) {
+			features |= BOND_INTERSECT_FEATURES;
+		}
+		features &=
+			~(~slave_dev->features & BOND_INTERSECT_FEATURES);
+	}
+
+	/* turn off NETIF_F_SG if we need a csum and h/w can't do it */
+	if ((features & NETIF_F_SG) &&
+	    !(features & (NETIF_F_IP_CSUM |
+			  NETIF_F_NO_CSUM |
+			  NETIF_F_HW_CSUM))) {
+		features &= ~NETIF_F_SG;
+	}
+
+	bond_dev->features = features;
+
+	return 0;
+}
+
 /* enslave device <slave> to bond device <master> */
 static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1811,6 +1849,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
+	bond_compute_features(bond);
+
 	if (bond->params.miimon && !bond->params.use_carrier) {
 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
 
@@ -2015,7 +2055,7 @@ err_free:
 
 err_undo_flags:
 	bond_dev->features = old_features;
-	
+
 	return res;
 }
 
@@ -2100,6 +2140,8 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 	/* release the slave from its bond */
 	bond_detach_slave(bond, slave);
 
+	bond_compute_features(bond);
+
 	if (bond->primary_slave == slave) {
 		bond->primary_slave = NULL;
 	}
@@ -2243,6 +2285,8 @@ static int bond_release_all(struct net_device *bond_dev)
 		bond_alb_deinit_slave(bond, slave);
 	}
 
+	bond_compute_features(bond);
+
 	/* now that the slave is detached, unlock and perform
 	 * all the undo steps that should not be called from
 	 * within a lock.
@@ -3588,6 +3632,7 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
 static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
 {
 	struct net_device *bond_dev = slave_dev->master;
+	struct bonding *bond = bond_dev->priv;
 
 	switch (event) {
 	case NETDEV_UNREGISTER:
@@ -3626,6 +3671,9 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
 		 * TODO: handle changing the primary's name
 		 */
 		break;
+	case NETDEV_FEAT_CHANGE:
+		bond_compute_features(bond);
+		break;
 	default:
 		break;
 	}
@@ -4526,6 +4574,11 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
 	}
 }
 
+static struct ethtool_ops bond_ethtool_ops = {
+	.get_tx_csum	= ethtool_op_get_tx_csum,
+	.get_sg		= ethtool_op_get_sg,
+};
+
 /*
  * Does not allocate but creates a /proc entry.
  * Allowed to fail.
@@ -4555,6 +4608,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
 	bond_dev->stop = bond_close;
 	bond_dev->get_stats = bond_get_stats;
 	bond_dev->do_ioctl = bond_do_ioctl;
+	bond_dev->ethtool_ops = &bond_ethtool_ops;
 	bond_dev->set_multicast_list = bond_set_multicast_list;
 	bond_dev->change_mtu = bond_change_mtu;
 	bond_dev->set_mac_address = bond_set_mac_address;
@@ -4591,6 +4645,8 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
 			       NETIF_F_HW_VLAN_RX |
 			       NETIF_F_HW_VLAN_FILTER);
 
+	bond->bond_features = bond_dev->features;
+
 #ifdef CONFIG_PROC_FS
 	bond_create_proc_entry(bond);
 #endif
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d27f377b3eeb..388196980862 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -211,6 +211,9 @@ struct bonding {
 	struct   bond_params params;
 	struct   list_head vlan_list;
 	struct   vlan_group *vlgrp;
+	/* the features the bonding device supports, independently
+	 * of any slaves */
+	int	 bond_features;
 };
 
 /**
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 000000000000..91e927827c43
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
1#
2# Chelsio 10Gb NIC driver for Linux.
3#
4
5obj-$(CONFIG_CHELSIO_T1) += cxgb.o
6
7EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
8
9
10cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
11
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 000000000000..f09348802b46
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
1/*****************************************************************************
2 * *
3 * File: common.h *
4 * $Revision: 1.21 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_COMMON_H_
40#define _CXGB_COMMON_H_
41
42#include <linux/config.h>
43#include <linux/module.h>
44#include <linux/netdevice.h>
45#include <linux/types.h>
46#include <linux/delay.h>
47#include <linux/pci.h>
48#include <linux/ethtool.h>
49#include <linux/mii.h>
50#include <linux/crc32.h>
51#include <linux/init.h>
52#include <asm/io.h>
53#include <linux/pci_ids.h>
54
55#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
56#define DRV_NAME "cxgb"
57#define DRV_VERSION "2.1.1"
58#define PFX DRV_NAME ": "
59
60#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
61#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
62#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
63
64#define CH_DEVICE(devid, ssid, idx) \
65 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
66
67#define SUPPORTED_PAUSE (1 << 13)
68#define SUPPORTED_LOOPBACK (1 << 15)
69
70#define ADVERTISED_PAUSE (1 << 13)
71#define ADVERTISED_ASYM_PAUSE (1 << 14)
72
73typedef struct adapter adapter_t;
74
75void t1_elmer0_ext_intr(adapter_t *adapter);
76void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
77 int speed, int duplex, int fc);
78
79struct t1_rx_mode {
80 struct net_device *dev;
81 u32 idx;
82 struct dev_mc_list *list;
83};
84
85#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
86#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
87#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
88
89static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
90{
91 u8 *addr = 0;
92
93 if (rm->idx++ < rm->dev->mc_count) {
94 addr = rm->list->dmi_addr;
95 rm->list = rm->list->next;
96 }
97 return addr;
98}
99
100#define MAX_NPORTS 4
101
102#define SPEED_INVALID 0xffff
103#define DUPLEX_INVALID 0xff
104
105enum {
106 CHBT_BOARD_N110,
107 CHBT_BOARD_N210
108};
109
110enum {
111 CHBT_TERM_T1,
112 CHBT_TERM_T2
113};
114
115enum {
116 CHBT_MAC_PM3393,
117};
118
119enum {
120 CHBT_PHY_88X2010,
121};
122
123enum {
124 PAUSE_RX = 1 << 0,
125 PAUSE_TX = 1 << 1,
126 PAUSE_AUTONEG = 1 << 2
127};
128
129/* Revisions of T1 chip */
130enum {
131 TERM_T1A = 0,
132 TERM_T1B = 1,
133 TERM_T2 = 3
134};
135
136struct sge_params {
137 unsigned int cmdQ_size[2];
138 unsigned int freelQ_size[2];
139 unsigned int large_buf_capacity;
140 unsigned int rx_coalesce_usecs;
141 unsigned int last_rx_coalesce_raw;
142 unsigned int default_rx_coalesce_usecs;
143 unsigned int sample_interval_usecs;
144 unsigned int coalesce_enable;
145 unsigned int polling;
146};
147
148struct chelsio_pci_params {
149 unsigned short speed;
150 unsigned char width;
151 unsigned char is_pcix;
152};
153
154struct adapter_params {
155 struct sge_params sge;
156 struct chelsio_pci_params pci;
157
158 const struct board_info *brd_info;
159
160 unsigned int nports; /* # of ethernet ports */
161 unsigned int stats_update_period;
162 unsigned short chip_revision;
163 unsigned char chip_version;
164};
165
166struct link_config {
167 unsigned int supported; /* link capabilities */
168 unsigned int advertising; /* advertised capabilities */
169 unsigned short requested_speed; /* speed user has requested */
170 unsigned short speed; /* actual link speed */
171 unsigned char requested_duplex; /* duplex user has requested */
172 unsigned char duplex; /* actual link duplex */
173 unsigned char requested_fc; /* flow control user has requested */
174 unsigned char fc; /* actual link flow control */
175 unsigned char autoneg; /* autonegotiating? */
176};
177
178struct cmac;
179struct cphy;
180
181struct port_info {
182 struct net_device *dev;
183 struct cmac *mac;
184 struct cphy *phy;
185 struct link_config link_config;
186 struct net_device_stats netstats;
187};
188
189struct sge;
190struct peespi;
191
192struct adapter {
193 u8 *regs;
194 struct pci_dev *pdev;
195 unsigned long registered_device_map;
196 unsigned long open_device_map;
197 unsigned long flags;
198
199 const char *name;
200 int msg_enable;
201 u32 mmio_len;
202
203 struct work_struct ext_intr_handler_task;
204 struct adapter_params params;
205
206 struct vlan_group *vlan_grp;
207
208 /* Terminator modules. */
209 struct sge *sge;
210 struct peespi *espi;
211
212 struct port_info port[MAX_NPORTS];
213 struct work_struct stats_update_task;
214 struct timer_list stats_update_timer;
215
216 struct semaphore mib_mutex;
217 spinlock_t tpi_lock;
218 spinlock_t work_lock;
219 /* guards async operations */
220 spinlock_t async_lock ____cacheline_aligned;
221 u32 slow_intr_mask;
222};
223
224enum { /* adapter flags */
225 FULL_INIT_DONE = 1 << 0,
226 TSO_CAPABLE = 1 << 2,
227 TCP_CSUM_CAPABLE = 1 << 3,
228 UDP_CSUM_CAPABLE = 1 << 4,
229 VLAN_ACCEL_CAPABLE = 1 << 5,
230 RX_CSUM_ENABLED = 1 << 6,
231};
232
233struct mdio_ops;
234struct gmac;
235struct gphy;
236
237struct board_info {
238 unsigned char board;
239 unsigned char port_number;
240 unsigned long caps;
241 unsigned char chip_term;
242 unsigned char chip_mac;
243 unsigned char chip_phy;
244 unsigned int clock_core;
245 unsigned int clock_mc3;
246 unsigned int clock_mc4;
247 unsigned int espi_nports;
248 unsigned int clock_cspi;
249 unsigned int clock_elmer0;
250 unsigned char mdio_mdien;
251 unsigned char mdio_mdiinv;
252 unsigned char mdio_mdc;
253 unsigned char mdio_phybaseaddr;
254 struct gmac *gmac;
255 struct gphy *gphy;
256 struct mdio_ops *mdio_ops;
257 const char *desc;
258};
259
260extern struct pci_device_id t1_pci_tbl[];
261
262static inline int adapter_matches_type(const adapter_t *adapter,
263 int version, int revision)
264{
265 return adapter->params.chip_version == version &&
266 adapter->params.chip_revision == revision;
267}
268
269#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
270#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
271
272/* Returns true if an adapter supports VLAN acceleration and TSO */
273static inline int vlan_tso_capable(const adapter_t *adapter)
274{
275 return !t1_is_T1B(adapter);
276}
277
278#define for_each_port(adapter, iter) \
279 for (iter = 0; iter < (adapter)->params.nports; ++iter)
280
281#define board_info(adapter) ((adapter)->params.brd_info)
282#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
283
284static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
285{
286 return board_info(adap)->clock_core / 1000000;
287}
288
289extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
290extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
291
292extern void t1_interrupts_enable(adapter_t *adapter);
293extern void t1_interrupts_disable(adapter_t *adapter);
294extern void t1_interrupts_clear(adapter_t *adapter);
295extern int elmer0_ext_intr_handler(adapter_t *adapter);
296extern int t1_slow_intr_handler(adapter_t *adapter);
297
298extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
299extern const struct board_info *t1_get_board_info(unsigned int board_id);
300extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
301 unsigned short ssid);
302extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
303extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
304 struct adapter_params *p);
305extern int t1_init_hw_modules(adapter_t *adapter);
306extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
307extern void t1_free_sw_modules(adapter_t *adapter);
308extern void t1_fatal_err(adapter_t *adapter);
309
310extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
311extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
312extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
313
314#endif /* _CXGB_COMMON_H_ */
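Everything a caller needs to walk an adapter's ports is defined in this header; a hypothetical sketch built only on the names above (for_each_port, port_info, CH_WARN):

	static void report_ports(adapter_t *adapter)
	{
		int i;

		for_each_port(adapter, i) {
			struct port_info *p = &adapter->port[i];

			CH_WARN("port %d is %s\n", i,
				p->dev ? p->dev->name : "unattached");
		}
	}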
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 000000000000..3412342f7345
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
1/*****************************************************************************
2 * *
3 * File: cphy.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_CPHY_H_
40#define _CXGB_CPHY_H_
41
42#include "common.h"
43
44struct mdio_ops {
45 void (*init)(adapter_t *adapter, const struct board_info *bi);
46 int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
47 int reg_addr, unsigned int *val);
48 int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
49 int reg_addr, unsigned int val);
50};
51
52/* PHY interrupt types */
53enum {
54 cphy_cause_link_change = 0x1,
55 cphy_cause_error = 0x2
56};
57
58struct cphy;
59
60/* PHY operations */
61struct cphy_ops {
62 void (*destroy)(struct cphy *);
63 int (*reset)(struct cphy *, int wait);
64
65 int (*interrupt_enable)(struct cphy *);
66 int (*interrupt_disable)(struct cphy *);
67 int (*interrupt_clear)(struct cphy *);
68 int (*interrupt_handler)(struct cphy *);
69
70 int (*autoneg_enable)(struct cphy *);
71 int (*autoneg_disable)(struct cphy *);
72 int (*autoneg_restart)(struct cphy *);
73
74 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
75 int (*set_loopback)(struct cphy *, int on);
76 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
77 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
78 int *duplex, int *fc);
79};
80
81/* A PHY instance */
82struct cphy {
83 int addr; /* PHY address */
84 adapter_t *adapter; /* associated adapter */
85 struct cphy_ops *ops; /* PHY operations */
86 int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
87 int reg_addr, unsigned int *val);
88 int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
89 int reg_addr, unsigned int val);
90 struct cphy_instance *instance;
91};
92
93/* Convenience MDIO read/write wrappers */
94static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
95 unsigned int *valp)
96{
97 return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
98}
99
100static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
101 unsigned int val)
102{
103 return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
104}
105
106static inline int simple_mdio_read(struct cphy *cphy, int reg,
107 unsigned int *valp)
108{
109 return mdio_read(cphy, 0, reg, valp);
110}
111
112static inline int simple_mdio_write(struct cphy *cphy, int reg,
113 unsigned int val)
114{
115 return mdio_write(cphy, 0, reg, val);
116}
117
118/* Convenience initializer */
119static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
120 int phy_addr, struct cphy_ops *phy_ops,
121 struct mdio_ops *mdio_ops)
122{
123 phy->adapter = adapter;
124 phy->addr = phy_addr;
125 phy->ops = phy_ops;
126 if (mdio_ops) {
127 phy->mdio_read = mdio_ops->read;
128 phy->mdio_write = mdio_ops->write;
129 }
130}
131
132/* Operations of the PHY-instance factory */
133struct gphy {
134 /* Construct a PHY instance with the given PHY address */
135 struct cphy *(*create)(adapter_t *adapter, int phy_addr,
136 struct mdio_ops *mdio_ops);
137
138 /*
139 * Reset the PHY chip. This resets the whole PHY chip, not individual
140 * ports.
141 */
142 int (*reset)(adapter_t *adapter);
143};
144
145extern struct gphy t1_mv88x201x_ops;
146extern struct gphy t1_dummy_phy_ops;
147
148#endif /* _CXGB_CPHY_H_ */
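The cphy layer funnels all PHY register access through the per-board mdio_ops installed by cphy_init(). A hypothetical helper using only the inline wrappers defined above:

	/* read-modify-write one PHY register through the MDIO wrappers */
	static int cphy_set_bits(struct cphy *cphy, int reg, unsigned int bits)
	{
		unsigned int val;
		int ret = simple_mdio_read(cphy, reg, &val);

		if (!ret)
			ret = simple_mdio_write(cphy, reg, val | bits);
		return ret;
	}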
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 000000000000..27925e487bcf
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
1/*****************************************************************************
2 * *
3 * File: cpl5_cmd.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_CPL5_CMD_H_
40#define _CXGB_CPL5_CMD_H_
41
42#include <asm/byteorder.h>
43
44#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
45#error "Adjust your <asm/byteorder.h> defines"
46#endif
47
48enum CPL_opcode {
49 CPL_RX_PKT = 0xAD,
50 CPL_TX_PKT = 0xB2,
51 CPL_TX_PKT_LSO = 0xB6,
52};
53
54enum { /* TX_PKT_LSO ethernet types */
55 CPL_ETH_II,
56 CPL_ETH_II_VLAN,
57 CPL_ETH_802_3,
58 CPL_ETH_802_3_VLAN
59};
60
61struct cpl_rx_data {
62 u32 rsvd0;
63 u32 len;
64 u32 seq;
65 u16 urg;
66 u8 rsvd1;
67 u8 status;
68};
69
70/*
71 * We want this header's alignment to be no more stringent than 2-byte aligned.
72 * All fields are u8 or u16 except for the length. However that field is not
73 * used so we break it into 2 16-bit parts to easily meet our alignment needs.
74 */
75struct cpl_tx_pkt {
76 u8 opcode;
77#if defined(__LITTLE_ENDIAN_BITFIELD)
78 u8 iff:4;
79 u8 ip_csum_dis:1;
80 u8 l4_csum_dis:1;
81 u8 vlan_valid:1;
82 u8 rsvd:1;
83#else
84 u8 rsvd:1;
85 u8 vlan_valid:1;
86 u8 l4_csum_dis:1;
87 u8 ip_csum_dis:1;
88 u8 iff:4;
89#endif
90 u16 vlan;
91 u16 len_hi;
92 u16 len_lo;
93};
94
95struct cpl_tx_pkt_lso {
96 u8 opcode;
97#if defined(__LITTLE_ENDIAN_BITFIELD)
98 u8 iff:4;
99 u8 ip_csum_dis:1;
100 u8 l4_csum_dis:1;
101 u8 vlan_valid:1;
102 u8 rsvd:1;
103#else
104 u8 rsvd:1;
105 u8 vlan_valid:1;
106 u8 l4_csum_dis:1;
107 u8 ip_csum_dis:1;
108 u8 iff:4;
109#endif
110 u16 vlan;
111 u32 len;
112
113 u32 rsvd2;
114 u8 rsvd3;
115#if defined(__LITTLE_ENDIAN_BITFIELD)
116 u8 tcp_hdr_words:4;
117 u8 ip_hdr_words:4;
118#else
119 u8 ip_hdr_words:4;
120 u8 tcp_hdr_words:4;
121#endif
122 u16 eth_type_mss;
123};
124
125struct cpl_rx_pkt {
126 u8 opcode;
127#if defined(__LITTLE_ENDIAN_BITFIELD)
128 u8 iff:4;
129 u8 csum_valid:1;
130 u8 bad_pkt:1;
131 u8 vlan_valid:1;
132 u8 rsvd:1;
133#else
134 u8 rsvd:1;
135 u8 vlan_valid:1;
136 u8 bad_pkt:1;
137 u8 csum_valid:1;
138 u8 iff:4;
139#endif
140 u16 csum;
141 u16 vlan;
142 u16 len;
143};
144
145#endif /* _CXGB_CPL5_CMD_H_ */
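As the comment above struct cpl_tx_pkt explains, the 32-bit length is split into len_hi/len_lo so the struct needs only 2-byte alignment. A sketch of packing and unpacking under that layout (byte-order handling, if any, left to the caller, as in the header itself):

	static void set_tx_len(struct cpl_tx_pkt *p, u32 len)
	{
		p->len_hi = (u16)(len >> 16);
		p->len_lo = (u16)(len & 0xffff);
	}

	static u32 get_tx_len(const struct cpl_tx_pkt *p)
	{
		return ((u32)p->len_hi << 16) | p->len_lo;
	}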
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 000000000000..28ae478b386d
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
1/*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#include "common.h"
40#include <linux/config.h>
41#include <linux/module.h>
42#include <linux/init.h>
43#include <linux/pci.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <linux/if_vlan.h>
47#include <linux/mii.h>
48#include <linux/sockios.h>
49#include <linux/proc_fs.h>
50#include <linux/dma-mapping.h>
51#include <asm/uaccess.h>
52
53#include "cpl5_cmd.h"
54#include "regs.h"
55#include "gmac.h"
56#include "cphy.h"
57#include "sge.h"
58#include "espi.h"
59
60#ifdef work_struct
61#include <linux/tqueue.h>
62#define INIT_WORK INIT_TQUEUE
63#define schedule_work schedule_task
64#define flush_scheduled_work flush_scheduled_tasks
65
66static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
67{
68 mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
69}
70
71static inline void cancel_mac_stats_update(struct adapter *ap)
72{
73 del_timer_sync(&ap->stats_update_timer);
74 flush_scheduled_tasks();
75}
76
77/*
78 * Stats update timer for 2.4. It schedules a task to do the actual update as
79 * we need to access MAC statistics in process context.
80 */
81static void mac_stats_timer(unsigned long data)
82{
83 struct adapter *ap = (struct adapter *)data;
84
85 schedule_task(&ap->stats_update_task);
86}
87#else
88#include <linux/workqueue.h>
89
90static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
91{
92 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
93}
94
95static inline void cancel_mac_stats_update(struct adapter *ap)
96{
97 cancel_delayed_work(&ap->stats_update_task);
98}
99#endif
100
101#define MAX_CMDQ_ENTRIES 16384
102#define MAX_CMDQ1_ENTRIES 1024
103#define MAX_RX_BUFFERS 16384
104#define MAX_RX_JUMBO_BUFFERS 16384
105#define MAX_TX_BUFFERS_HIGH 16384U
106#define MAX_TX_BUFFERS_LOW 1536U
107#define MIN_FL_ENTRIES 32
108
109#define PORT_MASK ((1 << MAX_NPORTS) - 1)
110
111#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
113 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
114
115/*
116 * The EEPROM is actually bigger but only the first few bytes are used so we
117 * only report those.
118 */
119#define EEPROM_SIZE 32
120
121MODULE_DESCRIPTION(DRV_DESCRIPTION);
122MODULE_AUTHOR("Chelsio Communications");
123MODULE_LICENSE("GPL");
124
125static int dflt_msg_enable = DFLT_MSG_ENABLE;
126
127MODULE_PARM(dflt_msg_enable, "i");
128MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
129
130
131static const char pci_speed[][4] = {
132 "33", "66", "100", "133"
133};
134
135/*
136 * Setup MAC to receive the types of packets we want.
137 */
138static void t1_set_rxmode(struct net_device *dev)
139{
140 struct adapter *adapter = dev->priv;
141 struct cmac *mac = adapter->port[dev->if_port].mac;
142 struct t1_rx_mode rm;
143
144 rm.dev = dev;
145 rm.idx = 0;
146 rm.list = dev->mc_list;
147 mac->ops->set_rx_mode(mac, &rm);
148}
149
150static void link_report(struct port_info *p)
151{
152 if (!netif_carrier_ok(p->dev))
153 printk(KERN_INFO "%s: link down\n", p->dev->name);
154 else {
155 const char *s = "10Mbps";
156
157 switch (p->link_config.speed) {
158 case SPEED_10000: s = "10Gbps"; break;
159 case SPEED_1000: s = "1000Mbps"; break;
160 case SPEED_100: s = "100Mbps"; break;
161 }
162
163 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
164 p->dev->name, s,
165 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
166 }
167}
168
169void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
170 int speed, int duplex, int pause)
171{
172 struct port_info *p = &adapter->port[port_id];
173
174 if (link_stat != netif_carrier_ok(p->dev)) {
175 if (link_stat)
176 netif_carrier_on(p->dev);
177 else
178 netif_carrier_off(p->dev);
179 link_report(p);
180
181 }
182}
183
184static void link_start(struct port_info *p)
185{
186 struct cmac *mac = p->mac;
187
188 mac->ops->reset(mac);
189 if (mac->ops->macaddress_set)
190 mac->ops->macaddress_set(mac, p->dev->dev_addr);
191 t1_set_rxmode(p->dev);
192 t1_link_start(p->phy, mac, &p->link_config);
193 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
194}
195
196static void enable_hw_csum(struct adapter *adapter)
197{
198 if (adapter->flags & TSO_CAPABLE)
199 t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
200 t1_tp_set_tcp_checksum_offload(adapter, 1);
201}
202
203/*
204 * Things to do upon first use of a card.
205 * This must run with the rtnl lock held.
206 */
207static int cxgb_up(struct adapter *adapter)
208{
209 int err = 0;
210
211 if (!(adapter->flags & FULL_INIT_DONE)) {
212 err = t1_init_hw_modules(adapter);
213 if (err)
214 goto out_err;
215
216 enable_hw_csum(adapter);
217 adapter->flags |= FULL_INIT_DONE;
218 }
219
220 t1_interrupts_clear(adapter);
221 if ((err = request_irq(adapter->pdev->irq,
222 t1_select_intr_handler(adapter), SA_SHIRQ,
223 adapter->name, adapter))) {
224 goto out_err;
225 }
226 t1_sge_start(adapter->sge);
227 t1_interrupts_enable(adapter);
228 out_err:
229 return err;
230}
231
232/*
233 * Release resources when all the ports have been stopped.
234 */
235static void cxgb_down(struct adapter *adapter)
236{
237 t1_sge_stop(adapter->sge);
238 t1_interrupts_disable(adapter);
239 free_irq(adapter->pdev->irq, adapter);
240}
241
242static int cxgb_open(struct net_device *dev)
243{
244 int err;
245 struct adapter *adapter = dev->priv;
246 int other_ports = adapter->open_device_map & PORT_MASK;
247
248 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
249 return err;
250
251 __set_bit(dev->if_port, &adapter->open_device_map);
252 link_start(&adapter->port[dev->if_port]);
253 netif_start_queue(dev);
254 if (!other_ports && adapter->params.stats_update_period)
255 schedule_mac_stats_update(adapter,
256 adapter->params.stats_update_period);
257 return 0;
258}
259
260static int cxgb_close(struct net_device *dev)
261{
262 struct adapter *adapter = dev->priv;
263 struct port_info *p = &adapter->port[dev->if_port];
264 struct cmac *mac = p->mac;
265
266 netif_stop_queue(dev);
267 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
268 netif_carrier_off(dev);
269
270 clear_bit(dev->if_port, &adapter->open_device_map);
271 if (adapter->params.stats_update_period &&
272 !(adapter->open_device_map & PORT_MASK)) {
273 /* Stop statistics accumulation. */
274 smp_mb__after_clear_bit();
275 spin_lock(&adapter->work_lock); /* sync with update task */
276 spin_unlock(&adapter->work_lock);
277 cancel_mac_stats_update(adapter);
278 }
279
280 if (!adapter->open_device_map)
281 cxgb_down(adapter);
282 return 0;
283}
284
285static struct net_device_stats *t1_get_stats(struct net_device *dev)
286{
287 struct adapter *adapter = dev->priv;
288 struct port_info *p = &adapter->port[dev->if_port];
289 struct net_device_stats *ns = &p->netstats;
290 const struct cmac_statistics *pstats;
291
292 /* Do a full update of the MAC stats */
293 pstats = p->mac->ops->statistics_update(p->mac,
294 MAC_STATS_UPDATE_FULL);
295
296 ns->tx_packets = pstats->TxUnicastFramesOK +
297 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
298
299 ns->rx_packets = pstats->RxUnicastFramesOK +
300 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
301
302 ns->tx_bytes = pstats->TxOctetsOK;
303 ns->rx_bytes = pstats->RxOctetsOK;
304
305 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
306 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
307 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
308 pstats->RxFCSErrors + pstats->RxAlignErrors +
309 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
310 pstats->RxSymbolErrors + pstats->RxRuntErrors;
311
312 ns->multicast = pstats->RxMulticastFramesOK;
313 ns->collisions = pstats->TxTotalCollisions;
314
315 /* detailed rx_errors */
316 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
317 pstats->RxJabberErrors;
318 ns->rx_over_errors = 0;
319 ns->rx_crc_errors = pstats->RxFCSErrors;
320 ns->rx_frame_errors = pstats->RxAlignErrors;
321 ns->rx_fifo_errors = 0;
322 ns->rx_missed_errors = 0;
323
324 /* detailed tx_errors */
325 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
326 ns->tx_carrier_errors = 0;
327 ns->tx_fifo_errors = pstats->TxUnderrun;
328 ns->tx_heartbeat_errors = 0;
329 ns->tx_window_errors = pstats->TxLateCollisions;
330 return ns;
331}
332
333static u32 get_msglevel(struct net_device *dev)
334{
335 struct adapter *adapter = dev->priv;
336
337 return adapter->msg_enable;
338}
339
340static void set_msglevel(struct net_device *dev, u32 val)
341{
342 struct adapter *adapter = dev->priv;
343
344 adapter->msg_enable = val;
345}
346
347static char stats_strings[][ETH_GSTRING_LEN] = {
348 "TxOctetsOK",
349 "TxOctetsBad",
350 "TxUnicastFramesOK",
351 "TxMulticastFramesOK",
352 "TxBroadcastFramesOK",
353 "TxPauseFrames",
354 "TxFramesWithDeferredXmissions",
355 "TxLateCollisions",
356 "TxTotalCollisions",
357 "TxFramesAbortedDueToXSCollisions",
358 "TxUnderrun",
359 "TxLengthErrors",
360 "TxInternalMACXmitError",
361 "TxFramesWithExcessiveDeferral",
362 "TxFCSErrors",
363
364 "RxOctetsOK",
365 "RxOctetsBad",
366 "RxUnicastFramesOK",
367 "RxMulticastFramesOK",
368 "RxBroadcastFramesOK",
369 "RxPauseFrames",
370 "RxFCSErrors",
371 "RxAlignErrors",
372 "RxSymbolErrors",
373 "RxDataErrors",
374 "RxSequenceErrors",
375 "RxRuntErrors",
376 "RxJabberErrors",
377 "RxInternalMACRcvError",
378 "RxInRangeLengthErrors",
379 "RxOutOfRangeLengthField",
380 "RxFrameTooLongErrors",
381
382 "TSO",
383 "VLANextractions",
384 "VLANinsertions",
385 "RxCsumGood",
386 "TxCsumOffload",
387 "RxDrops"
388
389 "respQ_empty",
390 "respQ_overflow",
391 "freelistQ_empty",
392 "pkt_too_big",
393 "pkt_mismatch",
394 "cmdQ_full0",
395 "cmdQ_full1",
396 "tx_ipfrags",
397 "tx_reg_pkts",
398 "tx_lso_pkts",
399 "tx_do_cksum",
400
401 "espi_DIP2ParityErr",
402 "espi_DIP4Err",
403 "espi_RxDrops",
404 "espi_TxDrops",
405 "espi_RxOvfl",
406 "espi_ParityErr"
407};
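/*
 * Note: the labels above must stay in the same order, and the same
 * count, as the values written out by get_stats() below;
 * get_stats_count() sizes the ethtool buffer from this array.
 */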
408
409#define T2_REGMAP_SIZE (3 * 1024)
410
411static int get_regs_len(struct net_device *dev)
412{
413 return T2_REGMAP_SIZE;
414}
415
416static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
417{
418 struct adapter *adapter = dev->priv;
419
420 strcpy(info->driver, DRV_NAME);
421 strcpy(info->version, DRV_VERSION);
422 strcpy(info->fw_version, "N/A");
423 strcpy(info->bus_info, pci_name(adapter->pdev));
424}
425
426static int get_stats_count(struct net_device *dev)
427{
428 return ARRAY_SIZE(stats_strings);
429}
430
431static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
432{
433 if (stringset == ETH_SS_STATS)
434 memcpy(data, stats_strings, sizeof(stats_strings));
435}
436
437static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
438 u64 *data)
439{
440 struct adapter *adapter = dev->priv;
441 struct cmac *mac = adapter->port[dev->if_port].mac;
442 const struct cmac_statistics *s;
443 const struct sge_port_stats *ss;
444	const struct sge_intr_counts *t;
	const struct espi_intr_counts *e;
445
446 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
447 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
448	t = t1_sge_get_intr_counts(adapter->sge);
	e = t1_espi_get_intr_counts(adapter->espi);
449
450 *data++ = s->TxOctetsOK;
451 *data++ = s->TxOctetsBad;
452 *data++ = s->TxUnicastFramesOK;
453 *data++ = s->TxMulticastFramesOK;
454 *data++ = s->TxBroadcastFramesOK;
455 *data++ = s->TxPauseFrames;
456 *data++ = s->TxFramesWithDeferredXmissions;
457 *data++ = s->TxLateCollisions;
458 *data++ = s->TxTotalCollisions;
459 *data++ = s->TxFramesAbortedDueToXSCollisions;
460 *data++ = s->TxUnderrun;
461 *data++ = s->TxLengthErrors;
462 *data++ = s->TxInternalMACXmitError;
463 *data++ = s->TxFramesWithExcessiveDeferral;
464 *data++ = s->TxFCSErrors;
465
466 *data++ = s->RxOctetsOK;
467 *data++ = s->RxOctetsBad;
468 *data++ = s->RxUnicastFramesOK;
469 *data++ = s->RxMulticastFramesOK;
470 *data++ = s->RxBroadcastFramesOK;
471 *data++ = s->RxPauseFrames;
472 *data++ = s->RxFCSErrors;
473 *data++ = s->RxAlignErrors;
474 *data++ = s->RxSymbolErrors;
475 *data++ = s->RxDataErrors;
476 *data++ = s->RxSequenceErrors;
477 *data++ = s->RxRuntErrors;
478 *data++ = s->RxJabberErrors;
479 *data++ = s->RxInternalMACRcvError;
480 *data++ = s->RxInRangeLengthErrors;
481 *data++ = s->RxOutOfRangeLengthField;
482 *data++ = s->RxFrameTooLongErrors;
483
484 *data++ = ss->tso;
485 *data++ = ss->vlan_xtract;
486 *data++ = ss->vlan_insert;
487 *data++ = ss->rx_cso_good;
488 *data++ = ss->tx_cso;
489 *data++ = ss->rx_drops;
490
491 *data++ = (u64)t->respQ_empty;
492 *data++ = (u64)t->respQ_overflow;
493 *data++ = (u64)t->freelistQ_empty;
494 *data++ = (u64)t->pkt_too_big;
495 *data++ = (u64)t->pkt_mismatch;
496 *data++ = (u64)t->cmdQ_full[0];
497 *data++ = (u64)t->cmdQ_full[1];
498 *data++ = (u64)t->tx_ipfrags;
499 *data++ = (u64)t->tx_reg_pkts;
500 *data++ = (u64)t->tx_lso_pkts;
501 *data++ = (u64)t->tx_do_cksum;

	/* ESPI error counters, matching the espi_* labels at the end of
	 * stats_strings (t1_espi_get_intr_counts() is declared in espi.h) */
	*data++ = (u64)e->DIP2_parity_err;
	*data++ = (u64)e->DIP4_err;
	*data++ = (u64)e->rx_drops;
	*data++ = (u64)e->tx_drops;
	*data++ = (u64)e->rx_ovflw;
	*data++ = (u64)e->parity_err;
502}
503
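/*
 * Copy the 32-bit registers in [start, end] (inclusive) into buf at
 * their natural offsets; get_regs() below uses this to fill the
 * register dump after zeroing it to T2_REGMAP_SIZE.
 */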
504static inline void reg_block_dump(struct adapter *ap, void *buf,
505 unsigned int start, unsigned int end)
506{
507 u32 *p = buf + start;
508
509 for ( ; start <= end; start += sizeof(u32))
510 *p++ = readl(ap->regs + start);
511}
512
513static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
514 void *buf)
515{
516 struct adapter *ap = dev->priv;
517
518 /*
519 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
520 */
521 regs->version = 2;
522
523 memset(buf, 0, T2_REGMAP_SIZE);
524 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
525}
526
527static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
528{
529 struct adapter *adapter = dev->priv;
530 struct port_info *p = &adapter->port[dev->if_port];
531
532 cmd->supported = p->link_config.supported;
533 cmd->advertising = p->link_config.advertising;
534
535 if (netif_carrier_ok(dev)) {
536 cmd->speed = p->link_config.speed;
537 cmd->duplex = p->link_config.duplex;
538 } else {
539 cmd->speed = -1;
540 cmd->duplex = -1;
541 }
542
543 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
544 cmd->phy_address = p->phy->addr;
545 cmd->transceiver = XCVR_EXTERNAL;
546 cmd->autoneg = p->link_config.autoneg;
547 cmd->maxtxpkt = 0;
548 cmd->maxrxpkt = 0;
549 return 0;
550}
551
552static int speed_duplex_to_caps(int speed, int duplex)
553{
554 int cap = 0;
555
556 switch (speed) {
557 case SPEED_10:
558 if (duplex == DUPLEX_FULL)
559 cap = SUPPORTED_10baseT_Full;
560 else
561 cap = SUPPORTED_10baseT_Half;
562 break;
563 case SPEED_100:
564 if (duplex == DUPLEX_FULL)
565 cap = SUPPORTED_100baseT_Full;
566 else
567 cap = SUPPORTED_100baseT_Half;
568 break;
569 case SPEED_1000:
570 if (duplex == DUPLEX_FULL)
571 cap = SUPPORTED_1000baseT_Full;
572 else
573 cap = SUPPORTED_1000baseT_Half;
574 break;
575 case SPEED_10000:
576 if (duplex == DUPLEX_FULL)
577 cap = SUPPORTED_10000baseT_Full;
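		/* 10G has no half-duplex mode, hence no else clause here */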
578 }
579 return cap;
580}
581
582#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
583 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
584 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
585 ADVERTISED_10000baseT_Full)
586
587static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
588{
589 struct adapter *adapter = dev->priv;
590 struct port_info *p = &adapter->port[dev->if_port];
591 struct link_config *lc = &p->link_config;
592
593 if (!(lc->supported & SUPPORTED_Autoneg))
594 return -EOPNOTSUPP; /* can't change speed/duplex */
595
596 if (cmd->autoneg == AUTONEG_DISABLE) {
597 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
598
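		/*
		 * Reject unsupported speed/duplex combinations, and forced
		 * gigabit: 1000BASE-T may only be used with autonegotiation.
		 */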
599 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
600 return -EINVAL;
601 lc->requested_speed = cmd->speed;
602 lc->requested_duplex = cmd->duplex;
603 lc->advertising = 0;
604 } else {
605 cmd->advertising &= ADVERTISED_MASK;
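		/*
		 * x & (x - 1) clears the lowest set bit, so it is non-zero
		 * iff more than one link mode is advertised; in that case
		 * fall back to advertising everything the port supports.
		 */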
606 if (cmd->advertising & (cmd->advertising - 1))
607 cmd->advertising = lc->supported;
608 cmd->advertising &= lc->supported;
609 if (!cmd->advertising)
610 return -EINVAL;
611 lc->requested_speed = SPEED_INVALID;
612 lc->requested_duplex = DUPLEX_INVALID;
613 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
614 }
615 lc->autoneg = cmd->autoneg;
616 if (netif_running(dev))
617 t1_link_start(p->phy, p->mac, lc);
618 return 0;
619}
620
621static void get_pauseparam(struct net_device *dev,
622 struct ethtool_pauseparam *epause)
623{
624 struct adapter *adapter = dev->priv;
625 struct port_info *p = &adapter->port[dev->if_port];
626
627 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
628 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
629 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
630}
631
632static int set_pauseparam(struct net_device *dev,
633 struct ethtool_pauseparam *epause)
634{
635 struct adapter *adapter = dev->priv;
636 struct port_info *p = &adapter->port[dev->if_port];
637 struct link_config *lc = &p->link_config;
638
639 if (epause->autoneg == AUTONEG_DISABLE)
640 lc->requested_fc = 0;
641 else if (lc->supported & SUPPORTED_Autoneg)
642 lc->requested_fc = PAUSE_AUTONEG;
643 else
644 return -EINVAL;
645
646 if (epause->rx_pause)
647 lc->requested_fc |= PAUSE_RX;
648 if (epause->tx_pause)
649 lc->requested_fc |= PAUSE_TX;
650 if (lc->autoneg == AUTONEG_ENABLE) {
651 if (netif_running(dev))
652 t1_link_start(p->phy, p->mac, lc);
653 } else {
654 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
655 if (netif_running(dev))
656 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
657 lc->fc);
658 }
659 return 0;
660}
661
662static u32 get_rx_csum(struct net_device *dev)
663{
664 struct adapter *adapter = dev->priv;
665
666 return (adapter->flags & RX_CSUM_ENABLED) != 0;
667}
668
669static int set_rx_csum(struct net_device *dev, u32 data)
670{
671 struct adapter *adapter = dev->priv;
672
673 if (data)
674 adapter->flags |= RX_CSUM_ENABLED;
675 else
676 adapter->flags &= ~RX_CSUM_ENABLED;
677 return 0;
678}
679
680static int set_tso(struct net_device *dev, u32 value)
681{
682 struct adapter *adapter = dev->priv;
683
684 if (!(adapter->flags & TSO_CAPABLE))
685 return value ? -EOPNOTSUPP : 0;
686 return ethtool_op_set_tso(dev, value);
687}
688
689static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
690{
691 struct adapter *adapter = dev->priv;
692 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
693
694 e->rx_max_pending = MAX_RX_BUFFERS;
695 e->rx_mini_max_pending = 0;
696 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
697 e->tx_max_pending = MAX_CMDQ_ENTRIES;
698
699 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
700 e->rx_mini_pending = 0;
701 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
702 e->tx_pending = adapter->params.sge.cmdQ_size[0];
703}
704
705static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
706{
707 struct adapter *adapter = dev->priv;
708 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
709
710 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
711 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
712 e->tx_pending > MAX_CMDQ_ENTRIES ||
713 e->rx_pending < MIN_FL_ENTRIES ||
714 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
715 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
716 return -EINVAL;
717
718 if (adapter->flags & FULL_INIT_DONE)
719 return -EBUSY;
720
721 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
722 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
723 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
724 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
725 MAX_CMDQ1_ENTRIES : e->tx_pending;
726 return 0;
727}
728
729static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
730{
731 struct adapter *adapter = dev->priv;
732
733 /*
734 * If RX coalescing is requested we use NAPI, otherwise interrupts.
735 * This choice can be made only when all ports and the TOE are off.
736 */
737 if (adapter->open_device_map == 0)
738 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
739
740 if (adapter->params.sge.polling) {
741 adapter->params.sge.rx_coalesce_usecs = 0;
742 } else {
743 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
744 }
745 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
746 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
747 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
748 return 0;
749}
750
751static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
752{
753 struct adapter *adapter = dev->priv;
754
755 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
756 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
757 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
758 return 0;
759}
760
761static int get_eeprom_len(struct net_device *dev)
762{
763 return EEPROM_SIZE;
764}
765
766#define EEPROM_MAGIC(ap) \
767 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
768
769static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
770 u8 *data)
771{
772 int i;
773 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
774 struct adapter *adapter = dev->priv;
775
776 e->magic = EEPROM_MAGIC(adapter);
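	/*
	 * The serial EEPROM is read one 32-bit word at a time, so round
	 * the offset down to a word boundary, read every word overlapping
	 * [offset, offset + len), and copy out just the requested bytes.
	 */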
777 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
778 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
779 memcpy(data, buf + e->offset, e->len);
780 return 0;
781}
782
783static struct ethtool_ops t1_ethtool_ops = {
784 .get_settings = get_settings,
785 .set_settings = set_settings,
786 .get_drvinfo = get_drvinfo,
787 .get_msglevel = get_msglevel,
788 .set_msglevel = set_msglevel,
789 .get_ringparam = get_sge_param,
790 .set_ringparam = set_sge_param,
791 .get_coalesce = get_coalesce,
792 .set_coalesce = set_coalesce,
793 .get_eeprom_len = get_eeprom_len,
794 .get_eeprom = get_eeprom,
795 .get_pauseparam = get_pauseparam,
796 .set_pauseparam = set_pauseparam,
797 .get_rx_csum = get_rx_csum,
798 .set_rx_csum = set_rx_csum,
799 .get_tx_csum = ethtool_op_get_tx_csum,
800 .set_tx_csum = ethtool_op_set_tx_csum,
801 .get_sg = ethtool_op_get_sg,
802 .set_sg = ethtool_op_set_sg,
803 .get_link = ethtool_op_get_link,
804 .get_strings = get_strings,
805 .get_stats_count = get_stats_count,
806 .get_ethtool_stats = get_stats,
807 .get_regs_len = get_regs_len,
808 .get_regs = get_regs,
809 .get_tso = ethtool_op_get_tso,
810 .set_tso = set_tso,
811};
812
813static void cxgb_proc_cleanup(struct adapter *adapter,
814 struct proc_dir_entry *dir)
815{
816 const char *name;
817 name = adapter->name;
818 remove_proc_entry(name, dir);
819}
820//#define chtoe_setup_toedev(adapter) NULL
821#define update_mtu_tab(adapter)
822#define write_smt_entry(adapter, idx)
823
824static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
825{
826 struct adapter *adapter = dev->priv;
827 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
828
829 switch (cmd) {
830 case SIOCGMIIPHY:
831 data->phy_id = adapter->port[dev->if_port].phy->addr;
832 /* FALLTHRU */
833 case SIOCGMIIREG: {
834 struct cphy *phy = adapter->port[dev->if_port].phy;
835 u32 val;
836
837 if (!phy->mdio_read)
838 return -EOPNOTSUPP;
839 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
840 &val);
841 data->val_out = val;
842 break;
843 }
844 case SIOCSMIIREG: {
845 struct cphy *phy = adapter->port[dev->if_port].phy;
846
847 if (!capable(CAP_NET_ADMIN))
848 return -EPERM;
849 if (!phy->mdio_write)
850 return -EOPNOTSUPP;
851 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
852 data->val_in);
853 break;
854 }
855
856 default:
857 return -EOPNOTSUPP;
858 }
859 return 0;
860}
861
862static int t1_change_mtu(struct net_device *dev, int new_mtu)
863{
864 int ret;
865 struct adapter *adapter = dev->priv;
866 struct cmac *mac = adapter->port[dev->if_port].mac;
867
868 if (!mac->ops->set_mtu)
869 return -EOPNOTSUPP;
870	if (new_mtu < 68) /* RFC 791: minimum datagram that must pass unfragmented */
871 return -EINVAL;
872 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
873 return ret;
874 dev->mtu = new_mtu;
875 return 0;
876}
877
878static int t1_set_mac_addr(struct net_device *dev, void *p)
879{
880 struct adapter *adapter = dev->priv;
881 struct cmac *mac = adapter->port[dev->if_port].mac;
882 struct sockaddr *addr = p;
883
884 if (!mac->ops->macaddress_set)
885 return -EOPNOTSUPP;
886
887 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
888 mac->ops->macaddress_set(mac, dev->dev_addr);
889 return 0;
890}
891
892#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
893static void vlan_rx_register(struct net_device *dev,
894 struct vlan_group *grp)
895{
896 struct adapter *adapter = dev->priv;
897
898 spin_lock_irq(&adapter->async_lock);
899 adapter->vlan_grp = grp;
900 t1_set_vlan_accel(adapter, grp != NULL);
901 spin_unlock_irq(&adapter->async_lock);
902}
903
904static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
905{
906 struct adapter *adapter = dev->priv;
907
908 spin_lock_irq(&adapter->async_lock);
909 if (adapter->vlan_grp)
910 adapter->vlan_grp->vlan_devices[vid] = NULL;
911 spin_unlock_irq(&adapter->async_lock);
912}
913#endif
914
915#ifdef CONFIG_NET_POLL_CONTROLLER
916static void t1_netpoll(struct net_device *dev)
917{
918 unsigned long flags;
919 struct adapter *adapter = dev->priv;
920
921 local_irq_save(flags);
922 t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
923 local_irq_restore(flags);
924}
925#endif
926
927/*
928 * Periodic accumulation of MAC statistics. This is used only if the MAC
929 * does not have any other way to prevent stats counter overflow.
930 */
931static void mac_stats_task(void *data)
932{
933 int i;
934 struct adapter *adapter = data;
935
936 for_each_port(adapter, i) {
937 struct port_info *p = &adapter->port[i];
938
939 if (netif_running(p->dev))
940 p->mac->ops->statistics_update(p->mac,
941 MAC_STATS_UPDATE_FAST);
942 }
943
944 /* Schedule the next statistics update if any port is active. */
945 spin_lock(&adapter->work_lock);
946 if (adapter->open_device_map & PORT_MASK)
947 schedule_mac_stats_update(adapter,
948 adapter->params.stats_update_period);
949 spin_unlock(&adapter->work_lock);
950}
951
952/*
953 * Processes elmer0 external interrupts in process context.
954 */
955static void ext_intr_task(void *data)
956{
957 struct adapter *adapter = data;
958
959 elmer0_ext_intr_handler(adapter);
960
961 /* Now reenable external interrupts */
962 spin_lock_irq(&adapter->async_lock);
963 adapter->slow_intr_mask |= F_PL_INTR_EXT;
964 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
965 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
966 adapter->regs + A_PL_ENABLE);
967 spin_unlock_irq(&adapter->async_lock);
968}
969
970/*
971 * Interrupt-context handler for elmer0 external interrupts.
972 */
973void t1_elmer0_ext_intr(struct adapter *adapter)
974{
975 /*
976 * Schedule a task to handle external interrupts as we require
977 * a process context. We disable EXT interrupts in the interim
978 * and let the task reenable them when it's done.
979 */
980 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
981 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
982 adapter->regs + A_PL_ENABLE);
983 schedule_work(&adapter->ext_intr_handler_task);
984}
985
986void t1_fatal_err(struct adapter *adapter)
987{
988 if (adapter->flags & FULL_INIT_DONE) {
989 t1_sge_stop(adapter->sge);
990 t1_interrupts_disable(adapter);
991 }
992 CH_ALERT("%s: encountered fatal error, operation suspended\n",
993 adapter->name);
994}
995
996static int __devinit init_one(struct pci_dev *pdev,
997 const struct pci_device_id *ent)
998{
999 static int version_printed;
1000
1001 int i, err, pci_using_dac = 0;
1002 unsigned long mmio_start, mmio_len;
1003 const struct board_info *bi;
1004 struct adapter *adapter = NULL;
1005 struct port_info *pi;
1006
1007 if (!version_printed) {
1008 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1009 DRV_VERSION);
1010 ++version_printed;
1011 }
1012
1013 err = pci_enable_device(pdev);
1014 if (err)
1015 return err;
1016
1017 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1018 CH_ERR("%s: cannot find PCI device memory base address\n",
1019 pci_name(pdev));
1020 err = -ENODEV;
1021 goto out_disable_pdev;
1022 }
1023
1024 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1025 pci_using_dac = 1;
1026
1027 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1028		CH_ERR("%s: unable to obtain 64-bit DMA for "
1029 "consistent allocations\n", pci_name(pdev));
1030 err = -ENODEV;
1031 goto out_disable_pdev;
1032 }
1033
1034 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1035 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1036 goto out_disable_pdev;
1037 }
1038
1039 err = pci_request_regions(pdev, DRV_NAME);
1040 if (err) {
1041 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1042 goto out_disable_pdev;
1043 }
1044
1045 pci_set_master(pdev);
1046
1047 mmio_start = pci_resource_start(pdev, 0);
1048 mmio_len = pci_resource_len(pdev, 0);
1049 bi = t1_get_board_info(ent->driver_data);
1050
1051 for (i = 0; i < bi->port_number; ++i) {
1052 struct net_device *netdev;
1053
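		/*
		 * Only the first port's net_device is allocated with
		 * private space for the shared adapter structure; the
		 * remaining ports point their ->priv at that same adapter.
		 */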
1054 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1055 if (!netdev) {
1056 err = -ENOMEM;
1057 goto out_free_dev;
1058 }
1059
1060 SET_MODULE_OWNER(netdev);
1061 SET_NETDEV_DEV(netdev, &pdev->dev);
1062
1063 if (!adapter) {
1064 adapter = netdev->priv;
1065 adapter->pdev = pdev;
1066 adapter->port[0].dev = netdev; /* so we don't leak it */
1067
1068 adapter->regs = ioremap(mmio_start, mmio_len);
1069 if (!adapter->regs) {
1070 CH_ERR("%s: cannot map device registers\n",
1071 pci_name(pdev));
1072 err = -ENOMEM;
1073 goto out_free_dev;
1074 }
1075
1076 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1077 err = -ENODEV; /* Can't handle this chip rev */
1078 goto out_free_dev;
1079 }
1080
1081 adapter->name = pci_name(pdev);
1082 adapter->msg_enable = dflt_msg_enable;
1083 adapter->mmio_len = mmio_len;
1084
1085 init_MUTEX(&adapter->mib_mutex);
1086 spin_lock_init(&adapter->tpi_lock);
1087 spin_lock_init(&adapter->work_lock);
1088 spin_lock_init(&adapter->async_lock);
1089
1090 INIT_WORK(&adapter->ext_intr_handler_task,
1091 ext_intr_task, adapter);
1092 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1093 adapter);
1094#ifdef work_struct
1095 init_timer(&adapter->stats_update_timer);
1096 adapter->stats_update_timer.function = mac_stats_timer;
1097 adapter->stats_update_timer.data =
1098 (unsigned long)adapter;
1099#endif
1100
1101 pci_set_drvdata(pdev, netdev);
1102 }
1103
1104 pi = &adapter->port[i];
1105 pi->dev = netdev;
1106 netif_carrier_off(netdev);
1107 netdev->irq = pdev->irq;
1108 netdev->if_port = i;
1109 netdev->mem_start = mmio_start;
1110 netdev->mem_end = mmio_start + mmio_len - 1;
1111 netdev->priv = adapter;
1112 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1113 netdev->features |= NETIF_F_LLTX;
1114
1115 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1116 if (pci_using_dac)
1117 netdev->features |= NETIF_F_HIGHDMA;
1118 if (vlan_tso_capable(adapter)) {
1119#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1120 adapter->flags |= VLAN_ACCEL_CAPABLE;
1121 netdev->features |=
1122 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1123 netdev->vlan_rx_register = vlan_rx_register;
1124 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1125#endif
1126 adapter->flags |= TSO_CAPABLE;
1127 netdev->features |= NETIF_F_TSO;
1128 }
1129
1130 netdev->open = cxgb_open;
1131 netdev->stop = cxgb_close;
1132 netdev->hard_start_xmit = t1_start_xmit;
1133 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1134 sizeof(struct cpl_tx_pkt_lso) :
1135 sizeof(struct cpl_tx_pkt);
1136 netdev->get_stats = t1_get_stats;
1137 netdev->set_multicast_list = t1_set_rxmode;
1138 netdev->do_ioctl = t1_ioctl;
1139 netdev->change_mtu = t1_change_mtu;
1140 netdev->set_mac_address = t1_set_mac_addr;
1141#ifdef CONFIG_NET_POLL_CONTROLLER
1142 netdev->poll_controller = t1_netpoll;
1143#endif
1144 netdev->weight = 64;
1145
1146 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1147 }
1148
1149 if (t1_init_sw_modules(adapter, bi) < 0) {
1150 err = -ENODEV;
1151 goto out_free_dev;
1152 }
1153
1154 /*
1155 * The card is now ready to go. If any errors occur during device
1156 * registration we do not fail the whole card but rather proceed only
1157 * with the ports we manage to register successfully. However we must
1158	 * with the ports we manage to register successfully. However, we must
1159 */
1160 for (i = 0; i < bi->port_number; ++i) {
1161 err = register_netdev(adapter->port[i].dev);
1162 if (err)
1163 CH_WARN("%s: cannot register net device %s, skipping\n",
1164 pci_name(pdev), adapter->port[i].dev->name);
1165 else {
1166 /*
1167 * Change the name we use for messages to the name of
1168 * the first successfully registered interface.
1169 */
1170 if (!adapter->registered_device_map)
1171 adapter->name = adapter->port[i].dev->name;
1172
1173 __set_bit(i, &adapter->registered_device_map);
1174 }
1175 }
1176 if (!adapter->registered_device_map) {
1177 CH_ERR("%s: could not register any net devices\n",
1178 pci_name(pdev));
1179 goto out_release_adapter_res;
1180 }
1181
1182 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1183 bi->desc, adapter->params.chip_revision,
1184 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1185 adapter->params.pci.speed, adapter->params.pci.width);
1186 return 0;
1187
1188 out_release_adapter_res:
1189 t1_free_sw_modules(adapter);
1190 out_free_dev:
1191 if (adapter) {
1192 if (adapter->regs) iounmap(adapter->regs);
1193 for (i = bi->port_number - 1; i >= 0; --i)
1194 if (adapter->port[i].dev) {
1195 cxgb_proc_cleanup(adapter, proc_root_driver);
1196 kfree(adapter->port[i].dev);
1197 }
1198 }
1199 pci_release_regions(pdev);
1200 out_disable_pdev:
1201 pci_disable_device(pdev);
1202 pci_set_drvdata(pdev, NULL);
1203 return err;
1204}
1205
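/*
 * Soft-reset the chip by bouncing it through PCI power states:
 * D3hot (3) followed by D0 (0) via the PM control/status register.
 */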
1206static inline void t1_sw_reset(struct pci_dev *pdev)
1207{
1208 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1209 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1210}
1211
1212static void __devexit remove_one(struct pci_dev *pdev)
1213{
1214 struct net_device *dev = pci_get_drvdata(pdev);
1215
1216 if (dev) {
1217 int i;
1218 struct adapter *adapter = dev->priv;
1219
1220 for_each_port(adapter, i)
1221 if (test_bit(i, &adapter->registered_device_map))
1222 unregister_netdev(adapter->port[i].dev);
1223
1224 t1_free_sw_modules(adapter);
1225 iounmap(adapter->regs);
1226 while (--i >= 0)
1227 if (adapter->port[i].dev) {
1228 cxgb_proc_cleanup(adapter, proc_root_driver);
1229 kfree(adapter->port[i].dev);
1230 }
1231 pci_release_regions(pdev);
1232 pci_disable_device(pdev);
1233 pci_set_drvdata(pdev, NULL);
1234 t1_sw_reset(pdev);
1235 }
1236}
1237
1238static struct pci_driver driver = {
1239 .name = DRV_NAME,
1240 .id_table = t1_pci_tbl,
1241 .probe = init_one,
1242 .remove = __devexit_p(remove_one),
1243};
1244
1245static int __init t1_init_module(void)
1246{
1247 return pci_module_init(&driver);
1248}
1249
1250static void __exit t1_cleanup_module(void)
1251{
1252 pci_unregister_driver(&driver);
1253}
1254
1255module_init(t1_init_module);
1256module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 000000000000..5590cb2dac19
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
1/*****************************************************************************
2 * *
3 * File: elmer0.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 22:49:43 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ELMER0_H_
40#define _CXGB_ELMER0_H_
41
42/* ELMER0 registers */
43#define A_ELMER0_VERSION 0x100000
44#define A_ELMER0_PHY_CFG 0x100004
45#define A_ELMER0_INT_ENABLE 0x100008
46#define A_ELMER0_INT_CAUSE 0x10000c
47#define A_ELMER0_GPI_CFG 0x100010
48#define A_ELMER0_GPI_STAT 0x100014
49#define A_ELMER0_GPO 0x100018
50#define A_ELMER0_PORT0_MI1_CFG 0x400000
51
52#define S_MI1_MDI_ENABLE 0
53#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
54#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
55
56#define S_MI1_MDI_INVERT 1
57#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
58#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
59
60#define S_MI1_PREAMBLE_ENABLE 2
61#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
62#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
63
64#define S_MI1_SOF 3
65#define M_MI1_SOF 0x3
66#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
67#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
68
69#define S_MI1_CLK_DIV 5
70#define M_MI1_CLK_DIV 0xff
71#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
72#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
73
74#define A_ELMER0_PORT0_MI1_ADDR 0x400004
75
76#define S_MI1_REG_ADDR 0
77#define M_MI1_REG_ADDR 0x1f
78#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
79#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
80
81#define S_MI1_PHY_ADDR 5
82#define M_MI1_PHY_ADDR 0x1f
83#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
84#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
85
86#define A_ELMER0_PORT0_MI1_DATA 0x400008
87
88#define S_MI1_DATA 0
89#define M_MI1_DATA 0xffff
90#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
91#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
92
93#define A_ELMER0_PORT0_MI1_OP 0x40000c
94
95#define S_MI1_OP 0
96#define M_MI1_OP 0x3
97#define V_MI1_OP(x) ((x) << S_MI1_OP)
98#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
99
100#define S_MI1_ADDR_AUTOINC 2
101#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
102#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
103
104#define S_MI1_OP_BUSY 31
105#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
106#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
107
108#define A_ELMER0_PORT1_MI1_CFG 0x500000
109#define A_ELMER0_PORT1_MI1_ADDR 0x500004
110#define A_ELMER0_PORT1_MI1_DATA 0x500008
111#define A_ELMER0_PORT1_MI1_OP 0x50000c
112#define A_ELMER0_PORT2_MI1_CFG 0x600000
113#define A_ELMER0_PORT2_MI1_ADDR 0x600004
114#define A_ELMER0_PORT2_MI1_DATA 0x600008
115#define A_ELMER0_PORT2_MI1_OP 0x60000c
116#define A_ELMER0_PORT3_MI1_CFG 0x700000
117#define A_ELMER0_PORT3_MI1_ADDR 0x700004
118#define A_ELMER0_PORT3_MI1_DATA 0x700008
119#define A_ELMER0_PORT3_MI1_OP 0x70000c
120
121/* Simple bit definition for GPI and GP0 registers. */
122#define ELMER0_GP_BIT0 0x0001
123#define ELMER0_GP_BIT1 0x0002
124#define ELMER0_GP_BIT2 0x0004
125#define ELMER0_GP_BIT3 0x0008
126#define ELMER0_GP_BIT4 0x0010
127#define ELMER0_GP_BIT5 0x0020
128#define ELMER0_GP_BIT6 0x0040
129#define ELMER0_GP_BIT7 0x0080
130#define ELMER0_GP_BIT8 0x0100
131#define ELMER0_GP_BIT9 0x0200
132#define ELMER0_GP_BIT10 0x0400
133#define ELMER0_GP_BIT11 0x0800
134#define ELMER0_GP_BIT12 0x1000
135#define ELMER0_GP_BIT13 0x2000
136#define ELMER0_GP_BIT14 0x4000
137#define ELMER0_GP_BIT15 0x8000
138#define ELMER0_GP_BIT16 0x10000
139#define ELMER0_GP_BIT17 0x20000
140#define ELMER0_GP_BIT18 0x40000
141#define ELMER0_GP_BIT19 0x80000
142
143#define MI1_OP_DIRECT_WRITE 1
144#define MI1_OP_DIRECT_READ 2
145
146#define MI1_OP_INDIRECT_ADDRESS 0
147#define MI1_OP_INDIRECT_WRITE 1
148#define MI1_OP_INDIRECT_READ_INC 2
149#define MI1_OP_INDIRECT_READ 3
150
151#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 000000000000..230642571c92
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
1/*****************************************************************************
2 * *
3 * File: espi.c *
4 * $Revision: 1.14 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * Ethernet SPI functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "espi.h"
43
44struct peespi {
45 adapter_t *adapter;
46 struct espi_intr_counts intr_cnt;
47 u32 misc_ctrl;
48 spinlock_t lock;
49};
50
51#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
52 F_RAMPARITYERR | F_DIP2PARITYERR)
53#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
54 | F_MONITORED_INTERFACE)
55
56#define TRICN_CNFG 14
57#define TRICN_CMD_READ 0x11
58#define TRICN_CMD_WRITE 0x21
59#define TRICN_CMD_ATTEMPTS 10
60
61static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
62 int ch_addr, int reg_offset, u32 wr_data)
63{
64 int busy, attempts = TRICN_CMD_ATTEMPTS;
65
66 writel(V_WRITE_DATA(wr_data) |
67 V_REGISTER_OFFSET(reg_offset) |
68 V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
69 V_BUNDLE_ADDR(bundle_addr) |
70 V_SPI4_COMMAND(TRICN_CMD_WRITE),
71 adapter->regs + A_ESPI_CMD_ADDR);
72 writel(0, adapter->regs + A_ESPI_GOSTAT);
73
74 do {
75 busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
76 } while (busy && --attempts);
77
78 if (busy)
79 CH_ERR("%s: TRICN write timed out\n", adapter->name);
80
81 return busy;
82}
83
84/* 1. Deassert rx_reset_core. */
85/* 2. Program TRICN_CNFG registers. */
86/* 3. Deassert rx_reset_link */
87static int tricn_init(adapter_t *adapter)
88{
89 int i = 0;
90 int sme = 1;
91 int stat = 0;
92 int timeout = 0;
93 int is_ready = 0;
94 int dynamic_deskew = 0;
95
96 if (dynamic_deskew)
97 sme = 0;
98
99
100 /* 1 */
 101	timeout = 1000;
 102	do {
 103		stat = readl(adapter->regs + A_ESPI_RX_RESET);
 104		is_ready = (stat & 0x4);
 105		timeout--;
 106		udelay(5);
 107	} while (!is_ready && timeout);
 108	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
 109	if (!timeout) {
 110		CH_ERR("ESPI: timeout waiting for RX reset in tricn_init()\n");
 111		t1_fatal_err(adapter);
 112	}
 113
114
115 /* 2 */
116 if (sme) {
117 tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
118 tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
119 tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
120 }
 121	for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
 122	for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
 123	for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
 124	for (i = 4; i <= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
 125	for (i = 5; i <= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
 126	for (i = 6; i <= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
 127	for (i = 7; i <= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
 128	for (i = 8; i <= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
129
130 /* 3 */
131 writel(0x3, adapter->regs + A_ESPI_RX_RESET);
132
133 return 0;
134}
135
136void t1_espi_intr_enable(struct peespi *espi)
137{
138 u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
139
140 /*
141 * Cannot enable ESPI interrupts on T1B because HW asserts the
142 * interrupt incorrectly, namely the driver gets ESPI interrupts
143 * but no data is actually dropped (can verify this reading the ESPI
144 * drop registers). Also, once the ESPI interrupt is asserted it
145 * cannot be cleared (HW bug).
146 */
147 enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
148 writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
149 writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
150}
151
152void t1_espi_intr_clear(struct peespi *espi)
153{
154 writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
155 writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
156}
157
158void t1_espi_intr_disable(struct peespi *espi)
159{
160 u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
161
162 writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
163 writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
164}
165
166int t1_espi_intr_handler(struct peespi *espi)
167{
168 u32 cnt;
169 u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
170
171 if (status & F_DIP4ERR)
172 espi->intr_cnt.DIP4_err++;
173 if (status & F_RXDROP)
174 espi->intr_cnt.rx_drops++;
175 if (status & F_TXDROP)
176 espi->intr_cnt.tx_drops++;
177 if (status & F_RXOVERFLOW)
178 espi->intr_cnt.rx_ovflw++;
179 if (status & F_RAMPARITYERR)
180 espi->intr_cnt.parity_err++;
181 if (status & F_DIP2PARITYERR) {
182 espi->intr_cnt.DIP2_parity_err++;
183
184 /*
185 * Must read the error count to clear the interrupt
186 * that it causes.
187 */
188 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
189 }
190
191 /*
192 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
193 * write the status as is.
194 */
195 if (status && t1_is_T1B(espi->adapter))
196 status = 1;
197 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
198 return 0;
199}
200
201const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
202{
203 return &espi->intr_cnt;
204}
205
206static void espi_setup_for_pm3393(adapter_t *adapter)
207{
208 u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
209
210 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
211 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
212 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
213 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
214 writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
215 writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
216 writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
217 writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
218 writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
219}
220
221/* T2 Init part -- */
222/* 1. Set T_ESPI_MISCCTRL_ADDR */
223/* 2. Init ESPI registers. */
224/* 3. Init TriCN Hard Macro */
225int t1_espi_init(struct peespi *espi, int mac_type, int nports)
226{
227 u32 cnt;
228
229 u32 status_enable_extra = 0;
230 adapter_t *adapter = espi->adapter;
231 u32 status, burstval = 0x800100;
232
233 /* Disable ESPI training. MACs that can handle it enable it below. */
234 writel(0, adapter->regs + A_ESPI_TRAIN);
235
236 if (is_T2(adapter)) {
237 writel(V_OUT_OF_SYNC_COUNT(4) |
238 V_DIP2_PARITY_ERR_THRES(3) |
239 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
240 if (nports == 4) {
241 /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
242 burstval = 0x200040;
243 }
244 }
245 writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
246
247 switch (mac_type) {
248 case CHBT_MAC_PM3393:
249 espi_setup_for_pm3393(adapter);
250 break;
251 default:
252 return -1;
253 }
254
255 /*
256 * Make sure any pending interrupts from the SPI are
257	 * cleared before enabling the interrupt.
258 */
259 writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
260 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
261 if (status & F_DIP2PARITYERR) {
262 cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
263 }
264
265 /*
266 * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
267 * write the status as is.
268 */
269 if (status && t1_is_T1B(espi->adapter))
270 status = 1;
271 writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
272
273 writel(status_enable_extra | F_RXSTATUSENABLE,
274 adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
275
276 if (is_T2(adapter)) {
277 tricn_init(adapter);
278 /*
279 * Always position the control at the 1st port egress IN
280 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
281 */
282 espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
283 & ~MON_MASK) | (F_MONITORED_DIRECTION
284 | F_MONITORED_INTERFACE);
285 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
286 spin_lock_init(&espi->lock);
287 }
288
289 return 0;
290}
291
292void t1_espi_destroy(struct peespi *espi)
293{
294 kfree(espi);
295}
296
297struct peespi *t1_espi_create(adapter_t *adapter)
298{
299 struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
300
301 memset(espi, 0, sizeof(*espi));
302
303 if (espi)
304 espi->adapter = adapter;
305 return espi;
306}
307
308void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
309{
310 struct peespi *espi = adapter->espi;
311
312 if (!is_T2(adapter))
313 return;
314 spin_lock(&espi->lock);
315 espi->misc_ctrl = (val & ~MON_MASK) |
316 (espi->misc_ctrl & MON_MASK);
317 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
318 spin_unlock(&espi->lock);
319}
320
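/*
 * Read the ESPI (sop, eop) monitor counter selected by addr. If the
 * monitor select bits in MISC_CONTROL do not already match, repoint
 * them temporarily, read the counter, then restore the default
 * selection programmed in t1_espi_init().
 */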
321u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
322{
323 u32 sel;
324
325 struct peespi *espi = adapter->espi;
326
327 if (!is_T2(adapter))
328 return 0;
329 sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
 330	if (!wait) {
 331		if (!spin_trylock(&espi->lock))
 332			return 0;
 333	} else {
 334		spin_lock(&espi->lock);
 335	}
 336	if (sel != (espi->misc_ctrl & MON_MASK)) {
 337		writel((espi->misc_ctrl & ~MON_MASK) | sel,
 338		       adapter->regs + A_ESPI_MISC_CONTROL);
 339		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 340		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
 341	} else {
 342		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 343	}
344 spin_unlock(&espi->lock);
345 return sel;
346}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 000000000000..c90e37f8457c
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
1/*****************************************************************************
2 * *
3 * File: espi.h *
4 * $Revision: 1.7 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_ESPI_H_
40#define _CXGB_ESPI_H_
41
42#include "common.h"
43
44struct espi_intr_counts {
45 unsigned int DIP4_err;
46 unsigned int rx_drops;
47 unsigned int tx_drops;
48 unsigned int rx_ovflw;
49 unsigned int parity_err;
50 unsigned int DIP2_parity_err;
51};
52
53struct peespi;
54
55struct peespi *t1_espi_create(adapter_t *adapter);
56void t1_espi_destroy(struct peespi *espi);
57int t1_espi_init(struct peespi *espi, int mac_type, int nports);
58
59void t1_espi_intr_enable(struct peespi *);
60void t1_espi_intr_clear(struct peespi *);
61void t1_espi_intr_disable(struct peespi *);
62int t1_espi_intr_handler(struct peespi *);
63const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
64
65void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
66u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
67
68#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 000000000000..746b0eeea964
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
1/*****************************************************************************
2 * *
3 * File: gmac.h *
4 * $Revision: 1.6 $ *
5 * $Date: 2005/06/21 18:29:47 $ *
6 * Description: *
7 * Generic MAC functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#ifndef _CXGB_GMAC_H_
41#define _CXGB_GMAC_H_
42
43#include "common.h"
44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
47
48struct cmac_statistics {
49 /* Transmit */
50 u64 TxOctetsOK;
51 u64 TxOctetsBad;
52 u64 TxUnicastFramesOK;
53 u64 TxMulticastFramesOK;
54 u64 TxBroadcastFramesOK;
55 u64 TxPauseFrames;
56 u64 TxFramesWithDeferredXmissions;
57 u64 TxLateCollisions;
58 u64 TxTotalCollisions;
59 u64 TxFramesAbortedDueToXSCollisions;
60 u64 TxUnderrun;
61 u64 TxLengthErrors;
62 u64 TxInternalMACXmitError;
63 u64 TxFramesWithExcessiveDeferral;
64 u64 TxFCSErrors;
65
66 /* Receive */
67 u64 RxOctetsOK;
68 u64 RxOctetsBad;
69 u64 RxUnicastFramesOK;
70 u64 RxMulticastFramesOK;
71 u64 RxBroadcastFramesOK;
72 u64 RxPauseFrames;
73 u64 RxFCSErrors;
74 u64 RxAlignErrors;
75 u64 RxSymbolErrors;
76 u64 RxDataErrors;
77 u64 RxSequenceErrors;
78 u64 RxRuntErrors;
79 u64 RxJabberErrors;
80 u64 RxInternalMACRcvError;
81 u64 RxInRangeLengthErrors;
82 u64 RxOutOfRangeLengthField;
83 u64 RxFrameTooLongErrors;
84};
85
86struct cmac_ops {
87 void (*destroy)(struct cmac *);
88 int (*reset)(struct cmac *);
89 int (*interrupt_enable)(struct cmac *);
90 int (*interrupt_disable)(struct cmac *);
91 int (*interrupt_clear)(struct cmac *);
92 int (*interrupt_handler)(struct cmac *);
93
94 int (*enable)(struct cmac *, int);
95 int (*disable)(struct cmac *, int);
96
97 int (*loopback_enable)(struct cmac *);
98 int (*loopback_disable)(struct cmac *);
99
100 int (*set_mtu)(struct cmac *, int mtu);
101 int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
102
103 int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
104 int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
105 int *fc);
106
107 const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
108
109 int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
110 int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
111};
112
113typedef struct _cmac_instance cmac_instance;
114
115struct cmac {
116 struct cmac_statistics stats;
117 adapter_t *adapter;
118 struct cmac_ops *ops;
119 cmac_instance *instance;
120};
121
122struct gmac {
123 unsigned int stats_update_period;
124 struct cmac *(*create)(adapter_t *adapter, int index);
125 int (*reset)(adapter_t *);
126};
127
128extern struct gmac t1_pm3393_ops;
129extern struct gmac t1_chelsio_mac_ops;
130extern struct gmac t1_vsc7321_ops;
131extern struct gmac t1_ixf1010_ops;
132extern struct gmac t1_dummy_mac_ops;
133
134#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 000000000000..db5034282782
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
1/*****************************************************************************
2 * *
3 * File: mv88x201x.c *
4 * $Revision: 1.12 $ *
5 * $Date: 2005/04/15 19:27:14 $ *
6 * Description: *
7 * Marvell PHY (mv88x201x) functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "cphy.h"
41#include "elmer0.h"
42
43/*
44 * The 88x2010 Rev C requires some link status registers to be read
45 * twice in order to get the right values. Future revisions will fix
46 * this problem and then this macro can disappear.
47 */
48#define MV88x2010_LINK_STATUS_BUGS 1
49
50static int led_init(struct cphy *cphy)
51{
52	/* Set up the LED registers so we can turn the LEDs on/off.
53 * Writing these bits maps control to another
54 * register. mmd(0x1) addr(0x7)
55 */
56 mdio_write(cphy, 0x3, 0x8304, 0xdddd);
57 return 0;
58}
59
60static int led_link(struct cphy *cphy, u32 do_enable)
61{
62 u32 led = 0;
63#define LINK_ENABLE_BIT 0x1
64
65 mdio_read(cphy, 0x1, 0x7, &led);
66
67 if (do_enable & LINK_ENABLE_BIT) {
68 led |= LINK_ENABLE_BIT;
69 mdio_write(cphy, 0x1, 0x7, led);
70 } else {
71 led &= ~LINK_ENABLE_BIT;
72 mdio_write(cphy, 0x1, 0x7, led);
73 }
74 return 0;
75}
76
77/* Port Reset */
78static int mv88x201x_reset(struct cphy *cphy, int wait)
79{
80 /* This can be done through registers. It is not required since
81 * a full chip reset is used.
82 */
83 return 0;
84}
85
86static int mv88x201x_interrupt_enable(struct cphy *cphy)
87{
88 u32 elmer;
89
90 /* Enable PHY LASI interrupts. */
91 mdio_write(cphy, 0x1, 0x9002, 0x1);
92
93 /* Enable Marvell interrupts through Elmer0. */
94 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
95 elmer |= ELMER0_GP_BIT6;
96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
97 return 0;
98}
99
100static int mv88x201x_interrupt_disable(struct cphy *cphy)
101{
102 u32 elmer;
103
104 /* Disable PHY LASI interrupts. */
105 mdio_write(cphy, 0x1, 0x9002, 0x0);
106
107 /* Disable Marvell interrupts through Elmer0. */
108 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
109 elmer &= ~ELMER0_GP_BIT6;
110 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
111 return 0;
112}
113
114static int mv88x201x_interrupt_clear(struct cphy *cphy)
115{
116 u32 elmer;
117 u32 val;
118
119#ifdef MV88x2010_LINK_STATUS_BUGS
120	/* Required to read twice before the clear takes effect. */
121 mdio_read(cphy, 0x1, 0x9003, &val);
122 mdio_read(cphy, 0x1, 0x9004, &val);
123 mdio_read(cphy, 0x1, 0x9005, &val);
124
125 /* Read this register after the others above it else
126 * the register doesn't clear correctly.
127 */
128 mdio_read(cphy, 0x1, 0x1, &val);
129#endif
130
131 /* Clear link status. */
132 mdio_read(cphy, 0x1, 0x1, &val);
133 /* Clear PHY LASI interrupts. */
134 mdio_read(cphy, 0x1, 0x9005, &val);
135
136#ifdef MV88x2010_LINK_STATUS_BUGS
137 /* Do it again. */
138 mdio_read(cphy, 0x1, 0x9003, &val);
139 mdio_read(cphy, 0x1, 0x9004, &val);
140#endif
141
142 /* Clear Marvell interrupts through Elmer0. */
143 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
144 elmer |= ELMER0_GP_BIT6;
145 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
146 return 0;
147}
148
149static int mv88x201x_interrupt_handler(struct cphy *cphy)
150{
151 /* Clear interrupts */
152 mv88x201x_interrupt_clear(cphy);
153
154 /* We have only enabled link change interrupts and so
155 * cphy_cause must be a link change interrupt.
156 */
157 return cphy_cause_link_change;
158}
159
160static int mv88x201x_set_loopback(struct cphy *cphy, int on)
161{
162 return 0;
163}
164
165static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
166 int *speed, int *duplex, int *fc)
167{
168 u32 val = 0;
169#define LINK_STATUS_BIT 0x4
170
171 if (link_ok) {
172 /* Read link status. */
173 mdio_read(cphy, 0x1, 0x1, &val);
174 val &= LINK_STATUS_BIT;
175 *link_ok = (val == LINK_STATUS_BIT);
176 /* Turn on/off Link LED */
177 led_link(cphy, *link_ok);
178 }
179 if (speed)
180 *speed = SPEED_10000;
181 if (duplex)
182 *duplex = DUPLEX_FULL;
183 if (fc)
184 *fc = PAUSE_RX | PAUSE_TX;
185 return 0;
186}
187
188static void mv88x201x_destroy(struct cphy *cphy)
189{
190 kfree(cphy);
191}
192
193static struct cphy_ops mv88x201x_ops = {
194 .destroy = mv88x201x_destroy,
195 .reset = mv88x201x_reset,
196 .interrupt_enable = mv88x201x_interrupt_enable,
197 .interrupt_disable = mv88x201x_interrupt_disable,
198 .interrupt_clear = mv88x201x_interrupt_clear,
199 .interrupt_handler = mv88x201x_interrupt_handler,
200 .get_link_status = mv88x201x_get_link_status,
201 .set_loopback = mv88x201x_set_loopback,
202};
203
204static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
205 struct mdio_ops *mdio_ops)
206{
207 u32 val;
208 struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
209
210 if (!cphy)
211 return NULL;
212 memset(cphy, 0, sizeof(*cphy));
213 cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
214
215 /* Commands the PHY to enable XFP's clock. */
216 mdio_read(cphy, 0x3, 0x8300, &val);
217 mdio_write(cphy, 0x3, 0x8300, val | 1);
218
219 /* Clear link status. Required because of a bug in the PHY. */
220 mdio_read(cphy, 0x1, 0x8, &val);
221 mdio_read(cphy, 0x3, 0x8, &val);
222
223 /* Allows for Link,Ack LED turn on/off */
224 led_init(cphy);
225 return cphy;
226}
227
228/* Chip Reset */
229static int mv88x201x_phy_reset(adapter_t *adapter)
230{
231 u32 val;
232
233 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
234 val &= ~4;
235 t1_tpi_write(adapter, A_ELMER0_GPO, val);
236 msleep(100);
237
238 t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
239 msleep(1000);
240
241	/* Now let's enable the laser. Delay 100us. */
242 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
243 val |= 0x8000;
244 t1_tpi_write(adapter, A_ELMER0_GPO, val);
245 udelay(100);
246 return 0;
247}
248
249struct gphy t1_mv88x201x_ops = {
250 mv88x201x_phy_create,
251 mv88x201x_phy_reset
252};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 000000000000..04a1404fc65e
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
1/*****************************************************************************
2 * *
3 * File: pm3393.c *
4 * $Revision: 1.16 $ *
5 * $Date: 2005/05/14 00:59:32 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "regs.h"
42#include "gmac.h"
43#include "elmer0.h"
44#include "suni1x10gexp_regs.h"
45
46/* 802.3ae 10Gb/s MDIO Manageable Device (MMD)
47 */
48enum {
49 MMD_RESERVED,
50 MMD_PMAPMD,
51 MMD_WIS,
52 MMD_PCS,
53 MMD_PHY_XGXS, /* XGMII Extender Sublayer */
54 MMD_DTE_XGXS,
55};
56
57enum {
58 PHY_XGXS_CTRL_1,
59 PHY_XGXS_STATUS_1
60};
61
62#define OFFSET(REG_ADDR) (REG_ADDR << 2)
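/*
 * Note: PM3393 register numbers are 16-bit word indices, while the TPI
 * interface addresses bytes, hence the << 2 above.  For example, register
 * 0x3040 (TXXG configuration) lives at byte offset OFFSET(0x3040) == 0xc100.
 */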
63
64/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
65#define MAX_FRAME_SIZE 9600
66
67#define IPG 12
68#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
69 SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
70 SUNI1x10GEXP_BITMSK_TXXG_PADEN)
71#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
72 SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
73
74/* Update statistics every 15 minutes */
75#define STATS_TICK_SECS (15 * 60)
76
77enum { /* RMON registers */
78 RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
79 RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
80 RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
81 RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
82 RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
83 RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
84 RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
85 RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
86 RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
87 RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
88 RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
89 RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
90 RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
91
92 TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
93 TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
94 TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
95 TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
96 TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
97 TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
98 TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
99};
100
101struct _cmac_instance {
102 u8 enabled;
103 u8 fc;
104 u8 mac_addr[6];
105};
106
107static int pmread(struct cmac *cmac, u32 reg, u32 *data32)
108{
109 t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
110 return 0;
111}
112
113static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
114{
115 t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
116 return 0;
117}
118
119/* Port reset. */
120static int pm3393_reset(struct cmac *cmac)
121{
122 return 0;
123}
124
125/*
126 * Enable interrupts for the PM3393:
127 *
128 * 1. Enable PM3393 BLOCK interrupts.
129 * 2. Enable PM3393 Master Interrupt bit (INTE).
130 * 3. Enable ELMER's PM3393 bit.
131 * 4. Enable Terminator external interrupt.
132 */
133static int pm3393_interrupt_enable(struct cmac *cmac)
134{
135 u32 pl_intr;
136
137 /* PM3393 - Enabling all hardware block interrupts.
138 */
139 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
140 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
141 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
142 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
143
144 /* Don't interrupt on statistics overflow, we are polling */
145 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
146 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
147 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
148 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
149
150 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
151 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
152 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
153 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
154 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
155 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
156 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
157 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
158 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
159
160 /* PM3393 - Global interrupt enable
161 */
162 /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
163 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
164 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
165
166	/* TERMINATOR - PL_INTERRUPTS_EXT */
167 pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
168 pl_intr |= F_PL_INTR_EXT;
169 writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
170 return 0;
171}
172
173static int pm3393_interrupt_disable(struct cmac *cmac)
174{
175 u32 elmer;
176
177	/* PM3393 - Disabling all hardware block interrupts. */
178 pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
179 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
180 pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
181 pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
182 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
183 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
184 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
185 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
186 pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
187 pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
188 pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
189 pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
190 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
191 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
192 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
193 pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
194 pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
195
196 /* PM3393 - Global interrupt enable */
197 pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
198
199 /* ELMER - External chip interrupts. */
200 t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
201 elmer &= ~ELMER0_GP_BIT1;
202 t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
203
204	/* TERMINATOR - PL_INTERRUPTS_EXT */
205	/* Do NOT disable Terminator's external interrupts; another chip
206	 * may still need them enabled.  We disable the PM3393 at the ELMER
207	 * level instead. */
208
209 return 0;
210}
211
212static int pm3393_interrupt_clear(struct cmac *cmac)
213{
214 u32 elmer;
215 u32 pl_intr;
216 u32 val32;
217
218 /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
219 * bit WCIMODE=0 for a clear-on-read.
220 */
221 pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
222 pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
223 pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
224 pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
225 pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
226 pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
227 pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
228 pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
229 pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
230 pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
231 pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
232 pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
233 &val32);
234 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
235 pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
236
237 /* PM3393 - Global interrupt status
238 */
239 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
240
241 /* ELMER - External chip interrupts.
242 */
243 t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
244 elmer |= ELMER0_GP_BIT1;
245 t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
246
247	/* TERMINATOR - PL_INTERRUPTS_EXT
248 */
249 pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
250 pl_intr |= F_PL_INTR_EXT;
251 writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
252
253 return 0;
254}
255
256/* Interrupt handler */
257static int pm3393_interrupt_handler(struct cmac *cmac)
258{
259 u32 master_intr_status;
260	/*
261	 * 1. Read master interrupt register.
262	 * 2. Read BLOCK's interrupt status registers.
263	 * 3. Handle BLOCK interrupts.
264	 */
265 /* Read the master interrupt status register. */
266 pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
267 &master_intr_status);
268
269	/* TBD XXX Let's just clear everything for now. */
270 pm3393_interrupt_clear(cmac);
271
272 return 0;
273}
274
275static int pm3393_enable(struct cmac *cmac, int which)
276{
277 if (which & MAC_DIRECTION_RX)
278 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
279 (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
280
281 if (which & MAC_DIRECTION_TX) {
282 u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
283
284 if (cmac->instance->fc & PAUSE_RX)
285 val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
286 if (cmac->instance->fc & PAUSE_TX)
287 val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
288 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
289 }
290
291 cmac->instance->enabled |= which;
292 return 0;
293}
294
295static int pm3393_enable_port(struct cmac *cmac, int which)
296{
297 /* Clear port statistics */
298 pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
299 SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
300 udelay(2);
301 memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
302
303 pm3393_enable(cmac, which);
304
305 /*
306	 * XXX This should be done by the PHY and preferably not at all.
307 * The PHY doesn't give us link status indication on its own so have
308 * the link management code query it instead.
309 */
310 {
311 extern void link_changed(adapter_t *adapter, int port_id);
312
313 link_changed(cmac->adapter, 0);
314 }
315 return 0;
316}
317
318static int pm3393_disable(struct cmac *cmac, int which)
319{
320 if (which & MAC_DIRECTION_RX)
321 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
322 if (which & MAC_DIRECTION_TX)
323 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
324
325 /*
326	 * The disable is graceful.  Give the PM3393 time.  We can't wait
327	 * very long here, as we may be holding locks.
328 */
329 udelay(20);
330
331 cmac->instance->enabled &= ~which;
332 return 0;
333}
334
335static int pm3393_loopback_enable(struct cmac *cmac)
336{
337 return 0;
338}
339
340static int pm3393_loopback_disable(struct cmac *cmac)
341{
342 return 0;
343}
344
345static int pm3393_set_mtu(struct cmac *cmac, int mtu)
346{
347 int enabled = cmac->instance->enabled;
348
349 /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
350 mtu += 14 + 4;
351 if (mtu > MAX_FRAME_SIZE)
352 return -EINVAL;
353
354 /* Disable Rx/Tx MAC before configuring it. */
355 if (enabled)
356 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
357
358 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
359 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
360
361 if (enabled)
362 pm3393_enable(cmac, enabled);
363 return 0;
364}
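/*
 * Worked example for pm3393_set_mtu(): the 14-byte Ethernet header plus
 * 4-byte FCS are added to the MTU, so with MAX_FRAME_SIZE == 9600 the
 * largest accepted MTU is 9600 - 18 = 9582, and mtu == 9000 programs a
 * 9018-byte frame limit into both the RXXG and TXXG blocks.
 */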
365
366static u32 calc_crc(u8 *b, int len)
367{
368 int i;
369 u32 crc = (u32)~0;
370
371 /* calculate crc one bit at a time */
372 while (len--) {
373 crc ^= *b++;
374 for (i = 0; i < 8; i++) {
375 if (crc & 0x1)
376 crc = (crc >> 1) ^ 0xedb88320;
377 else
378 crc = (crc >> 1);
379 }
380 }
381
382 /* reverse bits */
383 crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
384 crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
385 crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
386 /* swap bytes */
387 crc = (crc >> 16) | (crc << 16);
388 crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
389
390 return crc;
391}
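/*
 * Illustrative sketch (not called by the driver): how calc_crc() feeds the
 * 64-bit multicast hash used below in pm3393_set_rx_mode().  Bits 28:23 of
 * the little-endian CRC select one of 64 filter bits, spread across four
 * 16-bit hash registers.
 */
static inline void example_mc_hash(u8 *addr, u16 mc_filter[4])
{
	int bit = (calc_crc(addr, 6) >> 23) & 0x3f;	/* 6 == ETH_ALEN */

	mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}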
392
393static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
394{
395 int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
396 u32 rx_mode;
397
398 /* Disable MAC RX before reconfiguring it */
399 if (enabled)
400 pm3393_disable(cmac, MAC_DIRECTION_RX);
401
402 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
403 rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
404 SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
405 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
406 (u16)rx_mode);
407
408 if (t1_rx_mode_promisc(rm)) {
409 /* Promiscuous mode. */
410 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
411 }
412 if (t1_rx_mode_allmulti(rm)) {
413 /* Accept all multicast. */
414 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
415 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
416 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
417 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
418 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
419 } else if (t1_rx_mode_mc_cnt(rm)) {
420 /* Accept one or more multicast(s). */
421 u8 *addr;
422 int bit;
423 u16 mc_filter[4] = { 0, };
424
425 while ((addr = t1_get_next_mcaddr(rm))) {
426			bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* CRC bits 28:23 */
427 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
428 }
429 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
430 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
431 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
432 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
433 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
434 }
435
436 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
437
438 if (enabled)
439 pm3393_enable(cmac, MAC_DIRECTION_RX);
440
441 return 0;
442}
443
444static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
445 int *duplex, int *fc)
446{
447 if (speed)
448 *speed = SPEED_10000;
449 if (duplex)
450 *duplex = DUPLEX_FULL;
451 if (fc)
452 *fc = cmac->instance->fc;
453 return 0;
454}
455
456static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
457 int fc)
458{
459 if (speed >= 0 && speed != SPEED_10000)
460 return -1;
461 if (duplex >= 0 && duplex != DUPLEX_FULL)
462 return -1;
463 if (fc & ~(PAUSE_TX | PAUSE_RX))
464 return -1;
465
466 if (fc != cmac->instance->fc) {
467 cmac->instance->fc = (u8) fc;
468 if (cmac->instance->enabled & MAC_DIRECTION_TX)
469 pm3393_enable(cmac, MAC_DIRECTION_TX);
470 }
471 return 0;
472}
473
474#define RMON_UPDATE(mac, name, stat_name) \
475 { \
476 t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
477 t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
478 t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
479 (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
480 (((u64)val1 & 0xffff) << 16) | \
481 (((u64)val2 & 0xff) << 32) | \
482 ((mac)->stats.stat_name & \
483 (~(u64)0 << 40)); \
484	if (ro & ((u64)1 << \
485	    ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
486		(mac)->stats.stat_name += ((u64)1 << 40); \
487 }
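/*
 * Note on RMON_UPDATE(): the MSTAT counters are 40 bits wide in hardware
 * (16 + 16 + 8 bits across three consecutive TPI registers).  The macro
 * splices the fresh 40 hardware bits underneath the software-maintained
 * bits 63:40, and advances bit 40 when this counter's flag is set in the
 * rollover bitmap 'ro' (one bit per counter; counters sit four register
 * addresses apart, hence the >> 2).
 */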
488
489static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
490 int flag)
491{
492 u64 ro;
493 u32 val0, val1, val2, val3;
494
495 /* Snap the counters */
496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
497 SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
498
499 /* Counter rollover, clear on read */
500 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
501 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
502 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
503 pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
506
507 /* Rx stats */
508 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
509 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
510 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
511 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
512 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
513 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
514 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
515 RxInternalMACRcvError);
516 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
517 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
518 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
519 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
520 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
521 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
522
523 /* Tx stats */
524 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
525 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
526 TxInternalMACXmitError);
527 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
528 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
529 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
530 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
531 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
532
533 return &mac->stats;
534}
535
536static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
537{
538 memcpy(mac_addr, cmac->instance->mac_addr, 6);
539 return 0;
540}
541
542static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
543{
544 u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
545
546 /*
547 * MAC addr: 00:07:43:00:13:09
548 *
549 * ma[5] = 0x09
550 * ma[4] = 0x13
551 * ma[3] = 0x00
552 * ma[2] = 0x43
553 * ma[1] = 0x07
554 * ma[0] = 0x00
555 *
556 * The PM3393 requires byte swapping and reverse order entry
557 * when programming MAC addresses:
558 *
559 * low_bits[15:0] = ma[1]:ma[0]
560 * mid_bits[31:16] = ma[3]:ma[2]
561 * high_bits[47:32] = ma[5]:ma[4]
562 */
563
564 /* Store local copy */
565 memcpy(cmac->instance->mac_addr, ma, 6);
566
567 lo = ((u32) ma[1] << 8) | (u32) ma[0];
568 mid = ((u32) ma[3] << 8) | (u32) ma[2];
569 hi = ((u32) ma[5] << 8) | (u32) ma[4];
570
571 /* Disable Rx/Tx MAC before configuring it. */
572 if (enabled)
573 pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
574
575 /* Set RXXG Station Address */
576 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
577 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
578 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
579
580 /* Set TXXG Station Address */
581 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
582 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
583 pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
584
585	/* Set up Exact Match Filter 1 with our MAC address.
586	 *
587	 * The exact-match filter must be disabled before configuring it.
588	 */
589 pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
590 val &= 0xff0f;
591 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
592
593 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
594 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
595 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
596
597 val |= 0x0090;
598 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
599
600 if (enabled)
601 pm3393_enable(cmac, enabled);
602 return 0;
603}
604
605static void pm3393_destroy(struct cmac *cmac)
606{
607 kfree(cmac);
608}
609
610static struct cmac_ops pm3393_ops = {
611 .destroy = pm3393_destroy,
612 .reset = pm3393_reset,
613 .interrupt_enable = pm3393_interrupt_enable,
614 .interrupt_disable = pm3393_interrupt_disable,
615 .interrupt_clear = pm3393_interrupt_clear,
616 .interrupt_handler = pm3393_interrupt_handler,
617 .enable = pm3393_enable_port,
618 .disable = pm3393_disable,
619 .loopback_enable = pm3393_loopback_enable,
620 .loopback_disable = pm3393_loopback_disable,
621 .set_mtu = pm3393_set_mtu,
622 .set_rx_mode = pm3393_set_rx_mode,
623 .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
624 .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
625 .statistics_update = pm3393_update_statistics,
626 .macaddress_get = pm3393_macaddress_get,
627 .macaddress_set = pm3393_macaddress_set
628};
629
630static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
631{
632 struct cmac *cmac;
633
634 cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
635 if (!cmac)
636 return NULL;
637 memset(cmac, 0, sizeof(*cmac));
638
639 cmac->ops = &pm3393_ops;
640 cmac->instance = (cmac_instance *) (cmac + 1);
641 cmac->adapter = adapter;
642 cmac->instance->fc = PAUSE_TX | PAUSE_RX;
643
644 t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
645 t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
646 t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
647 t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
648 t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
649 t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
650 t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
651 t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
652 t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
653 t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
654 t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
655 t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
656 t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
657 t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
658 t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
659 t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
660 t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
661 t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
662 t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
663 t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
664 t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
665 t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
666
667 t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
668 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
669 t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
670 t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
671 t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
672 t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
673 t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
674 t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
675 t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
676 t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
677 t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
678	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented. */
679 t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
680
681 t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
682 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
683 t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
684 t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
685 t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
686 t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
687 t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
688 t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
689 t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
690 t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
691	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX enable overflow interrupt. The other bits are undocumented. */
692
693 t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
694 t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
695 t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
696 t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
697 t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
698 t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
699
700 t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
701 t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
702
703 t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
704 t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
705
706 t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
707	/* For T1 use timer-based MAC flow control. */
708 t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
709 t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
710 t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
711 t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
712
713	/* Set up Exact Match Filter 0 to allow broadcast packets.
714	 */
715 t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
716 t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
717 t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
718 t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
719 t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
720
721 t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
722 t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
723 t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
724
725 return cmac;
726}
727
728static int pm3393_mac_reset(adapter_t *adapter)
729{
730 u32 val;
731 u32 x;
732 u32 is_pl4_reset_finished;
733 u32 is_pl4_outof_lock;
734 u32 is_xaui_mabc_pll_locked;
735 u32 successful_reset;
736 int i;
737
738 /* The following steps are required to properly reset
739 * the PM3393. This information is provided in the
740 * PM3393 datasheet (Issue 2: November 2002)
741 * section 13.1 -- Device Reset.
742 *
743 * The PM3393 has three types of components that are
744 * individually reset:
745 *
746 * DRESETB - Digital circuitry
747 * PL4_ARESETB - PL4 analog circuitry
748 * XAUI_ARESETB - XAUI bus analog circuitry
749 *
750 * Steps to reset PM3393 using RSTB pin:
751 *
752	 * 1. Assert RSTB pin low (write 0).
753	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
754	 * 3. Wait until all external clocks and REFSEL are stable.
755	 * 4. Wait a minimum of 1ms after the external clocks and REFSEL are stable.
756	 * 5. De-assert RSTB (write 1).
757	 * 6. Wait for the internal timers to expire, ~14ms.
758	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to the
759	 *      selected reference frequency before allowing the digital
760	 *      portion of the device to operate.
761	 * 7. Wait at least 200us for the XAUI interface to stabilize.
762	 * 8. Verify the PM3393 came out of reset successfully.
763	 *    Set the successful-reset flag if everything worked, else try
764	 *    again a few more times.
765 */
766
767 successful_reset = 0;
768 for (i = 0; i < 3 && !successful_reset; i++) {
769 /* 1 */
770 t1_tpi_read(adapter, A_ELMER0_GPO, &val);
771 val &= ~1;
772 t1_tpi_write(adapter, A_ELMER0_GPO, val);
773
774 /* 2 */
775 msleep(1);
776
777 /* 3 */
778 msleep(1);
779
780 /* 4 */
781 msleep(2 /*1 extra ms for safety */ );
782
783 /* 5 */
784 val |= 1;
785 t1_tpi_write(adapter, A_ELMER0_GPO, val);
786
787 /* 6 */
788 msleep(15 /*1 extra ms for safety */ );
789
790 /* 7 */
791 msleep(1);
792
793 /* 8 */
794
795 /* Has PL4 analog block come out of reset correctly? */
796 t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
797 is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
798
799		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later
800		 * in the init sequence; figure out why. */
801
802 /* Have all PL4 block clocks locked? */
803 x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
804 /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
805 SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
806 SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
807 SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
808 is_pl4_outof_lock = (val & x);
809
810		/* ??? If this fails, we might be able to software-reset the
811		 * XAUI part and try to recover, saving us another HW reset. */
812		/* Has the XAUI MABC PLL circuitry stabilized? */
813 is_xaui_mabc_pll_locked =
814 (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
815
816 successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
817 && is_xaui_mabc_pll_locked);
818 }
819 return successful_reset ? 0 : 1;
820}
821
822struct gmac t1_pm3393_ops = {
823 STATS_TICK_SECS,
824 pm3393_mac_create,
825 pm3393_mac_reset
826};
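/*
 * Illustrative sketch only (not part of the patch): a plausible bring-up
 * sequence through the t1_pm3393_ops table above.  The member names
 * 'create' and 'reset' and the struct cmac 'ops' pointer are assumptions
 * inferred from the initializer order and pm3393_mac_create() above.
 */
static int example_bringup_pm3393(adapter_t *adapter, u8 mac_addr[6], int mtu)
{
	struct cmac *mac;

	if (t1_pm3393_ops.reset(adapter))	/* full chip reset first */
		return -1;
	mac = t1_pm3393_ops.create(adapter, 0);
	if (!mac)
		return -1;
	mac->ops->macaddress_set(mac, mac_addr);
	mac->ops->set_mtu(mac, mtu);
	mac->ops->interrupt_enable(mac);
	return mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}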
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 000000000000..b90e11f40d1f
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
1/*****************************************************************************
2 * *
3 * File: regs.h *
4 * $Revision: 1.8 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_REGS_H_
40#define _CXGB_REGS_H_
41
42/* SGE registers */
43#define A_SG_CONTROL 0x0
44
45#define S_CMDQ0_ENABLE 0
46#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
47#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
48
49#define S_CMDQ1_ENABLE 1
50#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
51#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
52
53#define S_FL0_ENABLE 2
54#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
55#define F_FL0_ENABLE V_FL0_ENABLE(1U)
56
57#define S_FL1_ENABLE 3
58#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
59#define F_FL1_ENABLE V_FL1_ENABLE(1U)
60
61#define S_CPL_ENABLE 4
62#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
63#define F_CPL_ENABLE V_CPL_ENABLE(1U)
64
65#define S_RESPONSE_QUEUE_ENABLE 5
66#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
67#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
68
69#define S_CMDQ_PRIORITY 6
70#define M_CMDQ_PRIORITY 0x3
71#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
72#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
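/*
 * Naming convention used throughout this file (shown here once, using the
 * CMDQ_PRIORITY field above as the example):
 *
 *   S_FOO    - bit offset (shift) of field FOO within its register
 *   M_FOO    - mask of FOO, right-justified to bit 0
 *   V_FOO(x) - x shifted into FOO's position, for composing write values
 *   F_FOO    - single-bit field FOO set to 1
 *   G_FOO(x) - extract FOO from a value read back from the register
 *
 * e.g.	writel(F_CMDQ0_ENABLE | V_CMDQ_PRIORITY(2), regs + A_SG_CONTROL);
 *	prio = G_CMDQ_PRIORITY(readl(regs + A_SG_CONTROL));
 */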
73
74#define S_DISABLE_CMDQ1_GTS 9
75#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
76#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
77
78#define S_DISABLE_FL0_GTS 10
79#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
80#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
81
82#define S_DISABLE_FL1_GTS 11
83#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
84#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
85
86#define S_ENABLE_BIG_ENDIAN 12
87#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
88#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
89
90#define S_ISCSI_COALESCE 14
91#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
92#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
93
94#define S_RX_PKT_OFFSET 15
95#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
96
97#define S_VLAN_XTRACT 18
98#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
99#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
100
101#define A_SG_DOORBELL 0x4
102#define A_SG_CMD0BASELWR 0x8
103#define A_SG_CMD0BASEUPR 0xc
104#define A_SG_CMD1BASELWR 0x10
105#define A_SG_CMD1BASEUPR 0x14
106#define A_SG_FL0BASELWR 0x18
107#define A_SG_FL0BASEUPR 0x1c
108#define A_SG_FL1BASELWR 0x20
109#define A_SG_FL1BASEUPR 0x24
110#define A_SG_CMD0SIZE 0x28
111#define A_SG_FL0SIZE 0x2c
112#define A_SG_RSPSIZE 0x30
113#define A_SG_RSPBASELWR 0x34
114#define A_SG_RSPBASEUPR 0x38
115#define A_SG_FLTHRESHOLD 0x3c
116#define A_SG_RSPQUEUECREDIT 0x40
117#define A_SG_SLEEPING 0x48
118#define A_SG_INTRTIMER 0x4c
119#define A_SG_CMD1SIZE 0xb0
120#define A_SG_FL1SIZE 0xb4
121#define A_SG_INT_ENABLE 0xb8
122
123#define S_RESPQ_EXHAUSTED 0
124#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
125#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
126
127#define S_RESPQ_OVERFLOW 1
128#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
129#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
130
131#define S_FL_EXHAUSTED 2
132#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
133#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
134
135#define S_PACKET_TOO_BIG 3
136#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
137#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
138
139#define S_PACKET_MISMATCH 4
140#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
141#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
142
143#define A_SG_INT_CAUSE 0xbc
144#define A_SG_RESPACCUTIMER 0xc0
145
146/* MC3 registers */
147
148#define S_READY 1
149#define V_READY(x) ((x) << S_READY)
150#define F_READY V_READY(1U)
151
152/* MC4 registers */
153
154#define A_MC4_CFG 0x180
155#define S_MC4_SLOW 25
156#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
157#define F_MC4_SLOW V_MC4_SLOW(1U)
158
159/* TPI registers */
160
161#define A_TPI_ADDR 0x280
162#define A_TPI_WR_DATA 0x284
163#define A_TPI_RD_DATA 0x288
164#define A_TPI_CSR 0x28c
165
166#define S_TPIWR 0
167#define V_TPIWR(x) ((x) << S_TPIWR)
168#define F_TPIWR V_TPIWR(1U)
169
170#define S_TPIRDY 1
171#define V_TPIRDY(x) ((x) << S_TPIRDY)
172#define F_TPIRDY V_TPIRDY(1U)
173
174#define A_TPI_PAR 0x29c
175
176#define S_TPIPAR 0
177#define M_TPIPAR 0x7f
178#define V_TPIPAR(x) ((x) << S_TPIPAR)
179#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
180
181/* TP registers */
182
183#define A_TP_IN_CONFIG 0x300
184
185#define S_TP_IN_CSPI_CPL 3
186#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
187#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
188
189#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
190#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
191#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
192
193#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
194#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
195#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
196
197#define S_TP_IN_ESPI_ETHERNET 8
198#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
199#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
200
201#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
202#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
203#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
204
205#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
206#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
207#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
208
209#define S_OFFLOAD_DISABLE 14
210#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
211#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
212
213#define A_TP_OUT_CONFIG 0x304
214
215#define S_TP_OUT_CSPI_CPL 2
216#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
217#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
218
219#define S_TP_OUT_ESPI_ETHERNET 6
220#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
221#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
222
223#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
224#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
225#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
226
227#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
228#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
229#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
230
231#define A_TP_GLOBAL_CONFIG 0x308
232
233#define S_IP_TTL 0
234#define M_IP_TTL 0xff
235#define V_IP_TTL(x) ((x) << S_IP_TTL)
236
237#define S_TCP_CSUM 11
238#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
239#define F_TCP_CSUM V_TCP_CSUM(1U)
240
241#define S_UDP_CSUM 12
242#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
243#define F_UDP_CSUM V_UDP_CSUM(1U)
244
245#define S_IP_CSUM 13
246#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
247#define F_IP_CSUM V_IP_CSUM(1U)
248
249#define S_PATH_MTU 15
250#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
251#define F_PATH_MTU V_PATH_MTU(1U)
252
253#define S_5TUPLE_LOOKUP 17
254#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
255
256#define S_SYN_COOKIE_PARAMETER 26
257#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
258
259#define A_TP_PC_CONFIG 0x348
260#define S_DIS_TX_FILL_WIN_PUSH 12
261#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
262#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
263
264#define S_TP_PC_REV 30
265#define M_TP_PC_REV 0x3
266#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
267#define A_TP_RESET 0x44c
268#define S_TP_RESET 0
269#define V_TP_RESET(x) ((x) << S_TP_RESET)
270#define F_TP_RESET V_TP_RESET(1U)
271
272#define A_TP_INT_ENABLE 0x470
273#define A_TP_INT_CAUSE 0x474
274#define A_TP_TX_DROP_CONFIG 0x4b8
275
276#define S_ENABLE_TX_DROP 31
277#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
278#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
279
280#define S_ENABLE_TX_ERROR 30
281#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
282#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
283
284#define S_DROP_TICKS_CNT 4
285#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
286
287#define S_NUM_PKTS_DROPPED 0
288#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
289
290/* CSPI registers */
291
292#define S_DIP4ERR 0
293#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
294#define F_DIP4ERR V_DIP4ERR(1U)
295
296#define S_RXDROP 1
297#define V_RXDROP(x) ((x) << S_RXDROP)
298#define F_RXDROP V_RXDROP(1U)
299
300#define S_TXDROP 2
301#define V_TXDROP(x) ((x) << S_TXDROP)
302#define F_TXDROP V_TXDROP(1U)
303
304#define S_RXOVERFLOW 3
305#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
306#define F_RXOVERFLOW V_RXOVERFLOW(1U)
307
308#define S_RAMPARITYERR 4
309#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
310#define F_RAMPARITYERR V_RAMPARITYERR(1U)
311
312/* ESPI registers */
313
314#define A_ESPI_SCH_TOKEN0 0x880
315#define A_ESPI_SCH_TOKEN1 0x884
316#define A_ESPI_SCH_TOKEN2 0x888
317#define A_ESPI_SCH_TOKEN3 0x88c
318#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
319#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
320#define A_ESPI_CALENDAR_LENGTH 0x898
321#define A_PORT_CONFIG 0x89c
322
323#define S_RX_NPORTS 0
324#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
325
326#define S_TX_NPORTS 8
327#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
328
329#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
330
331#define S_RXSTATUSENABLE 0
332#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
333#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
334
335#define S_INTEL1010MODE 4
336#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
337#define F_INTEL1010MODE V_INTEL1010MODE(1U)
338
339#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
340#define A_ESPI_TRAIN 0x8ac
341#define A_ESPI_INTR_STATUS 0x8c8
342
343#define S_DIP2PARITYERR 5
344#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
345#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
346
347#define A_ESPI_INTR_ENABLE 0x8cc
348#define A_RX_DROP_THRESHOLD 0x8d0
349#define A_ESPI_RX_RESET 0x8ec
350#define A_ESPI_MISC_CONTROL 0x8f0
351
352#define S_OUT_OF_SYNC_COUNT 0
353#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
354
355#define S_DIP2_PARITY_ERR_THRES 5
356#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
357
358#define S_DIP4_THRES 9
359#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
360
361#define S_MONITORED_PORT_NUM 25
362#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
363
364#define S_MONITORED_DIRECTION 27
365#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
366#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
367
368#define S_MONITORED_INTERFACE 28
369#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
370#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
371
372#define A_ESPI_DIP2_ERR_COUNT 0x8f4
373#define A_ESPI_CMD_ADDR 0x8f8
374
375#define S_WRITE_DATA 0
376#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
377
378#define S_REGISTER_OFFSET 8
379#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
380
381#define S_CHANNEL_ADDR 12
382#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
383
384#define S_MODULE_ADDR 16
385#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
386
387#define S_BUNDLE_ADDR 20
388#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
389
390#define S_SPI4_COMMAND 24
391#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
392
393#define A_ESPI_GOSTAT 0x8fc
394#define S_ESPI_CMD_BUSY 8
395#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
396#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
397
398/* PL registers */
399
400#define A_PL_ENABLE 0xa00
401
402#define S_PL_INTR_SGE_ERR 0
403#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
404#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
405
406#define S_PL_INTR_SGE_DATA 1
407#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
408#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
409
410#define S_PL_INTR_TP 6
411#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
412#define F_PL_INTR_TP V_PL_INTR_TP(1U)
413
414#define S_PL_INTR_ESPI 8
415#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
416#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
417
418#define S_PL_INTR_PCIX 10
419#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
420#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
421
422#define S_PL_INTR_EXT 11
423#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
424#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
425
426#define A_PL_CAUSE 0xa04
427
428/* MC5 registers */
429
430#define A_MC5_CONFIG 0xc04
431
432#define S_TCAM_RESET 1
433#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
434#define F_TCAM_RESET V_TCAM_RESET(1U)
435
436#define S_M_BUS_ENABLE 5
437#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
438#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
439
440/* PCICFG registers */
441
442#define A_PCICFG_PM_CSR 0x44
443#define A_PCICFG_VPD_ADDR 0x4a
444
445#define S_VPD_OP_FLAG 15
446#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
447#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
448
449#define A_PCICFG_VPD_DATA 0x4c
450
451#define A_PCICFG_INTR_ENABLE 0xf4
452#define A_PCICFG_INTR_CAUSE 0xf8
453
454#define A_PCICFG_MODE 0xfc
455
456#define S_PCI_MODE_64BIT 0
457#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
458#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
459
460#define S_PCI_MODE_PCIX 5
461#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
462#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
463
464#define S_PCI_MODE_CLK 6
465#define M_PCI_MODE_CLK 0x3
466#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
467
468#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 000000000000..53b41d99b00b
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
1/*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
42#include <linux/config.h>
43#include <linux/types.h>
44#include <linux/errno.h>
45#include <linux/pci.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
52#include <linux/ip.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55
56#include "cpl5_cmd.h"
57#include "sge.h"
58#include "regs.h"
59#include "espi.h"
60
61
62#ifdef NETIF_F_TSO
63#include <linux/tcp.h>
64#endif
65
66#define SGE_CMDQ_N 2
67#define SGE_FREELQ_N 2
68#define SGE_CMDQ0_E_N 1024
69#define SGE_CMDQ1_E_N 128
70#define SGE_FREEL_SIZE 4096
71#define SGE_JUMBO_FREEL_SIZE 512
72#define SGE_FREEL_REFILL_THRESH 16
73#define SGE_RESPQ_E_N 1024
74#define SGE_INTRTIMER_NRES 1000
75#define SGE_RX_COPY_THRES 256
76#define SGE_RX_SM_BUF_SIZE 1536
77
78#define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
87
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
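/*
 * Note: reserving NET_IP_ALIGN (2) bytes ahead of the 14-byte Ethernet
 * header leaves the IP header that follows it word-aligned in the RX
 * buffer; see the dma_offset handling in alloc_rx_resources() below.
 */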
91
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
101/*
102 * Command queue, receive buffer list, and response queue descriptors.
103 */
104#if defined(__BIG_ENDIAN_BITFIELD)
105struct cmdQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 flags;
109 u32 addr_hi;
110};
111
112struct freelQ_e {
113 u32 addr_lo;
114 u32 len_gen;
115 u32 gen2;
116 u32 addr_hi;
117};
118
119struct respQ_e {
120 u32 Qsleeping : 4;
121 u32 Cmdq1CreditReturn : 5;
122 u32 Cmdq1DmaComplete : 5;
123 u32 Cmdq0CreditReturn : 5;
124 u32 Cmdq0DmaComplete : 5;
125 u32 FreelistQid : 2;
126 u32 CreditValid : 1;
127 u32 DataValid : 1;
128 u32 Offload : 1;
129 u32 Eop : 1;
130 u32 Sop : 1;
131 u32 GenerationBit : 1;
132 u32 BufferLength;
133};
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135struct cmdQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 flags;
140};
141
142struct freelQ_e {
143 u32 len_gen;
144 u32 addr_lo;
145 u32 addr_hi;
146 u32 gen2;
147};
148
149struct respQ_e {
150 u32 BufferLength;
151 u32 GenerationBit : 1;
152 u32 Sop : 1;
153 u32 Eop : 1;
154 u32 Offload : 1;
155 u32 DataValid : 1;
156 u32 CreditValid : 1;
157 u32 FreelistQid : 2;
158 u32 Cmdq0DmaComplete : 5;
159 u32 Cmdq0CreditReturn : 5;
160 u32 Cmdq1DmaComplete : 5;
161 u32 Cmdq1CreditReturn : 5;
162 u32 Qsleeping : 4;
163};
164#endif
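/*
 * Illustrative sketch (not part of the patch; the driver's real descriptor
 * fill path appears further down this file): composing a command descriptor
 * with the V_CMD_xxx and F_CMD_xxx macros above.  The two struct layouts
 * exist because C bitfield order follows host byte order, while the SGE
 * expects one fixed layout in memory.
 */
static inline void example_write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
					 unsigned int len, unsigned int gen,
					 unsigned int eop)
{
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	wmb();		/* order the writes above before the valid/gen flags */
	e->flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(eop) |
		   V_CMD_GEN2(gen);
}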
165
166/*
167 * SW Context Command and Freelist Queue Descriptors
168 */
169struct cmdQ_ce {
170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len);
173};
174
175struct freelQ_ce {
176 struct sk_buff *skb;
177 DECLARE_PCI_UNMAP_ADDR(dma_addr);
178 DECLARE_PCI_UNMAP_LEN(dma_len);
179};
180
181/*
182 * SW command, freelist and response rings
183 */
184struct cmdQ {
185 unsigned long status; /* HW DMA fetch status */
186 unsigned int in_use; /* # of in-use command descriptors */
187 unsigned int size; /* # of descriptors */
188 unsigned int processed; /* total # of descs HW has processed */
189 unsigned int cleaned; /* total # of descs SW has reclaimed */
190 unsigned int stop_thres; /* SW TX queue suspend threshold */
191 u16 pidx; /* producer index (SW) */
192 u16 cidx; /* consumer index (HW) */
193 u8 genbit; /* current generation (=valid) bit */
194 u8 sop; /* is next entry start of packet? */
195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
198 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
199};
200
201struct freelQ {
202 unsigned int credits; /* # of available RX buffers */
203 unsigned int size; /* free list capacity */
204 u16 pidx; /* producer index (SW) */
205 u16 cidx; /* consumer index (HW) */
206 u16 rx_buffer_size; /* Buffer size on this free list */
207 u16 dma_offset; /* DMA offset to align IP headers */
208 u16 recycleq_idx; /* skb recycle q to use */
209 u8 genbit; /* current generation (=valid) bit */
210 struct freelQ_e *entries; /* HW freelist descriptor Q */
211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
213};
214
215struct respQ {
216 unsigned int credits; /* credits to be returned to SGE */
217 unsigned int size; /* # of response Q descriptors */
218 u16 cidx; /* consumer index (SW) */
219 u8 genbit; /* current generation(=valid) bit */
220 struct respQ_e *entries; /* HW response descriptor Q */
221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
228};
229
230/*
231 * Main SGE data structure
232 *
233 * Interrupts are handled by a single CPU, and on an MP system the
234 * application is likely to migrate to another CPU.  In that scenario we
235 * try to separate the RX (in irq context) and TX state to decrease
236 * memory contention.
237 */
238struct sge {
239 struct adapter *adapter; /* adapter backpointer */
240 struct net_device *netdev; /* netdevice backpointer */
241 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
242 struct respQ respQ; /* response Q */
243 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
244 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
245 unsigned int jumbo_fl; /* jumbo freelist Q index */
246 unsigned int intrtimer_nres; /* no-resource interrupt timer */
247 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
248 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
249 struct timer_list espibug_timer;
250 unsigned int espibug_timeout;
251 struct sk_buff *espibug_skb;
252 u32 sge_control; /* shadow value of sge control reg */
253 struct sge_intr_counts stats;
254 struct sge_port_stats port_stats[MAX_NPORTS];
255 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
256};
257
258/*
259 * PIO to indicate that memory mapped Q contains valid descriptor(s).
260 */
261static inline void doorbell_pio(struct adapter *adapter, u32 val)
262{
263 wmb();
264 writel(val, adapter->regs + A_SG_DOORBELL);
265}
266
267/*
268 * Frees all RX buffers on the freelist Q. The caller must make sure that
269 * the SGE is turned off before calling this function.
270 */
271static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
272{
273 unsigned int cidx = q->cidx;
274
275 while (q->credits--) {
276 struct freelQ_ce *ce = &q->centries[cidx];
277
278 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
279 pci_unmap_len(ce, dma_len),
280 PCI_DMA_FROMDEVICE);
281 dev_kfree_skb(ce->skb);
282 ce->skb = NULL;
283 if (++cidx == q->size)
284 cidx = 0;
285 }
286}
287
288/*
289 * Free RX free list and response queue resources.
290 */
291static void free_rx_resources(struct sge *sge)
292{
293 struct pci_dev *pdev = sge->adapter->pdev;
294 unsigned int size, i;
295
296 if (sge->respQ.entries) {
297 size = sizeof(struct respQ_e) * sge->respQ.size;
298 pci_free_consistent(pdev, size, sge->respQ.entries,
299 sge->respQ.dma_addr);
300 }
301
302 for (i = 0; i < SGE_FREELQ_N; i++) {
303 struct freelQ *q = &sge->freelQ[i];
304
305 if (q->centries) {
306 free_freelQ_buffers(pdev, q);
307 kfree(q->centries);
308 }
309 if (q->entries) {
310 size = sizeof(struct freelQ_e) * q->size;
311 pci_free_consistent(pdev, size, q->entries,
312 q->dma_addr);
313 }
314 }
315}
316
317/*
318 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
319 * response queue.
320 */
321static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
322{
323 struct pci_dev *pdev = sge->adapter->pdev;
324 unsigned int size, i;
325
326 for (i = 0; i < SGE_FREELQ_N; i++) {
327 struct freelQ *q = &sge->freelQ[i];
328
329 q->genbit = 1;
330 q->size = p->freelQ_size[i];
331 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
332 size = sizeof(struct freelQ_e) * q->size;
333 q->entries = (struct freelQ_e *)
334 pci_alloc_consistent(pdev, size, &q->dma_addr);
335 if (!q->entries)
336 goto err_no_mem;
337 memset(q->entries, 0, size);
338 size = sizeof(struct freelQ_ce) * q->size;
339 q->centries = kmalloc(size, GFP_KERNEL);
340 if (!q->centries)
341 goto err_no_mem;
342 memset(q->centries, 0, size);
343 }
344
345 /*
346	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
347	 * regular-sized Ethernet frames, while FL1 is sized not to exceed 16K,
348 * including all the sk_buff overhead.
349 *
350 * Note: For T2 FL0 and FL1 are reversed.
351 */
352 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
353 sizeof(struct cpl_rx_data) +
354 sge->freelQ[!sge->jumbo_fl].dma_offset;
355 sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
356 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
357
358 /*
359 * Setup which skb recycle Q should be used when recycling buffers from
360 * each free list.
361 */
362 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
363 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
364
365 sge->respQ.genbit = 1;
366 sge->respQ.size = SGE_RESPQ_E_N;
367 sge->respQ.credits = 0;
368 size = sizeof(struct respQ_e) * sge->respQ.size;
369 sge->respQ.entries = (struct respQ_e *)
370 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
371 if (!sge->respQ.entries)
372 goto err_no_mem;
373 memset(sge->respQ.entries, 0, size);
374 return 0;
375
376err_no_mem:
377 free_rx_resources(sge);
378 return -ENOMEM;
379}
380
381/*
382 * Reclaims n TX descriptors and frees the buffers associated with them.
383 */
384static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
385{
386 struct cmdQ_ce *ce;
387 struct pci_dev *pdev = sge->adapter->pdev;
388 unsigned int cidx = q->cidx;
389
390 q->in_use -= n;
391 ce = &q->centries[cidx];
392 while (n--) {
393 if (q->sop)
394 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
395 pci_unmap_len(ce, dma_len),
396 PCI_DMA_TODEVICE);
397 else
398 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
399 pci_unmap_len(ce, dma_len),
400 PCI_DMA_TODEVICE);
401 q->sop = 0;
402 if (ce->skb) {
403 dev_kfree_skb(ce->skb);
404 q->sop = 1;
405 }
406 ce++;
407 if (++cidx == q->size) {
408 cidx = 0;
409 ce = q->centries;
410 }
411 }
412 q->cidx = cidx;
413}
414
415/*
416 * Free TX resources.
417 *
418 * Assumes that SGE is stopped and all interrupts are disabled.
419 */
420static void free_tx_resources(struct sge *sge)
421{
422 struct pci_dev *pdev = sge->adapter->pdev;
423 unsigned int size, i;
424
425 for (i = 0; i < SGE_CMDQ_N; i++) {
426 struct cmdQ *q = &sge->cmdQ[i];
427
428 if (q->centries) {
429 if (q->in_use)
430 free_cmdQ_buffers(sge, q, q->in_use);
431 kfree(q->centries);
432 }
433 if (q->entries) {
434 size = sizeof(struct cmdQ_e) * q->size;
435 pci_free_consistent(pdev, size, q->entries,
436 q->dma_addr);
437 }
438 }
439}
440
441/*
442 * Allocates basic TX resources, consisting of memory mapped command Qs.
443 */
444static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
445{
446 struct pci_dev *pdev = sge->adapter->pdev;
447 unsigned int size, i;
448
449 for (i = 0; i < SGE_CMDQ_N; i++) {
450 struct cmdQ *q = &sge->cmdQ[i];
451
452 q->genbit = 1;
453 q->sop = 1;
454 q->size = p->cmdQ_size[i];
455 q->in_use = 0;
456 q->status = 0;
457 q->processed = q->cleaned = 0;
458 q->stop_thres = 0;
459 spin_lock_init(&q->lock);
460 size = sizeof(struct cmdQ_e) * q->size;
461 q->entries = (struct cmdQ_e *)
462 pci_alloc_consistent(pdev, size, &q->dma_addr);
463 if (!q->entries)
464 goto err_no_mem;
465 memset(q->entries, 0, size);
466 size = sizeof(struct cmdQ_ce) * q->size;
467 q->centries = kmalloc(size, GFP_KERNEL);
468 if (!q->centries)
469 goto err_no_mem;
470 memset(q->centries, 0, size);
471 }
472
473 /*
474 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
475 * only. For queue 0 set the stop threshold so we can handle one more
476 * packet from each port, plus reserve an additional 24 entries for
477	 * Ethernet packets only. Queue 1 never suspends, nor do we reserve
478 * space for Ethernet packets.
479 */
480 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
481 (MAX_SKB_FRAGS + 1);
482 return 0;
483
484err_no_mem:
485 free_tx_resources(sge);
486 return -ENOMEM;
487}
488
489static inline void setup_ring_params(struct adapter *adapter, u64 addr,
490 u32 size, int base_reg_lo,
491 int base_reg_hi, int size_reg)
492{
493 writel((u32)addr, adapter->regs + base_reg_lo);
494 writel(addr >> 32, adapter->regs + base_reg_hi);
495 writel(size, adapter->regs + size_reg);
496}
497
498/*
499 * Enable/disable VLAN acceleration.
500 */
501void t1_set_vlan_accel(struct adapter *adapter, int on_off)
502{
503 struct sge *sge = adapter->sge;
504
505 sge->sge_control &= ~F_VLAN_XTRACT;
506 if (on_off)
507 sge->sge_control |= F_VLAN_XTRACT;
508 if (adapter->open_device_map) {
509 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
510 readl(adapter->regs + A_SG_CONTROL); /* flush */
511 }
512}
513
514/*
515 * Programs the various SGE registers. The engine is not yet enabled, but
516 * sge->sge_control is set up and ready to go.
517 */
518static void configure_sge(struct sge *sge, struct sge_params *p)
519{
520 struct adapter *ap = sge->adapter;
521
522 writel(0, ap->regs + A_SG_CONTROL);
523 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
524 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
525 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
526 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
527 setup_ring_params(ap, sge->freelQ[0].dma_addr,
528 sge->freelQ[0].size, A_SG_FL0BASELWR,
529 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
530 setup_ring_params(ap, sge->freelQ[1].dma_addr,
531 sge->freelQ[1].size, A_SG_FL1BASELWR,
532 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
533
534 /* The threshold comparison uses <. */
535 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
536
537 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
538 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
539 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
540
541 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
542 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
543 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
544 F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
545 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
546
547#if defined(__BIG_ENDIAN_BITFIELD)
548 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
549#endif
550
551 /* Initialize no-resource timer */
552 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
553
554 t1_sge_set_coalesce_params(sge, p);
555}
556
557/*
558 * Return the payload capacity of the jumbo free-list buffers.
559 */
560static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
561{
562 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
563 sge->freelQ[sge->jumbo_fl].dma_offset -
564 sizeof(struct cpl_rx_data);
565}
566
567/*
568 * Frees all SGE-related resources and the sge structure itself.
569 */
570void t1_sge_destroy(struct sge *sge)
571{
572 if (sge->espibug_skb)
573 kfree_skb(sge->espibug_skb);
574
575 free_tx_resources(sge);
576 free_rx_resources(sge);
577 kfree(sge);
578}
579
580/*
581 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
582 * context Q) until the Q is full or alloc_skb fails.
583 *
584 * It is possible that the generation bits already match, indicating that the
585 * buffer is already valid and nothing needs to be done. This happens when we
586 * copied a received buffer into a new sk_buff during the interrupt processing.
587 *
588 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
589 * we specify an RX_OFFSET in order to make sure that the IP header is 4B
590 * aligned.
591 */
592static void refill_free_list(struct sge *sge, struct freelQ *q)
593{
594 struct pci_dev *pdev = sge->adapter->pdev;
595 struct freelQ_ce *ce = &q->centries[q->pidx];
596 struct freelQ_e *e = &q->entries[q->pidx];
597 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
598
599
600 while (q->credits < q->size) {
601 struct sk_buff *skb;
602 dma_addr_t mapping;
603
604 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
605 if (!skb)
606 break;
607
608 skb_reserve(skb, q->dma_offset);
609 mapping = pci_map_single(pdev, skb->data, dma_len,
610 PCI_DMA_FROMDEVICE);
611 ce->skb = skb;
612 pci_unmap_addr_set(ce, dma_addr, mapping);
613 pci_unmap_len_set(ce, dma_len, dma_len);
614 e->addr_lo = (u32)mapping;
615 e->addr_hi = (u64)mapping >> 32;
616 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
617 wmb();
618 e->gen2 = V_CMD_GEN2(q->genbit);
619
620 e++;
621 ce++;
622 if (++q->pidx == q->size) {
623 q->pidx = 0;
624 q->genbit ^= 1;
625 ce = q->centries;
626 e = q->entries;
627 }
628 q->credits++;
629 }
630
631}
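/*
 * Generation-bit sketch: the producer stamps every entry with the
 * current q->genbit, which flips each time pidx wraps, so entries left
 * over from the previous lap can never be mistaken for fresh ones. The
 * wmb() above orders the writes so that gen2 is set last, publishing
 * the entry only after its address and length are visible.
 */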
632
633/*
634 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
635 * of both rings, we go into 'few interrupt mode' in order to give the system
636 * time to free up resources.
637 */
638static void freelQs_empty(struct sge *sge)
639{
640 struct adapter *adapter = sge->adapter;
641 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
642 u32 irqholdoff_reg;
643
644 refill_free_list(sge, &sge->freelQ[0]);
645 refill_free_list(sge, &sge->freelQ[1]);
646
647 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
648 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
649 irq_reg |= F_FL_EXHAUSTED;
650 irqholdoff_reg = sge->fixed_intrtimer;
651 } else {
652 /* Clear the F_FL_EXHAUSTED interrupts for now */
653 irq_reg &= ~F_FL_EXHAUSTED;
654 irqholdoff_reg = sge->intrtimer_nres;
655 }
656 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
657 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
658
659 /* We reenable the Qs to force a freelist GTS interrupt later */
660 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
661}
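/*
 * "Few interrupt mode", concretely: while either free list remains at
 * or below a quarter of its size, F_FL_EXHAUSTED stays masked and the
 * interrupt holdoff timer is stretched to intrtimer_nres, throttling
 * interrupts until buffers can be replenished.
 */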
662
663#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
664#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
665#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
666 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
667
668/*
669 * Disable SGE interrupts.
670 */
671void t1_sge_intr_disable(struct sge *sge)
672{
673 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
674
675 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
676 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
677}
678
679/*
680 * Enable SGE interrupts.
681 */
682void t1_sge_intr_enable(struct sge *sge)
683{
684 u32 en = SGE_INT_ENABLE;
685 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
686
687 if (sge->adapter->flags & TSO_CAPABLE)
688 en &= ~F_PACKET_TOO_BIG;
689 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
690 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
691}
692
693/*
694 * Clear SGE interrupts.
695 */
696void t1_sge_intr_clear(struct sge *sge)
697{
698 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
699 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
700}
701
702/*
703 * SGE 'Error' interrupt handler
704 */
705int t1_sge_intr_error_handler(struct sge *sge)
706{
707 struct adapter *adapter = sge->adapter;
708 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
709
710 if (adapter->flags & TSO_CAPABLE)
711 cause &= ~F_PACKET_TOO_BIG;
712 if (cause & F_RESPQ_EXHAUSTED)
713 sge->stats.respQ_empty++;
714 if (cause & F_RESPQ_OVERFLOW) {
715 sge->stats.respQ_overflow++;
716 CH_ALERT("%s: SGE response queue overflow\n",
717 adapter->name);
718 }
719 if (cause & F_FL_EXHAUSTED) {
720 sge->stats.freelistQ_empty++;
721 freelQs_empty(sge);
722 }
723 if (cause & F_PACKET_TOO_BIG) {
724 sge->stats.pkt_too_big++;
725 CH_ALERT("%s: SGE max packet size exceeded\n",
726 adapter->name);
727 }
728 if (cause & F_PACKET_MISMATCH) {
729 sge->stats.pkt_mismatch++;
730 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
731 }
732 if (cause & SGE_INT_FATAL)
733 t1_fatal_err(adapter);
734
735 writel(cause, adapter->regs + A_SG_INT_CAUSE);
736 return 0;
737}
738
739const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
740{
741 return &sge->stats;
742}
743
744const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
745{
746 return &sge->port_stats[port];
747}
748
749/**
750 * recycle_fl_buf - recycle a free list buffer
751 * @fl: the free list
752 * @idx: index of buffer to recycle
753 *
754 * Recycles the specified buffer on the given free list by adding it at
755 * the next available slot on the list.
756 */
757static void recycle_fl_buf(struct freelQ *fl, int idx)
758{
759 struct freelQ_e *from = &fl->entries[idx];
760 struct freelQ_e *to = &fl->entries[fl->pidx];
761
762 fl->centries[fl->pidx] = fl->centries[idx];
763 to->addr_lo = from->addr_lo;
764 to->addr_hi = from->addr_hi;
765 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
766 wmb();
767 to->gen2 = V_CMD_GEN2(fl->genbit);
768 fl->credits++;
769
770 if (++fl->pidx == fl->size) {
771 fl->pidx = 0;
772 fl->genbit ^= 1;
773 }
774}
775
776/**
777 * get_packet - return the next ingress packet buffer
778 * @pdev: the PCI device that received the packet
779 * @fl: the SGE free list holding the packet
780 * @len: the actual packet length, excluding any SGE padding
781 * @dma_pad: padding at beginning of buffer left by SGE DMA
782 * @skb_pad: padding to be used if the packet is copied
783 * @copy_thres: length threshold under which a packet should be copied
784 * @drop_thres: # of remaining buffers before we start dropping packets
785 *
786 * Get the next packet from a free list and complete setup of the
787 * sk_buff. If the packet is small we make a copy and recycle the
788 * original buffer, otherwise we use the original buffer itself. If a
789 * positive drop threshold is supplied packets are dropped and their
790 * buffers recycled if (a) the number of remaining buffers is under the
791 * threshold and the packet is too big to copy, or (b) the packet should
792 * be copied but there is no memory for the copy.
793 */
794static inline struct sk_buff *get_packet(struct pci_dev *pdev,
795 struct freelQ *fl, unsigned int len,
796 int dma_pad, int skb_pad,
797 unsigned int copy_thres,
798 unsigned int drop_thres)
799{
800 struct sk_buff *skb;
801 struct freelQ_ce *ce = &fl->centries[fl->cidx];
802
803 if (len < copy_thres) {
804 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
805 if (likely(skb != NULL)) {
806 skb_reserve(skb, skb_pad);
807 skb_put(skb, len);
808 pci_dma_sync_single_for_cpu(pdev,
809 pci_unmap_addr(ce, dma_addr),
810 pci_unmap_len(ce, dma_len),
811 PCI_DMA_FROMDEVICE);
812 memcpy(skb->data, ce->skb->data + dma_pad, len);
813 pci_dma_sync_single_for_device(pdev,
814 pci_unmap_addr(ce, dma_addr),
815 pci_unmap_len(ce, dma_len),
816 PCI_DMA_FROMDEVICE);
817 } else if (!drop_thres)
818 goto use_orig_buf;
819
820 recycle_fl_buf(fl, fl->cidx);
821 return skb;
822 }
823
824 if (fl->credits < drop_thres) {
825 recycle_fl_buf(fl, fl->cidx);
826 return NULL;
827 }
828
829use_orig_buf:
830 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
831 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
832 skb = ce->skb;
833 skb_reserve(skb, dma_pad);
834 skb_put(skb, len);
835 return skb;
836}
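/*
 * Decision sketch for get_packet():
 *
 *	len < copy_thres:      copy into a fresh skb, recycle the buffer
 *	                       (if the alloc fails, drop the packet
 *	                       unless drop_thres is 0);
 *	credits < drop_thres:  drop the packet, recycle its buffer;
 *	otherwise:             unmap the original buffer and pass it up.
 */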
837
838/**
839 * unexpected_offload - handle an unexpected offload packet
840 * @adapter: the adapter
841 * @fl: the free list that received the packet
842 *
843 * Called when we receive an unexpected offload packet (e.g., the TOE
844 * function is disabled or the card is a NIC). Prints a message and
845 * recycles the buffer.
846 */
847static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
848{
849 struct freelQ_ce *ce = &fl->centries[fl->cidx];
850 struct sk_buff *skb = ce->skb;
851
852 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
853 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
854 CH_ERR("%s: unexpected offload packet, cmd %u\n",
855 adapter->name, *skb->data);
856 recycle_fl_buf(fl, fl->cidx);
857}
858
859/*
860 * Write the command descriptors to transmit the given skb starting at
861 * descriptor pidx with the given generation.
862 */
863static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
864 unsigned int pidx, unsigned int gen,
865 struct cmdQ *q)
866{
867 dma_addr_t mapping;
868 struct cmdQ_e *e, *e1;
869 struct cmdQ_ce *ce;
870 unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
871
872 mapping = pci_map_single(adapter->pdev, skb->data,
873 skb->len - skb->data_len, PCI_DMA_TODEVICE);
874 ce = &q->centries[pidx];
875 ce->skb = NULL;
876 pci_unmap_addr_set(ce, dma_addr, mapping);
877 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
878
879 flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
880 V_CMD_GEN2(gen);
881 e = &q->entries[pidx];
882 e->addr_lo = (u32)mapping;
883 e->addr_hi = (u64)mapping >> 32;
884 e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
885 for (e1 = e, i = 0; nfrags--; i++) {
886 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
887
888 ce++;
889 e1++;
890 if (++pidx == q->size) {
891 pidx = 0;
892 gen ^= 1;
893 ce = q->centries;
894 e1 = q->entries;
895 }
896
897 mapping = pci_map_page(adapter->pdev, frag->page,
898 frag->page_offset, frag->size,
899 PCI_DMA_TODEVICE);
900 ce->skb = NULL;
901 pci_unmap_addr_set(ce, dma_addr, mapping);
902 pci_unmap_len_set(ce, dma_len, frag->size);
903
904 e1->addr_lo = (u32)mapping;
905 e1->addr_hi = (u64)mapping >> 32;
906 e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
907 e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
908 V_CMD_GEN2(gen);
909 }
910
911 ce->skb = skb;
912 wmb();
913 e->flags = flags;
914}
915
916/*
917 * Clean up completed Tx buffers.
918 */
919static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
920{
921 unsigned int reclaim = q->processed - q->cleaned;
922
923 if (reclaim) {
924 free_cmdQ_buffers(sge, q, reclaim);
925 q->cleaned += reclaim;
926 }
927}
928
929#ifndef SET_ETHTOOL_OPS
930# define __netif_rx_complete(dev) netif_rx_complete(dev)
931#endif
932
933/*
934 * We cannot use the standard netif_rx_schedule_prep() because we have multiple
935 * ports plus the TOE all multiplexing onto a single response queue; therefore
936 * accepting new responses cannot depend on the state of any particular port.
937 * So define our own equivalent that omits the netif_running() test.
938 */
939static inline int napi_schedule_prep(struct net_device *dev)
940{
941 return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
942}
943
944
945/**
946 * sge_rx - process an ingress ethernet packet
947 * @sge: the sge structure
948 * @fl: the free list that contains the packet buffer
949 * @len: the packet length
950 *
951 * Process an ingress ethernet packet and deliver it to the stack.
952 */
953static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
954{
955 struct sk_buff *skb;
956 struct cpl_rx_pkt *p;
957 struct adapter *adapter = sge->adapter;
958
959 sge->stats.ethernet_pkts++;
960 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
961 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
962 SGE_RX_DROP_THRES);
963 if (!skb) {
964 sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
965 return 0;
966 }
967
968 p = (struct cpl_rx_pkt *)skb->data;
969 skb_pull(skb, sizeof(*p));
970 skb->dev = adapter->port[p->iff].dev;
971 skb->dev->last_rx = jiffies;
972 skb->protocol = eth_type_trans(skb, skb->dev);
973 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
974 skb->protocol == htons(ETH_P_IP) &&
975 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
976 sge->port_stats[p->iff].rx_cso_good++;
977 skb->ip_summed = CHECKSUM_UNNECESSARY;
978 } else
979 skb->ip_summed = CHECKSUM_NONE;
980
981 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
982 sge->port_stats[p->iff].vlan_xtract++;
983 if (adapter->params.sge.polling)
984 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
985 ntohs(p->vlan));
986 else
987 vlan_hwaccel_rx(skb, adapter->vlan_grp,
988 ntohs(p->vlan));
989 } else if (adapter->params.sge.polling)
990 netif_receive_skb(skb);
991 else
992 netif_rx(skb);
993 return 0;
994}
995
996/*
997 * Returns true if a command queue has enough available descriptors that
998 * we can resume Tx operation after temporarily disabling its packet queue.
999 */
1000static inline int enough_free_Tx_descs(const struct cmdQ *q)
1001{
1002 unsigned int r = q->processed - q->cleaned;
1003
1004 return q->in_use - r < (q->size >> 1);
1005}
1006
1007/*
1008 * Called when sufficient space has become available in the SGE command queues
1009 * after the Tx packet schedulers have been suspended to restart the Tx path.
1010 */
1011static void restart_tx_queues(struct sge *sge)
1012{
1013 struct adapter *adap = sge->adapter;
1014
1015 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1016 int i;
1017
1018 for_each_port(adap, i) {
1019 struct net_device *nd = adap->port[i].dev;
1020
1021 if (test_and_clear_bit(nd->if_port,
1022 &sge->stopped_tx_queues) &&
1023 netif_running(nd)) {
1024 sge->stats.cmdQ_restarted[3]++;
1025 netif_wake_queue(nd);
1026 }
1027 }
1028 }
1029}
1030
1031/*
1032 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1033 * information.
1034 */
1035static unsigned int update_tx_info(struct adapter *adapter,
1036 unsigned int flags,
1037 unsigned int pr0)
1038{
1039 struct sge *sge = adapter->sge;
1040 struct cmdQ *cmdq = &sge->cmdQ[0];
1041
1042 cmdq->processed += pr0;
1043
1044 if (flags & F_CMDQ0_ENABLE) {
1045 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1046
1047 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1048 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1049 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1050 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1051 }
1052 flags &= ~F_CMDQ0_ENABLE;
1053 }
1054
1055 if (unlikely(sge->stopped_tx_queues != 0))
1056 restart_tx_queues(sge);
1057
1058 return flags;
1059}
1060
1061/*
1062 * Process SGE responses, up to the supplied budget. Returns the number of
1063 * responses processed. A negative budget is effectively unlimited.
1064 */
1065static int process_responses(struct adapter *adapter, int budget)
1066{
1067 struct sge *sge = adapter->sge;
1068 struct respQ *q = &sge->respQ;
1069 struct respQ_e *e = &q->entries[q->cidx];
1070 int budget_left = budget;
1071 unsigned int flags = 0;
1072 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1073
1074
1075 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1076 flags |= e->Qsleeping;
1077
1078 cmdq_processed[0] += e->Cmdq0CreditReturn;
1079 cmdq_processed[1] += e->Cmdq1CreditReturn;
1080
1081 /* We batch updates to the TX side to avoid cacheline
1082 * ping-pong of TX state information on MP where the sender
1083 * might run on a different CPU than this function...
1084 */
1085 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1086 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1087 cmdq_processed[0] = 0;
1088 }
1089 if (unlikely(cmdq_processed[1] > 16)) {
1090 sge->cmdQ[1].processed += cmdq_processed[1];
1091 cmdq_processed[1] = 0;
1092 }
1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095
1096 if (unlikely(!e->Sop || !e->Eop))
1097 BUG();
1098 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl);
1100 else
1101 sge_rx(sge, fl, e->BufferLength);
1102
1103 /*
1104 * Note: this depends on each packet consuming a
1105 * single free-list buffer; cf. the BUG above.
1106 */
1107 if (++fl->cidx == fl->size)
1108 fl->cidx = 0;
1109 if (unlikely(--fl->credits <
1110 fl->size - SGE_FREEL_REFILL_THRESH))
1111 refill_free_list(sge, fl);
1112 } else
1113 sge->stats.pure_rsps++;
1114
1115 e++;
1116 if (unlikely(++q->cidx == q->size)) {
1117 q->cidx = 0;
1118 q->genbit ^= 1;
1119 e = q->entries;
1120 }
1121 prefetch(e);
1122
1123 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1124 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1125 q->credits = 0;
1126 }
1127 --budget_left;
1128 }
1129
1130 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1131 sge->cmdQ[1].processed += cmdq_processed[1];
1132
1133 budget -= budget_left;
1134 return budget;
1135}
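/*
 * Note on the budget: t1_interrupt() passes -1, so budget_left starts
 * negative and never reaches zero; the loop then runs until the
 * generation bits stop matching, and budget - budget_left still yields
 * the number of responses processed.
 */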
1136
1137/*
1138 * A simpler version of process_responses() that handles only pure (i.e.,
1139 * non-data-carrying) responses. Such responses are too lightweight to justify
1140 * calling a softirq when using NAPI, so we handle them specially in hard
1141 * interrupt context. The function is called with a pointer to a response,
1142 * which the caller must ensure is a valid pure response. Returns 1 if it
1143 * encounters a valid data-carrying response, 0 otherwise.
1144 */
1145static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1146{
1147 struct sge *sge = adapter->sge;
1148 struct respQ *q = &sge->respQ;
1149 unsigned int flags = 0;
1150 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1151
1152 do {
1153 flags |= e->Qsleeping;
1154
1155 cmdq_processed[0] += e->Cmdq0CreditReturn;
1156 cmdq_processed[1] += e->Cmdq1CreditReturn;
1157
1158 e++;
1159 if (unlikely(++q->cidx == q->size)) {
1160 q->cidx = 0;
1161 q->genbit ^= 1;
1162 e = q->entries;
1163 }
1164 prefetch(e);
1165
1166 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1167 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1168 q->credits = 0;
1169 }
1170 sge->stats.pure_rsps++;
1171 } while (e->GenerationBit == q->genbit && !e->DataValid);
1172
1173 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1174 sge->cmdQ[1].processed += cmdq_processed[1];
1175
1176 return e->GenerationBit == q->genbit;
1177}
1178
1179/*
1180 * Handler for new data events when using NAPI. This does not need any locking
1181 * or protection from interrupts as data interrupts are off at this point and
1182 * other adapter interrupts do not interfere.
1183 */
1184static int t1_poll(struct net_device *dev, int *budget)
1185{
1186 struct adapter *adapter = dev->priv;
1187 int effective_budget = min(*budget, dev->quota);
1188
1189 int work_done = process_responses(adapter, effective_budget);
1190 *budget -= work_done;
1191 dev->quota -= work_done;
1192
1193 if (work_done >= effective_budget)
1194 return 1;
1195
1196 __netif_rx_complete(dev);
1197
1198 /*
1199 * Because we don't atomically flush the following write it is
1200 * possible that in very rare cases it can reach the device in a way
1201 * that races with a new response being written plus an error interrupt
1202 * causing the NAPI interrupt handler below to return unhandled status
1203 * to the OS. To protect against this would require flushing the write
1204 * and doing both the write and the flush with interrupts off. Way too
1205 * expensive and unjustifiable given the rarity of the race.
1206 */
1207 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1208 return 0;
1209}
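/*
 * Polling sketch: t1_poll() drains at most min(*budget, dev->quota)
 * responses per call. Returning 1 keeps the device on the poll list;
 * returning 0 completes polling and re-enables the data interrupt by
 * posting the response queue index to A_SG_SLEEPING.
 */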
1210
1211/*
1212 * Returns true if the device is already scheduled for polling.
1213 */
1214static inline int napi_is_scheduled(struct net_device *dev)
1215{
1216 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1217}
1218
1219/*
1220 * NAPI version of the main interrupt handler.
1221 */
1222static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
1223{
1224 int handled;
1225 struct adapter *adapter = data;
1226 struct sge *sge = adapter->sge;
1227 struct respQ *q = &adapter->sge->respQ;
1228
1229 /*
1230 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
1231 * handler has control of the response queue and the interrupt handler
1232 * can look at the queue reliably only once it knows NAPI is off.
1233 * We can't wait that long to clear the SGE_DATA interrupt because we
1234 * could race with t1_poll rearming the SGE interrupt, so we need to
1235 * clear the interrupt speculatively and really early on.
1236 */
1237 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1238
1239 spin_lock(&adapter->async_lock);
1240 if (!napi_is_scheduled(sge->netdev)) {
1241 struct respQ_e *e = &q->entries[q->cidx];
1242
1243 if (e->GenerationBit == q->genbit) {
1244 if (e->DataValid ||
1245 process_pure_responses(adapter, e)) {
1246 if (likely(napi_schedule_prep(sge->netdev)))
1247 __netif_rx_schedule(sge->netdev);
1248 else
1249 printk(KERN_CRIT
1250 "NAPI schedule failure!\n");
1251 } else
1252 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1253 handled = 1;
1254 goto unlock;
1255 } else
1256 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1257 } else
1258 if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
1259 printk(KERN_ERR "data interrupt while NAPI running\n");
1260
1261 handled = t1_slow_intr_handler(adapter);
1262 if (!handled)
1263 sge->stats.unhandled_irqs++;
1264 unlock:
1265 spin_unlock(&adapter->async_lock);
1266 return IRQ_RETVAL(handled != 0);
1267}
1268
1269/*
1270 * Main interrupt handler, optimized assuming that we took a 'DATA'
1271 * interrupt.
1272 *
1273 * 1. Clear the interrupt
1274 * 2. Loop while we find valid descriptors and process them; accumulate
1275 * information that can be processed after the loop
1276 * 3. Tell the SGE at which index we stopped processing descriptors
1277 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1278 * outstanding TX buffers waiting, replenish RX buffers, potentially
1279 * reenable upper layers if they were turned off due to lack of TX
1280 * resources which are available again.
1281 * 5. If we took an interrupt but no valid respQ descriptors were found, we
1282 * let the slow_intr_handler run and do error handling.
1283 */
1284static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
1285{
1286 int work_done;
1287 struct respQ_e *e;
1288 struct adapter *adapter = cookie;
1289 struct respQ *Q = &adapter->sge->respQ;
1290
1291 spin_lock(&adapter->async_lock);
1292 e = &Q->entries[Q->cidx];
1293 prefetch(e);
1294
1295 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1296
1297 if (likely(e->GenerationBit == Q->genbit))
1298 work_done = process_responses(adapter, -1);
1299 else
1300 work_done = t1_slow_intr_handler(adapter);
1301
1302 /*
1303 * The unconditional clearing of the PL_CAUSE above may have raced
1304 * with DMA completion and the corresponding generation of a response
1305 * to cause us to miss the resulting data interrupt. The next write
1306 * is also unconditional to recover the missed interrupt and render
1307 * this race harmless.
1308 */
1309 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1310
1311 if (!work_done)
1312 adapter->sge->stats.unhandled_irqs++;
1313 spin_unlock(&adapter->async_lock);
1314 return IRQ_RETVAL(work_done != 0);
1315}
1316
1317intr_handler_t t1_select_intr_handler(adapter_t *adapter)
1318{
1319 return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
1320}
1321
1322/*
1323 * Enqueues the sk_buff onto the cmdQ[qid] and has the hardware fetch it.
1324 *
1325 * The code figures out how many entries the sk_buff will require in the
1326 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1327 * has completed. Then, it doesn't access the global structure anymore, but
1328 * uses the corresponding fields on the stack. In conjunction with a spinlock
1329 * around that code, we can make the function reentrant without holding the
1330 * lock when we actually enqueue (which might be expensive, especially on
1331 * architectures with IO MMUs).
1332 *
1333 * This runs with softirqs disabled.
1334 */
1335unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1336 unsigned int qid, struct net_device *dev)
1337{
1338 struct sge *sge = adapter->sge;
1339 struct cmdQ *q = &sge->cmdQ[qid];
1340 unsigned int credits, pidx, genbit, count;
1341
1342 spin_lock(&q->lock);
1343 reclaim_completed_tx(sge, q);
1344
1345 pidx = q->pidx;
1346 credits = q->size - q->in_use;
1347 count = 1 + skb_shinfo(skb)->nr_frags;
1348
1349 { /* Ethernet packet */
1350 if (unlikely(credits < count)) {
1351 netif_stop_queue(dev);
1352 set_bit(dev->if_port, &sge->stopped_tx_queues);
1353 sge->stats.cmdQ_full[3]++;
1354 spin_unlock(&q->lock);
1355 CH_ERR("%s: Tx ring full while queue awake!\n",
1356 adapter->name);
1357 return 1;
1358 }
1359 if (unlikely(credits - count < q->stop_thres)) {
1360 sge->stats.cmdQ_full[3]++;
1361 netif_stop_queue(dev);
1362 set_bit(dev->if_port, &sge->stopped_tx_queues);
1363 }
1364 }
1365 q->in_use += count;
1366 genbit = q->genbit;
1367 q->pidx += count;
1368 if (q->pidx >= q->size) {
1369 q->pidx -= q->size;
1370 q->genbit ^= 1;
1371 }
1372 spin_unlock(&q->lock);
1373
1374 write_tx_descs(adapter, skb, pidx, genbit, q);
1375
1376 /*
1377 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1378	 * the doorbell if the Q is asleep. There is a natural race where
1379	 * the hardware goes to sleep just after we checked; in that case
1380	 * the interrupt handler will detect the outstanding TX packet
1381	 * and ring the doorbell for us.
1382 */
1383 if (qid)
1384 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1385 else {
1386 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1387 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1388 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1389 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1390 }
1391 }
1392 return 0;
1393}
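/*
 * Enqueue sketch: under q->lock we reclaim completed descriptors, check
 * for space, snapshot pidx and genbit, and advance the queue state; the
 * descriptors themselves are written by write_tx_descs() after the lock
 * is dropped, so a second sender may enter the critical section while
 * the first is still copying, exactly as described above.
 */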
1394
1395#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
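/*
 * E.g. MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) packs a 1460-byte MSS into
 * bits 13:0 and the Ethernet framing type into bits 15:14.
 */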
1396
1397/*
1398 * eth_hdr_len - return the length of an Ethernet header
1399 * @data: pointer to the start of the Ethernet header
1400 *
1401 * Returns the length of an Ethernet header, including optional VLAN tag.
1402 */
1403static inline int eth_hdr_len(const void *data)
1404{
1405 const struct ethhdr *e = data;
1406
1407 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1408}
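/*
 * E.g. a frame whose h_proto is ETH_P_8021Q yields VLAN_ETH_HLEN (18
 * bytes) rather than the bare ETH_HLEN (14 bytes).
 */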
1409
1410/*
1411 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1412 */
1413int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1414{
1415 struct adapter *adapter = dev->priv;
1416 struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
1417 struct sge *sge = adapter->sge;
1418 struct cpl_tx_pkt *cpl;
1419
1420#ifdef NETIF_F_TSO
1421 if (skb_shinfo(skb)->tso_size) {
1422 int eth_type;
1423 struct cpl_tx_pkt_lso *hdr;
1424
1425 st->tso++;
1426
1427 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1428 CPL_ETH_II : CPL_ETH_II_VLAN;
1429
1430 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1431 hdr->opcode = CPL_TX_PKT_LSO;
1432 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1433 hdr->ip_hdr_words = skb->nh.iph->ihl;
1434 hdr->tcp_hdr_words = skb->h.th->doff;
1435 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1436 skb_shinfo(skb)->tso_size));
1437 hdr->len = htonl(skb->len - sizeof(*hdr));
1438 cpl = (struct cpl_tx_pkt *)hdr;
1439 sge->stats.tx_lso_pkts++;
1440 } else
1441#endif
1442 {
1443 /*
1444		 * Packets shorter than ETH_HLEN can break the MAC; drop them
1445		 * early. Also, we may get oversized packets because some
1446		 * parts of the kernel don't handle our unusual hard_header_len
1447		 * right; drop those too.
1448 */
1449 if (unlikely(skb->len < ETH_HLEN ||
1450 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1451 dev_kfree_skb_any(skb);
1452 return NET_XMIT_SUCCESS;
1453 }
1454
1455 /*
1456 * We are using a non-standard hard_header_len and some kernel
1457 * components, such as pktgen, do not handle it right.
1458 * Complain when this happens but try to fix things up.
1459 */
1460 if (unlikely(skb_headroom(skb) <
1461 dev->hard_header_len - ETH_HLEN)) {
1462 struct sk_buff *orig_skb = skb;
1463
1464 if (net_ratelimit())
1465 printk(KERN_ERR "%s: inadequate headroom in "
1466 "Tx packet\n", dev->name);
1467 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1468 dev_kfree_skb_any(orig_skb);
1469 if (!skb)
1470 return -ENOMEM;
1471 }
1472
1473 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1474 skb->ip_summed == CHECKSUM_HW &&
1475 skb->nh.iph->protocol == IPPROTO_UDP)
1476 if (unlikely(skb_checksum_help(skb, 0))) {
1477 dev_kfree_skb_any(skb);
1478 return -ENOMEM;
1479 }
1480
1481		/* We assume this catches the gratuitous ARP... and we'll use
1482 * it to flush out stuck espi packets...
1483 */
1484 if (unlikely(!adapter->sge->espibug_skb)) {
1485 if (skb->protocol == htons(ETH_P_ARP) &&
1486 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
1487 adapter->sge->espibug_skb = skb;
1488 /* We want to re-use this skb later. We
1489 * simply bump the reference count and it
1490 * will not be freed...
1491 */
1492 skb = skb_get(skb);
1493 }
1494 }
1495
1496 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1497 cpl->opcode = CPL_TX_PKT;
1498 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1499 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
1500 /* the length field isn't used so don't bother setting it */
1501
1502 st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
1503 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
1504 sge->stats.tx_reg_pkts++;
1505 }
1506 cpl->iff = dev->if_port;
1507
1508#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1509 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1510 cpl->vlan_valid = 1;
1511 cpl->vlan = htons(vlan_tx_tag_get(skb));
1512 st->vlan_insert++;
1513 } else
1514#endif
1515 cpl->vlan_valid = 0;
1516
1517 dev->trans_start = jiffies;
1518 return t1_sge_tx(skb, adapter, 0, dev);
1519}
1520
1521/*
1522 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1523 */
1524static void sge_tx_reclaim_cb(unsigned long data)
1525{
1526 int i;
1527 struct sge *sge = (struct sge *)data;
1528
1529 for (i = 0; i < SGE_CMDQ_N; ++i) {
1530 struct cmdQ *q = &sge->cmdQ[i];
1531
1532 if (!spin_trylock(&q->lock))
1533 continue;
1534
1535 reclaim_completed_tx(sge, q);
1536 if (i == 0 && q->in_use) /* flush pending credits */
1537 writel(F_CMDQ0_ENABLE,
1538 sge->adapter->regs + A_SG_DOORBELL);
1539
1540 spin_unlock(&q->lock);
1541 }
1542 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1543}
1544
1545/*
1546 * Propagate changes of the SGE coalescing parameters to the HW.
1547 */
1548int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1549{
1550 sge->netdev->poll = t1_poll;
1551 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1552 core_ticks_per_usec(sge->adapter);
1553 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1554 return 0;
1555}
1556
1557/*
1558 * Allocates both RX and TX resources and configures the SGE. However,
1559 * the hardware is not enabled yet.
1560 */
1561int t1_sge_configure(struct sge *sge, struct sge_params *p)
1562{
1563 if (alloc_rx_resources(sge, p))
1564 return -ENOMEM;
1565 if (alloc_tx_resources(sge, p)) {
1566 free_rx_resources(sge);
1567 return -ENOMEM;
1568 }
1569 configure_sge(sge, p);
1570
1571 /*
1572 * Now that we have sized the free lists calculate the payload
1573 * capacity of the large buffers. Other parts of the driver use
1574 * this to set the max offload coalescing size so that RX packets
1575 * do not overflow our large buffers.
1576 */
1577 p->large_buf_capacity = jumbo_payload_capacity(sge);
1578 return 0;
1579}
1580
1581/*
1582 * Disables the DMA engine.
1583 */
1584void t1_sge_stop(struct sge *sge)
1585{
1586 writel(0, sge->adapter->regs + A_SG_CONTROL);
1587 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1588 if (is_T2(sge->adapter))
1589 del_timer_sync(&sge->espibug_timer);
1590 del_timer_sync(&sge->tx_reclaim_timer);
1591}
1592
1593/*
1594 * Enables the DMA engine.
1595 */
1596void t1_sge_start(struct sge *sge)
1597{
1598 refill_free_list(sge, &sge->freelQ[0]);
1599 refill_free_list(sge, &sge->freelQ[1]);
1600
1601 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1602 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1603 (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1604
1605 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1606
1607 if (is_T2(sge->adapter))
1608 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1609}
1610
1611/*
1612 * Callback for the T2 ESPI 'stuck packet feature' workaround.
1613 */
1614static void espibug_workaround(void *data)
1615{
1616 struct adapter *adapter = (struct adapter *)data;
1617 struct sge *sge = adapter->sge;
1618
1619 if (netif_running(adapter->port[0].dev)) {
1620 struct sk_buff *skb = sge->espibug_skb;
1621
1622 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
1623
1624 if ((seop & 0xfff0fff) == 0xfff && skb) {
1625 if (!skb->cb[0]) {
1626 u8 ch_mac_addr[ETH_ALEN] =
1627 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
1628 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
1629 ch_mac_addr, ETH_ALEN);
1630 memcpy(skb->data + skb->len - 10, ch_mac_addr,
1631 ETH_ALEN);
1632 skb->cb[0] = 0xff;
1633 }
1634
1635 /* bump the reference count to avoid freeing of the
1636 * skb once the DMA has completed.
1637 */
1638 skb = skb_get(skb);
1639 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
1640 }
1641 }
1642 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1643}
1644
1645/*
1646 * Creates a t1_sge structure and returns suggested resource parameters.
1647 */
1648struct sge * __devinit t1_sge_create(struct adapter *adapter,
1649 struct sge_params *p)
1650{
1651 struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
1652
1653 if (!sge)
1654 return NULL;
1655 memset(sge, 0, sizeof(*sge));
1656
1657 sge->adapter = adapter;
1658 sge->netdev = adapter->port[0].dev;
1659 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
1660 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
1661
1662 init_timer(&sge->tx_reclaim_timer);
1663 sge->tx_reclaim_timer.data = (unsigned long)sge;
1664 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
1665
1666 if (is_T2(sge->adapter)) {
1667 init_timer(&sge->espibug_timer);
1668 sge->espibug_timer.function = (void *)&espibug_workaround;
1669 sge->espibug_timer.data = (unsigned long)sge->adapter;
1670 sge->espibug_timeout = 1;
1671 }
1672
1673
1674 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
1675 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
1676 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
1677 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
1678 p->rx_coalesce_usecs = 50;
1679 p->coalesce_enable = 0;
1680 p->sample_interval_usecs = 0;
1681 p->polling = 0;
1682
1683 return sge;
1684}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 000000000000..434b25586851
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
1/*****************************************************************************
2 * *
3 * File: sge.h *
4 * $Revision: 1.11 $ *
5 * $Date: 2005/06/21 22:10:55 $ *
6 * Description: *
7 * part of the Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39#ifndef _CXGB_SGE_H_
40#define _CXGB_SGE_H_
41
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <asm/byteorder.h>
45
46#ifndef IRQ_RETVAL
47#define IRQ_RETVAL(x)
48typedef void irqreturn_t;
49#endif
50
51typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
52
53struct sge_intr_counts {
54 unsigned int respQ_empty; /* # times respQ empty */
55 unsigned int respQ_overflow; /* # respQ overflow (fatal) */
56 unsigned int freelistQ_empty; /* # times freelist empty */
57 unsigned int pkt_too_big; /* packet too large (fatal) */
58 unsigned int pkt_mismatch;
59 unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
60 unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
61 unsigned int ethernet_pkts; /* # of Ethernet packets received */
62 unsigned int offload_pkts; /* # of offload packets received */
63 unsigned int offload_bundles; /* # of offload pkt bundles delivered */
64 unsigned int pure_rsps; /* # of non-payload responses */
65 unsigned int unhandled_irqs; /* # of unhandled interrupts */
66 unsigned int tx_ipfrags;
67 unsigned int tx_reg_pkts;
68 unsigned int tx_lso_pkts;
69 unsigned int tx_do_cksum;
70};
71
72struct sge_port_stats {
73 unsigned long rx_cso_good; /* # of successful RX csum offloads */
74 unsigned long tx_cso; /* # of TX checksum offloads */
75 unsigned long vlan_xtract; /* # of VLAN tag extractions */
76	unsigned long vlan_insert;  /* # of VLAN tag insertions */
77 unsigned long tso; /* # of TSO requests */
78 unsigned long rx_drops; /* # of packets dropped due to no mem */
79};
80
81struct sk_buff;
82struct net_device;
83struct adapter;
84struct sge_params;
85struct sge;
86
87struct sge *t1_sge_create(struct adapter *, struct sge_params *);
88int t1_sge_configure(struct sge *, struct sge_params *);
89int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
90void t1_sge_destroy(struct sge *);
91intr_handler_t t1_select_intr_handler(adapter_t *adapter);
92unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
93 unsigned int qid, struct net_device *netdev);
94int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
95void t1_set_vlan_accel(struct adapter *adapter, int on_off);
96void t1_sge_start(struct sge *);
97void t1_sge_stop(struct sge *);
98int t1_sge_intr_error_handler(struct sge *);
99void t1_sge_intr_enable(struct sge *);
100void t1_sge_intr_disable(struct sge *);
101void t1_sge_intr_clear(struct sge *);
102const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
103const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
104
105#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 000000000000..1ebb5d149aef
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
1/*****************************************************************************
2 * *
3 * File: subr.c *
4 * $Revision: 1.27 $ *
5 * $Date: 2005/06/22 01:08:36 $ *
6 * Description: *
7 * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41#include "elmer0.h"
42#include "regs.h"
43#include "gmac.h"
44#include "cphy.h"
45#include "sge.h"
46#include "espi.h"
47
48/**
49 * t1_wait_op_done - wait until an operation is completed
50 * @adapter: the adapter performing the operation
51 * @reg: the register to check for completion
52 * @mask: a single-bit field within @reg that indicates completion
53 * @polarity: the value of the field when the operation is completed
54 * @attempts: number of check iterations
55 * @delay: delay in usecs between iterations
56 *
57 * Wait until an operation is completed by checking a bit in a register
58 * up to @attempts times. Returns %0 if the operation completes and %1
59 * otherwise.
60 */
61static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
62 int attempts, int delay)
63{
64 while (1) {
65 u32 val = readl(adapter->regs + reg) & mask;
66
67 if (!!val == polarity)
68 return 0;
69 if (--attempts == 0)
70 return 1;
71 if (delay)
72 udelay(delay);
73 }
74}
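/*
 * Typical usage, as in __t1_tpi_write() below:
 *
 *	busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
 *			       TPI_ATTEMPTS, 3);
 *
 * i.e. poll the TPI ready bit up to TPI_ATTEMPTS times, 3us apart.
 */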
75
76#define TPI_ATTEMPTS 50
77
78/*
79 * Write a register over the TPI interface (unlocked and locked versions).
80 */
81static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
82{
83 int tpi_busy;
84
85 writel(addr, adapter->regs + A_TPI_ADDR);
86 writel(value, adapter->regs + A_TPI_WR_DATA);
87 writel(F_TPIWR, adapter->regs + A_TPI_CSR);
88
89 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
90 TPI_ATTEMPTS, 3);
91 if (tpi_busy)
92 CH_ALERT("%s: TPI write to 0x%x failed\n",
93 adapter->name, addr);
94 return tpi_busy;
95}
96
97int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
98{
99 int ret;
100
101 spin_lock(&(adapter)->tpi_lock);
102 ret = __t1_tpi_write(adapter, addr, value);
103 spin_unlock(&(adapter)->tpi_lock);
104 return ret;
105}
106
107/*
108 * Read a register over the TPI interface (unlocked and locked versions).
109 */
110static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
111{
112 int tpi_busy;
113
114 writel(addr, adapter->regs + A_TPI_ADDR);
115 writel(0, adapter->regs + A_TPI_CSR);
116
117 tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
118 TPI_ATTEMPTS, 3);
119 if (tpi_busy)
120 CH_ALERT("%s: TPI read from 0x%x failed\n",
121 adapter->name, addr);
122 else
123 *valp = readl(adapter->regs + A_TPI_RD_DATA);
124 return tpi_busy;
125}
126
127int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
128{
129 int ret;
130
131 spin_lock(&(adapter)->tpi_lock);
132 ret = __t1_tpi_read(adapter, addr, valp);
133 spin_unlock(&(adapter)->tpi_lock);
134 return ret;
135}
136
137/*
138 * Called when a port's link settings change to propagate the new values to the
139 * associated PHY and MAC. After performing the common tasks it invokes an
140 * OS-specific handler.
141 */
142/* static */ void link_changed(adapter_t *adapter, int port_id)
143{
144 int link_ok, speed, duplex, fc;
145 struct cphy *phy = adapter->port[port_id].phy;
146 struct link_config *lc = &adapter->port[port_id].link_config;
147
148 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
149
150 lc->speed = speed < 0 ? SPEED_INVALID : speed;
151 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
152 if (!(lc->requested_fc & PAUSE_AUTONEG))
153 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
154
155 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
156 /* Set MAC speed, duplex, and flow control to match PHY. */
157 struct cmac *mac = adapter->port[port_id].mac;
158
159 mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
160 lc->fc = (unsigned char)fc;
161 }
162 t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
163}
164
165static int t1_pci_intr_handler(adapter_t *adapter)
166{
167 u32 pcix_cause;
168
169 pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
170
171 if (pcix_cause) {
172 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
173 pcix_cause);
174 t1_fatal_err(adapter); /* PCI errors are fatal */
175 }
176 return 0;
177}
178
179
180/*
181 * Wait until Elmer's MI1 interface is ready for new operations.
182 */
183static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
184{
185 int attempts = 100, busy;
186
187 do {
188 u32 val;
189
190 __t1_tpi_read(adapter, mi1_reg, &val);
191 busy = val & F_MI1_OP_BUSY;
192 if (busy)
193 udelay(10);
194 } while (busy && --attempts);
195 if (busy)
196 CH_ALERT("%s: MDIO operation timed out\n",
197 adapter->name);
198 return busy;
199}
200
201/*
202 * MI1 MDIO initialization.
203 */
204static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
205{
206 u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
207 u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
208 V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
209
210 if (!(bi->caps & SUPPORTED_10000baseT_Full))
211 val |= V_MI1_SOF(1);
212 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
213}
214
215static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
216 int reg_addr, unsigned int *valp)
217{
218 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
219
220 spin_lock(&(adapter)->tpi_lock);
221
222 /* Write the address we want. */
223 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
224 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
225 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
226 MI1_OP_INDIRECT_ADDRESS);
227 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
228
229 /* Write the operation we want. */
230 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
231 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
232
233 /* Read the data. */
234 __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
235 spin_unlock(&(adapter)->tpi_lock);
236 return 0;
237}
238
239static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
240 int reg_addr, unsigned int val)
241{
242 u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
243
244 spin_lock(&(adapter)->tpi_lock);
245
246 /* Write the address we want. */
247 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
248 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
249 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
250 MI1_OP_INDIRECT_ADDRESS);
251 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
252
253 /* Write the data. */
254 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
255 __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
256 mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
257 spin_unlock(&(adapter)->tpi_lock);
258 return 0;
259}
260
261static struct mdio_ops mi1_mdio_ext_ops = {
262 mi1_mdio_init,
263 mi1_mdio_ext_read,
264 mi1_mdio_ext_write
265};
266
267enum {
268 CH_BRD_N110_1F,
269 CH_BRD_N210_1F,
270};
271
272static struct board_info t1_board[] = {
273
274{ CHBT_BOARD_N110, 1/*ports#*/,
275 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
276 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
277 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
278 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
279 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
280 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
281 "Chelsio N110 1x10GBaseX NIC" },
282
283{ CHBT_BOARD_N210, 1/*ports#*/,
284 SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
285 CHBT_MAC_PM3393, CHBT_PHY_88X2010,
286 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
287 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
288 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
289 &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
290 "Chelsio N210 1x10GBaseX NIC" },
291
292};
293
294struct pci_device_id t1_pci_tbl[] = {
295 CH_DEVICE(7, 0, CH_BRD_N110_1F),
296 CH_DEVICE(10, 1, CH_BRD_N210_1F),
297 { 0, }
298};
299
300MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
301
302/*
303 * Return the board_info structure with a given index. Out-of-range indices
304 * return NULL.
305 */
306const struct board_info *t1_get_board_info(unsigned int board_id)
307{
308 return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
309}
310
311struct chelsio_vpd_t {
312 u32 format_version;
313 u8 serial_number[16];
314 u8 mac_base_address[6];
315 u8 pad[2]; /* make multiple-of-4 size requirement explicit */
316};
317
318#define EEPROMSIZE (8 * 1024)
319#define EEPROM_MAX_POLL 4
320
321/*
322 * Read SEEPROM. A zero is written to the flag register when the address is
323 * written to the Control register. The hardware device sets the flag to
324 * one when 4B have been transferred to the Data register.
325 */
326int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
327{
328 int i = EEPROM_MAX_POLL;
329 u16 val;
330
331 if (addr >= EEPROMSIZE || (addr & 3))
332 return -EINVAL;
333
334 pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
335 do {
336 udelay(50);
337 pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
338 } while (!(val & F_VPD_OP_FLAG) && --i);
339
340 if (!(val & F_VPD_OP_FLAG)) {
341 CH_ERR("%s: reading EEPROM address 0x%x failed\n",
342 adapter->name, addr);
343 return -EIO;
344 }
345 pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
346 *data = le32_to_cpu(*data);
347 return 0;
348}
349
350static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
351{
352 int addr, ret = 0;
353
354 for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
355 ret = t1_seeprom_read(adapter, addr,
356 (u32 *)((u8 *)vpd + addr));
357
358 return ret;
359}
360
361/*
362 * Read a port's MAC address from the VPD ROM.
363 */
364static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
365{
366 struct chelsio_vpd_t vpd;
367
368 if (t1_eeprom_vpd_get(adapter, &vpd))
369 return 1;
370 memcpy(mac_addr, vpd.mac_base_address, 5);
371 mac_addr[5] = vpd.mac_base_address[5] + index;
372 return 0;
373}
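/*
 * The VPD holds a single base MAC address; port N receives base + N in
 * the final octet, so an illustrative base of 00:07:43:00:00:10 would
 * assign 00:07:43:00:00:11 to port 1.
 */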
374
375/*
376 * Set up the MAC/PHY according to the requested link settings.
377 *
378 * If the PHY can auto-negotiate, first decide what to advertise, then
379 * enable/disable auto-negotiation as desired and reset.
380 *
381 * If the PHY does not auto-negotiate, we just reset it.
382 *
383 * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
384 * otherwise do it later based on the outcome of auto-negotiation.
385 */
386int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
387{
388 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
389
390 if (lc->supported & SUPPORTED_Autoneg) {
391 lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
392 if (fc) {
393 lc->advertising |= ADVERTISED_ASYM_PAUSE;
394 if (fc == (PAUSE_RX | PAUSE_TX))
395 lc->advertising |= ADVERTISED_PAUSE;
396 }
397 phy->ops->advertise(phy, lc->advertising);
398
399 if (lc->autoneg == AUTONEG_DISABLE) {
400 lc->speed = lc->requested_speed;
401 lc->duplex = lc->requested_duplex;
402 lc->fc = (unsigned char)fc;
403 mac->ops->set_speed_duplex_fc(mac, lc->speed,
404 lc->duplex, fc);
405 /* Also disables autoneg */
406 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
407 phy->ops->reset(phy, 0);
408 } else
409 phy->ops->autoneg_enable(phy); /* also resets PHY */
410 } else {
411 mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
412 lc->fc = (unsigned char)fc;
413 phy->ops->reset(phy, 0);
414 }
415 return 0;
416}
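/*
 * Sketch of a forced-mode caller (illustrative only; constants are the
 * standard ethtool/driver definitions):
 *
 *	lc->autoneg = AUTONEG_DISABLE;
 *	lc->requested_speed = SPEED_10000;
 *	lc->requested_duplex = DUPLEX_FULL;
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	t1_link_start(phy, mac, lc);
 */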
417
418/*
419 * External interrupt handler for boards using elmer0.
420 */
421int elmer0_ext_intr_handler(adapter_t *adapter)
422{
423 struct cphy *phy;
424 int phy_cause;
425 u32 cause;
426
427 t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
428
429 switch (board_info(adapter)->board) {
430 case CHBT_BOARD_N210:
431 case CHBT_BOARD_N110:
432 if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
433 phy = adapter->port[0].phy;
434 phy_cause = phy->ops->interrupt_handler(phy);
435 if (phy_cause & cphy_cause_link_change)
436 link_changed(adapter, 0);
437 }
438 break;
439 }
440 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
441 return 0;
442}
443
444/* Enables all interrupts. */
445void t1_interrupts_enable(adapter_t *adapter)
446{
447 unsigned int i;
448 u32 pl_intr;
449
450 adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
451
452 t1_sge_intr_enable(adapter->sge);
453 if (adapter->espi) {
454 adapter->slow_intr_mask |= F_PL_INTR_ESPI;
455 t1_espi_intr_enable(adapter->espi);
456 }
457
458 /* Enable MAC/PHY interrupts for each port. */
459 for_each_port(adapter, i) {
460 adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
461 adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
462 }
463
464 /* Enable PCIX & external chip interrupts on ASIC boards. */
465 pl_intr = readl(adapter->regs + A_PL_ENABLE);
466
467 /* PCI-X interrupts */
468 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
469 0xffffffff);
470
471 adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
472 pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
473 writel(pl_intr, adapter->regs + A_PL_ENABLE);
474}
475
476/* Disables all interrupts. */
477void t1_interrupts_disable(adapter_t* adapter)
478{
479 unsigned int i;
480
481 t1_sge_intr_disable(adapter->sge);
482 if (adapter->espi)
483 t1_espi_intr_disable(adapter->espi);
484
485 /* Disable MAC/PHY interrupts for each port. */
486 for_each_port(adapter, i) {
487 adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
488 adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
489 }
490
491 /* Disable PCIX & external chip interrupts. */
492 writel(0, adapter->regs + A_PL_ENABLE);
493
494 /* PCI-X interrupts */
495 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
496
497 adapter->slow_intr_mask = 0;
498}
499
500/* Clears all interrupts. */
501void t1_interrupts_clear(adapter_t* adapter)
502{
503 unsigned int i;
504 u32 pl_intr;
505
506
507 t1_sge_intr_clear(adapter->sge);
508 if (adapter->espi)
509 t1_espi_intr_clear(adapter->espi);
510
511 /* Clear MAC/PHY interrupts for each port. */
512 for_each_port(adapter, i) {
513 adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
514 adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
515 }
516
517 /* Clear the interrupt causes for external devices. */
518 pl_intr = readl(adapter->regs + A_PL_CAUSE);
519
520 writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
521 adapter->regs + A_PL_CAUSE);
522
523 /* PCI-X interrupts */
524 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
525}
526
527/*
528 * Slow path interrupt handler for ASICs.
529 */
530int t1_slow_intr_handler(adapter_t *adapter)
531{
532 u32 cause = readl(adapter->regs + A_PL_CAUSE);
533
534 cause &= adapter->slow_intr_mask;
535 if (!cause)
536 return 0;
537 if (cause & F_PL_INTR_SGE_ERR)
538 t1_sge_intr_error_handler(adapter->sge);
539 if (cause & F_PL_INTR_ESPI)
540 t1_espi_intr_handler(adapter->espi);
541 if (cause & F_PL_INTR_PCIX)
542 t1_pci_intr_handler(adapter);
543 if (cause & F_PL_INTR_EXT)
544 t1_elmer0_ext_intr(adapter);
545
546 /* Clear the interrupts just processed. */
547 writel(cause, adapter->regs + A_PL_CAUSE);
548 (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
549 return 1;
550}
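/*
 * The (void)readl() above flushes the posted write to A_PL_CAUSE, making
 * sure the device has deasserted its interrupt before the handler returns.
 */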
551
552/* Pause deadlock avoidance parameters */
553#define DROP_MSEC 16
554#define DROP_PKTS_CNT 1
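/*
 * t1_tp_reset() below converts DROP_MSEC to core-clock ticks as
 * DROP_MSEC * (tp_clk / 1000); with a hypothetical 125 MHz core clock
 * that is 16 * 125000 = 2,000,000 ticks of sustained pause before
 * DROP_PKTS_CNT packets are dropped.
 */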
555
556static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
557{
558 u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
559
560 if (enable)
561 val |= csum_bit;
562 else
563 val &= ~csum_bit;
564 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
565}
566
567void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
568{
569 set_csum_offload(adapter, F_IP_CSUM, enable);
570}
571
572void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
573{
574 set_csum_offload(adapter, F_UDP_CSUM, enable);
575}
576
577void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
578{
579 set_csum_offload(adapter, F_TCP_CSUM, enable);
580}
581
582static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
583{
584 u32 val;
585
586 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
587 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
588 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
589 F_TP_IN_ESPI_CHECK_TCP_CSUM;
590 writel(val, adapter->regs + A_TP_IN_CONFIG);
591 writel(F_TP_OUT_CSPI_CPL |
592 F_TP_OUT_ESPI_ETHERNET |
593 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
594 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
595 adapter->regs + A_TP_OUT_CONFIG);
596
597 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
598 val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
599 writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
600
601 /*
602 * Enable pause frame deadlock prevention.
603 */
604 if (is_T2(adapter)) {
605 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
606
607 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
608 V_DROP_TICKS_CNT(drop_ticks) |
609 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
610 adapter->regs + A_TP_TX_DROP_CONFIG);
611 }
612
613 writel(F_TP_RESET, adapter->regs + A_TP_RESET);
614}
615
616int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
617 struct adapter_params *p)
618{
619 p->chip_version = bi->chip_term;
620 if (p->chip_version == CHBT_TERM_T1 ||
621 p->chip_version == CHBT_TERM_T2) {
622 u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
623
624 val = G_TP_PC_REV(val);
625 if (val == 2)
626 p->chip_revision = TERM_T1B;
627 else if (val == 3)
628 p->chip_revision = TERM_T2;
629 else
630 return -1;
631 } else
632 return -1;
633 return 0;
634}
635
636/*
637 * Enable board components other than the Chelsio chip, such as external MAC
638 * and PHY.
639 */
640static int board_init(adapter_t *adapter, const struct board_info *bi)
641{
642 switch (bi->board) {
643 case CHBT_BOARD_N110:
644 case CHBT_BOARD_N210:
645 writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
646 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
647 break;
648 }
649 return 0;
650}
651
652/*
653 * Initialize and configure the Terminator HW modules. Note that external
654 * MAC and PHYs are initialized separately.
655 */
656int t1_init_hw_modules(adapter_t *adapter)
657{
658 int err = -EIO;
659 const struct board_info *bi = board_info(adapter);
660
661 if (!bi->clock_mc4) {
662 u32 val = readl(adapter->regs + A_MC4_CFG);
663
664 writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
665 writel(F_M_BUS_ENABLE | F_TCAM_RESET,
666 adapter->regs + A_MC5_CONFIG);
667 }
668
669 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
670 bi->espi_nports))
671 goto out_err;
672
673 t1_tp_reset(adapter, bi->clock_core);
674
675 err = t1_sge_configure(adapter->sge, &adapter->params.sge);
676 if (err)
677 goto out_err;
678
679 err = 0;
680 out_err:
681 return err;
682}
683
684/*
685 * Determine a card's PCI mode.
686 */
687static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
688{
689 static unsigned short speed_map[] = { 33, 66, 100, 133 };
690 u32 pci_mode;
691
692 pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
693 p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
694 p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
695 p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
696}
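/*
 * Example decode: a mode word with G_PCI_MODE_CLK(pci_mode) == 3 and both
 * F_PCI_MODE_64BIT and F_PCI_MODE_PCIX set describes a 133 MHz, 64-bit
 * PCI-X bus.
 */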
697
698/*
699 * Release the structures holding the SW per-Terminator-HW-module state.
700 */
701void t1_free_sw_modules(adapter_t *adapter)
702{
703 unsigned int i;
704
705 for_each_port(adapter, i) {
706 struct cmac *mac = adapter->port[i].mac;
707 struct cphy *phy = adapter->port[i].phy;
708
709 if (mac)
710 mac->ops->destroy(mac);
711 if (phy)
712 phy->ops->destroy(phy);
713 }
714
715 if (adapter->sge)
716 t1_sge_destroy(adapter->sge);
717 if (adapter->espi)
718 t1_espi_destroy(adapter->espi);
719}
720
721static void __devinit init_link_config(struct link_config *lc,
722 const struct board_info *bi)
723{
724 lc->supported = bi->caps;
725 lc->requested_speed = lc->speed = SPEED_INVALID;
726 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
727 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
728 if (lc->supported & SUPPORTED_Autoneg) {
729 lc->advertising = lc->supported;
730 lc->autoneg = AUTONEG_ENABLE;
731 lc->requested_fc |= PAUSE_AUTONEG;
732 } else {
733 lc->advertising = 0;
734 lc->autoneg = AUTONEG_DISABLE;
735 }
736}
737
738
739/*
740 * Allocate and initialize the data structures that hold the SW state of
741 * the Terminator HW modules.
742 */
743int __devinit t1_init_sw_modules(adapter_t *adapter,
744 const struct board_info *bi)
745{
746 unsigned int i;
747
748 adapter->params.brd_info = bi;
749 adapter->params.nports = bi->port_number;
750 adapter->params.stats_update_period = bi->gmac->stats_update_period;
751
752 adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
753 if (!adapter->sge) {
754 CH_ERR("%s: SGE initialization failed\n",
755 adapter->name);
756 goto error;
757 }
758
759 if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
760 CH_ERR("%s: ESPI initialization failed\n",
761 adapter->name);
762 goto error;
763 }
764
765 board_init(adapter, bi);
766 bi->mdio_ops->init(adapter, bi);
767 if (bi->gphy->reset)
768 bi->gphy->reset(adapter);
769 if (bi->gmac->reset)
770 bi->gmac->reset(adapter);
771
772 for_each_port(adapter, i) {
773 u8 hw_addr[6];
774 struct cmac *mac;
775 int phy_addr = bi->mdio_phybaseaddr + i;
776
777 adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
778 bi->mdio_ops);
779 if (!adapter->port[i].phy) {
780 CH_ERR("%s: PHY %d initialization failed\n",
781 adapter->name, i);
782 goto error;
783 }
784
785 adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
786 if (!mac) {
787 CH_ERR("%s: MAC %d initialization failed\n",
788 adapter->name, i);
789 goto error;
790 }
791
792 /*
793 * Get the port's MAC address either from the EEPROM, if one
794 * exists, or from the address hardcoded in the MAC.
795 */
796 if (vpd_macaddress_get(adapter, i, hw_addr)) {
797 CH_ERR("%s: could not read MAC address from VPD ROM\n",
798 adapter->port[i].dev->name);
799 goto error;
800 }
801 memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
802 init_link_config(&adapter->port[i].link_config, bi);
803 }
804
805 get_pci_mode(adapter, &adapter->params.pci);
806 t1_interrupts_clear(adapter);
807 return 0;
808
809 error:
810 t1_free_sw_modules(adapter);
811 return -1;
812}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..81816c2b708a
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
1/*****************************************************************************
2 * *
3 * File: suni1x10gexp_regs.h *
4 * $Revision: 1.9 $ *
5 * $Date: 2005/06/22 00:17:04 $ *
6 * Description: *
7 * PMC/SIERRA (pm3393) MAC-PHY functionality. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Maintainers: maintainers@chelsio.com *
25 * *
26 * Authors: PMC/SIERRA *
27 * *
28 * History: *
29 * *
30 ****************************************************************************/
31
32#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
33#define _CXGB_SUNI1x10GEXP_REGS_H_
34
35/******************************************************************************/
36/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
37/******************************************************************************/
38/* Refer to the Register Bit Masks below for the naming of each register and */
39/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit */
40/******************************************************************************/
41
42#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
43#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
44#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
45#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
46#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
47#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
48#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
49#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
50#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
51#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
52#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
53#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
54#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
55#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
56#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
57#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
58#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
59#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
60#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
61#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
62#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
63#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
64#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
65#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
66#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
67#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
68#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
69#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
70#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
71#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
72#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
73#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
74#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
75#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
76#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
77#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
78#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
79#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
80#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
81#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
82#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
83#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
84#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
85#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
86#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
87#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
88#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
89#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
90#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
91#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
92#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
93#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
94#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
95#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
96#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
97#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
98#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
99#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
100#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
101#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
102#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
103#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
104#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
105#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
106#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
107#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
108#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
109#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
110#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
111#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
112#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
113#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
114#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
115#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
116#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
117#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
118#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
119#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
120#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
121
122/******************************************************************************/
123/* -- End register offset definitions -- */
124/******************************************************************************/
125
126/******************************************************************************/
127/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
128/******************************************************************************/
129
130/*----------------------------------------------------------------------------
131 * Register 0x0004: S/UNI-1x10GE-XP Device Status
132 * Bit 9 TOP_SXRA_EXPIRED
133 * Bit 8 TOP_MDIO_BUSY
134 * Bit 7 TOP_DTRB
135 * Bit 6 TOP_EXPIRED
136 * Bit 5 TOP_PAUSED
137 * Bit 4 TOP_PL4_ID_DOOL
138 * Bit 3 TOP_PL4_IS_DOOL
139 * Bit 2 TOP_PL4_ID_ROOL
140 * Bit 1 TOP_PL4_IS_ROOL
141 * Bit 0 TOP_PL4_OUT_ROOL
142 *----------------------------------------------------------------------------*/
143#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
144#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
145#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
146#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
147#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
148#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
149#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
150
151/*----------------------------------------------------------------------------
152 * Register 0x000E: PM3393 Global interrupt enable
153 * Bit 15 TOP_INTE
154 *----------------------------------------------------------------------------*/
155#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
156
157/*----------------------------------------------------------------------------
158 * Register 0x2040: RXXG Configuration 1
159 * Bit 15 RXXG_RXEN
160 * Bit 14 RXXG_ROCF
161 * Bit 13 RXXG_PAD_STRIP
162 * Bit 10 RXXG_PUREP
163 * Bit 9 RXXG_LONGP
164 * Bit 8 RXXG_PARF
165 * Bit 7 RXXG_FLCHK
166 * Bit 5 RXXG_PASS_CTRL
167 * Bit 3 RXXG_CRC_STRIP
168 * Bit 2-0 RXXG_MIFG
169 *----------------------------------------------------------------------------*/
170#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
171#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
172#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
173#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
174
175/*----------------------------------------------------------------------------
176 * Register 0x2070: RXXG Address Filter Control 2
177 * Bit 1 RXXG_PMODE
178 * Bit 0 RXXG_MHASH_EN
179 *----------------------------------------------------------------------------*/
180#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
181#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
182
183/*----------------------------------------------------------------------------
184 * Register 0x2100: MSTAT Control
185 * Bit 2 MSTAT_WRITE
186 * Bit 1 MSTAT_CLEAR
187 * Bit 0 MSTAT_SNAP
188 *----------------------------------------------------------------------------*/
189#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
190#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
191
192/*----------------------------------------------------------------------------
193 * Register 0x3040: TXXG Configuration Register 1
194 * Bit 15 TXXG_TXEN0
195 * Bit 13 TXXG_HOSTPAUSE
196 * Bit 12-7 TXXG_IPGT
197 * Bit 5 TXXG_32BIT_ALIGN
198 * Bit 4 TXXG_CRCEN
199 * Bit 3 TXXG_FCTX
200 * Bit 2 TXXG_FCRX
201 * Bit 1 TXXG_PADEN
202 * Bit 0 TXXG_SPRE
203 *----------------------------------------------------------------------------*/
204#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
205#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
206#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
207#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
208#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
209#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
210#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
211
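/*
 * Usage sketch (illustrative only): enabling the transmitter would
 * typically read-modify-write TXXG Configuration 1, e.g.
 *
 *	val = read_reg(mac, SUNI1x10GEXP_REG_TXXG_CONFIG_1);
 *	write_reg(mac, SUNI1x10GEXP_REG_TXXG_CONFIG_1,
 *		  val | SUNI1x10GEXP_BITMSK_TXXG_TXEN0);
 *
 * where read_reg()/write_reg() stand in for the driver's MDIO/MMIO
 * accessors.
 */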
212#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
213
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d0fa2448761d..25cc20e415da 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -156,7 +156,7 @@
156 156
157#define DRV_NAME "e100" 157#define DRV_NAME "e100"
158#define DRV_EXT "-NAPI" 158#define DRV_EXT "-NAPI"
159#define DRV_VERSION "3.4.8-k2"DRV_EXT 159#define DRV_VERSION "3.4.14-k2"DRV_EXT
160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
162#define PFX DRV_NAME ": " 162#define PFX DRV_NAME ": "
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
785} 785}
786 786
787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
788#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
788static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 789static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
789{ 790{
790 unsigned long flags; 791 unsigned long flags;
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
798 if(likely(!readb(&nic->csr->scb.cmd_lo))) 799 if(likely(!readb(&nic->csr->scb.cmd_lo)))
799 break; 800 break;
800 cpu_relax(); 801 cpu_relax();
801 if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1))) 802 if(unlikely(i > E100_WAIT_SCB_FAST))
802 udelay(5); 803 udelay(5);
803 } 804 }
804 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { 805 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
902 903
903static void e100_get_defaults(struct nic *nic) 904static void e100_get_defaults(struct nic *nic)
904{ 905{
905 struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; 906 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
906 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 907 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
907 908
908 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); 909 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
909 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ 910 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1006 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1007 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1007} 1008}
1008 1009
1010/********************************************************/
1011/* Micro code for 8086:1229 Rev 8 */
1012/********************************************************/
1013
1014/* Parameter values for the D101M B-step */
1015#define D101M_CPUSAVER_TIMER_DWORD 78
1016#define D101M_CPUSAVER_BUNDLE_DWORD 65
1017#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1018
1019#define D101M_B_RCVBUNDLE_UCODE \
1020{\
10210x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10220x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10230x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10240x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10250x00380438, 0x00000000, 0x00140000, 0x00380555, \
10260x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10270x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10280x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10290x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10300x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10310x00000000, 0x00000000, 0x00000000, 0x00000000, \
10320x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10330x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10340x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10350x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10360x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10370x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10380x00000000, 0x00000000, 0x00000000, 0x00000000, \
10390x00000000, 0x00000000, 0x00000000, 0x00000000, \
10400x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10410x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10420x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10430x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10440x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10450x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10460x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10470x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10480x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10490x00380559, 0x00000000, 0x00000000, 0x00000000, \
10500x00000000, 0x00000000, 0x00000000, 0x00000000, \
10510x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10520x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10530x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1054}
1055
1056/********************************************************/
1057/* Micro code for 8086:1229 Rev 9 */
1058/********************************************************/
1059
1060/* Parameter values for the D101S */
1061#define D101S_CPUSAVER_TIMER_DWORD 78
1062#define D101S_CPUSAVER_BUNDLE_DWORD 67
1063#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1064
1065#define D101S_RCVBUNDLE_UCODE \
1066{\
10670x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
10680x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
10690x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
10700x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
10710x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
10720x00308000, 0x00100610, 0x00100561, 0x000E0408, \
10730x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10740x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10750x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
10760x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
10770x00000000, 0x00000000, 0x00000000, 0x00000000, \
10780x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
10790x003A047E, 0x00044010, 0x00380819, 0x00000000, \
10800x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
10810x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
10820x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
10830x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
10840x00101313, 0x00380700, 0x00000000, 0x00000000, \
10850x00000000, 0x00000000, 0x00000000, 0x00000000, \
10860x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10870x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
10880x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
10890x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
10900x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
10910x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
10920x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
10930x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
10940x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
10950x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
10960x00000000, 0x00000000, 0x00000000, 0x00000000, \
10970x00000000, 0x00000000, 0x00000000, 0x00130831, \
10980x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
10990x00041000, 0x00010004, 0x00380700 \
1100}
1101
1102/********************************************************/
1103/* Micro code for the 8086:1229 Rev F/10 */
1104/********************************************************/
1105
1106/* Parameter values for the D102 E-step */
1107#define D102_E_CPUSAVER_TIMER_DWORD 42
1108#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1109#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1110
1111#define D102_E_RCVBUNDLE_UCODE \
1112{\
11130x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11140x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11150x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11160x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11170x00000000, 0x00000000, 0x00000000, 0x00000000, \
11180x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11190x00000000, 0x00000000, 0x00000000, 0x00000000, \
11200x00000000, 0x00000000, 0x00000000, 0x00000000, \
11210x00000000, 0x00000000, 0x00000000, 0x00000000, \
11220x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11230x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11240x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11250x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11260x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11270x00000000, 0x00000000, 0x00000000, 0x00000000, \
11280x00000000, 0x00000000, 0x00000000, 0x00000000, \
11290x00000000, 0x00000000, 0x00000000, 0x00000000, \
11300x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11310x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11320x00000000, 0x00000000, 0x00000000, 0x00000000, \
11330x00000000, 0x00000000, 0x00000000, 0x00000000, \
11340x00000000, 0x00000000, 0x00000000, 0x00000000, \
11350x00000000, 0x00000000, 0x00000000, 0x00000000, \
11360x00000000, 0x00000000, 0x00000000, 0x00000000, \
11370x00000000, 0x00000000, 0x00000000, 0x00000000, \
11380x00000000, 0x00000000, 0x00000000, 0x00000000, \
11390x00000000, 0x00000000, 0x00000000, 0x00000000, \
11400x00000000, 0x00000000, 0x00000000, 0x00000000, \
11410x00000000, 0x00000000, 0x00000000, 0x00000000, \
11420x00000000, 0x00000000, 0x00000000, 0x00000000, \
11430x00000000, 0x00000000, 0x00000000, 0x00000000, \
11440x00000000, 0x00000000, 0x00000000, 0x00000000, \
11450x00000000, 0x00000000, 0x00000000, 0x00000000, \
1146}
1147
1009static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1148static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1010{ 1149{
1011 int i; 1150/* *INDENT-OFF* */
1012 static const u32 ucode[UCODE_SIZE] = { 1151 static struct {
1013 /* NFS packets are misinterpreted as TCO packets and 1152 u32 ucode[UCODE_SIZE + 1];
1014 * incorrectly routed to the BMC over SMBus. This 1153 u8 mac;
1015 * microcode patch checks the fragmented IP bit in the 1154 u8 timer_dword;
1016 * NFS/UDP header to distinguish between NFS and TCO. */ 1155 u8 bundle_dword;
1017 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 1156 u8 min_size_dword;
1018 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, 1157 } ucode_opts[] = {
1019 0x00906EFD, 0x00900EFD, 0x00E00EF8, 1158 { D101M_B_RCVBUNDLE_UCODE,
1020 }; 1159 mac_82559_D101M,
1160 D101M_CPUSAVER_TIMER_DWORD,
1161 D101M_CPUSAVER_BUNDLE_DWORD,
1162 D101M_CPUSAVER_MIN_SIZE_DWORD },
1163 { D101S_RCVBUNDLE_UCODE,
1164 mac_82559_D101S,
1165 D101S_CPUSAVER_TIMER_DWORD,
1166 D101S_CPUSAVER_BUNDLE_DWORD,
1167 D101S_CPUSAVER_MIN_SIZE_DWORD },
1168 { D102_E_RCVBUNDLE_UCODE,
1169 mac_82551_F,
1170 D102_E_CPUSAVER_TIMER_DWORD,
1171 D102_E_CPUSAVER_BUNDLE_DWORD,
1172 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1173 { D102_E_RCVBUNDLE_UCODE,
1174 mac_82551_10,
1175 D102_E_CPUSAVER_TIMER_DWORD,
1176 D102_E_CPUSAVER_BUNDLE_DWORD,
1177 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1178 { {0}, 0, 0, 0, 0}
1179 }, *opts;
1180/* *INDENT-ON* */
1181
1182#define BUNDLESMALL 1
1183#define BUNDLEMAX 50
1184#define INTDELAY 15000
1185
1186 opts = ucode_opts;
1187
1188 /* do not load u-code for ICH devices */
1189 if (nic->flags & ich)
1190 return;
1191
1192 /* Search for ucode match against h/w rev_id */
1193 while (opts->mac) {
1194 if (nic->mac == opts->mac) {
1195 int i;
1196 u32 *ucode = opts->ucode;
1197
1198 /* Insert user-tunable settings */
1199 ucode[opts->timer_dword] &= 0xFFFF0000;
1200 ucode[opts->timer_dword] |=
1201 (u16) INTDELAY;
1202 ucode[opts->bundle_dword] &= 0xFFFF0000;
1203 ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
1204 ucode[opts->min_size_dword] &= 0xFFFF0000;
1205 ucode[opts->min_size_dword] |=
1206 (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1207
1208 for(i = 0; i < UCODE_SIZE; i++)
1209 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1210 cb->command = cpu_to_le16(cb_ucode);
1211 return;
1212 }
1213 opts++;
1214 }
1021 1215
1022 if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { 1216 cb->command = cpu_to_le16(cb_nop);
1023 for(i = 0; i < UCODE_SIZE; i++)
1024 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1025 cb->command = cpu_to_le16(cb_ucode);
1026 } else
1027 cb->command = cpu_to_le16(cb_nop);
1028} 1217}
1029 1218
1030static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1219static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1307{ 1496{
1308 cb->command = nic->tx_command; 1497 cb->command = nic->tx_command;
1309 /* interrupt every 16 packets regardless of delay */ 1498 /* interrupt every 16 packets regardless of delay */
1310 if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i; 1499 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1500 cb->command |= cpu_to_le16(cb_i);
1311 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1501 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1312 cb->u.tcb.tcb_byte_count = 0; 1502 cb->u.tcb.tcb_byte_count = 0;
1313 cb->u.tcb.threshold = nic->tx_threshold; 1503 cb->u.tcb.threshold = nic->tx_threshold;
1314 cb->u.tcb.tbd_count = 1; 1504 cb->u.tcb.tbd_count = 1;
1315 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1505 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1316 skb->data, skb->len, PCI_DMA_TODEVICE)); 1506 skb->data, skb->len, PCI_DMA_TODEVICE));
1317 // check for mapping failure? 1507 /* check for mapping failure? */
1318 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1508 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1319} 1509}
1320 1510
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1539 /* Don't indicate if hardware indicates errors */ 1729 /* Don't indicate if hardware indicates errors */
1540 nic->net_stats.rx_dropped++; 1730 nic->net_stats.rx_dropped++;
1541 dev_kfree_skb_any(skb); 1731 dev_kfree_skb_any(skb);
1542 } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) { 1732 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1543 /* Don't indicate oversized frames */ 1733 /* Don't indicate oversized frames */
1544 nic->rx_over_length_errors++; 1734 nic->rx_over_length_errors++;
1545 nic->net_stats.rx_dropped++; 1735 nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget)
1706static void e100_netpoll(struct net_device *netdev) 1896static void e100_netpoll(struct net_device *netdev)
1707{ 1897{
1708 struct nic *nic = netdev_priv(netdev); 1898 struct nic *nic = netdev_priv(netdev);
1899
1709 e100_disable_irq(nic); 1900 e100_disable_irq(nic);
1710 e100_intr(nic->pdev->irq, netdev, NULL); 1901 e100_intr(nic->pdev->irq, netdev, NULL);
1711 e100_tx_clean(nic); 1902 e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev,
2108 } 2299 }
2109 for(i = 0; i < E100_TEST_LEN; i++) 2300 for(i = 0; i < E100_TEST_LEN; i++)
2110 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; 2301 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2302
2303 msleep_interruptible(4 * 1000);
2111} 2304}
2112 2305
2113static int e100_phys_id(struct net_device *netdev, u32 data) 2306static int e100_phys_id(struct net_device *netdev, u32 data)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b82fd15d0891..9b596e0bbf95 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2767,7 +2767,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2767 " next_to_use <%x>\n" 2767 " next_to_use <%x>\n"
2768 " next_to_clean <%x>\n" 2768 " next_to_clean <%x>\n"
2769 "buffer_info[next_to_clean]\n" 2769 "buffer_info[next_to_clean]\n"
2770 " dma <%zx>\n" 2770 " dma <%llx>\n"
2771 " time_stamp <%lx>\n" 2771 " time_stamp <%lx>\n"
2772 " next_to_watch <%x>\n" 2772 " next_to_watch <%x>\n"
2773 " jiffies <%lx>\n" 2773 " jiffies <%lx>\n"
@@ -2776,7 +2776,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2776 E1000_READ_REG(&adapter->hw, TDT), 2776 E1000_READ_REG(&adapter->hw, TDT),
2777 tx_ring->next_to_use, 2777 tx_ring->next_to_use,
2778 i, 2778 i,
2779 tx_ring->buffer_info[i].dma, 2779 (unsigned long long)tx_ring->buffer_info[i].dma,
2780 tx_ring->buffer_info[i].time_stamp, 2780 tx_ring->buffer_info[i].time_stamp,
2781 eop, 2781 eop,
2782 jiffies, 2782 jiffies,
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 1795425f512e..8c62ced2c9b2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1263,8 +1263,8 @@ speedo_init_rx_ring(struct net_device *dev)
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); 1265 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1266 /* XXX: do we really want to call this before the NULL check? --hch */ 1266 if (skb)
1267 rx_align(skb); /* Align IP on 16 byte boundary */ 1267 rx_align(skb); /* Align IP on 16 byte boundary */
1268 sp->rx_skbuff[i] = skb; 1268 sp->rx_skbuff[i] = skb;
1269 if (skb == NULL) 1269 if (skb == NULL)
1270 break; /* OK. Just initially short of Rx bufs. */ 1270 break; /* OK. Just initially short of Rx bufs. */
@@ -1654,8 +1654,8 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1654 struct sk_buff *skb; 1654 struct sk_buff *skb;
1655 /* Get a fresh skbuff to replace the consumed one. */ 1655 /* Get a fresh skbuff to replace the consumed one. */
1656 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); 1656 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1657 /* XXX: do we really want to call this before the NULL check? --hch */ 1657 if (skb)
1658 rx_align(skb); /* Align IP on 16 byte boundary */ 1658 rx_align(skb); /* Align IP on 16 byte boundary */
1659 sp->rx_skbuff[entry] = skb; 1659 sp->rx_skbuff[entry] = skb;
1660 if (skb == NULL) { 1660 if (skb == NULL) {
1661 sp->rx_ringp[entry] = NULL; 1661 sp->rx_ringp[entry] = NULL;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c958..7d93948aec83 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,16 @@
85 * 0.33: 16 May 2005: Support for MCP51 added. 85 * 0.33: 16 May 2005: Support for MCP51 added.
86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics. 86 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
87 * 0.35: 26 Jun 2005: Support for MCP55 added. 87 * 0.35: 26 Jun 2005: Support for MCP55 added.
88 * 0.36: 28 Jun 2005: Add jumbo frame support.
89 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
90 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
91 * per-packet flags.
92 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
93 * 0.40: 19 Jul 2005: Add support for mac address change.
94 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
95 * of nv_remove
96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
97 * in the second (and later) nv_open call
88 * 98 *
89 * Known bugs: 99 * Known bugs:
90 * We suspect that on some hardware no TX done interrupts are generated. 100 * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +106,7 @@
96 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 106 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
97 * superfluous timer interrupts from the nic. 107 * superfluous timer interrupts from the nic.
98 */ 108 */
99#define FORCEDETH_VERSION "0.35" 109#define FORCEDETH_VERSION "0.41"
100#define DRV_NAME "forcedeth" 110#define DRV_NAME "forcedeth"
101 111
102#include <linux/module.h> 112#include <linux/module.h>
@@ -131,11 +141,10 @@
131 * Hardware access: 141 * Hardware access:
132 */ 142 */
133 143
134#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */ 144#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
135#define DEV_IRQMASK_1 0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */ 145#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
136#define DEV_IRQMASK_2 0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */ 146#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
137#define DEV_NEED_TIMERIRQ 0x0008 /* set the timer irq flag in the irq mask */ 147#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
138#define DEV_NEED_LINKTIMER 0x0010 /* poll link settings. Relies on the timer irq */
139 148
140enum { 149enum {
141 NvRegIrqStatus = 0x000, 150 NvRegIrqStatus = 0x000,
@@ -146,13 +155,16 @@ enum {
146#define NVREG_IRQ_RX 0x0002 155#define NVREG_IRQ_RX 0x0002
147#define NVREG_IRQ_RX_NOBUF 0x0004 156#define NVREG_IRQ_RX_NOBUF 0x0004
148#define NVREG_IRQ_TX_ERR 0x0008 157#define NVREG_IRQ_TX_ERR 0x0008
149#define NVREG_IRQ_TX2 0x0010 158#define NVREG_IRQ_TX_OK 0x0010
150#define NVREG_IRQ_TIMER 0x0020 159#define NVREG_IRQ_TIMER 0x0020
151#define NVREG_IRQ_LINK 0x0040 160#define NVREG_IRQ_LINK 0x0040
161#define NVREG_IRQ_TX_ERROR 0x0080
152#define NVREG_IRQ_TX1 0x0100 162#define NVREG_IRQ_TX1 0x0100
153#define NVREG_IRQMASK_WANTED_1 0x005f 163#define NVREG_IRQMASK_WANTED 0x00df
154#define NVREG_IRQMASK_WANTED_2 0x0147 164
155#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1)) 165#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
166 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
167 NVREG_IRQ_TX1))
156 168
157 NvRegUnknownSetupReg6 = 0x008, 169 NvRegUnknownSetupReg6 = 0x008,
158#define NVREG_UNKSETUP6_VAL 3 170#define NVREG_UNKSETUP6_VAL 3
@@ -286,6 +298,18 @@ struct ring_desc {
286 u32 FlagLen; 298 u32 FlagLen;
287}; 299};
288 300
301struct ring_desc_ex {
302 u32 PacketBufferHigh;
303 u32 PacketBufferLow;
304 u32 Reserved;
305 u32 FlagLen;
306};
307
308typedef union _ring_type {
309 struct ring_desc* orig;
310 struct ring_desc_ex* ex;
311} ring_type;
312
289#define FLAG_MASK_V1 0xffff0000 313#define FLAG_MASK_V1 0xffff0000
290#define FLAG_MASK_V2 0xffffc000 314#define FLAG_MASK_V2 0xffffc000
291#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) 315#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -293,7 +317,7 @@ struct ring_desc {
293 317
294#define NV_TX_LASTPACKET (1<<16) 318#define NV_TX_LASTPACKET (1<<16)
295#define NV_TX_RETRYERROR (1<<19) 319#define NV_TX_RETRYERROR (1<<19)
296#define NV_TX_LASTPACKET1 (1<<24) 320#define NV_TX_FORCED_INTERRUPT (1<<24)
297#define NV_TX_DEFERRED (1<<26) 321#define NV_TX_DEFERRED (1<<26)
298#define NV_TX_CARRIERLOST (1<<27) 322#define NV_TX_CARRIERLOST (1<<27)
299#define NV_TX_LATECOLLISION (1<<28) 323#define NV_TX_LATECOLLISION (1<<28)
@@ -303,7 +327,7 @@ struct ring_desc {
303 327
304#define NV_TX2_LASTPACKET (1<<29) 328#define NV_TX2_LASTPACKET (1<<29)
305#define NV_TX2_RETRYERROR (1<<18) 329#define NV_TX2_RETRYERROR (1<<18)
306#define NV_TX2_LASTPACKET1 (1<<23) 330#define NV_TX2_FORCED_INTERRUPT (1<<30)
307#define NV_TX2_DEFERRED (1<<25) 331#define NV_TX2_DEFERRED (1<<25)
308#define NV_TX2_CARRIERLOST (1<<26) 332#define NV_TX2_CARRIERLOST (1<<26)
309#define NV_TX2_LATECOLLISION (1<<27) 333#define NV_TX2_LATECOLLISION (1<<27)
@@ -379,9 +403,13 @@ struct ring_desc {
379#define TX_LIMIT_START 62 403#define TX_LIMIT_START 62
380 404
381/* rx/tx mac addr + type + vlan + align + slack*/ 405/* rx/tx mac addr + type + vlan + align + slack*/
382#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64) 406#define NV_RX_HEADERS (64)
383/* even more slack */ 407/* even more slack. */
384#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128) 408#define NV_RX_ALLOC_PAD (64)
409
410/* maximum mtu size */
411#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
412#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
385 413
386#define OOM_REFILL (1+HZ/20) 414#define OOM_REFILL (1+HZ/20)
387#define POLL_WAIT (1+HZ/100) 415#define POLL_WAIT (1+HZ/100)
@@ -396,6 +424,7 @@ struct ring_desc {
396 */ 424 */
397#define DESC_VER_1 0x0 425#define DESC_VER_1 0x0
398#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK) 426#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
427#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
399 428
400/* PHY defines */ 429/* PHY defines */
401#define PHY_OUI_MARVELL 0x5043 430#define PHY_OUI_MARVELL 0x5043
@@ -468,11 +497,12 @@ struct fe_priv {
468 /* rx specific fields. 497 /* rx specific fields.
469 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); 498
470 */ 499 */
471 struct ring_desc *rx_ring; 500 ring_type rx_ring;
472 unsigned int cur_rx, refill_rx; 501 unsigned int cur_rx, refill_rx;
473 struct sk_buff *rx_skbuff[RX_RING]; 502 struct sk_buff *rx_skbuff[RX_RING];
474 dma_addr_t rx_dma[RX_RING]; 503 dma_addr_t rx_dma[RX_RING];
475 unsigned int rx_buf_sz; 504 unsigned int rx_buf_sz;
505 unsigned int pkt_limit;
476 struct timer_list oom_kick; 506 struct timer_list oom_kick;
477 struct timer_list nic_poll; 507 struct timer_list nic_poll;
478 508
@@ -484,7 +514,7 @@ struct fe_priv {
484 /* 514 /*
485 * tx specific fields. 515 * tx specific fields.
486 */ 516 */
487 struct ring_desc *tx_ring; 517 ring_type tx_ring;
488 unsigned int next_tx, nic_tx; 518 unsigned int next_tx, nic_tx;
489 struct sk_buff *tx_skbuff[TX_RING]; 519 struct sk_buff *tx_skbuff[TX_RING];
490 dma_addr_t tx_dma[TX_RING]; 520 dma_addr_t tx_dma[TX_RING];
@@ -519,6 +549,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
519 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 549 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
520} 550}
521 551
552static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
553{
554 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
555}
556
522static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 557static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
523 int delay, int delaymax, const char *msg) 558 int delay, int delaymax, const char *msg)
524{ 559{
@@ -792,7 +827,7 @@ static int nv_alloc_rx(struct net_device *dev)
792 nr = refill_rx % RX_RING; 827 nr = refill_rx % RX_RING;
793 if (np->rx_skbuff[nr] == NULL) { 828 if (np->rx_skbuff[nr] == NULL) {
794 829
795 skb = dev_alloc_skb(RX_ALLOC_BUFSIZE); 830 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
796 if (!skb) 831 if (!skb)
797 break; 832 break;
798 833
@@ -803,9 +838,16 @@ static int nv_alloc_rx(struct net_device *dev)
803 } 838 }
804 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len, 839 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
805 PCI_DMA_FROMDEVICE); 840 PCI_DMA_FROMDEVICE);
806 np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 841 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
807 wmb(); 842 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
808 np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL); 843 wmb();
844 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
845 } else {
846 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
847 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
848 wmb();
849 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
850 }
809 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 851 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
810 dev->name, refill_rx); 852 dev->name, refill_rx);
811 refill_rx++; 853 refill_rx++;
@@ -831,19 +873,37 @@ static void nv_do_rx_refill(unsigned long data)
831 enable_irq(dev->irq); 873 enable_irq(dev->irq);
832} 874}
833 875
834static int nv_init_ring(struct net_device *dev) 876static void nv_init_rx(struct net_device *dev)
835{ 877{
836 struct fe_priv *np = get_nvpriv(dev); 878 struct fe_priv *np = get_nvpriv(dev);
837 int i; 879 int i;
838 880
839 np->next_tx = np->nic_tx = 0;
840 for (i = 0; i < TX_RING; i++)
841 np->tx_ring[i].FlagLen = 0;
842
843 np->cur_rx = RX_RING; 881 np->cur_rx = RX_RING;
844 np->refill_rx = 0; 882 np->refill_rx = 0;
845 for (i = 0; i < RX_RING; i++) 883 for (i = 0; i < RX_RING; i++)
846 np->rx_ring[i].FlagLen = 0; 884 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
885 np->rx_ring.orig[i].FlagLen = 0;
886 else
887 np->rx_ring.ex[i].FlagLen = 0;
888}
889
890static void nv_init_tx(struct net_device *dev)
891{
892 struct fe_priv *np = get_nvpriv(dev);
893 int i;
894
895 np->next_tx = np->nic_tx = 0;
896 for (i = 0; i < TX_RING; i++)
897 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
898 np->tx_ring.orig[i].FlagLen = 0;
899 else
900 np->tx_ring.ex[i].FlagLen = 0;
901}
902
903static int nv_init_ring(struct net_device *dev)
904{
905 nv_init_tx(dev);
906 nv_init_rx(dev);
847 return nv_alloc_rx(dev); 907 return nv_alloc_rx(dev);
848} 908}
849 909
@@ -852,7 +912,10 @@ static void nv_drain_tx(struct net_device *dev)
852 struct fe_priv *np = get_nvpriv(dev); 912 struct fe_priv *np = get_nvpriv(dev);
853 int i; 913 int i;
854 for (i = 0; i < TX_RING; i++) { 914 for (i = 0; i < TX_RING; i++) {
855 np->tx_ring[i].FlagLen = 0; 915 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
916 np->tx_ring.orig[i].FlagLen = 0;
917 else
918 np->tx_ring.ex[i].FlagLen = 0;
856 if (np->tx_skbuff[i]) { 919 if (np->tx_skbuff[i]) {
857 pci_unmap_single(np->pci_dev, np->tx_dma[i], 920 pci_unmap_single(np->pci_dev, np->tx_dma[i],
858 np->tx_skbuff[i]->len, 921 np->tx_skbuff[i]->len,
@@ -869,7 +932,10 @@ static void nv_drain_rx(struct net_device *dev)
869 struct fe_priv *np = get_nvpriv(dev); 932 struct fe_priv *np = get_nvpriv(dev);
870 int i; 933 int i;
871 for (i = 0; i < RX_RING; i++) { 934 for (i = 0; i < RX_RING; i++) {
872 np->rx_ring[i].FlagLen = 0; 935 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
936 np->rx_ring.orig[i].FlagLen = 0;
937 else
938 np->rx_ring.ex[i].FlagLen = 0;
873 wmb(); 939 wmb();
874 if (np->rx_skbuff[i]) { 940 if (np->rx_skbuff[i]) {
875 pci_unmap_single(np->pci_dev, np->rx_dma[i], 941 pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +966,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
900 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, 966 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
901 PCI_DMA_TODEVICE); 967 PCI_DMA_TODEVICE);
902 968
903 np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 969 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
970 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
971 else {
972 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
973 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
974 }
904 975
905 spin_lock_irq(&np->lock); 976 spin_lock_irq(&np->lock);
906 wmb(); 977 wmb();
907 np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); 978 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
979 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
980 else
981 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
908 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n", 982 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
909 dev->name, np->next_tx); 983 dev->name, np->next_tx);
910 { 984 {
@@ -942,7 +1016,10 @@ static void nv_tx_done(struct net_device *dev)
942 while (np->nic_tx != np->next_tx) { 1016 while (np->nic_tx != np->next_tx) {
943 i = np->nic_tx % TX_RING; 1017 i = np->nic_tx % TX_RING;
944 1018
945 Flags = le32_to_cpu(np->tx_ring[i].FlagLen); 1019 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1020 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
1021 else
1022 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
946 1023
947 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", 1024 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
948 dev->name, np->nic_tx, Flags); 1025 dev->name, np->nic_tx, Flags);
@@ -993,9 +1070,56 @@ static void nv_tx_timeout(struct net_device *dev)
993 struct fe_priv *np = get_nvpriv(dev); 1070 struct fe_priv *np = get_nvpriv(dev);
994 u8 __iomem *base = get_hwbase(dev); 1071 u8 __iomem *base = get_hwbase(dev);
995 1072
996 dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name, 1073 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
997 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK); 1074 readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
998 1075
1076 {
1077 int i;
1078
1079 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
1080 dev->name, (unsigned long)np->ring_addr,
1081 np->next_tx, np->nic_tx);
1082 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1083 for (i=0;i<0x400;i+= 32) {
1084 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
1085 i,
1086 readl(base + i + 0), readl(base + i + 4),
1087 readl(base + i + 8), readl(base + i + 12),
1088 readl(base + i + 16), readl(base + i + 20),
1089 readl(base + i + 24), readl(base + i + 28));
1090 }
1091 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
1092 for (i=0;i<TX_RING;i+= 4) {
1093 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1094 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1095 i,
1096 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
1097 le32_to_cpu(np->tx_ring.orig[i].FlagLen),
1098 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
1099 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
1100 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
1101 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
1102 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
1103 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
1104 } else {
1105 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1106 i,
1107 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
1108 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
1109 le32_to_cpu(np->tx_ring.ex[i].FlagLen),
1110 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
1111 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
1112 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
1113 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
1114 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
1115 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
1116 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
1117 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
1118 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
1119 }
1120 }
1121 }
1122
999 spin_lock_irq(&np->lock); 1123 spin_lock_irq(&np->lock);
1000 1124
1001 /* 1) stop tx engine */ 1125 /* 1) stop tx engine */
@@ -1009,7 +1133,10 @@ static void nv_tx_timeout(struct net_device *dev)
1009 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 1133 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1010 nv_drain_tx(dev); 1134 nv_drain_tx(dev);
1011 np->next_tx = np->nic_tx = 0; 1135 np->next_tx = np->nic_tx = 0;
1012 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 1136 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1137 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1138 else
1139 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1013 netif_wake_queue(dev); 1140 netif_wake_queue(dev);
1014 } 1141 }
1015 1142
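
Reviewer note: the dead-entry recovery above reprograms NvRegTxRingPhysAddr, and the same calculation recurs in nv_change_mtu(), nv_open() and nv_probe(). Both rings come from one DMA-coherent allocation with the RX_RING rx descriptors first, so the tx base is ring_addr plus RX_RING descriptor strides, and the stride depends on desc_ver. A small sketch with assumed ring sizes and simplified layouts:

#include <stdio.h>
#include <stdint.h>

#define RX_RING 128   /* assumed; the driver defines its own sizes */
#define TX_RING 64

struct ring_desc    { uint32_t PacketBuffer, FlagLen; };       /* 8 bytes  */
struct ring_desc_ex { uint32_t PacketBufferHigh, PacketBufferLow,
		      FlagLen; };                               /* 12 bytes */

int main(void)
{
	uint64_t ring_addr = 0x1f000000;  /* example bus address */

	printf("v1/v2 tx base: %#llx\n", (unsigned long long)
	       (ring_addr + RX_RING * sizeof(struct ring_desc)));
	printf("v3    tx base: %#llx\n", (unsigned long long)
	       (ring_addr + RX_RING * sizeof(struct ring_desc_ex)));
	return 0;
}

Forgetting the larger format-3 stride is exactly the class of bug this hunk guards against.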
@@ -1084,8 +1211,13 @@ static void nv_rx_process(struct net_device *dev)
1084 break; /* we scanned the whole ring - do not continue */ 1211 break; /* we scanned the whole ring - do not continue */
1085 1212
1086 i = np->cur_rx % RX_RING; 1213 i = np->cur_rx % RX_RING;
1087 Flags = le32_to_cpu(np->rx_ring[i].FlagLen); 1214 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1088 len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver); 1215 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
1216 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1217 } else {
1218 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
1219 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1220 }
1089 1221
1090 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1222 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
1091 dev->name, np->cur_rx, Flags); 1223 dev->name, np->cur_rx, Flags);
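
Reviewer note: nv_descr_getlength() and its _ex variant both reduce to masking the length bits out of FlagLen, with the status flags living above them. A hedged sketch; the mask width here is an assumption for illustration, not the driver's actual LEN_MASK:

#include <stdio.h>
#include <stdint.h>

#define LEN_MASK 0x3fff   /* assumed: low bits carry the byte count */

static int descr_getlength(uint32_t flaglen)
{
	return (int)(flaglen & LEN_MASK);
}

int main(void)
{
	uint32_t flaglen = 0x80000000u | 1514;  /* status flag + length */
	printf("len = %d\n", descr_getlength(flaglen));
	return 0;
}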
@@ -1207,15 +1339,133 @@ next_pkt:
1207 } 1339 }
1208} 1340}
1209 1341
1342static void set_bufsize(struct net_device *dev)
1343{
1344 struct fe_priv *np = netdev_priv(dev);
1345
1346 if (dev->mtu <= ETH_DATA_LEN)
1347 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
1348 else
1349 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
1350}
1351
1210/* 1352/*
1211 * nv_change_mtu: dev->change_mtu function 1353 * nv_change_mtu: dev->change_mtu function
1212 * Called with dev_base_lock held for read. 1354 * Called with dev_base_lock held for read.
1213 */ 1355 */
1214static int nv_change_mtu(struct net_device *dev, int new_mtu) 1356static int nv_change_mtu(struct net_device *dev, int new_mtu)
1215{ 1357{
1216 if (new_mtu > ETH_DATA_LEN) 1358 struct fe_priv *np = get_nvpriv(dev);
1359 int old_mtu;
1360
1361 if (new_mtu < 64 || new_mtu > np->pkt_limit)
1217 return -EINVAL; 1362 return -EINVAL;
1363
1364 old_mtu = dev->mtu;
1218 dev->mtu = new_mtu; 1365 dev->mtu = new_mtu;
1366
1367 /* return early if the buffer sizes will not change */
1368 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1369 return 0;
1370 if (old_mtu == new_mtu)
1371 return 0;
1372
1373 /* synchronized against open : rtnl_lock() held by caller */
1374 if (netif_running(dev)) {
1375 u8 *base = get_hwbase(dev);
1376 /*
1377 * It seems that the nic preloads valid ring entries into an
1378 * internal buffer. The procedure for flushing everything is
1379 * guessed; there is probably a simpler approach.
1380 * Changing the MTU is a rare event, so it shouldn't matter.
1381 */
1382 disable_irq(dev->irq);
1383 spin_lock_bh(&dev->xmit_lock);
1384 spin_lock(&np->lock);
1385 /* stop engines */
1386 nv_stop_rx(dev);
1387 nv_stop_tx(dev);
1388 nv_txrx_reset(dev);
1389 /* drain rx queue */
1390 nv_drain_rx(dev);
1391 nv_drain_tx(dev);
1392 /* reinit driver view of the rx queue */
1393 nv_init_rx(dev);
1394 nv_init_tx(dev);
1395 /* alloc new rx buffers */
1396 set_bufsize(dev);
1397 if (nv_alloc_rx(dev)) {
1398 if (!np->in_shutdown)
1399 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1400 }
1401 /* reinit nic view of the rx queue */
1402 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1403 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1404 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1405 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1406 else
1407 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1408 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1409 base + NvRegRingSizes);
1410 pci_push(base);
1411 writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
1412 pci_push(base);
1413
1414 /* restart rx engine */
1415 nv_start_rx(dev);
1416 nv_start_tx(dev);
1417 spin_unlock(&np->lock);
1418 spin_unlock_bh(&dev->xmit_lock);
1419 enable_irq(dev->irq);
1420 }
1421 return 0;
1422}
1423
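
Reviewer note: set_bufsize() pins the rx buffer size to the larger of the MTU and the standard Ethernet payload, plus headroom; that is what lets nv_change_mtu() return early when both the old and new MTU fit in standard buffers. A standalone sketch (the NV_RX_HEADERS value is assumed):

#include <stdio.h>

#define ETH_DATA_LEN  1500
#define NV_RX_HEADERS 64      /* assumed headroom value */

static int rx_buf_sz(int mtu)
{
	return (mtu <= ETH_DATA_LEN ? ETH_DATA_LEN : mtu) + NV_RX_HEADERS;
}

int main(void)
{
	printf("mtu 1500 -> %d byte buffers\n", rx_buf_sz(1500));
	printf("mtu 9000 -> %d byte buffers\n", rx_buf_sz(9000)); /* jumbo */
	return 0;
}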
1424static void nv_copy_mac_to_hw(struct net_device *dev)
1425{
1426 u8 *base = get_hwbase(dev);
1427 u32 mac[2];
1428
1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1430 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1431 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1432
1433 writel(mac[0], base + NvRegMacAddrA);
1434 writel(mac[1], base + NvRegMacAddrB);
1435}
1436
1437/*
1438 * nv_set_mac_address: dev->set_mac_address function
1439 * Called with rtnl_lock() held.
1440 */
1441static int nv_set_mac_address(struct net_device *dev, void *addr)
1442{
1443 struct fe_priv *np = get_nvpriv(dev);
1444 struct sockaddr *macaddr = (struct sockaddr*)addr;
1445
1446 if(!is_valid_ether_addr(macaddr->sa_data))
1447 return -EADDRNOTAVAIL;
1448
1449 /* synchronized against open : rtnl_lock() held by caller */
1450 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
1451
1452 if (netif_running(dev)) {
1453 spin_lock_bh(&dev->xmit_lock);
1454 spin_lock_irq(&np->lock);
1455
1456 /* stop rx engine */
1457 nv_stop_rx(dev);
1458
1459 /* set mac address */
1460 nv_copy_mac_to_hw(dev);
1461
1462 /* restart rx engine */
1463 nv_start_rx(dev);
1464 spin_unlock_irq(&np->lock);
1465 spin_unlock_bh(&dev->xmit_lock);
1466 } else {
1467 nv_copy_mac_to_hw(dev);
1468 }
1219 return 0; 1469 return 0;
1220} 1470}
1221 1471
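
Reviewer note: nv_copy_mac_to_hw() packs the six address bytes into the two 32-bit MAC registers low byte first. A userspace sketch with an example address; the cast avoids shifting a promoted byte into the sign bit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t a[6] = { 0x00, 0x50, 0x8d, 0x12, 0x34, 0x56 }; /* example */

	uint32_t mac0 = a[0] | a[1] << 8 | a[2] << 16 | (uint32_t)a[3] << 24;
	uint32_t mac1 = a[4] | a[5] << 8;

	printf("NvRegMacAddrA <- %08x\n", (unsigned)mac0); /* bytes 0..3 */
	printf("NvRegMacAddrB <- %08x\n", (unsigned)mac1); /* bytes 4..5 */
	return 0;
}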
@@ -1470,7 +1720,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1470 if (!(events & np->irqmask)) 1720 if (!(events & np->irqmask))
1471 break; 1721 break;
1472 1722
1473 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) { 1723 if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
1474 spin_lock(&np->lock); 1724 spin_lock(&np->lock);
1475 nv_tx_done(dev); 1725 nv_tx_done(dev);
1476 spin_unlock(&np->lock); 1726 spin_unlock(&np->lock);
@@ -1761,6 +2011,50 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1761 return 0; 2011 return 0;
1762} 2012}
1763 2013
2014#define FORCEDETH_REGS_VER 1
2015#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
2016
2017static int nv_get_regs_len(struct net_device *dev)
2018{
2019 return FORCEDETH_REGS_SIZE;
2020}
2021
2022static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2023{
2024 struct fe_priv *np = get_nvpriv(dev);
2025 u8 __iomem *base = get_hwbase(dev);
2026 u32 *rbuf = buf;
2027 int i;
2028
2029 regs->version = FORCEDETH_REGS_VER;
2030 spin_lock_irq(&np->lock);
2031 for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
2032 rbuf[i] = readl(base + i*sizeof(u32));
2033 spin_unlock_irq(&np->lock);
2034}
2035
2036static int nv_nway_reset(struct net_device *dev)
2037{
2038 struct fe_priv *np = get_nvpriv(dev);
2039 int ret;
2040
2041 spin_lock_irq(&np->lock);
2042 if (np->autoneg) {
2043 int bmcr;
2044
2045 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2046 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
2047 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
2048
2049 ret = 0;
2050 } else {
2051 ret = -EINVAL;
2052 }
2053 spin_unlock_irq(&np->lock);
2054
2055 return ret;
2056}
2057
1764static struct ethtool_ops ops = { 2058static struct ethtool_ops ops = {
1765 .get_drvinfo = nv_get_drvinfo, 2059 .get_drvinfo = nv_get_drvinfo,
1766 .get_link = ethtool_op_get_link, 2060 .get_link = ethtool_op_get_link,
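
Reviewer note: the three new ethtool hooks back `ethtool -d ethX` (register snapshot via nv_get_regs) and `ethtool -r ethX` (autoneg restart via nv_nway_reset). The restart itself is the standard MII sequence of setting two BMCR bits; a sketch using the usual <linux/mii.h> values:

#include <stdio.h>
#include <stdint.h>

#define BMCR_ANRESTART 0x0200  /* restart auto-negotiation */
#define BMCR_ANENABLE  0x1000  /* enable auto-negotiation  */

int main(void)
{
	uint16_t bmcr = 0x1140;   /* example read: aneg enabled, 1000FD */

	bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
	printf("BMCR <- %#06x\n", bmcr);  /* written back via mii_rw() */
	return 0;
}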
@@ -1768,6 +2062,9 @@ static struct ethtool_ops ops = {
1768 .set_wol = nv_set_wol, 2062 .set_wol = nv_set_wol,
1769 .get_settings = nv_get_settings, 2063 .get_settings = nv_get_settings,
1770 .set_settings = nv_set_settings, 2064 .set_settings = nv_set_settings,
2065 .get_regs_len = nv_get_regs_len,
2066 .get_regs = nv_get_regs,
2067 .nway_reset = nv_nway_reset,
1771}; 2068};
1772 2069
1773static int nv_open(struct net_device *dev) 2070static int nv_open(struct net_device *dev)
@@ -1792,6 +2089,7 @@ static int nv_open(struct net_device *dev)
1792 writel(0, base + NvRegAdapterControl); 2089 writel(0, base + NvRegAdapterControl);
1793 2090
1794 /* 2) initialize descriptor rings */ 2091 /* 2) initialize descriptor rings */
2092 set_bufsize(dev);
1795 oom = nv_init_ring(dev); 2093 oom = nv_init_ring(dev);
1796 2094
1797 writel(0, base + NvRegLinkSpeed); 2095 writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2100,14 @@ static int nv_open(struct net_device *dev)
1802 np->in_shutdown = 0; 2100 np->in_shutdown = 0;
1803 2101
1804 /* 3) set mac address */ 2102 /* 3) set mac address */
1805 { 2103 nv_copy_mac_to_hw(dev);
1806 u32 mac[2];
1807
1808 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
1809 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
1810 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
1811
1812 writel(mac[0], base + NvRegMacAddrA);
1813 writel(mac[1], base + NvRegMacAddrB);
1814 }
1815 2104
1816 /* 4) give hw rings */ 2105 /* 4) give hw rings */
1817 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr); 2106 writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
1818 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 2107 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2108 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
2109 else
2110 writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1819 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 2111 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1820 base + NvRegRingSizes); 2112 base + NvRegRingSizes);
1821 2113
@@ -1837,7 +2129,7 @@ static int nv_open(struct net_device *dev)
1837 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 2129 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
1838 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 2130 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
1839 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 2131 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
1840 writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig); 2132 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
1841 2133
1842 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 2134 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
1843 get_random_bytes(&i, sizeof(i)); 2135 get_random_bytes(&i, sizeof(i));
@@ -1888,6 +2180,9 @@ static int nv_open(struct net_device *dev)
1888 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 2180 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1889 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 2181 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
1890 } 2182 }
 2183 /* set linkspeed to an invalid value to force nv_update_linkspeed
 2184 * to reinitialize the hw */
2185 np->linkspeed = 0;
1891 ret = nv_update_linkspeed(dev); 2186 ret = nv_update_linkspeed(dev);
1892 nv_start_rx(dev); 2187 nv_start_rx(dev);
1893 nv_start_tx(dev); 2188 nv_start_tx(dev);
@@ -1942,6 +2237,12 @@ static int nv_close(struct net_device *dev)
1942 if (np->wolenabled) 2237 if (np->wolenabled)
1943 nv_start_rx(dev); 2238 nv_start_rx(dev);
1944 2239
2240 /* special op: write back the misordered MAC address - otherwise
2241 * the next nv_probe would see a wrong address.
2242 */
2243 writel(np->orig_mac[0], base + NvRegMacAddrA);
2244 writel(np->orig_mac[1], base + NvRegMacAddrB);
2245
1945 /* FIXME: power down nic */ 2246 /* FIXME: power down nic */
1946 2247
1947 return 0; 2248 return 0;
@@ -2006,32 +2307,55 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2006 } 2307 }
2007 2308
2008 /* handle different descriptor versions */ 2309 /* handle different descriptor versions */
2009 if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 || 2310 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2010 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 || 2311 /* packet format 3: supports 40-bit addressing */
2011 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 || 2312 np->desc_ver = DESC_VER_3;
2012 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || 2313 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2013 pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) 2314 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2014 np->desc_ver = DESC_VER_1; 2315 pci_name(pci_dev));
2015 else 2316 }
2317 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2318 /* packet format 2: supports jumbo frames */
2016 np->desc_ver = DESC_VER_2; 2319 np->desc_ver = DESC_VER_2;
2320 } else {
2321 /* original packet format */
2322 np->desc_ver = DESC_VER_1;
2323 }
2324
2325 np->pkt_limit = NV_PKTLIMIT_1;
2326 if (id->driver_data & DEV_HAS_LARGEDESC)
2327 np->pkt_limit = NV_PKTLIMIT_2;
2017 2328
2018 err = -ENOMEM; 2329 err = -ENOMEM;
2019 np->base = ioremap(addr, NV_PCI_REGSZ); 2330 np->base = ioremap(addr, NV_PCI_REGSZ);
2020 if (!np->base) 2331 if (!np->base)
2021 goto out_relreg; 2332 goto out_relreg;
2022 dev->base_addr = (unsigned long)np->base; 2333 dev->base_addr = (unsigned long)np->base;
2334
2023 dev->irq = pci_dev->irq; 2335 dev->irq = pci_dev->irq;
2024 np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2336
2025 &np->ring_addr); 2337 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2026 if (!np->rx_ring) 2338 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
2027 goto out_unmap; 2339 sizeof(struct ring_desc) * (RX_RING + TX_RING),
2028 np->tx_ring = &np->rx_ring[RX_RING]; 2340 &np->ring_addr);
2341 if (!np->rx_ring.orig)
2342 goto out_unmap;
2343 np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
2344 } else {
2345 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
2346 sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2347 &np->ring_addr);
2348 if (!np->rx_ring.ex)
2349 goto out_unmap;
2350 np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
2351 }
2029 2352
2030 dev->open = nv_open; 2353 dev->open = nv_open;
2031 dev->stop = nv_close; 2354 dev->stop = nv_close;
2032 dev->hard_start_xmit = nv_start_xmit; 2355 dev->hard_start_xmit = nv_start_xmit;
2033 dev->get_stats = nv_get_stats; 2356 dev->get_stats = nv_get_stats;
2034 dev->change_mtu = nv_change_mtu; 2357 dev->change_mtu = nv_change_mtu;
2358 dev->set_mac_address = nv_set_mac_address;
2035 dev->set_multicast_list = nv_set_multicast; 2359 dev->set_multicast_list = nv_set_multicast;
2036#ifdef CONFIG_NET_POLL_CONTROLLER 2360#ifdef CONFIG_NET_POLL_CONTROLLER
2037 dev->poll_controller = nv_poll_controller; 2361 dev->poll_controller = nv_poll_controller;
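
Reviewer note: the probe path now keys descriptor selection off feature flags in driver_data instead of an ever-growing device-ID list; the pci_tbl rewrite at the end of this file supplies the flags per chip. A sketch of the dispatch; the flag encodings are assumptions, while the names and priority order come from the hunk:

#include <stdio.h>

#define DEV_NEED_TIMERIRQ  0x01  /* assumed bit values */
#define DEV_NEED_LINKTIMER 0x02
#define DEV_HAS_LARGEDESC  0x04
#define DEV_HAS_HIGH_DMA   0x08

static int pick_desc_ver(unsigned long driver_data)
{
	if (driver_data & DEV_HAS_HIGH_DMA)
		return 3;   /* packet format 3: 40-bit addressing */
	if (driver_data & DEV_HAS_LARGEDESC)
		return 2;   /* packet format 2: jumbo frames      */
	return 1;           /* original packet format             */
}

int main(void)
{
	/* CK804 advertises both capability flags in pci_tbl below */
	unsigned long ck804 = DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER |
			      DEV_HAS_LARGEDESC | DEV_HAS_HIGH_DMA;

	printf("desc_ver = %d\n", pick_desc_ver(ck804));
	return 0;
}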
@@ -2080,17 +2404,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2080 2404
2081 if (np->desc_ver == DESC_VER_1) { 2405 if (np->desc_ver == DESC_VER_1) {
2082 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; 2406 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
2083 if (id->driver_data & DEV_NEED_LASTPACKET1)
2084 np->tx_flags |= NV_TX_LASTPACKET1;
2085 } else { 2407 } else {
2086 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; 2408 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
2087 if (id->driver_data & DEV_NEED_LASTPACKET1)
2088 np->tx_flags |= NV_TX2_LASTPACKET1;
2089 } 2409 }
2090 if (id->driver_data & DEV_IRQMASK_1) 2410 np->irqmask = NVREG_IRQMASK_WANTED;
2091 np->irqmask = NVREG_IRQMASK_WANTED_1;
2092 if (id->driver_data & DEV_IRQMASK_2)
2093 np->irqmask = NVREG_IRQMASK_WANTED_2;
2094 if (id->driver_data & DEV_NEED_TIMERIRQ) 2411 if (id->driver_data & DEV_NEED_TIMERIRQ)
2095 np->irqmask |= NVREG_IRQ_TIMER; 2412 np->irqmask |= NVREG_IRQ_TIMER;
2096 if (id->driver_data & DEV_NEED_LINKTIMER) { 2413 if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2472,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2155 return 0; 2472 return 0;
2156 2473
2157out_freering: 2474out_freering:
2158 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), 2475 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2159 np->rx_ring, np->ring_addr); 2476 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
2477 np->rx_ring.orig, np->ring_addr);
2478 else
2479 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
2480 np->rx_ring.ex, np->ring_addr);
2160 pci_set_drvdata(pci_dev, NULL); 2481 pci_set_drvdata(pci_dev, NULL);
2161out_unmap: 2482out_unmap:
2162 iounmap(get_hwbase(dev)); 2483 iounmap(get_hwbase(dev));
@@ -2174,18 +2495,14 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2174{ 2495{
2175 struct net_device *dev = pci_get_drvdata(pci_dev); 2496 struct net_device *dev = pci_get_drvdata(pci_dev);
2176 struct fe_priv *np = get_nvpriv(dev); 2497 struct fe_priv *np = get_nvpriv(dev);
2177 u8 __iomem *base = get_hwbase(dev);
2178 2498
2179 unregister_netdev(dev); 2499 unregister_netdev(dev);
2180 2500
2181 /* special op: write back the misordered MAC address - otherwise
2182 * the next nv_probe would see a wrong address.
2183 */
2184 writel(np->orig_mac[0], base + NvRegMacAddrA);
2185 writel(np->orig_mac[1], base + NvRegMacAddrB);
2186
2187 /* free all structures */ 2501 /* free all structures */
2188 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr); 2502 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2503 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
2504 else
2505 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
2189 iounmap(get_hwbase(dev)); 2506 iounmap(get_hwbase(dev));
2190 pci_release_regions(pci_dev); 2507 pci_release_regions(pci_dev);
2191 pci_disable_device(pci_dev); 2508 pci_disable_device(pci_dev);
@@ -2195,109 +2512,64 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
2195 2512
2196static struct pci_device_id pci_tbl[] = { 2513static struct pci_device_id pci_tbl[] = {
2197 { /* nForce Ethernet Controller */ 2514 { /* nForce Ethernet Controller */
2198 .vendor = PCI_VENDOR_ID_NVIDIA, 2515 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
2199 .device = PCI_DEVICE_ID_NVIDIA_NVENET_1, 2516 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2200 .subvendor = PCI_ANY_ID,
2201 .subdevice = PCI_ANY_ID,
2202 .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2203 }, 2517 },
2204 { /* nForce2 Ethernet Controller */ 2518 { /* nForce2 Ethernet Controller */
2205 .vendor = PCI_VENDOR_ID_NVIDIA, 2519 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
2206 .device = PCI_DEVICE_ID_NVIDIA_NVENET_2, 2520 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2207 .subvendor = PCI_ANY_ID,
2208 .subdevice = PCI_ANY_ID,
2209 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2210 }, 2521 },
2211 { /* nForce3 Ethernet Controller */ 2522 { /* nForce3 Ethernet Controller */
2212 .vendor = PCI_VENDOR_ID_NVIDIA, 2523 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
2213 .device = PCI_DEVICE_ID_NVIDIA_NVENET_3, 2524 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2214 .subvendor = PCI_ANY_ID,
2215 .subdevice = PCI_ANY_ID,
2216 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2217 }, 2525 },
2218 { /* nForce3 Ethernet Controller */ 2526 { /* nForce3 Ethernet Controller */
2219 .vendor = PCI_VENDOR_ID_NVIDIA, 2527 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
2220 .device = PCI_DEVICE_ID_NVIDIA_NVENET_4, 2528 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2221 .subvendor = PCI_ANY_ID,
2222 .subdevice = PCI_ANY_ID,
2223 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2224 }, 2529 },
2225 { /* nForce3 Ethernet Controller */ 2530 { /* nForce3 Ethernet Controller */
2226 .vendor = PCI_VENDOR_ID_NVIDIA, 2531 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
2227 .device = PCI_DEVICE_ID_NVIDIA_NVENET_5, 2532 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2228 .subvendor = PCI_ANY_ID,
2229 .subdevice = PCI_ANY_ID,
2230 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2231 }, 2533 },
2232 { /* nForce3 Ethernet Controller */ 2534 { /* nForce3 Ethernet Controller */
2233 .vendor = PCI_VENDOR_ID_NVIDIA, 2535 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
2234 .device = PCI_DEVICE_ID_NVIDIA_NVENET_6, 2536 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2235 .subvendor = PCI_ANY_ID,
2236 .subdevice = PCI_ANY_ID,
2237 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2238 }, 2537 },
2239 { /* nForce3 Ethernet Controller */ 2538 { /* nForce3 Ethernet Controller */
2240 .vendor = PCI_VENDOR_ID_NVIDIA, 2539 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
2241 .device = PCI_DEVICE_ID_NVIDIA_NVENET_7, 2540 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
2242 .subvendor = PCI_ANY_ID,
2243 .subdevice = PCI_ANY_ID,
2244 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2245 }, 2541 },
2246 { /* CK804 Ethernet Controller */ 2542 { /* CK804 Ethernet Controller */
2247 .vendor = PCI_VENDOR_ID_NVIDIA, 2543 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
2248 .device = PCI_DEVICE_ID_NVIDIA_NVENET_8, 2544 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2249 .subvendor = PCI_ANY_ID,
2250 .subdevice = PCI_ANY_ID,
2251 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2252 }, 2545 },
2253 { /* CK804 Ethernet Controller */ 2546 { /* CK804 Ethernet Controller */
2254 .vendor = PCI_VENDOR_ID_NVIDIA, 2547 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
2255 .device = PCI_DEVICE_ID_NVIDIA_NVENET_9, 2548 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2256 .subvendor = PCI_ANY_ID,
2257 .subdevice = PCI_ANY_ID,
2258 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2259 }, 2549 },
2260 { /* MCP04 Ethernet Controller */ 2550 { /* MCP04 Ethernet Controller */
2261 .vendor = PCI_VENDOR_ID_NVIDIA, 2551 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
2262 .device = PCI_DEVICE_ID_NVIDIA_NVENET_10, 2552 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2263 .subvendor = PCI_ANY_ID,
2264 .subdevice = PCI_ANY_ID,
2265 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2266 }, 2553 },
2267 { /* MCP04 Ethernet Controller */ 2554 { /* MCP04 Ethernet Controller */
2268 .vendor = PCI_VENDOR_ID_NVIDIA, 2555 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
2269 .device = PCI_DEVICE_ID_NVIDIA_NVENET_11, 2556 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2270 .subvendor = PCI_ANY_ID,
2271 .subdevice = PCI_ANY_ID,
2272 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2273 }, 2557 },
2274 { /* MCP51 Ethernet Controller */ 2558 { /* MCP51 Ethernet Controller */
2275 .vendor = PCI_VENDOR_ID_NVIDIA, 2559 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
2276 .device = PCI_DEVICE_ID_NVIDIA_NVENET_12, 2560 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2277 .subvendor = PCI_ANY_ID,
2278 .subdevice = PCI_ANY_ID,
2279 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2280 }, 2561 },
2281 { /* MCP51 Ethernet Controller */ 2562 { /* MCP51 Ethernet Controller */
2282 .vendor = PCI_VENDOR_ID_NVIDIA, 2563 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
2283 .device = PCI_DEVICE_ID_NVIDIA_NVENET_13, 2564 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
2284 .subvendor = PCI_ANY_ID,
2285 .subdevice = PCI_ANY_ID,
2286 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2287 }, 2565 },
2288 { /* MCP55 Ethernet Controller */ 2566 { /* MCP55 Ethernet Controller */
2289 .vendor = PCI_VENDOR_ID_NVIDIA, 2567 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2290 .device = PCI_DEVICE_ID_NVIDIA_NVENET_14, 2568 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2291 .subvendor = PCI_ANY_ID,
2292 .subdevice = PCI_ANY_ID,
2293 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2294 }, 2569 },
2295 { /* MCP55 Ethernet Controller */ 2570 { /* MCP55 Ethernet Controller */
2296 .vendor = PCI_VENDOR_ID_NVIDIA, 2571 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2297 .device = PCI_DEVICE_ID_NVIDIA_NVENET_15, 2572 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
2298 .subvendor = PCI_ANY_ID,
2299 .subdevice = PCI_ANY_ID,
2300 .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
2301 }, 2573 },
2302 {0,}, 2574 {0,},
2303}; 2575};
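
Reviewer note: each table entry shrinks because PCI_DEVICE() fills in the vendor/device pair and wildcards the subsystem IDs. A standalone sketch of the same shape; the NVIDIA vendor ID is real, the device ID below is an arbitrary example:

#include <stdio.h>

#define PCI_ANY_ID (~0u)

struct pci_device_id {
	unsigned int vendor, device, subvendor, subdevice;
	unsigned long driver_data;
};

/* same expansion as the kernel helper the hunk switches to */
#define PCI_DEVICE(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

int main(void)
{
	struct pci_device_id id = {
		PCI_DEVICE(0x10de /* NVIDIA */, 0x0056 /* example */),
		.driver_data = 0,
	};

	printf("%04x:%04x sub %x:%x\n",
	       id.vendor, id.device, id.subvendor, id.subdevice);
	return 0;
}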
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index f9e3be96963c..0b230222bfea 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -308,12 +308,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
308{ 308{
309 struct sockaddr_ax25 *sa = addr; 309 struct sockaddr_ax25 *sa = addr;
310 310
311 if (sa->sax25_family != AF_AX25)
312 return -EINVAL;
313
314 if (!sa->sax25_ndigis)
315 return -EINVAL;
316
317 spin_lock_irq(&dev->xmit_lock); 311 spin_lock_irq(&dev->xmit_lock);
318 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); 312 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
319 spin_unlock_irq(&dev->xmit_lock); 313 spin_unlock_irq(&dev->xmit_lock);
@@ -668,6 +662,9 @@ static int sixpack_open(struct tty_struct *tty)
668 netif_start_queue(dev); 662 netif_start_queue(dev);
669 663
670 init_timer(&sp->tx_t); 664 init_timer(&sp->tx_t);
665 sp->tx_t.function = sp_xmit_on_air;
666 sp->tx_t.data = (unsigned long) sp;
667
671 init_timer(&sp->resync_t); 668 init_timer(&sp->resync_t);
672 669
673 spin_unlock_bh(&sp->lock); 670 spin_unlock_bh(&sp->lock);
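
Reviewer note: the 6pack fix is easy to miss: init_timer() only prepares the timer_list, so without the two added assignments sp->tx_t could fire with no handler attached. A userspace sketch of the 2.6-era pattern, with stand-in types and an example data value:

#include <stdio.h>

/* minimal stand-in for the 2.6 timer_list fields used here */
struct timer_list {
	void (*function)(unsigned long);
	unsigned long data;
};

static void sp_xmit_on_air(unsigned long data)
{
	printf("tx timer fired, priv at %#lx\n", data);
}

int main(void)
{
	struct timer_list tx_t = { 0 };      /* init_timer() equivalent */

	tx_t.function = sp_xmit_on_air;      /* the lines the hunk adds */
	tx_t.data = 0x1000;                  /* example private pointer */

	tx_t.function(tx_t.data);            /* what the timer core does */
	return 0;
}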
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 0cd54306e636..de087cd609d9 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
1config MKISS 1config MKISS
2 tristate "Serial port KISS driver" 2 tristate "Serial port KISS driver"
3 depends on AX25 && BROKEN_ON_SMP 3 depends on AX25
4 ---help--- 4 ---help---
5 KISS is a protocol used for the exchange of data between a computer 5 KISS is a protocol used for the exchange of data between a computer
6 and a Terminal Node Controller (a small embedded system commonly 6 and a Terminal Node Controller (a small embedded system commonly
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a7f15d9f13e5..5298096afbdb 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -54,6 +54,7 @@
54#include <linux/kmod.h> 54#include <linux/kmod.h>
55#include <linux/hdlcdrv.h> 55#include <linux/hdlcdrv.h>
56#include <linux/baycom.h> 56#include <linux/baycom.h>
57#include <linux/jiffies.h>
57#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 58#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
58/* prototypes for ax25_encapsulate and ax25_rebuild_header */ 59/* prototypes for ax25_encapsulate and ax25_rebuild_header */
59#include <net/ax25.h> 60#include <net/ax25.h>
@@ -287,7 +288,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
287 * measure the interrupt frequency 288 * measure the interrupt frequency
288 */ 289 */
289 bc->debug_vals.cur_intcnt++; 290 bc->debug_vals.cur_intcnt++;
290 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 291 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
291 bc->debug_vals.last_jiffies = cur_jiffies; 292 bc->debug_vals.last_jiffies = cur_jiffies;
292 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 293 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
293 bc->debug_vals.cur_intcnt = 0; 294 bc->debug_vals.cur_intcnt = 0;
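
Reviewer note: all four baycom hunks make the same change. Comparing jiffies through time_after_eq() keeps the test correct across counter wraparound, where a direct comparison against a precomputed deadline can be fooled. A userspace demonstration with a simplified version of the macro from <linux/jiffies.h> (the real one adds type checking):

#include <stdio.h>
#include <limits.h>

#define HZ 100
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long last = ULONG_MAX - 9;   /* jiffies just before wrap */
	unsigned long now  = last + 4;        /* only 4 ticks later       */
	unsigned long deadline = last + HZ;   /* wraps around to 90       */

	printf("naive  now >= deadline: %d (wrong)\n", now >= deadline);
	printf("time_after_eq:          %d (right)\n",
	       time_after_eq(now, deadline));
	return 0;
}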
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 612ad452bee0..3b1bef1ee215 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -84,6 +84,7 @@
84#include <linux/baycom.h> 84#include <linux/baycom.h>
85#include <linux/parport.h> 85#include <linux/parport.h>
86#include <linux/bitops.h> 86#include <linux/bitops.h>
87#include <linux/jiffies.h>
87 88
88#include <asm/bug.h> 89#include <asm/bug.h>
89#include <asm/system.h> 90#include <asm/system.h>
@@ -165,7 +166,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
165 * measure the interrupt frequency 166 * measure the interrupt frequency
166 */ 167 */
167 bc->debug_vals.cur_intcnt++; 168 bc->debug_vals.cur_intcnt++;
168 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 169 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
169 bc->debug_vals.last_jiffies = cur_jiffies; 170 bc->debug_vals.last_jiffies = cur_jiffies;
170 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 171 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
171 bc->debug_vals.cur_intcnt = 0; 172 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 25f270b05378..232793d2ce6b 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -79,6 +79,7 @@
79#include <asm/io.h> 79#include <asm/io.h>
80#include <linux/hdlcdrv.h> 80#include <linux/hdlcdrv.h>
81#include <linux/baycom.h> 81#include <linux/baycom.h>
82#include <linux/jiffies.h>
82 83
83/* --------------------------------------------------------------------- */ 84/* --------------------------------------------------------------------- */
84 85
@@ -159,7 +160,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
159 * measure the interrupt frequency 160 * measure the interrupt frequency
160 */ 161 */
161 bc->debug_vals.cur_intcnt++; 162 bc->debug_vals.cur_intcnt++;
162 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 163 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
163 bc->debug_vals.last_jiffies = cur_jiffies; 164 bc->debug_vals.last_jiffies = cur_jiffies;
164 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 165 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
165 bc->debug_vals.cur_intcnt = 0; 166 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index eead85d00962..be596a3eb3fd 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -69,6 +69,7 @@
69#include <asm/io.h> 69#include <asm/io.h>
70#include <linux/hdlcdrv.h> 70#include <linux/hdlcdrv.h>
71#include <linux/baycom.h> 71#include <linux/baycom.h>
72#include <linux/jiffies.h>
72 73
73/* --------------------------------------------------------------------- */ 74/* --------------------------------------------------------------------- */
74 75
@@ -150,7 +151,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
150 * measure the interrupt frequency 151 * measure the interrupt frequency
151 */ 152 */
152 bc->debug_vals.cur_intcnt++; 153 bc->debug_vals.cur_intcnt++;
153 if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) { 154 if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
154 bc->debug_vals.last_jiffies = cur_jiffies; 155 bc->debug_vals.last_jiffies = cur_jiffies;
155 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; 156 bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
156 bc->debug_vals.cur_intcnt = 0; 157 bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ba9f0580e1f9..2946e037a9b1 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -98,7 +98,7 @@ static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
98 98
99static char bpq_eth_addr[6]; 99static char bpq_eth_addr[6];
100 100
101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *); 101static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
102static int bpq_device_event(struct notifier_block *, unsigned long, void *); 102static int bpq_device_event(struct notifier_block *, unsigned long, void *);
103static const char *bpq_print_ethaddr(const unsigned char *); 103static const char *bpq_print_ethaddr(const unsigned char *);
104 104
@@ -165,7 +165,7 @@ static inline int dev_is_ethdev(struct net_device *dev)
165/* 165/*
166 * Receive an AX.25 frame via an ethernet interface. 166 * Receive an AX.25 frame via an ethernet interface.
167 */ 167 */
168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) 168static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
169{ 169{
170 int len; 170 int len;
171 char * ptr; 171 char * ptr;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3035422f5ad8..63b1a2b86acb 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -1,30 +1,19 @@
1/* 1/*
2 * MKISS Driver 2 * This program is free software; you can distribute it and/or modify it
3 * under the terms of the GNU General Public License (Version 2) as
4 * published by the Free Software Foundation.
3 * 5 *
4 * This module: 6 * This program is distributed in the hope it will be useful, but WITHOUT
5 * This module is free software; you can redistribute it and/or 7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6 * modify it under the terms of the GNU General Public License 8 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
7 * as published by the Free Software Foundation; either version 9 * for more details.
8 * 2 of the License, or (at your option) any later version.
9 * 10 *
10 * This module implements the AX.25 protocol for kernel-based 11 * You should have received a copy of the GNU General Public License along
11 * devices like TTYs. It interfaces between a raw TTY, and the 12 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * kernel's AX.25 protocol layers, just like slip.c. 13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
13 * AX.25 needs to be separated from slip.c while slip.c is no
14 * longer a static kernel device since it is a module.
15 * This method clears the way to implement other kiss protocols
16 * like mkiss smack g8bpq ..... so far only mkiss is implemented.
17 * 14 *
18 * Hans Alblas <hans@esrac.ele.tue.nl> 15 * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
19 * 16 * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
20 * History
21 * Jonathan (G4KLX) Fixed to match Linux networking changes - 2.1.15.
22 * Matthias (DG2FEF) Added support for FlexNet CRC (on special request)
23 * Fixed bug in ax25_close(): dev_lock_wait() was
24 * called twice, causing a deadlock.
25 * Jeroen (PE1RXQ) Removed old MKISS_MAGIC stuff and calls to
26 * MOD_*_USE_COUNT
27 * Remove cli() and fix rtnl lock usage.
28 */ 17 */
29 18
30#include <linux/config.h> 19#include <linux/config.h>
@@ -46,177 +35,300 @@
46#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
47#include <linux/skbuff.h> 36#include <linux/skbuff.h>
48#include <linux/if_arp.h> 37#include <linux/if_arp.h>
38#include <linux/jiffies.h>
49 39
50#include <net/ax25.h> 40#include <net/ax25.h>
51 41
52#include "mkiss.h"
53
54#ifdef CONFIG_INET 42#ifdef CONFIG_INET
55#include <linux/ip.h> 43#include <linux/ip.h>
56#include <linux/tcp.h> 44#include <linux/tcp.h>
57#endif 45#endif
58 46
59static char banner[] __initdata = KERN_INFO "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n"; 47#define AX_MTU 236
60 48
61typedef struct ax25_ctrl { 49/* SLIP/KISS protocol characters. */
62 struct ax_disp ctrl; /* */ 50#define END 0300 /* indicates end of frame */
63 struct net_device dev; /* the device */ 51#define ESC 0333 /* indicates byte stuffing */
64} ax25_ctrl_t; 52#define ESC_END 0334 /* ESC ESC_END means END 'data' */
65 53#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
66static ax25_ctrl_t **ax25_ctrls; 54
67 55struct mkiss {
68int ax25_maxdev = AX25_MAXDEV; /* Can be overridden with insmod! */ 56 struct tty_struct *tty; /* ptr to TTY structure */
69 57 struct net_device *dev; /* easy for intr handling */
70static struct tty_ldisc ax_ldisc; 58
71 59 /* These are pointers to the malloc()ed frame buffers. */
72static int ax25_init(struct net_device *); 60 spinlock_t buflock;/* lock for rbuf and xbuf */
73static int kiss_esc(unsigned char *, unsigned char *, int); 61 unsigned char *rbuff; /* receiver buffer */
74static int kiss_esc_crc(unsigned char *, unsigned char *, unsigned short, int); 62 int rcount; /* received chars counter */
75static void kiss_unesc(struct ax_disp *, unsigned char); 63 unsigned char *xbuff; /* transmitter buffer */
64 unsigned char *xhead; /* pointer to next byte to XMIT */
65 int xleft; /* bytes left in XMIT queue */
66
67 struct net_device_stats stats;
68
69 /* Detailed SLIP statistics. */
70 int mtu; /* Our mtu (to spot changes!) */
71 int buffsize; /* Max buffers sizes */
72
73 unsigned long flags; /* Flag values/ mode etc */
74 /* long req'd: used by set_bit --RR */
75#define AXF_INUSE 0 /* Channel in use */
76#define AXF_ESCAPE 1 /* ESC received */
77#define AXF_ERROR 2 /* Parity, etc. error */
78#define AXF_KEEPTEST 3 /* Keepalive test flag */
 80#define AXF_OUTWAIT 4 /* output packet is waiting */
80
81 int mode;
82 int crcmode; /* MW: for FlexNet, SMACK etc. */
83#define CRC_MODE_NONE 0
84#define CRC_MODE_FLEX 1
85#define CRC_MODE_SMACK 2
86
87 atomic_t refcnt;
88 struct semaphore dead_sem;
89};
76 90
77/*---------------------------------------------------------------------------*/ 91/*---------------------------------------------------------------------------*/
78 92
79static const unsigned short Crc_flex_table[] = { 93static const unsigned short crc_flex_table[] = {
80 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38, 94 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
81 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770, 95 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
82 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9, 96 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
83 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1, 97 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
84 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a, 98 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
85 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672, 99 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
86 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb, 100 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
87 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3, 101 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
88 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c, 102 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
89 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574, 103 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
90 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd, 104 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
91 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5, 105 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
92 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e, 106 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
93 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476, 107 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
94 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf, 108 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
95 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7, 109 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
96 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30, 110 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
97 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378, 111 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
98 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1, 112 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
99 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9, 113 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
100 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32, 114 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
101 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a, 115 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
102 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3, 116 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
103 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb, 117 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
104 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34, 118 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
105 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c, 119 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
106 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5, 120 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
107 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd, 121 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
108 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36, 122 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
109 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e, 123 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
110 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7, 124 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
111 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff 125 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
112}; 126};
113 127
114/*---------------------------------------------------------------------------*/
115
116static unsigned short calc_crc_flex(unsigned char *cp, int size) 128static unsigned short calc_crc_flex(unsigned char *cp, int size)
117{ 129{
118 unsigned short crc = 0xffff; 130 unsigned short crc = 0xffff;
119
120 while (size--)
121 crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
122 131
123 return crc; 132 while (size--)
124} 133 crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
125 134
126/*---------------------------------------------------------------------------*/ 135 return crc;
136}
127 137
128static int check_crc_flex(unsigned char *cp, int size) 138static int check_crc_flex(unsigned char *cp, int size)
129{ 139{
130 unsigned short crc = 0xffff; 140 unsigned short crc = 0xffff;
131 141
132 if (size < 3) 142 if (size < 3)
133 return -1; 143 return -1;
134 144
135 while (size--) 145 while (size--)
136 crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff]; 146 crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
137 147
138 if ((crc & 0xffff) != 0x7070) 148 if ((crc & 0xffff) != 0x7070)
139 return -1; 149 return -1;
140 150
141 return 0; 151 return 0;
142} 152}
143 153
144/*---------------------------------------------------------------------------*/ 154/*
155 * Standard encapsulation
156 */
145 157
146/* Find a free channel, and link in this `tty' line. */ 158static int kiss_esc(unsigned char *s, unsigned char *d, int len)
147static inline struct ax_disp *ax_alloc(void)
148{ 159{
149 ax25_ctrl_t *axp=NULL; 160 unsigned char *ptr = d;
150 int i; 161 unsigned char c;
151 162
152 for (i = 0; i < ax25_maxdev; i++) { 163 /*
153 axp = ax25_ctrls[i]; 164 * Send an initial END character to flush out any data that may have
165 * accumulated in the receiver due to line noise.
166 */
154 167
155 /* Not allocated ? */ 168 *ptr++ = END;
156 if (axp == NULL)
157 break;
158 169
159 /* Not in use ? */ 170 while (len-- > 0) {
160 if (!test_and_set_bit(AXF_INUSE, &axp->ctrl.flags)) 171 switch (c = *s++) {
172 case END:
173 *ptr++ = ESC;
174 *ptr++ = ESC_END;
161 break; 175 break;
176 case ESC:
177 *ptr++ = ESC;
178 *ptr++ = ESC_ESC;
179 break;
180 default:
181 *ptr++ = c;
182 break;
183 }
162 } 184 }
163 185
164 /* Sorry, too many, all slots in use */ 186 *ptr++ = END;
165 if (i >= ax25_maxdev) 187
166 return NULL; 188 return ptr - d;
189}
190
191/*
192 * MW:
 193 * OK, it's ugly, but tell me a better solution without copying the
194 * packet to a temporary buffer :-)
195 */
196static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
197 int len)
198{
199 unsigned char *ptr = d;
200 unsigned char c=0;
201
202 *ptr++ = END;
203 while (len > 0) {
204 if (len > 2)
205 c = *s++;
206 else if (len > 1)
207 c = crc >> 8;
208 else if (len > 0)
209 c = crc & 0xff;
210
211 len--;
167 212
168 /* If no channels are available, allocate one */ 213 switch (c) {
169 if (axp == NULL && (ax25_ctrls[i] = kmalloc(sizeof(ax25_ctrl_t), GFP_KERNEL)) != NULL) { 214 case END:
170 axp = ax25_ctrls[i]; 215 *ptr++ = ESC;
216 *ptr++ = ESC_END;
217 break;
218 case ESC:
219 *ptr++ = ESC;
220 *ptr++ = ESC_ESC;
221 break;
222 default:
223 *ptr++ = c;
224 break;
225 }
171 } 226 }
172 memset(axp, 0, sizeof(ax25_ctrl_t)); 227 *ptr++ = END;
173 228
174 /* Initialize channel control data */ 229 return ptr - d;
175 set_bit(AXF_INUSE, &axp->ctrl.flags); 230}
176 sprintf(axp->dev.name, "ax%d", i++); 231
177 axp->ctrl.tty = NULL; 232/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
178 axp->dev.base_addr = i; 233static void ax_bump(struct mkiss *ax)
179 axp->dev.priv = (void *)&axp->ctrl; 234{
180 axp->dev.next = NULL; 235 struct sk_buff *skb;
181 axp->dev.init = ax25_init; 236 int count;
182 237
183 if (axp != NULL) { 238 spin_lock_bh(&ax->buflock);
184 /* 239 if (ax->rbuff[0] > 0x0f) {
185 * register device so that it can be ifconfig'ed 240 if (ax->rbuff[0] & 0x20) {
186 * ax25_init() will be called as a side-effect 241 ax->crcmode = CRC_MODE_FLEX;
187 * SIDE-EFFECT WARNING: ax25_init() CLEARS axp->ctrl ! 242 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
188 */ 243 ax->stats.rx_errors++;
189 if (register_netdev(&axp->dev) == 0) { 244 return;
190 /* (Re-)Set the INUSE bit. Very Important! */ 245 }
191 set_bit(AXF_INUSE, &axp->ctrl.flags); 246 ax->rcount -= 2;
 192 axp->ctrl.dev = &axp->dev; 247 /* dl9sau bugfix: the trailing two bytes flexnet crc
 193 axp->dev.priv = (void *) &axp->ctrl; 248 * will not be passed to the kernel. Thus we have
194 249 * to correct the kissparm signature, because it
195 return &axp->ctrl; 250 * indicates a crc but there's none
196 } else { 251 */
197 clear_bit(AXF_INUSE,&axp->ctrl.flags); 252 *ax->rbuff &= ~0x20;
198 printk(KERN_ERR "mkiss: ax_alloc() - register_netdev() failure.\n");
199 } 253 }
254 }
255 spin_unlock_bh(&ax->buflock);
256
257 count = ax->rcount;
258
259 if ((skb = dev_alloc_skb(count)) == NULL) {
260 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
261 ax->dev->name);
262 ax->stats.rx_dropped++;
263 return;
200 } 264 }
201 265
202 return NULL; 266 spin_lock_bh(&ax->buflock);
267 memcpy(skb_put(skb,count), ax->rbuff, count);
268 spin_unlock_bh(&ax->buflock);
269 skb->protocol = ax25_type_trans(skb, ax->dev);
270 netif_rx(skb);
271 ax->dev->last_rx = jiffies;
272 ax->stats.rx_packets++;
273 ax->stats.rx_bytes += count;
203} 274}
204 275
205/* Free an AX25 channel. */ 276static void kiss_unesc(struct mkiss *ax, unsigned char s)
206static inline void ax_free(struct ax_disp *ax)
207{ 277{
208 /* Free all AX25 frame buffers. */ 278 switch (s) {
209 if (ax->rbuff) 279 case END:
210 kfree(ax->rbuff); 280 /* drop keeptest bit = VSV */
211 ax->rbuff = NULL; 281 if (test_bit(AXF_KEEPTEST, &ax->flags))
212 if (ax->xbuff) 282 clear_bit(AXF_KEEPTEST, &ax->flags);
213 kfree(ax->xbuff); 283
214 ax->xbuff = NULL; 284 if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
215 if (!test_and_clear_bit(AXF_INUSE, &ax->flags)) 285 ax_bump(ax);
216 printk(KERN_ERR "mkiss: %s: ax_free for already free unit.\n", ax->dev->name); 286
287 clear_bit(AXF_ESCAPE, &ax->flags);
288 ax->rcount = 0;
289 return;
290
291 case ESC:
292 set_bit(AXF_ESCAPE, &ax->flags);
293 return;
294 case ESC_ESC:
295 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
296 s = ESC;
297 break;
298 case ESC_END:
299 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
300 s = END;
301 break;
302 }
303
304 spin_lock_bh(&ax->buflock);
305 if (!test_bit(AXF_ERROR, &ax->flags)) {
306 if (ax->rcount < ax->buffsize) {
307 ax->rbuff[ax->rcount++] = s;
308 spin_unlock_bh(&ax->buflock);
309 return;
310 }
311
312 ax->stats.rx_over_errors++;
313 set_bit(AXF_ERROR, &ax->flags);
314 }
315 spin_unlock_bh(&ax->buflock);
316}
317
318static int ax_set_mac_address(struct net_device *dev, void *addr)
319{
320 struct sockaddr_ax25 *sa = addr;
321
322 spin_lock_irq(&dev->xmit_lock);
323 memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
324 spin_unlock_irq(&dev->xmit_lock);
325
326 return 0;
217} 327}
218 328
219static void ax_changedmtu(struct ax_disp *ax) 329/*---------------------------------------------------------------------------*/
330
331static void ax_changedmtu(struct mkiss *ax)
220{ 332{
221 struct net_device *dev = ax->dev; 333 struct net_device *dev = ax->dev;
222 unsigned char *xbuff, *rbuff, *oxbuff, *orbuff; 334 unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
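
Reviewer note: the rewritten kiss_esc() earlier in this hunk implements classic SLIP/KISS byte stuffing: frames are delimited by END, and any literal END or ESC in the payload becomes a two-byte escape sequence (kiss_esc_crc() does the same while appending a CRC). A standalone sketch of the encoder using the constants defined in this hunk:

#include <stdio.h>

#define END     0300
#define ESC     0333
#define ESC_END 0334
#define ESC_ESC 0335

static int kiss_esc(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;

	*ptr++ = END;                 /* flush line noise at the receiver */
	while (len-- > 0) {
		unsigned char c = *s++;

		if (c == END) {
			*ptr++ = ESC; *ptr++ = ESC_END;
		} else if (c == ESC) {
			*ptr++ = ESC; *ptr++ = ESC_ESC;
		} else {
			*ptr++ = c;
		}
	}
	*ptr++ = END;                 /* close the frame */
	return ptr - d;
}

int main(void)
{
	unsigned char in[] = { 0x00, END, 'A', ESC };
	unsigned char out[16];
	int i, n = kiss_esc(in, out, sizeof(in));

	for (i = 0; i < n; i++)
		printf("%03o ", out[i]);  /* 300 000 333 334 101 333 335 300 */
	printf("\n");
	return 0;
}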
@@ -236,7 +348,8 @@ static void ax_changedmtu(struct ax_disp *ax)
236 rbuff = kmalloc(len + 4, GFP_ATOMIC); 348 rbuff = kmalloc(len + 4, GFP_ATOMIC);
237 349
238 if (xbuff == NULL || rbuff == NULL) { 350 if (xbuff == NULL || rbuff == NULL) {
239 printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, MTU change cancelled.\n", 351 printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, "
352 "MTU change cancelled.\n",
240 ax->dev->name); 353 ax->dev->name);
241 dev->mtu = ax->mtu; 354 dev->mtu = ax->mtu;
242 if (xbuff != NULL) 355 if (xbuff != NULL)
@@ -258,7 +371,7 @@ static void ax_changedmtu(struct ax_disp *ax)
258 memcpy(ax->xbuff, ax->xhead, ax->xleft); 371 memcpy(ax->xbuff, ax->xhead, ax->xleft);
259 } else { 372 } else {
260 ax->xleft = 0; 373 ax->xleft = 0;
261 ax->tx_dropped++; 374 ax->stats.tx_dropped++;
262 } 375 }
263 } 376 }
264 377
@@ -269,7 +382,7 @@ static void ax_changedmtu(struct ax_disp *ax)
269 memcpy(ax->rbuff, orbuff, ax->rcount); 382 memcpy(ax->rbuff, orbuff, ax->rcount);
270 } else { 383 } else {
271 ax->rcount = 0; 384 ax->rcount = 0;
272 ax->rx_over_errors++; 385 ax->stats.rx_over_errors++;
273 set_bit(AXF_ERROR, &ax->flags); 386 set_bit(AXF_ERROR, &ax->flags);
274 } 387 }
275 } 388 }
@@ -279,72 +392,14 @@ static void ax_changedmtu(struct ax_disp *ax)
279 392
280 spin_unlock_bh(&ax->buflock); 393 spin_unlock_bh(&ax->buflock);
281 394
282 if (oxbuff != NULL) 395 kfree(oxbuff);
283 kfree(oxbuff); 396 kfree(orbuff);
284 if (orbuff != NULL)
285 kfree(orbuff);
286}
287
288
289/* Set the "sending" flag. This must be atomic. */
290static inline void ax_lock(struct ax_disp *ax)
291{
292 netif_stop_queue(ax->dev);
293}
294
295
296/* Clear the "sending" flag. This must be atomic. */
297static inline void ax_unlock(struct ax_disp *ax)
298{
299 netif_start_queue(ax->dev);
300}
301
302/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
303static void ax_bump(struct ax_disp *ax)
304{
305 struct sk_buff *skb;
306 int count;
307
308 spin_lock_bh(&ax->buflock);
309 if (ax->rbuff[0] > 0x0f) {
310 if (ax->rbuff[0] & 0x20) {
311 ax->crcmode = CRC_MODE_FLEX;
312 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
313 ax->rx_errors++;
314 return;
315 }
316 ax->rcount -= 2;
317 /* dl9sau bugfix: the trailling two bytes flexnet crc
318 * will not be passed to the kernel. thus we have
319 * to correct the kissparm signature, because it
320 * indicates a crc but there's none
321 */
322 *ax->rbuff &= ~0x20;
323 }
324 }
325 spin_unlock_bh(&ax->buflock);
326
327 count = ax->rcount;
328
329 if ((skb = dev_alloc_skb(count)) == NULL) {
330 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", ax->dev->name);
331 ax->rx_dropped++;
332 return;
333 }
334
335 spin_lock_bh(&ax->buflock);
336 memcpy(skb_put(skb,count), ax->rbuff, count);
337 spin_unlock_bh(&ax->buflock);
338 skb->protocol = ax25_type_trans(skb, ax->dev);
339 netif_rx(skb);
340 ax->dev->last_rx = jiffies;
341 ax->rx_packets++;
342 ax->rx_bytes+=count;
343} 397}
344 398
345/* Encapsulate one AX.25 packet and stuff into a TTY queue. */ 399/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
346static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len) 400static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
347{ 401{
402 struct mkiss *ax = netdev_priv(dev);
348 unsigned char *p; 403 unsigned char *p;
349 int actual, count; 404 int actual, count;
350 405
@@ -354,8 +409,8 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
354 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ 409 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
355 len = ax->mtu; 410 len = ax->mtu;
356 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name); 411 printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
357 ax->tx_dropped++; 412 ax->stats.tx_dropped++;
358 ax_unlock(ax); 413 netif_start_queue(dev);
359 return; 414 return;
360 } 415 }
361 416
@@ -376,10 +431,11 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
376 break; 431 break;
377 } 432 }
378 433
379 ax->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 434 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
380 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); 435 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
381 ax->tx_packets++; 436 ax->stats.tx_packets++;
382 ax->tx_bytes+=actual; 437 ax->stats.tx_bytes += actual;
438
383 ax->dev->trans_start = jiffies; 439 ax->dev->trans_start = jiffies;
384 ax->xleft = count - actual; 440 ax->xleft = count - actual;
385 ax->xhead = ax->xbuff + actual; 441 ax->xhead = ax->xbuff + actual;
@@ -387,37 +443,10 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
387 spin_unlock_bh(&ax->buflock); 443 spin_unlock_bh(&ax->buflock);
388} 444}
389 445
390/*
391 * Called by the driver when there's room for more data. If we have
392 * more packets to send, we send them here.
393 */
394static void ax25_write_wakeup(struct tty_struct *tty)
395{
396 int actual;
397 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
398
399 /* First make sure we're connected. */
400 if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
401 return;
402 if (ax->xleft <= 0) {
403 /* Now serial buffer is almost free & we can start
404 * transmission of another packet
405 */
406 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
407
408 netif_wake_queue(ax->dev);
409 return;
410 }
411
412 actual = tty->driver->write(tty, ax->xhead, ax->xleft);
413 ax->xleft -= actual;
414 ax->xhead += actual;
415}
416
417/* Encapsulate an AX.25 packet and kick it into a TTY queue. */ 446/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
418static int ax_xmit(struct sk_buff *skb, struct net_device *dev) 447static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
419{ 448{
420 struct ax_disp *ax = netdev_priv(dev); 449 struct mkiss *ax = netdev_priv(dev);
421 450
422 if (!netif_running(dev)) { 451 if (!netif_running(dev)) {
423 printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); 452 printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
@@ -429,7 +458,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
429 * May be we must check transmitter timeout here ? 458 * May be we must check transmitter timeout here ?
430 * 14 Oct 1994 Dmitry Gorodchanin. 459 * 14 Oct 1994 Dmitry Gorodchanin.
431 */ 460 */
432 if (jiffies - dev->trans_start < 20 * HZ) { 461 if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
433 /* 20 sec timeout not reached */ 462 /* 20 sec timeout not reached */
434 return 1; 463 return 1;
435 } 464 }
@@ -439,20 +468,30 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
439 "bad line quality" : "driver error"); 468 "bad line quality" : "driver error");
440 469
441 ax->xleft = 0; 470 ax->xleft = 0;
442 ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 471 clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
443 ax_unlock(ax); 472 netif_start_queue(dev);
444 } 473 }
445 474
446 /* We were not busy, so we are now... :-) */ 475 /* We were not busy, so we are now... :-) */
447 if (skb != NULL) { 476 if (skb != NULL) {
448 ax_lock(ax); 477 netif_stop_queue(dev);
449 ax_encaps(ax, skb->data, skb->len); 478 ax_encaps(dev, skb->data, skb->len);
450 kfree_skb(skb); 479 kfree_skb(skb);
451 } 480 }
452 481
453 return 0; 482 return 0;
454} 483}
455 484
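The hunk above replaces the open-coded `jiffies - dev->trans_start < 20 * HZ` test with time_before(), which stays correct when the jiffies counter wraps. A minimal sketch of the idiom, assuming only the 20-second window taken from the hunk (the helper name is invented):

#include <linux/jiffies.h>

/* Nonzero while the 20 s transmit-timeout window is still open.
 * time_before() compares via signed arithmetic, so the result stays
 * correct even after the jiffies counter wraps past zero. */
static inline int tx_window_open(unsigned long trans_start)
{
        return time_before(jiffies, trans_start + 20 * HZ);
}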
485static int ax_open_dev(struct net_device *dev)
486{
487 struct mkiss *ax = netdev_priv(dev);
488
489 if (ax->tty == NULL)
490 return -ENODEV;
491
492 return 0;
493}
494
456#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 495#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
457 496
458/* Return the frame type ID */ 497/* Return the frame type ID */
@@ -481,7 +520,7 @@ static int ax_rebuild_header(struct sk_buff *skb)
481/* Open the low-level part of the AX25 channel. Easy! */ 520/* Open the low-level part of the AX25 channel. Easy! */
482static int ax_open(struct net_device *dev) 521static int ax_open(struct net_device *dev)
483{ 522{
484 struct ax_disp *ax = netdev_priv(dev); 523 struct mkiss *ax = netdev_priv(dev);
485 unsigned long len; 524 unsigned long len;
486 525
487 if (ax->tty == NULL) 526 if (ax->tty == NULL)
@@ -518,7 +557,6 @@ static int ax_open(struct net_device *dev)
518 557
519 spin_lock_init(&ax->buflock); 558 spin_lock_init(&ax->buflock);
520 559
521 netif_start_queue(dev);
522 return 0; 560 return 0;
523 561
524noxbuff: 562noxbuff:
@@ -532,68 +570,100 @@ norbuff:
532/* Close the low-level part of the AX25 channel. Easy! */ 570/* Close the low-level part of the AX25 channel. Easy! */
533static int ax_close(struct net_device *dev) 571static int ax_close(struct net_device *dev)
534{ 572{
535 struct ax_disp *ax = netdev_priv(dev); 573 struct mkiss *ax = netdev_priv(dev);
536 574
537 if (ax->tty == NULL) 575 if (ax->tty)
538 return -EBUSY; 576 clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
539
540 ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
541 577
542 netif_stop_queue(dev); 578 netif_stop_queue(dev);
543 579
544 return 0; 580 return 0;
545} 581}
546 582
547static int ax25_receive_room(struct tty_struct *tty) 583static struct net_device_stats *ax_get_stats(struct net_device *dev)
548{ 584{
549 return 65536; /* We can handle an infinite amount of data. :-) */ 585 struct mkiss *ax = netdev_priv(dev);
586
587 return &ax->stats;
588}
589
590static void ax_setup(struct net_device *dev)
591{
592 static char ax25_bcast[AX25_ADDR_LEN] =
593 {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
594 static char ax25_test[AX25_ADDR_LEN] =
595 {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
596
597 /* Finish setting up the DEVICE info. */
598 dev->mtu = AX_MTU;
599 dev->hard_start_xmit = ax_xmit;
600 dev->open = ax_open_dev;
601 dev->stop = ax_close;
602 dev->get_stats = ax_get_stats;
603 dev->set_mac_address = ax_set_mac_address;
604 dev->hard_header_len = 0;
605 dev->addr_len = 0;
606 dev->type = ARPHRD_AX25;
607 dev->tx_queue_len = 10;
608 dev->hard_header = ax_header;
609 dev->rebuild_header = ax_rebuild_header;
610
611 memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
612 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
613
614 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
550} 615}
551 616
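ax_setup() above seeds each interface with the broadcast callsign QST-0 and the default address LINUX-1, every byte shifted left by one: in AX.25 addressing the low bit of each address octet is reserved as the end-of-address marker. A hedged sketch of that wire encoding (the helper is invented for illustration):

#define AX25_ADDR_LEN 7

/* Pack a callsign plus SSID digit into AX.25 shifted wire format:
 * six left-shifted characters, space-padded, then the shifted SSID. */
static void ax25_pack_call(char out[AX25_ADDR_LEN], const char *call,
                           char ssid)
{
        int i;

        for (i = 0; i < 6; i++)
                out[i] = (*call ? *call++ : ' ') << 1;
        out[6] = ssid << 1;
}

/* ax25_pack_call(dev->broadcast, "QST", '0') reproduces the table above. */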
552/* 617/*
553 * Handle the 'receiver data ready' interrupt. 618 * We have a potential race on dereferencing tty->disc_data, because the tty
554 * This function is called by the 'tty_io' module in the kernel when 619 * layer provides no locking at all - thus one cpu could be running
555 * a block of data has been received, which can now be decapsulated 620 * sixpack_receive_buf while another calls sixpack_close, which zeroes
556 * and sent on to the AX.25 layer for further processing. 621 * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
622 * best way to fix this is to use a rwlock in the tty struct, but for now we
623 * use a single global rwlock for all ttys in ppp line discipline.
557 */ 624 */
558static void ax25_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) 625static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
626
627static struct mkiss *mkiss_get(struct tty_struct *tty)
559{ 628{
560 struct ax_disp *ax = (struct ax_disp *) tty->disc_data; 629 struct mkiss *ax;
561 630
562 if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev)) 631 read_lock(&disc_data_lock);
563 return; 632 ax = tty->disc_data;
633 if (ax)
634 atomic_inc(&ax->refcnt);
635 read_unlock(&disc_data_lock);
564 636
565 /* 637 return ax;
566 * Argh! mtu change time! - costs us the packet part received 638}
567 * at the change
568 */
569 if (ax->mtu != ax->dev->mtu + 73)
570 ax_changedmtu(ax);
571
572 /* Read the characters out of the buffer */
573 while (count--) {
574 if (fp != NULL && *fp++) {
575 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
576 ax->rx_errors++;
577 cp++;
578 continue;
579 }
580 639
581 kiss_unesc(ax, *cp++); 640static void mkiss_put(struct mkiss *ax)
582 } 641{
642 if (atomic_dec_and_test(&ax->refcnt))
643 up(&ax->dead_sem);
583} 644}
584 645
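mkiss_get() and mkiss_put() above give the line discipline a small reference count: mkiss_close() later drops the initial reference and sleeps on dead_sem until the last concurrent caller's mkiss_put() releases it. The expected caller pattern, sketched with an invented callback name:

static void example_tty_callback(struct tty_struct *tty)
{
        struct mkiss *ax = mkiss_get(tty);      /* reference, or NULL */

        if (!ax)
                return;         /* discipline already being torn down */

        /* ... ax->dev and the buffers are safe to touch here ... */

        mkiss_put(ax);          /* last put wakes a waiting mkiss_close() */
}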
585static int ax25_open(struct tty_struct *tty) 646static int mkiss_open(struct tty_struct *tty)
586{ 647{
587 struct ax_disp *ax = (struct ax_disp *) tty->disc_data; 648 struct net_device *dev;
649 struct mkiss *ax;
588 int err; 650 int err;
589 651
590 /* First make sure we're not already connected. */ 652 if (!capable(CAP_NET_ADMIN))
591 if (ax && ax->magic == AX25_MAGIC) 653 return -EPERM;
592 return -EEXIST;
593 654
594 /* OK. Find a free AX25 channel to use. */ 655 dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
595 if ((ax = ax_alloc()) == NULL) 656 if (!dev) {
596 return -ENFILE; 657 err = -ENOMEM;
658 goto out;
659 }
660
661 ax = netdev_priv(dev);
662 ax->dev = dev;
663
664 spin_lock_init(&ax->buflock);
665 atomic_set(&ax->refcnt, 1);
666 init_MUTEX_LOCKED(&ax->dead_sem);
597 667
598 ax->tty = tty; 668 ax->tty = tty;
599 tty->disc_data = ax; 669 tty->disc_data = ax;
@@ -602,283 +672,212 @@ static int ax25_open(struct tty_struct *tty)
602 tty->driver->flush_buffer(tty); 672 tty->driver->flush_buffer(tty);
603 673
604 /* Restore default settings */ 674 /* Restore default settings */
605 ax->dev->type = ARPHRD_AX25; 675 dev->type = ARPHRD_AX25;
606 676
607 /* Perform the low-level AX25 initialization. */ 677 /* Perform the low-level AX25 initialization. */
608 if ((err = ax_open(ax->dev))) 678 if ((err = ax_open(ax->dev))) {
609 return err; 679 goto out_free_netdev;
680 }
610 681
611 /* Done. We have linked the TTY line to a channel. */ 682 if (register_netdev(dev))
612 return ax->dev->base_addr; 683 goto out_free_buffers;
613}
614 684
615static void ax25_close(struct tty_struct *tty) 685 netif_start_queue(dev);
616{
617 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
618 686
619 /* First make sure we're connected. */ 687 /* Done. We have linked the TTY line to a channel. */
620 if (ax == NULL || ax->magic != AX25_MAGIC) 688 return 0;
621 return;
622 689
623 unregister_netdev(ax->dev); 690out_free_buffers:
691 kfree(ax->rbuff);
692 kfree(ax->xbuff);
624 693
625 tty->disc_data = NULL; 694out_free_netdev:
626 ax->tty = NULL; 695 free_netdev(dev);
627 696
628 ax_free(ax); 697out:
698 return err;
629} 699}
630 700
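mkiss_open() above switches the driver to the alloc_netdev() pattern: one allocation holds both the struct net_device and the struct mkiss private area, reached via netdev_priv(). A condensed sketch of that idiom, with error paths trimmed and a hypothetical wrapper name:

static struct mkiss *mkiss_alloc(void)
{
        struct net_device *dev;
        struct mkiss *ax;

        /* ax_setup() fills in the callbacks and AX.25 defaults on the
         * fresh device; "ax%d" names the interfaces ax0, ax1, ... */
        dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
        if (!dev)
                return NULL;

        ax = netdev_priv(dev);  /* private area just past the net_device */
        ax->dev = dev;          /* back-pointer used by the tty callbacks */
        return ax;
}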
631 701static void mkiss_close(struct tty_struct *tty)
632static struct net_device_stats *ax_get_stats(struct net_device *dev)
633{ 702{
634 static struct net_device_stats stats; 703 struct mkiss *ax;
635 struct ax_disp *ax = netdev_priv(dev);
636
637 memset(&stats, 0, sizeof(struct net_device_stats));
638
639 stats.rx_packets = ax->rx_packets;
640 stats.tx_packets = ax->tx_packets;
641 stats.rx_bytes = ax->rx_bytes;
642 stats.tx_bytes = ax->tx_bytes;
643 stats.rx_dropped = ax->rx_dropped;
644 stats.tx_dropped = ax->tx_dropped;
645 stats.tx_errors = ax->tx_errors;
646 stats.rx_errors = ax->rx_errors;
647 stats.rx_over_errors = ax->rx_over_errors;
648
649 return &stats;
650}
651 704
705 write_lock(&disc_data_lock);
706 ax = tty->disc_data;
707 tty->disc_data = NULL;
708 write_unlock(&disc_data_lock);
652 709
653/************************************************************************ 710 if (ax == 0)
654 * STANDARD ENCAPSULATION * 711 return;
655 ************************************************************************/
656
657static int kiss_esc(unsigned char *s, unsigned char *d, int len)
658{
659 unsigned char *ptr = d;
660 unsigned char c;
661 712
662 /* 713 /*
663 * Send an initial END character to flush out any 714 * We have now ensured that nobody can start using ap from now on, but
664 * data that may have accumulated in the receiver 715 * we have to wait for all existing users to finish.
665 * due to line noise.
666 */ 716 */
717 if (!atomic_dec_and_test(&ax->refcnt))
718 down(&ax->dead_sem);
667 719
668 *ptr++ = END; 720 unregister_netdev(ax->dev);
669
670 while (len-- > 0) {
671 switch (c = *s++) {
672 case END:
673 *ptr++ = ESC;
674 *ptr++ = ESC_END;
675 break;
676 case ESC:
677 *ptr++ = ESC;
678 *ptr++ = ESC_ESC;
679 break;
680 default:
681 *ptr++ = c;
682 break;
683 }
684 }
685 721
686 *ptr++ = END; 722 /* Free all AX25 frame buffers. */
723 kfree(ax->rbuff);
724 kfree(ax->xbuff);
687 725
688 return ptr - d; 726 ax->tty = NULL;
689} 727}
690 728
691/* 729/* Perform I/O control on an active ax25 channel. */
692 * MW: 730static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
693 * OK its ugly, but tell me a better solution without copying the 731 unsigned int cmd, unsigned long arg)
694 * packet to a temporary buffer :-)
695 */
696static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, int len)
697{ 732{
698 unsigned char *ptr = d; 733 struct mkiss *ax = mkiss_get(tty);
699 unsigned char c=0; 734 struct net_device *dev = ax->dev;
700 735 unsigned int tmp, err;
701 *ptr++ = END;
702 while (len > 0) {
703 if (len > 2)
704 c = *s++;
705 else if (len > 1)
706 c = crc >> 8;
707 else if (len > 0)
708 c = crc & 0xff;
709 736
710 len--; 737 /* First make sure we're connected. */
738 if (ax == NULL)
739 return -ENXIO;
711 740
712 switch (c) { 741 switch (cmd) {
713 case END: 742 case SIOCGIFNAME:
714 *ptr++ = ESC; 743 err = copy_to_user((void __user *) arg, ax->dev->name,
715 *ptr++ = ESC_END; 744 strlen(ax->dev->name) + 1) ? -EFAULT : 0;
716 break; 745 break;
717 case ESC: 746
718 *ptr++ = ESC; 747 case SIOCGIFENCAP:
719 *ptr++ = ESC_ESC; 748 err = put_user(4, (int __user *) arg);
720 break; 749 break;
721 default: 750
722 *ptr++ = c; 751 case SIOCSIFENCAP:
723 break; 752 if (get_user(tmp, (int __user *) arg)) {
753 err = -EFAULT;
754 break;
724 } 755 }
725 }
726 *ptr++ = END;
727 return ptr - d;
728}
729 756
730static void kiss_unesc(struct ax_disp *ax, unsigned char s) 757 ax->mode = tmp;
731{ 758 dev->addr_len = AX25_ADDR_LEN;
732 switch (s) { 759 dev->hard_header_len = AX25_KISS_HEADER_LEN +
733 case END: 760 AX25_MAX_HEADER_LEN + 3;
734 /* drop keeptest bit = VSV */ 761 dev->type = ARPHRD_AX25;
735 if (test_bit(AXF_KEEPTEST, &ax->flags))
736 clear_bit(AXF_KEEPTEST, &ax->flags);
737 762
738 if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2)) 763 err = 0;
739 ax_bump(ax); 764 break;
740 765
741 clear_bit(AXF_ESCAPE, &ax->flags); 766 case SIOCSIFHWADDR: {
742 ax->rcount = 0; 767 char addr[AX25_ADDR_LEN];
743 return; 768printk(KERN_INFO "In SIOCSIFHWADDR");
744 769
745 case ESC: 770 if (copy_from_user(&addr,
746 set_bit(AXF_ESCAPE, &ax->flags); 771 (void __user *) arg, AX25_ADDR_LEN)) {
747 return; 772 err = -EFAULT;
748 case ESC_ESC:
749 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
750 s = ESC;
751 break; 773 break;
752 case ESC_END:
753 if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
754 s = END;
755 break;
756 }
757
758 spin_lock_bh(&ax->buflock);
759 if (!test_bit(AXF_ERROR, &ax->flags)) {
760 if (ax->rcount < ax->buffsize) {
761 ax->rbuff[ax->rcount++] = s;
762 spin_unlock_bh(&ax->buflock);
763 return;
764 } 774 }
765 775
766 ax->rx_over_errors++; 776 spin_lock_irq(&dev->xmit_lock);
767 set_bit(AXF_ERROR, &ax->flags); 777 memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
778 spin_unlock_irq(&dev->xmit_lock);
779
780 err = 0;
781 break;
782 }
783 default:
784 err = -ENOIOCTLCMD;
768 } 785 }
769 spin_unlock_bh(&ax->buflock);
770}
771 786
787 mkiss_put(ax);
772 788
773static int ax_set_mac_address(struct net_device *dev, void __user *addr) 789 return err;
774{
775 if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
776 return -EFAULT;
777 return 0;
778} 790}
779 791
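One caution about mkiss_ioctl() above: it loads ax->dev before the ax == NULL check, so a racing close can still trip it. A safer prologue for the function, as a sketch, defers every dereference until the reference has been validated:

struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;

if (ax == NULL)
        return -ENXIO;

dev = ax->dev;          /* dereference only after the NULL check */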
780static int ax_set_dev_mac_address(struct net_device *dev, void *addr) 792/*
793 * Handle the 'receiver data ready' interrupt.
794 * This function is called by the 'tty_io' module in the kernel when
795 * a block of data has been received, which can now be decapsulated
796 * and sent on to the AX.25 layer for further processing.
797 */
798static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
799 char *fp, int count)
781{ 800{
782 struct sockaddr *sa = addr; 801 struct mkiss *ax = mkiss_get(tty);
783
784 memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
785 802
786 return 0; 803 if (!ax)
787} 804 return;
788
789
790/* Perform I/O control on an active ax25 channel. */
791static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
792{
793 struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
794 unsigned int tmp;
795 805
796 /* First make sure we're connected. */ 806 /*
797 if (ax == NULL || ax->magic != AX25_MAGIC) 807 * Argh! mtu change time! - costs us the packet part received
798 return -EINVAL; 808 * at the change
809 */
810 if (ax->mtu != ax->dev->mtu + 73)
811 ax_changedmtu(ax);
799 812
800 switch (cmd) { 813 /* Read the characters out of the buffer */
801 case SIOCGIFNAME: 814 while (count--) {
802 if (copy_to_user(arg, ax->dev->name, strlen(ax->dev->name) + 1)) 815 if (fp != NULL && *fp++) {
803 return -EFAULT; 816 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
804 return 0; 817 ax->stats.rx_errors++;
805 818 cp++;
806 case SIOCGIFENCAP: 819 continue;
807 return put_user(4, (int __user *)arg); 820 }
808
809 case SIOCSIFENCAP:
810 if (get_user(tmp, (int __user *)arg))
811 return -EFAULT;
812 ax->mode = tmp;
813 ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
814 ax->dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3;
815 ax->dev->type = ARPHRD_AX25;
816 return 0;
817
818 case SIOCSIFHWADDR:
819 return ax_set_mac_address(ax->dev, arg);
820 821
821 default: 822 kiss_unesc(ax, *cp++);
822 return -ENOIOCTLCMD;
823 } 823 }
824
825 mkiss_put(ax);
826 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
827 && tty->driver->unthrottle)
828 tty->driver->unthrottle(tty);
824} 829}
825 830
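mkiss_receive_buf() above feeds each good byte to kiss_unesc(), whose body is visible in the left-hand column: END delimits frames, and ESC introduces the two-byte escapes for literal END/ESC octets. A hedged, stand-alone rendering of that byte-stuffing rule (frame delivery itself is omitted; the constants are the standard KISS/SLIP values):

#define END     0300    /* KISS frame delimiter */
#define ESC     0333    /* escape introducer */
#define ESC_END 0334    /* escaped literal END */
#define ESC_ESC 0335    /* escaped literal ESC */

/* Decode one wire byte; returns the data byte, or -1 when the input
 * only changed decoder state (an ESC, or a frame-delimiting END). */
static int kiss_unesc_step(int *escaped, unsigned char s)
{
        if (s == END)
                return -1;      /* frame boundary: deliver buffer here */
        if (s == ESC) {
                *escaped = 1;
                return -1;
        }
        if (*escaped) {
                *escaped = 0;
                if (s == ESC_END)
                        return END;
                if (s == ESC_ESC)
                        return ESC;
        }
        return s;
}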
826static int ax_open_dev(struct net_device *dev) 831static int mkiss_receive_room(struct tty_struct *tty)
827{ 832{
828 struct ax_disp *ax = netdev_priv(dev); 833 return 65536; /* We can handle an infinite amount of data. :-) */
829
830 if (ax->tty == NULL)
831 return -ENODEV;
832
833 return 0;
834} 834}
835 835
836 836/*
837/* Initialize the driver. Called by network startup. */ 837 * Called by the driver when there's room for more data. If we have
838static int ax25_init(struct net_device *dev) 838 * more packets to send, we send them here.
839 */
840static void mkiss_write_wakeup(struct tty_struct *tty)
839{ 841{
840 struct ax_disp *ax = netdev_priv(dev); 842 struct mkiss *ax = mkiss_get(tty);
841 843 int actual;
842 static char ax25_bcast[AX25_ADDR_LEN] =
843 {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
844 static char ax25_test[AX25_ADDR_LEN] =
845 {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
846
847 if (ax == NULL) /* Allocation failed ?? */
848 return -ENODEV;
849 844
850 /* Set up the "AX25 Control Block". (And clear statistics) */ 845 if (!ax)
851 memset(ax, 0, sizeof (struct ax_disp)); 846 return;
852 ax->magic = AX25_MAGIC;
853 ax->dev = dev;
854 847
855 /* Finish setting up the DEVICE info. */ 848 if (ax->xleft <= 0) {
856 dev->mtu = AX_MTU; 849 /* Now serial buffer is almost free & we can start
857 dev->hard_start_xmit = ax_xmit; 850 * transmission of another packet
858 dev->open = ax_open_dev; 851 */
859 dev->stop = ax_close; 852 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
860 dev->get_stats = ax_get_stats;
861 dev->set_mac_address = ax_set_dev_mac_address;
862 dev->hard_header_len = 0;
863 dev->addr_len = 0;
864 dev->type = ARPHRD_AX25;
865 dev->tx_queue_len = 10;
866 dev->hard_header = ax_header;
867 dev->rebuild_header = ax_rebuild_header;
868 853
869 memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN); 854 netif_wake_queue(ax->dev);
870 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN); 855 goto out;
856 }
871 857
872 /* New-style flags. */ 858 actual = tty->driver->write(tty, ax->xhead, ax->xleft);
873 dev->flags = IFF_BROADCAST | IFF_MULTICAST; 859 ax->xleft -= actual;
860 ax->xhead += actual;
874 861
875 return 0; 862out:
863 mkiss_put(ax);
876} 864}
877 865
866static struct tty_ldisc ax_ldisc = {
867 .magic = TTY_LDISC_MAGIC,
868 .name = "mkiss",
869 .open = mkiss_open,
870 .close = mkiss_close,
871 .ioctl = mkiss_ioctl,
872 .receive_buf = mkiss_receive_buf,
873 .receive_room = mkiss_receive_room,
874 .write_wakeup = mkiss_write_wakeup
875};
878 876
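The tty_ldisc table above is what mkiss_init_driver() registers under N_AX25; user space then attaches the discipline to a serial port with the TIOCSETD ioctl, which is what triggers mkiss_open() and creates the ax0 interface. A sketch of that attach step from user space (the device path is only an example):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int attach_mkiss(const char *port)              /* e.g. "/dev/ttyS0" */
{
        int ldisc = 5;                          /* N_AX25 */
        int fd = open(port, O_RDWR | O_NOCTTY);

        if (fd < 0)
                return -1;
        /* The kernel calls ax_ldisc.open, i.e. mkiss_open(), here. */
        if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* keep open: closing detaches the discipline */
}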
879/* ******************************************************************** */ 877static char banner[] __initdata = KERN_INFO \
880/* * Init MKISS driver * */ 878 "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
881/* ******************************************************************** */ 879static char msg_regfail[] __initdata = KERN_ERR \
880 "mkiss: can't register line discipline (err = %d)\n";
882 881
883static int __init mkiss_init_driver(void) 882static int __init mkiss_init_driver(void)
884{ 883{
@@ -886,64 +885,27 @@ static int __init mkiss_init_driver(void)
886 885
887 printk(banner); 886 printk(banner);
888 887
889 if (ax25_maxdev < 4) 888 if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0)
890 ax25_maxdev = 4; /* Sanity */ 889 printk(msg_regfail);
891 890
892 if ((ax25_ctrls = kmalloc(sizeof(void *) * ax25_maxdev, GFP_KERNEL)) == NULL) {
893 printk(KERN_ERR "mkiss: Can't allocate ax25_ctrls[] array!\n");
894 return -ENOMEM;
895 }
896
897 /* Clear the pointer array, we allocate devices when we need them */
898 memset(ax25_ctrls, 0, sizeof(void*) * ax25_maxdev); /* Pointers */
899
900 /* Fill in our line protocol discipline, and register it */
901 ax_ldisc.magic = TTY_LDISC_MAGIC;
902 ax_ldisc.name = "mkiss";
903 ax_ldisc.open = ax25_open;
904 ax_ldisc.close = ax25_close;
905 ax_ldisc.ioctl = (int (*)(struct tty_struct *, struct file *,
906 unsigned int, unsigned long))ax25_disp_ioctl;
907 ax_ldisc.receive_buf = ax25_receive_buf;
908 ax_ldisc.receive_room = ax25_receive_room;
909 ax_ldisc.write_wakeup = ax25_write_wakeup;
910
911 if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0) {
912 printk(KERN_ERR "mkiss: can't register line discipline (err = %d)\n", status);
913 kfree(ax25_ctrls);
914 }
915 return status; 891 return status;
916} 892}
917 893
894static const char msg_unregfail[] __exitdata = KERN_ERR \
895 "mkiss: can't unregister line discipline (err = %d)\n";
896
918static void __exit mkiss_exit_driver(void) 897static void __exit mkiss_exit_driver(void)
919{ 898{
920 int i; 899 int ret;
921
922 for (i = 0; i < ax25_maxdev; i++) {
923 if (ax25_ctrls[i]) {
924 /*
925 * VSV = if dev->start==0, then device
926 * unregistered while close proc.
927 */
928 if (netif_running(&ax25_ctrls[i]->dev))
929 unregister_netdev(&ax25_ctrls[i]->dev);
930 kfree(ax25_ctrls[i]);
931 }
932 }
933 900
934 kfree(ax25_ctrls); 901 if ((ret = tty_unregister_ldisc(N_AX25)))
935 ax25_ctrls = NULL; 902 printk(msg_unregfail, ret);
936
937 if ((i = tty_unregister_ldisc(N_AX25)))
938 printk(KERN_ERR "mkiss: can't unregister line discipline (err = %d)\n", i);
939} 903}
940 904
941MODULE_AUTHOR("Hans Albas PE1AYX <hans@esrac.ele.tue.nl>"); 905MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
942MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs"); 906MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
943MODULE_PARM(ax25_maxdev, "i");
944MODULE_PARM_DESC(ax25_maxdev, "number of MKISS devices");
945MODULE_LICENSE("GPL"); 907MODULE_LICENSE("GPL");
946MODULE_ALIAS_LDISC(N_AX25); 908MODULE_ALIAS_LDISC(N_AX25);
909
947module_init(mkiss_init_driver); 910module_init(mkiss_init_driver);
948module_exit(mkiss_exit_driver); 911module_exit(mkiss_exit_driver);
949
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index c7fb3675c09d..0de3bb906174 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1253,7 +1253,7 @@ static int emac_init_tah(struct ocp_enet_private *fep)
1253 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | 1253 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
1254 TAH_MR_DIG); 1254 TAH_MR_DIG);
1255 1255
1256 iounmap(&tahp); 1256 iounmap(tahp);
1257 1257
1258 return 0; 1258 return 0;
1259} 1259}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c39b0609742a..32d5fabd4b10 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1144,7 +1144,7 @@ static void ibmveth_proc_unregister_driver(void)
1144 1144
1145static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1145static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1146 { "network", "IBM,l-lan"}, 1146 { "network", "IBM,l-lan"},
1147 { 0,} 1147 { "", "" }
1148}; 1148};
1149 1149
1150MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 1150MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 55af32e9bf08..183ba97785b0 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1370,7 +1370,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1370 */ 1370 */
1371static struct vio_device_id veth_device_table[] __devinitdata = { 1371static struct vio_device_id veth_device_table[] __devinitdata = {
1372 { "vlan", "" }, 1372 { "vlan", "" },
1373 { NULL, NULL } 1373 { "", "" }
1374}; 1374};
1375MODULE_DEVICE_TABLE(vio, veth_device_table); 1375MODULE_DEVICE_TABLE(vio, veth_device_table);
1376 1376
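Both vio table hunks above change the terminating entry from 0/NULL to empty strings. The vio_device_id fields are fixed-size char arrays, so a NULL initializer is a pointer-into-array type mismatch, while the matcher stops at the first entry whose type string is empty. The table shape this sentinel assumes, as a hedged sketch with a stand-in struct:

#include <string.h>

struct vio_id_example {         /* stand-in: real fields are char arrays */
        char type[32];
        char compat[32];
};

static const struct vio_id_example *vio_match(
                const struct vio_id_example *ids, const char *type)
{
        while (ids->type[0] != '\0') {          /* "" terminates the table */
                if (strcmp(ids->type, type) == 0)
                        return ids;
                ids++;
        }
        return NULL;
}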
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f8d3385c7842..c83271b38621 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -119,7 +119,7 @@ struct ixgb_adapter;
119 * so a DMA handle can be stored along with the buffer */ 119 * so a DMA handle can be stored along with the buffer */
120struct ixgb_buffer { 120struct ixgb_buffer {
121 struct sk_buff *skb; 121 struct sk_buff *skb;
122 uint64_t dma; 122 dma_addr_t dma;
123 unsigned long time_stamp; 123 unsigned long time_stamp;
124 uint16_t length; 124 uint16_t length;
125 uint16_t next_to_watch; 125 uint16_t next_to_watch;
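The ixgb.h hunk above retypes the per-buffer DMA handle from uint64_t to dma_addr_t, which is what the DMA mapping calls actually return (32 or 64 bits depending on the platform). A sketch of the call that fills the field, assuming the usual pci_map_single() path of this era (the wrapper name is invented):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map a transmit skb; the handle is dma_addr_t, so storing it in a
 * uint64_t merely hid the platform-dependent width from the compiler. */
static dma_addr_t ixgb_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
                              struct ixgb_buffer *buffer)
{
        buffer->dma = pci_map_single(pdev, skb->data, skb->len,
                                     PCI_DMA_TODEVICE);
        buffer->length = skb->len;
        return buffer->dma;
}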
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 3aae110c5560..661a46b95a61 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -565,24 +565,6 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
565 } 565 }
566} 566}
567 567
568/******************************************************************************
569 * return the compatibility flags from EEPROM
570 *
571 * hw - Struct containing variables accessed by shared code
572 *
573 * Returns:
574 * compatibility flags if EEPROM contents are valid, 0 otherwise
575 ******************************************************************************/
576uint16_t
577ixgb_get_ee_compatibility(struct ixgb_hw *hw)
578{
579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
580
581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
582 return (le16_to_cpu(ee_map->compatibility));
583
584 return(0);
585}
586 568
587/****************************************************************************** 569/******************************************************************************
588 * return the Printed Board Assembly number from EEPROM 570 * return the Printed Board Assembly number from EEPROM
@@ -602,81 +584,6 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
602 return(0); 584 return(0);
603} 585}
604 586
605/******************************************************************************
606 * return the Initialization Control Word 1 from EEPROM
607 *
608 * hw - Struct containing variables accessed by shared code
609 *
610 * Returns:
611 * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
612 ******************************************************************************/
613uint16_t
614ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
615{
616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
617
618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
619 return (le16_to_cpu(ee_map->init_ctrl_reg_1));
620
621 return(0);
622}
623
624/******************************************************************************
625 * return the Initialization Control Word 2 from EEPROM
626 *
627 * hw - Struct containing variables accessed by shared code
628 *
629 * Returns:
630 * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
631 ******************************************************************************/
632uint16_t
633ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
634{
635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
636
637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
638 return (le16_to_cpu(ee_map->init_ctrl_reg_2));
639
640 return(0);
641}
642
643/******************************************************************************
644 * return the Subsystem Id from EEPROM
645 *
646 * hw - Struct containing variables accessed by shared code
647 *
648 * Returns:
649 * Subsystem Id if EEPROM contents are valid, 0 otherwise
650 ******************************************************************************/
651uint16_t
652ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
653{
654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
655
656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
657 return (le16_to_cpu(ee_map->subsystem_id));
658
659 return(0);
660}
661
662/******************************************************************************
663 * return the Sub Vendor Id from EEPROM
664 *
665 * hw - Struct containing variables accessed by shared code
666 *
667 * Returns:
668 * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
669 ******************************************************************************/
670uint16_t
671ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
672{
673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
674
675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
676 return (le16_to_cpu(ee_map->subvendor_id));
677
678 return(0);
679}
680 587
681/****************************************************************************** 588/******************************************************************************
682 * return the Device Id from EEPROM 589 * return the Device Id from EEPROM
@@ -694,81 +601,6 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 601 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
695 return (le16_to_cpu(ee_map->device_id)); 602 return (le16_to_cpu(ee_map->device_id));
696 603
697 return(0); 604 return (0);
698}
699
700/******************************************************************************
701 * return the Vendor Id from EEPROM
702 *
703 * hw - Struct containing variables accessed by shared code
704 *
705 * Returns:
706 * Device Id if EEPROM contents are valid, 0 otherwise
707 ******************************************************************************/
708uint16_t
709ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
710{
711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
712
713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
714 return (le16_to_cpu(ee_map->vendor_id));
715
716 return(0);
717}
718
719/******************************************************************************
720 * return the Software Defined Pins Register from EEPROM
721 *
722 * hw - Struct containing variables accessed by shared code
723 *
724 * Returns:
725 * SDP Register if EEPROM contents are valid, 0 otherwise
726 ******************************************************************************/
727uint16_t
728ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
729{
730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
731
732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
733 return (le16_to_cpu(ee_map->swdpins_reg));
734
735 return(0);
736} 605}
737 606
738/******************************************************************************
739 * return the D3 Power Management Bits from EEPROM
740 *
741 * hw - Struct containing variables accessed by shared code
742 *
743 * Returns:
744 * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
745 ******************************************************************************/
746uint8_t
747ixgb_get_ee_d3_power(struct ixgb_hw *hw)
748{
749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
750
751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
752 return (le16_to_cpu(ee_map->d3_power));
753
754 return(0);
755}
756
757/******************************************************************************
758 * return the D0 Power Management Bits from EEPROM
759 *
760 * hw - Struct containing variables accessed by shared code
761 *
762 * Returns:
763 * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
764 ******************************************************************************/
765uint8_t
766ixgb_get_ee_d0_power(struct ixgb_hw *hw)
767{
768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
769
770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
771 return (le16_to_cpu(ee_map->d0_power));
772
773 return(0);
774}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 3fa113854eeb..9d026ed77ddd 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -98,10 +98,10 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
98static int 98static int
99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 99ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
100{ 100{
101 struct ixgb_adapter *adapter = netdev->priv; 101 struct ixgb_adapter *adapter = netdev_priv(netdev);
102 102
103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 103 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
104 ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 104 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
105 ecmd->port = PORT_FIBRE; 105 ecmd->port = PORT_FIBRE;
106 ecmd->transceiver = XCVR_EXTERNAL; 106 ecmd->transceiver = XCVR_EXTERNAL;
107 107
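This and every following hunk in the file replace netdev->priv with netdev_priv(netdev). When the device is created by alloc_etherdev(sizeof(priv)), the private area lives in the same allocation directly after the net_device, and netdev_priv() computes that offset instead of trusting a separately stored pointer. A sketch assuming that combined allocation (the function name is invented):

#include <linux/etherdevice.h>

static struct ixgb_adapter *example_alloc(void)
{
        struct net_device *netdev =
                alloc_etherdev(sizeof(struct ixgb_adapter));

        if (!netdev)
                return NULL;

        /* Address arithmetic, not a pointer load: the private region
         * sits at a fixed, aligned offset past the net_device. */
        return netdev_priv(netdev);
}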
@@ -120,7 +120,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
120static int 120static int
121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 121ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
122{ 122{
123 struct ixgb_adapter *adapter = netdev->priv; 123 struct ixgb_adapter *adapter = netdev_priv(netdev);
124 124
125 if(ecmd->autoneg == AUTONEG_ENABLE || 125 if(ecmd->autoneg == AUTONEG_ENABLE ||
126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 126 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
@@ -130,6 +130,12 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
130 ixgb_down(adapter, TRUE); 130 ixgb_down(adapter, TRUE);
131 ixgb_reset(adapter); 131 ixgb_reset(adapter);
132 ixgb_up(adapter); 132 ixgb_up(adapter);
133 /* be optimistic about our link, since we were up before */
134 adapter->link_speed = 10000;
135 adapter->link_duplex = FULL_DUPLEX;
136 netif_carrier_on(netdev);
137 netif_wake_queue(netdev);
138
133 } else 139 } else
134 ixgb_reset(adapter); 140 ixgb_reset(adapter);
135 141
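Several hunks in this file add the same four lines after ixgb_up(): the driver assumes the 10 Gb/s full-duplex link it had before the reset and re-opens the transmit path immediately, instead of waiting for the next watchdog tick. The repeated idiom, shown as the helper it could plausibly be factored into (the helper name is invented):

/* Hypothetical consolidation of the repeated post-ixgb_up() lines. */
static void ixgb_assume_link_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        /* be optimistic about our link, since we were up before */
        adapter->link_speed = 10000;
        adapter->link_duplex = FULL_DUPLEX;
        netif_carrier_on(netdev);
        netif_wake_queue(netdev);
}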
@@ -140,7 +146,7 @@ static void
140ixgb_get_pauseparam(struct net_device *netdev, 146ixgb_get_pauseparam(struct net_device *netdev,
141 struct ethtool_pauseparam *pause) 147 struct ethtool_pauseparam *pause)
142{ 148{
143 struct ixgb_adapter *adapter = netdev->priv; 149 struct ixgb_adapter *adapter = netdev_priv(netdev);
144 struct ixgb_hw *hw = &adapter->hw; 150 struct ixgb_hw *hw = &adapter->hw;
145 151
146 pause->autoneg = AUTONEG_DISABLE; 152 pause->autoneg = AUTONEG_DISABLE;
@@ -159,7 +165,7 @@ static int
159ixgb_set_pauseparam(struct net_device *netdev, 165ixgb_set_pauseparam(struct net_device *netdev,
160 struct ethtool_pauseparam *pause) 166 struct ethtool_pauseparam *pause)
161{ 167{
162 struct ixgb_adapter *adapter = netdev->priv; 168 struct ixgb_adapter *adapter = netdev_priv(netdev);
163 struct ixgb_hw *hw = &adapter->hw; 169 struct ixgb_hw *hw = &adapter->hw;
164 170
165 if(pause->autoneg == AUTONEG_ENABLE) 171 if(pause->autoneg == AUTONEG_ENABLE)
@@ -177,6 +183,11 @@ ixgb_set_pauseparam(struct net_device *netdev,
177 if(netif_running(adapter->netdev)) { 183 if(netif_running(adapter->netdev)) {
178 ixgb_down(adapter, TRUE); 184 ixgb_down(adapter, TRUE);
179 ixgb_up(adapter); 185 ixgb_up(adapter);
186 /* be optimistic about our link, since we were up before */
187 adapter->link_speed = 10000;
188 adapter->link_duplex = FULL_DUPLEX;
189 netif_carrier_on(netdev);
190 netif_wake_queue(netdev);
180 } else 191 } else
181 ixgb_reset(adapter); 192 ixgb_reset(adapter);
182 193
@@ -186,19 +197,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
186static uint32_t 197static uint32_t
187ixgb_get_rx_csum(struct net_device *netdev) 198ixgb_get_rx_csum(struct net_device *netdev)
188{ 199{
189 struct ixgb_adapter *adapter = netdev->priv; 200 struct ixgb_adapter *adapter = netdev_priv(netdev);
201
190 return adapter->rx_csum; 202 return adapter->rx_csum;
191} 203}
192 204
193static int 205static int
194ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) 206ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
195{ 207{
196 struct ixgb_adapter *adapter = netdev->priv; 208 struct ixgb_adapter *adapter = netdev_priv(netdev);
209
197 adapter->rx_csum = data; 210 adapter->rx_csum = data;
198 211
199 if(netif_running(netdev)) { 212 if(netif_running(netdev)) {
200 ixgb_down(adapter,TRUE); 213 ixgb_down(adapter,TRUE);
201 ixgb_up(adapter); 214 ixgb_up(adapter);
215 /* be optimistic about our link, since we were up before */
216 adapter->link_speed = 10000;
217 adapter->link_duplex = FULL_DUPLEX;
218 netif_carrier_on(netdev);
219 netif_wake_queue(netdev);
202 } else 220 } else
203 ixgb_reset(adapter); 221 ixgb_reset(adapter);
204 return 0; 222 return 0;
@@ -246,14 +264,15 @@ static void
246ixgb_get_regs(struct net_device *netdev, 264ixgb_get_regs(struct net_device *netdev,
247 struct ethtool_regs *regs, void *p) 265 struct ethtool_regs *regs, void *p)
248{ 266{
249 struct ixgb_adapter *adapter = netdev->priv; 267 struct ixgb_adapter *adapter = netdev_priv(netdev);
250 struct ixgb_hw *hw = &adapter->hw; 268 struct ixgb_hw *hw = &adapter->hw;
251 uint32_t *reg = p; 269 uint32_t *reg = p;
252 uint32_t *reg_start = reg; 270 uint32_t *reg_start = reg;
253 uint8_t i; 271 uint8_t i;
254 272
255 /* the 1 (one) below indicates an attempt at versioning, if the 273 /* the 1 (one) below indicates an attempt at versioning, if the
256 * interface in ethtool or the driver this 1 should be incremented */ 274 * interface in ethtool or the driver changes, this 1 should be
275 * incremented */
257 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; 276 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
258 277
259 /* General Registers */ 278 /* General Registers */
@@ -283,7 +302,8 @@ ixgb_get_regs(struct net_device *netdev,
283 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ 302 *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
284 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ 303 *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
285 304
286 for (i = 0; i < IXGB_RAR_ENTRIES; i++) { 305 /* there are 16 RAR entries in hardware, we only use 3 */
306 for(i = 0; i < 16; i++) {
287 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ 307 *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
288 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ 308 *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
289 } 309 }
@@ -391,7 +411,7 @@ static int
391ixgb_get_eeprom(struct net_device *netdev, 411ixgb_get_eeprom(struct net_device *netdev,
392 struct ethtool_eeprom *eeprom, uint8_t *bytes) 412 struct ethtool_eeprom *eeprom, uint8_t *bytes)
393{ 413{
394 struct ixgb_adapter *adapter = netdev->priv; 414 struct ixgb_adapter *adapter = netdev_priv(netdev);
395 struct ixgb_hw *hw = &adapter->hw; 415 struct ixgb_hw *hw = &adapter->hw;
396 uint16_t *eeprom_buff; 416 uint16_t *eeprom_buff;
397 int i, max_len, first_word, last_word; 417 int i, max_len, first_word, last_word;
@@ -439,7 +459,7 @@ static int
439ixgb_set_eeprom(struct net_device *netdev, 459ixgb_set_eeprom(struct net_device *netdev,
440 struct ethtool_eeprom *eeprom, uint8_t *bytes) 460 struct ethtool_eeprom *eeprom, uint8_t *bytes)
441{ 461{
442 struct ixgb_adapter *adapter = netdev->priv; 462 struct ixgb_adapter *adapter = netdev_priv(netdev);
443 struct ixgb_hw *hw = &adapter->hw; 463 struct ixgb_hw *hw = &adapter->hw;
444 uint16_t *eeprom_buff; 464 uint16_t *eeprom_buff;
445 void *ptr; 465 void *ptr;
@@ -497,7 +517,7 @@ static void
497ixgb_get_drvinfo(struct net_device *netdev, 517ixgb_get_drvinfo(struct net_device *netdev,
498 struct ethtool_drvinfo *drvinfo) 518 struct ethtool_drvinfo *drvinfo)
499{ 519{
500 struct ixgb_adapter *adapter = netdev->priv; 520 struct ixgb_adapter *adapter = netdev_priv(netdev);
501 521
502 strncpy(drvinfo->driver, ixgb_driver_name, 32); 522 strncpy(drvinfo->driver, ixgb_driver_name, 32);
503 strncpy(drvinfo->version, ixgb_driver_version, 32); 523 strncpy(drvinfo->version, ixgb_driver_version, 32);
@@ -512,7 +532,7 @@ static void
512ixgb_get_ringparam(struct net_device *netdev, 532ixgb_get_ringparam(struct net_device *netdev,
513 struct ethtool_ringparam *ring) 533 struct ethtool_ringparam *ring)
514{ 534{
515 struct ixgb_adapter *adapter = netdev->priv; 535 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 536 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
517 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 537 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
518 538
@@ -530,7 +550,7 @@ static int
530ixgb_set_ringparam(struct net_device *netdev, 550ixgb_set_ringparam(struct net_device *netdev,
531 struct ethtool_ringparam *ring) 551 struct ethtool_ringparam *ring)
532{ 552{
533 struct ixgb_adapter *adapter = netdev->priv; 553 struct ixgb_adapter *adapter = netdev_priv(netdev);
534 struct ixgb_desc_ring *txdr = &adapter->tx_ring; 554 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
535 struct ixgb_desc_ring *rxdr = &adapter->rx_ring; 555 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
536 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; 556 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
@@ -573,6 +593,11 @@ ixgb_set_ringparam(struct net_device *netdev,
573 adapter->tx_ring = tx_new; 593 adapter->tx_ring = tx_new;
574 if((err = ixgb_up(adapter))) 594 if((err = ixgb_up(adapter)))
575 return err; 595 return err;
596 /* be optimistic about our link, since we were up before */
597 adapter->link_speed = 10000;
598 adapter->link_duplex = FULL_DUPLEX;
599 netif_carrier_on(netdev);
600 netif_wake_queue(netdev);
576 } 601 }
577 602
578 return 0; 603 return 0;
@@ -607,7 +632,7 @@ ixgb_led_blink_callback(unsigned long data)
607static int 632static int
608ixgb_phys_id(struct net_device *netdev, uint32_t data) 633ixgb_phys_id(struct net_device *netdev, uint32_t data)
609{ 634{
610 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
611 636
612 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 637 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
613 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 638 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
@@ -643,7 +668,7 @@ static void
643ixgb_get_ethtool_stats(struct net_device *netdev, 668ixgb_get_ethtool_stats(struct net_device *netdev,
644 struct ethtool_stats *stats, uint64_t *data) 669 struct ethtool_stats *stats, uint64_t *data)
645{ 670{
646 struct ixgb_adapter *adapter = netdev->priv; 671 struct ixgb_adapter *adapter = netdev_priv(netdev);
647 int i; 672 int i;
648 673
649 ixgb_update_stats(adapter); 674 ixgb_update_stats(adapter);
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 97898efe7cc8..8bcf31ed10c2 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -822,17 +822,8 @@ extern void ixgb_clear_vfta(struct ixgb_hw *hw);
822 822
823/* Access functions to eeprom data */ 823/* Access functions to eeprom data */
824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 824void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
825uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
826uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 825uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
827uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
828uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
829uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
830uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
831uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 826uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
832uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
833uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
834uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
835uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
836boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); 827boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
837uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 828uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
838 829
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 097b90ccf575..5c555373adbe 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -29,6 +29,11 @@
29#include "ixgb.h" 29#include "ixgb.h"
30 30
31/* Change Log 31/* Change Log
32 * 1.0.96 04/19/05
33 * - Make needlessly global code static -- bunk@stusta.de
34 * - ethtool cleanup -- shemminger@osdl.org
35 * - Support for MODULE_VERSION -- linville@tuxdriver.com
36 * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
32 * 1.0.88 01/05/05 37 * 1.0.88 01/05/05
33 * - include fix to the condition that determines when to quit NAPI - Robert Olsson 38 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
34 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down 39 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
@@ -47,10 +52,9 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
47#else 52#else
48#define DRIVERNAPI "-NAPI" 53#define DRIVERNAPI "-NAPI"
49#endif 54#endif
50 55#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
51#define DRV_VERSION "1.0.95-k2"DRIVERNAPI
52char ixgb_driver_version[] = DRV_VERSION; 56char ixgb_driver_version[] = DRV_VERSION;
53char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 57static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
54 58
55/* ixgb_pci_tbl - PCI Device ID Table 59/* ixgb_pci_tbl - PCI Device ID Table
56 * 60 *
@@ -145,10 +149,12 @@ MODULE_LICENSE("GPL");
145MODULE_VERSION(DRV_VERSION); 149MODULE_VERSION(DRV_VERSION);
146 150
147/* some defines for controlling descriptor fetches in h/w */ 151/* some defines for controlling descriptor fetches in h/w */
148#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
149#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
150 pushed this many descriptors from head */
151#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ 152#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
153#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefech below
154 * this */
155#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
156 * is pushed this many descriptors
157 * from head */
152 158
153/** 159/**
154 * ixgb_init_module - Driver Registration Routine 160 * ixgb_init_module - Driver Registration Routine
@@ -376,7 +382,7 @@ ixgb_probe(struct pci_dev *pdev,
376 SET_NETDEV_DEV(netdev, &pdev->dev); 382 SET_NETDEV_DEV(netdev, &pdev->dev);
377 383
378 pci_set_drvdata(pdev, netdev); 384 pci_set_drvdata(pdev, netdev);
379 adapter = netdev->priv; 385 adapter = netdev_priv(netdev);
380 adapter->netdev = netdev; 386 adapter->netdev = netdev;
381 adapter->pdev = pdev; 387 adapter->pdev = pdev;
382 adapter->hw.back = adapter; 388 adapter->hw.back = adapter;
@@ -512,7 +518,7 @@ static void __devexit
512ixgb_remove(struct pci_dev *pdev) 518ixgb_remove(struct pci_dev *pdev)
513{ 519{
514 struct net_device *netdev = pci_get_drvdata(pdev); 520 struct net_device *netdev = pci_get_drvdata(pdev);
515 struct ixgb_adapter *adapter = netdev->priv; 521 struct ixgb_adapter *adapter = netdev_priv(netdev);
516 522
517 unregister_netdev(netdev); 523 unregister_netdev(netdev);
518 524
@@ -583,7 +589,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
583static int 589static int
584ixgb_open(struct net_device *netdev) 590ixgb_open(struct net_device *netdev)
585{ 591{
586 struct ixgb_adapter *adapter = netdev->priv; 592 struct ixgb_adapter *adapter = netdev_priv(netdev);
587 int err; 593 int err;
588 594
589 /* allocate transmit descriptors */ 595 /* allocate transmit descriptors */
@@ -626,7 +632,7 @@ err_setup_tx:
626static int 632static int
627ixgb_close(struct net_device *netdev) 633ixgb_close(struct net_device *netdev)
628{ 634{
629 struct ixgb_adapter *adapter = netdev->priv; 635 struct ixgb_adapter *adapter = netdev_priv(netdev);
630 636
631 ixgb_down(adapter, TRUE); 637 ixgb_down(adapter, TRUE);
632 638
@@ -1017,7 +1023,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1017static int 1023static int
1018ixgb_set_mac(struct net_device *netdev, void *p) 1024ixgb_set_mac(struct net_device *netdev, void *p)
1019{ 1025{
1020 struct ixgb_adapter *adapter = netdev->priv; 1026 struct ixgb_adapter *adapter = netdev_priv(netdev);
1021 struct sockaddr *addr = p; 1027 struct sockaddr *addr = p;
1022 1028
1023 if(!is_valid_ether_addr(addr->sa_data)) 1029 if(!is_valid_ether_addr(addr->sa_data))
@@ -1043,7 +1049,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
1043static void 1049static void
1044ixgb_set_multi(struct net_device *netdev) 1050ixgb_set_multi(struct net_device *netdev)
1045{ 1051{
1046 struct ixgb_adapter *adapter = netdev->priv; 1052 struct ixgb_adapter *adapter = netdev_priv(netdev);
1047 struct ixgb_hw *hw = &adapter->hw; 1053 struct ixgb_hw *hw = &adapter->hw;
1048 struct dev_mc_list *mc_ptr; 1054 struct dev_mc_list *mc_ptr;
1049 uint32_t rctl; 1055 uint32_t rctl;
@@ -1371,7 +1377,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1371static int 1377static int
1372ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1378ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1373{ 1379{
1374 struct ixgb_adapter *adapter = netdev->priv; 1380 struct ixgb_adapter *adapter = netdev_priv(netdev);
1375 unsigned int first; 1381 unsigned int first;
1376 unsigned int tx_flags = 0; 1382 unsigned int tx_flags = 0;
1377 unsigned long flags; 1383 unsigned long flags;
@@ -1425,7 +1431,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1425static void 1431static void
1426ixgb_tx_timeout(struct net_device *netdev) 1432ixgb_tx_timeout(struct net_device *netdev)
1427{ 1433{
1428 struct ixgb_adapter *adapter = netdev->priv; 1434 struct ixgb_adapter *adapter = netdev_priv(netdev);
1429 1435
1430 /* Do the reset outside of interrupt context */ 1436 /* Do the reset outside of interrupt context */
1431 schedule_work(&adapter->tx_timeout_task); 1437 schedule_work(&adapter->tx_timeout_task);
@@ -1434,7 +1440,7 @@ ixgb_tx_timeout(struct net_device *netdev)
1434static void 1440static void
1435ixgb_tx_timeout_task(struct net_device *netdev) 1441ixgb_tx_timeout_task(struct net_device *netdev)
1436{ 1442{
1437 struct ixgb_adapter *adapter = netdev->priv; 1443 struct ixgb_adapter *adapter = netdev_priv(netdev);
1438 1444
1439 ixgb_down(adapter, TRUE); 1445 ixgb_down(adapter, TRUE);
1440 ixgb_up(adapter); 1446 ixgb_up(adapter);
@@ -1451,7 +1457,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
1451static struct net_device_stats * 1457static struct net_device_stats *
1452ixgb_get_stats(struct net_device *netdev) 1458ixgb_get_stats(struct net_device *netdev)
1453{ 1459{
1454 struct ixgb_adapter *adapter = netdev->priv; 1460 struct ixgb_adapter *adapter = netdev_priv(netdev);
1455 1461
1456 return &adapter->net_stats; 1462 return &adapter->net_stats;
1457} 1463}
@@ -1467,7 +1473,7 @@ ixgb_get_stats(struct net_device *netdev)
1467static int 1473static int
1468ixgb_change_mtu(struct net_device *netdev, int new_mtu) 1474ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1469{ 1475{
1470 struct ixgb_adapter *adapter = netdev->priv; 1476 struct ixgb_adapter *adapter = netdev_priv(netdev);
1471 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1477 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1472 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; 1478 int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1473 1479
@@ -1522,7 +1528,8 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1522 1528
1523 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1529 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1524 /* fix up multicast stats by removing broadcasts */ 1530 /* fix up multicast stats by removing broadcasts */
1525 multi -= bcast; 1531 if(multi >= bcast)
1532 multi -= bcast;
1526 1533
1527 adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1534 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1528 adapter->stats.mprch += (multi >> 32); 1535 adapter->stats.mprch += (multi >> 32);
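The guard added above prevents an unsigned underflow: multi and bcast are 64-bit hardware counters sampled at slightly different moments, so bcast can briefly exceed multi, and an unguarded multi -= bcast wraps to an enormous bogus count. A tiny stand-alone demonstration of the failure mode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t multi = 10, bcast = 12;  /* skewed counter snapshots */

        /* Unguarded subtraction wraps: prints 18446744073709551614. */
        printf("%llu\n", (unsigned long long)(multi - bcast));

        /* Guarded form from the hunk above keeps the count sane. */
        if (multi >= bcast)
                multi -= bcast;
        return 0;
}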
@@ -1641,7 +1648,7 @@ static irqreturn_t
1641ixgb_intr(int irq, void *data, struct pt_regs *regs) 1648ixgb_intr(int irq, void *data, struct pt_regs *regs)
1642{ 1649{
1643 struct net_device *netdev = data; 1650 struct net_device *netdev = data;
1644 struct ixgb_adapter *adapter = netdev->priv; 1651 struct ixgb_adapter *adapter = netdev_priv(netdev);
1645 struct ixgb_hw *hw = &adapter->hw; 1652 struct ixgb_hw *hw = &adapter->hw;
1646 uint32_t icr = IXGB_READ_REG(hw, ICR); 1653 uint32_t icr = IXGB_READ_REG(hw, ICR);
1647#ifndef CONFIG_IXGB_NAPI 1654#ifndef CONFIG_IXGB_NAPI
@@ -1688,7 +1695,7 @@ ixgb_intr(int irq, void *data, struct pt_regs *regs)
1688static int 1695static int
1689ixgb_clean(struct net_device *netdev, int *budget) 1696ixgb_clean(struct net_device *netdev, int *budget)
1690{ 1697{
1691 struct ixgb_adapter *adapter = netdev->priv; 1698 struct ixgb_adapter *adapter = netdev_priv(netdev);
1692 int work_to_do = min(*budget, netdev->quota); 1699 int work_to_do = min(*budget, netdev->quota);
1693 int tx_cleaned; 1700 int tx_cleaned;
1694 int work_done = 0; 1701 int work_done = 0;
@@ -2017,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2017static void 2024static void
2018ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2025ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2019{ 2026{
2020 struct ixgb_adapter *adapter = netdev->priv; 2027 struct ixgb_adapter *adapter = netdev_priv(netdev);
2021 uint32_t ctrl, rctl; 2028 uint32_t ctrl, rctl;
2022 2029
2023 ixgb_irq_disable(adapter); 2030 ixgb_irq_disable(adapter);
@@ -2055,7 +2062,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2055static void 2062static void
2056ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 2063ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2057{ 2064{
2058 struct ixgb_adapter *adapter = netdev->priv; 2065 struct ixgb_adapter *adapter = netdev_priv(netdev);
2059 uint32_t vfta, index; 2066 uint32_t vfta, index;
2060 2067
2061 /* add VID to filter table */ 2068 /* add VID to filter table */
@@ -2069,7 +2076,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2069static void 2076static void
2070ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 2077ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2071{ 2078{
2072 struct ixgb_adapter *adapter = netdev->priv; 2079 struct ixgb_adapter *adapter = netdev_priv(netdev);
2073 uint32_t vfta, index; 2080 uint32_t vfta, index;
2074 2081
2075 ixgb_irq_disable(adapter); 2082 ixgb_irq_disable(adapter);
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 7fec613e1675..8423cb6875f0 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -1,5 +1,10 @@
1/* 1/*
2 * sonic.c 2 * jazzsonic.c
3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, and (from the mac68k project) introduced
7 * dhd's support for 16-bit cards.
3 * 8 *
4 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) 9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
5 * 10 *
@@ -28,8 +33,8 @@
28#include <linux/netdevice.h> 33#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
30#include <linux/skbuff.h> 35#include <linux/skbuff.h>
31#include <linux/bitops.h>
32#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/dma-mapping.h>
33 38
34#include <asm/bootinfo.h> 39#include <asm/bootinfo.h>
35#include <asm/system.h> 40#include <asm/system.h>
@@ -44,22 +49,20 @@ static struct platform_device *jazz_sonic_device;
44 49
45#define SONIC_MEM_SIZE 0x100 50#define SONIC_MEM_SIZE 0x100
46 51
47#define SREGS_PAD(n) u16 n;
48
49#include "sonic.h" 52#include "sonic.h"
50 53
51/* 54/*
52 * Macros to access SONIC registers 55 * Macros to access SONIC registers
53 */ 56 */
54#define SONIC_READ(reg) (*((volatile unsigned int *)base_addr+reg)) 57#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg))
55 58
56#define SONIC_WRITE(reg,val) \ 59#define SONIC_WRITE(reg,val) \
57do { \ 60do { \
58 *((volatile unsigned int *)base_addr+(reg)) = (val); \ 61 *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
59} while (0) 62} while (0)
60 63
61 64
62/* use 0 for production, 1 for verification, >2 for debug */ 65/* use 0 for production, 1 for verification, >1 for debug */
63#ifdef SONIC_DEBUG 66#ifdef SONIC_DEBUG
64static unsigned int sonic_debug = SONIC_DEBUG; 67static unsigned int sonic_debug = SONIC_DEBUG;
65#else 68#else
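The macro hunk above rebinds SONIC_READ/SONIC_WRITE from a function-local base_addr to dev->base_addr, so any function with the net_device in scope can use them; the volatile cast is what forces one real bus access per invocation. A hedged usage sketch (the register and bit names are assumed from sonic.h, and the macros come from the hunk above):

/* Reset the chip and read the command register back; every access
 * goes through the volatile pointer, so none can be cached away.
 * Requires a 'dev' in scope, exactly as the new macros do. */
static unsigned int example_sonic_reset(struct net_device *dev)
{
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);   /* assumed names */
        return SONIC_READ(SONIC_CMD);
}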
@@ -85,18 +88,18 @@ static unsigned short known_revisions[] =
85 0xffff /* end of list */ 88 0xffff /* end of list */
86}; 89};
87 90
88static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr, 91static int __init sonic_probe1(struct net_device *dev)
89 unsigned int irq)
90{ 92{
91 static unsigned version_printed; 93 static unsigned version_printed;
92 unsigned int silicon_revision; 94 unsigned int silicon_revision;
93 unsigned int val; 95 unsigned int val;
94 struct sonic_local *lp; 96 struct sonic_local *lp = netdev_priv(dev);
95 int err = -ENODEV; 97 int err = -ENODEV;
96 int i; 98 int i;
97 99
98 if (!request_mem_region(base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) 100 if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
99 return -EBUSY; 101 return -EBUSY;
102
100 /* 103 /*
101 * get the Silicon Revision ID. If this is one of the known 104 * get the Silicon Revision ID. If this is one of the known
102 * one assume that we found a SONIC ethernet controller at 105 * one assume that we found a SONIC ethernet controller at
@@ -120,11 +123,7 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
120 if (sonic_debug && version_printed++ == 0) 123 if (sonic_debug && version_printed++ == 0)
121 printk(version); 124 printk(version);
122 125
123 printk("%s: Sonic ethernet found at 0x%08lx, ", dev->name, base_addr); 126 printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr);
124
125 /* Fill in the 'dev' fields. */
126 dev->base_addr = base_addr;
127 dev->irq = irq;
128 127
129 /* 128 /*
130 * Put the sonic into software reset, then 129 * Put the sonic into software reset, then
@@ -138,84 +137,44 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
138 dev->dev_addr[i*2+1] = val >> 8; 137 dev->dev_addr[i*2+1] = val >> 8;
139 } 138 }
140 139
141 printk("HW Address ");
142 for (i = 0; i < 6; i++) {
143 printk("%2.2x", dev->dev_addr[i]);
144 if (i<5)
145 printk(":");
146 }
147
148 printk(" IRQ %d\n", irq);
149
150 err = -ENOMEM; 140 err = -ENOMEM;
151 141
152 /* Initialize the device structure. */ 142 /* Initialize the device structure. */
153 if (dev->priv == NULL) {
154 /*
155 * the memory must be located in the same 64kb segment
156 */
157 lp = NULL;
158 i = 0;
159 do {
160 lp = kmalloc(sizeof(*lp), GFP_KERNEL);
161 if ((unsigned long) lp >> 16
162 != ((unsigned long)lp + sizeof(*lp) ) >> 16) {
163 /* FIXME, free the memory later */
164 kfree(lp);
165 lp = NULL;
166 }
167 } while (lp == NULL && i++ < 20);
168
169 if (lp == NULL) {
170 printk("%s: couldn't allocate memory for descriptors\n",
171 dev->name);
172 goto out;
173 }
174 143
175 memset(lp, 0, sizeof(struct sonic_local)); 144 lp->dma_bitmode = SONIC_BITMODE32;
176
177 /* get the virtual dma address */
178 lp->cda_laddr = vdma_alloc(CPHYSADDR(lp),sizeof(*lp));
179 if (lp->cda_laddr == ~0UL) {
180 printk("%s: couldn't get DMA page entry for "
181 "descriptors\n", dev->name);
182 goto out1;
183 }
184
185 lp->tda_laddr = lp->cda_laddr + sizeof (lp->cda);
186 lp->rra_laddr = lp->tda_laddr + sizeof (lp->tda);
187 lp->rda_laddr = lp->rra_laddr + sizeof (lp->rra);
188
189 /* allocate receive buffer area */
190 /* FIXME, maybe we should use skbs */
191 lp->rba = kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL);
192 if (!lp->rba) {
193 printk("%s: couldn't allocate receive buffers\n",
194 dev->name);
195 goto out2;
196 }
197 145
198 /* get virtual dma address */ 146 /* Allocate the entire chunk of memory for the descriptors.
199 lp->rba_laddr = vdma_alloc(CPHYSADDR(lp->rba), 147 Note that this cannot cross a 64K boundary. */
200 SONIC_NUM_RRS * SONIC_RBSIZE); 148 if ((lp->descriptors = dma_alloc_coherent(lp->device,
201 if (lp->rba_laddr == ~0UL) { 149 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
202 printk("%s: couldn't get DMA page entry for receive " 150 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
203 "buffers\n",dev->name); 151 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
204 goto out3; 152 goto out;
205 }
206
207 /* now convert pointer to KSEG1 pointer */
208 lp->rba = (char *)KSEG1ADDR(lp->rba);
209 flush_cache_all();
210 dev->priv = (struct sonic_local *)KSEG1ADDR(lp);
211 } 153 }
212 154
213 lp = (struct sonic_local *)dev->priv; 155 /* Now set up the pointers to point to the appropriate places */
156 lp->cda = lp->descriptors;
157 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
158 * SONIC_BUS_SCALE(lp->dma_bitmode));
159 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
160 * SONIC_BUS_SCALE(lp->dma_bitmode));
161 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
162 * SONIC_BUS_SCALE(lp->dma_bitmode));
163
164 lp->cda_laddr = lp->descriptors_laddr;
165 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
166 * SONIC_BUS_SCALE(lp->dma_bitmode));
167 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
168 * SONIC_BUS_SCALE(lp->dma_bitmode));
169 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
170 * SONIC_BUS_SCALE(lp->dma_bitmode));
171
214 dev->open = sonic_open; 172 dev->open = sonic_open;
215 dev->stop = sonic_close; 173 dev->stop = sonic_close;
216 dev->hard_start_xmit = sonic_send_packet; 174 dev->hard_start_xmit = sonic_send_packet;
217 dev->get_stats = sonic_get_stats; 175 dev->get_stats = sonic_get_stats;
218 dev->set_multicast_list = &sonic_multicast_list; 176 dev->set_multicast_list = &sonic_multicast_list;
177 dev->tx_timeout = sonic_tx_timeout;
219 dev->watchdog_timeo = TX_TIMEOUT; 178 dev->watchdog_timeo = TX_TIMEOUT;
220 179
221 /* 180 /*
@@ -226,14 +185,8 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
226 SONIC_WRITE(SONIC_MPT,0xffff); 185 SONIC_WRITE(SONIC_MPT,0xffff);
227 186
228 return 0; 187 return 0;
229out3:
230 kfree(lp->rba);
231out2:
232 vdma_free(lp->cda_laddr);
233out1:
234 kfree(lp);
235out: 188out:
236 release_region(base_addr, SONIC_MEM_SIZE); 189 release_region(dev->base_addr, SONIC_MEM_SIZE);
237 return err; 190 return err;
238} 191}
239 192
@@ -245,7 +198,6 @@ static int __init jazz_sonic_probe(struct device *device)
245{ 198{
246 struct net_device *dev; 199 struct net_device *dev;
247 struct sonic_local *lp; 200 struct sonic_local *lp;
248 unsigned long base_addr;
249 int err = 0; 201 int err = 0;
250 int i; 202 int i;
251 203
@@ -255,21 +207,26 @@ static int __init jazz_sonic_probe(struct device *device)
255 if (mips_machgroup != MACH_GROUP_JAZZ) 207 if (mips_machgroup != MACH_GROUP_JAZZ)
256 return -ENODEV; 208 return -ENODEV;
257 209
258 dev = alloc_etherdev(0); 210 dev = alloc_etherdev(sizeof(struct sonic_local));
259 if (!dev) 211 if (!dev)
260 return -ENOMEM; 212 return -ENOMEM;
261 213
214 lp = netdev_priv(dev);
215 lp->device = device;
216 SET_NETDEV_DEV(dev, device);
217 SET_MODULE_OWNER(dev);
218
262 netdev_boot_setup_check(dev); 219 netdev_boot_setup_check(dev);
263 base_addr = dev->base_addr;
264 220
265 if (base_addr >= KSEG0) { /* Check a single specified location. */ 221 if (dev->base_addr >= KSEG0) { /* Check a single specified location. */
266 err = sonic_probe1(dev, base_addr, dev->irq); 222 err = sonic_probe1(dev);
267 } else if (base_addr != 0) { /* Don't probe at all. */ 223 } else if (dev->base_addr != 0) { /* Don't probe at all. */
268 err = -ENXIO; 224 err = -ENXIO;
269 } else { 225 } else {
270 for (i = 0; sonic_portlist[i].port; i++) { 226 for (i = 0; sonic_portlist[i].port; i++) {
271 int io = sonic_portlist[i].port; 227 dev->base_addr = sonic_portlist[i].port;
272 if (sonic_probe1(dev, io, sonic_portlist[i].irq) == 0) 228 dev->irq = sonic_portlist[i].irq;
229 if (sonic_probe1(dev) == 0)
273 break; 230 break;
274 } 231 }
275 if (!sonic_portlist[i].port) 232 if (!sonic_portlist[i].port)
@@ -281,14 +238,17 @@ static int __init jazz_sonic_probe(struct device *device)
281 if (err) 238 if (err)
282 goto out1; 239 goto out1;
283 240
241 printk("%s: MAC ", dev->name);
242 for (i = 0; i < 6; i++) {
243 printk("%2.2x", dev->dev_addr[i]);
244 if (i < 5)
245 printk(":");
246 }
247 printk(" IRQ %d\n", dev->irq);
248
284 return 0; 249 return 0;
285 250
286out1: 251out1:
287 lp = dev->priv;
288 vdma_free(lp->rba_laddr);
289 kfree(lp->rba);
290 vdma_free(lp->cda_laddr);
291 kfree(lp);
292 release_region(dev->base_addr, SONIC_MEM_SIZE); 252 release_region(dev->base_addr, SONIC_MEM_SIZE);
293out: 253out:
294 free_netdev(dev); 254 free_netdev(dev);
@@ -296,21 +256,22 @@ out:
296 return err; 256 return err;
297} 257}
298 258
299/* 259MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
300 * SONIC uses a normal IRQ 260module_param(sonic_debug, int, 0);
301 */ 261MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
302#define sonic_request_irq request_irq
303#define sonic_free_irq free_irq
304 262
305#define sonic_chiptomem(x) KSEG1ADDR(vdma_log2phys(x)) 263#define SONIC_IRQ_FLAG SA_INTERRUPT
306 264
307#include "sonic.c" 265#include "sonic.c"
308 266
309static int __devexit jazz_sonic_device_remove (struct device *device) 267static int __devexit jazz_sonic_device_remove (struct device *device)
310{ 268{
311 struct net_device *dev = device->driver_data; 269 struct net_device *dev = device->driver_data;
270 struct sonic_local* lp = netdev_priv(dev);
312 271
313 unregister_netdev (dev); 272 unregister_netdev (dev);
273 dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
274 lp->descriptors, lp->descriptors_laddr);
314 release_region (dev->base_addr, SONIC_MEM_SIZE); 275 release_region (dev->base_addr, SONIC_MEM_SIZE);
315 free_netdev (dev); 276 free_netdev (dev);
316 277
@@ -323,7 +284,7 @@ static struct device_driver jazz_sonic_driver = {
323 .probe = jazz_sonic_probe, 284 .probe = jazz_sonic_probe,
324 .remove = __devexit_p(jazz_sonic_device_remove), 285 .remove = __devexit_p(jazz_sonic_device_remove),
325}; 286};
326 287
327static void jazz_sonic_platform_release (struct device *device) 288static void jazz_sonic_platform_release (struct device *device)
328{ 289{
329 struct platform_device *pldev; 290 struct platform_device *pldev;
@@ -336,10 +297,11 @@ static void jazz_sonic_platform_release (struct device *device)
336static int __init jazz_sonic_init_module(void) 297static int __init jazz_sonic_init_module(void)
337{ 298{
338 struct platform_device *pldev; 299 struct platform_device *pldev;
300 int err;
339 301
340 if (driver_register(&jazz_sonic_driver)) { 302 if ((err = driver_register(&jazz_sonic_driver))) {
341 printk(KERN_ERR "Driver registration failed\n"); 303 printk(KERN_ERR "Driver registration failed\n");
342 return -ENOMEM; 304 return err;
343 } 305 }
344 306
345 jazz_sonic_device = NULL; 307 jazz_sonic_device = NULL;
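A standalone sketch of the descriptor carve-up the converted sonic_probe1() performs above. The counts and sizes below are illustrative stand-ins for the SIZEOF_* and SONIC_NUM_* constants in sonic.h; the point is that one dma_alloc_coherent() block is split into CAM, TX, RX and resource areas, each scaled by the DMA bus width, and the whole region must not cross a 64K boundary.

#include <stdio.h>

#define SONIC_BITMODE16 0
#define SONIC_BITMODE32 1
#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)

/* illustrative stand-ins, not the driver's real values */
#define SIZEOF_SONIC_CDA 17
#define SIZEOF_SONIC_TD  8
#define SIZEOF_SONIC_RD  4
#define SONIC_NUM_TDS    16
#define SONIC_NUM_RDS    16

int main(void)
{
        int bitmode = SONIC_BITMODE32;
        unsigned long base = 0x10000;   /* pretend dma_alloc_coherent() result */
        unsigned long cda = base;
        unsigned long tda = cda + SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(bitmode);
        unsigned long rda = tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS
                                * SONIC_BUS_SCALE(bitmode);
        unsigned long rra = rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS
                                * SONIC_BUS_SCALE(bitmode);

        printf("cda=%#lx tda=%#lx rda=%#lx rra=%#lx\n", cda, tda, rda, rra);
        return 0;
}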
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1f61f0cc95d8..690a1aae0b34 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,6 +68,7 @@ static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
68 * of largesending device modulo TCP checksum, which is ignored for loopback. 68 * of largesending device modulo TCP checksum, which is ignored for loopback.
69 */ 69 */
70 70
71#ifdef LOOPBACK_TSO
71static void emulate_large_send_offload(struct sk_buff *skb) 72static void emulate_large_send_offload(struct sk_buff *skb)
72{ 73{
73 struct iphdr *iph = skb->nh.iph; 74 struct iphdr *iph = skb->nh.iph;
@@ -119,6 +120,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
119 120
120 dev_kfree_skb(skb); 121 dev_kfree_skb(skb);
121} 122}
123#endif /* LOOPBACK_TSO */
122 124
123/* 125/*
124 * The higher levels take care of making this non-reentrant (it's 126 * The higher levels take care of making this non-reentrant (it's
@@ -130,12 +132,13 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
130 132
131 skb_orphan(skb); 133 skb_orphan(skb);
132 134
133 skb->protocol=eth_type_trans(skb,dev); 135 skb->protocol = eth_type_trans(skb,dev);
134 skb->dev=dev; 136 skb->dev = dev;
135#ifndef LOOPBACK_MUST_CHECKSUM 137#ifndef LOOPBACK_MUST_CHECKSUM
136 skb->ip_summed = CHECKSUM_UNNECESSARY; 138 skb->ip_summed = CHECKSUM_UNNECESSARY;
137#endif 139#endif
138 140
141#ifdef LOOPBACK_TSO
139 if (skb_shinfo(skb)->tso_size) { 142 if (skb_shinfo(skb)->tso_size) {
140 BUG_ON(skb->protocol != htons(ETH_P_IP)); 143 BUG_ON(skb->protocol != htons(ETH_P_IP));
141 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
@@ -143,14 +146,14 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
143 emulate_large_send_offload(skb); 146 emulate_large_send_offload(skb);
144 return 0; 147 return 0;
145 } 148 }
146 149#endif
147 dev->last_rx = jiffies; 150 dev->last_rx = jiffies;
148 151
149 lb_stats = &per_cpu(loopback_stats, get_cpu()); 152 lb_stats = &per_cpu(loopback_stats, get_cpu());
150 lb_stats->rx_bytes += skb->len; 153 lb_stats->rx_bytes += skb->len;
151 lb_stats->tx_bytes += skb->len; 154 lb_stats->tx_bytes = lb_stats->rx_bytes;
152 lb_stats->rx_packets++; 155 lb_stats->rx_packets++;
153 lb_stats->tx_packets++; 156 lb_stats->tx_packets = lb_stats->rx_packets;
154 put_cpu(); 157 put_cpu();
155 158
156 netif_rx(skb); 159 netif_rx(skb);
@@ -208,9 +211,12 @@ struct net_device loopback_dev = {
208 .type = ARPHRD_LOOPBACK, /* 0x0001*/ 211 .type = ARPHRD_LOOPBACK, /* 0x0001*/
209 .rebuild_header = eth_rebuild_header, 212 .rebuild_header = eth_rebuild_header,
210 .flags = IFF_LOOPBACK, 213 .flags = IFF_LOOPBACK,
211 .features = NETIF_F_SG|NETIF_F_FRAGLIST 214 .features = NETIF_F_SG | NETIF_F_FRAGLIST
212 |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA 215#ifdef LOOPBACK_TSO
213 |NETIF_F_LLTX, 216 | NETIF_F_TSO
217#endif
218 | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA
219 | NETIF_F_LLTX,
214 .ethtool_ops = &loopback_ethtool_ops, 220 .ethtool_ops = &loopback_ethtool_ops,
215}; 221};
216 222
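A small sketch of the statistics idiom introduced in loopback_xmit() above: every packet sent on the loopback device is also the packet received, so the tx counters are mirrored from the rx counters rather than incremented separately, which keeps the two from ever drifting apart.

#include <stdio.h>

struct stats { unsigned long rx_bytes, tx_bytes, rx_packets, tx_packets; };

static void loopback_account(struct stats *s, unsigned int len)
{
        s->rx_bytes += len;
        s->tx_bytes = s->rx_bytes;      /* mirrored, so the pair never drifts */
        s->rx_packets++;
        s->tx_packets = s->rx_packets;
}

int main(void)
{
        struct stats s = { 0 };

        loopback_account(&s, 60);
        loopback_account(&s, 1500);
        printf("rx %lu pkts / %lu bytes, tx %lu pkts / %lu bytes\n",
               s.rx_packets, s.rx_bytes, s.tx_packets, s.tx_bytes);
        return 0;
}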
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index be28c65de729..405e18365ede 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -1,6 +1,12 @@
1/* 1/*
2 * macsonic.c 2 * macsonic.c
3 * 3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, converted to unified driver model, made it work as
7 * a module again, and from the mac68k project, introduced more 32-bit cards
8 * and dhd's support for 16-bit cards.
9 *
4 * (C) 1998 Alan Cox 10 * (C) 1998 Alan Cox
5 * 11 *
6 * Debugging Andreas Ehliar, Michael Schmitz 12 * Debugging Andreas Ehliar, Michael Schmitz
@@ -26,8 +32,8 @@
26 */ 32 */
27 33
28#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/module.h>
29#include <linux/types.h> 36#include <linux/types.h>
30#include <linux/ctype.h>
31#include <linux/fcntl.h> 37#include <linux/fcntl.h>
32#include <linux/interrupt.h> 38#include <linux/interrupt.h>
33#include <linux/init.h> 39#include <linux/init.h>
@@ -41,8 +47,8 @@
41#include <linux/netdevice.h> 47#include <linux/netdevice.h>
42#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
43#include <linux/skbuff.h> 49#include <linux/skbuff.h>
44#include <linux/module.h> 50#include <linux/device.h>
45#include <linux/bitops.h> 51#include <linux/dma-mapping.h>
46 52
47#include <asm/bootinfo.h> 53#include <asm/bootinfo.h>
48#include <asm/system.h> 54#include <asm/system.h>
@@ -54,25 +60,28 @@
54#include <asm/macints.h> 60#include <asm/macints.h>
55#include <asm/mac_via.h> 61#include <asm/mac_via.h>
56 62
57#define SREGS_PAD(n) u16 n; 63static char mac_sonic_string[] = "macsonic";
64static struct platform_device *mac_sonic_device;
58 65
59#include "sonic.h" 66#include "sonic.h"
60 67
61#define SONIC_READ(reg) \ 68/* These should basically be bus-size and endian independent (since
62 nubus_readl(base_addr+(reg)) 69 the SONIC is at least smart enough that it uses the same endianness
63#define SONIC_WRITE(reg,val) \ 70 as the host, unlike certain less enlightened Macintosh NICs) */
64 nubus_writel((val), base_addr+(reg)) 71#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
65#define sonic_read(dev, reg) \ 72 + lp->reg_offset))
66 nubus_readl((dev)->base_addr+(reg)) 73#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
67#define sonic_write(dev, reg, val) \ 74 + lp->reg_offset))
68 nubus_writel((val), (dev)->base_addr+(reg)) 75
69 76/* use 0 for production, 1 for verification, >1 for debug */
77#ifdef SONIC_DEBUG
78static unsigned int sonic_debug = SONIC_DEBUG;
79#else
80static unsigned int sonic_debug = 1;
81#endif
70 82
71static int sonic_debug;
72static int sonic_version_printed; 83static int sonic_version_printed;
73 84
74static int reg_offset;
75
76extern int mac_onboard_sonic_probe(struct net_device* dev); 85extern int mac_onboard_sonic_probe(struct net_device* dev);
77extern int mac_nubus_sonic_probe(struct net_device* dev); 86extern int mac_nubus_sonic_probe(struct net_device* dev);
78 87
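The new macsonic SONIC_READ()/SONIC_WRITE() macros above encode two facts: registers sit on 32-bit strides (reg * 4), and 16-bit cards present the live halfword at byte offset 0 while 32-bit cards present it at offset 2. A compilable sketch of that address arithmetic, with an illustrative base address and register index:

#include <stdio.h>

static unsigned long sonic_reg_addr(unsigned long base, int reg, int reg_offset)
{
        return base + reg * 4 + reg_offset;     /* 32-bit stride per register */
}

int main(void)
{
        unsigned long base = 0x50f0a000;        /* illustrative base address */
        int reg = 10;                           /* illustrative register index */

        printf("16-bit card: %#lx\n", sonic_reg_addr(base, reg, 0));
        printf("32-bit card: %#lx\n", sonic_reg_addr(base, reg, 2));
        return 0;
}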
@@ -108,40 +117,6 @@ enum macsonic_type {
108 117
109#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr) 118#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
110 119
111struct net_device * __init macsonic_probe(int unit)
112{
113 struct net_device *dev = alloc_etherdev(0);
114 int err;
115
116 if (!dev)
117 return ERR_PTR(-ENOMEM);
118
119 if (unit >= 0)
120 sprintf(dev->name, "eth%d", unit);
121
122 SET_MODULE_OWNER(dev);
123
124 /* This will catch fatal stuff like -ENOMEM as well as success */
125 err = mac_onboard_sonic_probe(dev);
126 if (err == 0)
127 goto found;
128 if (err != -ENODEV)
129 goto out;
130 err = mac_nubus_sonic_probe(dev);
131 if (err)
132 goto out;
133found:
134 err = register_netdev(dev);
135 if (err)
136 goto out1;
137 return dev;
138out1:
139 kfree(dev->priv);
140out:
141 free_netdev(dev);
142 return ERR_PTR(err);
143}
144
145/* 120/*
146 * For reversing the PROM address 121 * For reversing the PROM address
147 */ 122 */
@@ -160,103 +135,55 @@ static inline void bit_reverse_addr(unsigned char addr[6])
160 135
161int __init macsonic_init(struct net_device* dev) 136int __init macsonic_init(struct net_device* dev)
162{ 137{
163 struct sonic_local* lp = NULL; 138 struct sonic_local* lp = netdev_priv(dev);
164 int i;
165 139
166 /* Allocate the entire chunk of memory for the descriptors. 140 /* Allocate the entire chunk of memory for the descriptors.
167 Note that this cannot cross a 64K boundary. */ 141 Note that this cannot cross a 64K boundary. */
168 for (i = 0; i < 20; i++) { 142 if ((lp->descriptors = dma_alloc_coherent(lp->device,
169 unsigned long desc_base, desc_top; 143 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
170 if((lp = kmalloc(sizeof(struct sonic_local), GFP_KERNEL | GFP_DMA)) == NULL) { 144 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
171 printk(KERN_ERR "%s: couldn't allocate descriptor buffers\n", dev->name); 145 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
172 return -ENOMEM;
173 }
174
175 desc_base = (unsigned long) lp;
176 desc_top = desc_base + sizeof(struct sonic_local);
177 if ((desc_top & 0xffff) >= (desc_base & 0xffff))
178 break;
179 /* Hmm. try again (FIXME: does this actually work?) */
180 kfree(lp);
181 printk(KERN_DEBUG
182 "%s: didn't get continguous chunk [%08lx - %08lx], trying again\n",
183 dev->name, desc_base, desc_top);
184 }
185
186 if (lp == NULL) {
187 printk(KERN_ERR "%s: tried 20 times to allocate descriptor buffers, giving up.\n",
188 dev->name);
189 return -ENOMEM; 146 return -ENOMEM;
190 } 147 }
191
192 dev->priv = lp;
193
194#if 0
195 /* this code is only here as a curiosity... mainly, where the
196 fuck did SONIC_BUS_SCALE come from, and what was it supposed
197 to do? the normal allocation works great for 32 bit stuffs.. */
198 148
199 /* Now set up the pointers to point to the appropriate places */ 149 /* Now set up the pointers to point to the appropriate places */
200 lp->cda = lp->sonic_desc; 150 lp->cda = lp->descriptors;
201 lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); 151 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
152 * SONIC_BUS_SCALE(lp->dma_bitmode));
202 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS 153 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
203 * SONIC_BUS_SCALE(lp->dma_bitmode)); 154 * SONIC_BUS_SCALE(lp->dma_bitmode));
204 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 155 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
205 * SONIC_BUS_SCALE(lp->dma_bitmode)); 156 * SONIC_BUS_SCALE(lp->dma_bitmode));
206 157
207#endif 158 lp->cda_laddr = lp->descriptors_laddr;
208 159 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
209 memset(lp, 0, sizeof(struct sonic_local)); 160 * SONIC_BUS_SCALE(lp->dma_bitmode));
210 161 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
211 lp->cda_laddr = (unsigned int)&(lp->cda); 162 * SONIC_BUS_SCALE(lp->dma_bitmode));
212 lp->tda_laddr = (unsigned int)lp->tda; 163 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
213 lp->rra_laddr = (unsigned int)lp->rra; 164 * SONIC_BUS_SCALE(lp->dma_bitmode));
214 lp->rda_laddr = (unsigned int)lp->rda;
215
216 /* FIXME, maybe we should use skbs */
217 if ((lp->rba = (char *)
218 kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL | GFP_DMA)) == NULL) {
219 printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name);
220 dev->priv = NULL;
221 kfree(lp);
222 return -ENOMEM;
223 }
224
225 lp->rba_laddr = (unsigned int)lp->rba;
226
227 {
228 int rs, ds;
229
230 /* almost always 12*4096, but let's not take chances */
231 rs = ((SONIC_NUM_RRS * SONIC_RBSIZE + 4095) / 4096) * 4096;
232 /* almost always under a page, but let's not take chances */
233 ds = ((sizeof(struct sonic_local) + 4095) / 4096) * 4096;
234 kernel_set_cachemode(lp->rba, rs, IOMAP_NOCACHE_SER);
235 kernel_set_cachemode(lp, ds, IOMAP_NOCACHE_SER);
236 }
237
238#if 0
239 flush_cache_all();
240#endif
241 165
242 dev->open = sonic_open; 166 dev->open = sonic_open;
243 dev->stop = sonic_close; 167 dev->stop = sonic_close;
244 dev->hard_start_xmit = sonic_send_packet; 168 dev->hard_start_xmit = sonic_send_packet;
245 dev->get_stats = sonic_get_stats; 169 dev->get_stats = sonic_get_stats;
246 dev->set_multicast_list = &sonic_multicast_list; 170 dev->set_multicast_list = &sonic_multicast_list;
171 dev->tx_timeout = sonic_tx_timeout;
172 dev->watchdog_timeo = TX_TIMEOUT;
247 173
248 /* 174 /*
249 * clear tally counter 175 * clear tally counter
250 */ 176 */
251 sonic_write(dev, SONIC_CRCT, 0xffff); 177 SONIC_WRITE(SONIC_CRCT, 0xffff);
252 sonic_write(dev, SONIC_FAET, 0xffff); 178 SONIC_WRITE(SONIC_FAET, 0xffff);
253 sonic_write(dev, SONIC_MPT, 0xffff); 179 SONIC_WRITE(SONIC_MPT, 0xffff);
254 180
255 return 0; 181 return 0;
256} 182}
257 183
258int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) 184int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
259{ 185{
186 struct sonic_local *lp = netdev_priv(dev);
260 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 187 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
261 int i; 188 int i;
262 189
@@ -270,6 +197,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
270 why this is so. */ 197 why this is so. */
271 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 198 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
272 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 199 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
200 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
273 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 201 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
274 bit_reverse_addr(dev->dev_addr); 202 bit_reverse_addr(dev->dev_addr);
275 else 203 else
@@ -281,22 +209,23 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
281 the card... */ 209 the card... */
282 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 210 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
283 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 211 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
212 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
284 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 213 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
285 { 214 {
286 unsigned short val; 215 unsigned short val;
287 216
288 printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n"); 217 printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n");
289 218
290 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 219 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
291 sonic_write(dev, SONIC_CEP, 15); 220 SONIC_WRITE(SONIC_CEP, 15);
292 221
293 val = sonic_read(dev, SONIC_CAP2); 222 val = SONIC_READ(SONIC_CAP2);
294 dev->dev_addr[5] = val >> 8; 223 dev->dev_addr[5] = val >> 8;
295 dev->dev_addr[4] = val & 0xff; 224 dev->dev_addr[4] = val & 0xff;
296 val = sonic_read(dev, SONIC_CAP1); 225 val = SONIC_READ(SONIC_CAP1);
297 dev->dev_addr[3] = val >> 8; 226 dev->dev_addr[3] = val >> 8;
298 dev->dev_addr[2] = val & 0xff; 227 dev->dev_addr[2] = val & 0xff;
299 val = sonic_read(dev, SONIC_CAP0); 228 val = SONIC_READ(SONIC_CAP0);
300 dev->dev_addr[1] = val >> 8; 229 dev->dev_addr[1] = val >> 8;
301 dev->dev_addr[0] = val & 0xff; 230 dev->dev_addr[0] = val & 0xff;
302 231
@@ -311,6 +240,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
311 240
312 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 241 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
313 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 242 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
243 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
314 memcmp(dev->dev_addr, "\x00\x05\x02", 3)) 244 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
315 { 245 {
316 /* 246 /*
@@ -325,8 +255,9 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
325{ 255{
326 /* Bwahahaha */ 256 /* Bwahahaha */
327 static int once_is_more_than_enough; 257 static int once_is_more_than_enough;
328 int i; 258 struct sonic_local* lp = netdev_priv(dev);
329 int dma_bitmode; 259 int sr;
260 int commslot = 0;
330 261
331 if (once_is_more_than_enough) 262 if (once_is_more_than_enough)
332 return -ENODEV; 263 return -ENODEV;
@@ -335,20 +266,18 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
335 if (!MACH_IS_MAC) 266 if (!MACH_IS_MAC)
336 return -ENODEV; 267 return -ENODEV;
337 268
338 printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
339
340 if (macintosh_config->ether_type != MAC_ETHER_SONIC) 269 if (macintosh_config->ether_type != MAC_ETHER_SONIC)
341 {
342 printk("none.\n");
343 return -ENODEV; 270 return -ENODEV;
344 } 271
345 272 printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
273
346 /* Bogus probing, on the models which may or may not have 274 /* Bogus probing, on the models which may or may not have
347 Ethernet (BTW, the Ethernet *is* always at the same 275 Ethernet (BTW, the Ethernet *is* always at the same
348 address, and nothing else lives there, at least if Apple's 276 address, and nothing else lives there, at least if Apple's
349 documentation is to be believed) */ 277 documentation is to be believed) */
350 if (macintosh_config->ident == MAC_MODEL_Q630 || 278 if (macintosh_config->ident == MAC_MODEL_Q630 ||
351 macintosh_config->ident == MAC_MODEL_P588 || 279 macintosh_config->ident == MAC_MODEL_P588 ||
280 macintosh_config->ident == MAC_MODEL_P575 ||
352 macintosh_config->ident == MAC_MODEL_C610) { 281 macintosh_config->ident == MAC_MODEL_C610) {
353 unsigned long flags; 282 unsigned long flags;
354 int card_present; 283 int card_present;
@@ -361,13 +290,13 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
361 printk("none.\n"); 290 printk("none.\n");
362 return -ENODEV; 291 return -ENODEV;
363 } 292 }
293 commslot = 1;
364 } 294 }
365 295
366 printk("yes\n"); 296 printk("yes\n");
367 297
368 /* Danger! My arms are flailing wildly! You *must* set this 298 /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
369 before using sonic_read() */ 299 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
370
371 dev->base_addr = ONBOARD_SONIC_REGISTERS; 300 dev->base_addr = ONBOARD_SONIC_REGISTERS;
372 if (via_alt_mapping) 301 if (via_alt_mapping)
373 dev->irq = IRQ_AUTO_3; 302 dev->irq = IRQ_AUTO_3;
@@ -379,84 +308,66 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
379 sonic_version_printed = 1; 308 sonic_version_printed = 1;
380 } 309 }
381 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", 310 printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
382 dev->name, dev->base_addr); 311 lp->device->bus_id, dev->base_addr);
383
384 /* Now do a song and dance routine in an attempt to determine
385 the bus width */
386 312
387 /* The PowerBook's SONIC is 16 bit always. */ 313 /* The PowerBook's SONIC is 16 bit always. */
388 if (macintosh_config->ident == MAC_MODEL_PB520) { 314 if (macintosh_config->ident == MAC_MODEL_PB520) {
389 reg_offset = 0; 315 lp->reg_offset = 0;
390 dma_bitmode = 0; 316 lp->dma_bitmode = SONIC_BITMODE16;
391 } else if (macintosh_config->ident == MAC_MODEL_C610) { 317 sr = SONIC_READ(SONIC_SR);
392 reg_offset = 0; 318 } else if (commslot) {
393 dma_bitmode = 1;
394 } else {
395 /* Some of the comm-slot cards are 16 bit. But some 319 /* Some of the comm-slot cards are 16 bit. But some
396 of them are not. The 32-bit cards use offset 2 and 320 of them are not. The 32-bit cards use offset 2 and
397 pad with zeroes or sometimes ones (I think...) 321 have known revisions; we try reading the revision
398 Therefore, if we try offset 0 and get a silicon 322 register at offset 2, and if we don't get a known
399 revision of 0, we assume 16 bit. */ 323 revision we assume 16 bit at offset 0. */
400 int sr; 324 lp->reg_offset = 2;
401 325 lp->dma_bitmode = SONIC_BITMODE16;
402 /* Technically this is not necessary since we zeroed 326
403 it above */ 327 sr = SONIC_READ(SONIC_SR);
404 reg_offset = 0; 328 if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
405 dma_bitmode = 0; 329 /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
406 sr = sonic_read(dev, SONIC_SR); 330 lp->dma_bitmode = SONIC_BITMODE32;
407 if (sr == 0 || sr == 0xffff) { 331 else {
408 reg_offset = 2; 332 lp->dma_bitmode = SONIC_BITMODE16;
409 /* 83932 is 0x0004, 83934 is 0x0100 or 0x0101 */ 333 lp->reg_offset = 0;
410 sr = sonic_read(dev, SONIC_SR); 334 sr = SONIC_READ(SONIC_SR);
411 dma_bitmode = 1;
412
413 } 335 }
414 printk(KERN_INFO 336 } else {
415 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 337 /* All onboard cards are at offset 2 with 32 bit DMA. */
416 dev->name, sr, dma_bitmode?32:16, reg_offset); 338 lp->reg_offset = 2;
339 lp->dma_bitmode = SONIC_BITMODE32;
340 sr = SONIC_READ(SONIC_SR);
417 } 341 }
418 342 printk(KERN_INFO
343 "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
344 lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset);
419 345
420 /* this carries my sincere apologies -- by the time I got to updating 346#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
421 the driver, support for "reg_offsets" appears nowhere in the sonic 347 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
422 code, going back for over a year. Fortunately, my Mac doesn't seem 348 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
423 to use whatever this was. 349#endif
424 350
425 If you know how this is supposed to be implemented, either fix it,
426 or contact me (sammy@oh.verio.com) to explain what it is. --Sam */
427
428 if(reg_offset) {
429 printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
430 return -ENODEV;
431 }
432
433 /* Software reset, then initialize control registers. */ 351 /* Software reset, then initialize control registers. */
434 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 352 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
435 sonic_write(dev, SONIC_DCR, SONIC_DCR_BMS | 353
436 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_EXBUS | 354 SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
437 (dma_bitmode ? SONIC_DCR_DW : 0)); 355 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
356 (lp->dma_bitmode ? SONIC_DCR_DW : 0));
438 357
439 /* This *must* be written back to in order to restore the 358 /* This *must* be written back to in order to restore the
440 extended programmable output bits */ 359 * extended programmable output bits, as it may not have been
441 sonic_write(dev, SONIC_DCR2, 0); 360 * initialised since the hardware reset. */
361 SONIC_WRITE(SONIC_DCR2, 0);
442 362
443 /* Clear *and* disable interrupts to be on the safe side */ 363 /* Clear *and* disable interrupts to be on the safe side */
444 sonic_write(dev, SONIC_ISR,0x7fff); 364 SONIC_WRITE(SONIC_IMR, 0);
445 sonic_write(dev, SONIC_IMR,0); 365 SONIC_WRITE(SONIC_ISR, 0x7fff);
446 366
447 /* Now look for the MAC address. */ 367 /* Now look for the MAC address. */
448 if (mac_onboard_sonic_ethernet_addr(dev) != 0) 368 if (mac_onboard_sonic_ethernet_addr(dev) != 0)
449 return -ENODEV; 369 return -ENODEV;
450 370
451 printk(KERN_INFO "MAC ");
452 for (i = 0; i < 6; i++) {
453 printk("%2.2x", dev->dev_addr[i]);
454 if (i < 5)
455 printk(":");
456 }
457
458 printk(" IRQ %d\n", dev->irq);
459
460 /* Shared init code */ 371 /* Shared init code */
461 return macsonic_init(dev); 372 return macsonic_init(dev);
462} 373}
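The comm-slot branch above decides the bus width by reading the silicon revision at register offset 2 and checking it against the known 83932/83934 values; anything else means a 16-bit card at offset 0. A runnable distillation of that decision, where fake_read_sr() stands in for SONIC_READ(SONIC_SR):

#include <stdio.h>

/* stands in for SONIC_READ(SONIC_SR) at the given register offset */
static unsigned int fake_read_sr(int reg_offset)
{
        return reg_offset == 2 ? 0x0101 : 0xffff;  /* pretend a 32-bit card */
}

int main(void)
{
        int reg_offset = 2;                /* try the 32-bit layout first */
        int dma_bits = 16;
        unsigned int sr = fake_read_sr(reg_offset);

        /* 83932 reads 0x0004 or 0x0006, 83934 reads 0x0100 or 0x0101 */
        if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101) {
                dma_bits = 32;
        } else {
                reg_offset = 0;            /* fall back to a 16-bit card */
                sr = fake_read_sr(reg_offset);
        }
        printf("revision %#06x: %d-bit DMA, register offset %d\n",
               sr, dma_bits, reg_offset);
        return 0;
}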
@@ -468,8 +379,10 @@ int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev,
468 int i; 379 int i;
469 for(i = 0; i < 6; i++) 380 for(i = 0; i < 6; i++)
470 dev->dev_addr[i] = SONIC_READ_PROM(i); 381 dev->dev_addr[i] = SONIC_READ_PROM(i);
471 /* For now we are going to assume that they're all bit-reversed */ 382
472 bit_reverse_addr(dev->dev_addr); 383 /* Some of the addresses are bit-reversed */
384 if (id != MACSONIC_DAYNA)
385 bit_reverse_addr(dev->dev_addr);
473 386
474 return 0; 387 return 0;
475} 388}
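bit_reverse_addr() above undoes the bit order used by most of these PROMs: each byte of the station address is stored with its bits mirrored, and per the change above only Dayna cards escape the fix-up. A standalone sketch (the sample input reverses to the 08:00:07 Apple prefix the driver tests for):

#include <stdio.h>

static unsigned char bit_reverse(unsigned char b)
{
        unsigned char r = 0;
        int i;

        for (i = 0; i < 8; i++)
                r |= ((b >> i) & 1) << (7 - i);
        return r;
}

int main(void)
{
        /* as stored in a PROM; reverses to 08:00:07:..., an Apple prefix */
        unsigned char addr[6] = { 0x10, 0x00, 0xe0, 0x02, 0x4c, 0x3a };
        int i;

        for (i = 0; i < 6; i++)
                addr[i] = bit_reverse(addr[i]);
        for (i = 0; i < 6; i++)
                printf("%02x%c", addr[i], i < 5 ? ':' : '\n');
        return 0;
}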
@@ -487,6 +400,15 @@ int __init macsonic_ident(struct nubus_dev* ndev)
487 else 400 else
488 return MACSONIC_APPLE; 401 return MACSONIC_APPLE;
489 } 402 }
403
404 if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
405 ndev->dr_sw == NUBUS_DRSW_DAYNA)
406 return MACSONIC_DAYNA;
407
408 if (ndev->dr_hw == NUBUS_DRHW_SONIC_LC &&
409 ndev->dr_sw == 0) { /* huh? */
410 return MACSONIC_APPLE16;
411 }
490 return -1; 412 return -1;
491} 413}
492 414
@@ -494,12 +416,12 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
494{ 416{
495 static int slots; 417 static int slots;
496 struct nubus_dev* ndev = NULL; 418 struct nubus_dev* ndev = NULL;
419 struct sonic_local* lp = netdev_priv(dev);
497 unsigned long base_addr, prom_addr; 420 unsigned long base_addr, prom_addr;
498 u16 sonic_dcr; 421 u16 sonic_dcr;
499 int id; 422 int id = -1;
500 int i; 423 int reg_offset, dma_bitmode;
501 int dma_bitmode; 424
502
503 /* Find the first SONIC that hasn't been initialized already */ 425 /* Find the first SONIC that hasn't been initialized already */
504 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, 426 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
505 NUBUS_TYPE_ETHERNET, ndev)) != NULL) 427 NUBUS_TYPE_ETHERNET, ndev)) != NULL)
@@ -521,51 +443,52 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
521 case MACSONIC_DUODOCK: 443 case MACSONIC_DUODOCK:
522 base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS; 444 base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
523 prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE; 445 prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
524 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 446 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
525 | SONIC_DCR_TFT0; 447 SONIC_DCR_TFT0;
526 reg_offset = 2; 448 reg_offset = 2;
527 dma_bitmode = 1; 449 dma_bitmode = SONIC_BITMODE32;
528 break; 450 break;
529 case MACSONIC_APPLE: 451 case MACSONIC_APPLE:
530 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 452 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
531 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; 453 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
532 sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; 454 sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
533 reg_offset = 0; 455 reg_offset = 0;
534 dma_bitmode = 1; 456 dma_bitmode = SONIC_BITMODE32;
535 break; 457 break;
536 case MACSONIC_APPLE16: 458 case MACSONIC_APPLE16:
537 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 459 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
538 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; 460 prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
539 sonic_dcr = SONIC_DCR_EXBUS 461 sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
540 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 462 SONIC_DCR_PO1 | SONIC_DCR_BMS;
541 | SONIC_DCR_PO1 | SONIC_DCR_BMS;
542 reg_offset = 0; 463 reg_offset = 0;
543 dma_bitmode = 0; 464 dma_bitmode = SONIC_BITMODE16;
544 break; 465 break;
545 case MACSONIC_DAYNALINK: 466 case MACSONIC_DAYNALINK:
546 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; 467 base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
547 prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE; 468 prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
548 sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 469 sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
549 | SONIC_DCR_PO1 | SONIC_DCR_BMS; 470 SONIC_DCR_PO1 | SONIC_DCR_BMS;
550 reg_offset = 0; 471 reg_offset = 0;
551 dma_bitmode = 0; 472 dma_bitmode = SONIC_BITMODE16;
552 break; 473 break;
553 case MACSONIC_DAYNA: 474 case MACSONIC_DAYNA:
554 base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS; 475 base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
555 prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR; 476 prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
556 sonic_dcr = SONIC_DCR_BMS 477 sonic_dcr = SONIC_DCR_BMS |
557 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; 478 SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
558 reg_offset = 0; 479 reg_offset = 0;
559 dma_bitmode = 0; 480 dma_bitmode = SONIC_BITMODE16;
560 break; 481 break;
561 default: 482 default:
562 printk(KERN_ERR "macsonic: WTF, id is %d\n", id); 483 printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
563 return -ENODEV; 484 return -ENODEV;
564 } 485 }
565 486
566 /* Danger! My arms are flailing wildly! You *must* set this 487 /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
567 before using sonic_read() */ 488 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
568 dev->base_addr = base_addr; 489 dev->base_addr = base_addr;
490 lp->reg_offset = reg_offset;
491 lp->dma_bitmode = dma_bitmode;
569 dev->irq = SLOT2IRQ(ndev->board->slot); 492 dev->irq = SLOT2IRQ(ndev->board->slot);
570 493
571 if (!sonic_version_printed) { 494 if (!sonic_version_printed) {
@@ -573,29 +496,66 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
573 sonic_version_printed = 1; 496 sonic_version_printed = 1;
574 } 497 }
575 printk(KERN_INFO "%s: %s in slot %X\n", 498 printk(KERN_INFO "%s: %s in slot %X\n",
576 dev->name, ndev->board->name, ndev->board->slot); 499 lp->device->bus_id, ndev->board->name, ndev->board->slot);
577 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", 500 printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
578 dev->name, sonic_read(dev, SONIC_SR), dma_bitmode?32:16, reg_offset); 501 lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
579 502
580 if(reg_offset) { 503#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
581 printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name); 504 printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
582 return -ENODEV; 505 SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
583 } 506#endif
584 507
585 /* Software reset, then initialize control registers. */ 508 /* Software reset, then initialize control registers. */
586 sonic_write(dev, SONIC_CMD, SONIC_CR_RST); 509 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
587 sonic_write(dev, SONIC_DCR, sonic_dcr 510 SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0));
588 | (dma_bitmode ? SONIC_DCR_DW : 0)); 511 /* This *must* be written back to in order to restore the
512 * extended programmable output bits, as it may not have been
513 * initialised since the hardware reset. */
514 SONIC_WRITE(SONIC_DCR2, 0);
589 515
590 /* Clear *and* disable interrupts to be on the safe side */ 516 /* Clear *and* disable interrupts to be on the safe side */
591 sonic_write(dev, SONIC_ISR,0x7fff); 517 SONIC_WRITE(SONIC_IMR, 0);
592 sonic_write(dev, SONIC_IMR,0); 518 SONIC_WRITE(SONIC_ISR, 0x7fff);
593 519
594 /* Now look for the MAC address. */ 520 /* Now look for the MAC address. */
595 if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0) 521 if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
596 return -ENODEV; 522 return -ENODEV;
597 523
598 printk(KERN_INFO "MAC "); 524 /* Shared init code */
525 return macsonic_init(dev);
526}
527
528static int __init mac_sonic_probe(struct device *device)
529{
530 struct net_device *dev;
531 struct sonic_local *lp;
532 int err;
533 int i;
534
535 dev = alloc_etherdev(sizeof(struct sonic_local));
536 if (!dev)
537 return -ENOMEM;
538
539 lp = netdev_priv(dev);
540 lp->device = device;
541 SET_NETDEV_DEV(dev, device);
542 SET_MODULE_OWNER(dev);
543
544 /* This will catch fatal stuff like -ENOMEM as well as success */
545 err = mac_onboard_sonic_probe(dev);
546 if (err == 0)
547 goto found;
548 if (err != -ENODEV)
549 goto out;
550 err = mac_nubus_sonic_probe(dev);
551 if (err)
552 goto out;
553found:
554 err = register_netdev(dev);
555 if (err)
556 goto out;
557
558 printk("%s: MAC ", dev->name);
599 for (i = 0; i < 6; i++) { 559 for (i = 0; i < 6; i++) {
600 printk("%2.2x", dev->dev_addr[i]); 560 printk("%2.2x", dev->dev_addr[i]);
601 if (i < 5) 561 if (i < 5)
@@ -603,55 +563,95 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
603 } 563 }
604 printk(" IRQ %d\n", dev->irq); 564 printk(" IRQ %d\n", dev->irq);
605 565
606 /* Shared init code */ 566 return 0;
607 return macsonic_init(dev);
608}
609 567
610#ifdef MODULE 568out:
611static struct net_device *dev_macsonic; 569 free_netdev(dev);
612 570
613MODULE_PARM(sonic_debug, "i"); 571 return err;
572}
573
574MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
575module_param(sonic_debug, int, 0);
614MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)"); 576MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
615 577
616int 578#define SONIC_IRQ_FLAG IRQ_FLG_FAST
617init_module(void) 579
580#include "sonic.c"
581
582static int __devexit mac_sonic_device_remove (struct device *device)
618{ 583{
619 dev_macsonic = macsonic_probe(-1); 584 struct net_device *dev = device->driver_data;
620 if (IS_ERR(dev_macsonic)) { 585 struct sonic_local* lp = netdev_priv(dev);
621 printk(KERN_WARNING "macsonic.c: No card found\n"); 586
622 return PTR_ERR(dev_macsonic); 587 unregister_netdev (dev);
623 } 588 dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
589 lp->descriptors, lp->descriptors_laddr);
590 free_netdev (dev);
591
624 return 0; 592 return 0;
625} 593}
626 594
627void 595static struct device_driver mac_sonic_driver = {
628cleanup_module(void) 596 .name = mac_sonic_string,
597 .bus = &platform_bus_type,
598 .probe = mac_sonic_probe,
599 .remove = __devexit_p(mac_sonic_device_remove),
600};
601
602static void mac_sonic_platform_release(struct device *device)
629{ 603{
630 unregister_netdev(dev_macsonic); 604 struct platform_device *pldev;
631 kfree(dev_macsonic->priv); 605
632 free_netdev(dev_macsonic); 606 /* free device */
607 pldev = to_platform_device (device);
608 kfree (pldev);
633} 609}
634#endif /* MODULE */
635 610
611static int __init mac_sonic_init_module(void)
612{
613 struct platform_device *pldev;
614 int err;
636 615
637#define vdma_alloc(foo, bar) ((u32)foo) 616 if ((err = driver_register(&mac_sonic_driver))) {
638#define vdma_free(baz) 617 printk(KERN_ERR "Driver registration failed\n");
639#define sonic_chiptomem(bat) (bat) 618 return err;
640#define PHYSADDR(quux) (quux) 619 }
641#define CPHYSADDR(quux) (quux)
642 620
643#define sonic_request_irq request_irq 621 mac_sonic_device = NULL;
644#define sonic_free_irq free_irq
645 622
646#include "sonic.c" 623 if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
624 goto out_unregister;
625 }
647 626
648/* 627 memset(pldev, 0, sizeof (*pldev));
649 * Local variables: 628 pldev->name = mac_sonic_string;
650 * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o macsonic.o macsonic.c" 629 pldev->id = 0;
651 * version-control: t 630 pldev->dev.release = mac_sonic_platform_release;
652 * kept-new-versions: 5 631 mac_sonic_device = pldev;
653 * c-indent-level: 8 632
654 * tab-width: 8 633 if (platform_device_register (pldev)) {
655 * End: 634 kfree(pldev);
656 * 635 mac_sonic_device = NULL;
657 */ 636 }
637
638 return 0;
639
640out_unregister:
641 platform_device_unregister(pldev);
642
643 return -ENOMEM;
644}
645
646static void __exit mac_sonic_cleanup_module(void)
647{
648 driver_unregister(&mac_sonic_driver);
649
650 if (mac_sonic_device) {
651 platform_device_unregister(mac_sonic_device);
652 mac_sonic_device = NULL;
653 }
654}
655
656module_init(mac_sonic_init_module);
657module_exit(mac_sonic_cleanup_module);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0405e1f0d3df..fb6b232069d6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1157,16 +1157,20 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1157 if (!skb_shinfo(skb)->nr_frags) { 1157 if (!skb_shinfo(skb)->nr_frags) {
1158linear: 1158linear:
1159 if (skb->ip_summed != CHECKSUM_HW) { 1159 if (skb->ip_summed != CHECKSUM_HW) {
1160 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1160 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1161 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1161 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC; 1162 ETH_TX_FIRST_DESC |
1163 ETH_TX_LAST_DESC |
1164 5 << ETH_TX_IHL_SHIFT;
1162 pkt_info.l4i_chk = 0; 1165 pkt_info.l4i_chk = 0;
1163 } else { 1166 } else {
1164 u32 ipheader = skb->nh.iph->ihl << 11;
1165 1167
1166 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | 1168 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1167 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | 1169 ETH_TX_FIRST_DESC |
1168 ETH_GEN_TCP_UDP_CHECKSUM | 1170 ETH_TX_LAST_DESC |
1169 ETH_GEN_IP_V_4_CHECKSUM | ipheader; 1171 ETH_GEN_TCP_UDP_CHECKSUM |
1172 ETH_GEN_IP_V_4_CHECKSUM |
1173 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1170 /* CPU already calculated pseudo header checksum. */ 1174 /* CPU already calculated pseudo header checksum. */
1171 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1175 if (skb->nh.iph->protocol == IPPROTO_UDP) {
1172 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1176 pkt_info.cmd_sts |= ETH_UDP_FRAME;
@@ -1193,7 +1197,6 @@ linear:
1193 stats->tx_bytes += pkt_info.byte_cnt; 1197 stats->tx_bytes += pkt_info.byte_cnt;
1194 } else { 1198 } else {
1195 unsigned int frag; 1199 unsigned int frag;
1196 u32 ipheader;
1197 1200
1198 /* Since hardware can't handle unaligned fragments smaller 1201 /* Since hardware can't handle unaligned fragments smaller
1199 * than 9 bytes, if we find any, we linearize the skb 1202 * than 9 bytes, if we find any, we linearize the skb
@@ -1222,12 +1225,16 @@ linear:
1222 DMA_TO_DEVICE); 1225 DMA_TO_DEVICE);
1223 pkt_info.l4i_chk = 0; 1226 pkt_info.l4i_chk = 0;
1224 pkt_info.return_info = 0; 1227 pkt_info.return_info = 0;
1225 pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
1226 1228
1227 if (skb->ip_summed == CHECKSUM_HW) { 1229 if (skb->ip_summed != CHECKSUM_HW)
1228 ipheader = skb->nh.iph->ihl << 11; 1230 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1229 pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | 1231 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1230 ETH_GEN_IP_V_4_CHECKSUM | ipheader; 1232 5 << ETH_TX_IHL_SHIFT;
1233 else {
1234 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1235 ETH_GEN_TCP_UDP_CHECKSUM |
1236 ETH_GEN_IP_V_4_CHECKSUM |
1237 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1231 /* CPU already calculated pseudo header checksum. */ 1238 /* CPU already calculated pseudo header checksum. */
1232 if (skb->nh.iph->protocol == IPPROTO_UDP) { 1239 if (skb->nh.iph->protocol == IPPROTO_UDP) {
1233 pkt_info.cmd_sts |= ETH_UDP_FRAME; 1240 pkt_info.cmd_sts |= ETH_UDP_FRAME;
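The mv643xx_eth hunks above move the IP header length (IHL, counted in 32-bit words) from a hard-coded << 11 into the named ETH_TX_IHL_SHIFT field of the descriptor command/status word, and apply errata BTS #50: when hardware checksumming is skipped, the field must still read 5 (a 20-byte, option-free header). A sketch of the computation; only the shift value comes from the header change below, the flag bit positions are illustrative:

#include <stdio.h>
#include <stdint.h>

#define ETH_TX_IHL_SHIFT         11           /* from mv643xx_eth.h below */
#define ETH_TX_FIRST_DESC        (1u << 22)   /* illustrative bit position */
#define ETH_GEN_TCP_UDP_CHECKSUM (1u << 17)   /* illustrative bit position */
#define ETH_GEN_IP_V_4_CHECKSUM  (1u << 18)   /* illustrative bit position */

static uint32_t tx_cmd_sts(int hw_csum, unsigned int ihl)
{
        if (!hw_csum)   /* errata BTS #50: IHL must read 5 without hw csum */
                return ETH_TX_FIRST_DESC | 5 << ETH_TX_IHL_SHIFT;
        return ETH_TX_FIRST_DESC | ETH_GEN_TCP_UDP_CHECKSUM |
               ETH_GEN_IP_V_4_CHECKSUM | ihl << ETH_TX_IHL_SHIFT;
}

int main(void)
{
        printf("no hw csum: %#010x\n", tx_cmd_sts(0, 5));
        printf("hw csum:    %#010x\n", tx_cmd_sts(1, 6)); /* IP options: ihl 6 */
        return 0;
}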
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 57c4f8fbfdb6..7678b59c2952 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -49,7 +49,7 @@
49/* Checksum offload for Tx works for most packets, but 49/* Checksum offload for Tx works for most packets, but
50 * fails if previous packet sent did not use hw csum 50 * fails if previous packet sent did not use hw csum
51 */ 51 */
52#undef MV643XX_CHECKSUM_OFFLOAD_TX 52#define MV643XX_CHECKSUM_OFFLOAD_TX
53#define MV643XX_NAPI 53#define MV643XX_NAPI
54#define MV643XX_TX_FAST_REFILL 54#define MV643XX_TX_FAST_REFILL
55#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */ 55#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */
@@ -217,6 +217,8 @@
217#define ETH_TX_ENABLE_INTERRUPT (BIT23) 217#define ETH_TX_ENABLE_INTERRUPT (BIT23)
218#define ETH_AUTO_MODE (BIT30) 218#define ETH_AUTO_MODE (BIT30)
219 219
220#define ETH_TX_IHL_SHIFT 11
221
220/* typedefs */ 222/* typedefs */
221 223
222typedef enum _eth_func_ret_status { 224typedef enum _eth_func_ret_status {
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 4a391ea0f58a..a1ac4bd1696e 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -486,9 +486,9 @@ struct netdrv_private {
486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); 486MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver"); 487MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
488MODULE_LICENSE("GPL"); 488MODULE_LICENSE("GPL");
489MODULE_PARM (multicast_filter_limit, "i"); 489module_param(multicast_filter_limit, int, 0);
490MODULE_PARM (max_interrupt_work, "i"); 490module_param(max_interrupt_work, int, 0);
491MODULE_PARM (media, "1-" __MODULE_STRING(8) "i"); 491module_param_array(media, int, NULL, 0);
492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses"); 492MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt"); 493MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex"); 494MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
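For reference, the shape of the MODULE_PARM to module_param conversion applied above, as a hedged module fragment (the example_* names are illustrative, not from the driver): the 2.6 interface takes a C type and a sysfs permission mask instead of the old printf-like type string, and arrays use module_param_array() in place of the "1-8i" syntax.

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_limit = 32;
static int example_media[8];

module_param(example_limit, int, 0);              /* was MODULE_PARM(..., "i") */
module_param_array(example_media, int, NULL, 0);  /* was MODULE_PARM(..., "1-8i") */
MODULE_PARM_DESC(example_limit, "illustrative integer parameter");
MODULE_PARM_DESC(example_media, "illustrative integer array parameter");
MODULE_LICENSE("GPL");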
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9d8197bb293a..384a736a0d2f 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -134,7 +134,7 @@ typedef struct local_info_t {
134 u_char mc_filter[8]; 134 u_char mc_filter[8];
135} local_info_t; 135} local_info_t;
136 136
137#define MC_FILTERBREAK 64 137#define MC_FILTERBREAK 8
138 138
139/*====================================================================*/ 139/*====================================================================*/
140/* 140/*
@@ -1012,7 +1012,7 @@ static void fjn_reset(struct net_device *dev)
1012 outb(BANK_1U, ioaddr + CONFIG_1); 1012 outb(BANK_1U, ioaddr + CONFIG_1);
1013 1013
1014 /* set the multicast table to accept none. */ 1014 /* set the multicast table to accept none. */
1015 for (i = 0; i < 6; i++) 1015 for (i = 0; i < 8; i++)
1016 outb(0x00, ioaddr + MAR_ADR + i); 1016 outb(0x00, ioaddr + MAR_ADR + i);
1017 1017
1018 /* Switch to bank 2 (runtime mode) */ 1018 /* Switch to bank 2 (runtime mode) */
@@ -1269,6 +1269,16 @@ static void set_rx_mode(struct net_device *dev)
1269 u_long flags; 1269 u_long flags;
1270 int i; 1270 int i;
1271 1271
1272 int saved_config_0 = inb(ioaddr + CONFIG_0);
1273
1274 local_irq_save(flags);
1275
1276 /* Disable Tx and Rx */
1277 if (sram_config == 0)
1278 outb(CONFIG0_RST, ioaddr + CONFIG_0);
1279 else
1280 outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
1281
1272 if (dev->flags & IFF_PROMISC) { 1282 if (dev->flags & IFF_PROMISC) {
1273 /* Unconditionally log net taps. */ 1283 /* Unconditionally log net taps. */
1274 printk("%s: Promiscuous mode enabled.\n", dev->name); 1284 printk("%s: Promiscuous mode enabled.\n", dev->name);
@@ -1290,20 +1300,23 @@ static void set_rx_mode(struct net_device *dev)
1290 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 1300 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1291 i++, mclist = mclist->next) { 1301 i++, mclist = mclist->next) {
1292 unsigned int bit = 1302 unsigned int bit =
1293 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 1303 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
1294 mc_filter[bit >> 3] |= (1 << bit); 1304 mc_filter[bit >> 3] |= (1 << (bit & 7));
1295 } 1305 }
1306 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
1296 } 1307 }
1297 1308
1298 local_irq_save(flags);
1299 if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) { 1309 if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
1300 int saved_bank = inb(ioaddr + CONFIG_1); 1310 int saved_bank = inb(ioaddr + CONFIG_1);
1301 /* Switch to bank 1 and set the multicast table. */ 1311 /* Switch to bank 1 and set the multicast table. */
1302 outb(0xe4, ioaddr + CONFIG_1); 1312 outb(0xe4, ioaddr + CONFIG_1);
1303 for (i = 0; i < 8; i++) 1313 for (i = 0; i < 8; i++)
1304 outb(mc_filter[i], ioaddr + 8 + i); 1314 outb(mc_filter[i], ioaddr + MAR_ADR + i);
1305 memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter)); 1315 memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
1306 outb(saved_bank, ioaddr + CONFIG_1); 1316 outb(saved_bank, ioaddr + CONFIG_1);
1307 } 1317 }
1318
1319 outb(saved_config_0, ioaddr + CONFIG_0);
1320
1308 local_irq_restore(flags); 1321 local_irq_restore(flags);
1309} 1322}
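The multicast fix above changes the hash from the low six CRC bits with a broken shift (1 << bit on a value that can reach 63) to the top six bits with a proper byte/bit split. A standalone check of the corrected indexing, using a userspace stand-in for the kernel's ether_crc_le():

#include <stdio.h>

/* userspace stand-in for the kernel's ether_crc_le(): bitwise
 * little-endian CRC-32, polynomial 0xedb88320, no final inversion */
static unsigned int crc32_le(int len, const unsigned char *p)
{
        unsigned int crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}

int main(void)
{
        unsigned char mc_filter[8] = { 0 };
        unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int bit = crc32_le(6, addr) >> 26;   /* top 6 bits: 0..63 */

        mc_filter[bit >> 3] |= 1 << (bit & 7);        /* byte, then bit in byte */
        printf("hash bit %u -> filter byte %u, mask 0x%02x\n",
               bit, bit >> 3, 1 << (bit & 7));
        return 0;
}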
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
new file mode 100644
index 000000000000..6a2fe3583478
--- /dev/null
+++ b/drivers/net/phy/Kconfig
@@ -0,0 +1,57 @@
1#
2# PHY Layer Configuration
3#
4
5menu "PHY device support"
6
7config PHYLIB
8 tristate "PHY Device support and infrastructure"
9 depends on NET_ETHERNET
10 help
11 Ethernet controllers are usually attached to PHY
12 devices. This option provides infrastructure for
13 managing PHY devices.
14
15config PHYCONTROL
16 bool " Support for automatically handling PHY state changes"
17 depends on PHYLIB
18 help
19 Adds code to perform all the work for keeping PHY link
20 state (speed/duplex/etc) up-to-date. Also handles
21 interrupts.
22
23comment "MII PHY device drivers"
24 depends on PHYLIB
25
26config MARVELL_PHY
27 tristate "Drivers for Marvell PHYs"
28 depends on PHYLIB
29 ---help---
30 Currently has a driver for the 88E1011S
31
32config DAVICOM_PHY
33 tristate "Drivers for Davicom PHYs"
34 depends on PHYLIB
35 ---help---
36 Currently supports dm9161e and dm9131
37
38config QSEMI_PHY
39 tristate "Drivers for Quality Semiconductor PHYs"
40 depends on PHYLIB
41 ---help---
42 Currently supports the qs6612
43
44config LXT_PHY
45 tristate "Drivers for the Intel LXT PHYs"
46 depends on PHYLIB
47 ---help---
48 Currently supports the lxt970, lxt971
49
50config CICADA_PHY
51 tristate "Drivers for the Cicada PHYs"
52 depends on PHYLIB
53 ---help---
54 Currently supports the cis8204
55
56endmenu
57
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
new file mode 100644
index 000000000000..e4116a5fbb4c
--- /dev/null
+++ b/drivers/net/phy/Makefile
@@ -0,0 +1,10 @@
1# Makefile for Linux PHY drivers
2
3libphy-objs := phy.o phy_device.o mdio_bus.o
4
5obj-$(CONFIG_PHYLIB) += libphy.o
6obj-$(CONFIG_MARVELL_PHY) += marvell.o
7obj-$(CONFIG_DAVICOM_PHY) += davicom.o
8obj-$(CONFIG_CICADA_PHY) += cicada.o
9obj-$(CONFIG_LXT_PHY) += lxt.o
10obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
new file mode 100644
index 000000000000..c47fb2ecd147
--- /dev/null
+++ b/drivers/net/phy/cicada.c
@@ -0,0 +1,134 @@
1/*
2 * drivers/net/phy/cicada.c
3 *
4 * Driver for Cicada PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* Cicada Extended Control Register 1 */
42#define MII_CIS8201_EXT_CON1 0x17
43#define MII_CIS8201_EXTCON1_INIT 0x0000
44
45/* Cicada Interrupt Mask Register */
46#define MII_CIS8201_IMASK 0x19
47#define MII_CIS8201_IMASK_IEN 0x8000
48#define MII_CIS8201_IMASK_SPEED 0x4000
49#define MII_CIS8201_IMASK_LINK 0x2000
50#define MII_CIS8201_IMASK_DUPLEX 0x1000
51#define MII_CIS8201_IMASK_MASK 0xf000
52
53/* Cicada Interrupt Status Register */
54#define MII_CIS8201_ISTAT 0x1a
55#define MII_CIS8201_ISTAT_STATUS 0x8000
56#define MII_CIS8201_ISTAT_SPEED 0x4000
57#define MII_CIS8201_ISTAT_LINK 0x2000
58#define MII_CIS8201_ISTAT_DUPLEX 0x1000
59
60/* Cicada Auxiliary Control/Status Register */
61#define MII_CIS8201_AUX_CONSTAT 0x1c
62#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
63#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
64#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
65#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
66#define MII_CIS8201_AUXCONSTAT_100 0x0008
67
68MODULE_DESCRIPTION("Cicada PHY driver");
69MODULE_AUTHOR("Andy Fleming");
70MODULE_LICENSE("GPL");
71
72static int cis820x_config_init(struct phy_device *phydev)
73{
74 int err;
75
76 err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
77 MII_CIS8201_AUXCONSTAT_INIT);
78
79 if (err < 0)
80 return err;
81
82 err = phy_write(phydev, MII_CIS8201_EXT_CON1,
83 MII_CIS8201_EXTCON1_INIT);
84
85 return err;
86}
87
88static int cis820x_ack_interrupt(struct phy_device *phydev)
89{
90 int err = phy_read(phydev, MII_CIS8201_ISTAT);
91
92 return (err < 0) ? err : 0;
93}
94
95static int cis820x_config_intr(struct phy_device *phydev)
96{
97 int err;
98
99	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
100 err = phy_write(phydev, MII_CIS8201_IMASK,
101 MII_CIS8201_IMASK_MASK);
102 else
103 err = phy_write(phydev, MII_CIS8201_IMASK, 0);
104
105 return err;
106}
107
108/* Cicada 820x */
109static struct phy_driver cis8204_driver = {
110 .phy_id = 0x000fc440,
111 .name = "Cicada Cis8204",
112 .phy_id_mask = 0x000fffc0,
113 .features = PHY_GBIT_FEATURES,
114 .flags = PHY_HAS_INTERRUPT,
115 .config_init = &cis820x_config_init,
116 .config_aneg = &genphy_config_aneg,
117 .read_status = &genphy_read_status,
118 .ack_interrupt = &cis820x_ack_interrupt,
119 .config_intr = &cis820x_config_intr,
120 .driver = { .owner = THIS_MODULE,},
121};
122
123static int __init cis8204_init(void)
124{
125 return phy_driver_register(&cis8204_driver);
126}
127
128static void __exit cis8204_exit(void)
129{
130 phy_driver_unregister(&cis8204_driver);
131}
132
133module_init(cis8204_init);
134module_exit(cis8204_exit);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
new file mode 100644
index 000000000000..6caf499fae32
--- /dev/null
+++ b/drivers/net/phy/davicom.c
@@ -0,0 +1,195 @@
1/*
2 * drivers/net/phy/davicom.c
3 *
4 * Driver for Davicom PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_DM9161_SCR 0x10
42#define MII_DM9161_SCR_INIT 0x0610
43
44/* DM9161 Interrupt Register */
45#define MII_DM9161_INTR 0x15
46#define MII_DM9161_INTR_PEND 0x8000
47#define MII_DM9161_INTR_DPLX_MASK 0x0800
48#define MII_DM9161_INTR_SPD_MASK 0x0400
49#define MII_DM9161_INTR_LINK_MASK 0x0200
50#define MII_DM9161_INTR_MASK 0x0100
51#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
52#define MII_DM9161_INTR_SPD_CHANGE 0x0008
53#define MII_DM9161_INTR_LINK_CHANGE 0x0004
54#define MII_DM9161_INTR_INIT 0x0000
55#define MII_DM9161_INTR_STOP \
56(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
57 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
58
59/* DM9161 10BT Configuration/Status */
60#define MII_DM9161_10BTCSR 0x12
61#define MII_DM9161_10BTCSR_INIT 0x7800
62
63MODULE_DESCRIPTION("Davicom PHY driver");
64MODULE_AUTHOR("Andy Fleming");
65MODULE_LICENSE("GPL");
66
67
68#define DM9161_DELAY 1
69static int dm9161_config_intr(struct phy_device *phydev)
70{
71 int temp;
72
73 temp = phy_read(phydev, MII_DM9161_INTR);
74
75 if (temp < 0)
76 return temp;
77
78	if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
79 temp &= ~(MII_DM9161_INTR_STOP);
80 else
81 temp |= MII_DM9161_INTR_STOP;
82
83 temp = phy_write(phydev, MII_DM9161_INTR, temp);
84
85 return temp;
86}
87
88static int dm9161_config_aneg(struct phy_device *phydev)
89{
90 int err;
91
92 /* Isolate the PHY */
93 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
94
95 if (err < 0)
96 return err;
97
98 /* Configure the new settings */
99 err = genphy_config_aneg(phydev);
100
101 if (err < 0)
102 return err;
103
104 return 0;
105}
106
107static int dm9161_config_init(struct phy_device *phydev)
108{
109 int err;
110
111 /* Isolate the PHY */
112 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
113
114 if (err < 0)
115 return err;
116
117 /* Do not bypass the scrambler/descrambler */
118 err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
119
120 if (err < 0)
121 return err;
122
123 /* Clear 10BTCSR to default */
124 err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
125
126 if (err < 0)
127 return err;
128
129 /* Reconnect the PHY, and enable Autonegotiation */
130 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
131
132 if (err < 0)
133 return err;
134
135 return 0;
136}
137
138static int dm9161_ack_interrupt(struct phy_device *phydev)
139{
140 int err = phy_read(phydev, MII_DM9161_INTR);
141
142 return (err < 0) ? err : 0;
143}
144
145static struct phy_driver dm9161_driver = {
146 .phy_id = 0x0181b880,
147 .name = "Davicom DM9161E",
148 .phy_id_mask = 0x0ffffff0,
149 .features = PHY_BASIC_FEATURES,
150 .config_init = dm9161_config_init,
151 .config_aneg = dm9161_config_aneg,
152 .read_status = genphy_read_status,
153 .driver = { .owner = THIS_MODULE,},
154};
155
156static struct phy_driver dm9131_driver = {
157 .phy_id = 0x00181b80,
158 .name = "Davicom DM9131",
159 .phy_id_mask = 0x0ffffff0,
160 .features = PHY_BASIC_FEATURES,
161 .flags = PHY_HAS_INTERRUPT,
162 .config_aneg = genphy_config_aneg,
163 .read_status = genphy_read_status,
164 .ack_interrupt = dm9161_ack_interrupt,
165 .config_intr = dm9161_config_intr,
166 .driver = { .owner = THIS_MODULE,},
167};
168
169static int __init davicom_init(void)
170{
171 int ret;
172
173 ret = phy_driver_register(&dm9161_driver);
174 if (ret)
175 goto err1;
176
177 ret = phy_driver_register(&dm9131_driver);
178 if (ret)
179 goto err2;
180 return 0;
181
182 err2:
183 phy_driver_unregister(&dm9161_driver);
184 err1:
185 return ret;
186}
187
188static void __exit davicom_exit(void)
189{
190 phy_driver_unregister(&dm9161_driver);
191 phy_driver_unregister(&dm9131_driver);
192}
193
194module_init(davicom_init);
195module_exit(davicom_exit);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
new file mode 100644
index 000000000000..4c840448ec86
--- /dev/null
+++ b/drivers/net/phy/lxt.c
@@ -0,0 +1,179 @@
1/*
2 * drivers/net/phy/lxt.c
3 *
4 * Driver for Intel LXT PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* The Level One LXT970 is used by many boards */
42
43#define MII_LXT970_IER 17 /* Interrupt Enable Register */
44
45#define MII_LXT970_IER_IEN 0x0002
46
47#define MII_LXT970_ISR 18 /* Interrupt Status Register */
48
49#define MII_LXT970_CONFIG 19 /* Configuration Register */
50
51/* ------------------------------------------------------------------------- */
52/* The Level One LXT971 is used on some of my custom boards */
53
54/* register definitions for the 971 */
55#define MII_LXT971_IER 18 /* Interrupt Enable Register */
56#define MII_LXT971_IER_IEN 0x00f2
57
58#define MII_LXT971_ISR 19 /* Interrupt Status Register */
59
60
61MODULE_DESCRIPTION("Intel LXT PHY driver");
62MODULE_AUTHOR("Andy Fleming");
63MODULE_LICENSE("GPL");
64
65static int lxt970_ack_interrupt(struct phy_device *phydev)
66{
67 int err;
68
69 err = phy_read(phydev, MII_BMSR);
70
71 if (err < 0)
72 return err;
73
74 err = phy_read(phydev, MII_LXT970_ISR);
75
76 if (err < 0)
77 return err;
78
79 return 0;
80}
81
82static int lxt970_config_intr(struct phy_device *phydev)
83{
84 int err;
85
86	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
87 err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
88 else
89 err = phy_write(phydev, MII_LXT970_IER, 0);
90
91 return err;
92}
93
94static int lxt970_config_init(struct phy_device *phydev)
95{
96 int err;
97
98 err = phy_write(phydev, MII_LXT970_CONFIG, 0);
99
100 return err;
101}
102
103
104static int lxt971_ack_interrupt(struct phy_device *phydev)
105{
106 int err = phy_read(phydev, MII_LXT971_ISR);
107
108 if (err < 0)
109 return err;
110
111 return 0;
112}
113
114static int lxt971_config_intr(struct phy_device *phydev)
115{
116 int err;
117
118	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
119 err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
120 else
121 err = phy_write(phydev, MII_LXT971_IER, 0);
122
123 return err;
124}
125
126static struct phy_driver lxt970_driver = {
127 .phy_id = 0x07810000,
128 .name = "LXT970",
129 .phy_id_mask = 0x0fffffff,
130 .features = PHY_BASIC_FEATURES,
131 .flags = PHY_HAS_INTERRUPT,
132 .config_init = lxt970_config_init,
133 .config_aneg = genphy_config_aneg,
134 .read_status = genphy_read_status,
135 .ack_interrupt = lxt970_ack_interrupt,
136 .config_intr = lxt970_config_intr,
137 .driver = { .owner = THIS_MODULE,},
138};
139
140static struct phy_driver lxt971_driver = {
141 .phy_id = 0x0001378e,
142 .name = "LXT971",
143 .phy_id_mask = 0x0fffffff,
144 .features = PHY_BASIC_FEATURES,
145 .flags = PHY_HAS_INTERRUPT,
146 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status,
148 .ack_interrupt = lxt971_ack_interrupt,
149 .config_intr = lxt971_config_intr,
150 .driver = { .owner = THIS_MODULE,},
151};
152
153static int __init lxt_init(void)
154{
155 int ret;
156
157 ret = phy_driver_register(&lxt970_driver);
158 if (ret)
159 goto err1;
160
161 ret = phy_driver_register(&lxt971_driver);
162 if (ret)
163 goto err2;
164 return 0;
165
166 err2:
167 phy_driver_unregister(&lxt970_driver);
168 err1:
169 return ret;
170}
171
172static void __exit lxt_exit(void)
173{
174 phy_driver_unregister(&lxt970_driver);
175 phy_driver_unregister(&lxt971_driver);
176}
177
178module_init(lxt_init);
179module_exit(lxt_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
new file mode 100644
index 000000000000..4a72b025006b
--- /dev/null
+++ b/drivers/net/phy/marvell.c
@@ -0,0 +1,140 @@
1/*
2 * drivers/net/phy/marvell.c
3 *
4 * Driver for Marvell PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#define MII_M1011_IEVENT 0x13
42#define MII_M1011_IEVENT_CLEAR 0x0000
43
44#define MII_M1011_IMASK 0x12
45#define MII_M1011_IMASK_INIT 0x6400
46#define MII_M1011_IMASK_CLEAR 0x0000
47
48MODULE_DESCRIPTION("Marvell PHY driver");
49MODULE_AUTHOR("Andy Fleming");
50MODULE_LICENSE("GPL");
51
52static int marvell_ack_interrupt(struct phy_device *phydev)
53{
54 int err;
55
56 /* Clear the interrupts by reading the reg */
57 err = phy_read(phydev, MII_M1011_IEVENT);
58
59 if (err < 0)
60 return err;
61
62 return 0;
63}
64
65static int marvell_config_intr(struct phy_device *phydev)
66{
67 int err;
68
69	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
70 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
71 else
72 err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
73
74 return err;
75}
76
77static int marvell_config_aneg(struct phy_device *phydev)
78{
79 int err;
80
81	/* The Marvell PHY has an erratum which requires
82 * that certain registers get written in order
83 * to restart autonegotiation */
84 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
85
86 if (err < 0)
87 return err;
88
89 err = phy_write(phydev, 0x1d, 0x1f);
90 if (err < 0)
91 return err;
92
93 err = phy_write(phydev, 0x1e, 0x200c);
94 if (err < 0)
95 return err;
96
97 err = phy_write(phydev, 0x1d, 0x5);
98 if (err < 0)
99 return err;
100
101 err = phy_write(phydev, 0x1e, 0);
102 if (err < 0)
103 return err;
104
105 err = phy_write(phydev, 0x1e, 0x100);
106 if (err < 0)
107 return err;
108
109
110 err = genphy_config_aneg(phydev);
111
112 return err;
113}
114
115
116static struct phy_driver m88e1101_driver = {
117 .phy_id = 0x01410c00,
118 .phy_id_mask = 0xffffff00,
119 .name = "Marvell 88E1101",
120 .features = PHY_GBIT_FEATURES,
121 .flags = PHY_HAS_INTERRUPT,
122 .config_aneg = &marvell_config_aneg,
123 .read_status = &genphy_read_status,
124 .ack_interrupt = &marvell_ack_interrupt,
125 .config_intr = &marvell_config_intr,
126 .driver = { .owner = THIS_MODULE,},
127};
128
129static int __init marvell_init(void)
130{
131 return phy_driver_register(&m88e1101_driver);
132}
133
134static void __exit marvell_exit(void)
135{
136 phy_driver_unregister(&m88e1101_driver);
137}
138
139module_init(marvell_init);
140module_exit(marvell_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
new file mode 100644
index 000000000000..41f62c0c5fcb
--- /dev/null
+++ b/drivers/net/phy/mdio_bus.c
@@ -0,0 +1,176 @@
1/*
2 * drivers/net/phy/mdio_bus.c
3 *
4 * MDIO Bus interface
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* mdiobus_register
42 *
43 * description: Called by a bus driver to bring up all the PHYs
44 * on a given bus, and attach them to the bus
45 */
46int mdiobus_register(struct mii_bus *bus)
47{
48 int i;
49 int err = 0;
50
51	if (NULL == bus || NULL == bus->name ||
52			NULL == bus->read ||
53			NULL == bus->write)
54		return -EINVAL;
55
56	spin_lock_init(&bus->mdio_lock);
57
58 if (bus->reset)
59 bus->reset(bus);
60
61 for (i = 0; i < PHY_MAX_ADDR; i++) {
62 struct phy_device *phydev;
63
64 phydev = get_phy_device(bus, i);
65
66 if (IS_ERR(phydev))
67 return PTR_ERR(phydev);
68
69 /* There's a PHY at this address
70 * We need to set:
71 * 1) IRQ
72 * 2) bus_id
73 * 3) parent
74 * 4) bus
75 * 5) mii_bus
76 * And, we need to register it */
77 if (phydev) {
78 phydev->irq = bus->irq[i];
79
80 phydev->dev.parent = bus->dev;
81 phydev->dev.bus = &mdio_bus_type;
82 sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i);
83
84 phydev->bus = bus;
85
86 err = device_register(&phydev->dev);
87
88 if (err)
89 printk(KERN_ERR "phy %d failed to register\n",
90 i);
91 }
92
93 bus->phy_map[i] = phydev;
94 }
95
96 pr_info("%s: probed\n", bus->name);
97
98 return err;
99}
100EXPORT_SYMBOL(mdiobus_register);
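
/* A minimal usage sketch for a bus driver calling this interface
 * (my_mdio_read, my_mdio_write and my_irqs are illustrative names,
 * not part of this patch):
 *
 *	static struct mii_bus my_bus = {
 *		.name	= "my_mdio",
 *		.id	= 0,
 *		.read	= my_mdio_read,
 *		.write	= my_mdio_write,
 *	};
 *
 *	my_bus.irq = my_irqs;	// one entry per PHY_MAX_ADDR slot
 *	err = mdiobus_register(&my_bus);
 */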
101
102void mdiobus_unregister(struct mii_bus *bus)
103{
104 int i;
105
106 for (i = 0; i < PHY_MAX_ADDR; i++) {
107 if (bus->phy_map[i]) {
108 device_unregister(&bus->phy_map[i]->dev);
109 kfree(bus->phy_map[i]);
110 }
111 }
112}
113EXPORT_SYMBOL(mdiobus_unregister);
114
115/* mdio_bus_match
116 *
117 * description: Given a PHY device, and a PHY driver, return 1 if
118 * the driver supports the device. Otherwise, return 0
119 */
120static int mdio_bus_match(struct device *dev, struct device_driver *drv)
121{
122 struct phy_device *phydev = to_phy_device(dev);
123 struct phy_driver *phydrv = to_phy_driver(drv);
124
125 return (phydrv->phy_id == (phydev->phy_id & phydrv->phy_id_mask));
126}
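
/* For example, the cis8204 driver added in this patch sets
 * .phy_id = 0x000fc440 and .phy_id_mask = 0x000fffc0, so it claims
 * any PHY whose masked ID register value equals 0x000fc440. */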
127
128/* Suspend and resume. Copied from platform_suspend and
129 * platform_resume
130 */
131static int mdio_bus_suspend(struct device * dev, u32 state)
132{
133 int ret = 0;
134 struct device_driver *drv = dev->driver;
135
136 if (drv && drv->suspend) {
137 ret = drv->suspend(dev, state, SUSPEND_DISABLE);
138 if (ret == 0)
139 ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
140 if (ret == 0)
141 ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
142 }
143 return ret;
144}
145
146static int mdio_bus_resume(struct device * dev)
147{
148 int ret = 0;
149 struct device_driver *drv = dev->driver;
150
151 if (drv && drv->resume) {
152 ret = drv->resume(dev, RESUME_POWER_ON);
153 if (ret == 0)
154 ret = drv->resume(dev, RESUME_RESTORE_STATE);
155 if (ret == 0)
156 ret = drv->resume(dev, RESUME_ENABLE);
157 }
158 return ret;
159}
160
161struct bus_type mdio_bus_type = {
162 .name = "mdio_bus",
163 .match = mdio_bus_match,
164 .suspend = mdio_bus_suspend,
165 .resume = mdio_bus_resume,
166};
167
168int __init mdio_bus_init(void)
169{
170 return bus_register(&mdio_bus_type);
171}
172
173void __exit mdio_bus_exit(void)
174{
175 bus_unregister(&mdio_bus_type);
176}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
new file mode 100644
index 000000000000..d9e11f93bf3a
--- /dev/null
+++ b/drivers/net/phy/phy.c
@@ -0,0 +1,871 @@
1/*
2 * drivers/net/phy/phy.c
3 *
4 * Framework for configuring and reading PHY devices
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42/* Convenience function to print out the current phy status
43 */
44void phy_print_status(struct phy_device *phydev)
45{
46 pr_info("%s: Link is %s", phydev->dev.bus_id,
47 phydev->link ? "Up" : "Down");
48 if (phydev->link)
49 printk(" - %d/%s", phydev->speed,
50 DUPLEX_FULL == phydev->duplex ?
51 "Full" : "Half");
52
53 printk("\n");
54}
55EXPORT_SYMBOL(phy_print_status);
56
57
58/* Convenience functions for reading/writing a given PHY
59 * register. They MUST NOT be called from interrupt context,
60 * because the bus read/write functions may wait for an interrupt
61 * to conclude the operation. */
62int phy_read(struct phy_device *phydev, u16 regnum)
63{
64 int retval;
65 struct mii_bus *bus = phydev->bus;
66
67 spin_lock_bh(&bus->mdio_lock);
68 retval = bus->read(bus, phydev->addr, regnum);
69 spin_unlock_bh(&bus->mdio_lock);
70
71 return retval;
72}
73EXPORT_SYMBOL(phy_read);
74
75int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
76{
77 int err;
78 struct mii_bus *bus = phydev->bus;
79
80 spin_lock_bh(&bus->mdio_lock);
81 err = bus->write(bus, phydev->addr, regnum, val);
82 spin_unlock_bh(&bus->mdio_lock);
83
84 return err;
85}
86EXPORT_SYMBOL(phy_write);
87
88
89int phy_clear_interrupt(struct phy_device *phydev)
90{
91 int err = 0;
92
93 if (phydev->drv->ack_interrupt)
94 err = phydev->drv->ack_interrupt(phydev);
95
96 return err;
97}
98
99
100int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
101{
102 int err = 0;
103
104 phydev->interrupts = interrupts;
105 if (phydev->drv->config_intr)
106 err = phydev->drv->config_intr(phydev);
107
108 return err;
109}
110
111
112/* phy_aneg_done
113 *
114 * description: Reads the status register and returns 0 if
115 *   auto-negotiation is incomplete, a negative error code if the
116 *   read fails, and BMSR_ANEGCOMPLETE if auto-negotiation is done.
117 */
118static inline int phy_aneg_done(struct phy_device *phydev)
119{
120 int retval;
121
122 retval = phy_read(phydev, MII_BMSR);
123
124 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
125}
126
127/* A structure for mapping a particular speed and duplex
128 * combination to a particular SUPPORTED and ADVERTISED value */
129struct phy_setting {
130 int speed;
131 int duplex;
132 u32 setting;
133};
134
135/* A mapping of all SUPPORTED settings to speed/duplex */
136static struct phy_setting settings[] = {
137 {
138 .speed = 10000,
139 .duplex = DUPLEX_FULL,
140 .setting = SUPPORTED_10000baseT_Full,
141 },
142 {
143 .speed = SPEED_1000,
144 .duplex = DUPLEX_FULL,
145 .setting = SUPPORTED_1000baseT_Full,
146 },
147 {
148 .speed = SPEED_1000,
149 .duplex = DUPLEX_HALF,
150 .setting = SUPPORTED_1000baseT_Half,
151 },
152 {
153 .speed = SPEED_100,
154 .duplex = DUPLEX_FULL,
155 .setting = SUPPORTED_100baseT_Full,
156 },
157 {
158 .speed = SPEED_100,
159 .duplex = DUPLEX_HALF,
160 .setting = SUPPORTED_100baseT_Half,
161 },
162 {
163 .speed = SPEED_10,
164 .duplex = DUPLEX_FULL,
165 .setting = SUPPORTED_10baseT_Full,
166 },
167 {
168 .speed = SPEED_10,
169 .duplex = DUPLEX_HALF,
170 .setting = SUPPORTED_10baseT_Half,
171 },
172};
173
174#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
175
176/* phy_find_setting
177 *
178 * description: Searches the settings array for the setting which
179 * matches the desired speed and duplex, and returns the index
180 * of that setting. Returns the index of the last setting if
181 * none of the others match.
182 */
183static inline int phy_find_setting(int speed, int duplex)
184{
185 int idx = 0;
186
187 while (idx < ARRAY_SIZE(settings) &&
188 (settings[idx].speed != speed ||
189 settings[idx].duplex != duplex))
190 idx++;
191
192 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
193}
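
/* For example, with the settings table above, phy_find_setting(SPEED_100,
 * DUPLEX_HALF) returns index 4, while an unsupported combination falls
 * through to the last entry (10/HALF). */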
194
195/* phy_find_valid
196 * idx: The first index in settings[] to search
197 * features: A mask of the valid settings
198 *
199 * description: Returns the index of the first valid setting less
200 * than or equal to the one pointed to by idx, as determined by
201 * the mask in features. Returns the index of the last setting
202 * if nothing else matches.
203 */
204static inline int phy_find_valid(int idx, u32 features)
205{
206 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
207 idx++;
208
209 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
210}
211
212/* phy_sanitize_settings
213 *
214 * description: Make sure the PHY is set to supported speeds and
215 * duplexes. Drop down by one in this order: 1000/FULL,
216 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
217 */
218void phy_sanitize_settings(struct phy_device *phydev)
219{
220 u32 features = phydev->supported;
221 int idx;
222
223 /* Sanitize settings based on PHY capabilities */
224 if ((features & SUPPORTED_Autoneg) == 0)
225 phydev->autoneg = 0;
226
227 idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
228 features);
229
230 phydev->speed = settings[idx].speed;
231 phydev->duplex = settings[idx].duplex;
232}
233EXPORT_SYMBOL(phy_sanitize_settings);
234
235/* phy_ethtool_sset:
236 * A generic ethtool sset function. Handles all the details
237 *
238 * A few notes about parameter checking:
239 * - We don't set port or transceiver, so we don't care what they
240 * were set to.
241 * - phy_start_aneg() will make sure forced settings are sane, and
242 * choose the next best ones from the ones selected, so we don't
243 * care if ethtool tries to give us bad values
244 *
245 * A note about the PHYCONTROL Layer. If you turn off
246 * CONFIG_PHYCONTROL, you will need to read the PHY status
247 * registers after this function completes, and update your
248 * controller manually.
249 */
250int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
251{
252 if (cmd->phy_address != phydev->addr)
253 return -EINVAL;
254
255 /* We make sure that we don't pass unsupported
256 * values in to the PHY */
257 cmd->advertising &= phydev->supported;
258
259 /* Verify the settings we care about. */
260 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
261 return -EINVAL;
262
263 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
264 return -EINVAL;
265
266 if (cmd->autoneg == AUTONEG_DISABLE
267 && ((cmd->speed != SPEED_1000
268 && cmd->speed != SPEED_100
269 && cmd->speed != SPEED_10)
270 || (cmd->duplex != DUPLEX_HALF
271 && cmd->duplex != DUPLEX_FULL)))
272 return -EINVAL;
273
274 phydev->autoneg = cmd->autoneg;
275
276 phydev->speed = cmd->speed;
277
278 phydev->advertising = cmd->advertising;
279
280 if (AUTONEG_ENABLE == cmd->autoneg)
281 phydev->advertising |= ADVERTISED_Autoneg;
282 else
283 phydev->advertising &= ~ADVERTISED_Autoneg;
284
285 phydev->duplex = cmd->duplex;
286
287 /* Restart the PHY */
288 phy_start_aneg(phydev);
289
290 return 0;
291}
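
/* A driver would typically expose this through its ethtool_ops; a
 * sketch with illustrative names (my_priv and its phydev member are
 * assumptions, not part of this patch):
 *
 *	static int my_set_settings(struct net_device *ndev,
 *			struct ethtool_cmd *cmd)
 *	{
 *		struct my_priv *priv = netdev_priv(ndev);
 *
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 */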
292
293int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
294{
295 cmd->supported = phydev->supported;
296
297 cmd->advertising = phydev->advertising;
298
299 cmd->speed = phydev->speed;
300 cmd->duplex = phydev->duplex;
301 cmd->port = PORT_MII;
302 cmd->phy_address = phydev->addr;
303 cmd->transceiver = XCVR_EXTERNAL;
304 cmd->autoneg = phydev->autoneg;
305
306 return 0;
307}
308
309
310/* Note that this function is currently incompatible with the
311 * PHYCONTROL layer. It changes registers without regard to
312 * current state. Use at your own risk.
313 */
314int phy_mii_ioctl(struct phy_device *phydev,
315 struct mii_ioctl_data *mii_data, int cmd)
316{
317 u16 val = mii_data->val_in;
318
319 switch (cmd) {
320 case SIOCGMIIPHY:
321 mii_data->phy_id = phydev->addr;
322 break;
323 case SIOCGMIIREG:
324 mii_data->val_out = phy_read(phydev, mii_data->reg_num);
325 break;
326
327 case SIOCSMIIREG:
328 if (!capable(CAP_NET_ADMIN))
329 return -EPERM;
330
331 if (mii_data->phy_id == phydev->addr) {
332			switch (mii_data->reg_num) {
333 case MII_BMCR:
334 if (val & (BMCR_RESET|BMCR_ANENABLE))
335 phydev->autoneg = AUTONEG_DISABLE;
336 else
337 phydev->autoneg = AUTONEG_ENABLE;
338 if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
339 phydev->duplex = DUPLEX_FULL;
340 else
341 phydev->duplex = DUPLEX_HALF;
342 break;
343 case MII_ADVERTISE:
344 phydev->advertising = val;
345 break;
346 default:
347 /* do nothing */
348 break;
349 }
350 }
351
352 phy_write(phydev, mii_data->reg_num, val);
353
354 if (mii_data->reg_num == MII_BMCR
355 && val & BMCR_RESET
356 && phydev->drv->config_init)
357 phydev->drv->config_init(phydev);
358 break;
359 }
360
361 return 0;
362}
363
364/* phy_start_aneg
365 *
366 * description: Sanitizes the settings (if we're not
367 * autonegotiating them), and then calls the driver's
368 * config_aneg function. If the PHYCONTROL Layer is operating,
369 * we change the state to reflect the beginning of
370 * Auto-negotiation or forcing.
371 */
372int phy_start_aneg(struct phy_device *phydev)
373{
374 int err;
375
376 spin_lock(&phydev->lock);
377
378 if (AUTONEG_DISABLE == phydev->autoneg)
379 phy_sanitize_settings(phydev);
380
381 err = phydev->drv->config_aneg(phydev);
382
383#ifdef CONFIG_PHYCONTROL
384 if (err < 0)
385 goto out_unlock;
386
387 if (phydev->state != PHY_HALTED) {
388 if (AUTONEG_ENABLE == phydev->autoneg) {
389 phydev->state = PHY_AN;
390 phydev->link_timeout = PHY_AN_TIMEOUT;
391 } else {
392 phydev->state = PHY_FORCING;
393 phydev->link_timeout = PHY_FORCE_TIMEOUT;
394 }
395 }
396
397out_unlock:
398#endif
399 spin_unlock(&phydev->lock);
400 return err;
401}
402EXPORT_SYMBOL(phy_start_aneg);
403
404
405#ifdef CONFIG_PHYCONTROL
406static void phy_change(void *data);
407static void phy_timer(unsigned long data);
408
409/* phy_start_machine:
410 *
411 * description: The PHY infrastructure can run a state machine
412 * which tracks whether the PHY is starting up, negotiating,
413 * etc. This function starts the timer which tracks the state
414 * of the PHY. If you want to be notified when the state
415 * changes, pass in the callback, otherwise, pass NULL. If you
416 * want to maintain your own state machine, do not call this
417 * function. */
418void phy_start_machine(struct phy_device *phydev,
419 void (*handler)(struct net_device *))
420{
421 phydev->adjust_state = handler;
422
423 init_timer(&phydev->phy_timer);
424 phydev->phy_timer.function = &phy_timer;
425 phydev->phy_timer.data = (unsigned long) phydev;
426 mod_timer(&phydev->phy_timer, jiffies + HZ);
427}
428
429/* phy_stop_machine
430 *
431 * description: Stops the state machine timer, sets the state to
432 * UP (unless it wasn't up yet), and then frees the interrupt,
433 * if it is in use. This function must be called BEFORE
434 * phy_detach.
435 */
436void phy_stop_machine(struct phy_device *phydev)
437{
438 del_timer_sync(&phydev->phy_timer);
439
440 spin_lock(&phydev->lock);
441 if (phydev->state > PHY_UP)
442 phydev->state = PHY_UP;
443 spin_unlock(&phydev->lock);
444
445 if (phydev->irq != PHY_POLL)
446 phy_stop_interrupts(phydev);
447
448 phydev->adjust_state = NULL;
449}
450
451/* phy_force_reduction
452 *
453 * description: Reduces the speed/duplex settings by
454 * one notch. The order is so:
455 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
456 * 10/FULL, 10/HALF. The function bottoms out at 10/HALF.
457 */
458static void phy_force_reduction(struct phy_device *phydev)
459{
460 int idx;
461
462 idx = phy_find_setting(phydev->speed, phydev->duplex);
463
464 idx++;
465
466 idx = phy_find_valid(idx, phydev->supported);
467
468 phydev->speed = settings[idx].speed;
469 phydev->duplex = settings[idx].duplex;
470
471 pr_info("Trying %d/%s\n", phydev->speed,
472 DUPLEX_FULL == phydev->duplex ?
473 "FULL" : "HALF");
474}
475
476
477/* phy_error:
478 *
479 * Moves the PHY to the HALTED state in response to a read
480 * or write error, and tells the controller the link is down.
481 * Must not be called from interrupt context, or while the
482 * phydev->lock is held.
483 */
484void phy_error(struct phy_device *phydev)
485{
486 spin_lock(&phydev->lock);
487 phydev->state = PHY_HALTED;
488 spin_unlock(&phydev->lock);
489}
490
491/* phy_interrupt
492 *
493 * description: When a PHY interrupt occurs, the handler disables
494 * interrupts, and schedules a work task to clear the interrupt.
495 */
496static irqreturn_t phy_interrupt(int irq, void *phy_dat, struct pt_regs *regs)
497{
498 struct phy_device *phydev = phy_dat;
499
500 /* The MDIO bus is not allowed to be written in interrupt
501 * context, so we need to disable the irq here. A work
502 * queue will write the PHY to disable and clear the
503 * interrupt, and then reenable the irq line. */
504 disable_irq_nosync(irq);
505
506 schedule_work(&phydev->phy_queue);
507
508 return IRQ_HANDLED;
509}
510
511/* Enable the interrupts from the PHY side */
512int phy_enable_interrupts(struct phy_device *phydev)
513{
514 int err;
515
516 err = phy_clear_interrupt(phydev);
517
518 if (err < 0)
519 return err;
520
521 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
522
523 return err;
524}
525EXPORT_SYMBOL(phy_enable_interrupts);
526
527/* Disable the PHY interrupts from the PHY side */
528int phy_disable_interrupts(struct phy_device *phydev)
529{
530 int err;
531
532 /* Disable PHY interrupts */
533 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
534
535 if (err)
536 goto phy_err;
537
538 /* Clear the interrupt */
539 err = phy_clear_interrupt(phydev);
540
541 if (err)
542 goto phy_err;
543
544 return 0;
545
546phy_err:
547 phy_error(phydev);
548
549 return err;
550}
551EXPORT_SYMBOL(phy_disable_interrupts);
552
553/* phy_start_interrupts
554 *
555 * description: Request the interrupt for the given PHY. If
556 * this fails, then we set irq to PHY_POLL.
557 * Otherwise, we enable the interrupts in the PHY.
558 * Returns 0 on success.
559 * This should only be called with a valid IRQ number.
560 */
561int phy_start_interrupts(struct phy_device *phydev)
562{
563 int err = 0;
564
565 INIT_WORK(&phydev->phy_queue, phy_change, phydev);
566
567 if (request_irq(phydev->irq, phy_interrupt,
568 SA_SHIRQ,
569 "phy_interrupt",
570 phydev) < 0) {
571 printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
572 phydev->bus->name,
573 phydev->irq);
574 phydev->irq = PHY_POLL;
575 return 0;
576 }
577
578 err = phy_enable_interrupts(phydev);
579
580 return err;
581}
582EXPORT_SYMBOL(phy_start_interrupts);
583
584int phy_stop_interrupts(struct phy_device *phydev)
585{
586 int err;
587
588 err = phy_disable_interrupts(phydev);
589
590 if (err)
591 phy_error(phydev);
592
593 free_irq(phydev->irq, phydev);
594
595 return err;
596}
597EXPORT_SYMBOL(phy_stop_interrupts);
598
599
600/* Scheduled by the phy_interrupt/timer to handle PHY changes */
601static void phy_change(void *data)
602{
603 int err;
604 struct phy_device *phydev = data;
605
606 err = phy_disable_interrupts(phydev);
607
608 if (err)
609 goto phy_err;
610
611 spin_lock(&phydev->lock);
612 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
613 phydev->state = PHY_CHANGELINK;
614 spin_unlock(&phydev->lock);
615
616 enable_irq(phydev->irq);
617
618 /* Reenable interrupts */
619 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
620
621 if (err)
622 goto irq_enable_err;
623
624 return;
625
626irq_enable_err:
627 disable_irq(phydev->irq);
628phy_err:
629 phy_error(phydev);
630}
631
632/* Bring down the PHY link, and stop checking the status. */
633void phy_stop(struct phy_device *phydev)
634{
635 spin_lock(&phydev->lock);
636
637 if (PHY_HALTED == phydev->state)
638 goto out_unlock;
639
640 if (phydev->irq != PHY_POLL) {
641 /* Clear any pending interrupts */
642 phy_clear_interrupt(phydev);
643
644 /* Disable PHY Interrupts */
645 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
646 }
647
648 phydev->state = PHY_HALTED;
649
650out_unlock:
651 spin_unlock(&phydev->lock);
652}
653
654
655/* phy_start
656 *
657 * description: Indicates the attached device's readiness to
658 * handle PHY-related work. Used during startup to start the
659 * PHY, and after a call to phy_stop() to resume operation.
660 * Also used to indicate the MDIO bus has cleared an error
661 * condition.
662 */
663void phy_start(struct phy_device *phydev)
664{
665 spin_lock(&phydev->lock);
666
667 switch (phydev->state) {
668 case PHY_STARTING:
669 phydev->state = PHY_PENDING;
670 break;
671 case PHY_READY:
672 phydev->state = PHY_UP;
673 break;
674 case PHY_HALTED:
675		phydev->state = PHY_RESUMING;	/* fall through */
676 default:
677 break;
678 }
679 spin_unlock(&phydev->lock);
680}
681EXPORT_SYMBOL(phy_stop);
682EXPORT_SYMBOL(phy_start);
683
684/* PHY timer which handles the state machine */
685static void phy_timer(unsigned long data)
686{
687 struct phy_device *phydev = (struct phy_device *)data;
688 int needs_aneg = 0;
689 int err = 0;
690
691 spin_lock(&phydev->lock);
692
693 if (phydev->adjust_state)
694 phydev->adjust_state(phydev->attached_dev);
695
696	switch (phydev->state) {
697 case PHY_DOWN:
698 case PHY_STARTING:
699 case PHY_READY:
700 case PHY_PENDING:
701 break;
702 case PHY_UP:
703 needs_aneg = 1;
704
705 phydev->link_timeout = PHY_AN_TIMEOUT;
706
707 break;
708 case PHY_AN:
709 /* Check if negotiation is done. Break
710 * if there's an error */
711 err = phy_aneg_done(phydev);
712 if (err < 0)
713 break;
714
715 /* If auto-negotiation is done, we change to
716 * either RUNNING, or NOLINK */
717 if (err > 0) {
718 err = phy_read_status(phydev);
719
720 if (err)
721 break;
722
723 if (phydev->link) {
724 phydev->state = PHY_RUNNING;
725 netif_carrier_on(phydev->attached_dev);
726 } else {
727 phydev->state = PHY_NOLINK;
728 netif_carrier_off(phydev->attached_dev);
729 }
730
731 phydev->adjust_link(phydev->attached_dev);
732
733 } else if (0 == phydev->link_timeout--) {
734			/* The timer expired: either switch to a
735			 * forced speed/duplex, or, if the driver
736			 * sets PHY_HAS_MAGICANEG, simply try
737			 * auto-negotiation again */
738 if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
739 int idx;
740
741 /* We'll start from the
742 * fastest speed, and work
743 * our way down */
744 idx = phy_find_valid(0,
745 phydev->supported);
746
747 phydev->speed = settings[idx].speed;
748 phydev->duplex = settings[idx].duplex;
749
750 phydev->autoneg = AUTONEG_DISABLE;
751 phydev->state = PHY_FORCING;
752 phydev->link_timeout =
753 PHY_FORCE_TIMEOUT;
754
755 pr_info("Trying %d/%s\n",
756 phydev->speed,
757 DUPLEX_FULL ==
758 phydev->duplex ?
759 "FULL" : "HALF");
760 }
761
762 needs_aneg = 1;
763 }
764 break;
765 case PHY_NOLINK:
766 err = phy_read_status(phydev);
767
768 if (err)
769 break;
770
771 if (phydev->link) {
772 phydev->state = PHY_RUNNING;
773 netif_carrier_on(phydev->attached_dev);
774 phydev->adjust_link(phydev->attached_dev);
775 }
776 break;
777 case PHY_FORCING:
778 err = phy_read_status(phydev);
779
780 if (err)
781 break;
782
783 if (phydev->link) {
784 phydev->state = PHY_RUNNING;
785 netif_carrier_on(phydev->attached_dev);
786 } else {
787 if (0 == phydev->link_timeout--) {
788 phy_force_reduction(phydev);
789 needs_aneg = 1;
790 }
791 }
792
793 phydev->adjust_link(phydev->attached_dev);
794 break;
795 case PHY_RUNNING:
796 /* Only register a CHANGE if we are
797 * polling */
798 if (PHY_POLL == phydev->irq)
799 phydev->state = PHY_CHANGELINK;
800 break;
801 case PHY_CHANGELINK:
802 err = phy_read_status(phydev);
803
804 if (err)
805 break;
806
807 if (phydev->link) {
808 phydev->state = PHY_RUNNING;
809 netif_carrier_on(phydev->attached_dev);
810 } else {
811 phydev->state = PHY_NOLINK;
812 netif_carrier_off(phydev->attached_dev);
813 }
814
815 phydev->adjust_link(phydev->attached_dev);
816
817 if (PHY_POLL != phydev->irq)
818 err = phy_config_interrupt(phydev,
819 PHY_INTERRUPT_ENABLED);
820 break;
821 case PHY_HALTED:
822 if (phydev->link) {
823 phydev->link = 0;
824 netif_carrier_off(phydev->attached_dev);
825 phydev->adjust_link(phydev->attached_dev);
826 }
827 break;
828 case PHY_RESUMING:
829
830 err = phy_clear_interrupt(phydev);
831
832 if (err)
833 break;
834
835 err = phy_config_interrupt(phydev,
836 PHY_INTERRUPT_ENABLED);
837
838 if (err)
839 break;
840
841 if (AUTONEG_ENABLE == phydev->autoneg) {
842 err = phy_aneg_done(phydev);
843 if (err < 0)
844 break;
845
846 /* err > 0 if AN is done.
847 * Otherwise, it's 0, and we're
848 * still waiting for AN */
849 if (err > 0) {
850 phydev->state = PHY_RUNNING;
851 } else {
852 phydev->state = PHY_AN;
853 phydev->link_timeout = PHY_AN_TIMEOUT;
854 }
855 } else
856 phydev->state = PHY_RUNNING;
857 break;
858 }
859
860 spin_unlock(&phydev->lock);
861
862 if (needs_aneg)
863 err = phy_start_aneg(phydev);
864
865 if (err < 0)
866 phy_error(phydev);
867
868 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
869}
870
871#endif /* CONFIG_PHYCONTROL */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
new file mode 100644
index 000000000000..33f7bdb5857c
--- /dev/null
+++ b/drivers/net/phy/phy_device.c
@@ -0,0 +1,696 @@
1/*
2 * drivers/net/phy/phy_device.c
3 *
4 * Framework for finding and configuring PHYs.
5 * Also contains generic PHY driver
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h>
33#include <linux/version.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/phy.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42static struct phy_driver genphy_driver;
43extern int mdio_bus_init(void);
44extern void mdio_bus_exit(void);
45
46/* get_phy_device
47 *
48 * description: Reads the ID registers of the PHY at addr on the
49 * bus, then allocates and returns the phy_device to
50 * represent it.
51 */
52struct phy_device *get_phy_device(struct mii_bus *bus, int addr)
53{
54 int phy_reg;
55 u32 phy_id;
56 struct phy_device *dev = NULL;
57
58 /* Grab the bits from PHYIR1, and put them
59 * in the upper half */
60 phy_reg = bus->read(bus, addr, MII_PHYSID1);
61
62 if (phy_reg < 0)
63 return ERR_PTR(phy_reg);
64
65 phy_id = (phy_reg & 0xffff) << 16;
66
67 /* Grab the bits from PHYIR2, and put them in the lower half */
68 phy_reg = bus->read(bus, addr, MII_PHYSID2);
69
70 if (phy_reg < 0)
71 return ERR_PTR(phy_reg);
72
73 phy_id |= (phy_reg & 0xffff);
74
75 /* If the phy_id is all Fs, there is no device there */
76 if (0xffffffff == phy_id)
77 return NULL;
78
79 /* Otherwise, we allocate the device, and initialize the
80 * default values */
81 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
82
83 if (NULL == dev)
84 return ERR_PTR(-ENOMEM);
85
86 dev->speed = 0;
87 dev->duplex = -1;
88 dev->pause = dev->asym_pause = 0;
89 dev->link = 1;
90
91 dev->autoneg = AUTONEG_ENABLE;
92
93 dev->addr = addr;
94 dev->phy_id = phy_id;
95 dev->bus = bus;
96
97 dev->state = PHY_DOWN;
98
99 spin_lock_init(&dev->lock);
100
101 return dev;
102}
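
/* For example, a PHY reporting PHYSID1 = 0x0141 and PHYSID2 = 0x0c23
 * (a hypothetical revision) yields phy_id 0x01410c23, which the
 * m88e1101_driver in this patch matches through its 0xffffff00 mask. */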
103
104#ifdef CONFIG_PHYCONTROL
105/* phy_prepare_link:
106 *
107 * description: Tells the PHY infrastructure to handle the
108 * gory details on monitoring link status (whether through
109 * polling or an interrupt), and to call back to the
110 * connected device driver when the link status changes.
111 * If you want to monitor your own link state, don't call
112 * this function */
113void phy_prepare_link(struct phy_device *phydev,
114 void (*handler)(struct net_device *))
115{
116 phydev->adjust_link = handler;
117}
118
119/* phy_connect:
120 *
121 * description: Convenience function for connecting ethernet
122 * devices to PHY devices. The default behavior is for
123 * the PHY infrastructure to handle everything, and only notify
124 * the connected driver when the link status changes. If you
125 * don't want, or can't use the provided functionality, you may
126 * choose to call only the subset of functions which provide
127 * the desired functionality.
128 */
129struct phy_device *phy_connect(struct net_device *dev, const char *phy_id,
130 void (*handler)(struct net_device *), u32 flags)
131{
132 struct phy_device *phydev;
133
134 phydev = phy_attach(dev, phy_id, flags);
135
136 if (IS_ERR(phydev))
137 return phydev;
138
139 phy_prepare_link(phydev, handler);
140
141 phy_start_machine(phydev, NULL);
142
143 if (phydev->irq > 0)
144 phy_start_interrupts(phydev);
145
146 return phydev;
147}
148EXPORT_SYMBOL(phy_connect);
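
/* Typical use from a driver's open() routine; a sketch, where the id
 * string "phy0:1" assumes bus->id 0 and PHY address 1 (the "phy%d:%d"
 * format set up in mdiobus_register()), and my_adjust_link is an
 * illustrative link-change callback:
 *
 *	priv->phydev = phy_connect(ndev, "phy0:1", &my_adjust_link, 0);
 *	if (IS_ERR(priv->phydev))
 *		return PTR_ERR(priv->phydev);
 */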
149
150void phy_disconnect(struct phy_device *phydev)
151{
152 if (phydev->irq > 0)
153 phy_stop_interrupts(phydev);
154
155 phy_stop_machine(phydev);
156
157 phydev->adjust_link = NULL;
158
159 phy_detach(phydev);
160}
161EXPORT_SYMBOL(phy_disconnect);
162
163#endif /* CONFIG_PHYCONTROL */
164
165/* phy_attach:
166 *
167 * description: Called by drivers to attach to a particular PHY
168 * device. The phy_device is found, and properly hooked up
169 * to the phy_driver. If no driver is attached, then the
170 * genphy_driver is used. The phy_device is given a ptr to
171 * the attaching device, and given a callback for link status
172 * change. The phy_device is returned to the attaching
173 * driver.
174 */
175static int phy_compare_id(struct device *dev, void *data)
176{
177 return strcmp((char *)data, dev->bus_id) ? 0 : 1;
178}
179
180struct phy_device *phy_attach(struct net_device *dev,
181 const char *phy_id, u32 flags)
182{
183 struct bus_type *bus = &mdio_bus_type;
184 struct phy_device *phydev;
185 struct device *d;
186
187 /* Search the list of PHY devices on the mdio bus for the
188 * PHY with the requested name */
189 d = bus_find_device(bus, NULL, (void *)phy_id, phy_compare_id);
190
191 if (d) {
192 phydev = to_phy_device(d);
193 } else {
194 printk(KERN_ERR "%s not found\n", phy_id);
195 return ERR_PTR(-ENODEV);
196 }
197
198	/* Assume that if no driver is bound, none exists for this
199	 * PHY, and we should fall back to the genphy driver. */
200 if (NULL == d->driver) {
201 int err;
202 down_write(&d->bus->subsys.rwsem);
203 d->driver = &genphy_driver.driver;
204
205		err = d->driver->probe(d);
206
207		if (err >= 0)
208			device_bind_driver(d);
209		up_write(&d->bus->subsys.rwsem);
210		if (err < 0)
211			return ERR_PTR(err);
212 }
213
214 if (phydev->attached_dev) {
215 printk(KERN_ERR "%s: %s already attached\n",
216 dev->name, phy_id);
217 return ERR_PTR(-EBUSY);
218 }
219
220 phydev->attached_dev = dev;
221
222 phydev->dev_flags = flags;
223
224 return phydev;
225}
226EXPORT_SYMBOL(phy_attach);
227
228void phy_detach(struct phy_device *phydev)
229{
230 phydev->attached_dev = NULL;
231
232 /* If the device had no specific driver before (i.e. - it
233 * was using the generic driver), we unbind the device
234 * from the generic driver so that there's a chance a
235 * real driver could be loaded */
236 if (phydev->dev.driver == &genphy_driver.driver) {
237 down_write(&phydev->dev.bus->subsys.rwsem);
238 device_release_driver(&phydev->dev);
239 up_write(&phydev->dev.bus->subsys.rwsem);
240 }
241}
242EXPORT_SYMBOL(phy_detach);
243
244
245/* Generic PHY support and helper functions */
246
247/* genphy_config_advert
248 *
249 * description: Writes MII_ADVERTISE with the appropriate values,
250 * after sanitizing the values to make sure we only advertise
251 * what is supported
252 */
253int genphy_config_advert(struct phy_device *phydev)
254{
255 u32 advertise;
256 int adv;
257 int err;
258
259 /* Only allow advertising what
260 * this PHY supports */
261 phydev->advertising &= phydev->supported;
262 advertise = phydev->advertising;
263
264 /* Setup standard advertisement */
265 adv = phy_read(phydev, MII_ADVERTISE);
266
267 if (adv < 0)
268 return adv;
269
270 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
271 ADVERTISE_PAUSE_ASYM);
272 if (advertise & ADVERTISED_10baseT_Half)
273 adv |= ADVERTISE_10HALF;
274 if (advertise & ADVERTISED_10baseT_Full)
275 adv |= ADVERTISE_10FULL;
276 if (advertise & ADVERTISED_100baseT_Half)
277 adv |= ADVERTISE_100HALF;
278 if (advertise & ADVERTISED_100baseT_Full)
279 adv |= ADVERTISE_100FULL;
280 if (advertise & ADVERTISED_Pause)
281 adv |= ADVERTISE_PAUSE_CAP;
282 if (advertise & ADVERTISED_Asym_Pause)
283 adv |= ADVERTISE_PAUSE_ASYM;
284
285 err = phy_write(phydev, MII_ADVERTISE, adv);
286
287 if (err < 0)
288 return err;
289
290 /* Configure gigabit if it's supported */
291 if (phydev->supported & (SUPPORTED_1000baseT_Half |
292 SUPPORTED_1000baseT_Full)) {
293 adv = phy_read(phydev, MII_CTRL1000);
294
295 if (adv < 0)
296 return adv;
297
298 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
299		if (advertise & ADVERTISED_1000baseT_Half)
300 adv |= ADVERTISE_1000HALF;
301		if (advertise & ADVERTISED_1000baseT_Full)
302 adv |= ADVERTISE_1000FULL;
303 err = phy_write(phydev, MII_CTRL1000, adv);
304
305 if (err < 0)
306 return err;
307 }
308
309 return adv;
310}
311EXPORT_SYMBOL(genphy_config_advert);
312
313/* genphy_setup_forced
314 *
315 * description: Configures MII_BMCR to force speed/duplex
316 * to the values in phydev. Assumes that the values are valid.
317 * Please see phy_sanitize_settings() */
318int genphy_setup_forced(struct phy_device *phydev)
319{
320 int ctl = BMCR_RESET;
321
322 phydev->pause = phydev->asym_pause = 0;
323
324 if (SPEED_1000 == phydev->speed)
325 ctl |= BMCR_SPEED1000;
326 else if (SPEED_100 == phydev->speed)
327 ctl |= BMCR_SPEED100;
328
329 if (DUPLEX_FULL == phydev->duplex)
330 ctl |= BMCR_FULLDPLX;
331
332 ctl = phy_write(phydev, MII_BMCR, ctl);
333
334 if (ctl < 0)
335 return ctl;
336
337 /* We just reset the device, so we'd better configure any
338 * settings the PHY requires to operate */
339 if (phydev->drv->config_init)
340 ctl = phydev->drv->config_init(phydev);
341
342 return ctl;
343}
344
345
346/* Enable and Restart Autonegotiation */
347int genphy_restart_aneg(struct phy_device *phydev)
348{
349 int ctl;
350
351 ctl = phy_read(phydev, MII_BMCR);
352
353 if (ctl < 0)
354 return ctl;
355
356 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
357
358 /* Don't isolate the PHY if we're negotiating */
359 ctl &= ~(BMCR_ISOLATE);
360
361 ctl = phy_write(phydev, MII_BMCR, ctl);
362
363 return ctl;
364}
365
366
367/* genphy_config_aneg
368 *
369 * description: If auto-negotiation is enabled, we configure the
370 * advertising, and then restart auto-negotiation. If it is not
371 * enabled, then we write the BMCR
372 */
373int genphy_config_aneg(struct phy_device *phydev)
374{
375 int err = 0;
376
377 if (AUTONEG_ENABLE == phydev->autoneg) {
378 err = genphy_config_advert(phydev);
379
380 if (err < 0)
381 return err;
382
383 err = genphy_restart_aneg(phydev);
384 } else
385 err = genphy_setup_forced(phydev);
386
387 return err;
388}
389EXPORT_SYMBOL(genphy_config_aneg);
390
391/* genphy_update_link
392 *
393 * description: Update the value in phydev->link to reflect the
394 * current link value. In order to do this, we need to read
395 * the status register twice, keeping the second value
396 */
397int genphy_update_link(struct phy_device *phydev)
398{
399 int status;
400
401	/* Do a fake read; the link bit in BMSR is latched low, so the first read flushes any stale link-down state */
402 status = phy_read(phydev, MII_BMSR);
403
404 if (status < 0)
405 return status;
406
407 /* Read link and autonegotiation status */
408 status = phy_read(phydev, MII_BMSR);
409
410 if (status < 0)
411 return status;
412
413 if ((status & BMSR_LSTATUS) == 0)
414 phydev->link = 0;
415 else
416 phydev->link = 1;
417
418 return 0;
419}
420
421/* genphy_read_status
422 *
423 * description: Check the link, then figure out the current state
424 * by comparing what we advertise with what the link partner
425 * advertises. Start by checking the gigabit possibilities,
426 * then move on to 10/100.
427 */
428int genphy_read_status(struct phy_device *phydev)
429{
430 int adv;
431 int err;
432 int lpa;
433 int lpagb = 0;
434
435 /* Update the link, but return if there
436 * was an error */
437 err = genphy_update_link(phydev);
438 if (err)
439 return err;
440
441 if (AUTONEG_ENABLE == phydev->autoneg) {
442 if (phydev->supported & (SUPPORTED_1000baseT_Half
443 | SUPPORTED_1000baseT_Full)) {
444 lpagb = phy_read(phydev, MII_STAT1000);
445
446 if (lpagb < 0)
447 return lpagb;
448
449 adv = phy_read(phydev, MII_CTRL1000);
450
451 if (adv < 0)
452 return adv;
453
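		/* The link partner's gigabit abilities in MII_STAT1000 sit two
		 * bits above the matching MII_CTRL1000 advertisement bits
		 * (e.g. LPA_1000FULL = ADVERTISE_1000FULL << 2), hence the
		 * shift before masking */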
454 lpagb &= adv << 2;
455 }
456
457 lpa = phy_read(phydev, MII_LPA);
458
459 if (lpa < 0)
460 return lpa;
461
462 adv = phy_read(phydev, MII_ADVERTISE);
463
464 if (adv < 0)
465 return adv;
466
467 lpa &= adv;
468
469 phydev->speed = SPEED_10;
470 phydev->duplex = DUPLEX_HALF;
471 phydev->pause = phydev->asym_pause = 0;
472
473 if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
474 phydev->speed = SPEED_1000;
475
476 if (lpagb & LPA_1000FULL)
477 phydev->duplex = DUPLEX_FULL;
478 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
479 phydev->speed = SPEED_100;
480
481 if (lpa & LPA_100FULL)
482 phydev->duplex = DUPLEX_FULL;
483 } else
484 if (lpa & LPA_10FULL)
485 phydev->duplex = DUPLEX_FULL;
486
487		if (phydev->duplex == DUPLEX_FULL) {
488 phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
489 phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
490 }
491 } else {
492 int bmcr = phy_read(phydev, MII_BMCR);
493 if (bmcr < 0)
494 return bmcr;
495
496 if (bmcr & BMCR_FULLDPLX)
497 phydev->duplex = DUPLEX_FULL;
498 else
499 phydev->duplex = DUPLEX_HALF;
500
501 if (bmcr & BMCR_SPEED1000)
502 phydev->speed = SPEED_1000;
503 else if (bmcr & BMCR_SPEED100)
504 phydev->speed = SPEED_100;
505 else
506 phydev->speed = SPEED_10;
507
508 phydev->pause = phydev->asym_pause = 0;
509 }
510
511 return 0;
512}
513EXPORT_SYMBOL(genphy_read_status);
514
515static int genphy_config_init(struct phy_device *phydev)
516{
517	int val;
518 u32 features;
519
520 /* For now, I'll claim that the generic driver supports
521 * all possible port types */
522 features = (SUPPORTED_TP | SUPPORTED_MII
523 | SUPPORTED_AUI | SUPPORTED_FIBRE |
524 SUPPORTED_BNC);
525
526 /* Do we support autonegotiation? */
527 val = phy_read(phydev, MII_BMSR);
528
529 if (val < 0)
530 return val;
531
532 if (val & BMSR_ANEGCAPABLE)
533 features |= SUPPORTED_Autoneg;
534
535 if (val & BMSR_100FULL)
536 features |= SUPPORTED_100baseT_Full;
537 if (val & BMSR_100HALF)
538 features |= SUPPORTED_100baseT_Half;
539 if (val & BMSR_10FULL)
540 features |= SUPPORTED_10baseT_Full;
541 if (val & BMSR_10HALF)
542 features |= SUPPORTED_10baseT_Half;
543
544 if (val & BMSR_ESTATEN) {
545 val = phy_read(phydev, MII_ESTATUS);
546
547 if (val < 0)
548 return val;
549
550 if (val & ESTATUS_1000_TFULL)
551 features |= SUPPORTED_1000baseT_Full;
552 if (val & ESTATUS_1000_THALF)
553 features |= SUPPORTED_1000baseT_Half;
554 }
555
556 phydev->supported = features;
557 phydev->advertising = features;
558
559 return 0;
560}
561
562
563/* phy_probe
564 *
565 * description: Take care of setting up the phy_device structure,
566 * set the state to READY (the driver's init function should
567 * set it to STARTING if needed).
568 */
569static int phy_probe(struct device *dev)
570{
571 struct phy_device *phydev;
572 struct phy_driver *phydrv;
573 struct device_driver *drv;
574 int err = 0;
575
576 phydev = to_phy_device(dev);
577
578 /* Make sure the driver is held.
579 * XXX -- Is this correct? */
580 drv = get_driver(phydev->dev.driver);
581 phydrv = to_phy_driver(drv);
582 phydev->drv = phydrv;
583
584 /* Disable the interrupt if the PHY doesn't support it */
585 if (!(phydrv->flags & PHY_HAS_INTERRUPT))
586 phydev->irq = PHY_POLL;
587
588 spin_lock(&phydev->lock);
589
590 /* Start out supporting everything. Eventually,
591 * a controller will attach, and may modify one
592 * or both of these values */
593 phydev->supported = phydrv->features;
594 phydev->advertising = phydrv->features;
595
596 /* Set the state to READY by default */
597 phydev->state = PHY_READY;
598
599 if (phydev->drv->probe)
600 err = phydev->drv->probe(phydev);
601
602 spin_unlock(&phydev->lock);
603
604 if (err < 0)
605 return err;
606
607 if (phydev->drv->config_init)
608 err = phydev->drv->config_init(phydev);
609
610 return err;
611}
612
613static int phy_remove(struct device *dev)
614{
615 struct phy_device *phydev;
616
617 phydev = to_phy_device(dev);
618
619 spin_lock(&phydev->lock);
620 phydev->state = PHY_DOWN;
621 spin_unlock(&phydev->lock);
622
623 if (phydev->drv->remove)
624 phydev->drv->remove(phydev);
625
626 put_driver(dev->driver);
627 phydev->drv = NULL;
628
629 return 0;
630}
631
632int phy_driver_register(struct phy_driver *new_driver)
633{
634 int retval;
635
636 memset(&new_driver->driver, 0, sizeof(new_driver->driver));
637 new_driver->driver.name = new_driver->name;
638 new_driver->driver.bus = &mdio_bus_type;
639 new_driver->driver.probe = phy_probe;
640 new_driver->driver.remove = phy_remove;
641
642 retval = driver_register(&new_driver->driver);
643
644 if (retval) {
645 printk(KERN_ERR "%s: Error %d in registering driver\n",
646 new_driver->name, retval);
647
648 return retval;
649 }
650
651 pr_info("%s: Registered new driver\n", new_driver->name);
652
653 return 0;
654}
655EXPORT_SYMBOL(phy_driver_register);
656
657void phy_driver_unregister(struct phy_driver *drv)
658{
659 driver_unregister(&drv->driver);
660}
661EXPORT_SYMBOL(phy_driver_unregister);
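/*
 * Editor's note: for a complete worked example of a driver built on
 * phy_driver_register()/phy_driver_unregister(), see
 * drivers/net/phy/qsemi.c, added later in this same patch.
 */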
662
663static struct phy_driver genphy_driver = {
664 .phy_id = 0xffffffff,
665 .phy_id_mask = 0xffffffff,
666 .name = "Generic PHY",
667 .config_init = genphy_config_init,
668 .features = 0,
669 .config_aneg = genphy_config_aneg,
670 .read_status = genphy_read_status,
671	.driver		= { .owner = THIS_MODULE, },
672};
673
674static int __init phy_init(void)
675{
676 int rc;
677
678 rc = mdio_bus_init();
679 if (rc)
680 return rc;
681
682 rc = phy_driver_register(&genphy_driver);
683 if (rc)
684 mdio_bus_exit();
685
686 return rc;
687}
688
689static void __exit phy_exit(void)
690{
691 phy_driver_unregister(&genphy_driver);
692 mdio_bus_exit();
693}
694
695subsys_initcall(phy_init);
696module_exit(phy_exit);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
new file mode 100644
index 000000000000..d461ba457631
--- /dev/null
+++ b/drivers/net/phy/qsemi.c
@@ -0,0 +1,143 @@
1/*
2 * drivers/net/phy/qsemi.c
3 *
4 * Driver for Quality Semiconductor PHYs
5 *
6 * Author: Andy Fleming
7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/phy.h>
36
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41/* ------------------------------------------------------------------------- */
42/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
43
44/* register definitions */
45
46#define MII_QS6612_MCR 17 /* Mode Control Register */
47#define MII_QS6612_FTR 27 /* Factory Test Register */
48#define MII_QS6612_MCO 28 /* Misc. Control Register */
49#define MII_QS6612_ISR 29 /* Interrupt Source Register */
50#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
51#define MII_QS6612_IMR_INIT 0x003a
52#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
53
54#define QS6612_PCR_AN_COMPLETE 0x1000
55#define QS6612_PCR_RLBEN 0x0200
56#define QS6612_PCR_DCREN 0x0100
57#define QS6612_PCR_4B5BEN 0x0040
58#define QS6612_PCR_TX_ISOLATE 0x0020
59#define QS6612_PCR_MLT3_DIS 0x0002
60#define QS6612_PCR_SCRM_DESCRM 0x0001
61
62MODULE_DESCRIPTION("Quality Semiconductor PHY driver");
63MODULE_AUTHOR("Andy Fleming");
64MODULE_LICENSE("GPL");
65
66/* Returns 0, unless there's a write error */
67static int qs6612_config_init(struct phy_device *phydev)
68{
69 /* The PHY powers up isolated on the RPX,
70 * so send a command to allow operation.
71 * XXX - My docs indicate this should be 0x0940
72 * ...or something. The current value sets three
73 * reserved bits, bit 11, which specifies it should be
74 * set to one, bit 10, which specifies it should be set
75 * to 0, and bit 7, which doesn't specify. However, my
76 * docs are preliminary, and I will leave it like this
77	 * until someone more knowledgeable corrects me or it.
78 * -- Andy Fleming
79 */
80 return phy_write(phydev, MII_QS6612_PCR, 0x0dc0);
81}
82
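/* Editor's note: the three reads in qs6612_ack_interrupt() below look
 * like "acknowledge by reading" -- the QS6612's latched interrupt
 * source and status bits are presumably cleared on read, which
 * de-asserts the interrupt line. */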
83static int qs6612_ack_interrupt(struct phy_device *phydev)
84{
85 int err;
86
87 err = phy_read(phydev, MII_QS6612_ISR);
88
89 if (err < 0)
90 return err;
91
92 err = phy_read(phydev, MII_BMSR);
93
94 if (err < 0)
95 return err;
96
97 err = phy_read(phydev, MII_EXPANSION);
98
99 if (err < 0)
100 return err;
101
102 return 0;
103}
104
105static int qs6612_config_intr(struct phy_device *phydev)
106{
107 int err;
108 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
109 err = phy_write(phydev, MII_QS6612_IMR,
110 MII_QS6612_IMR_INIT);
111 else
112 err = phy_write(phydev, MII_QS6612_IMR, 0);
113
114 return err;
115
116}
117
118static struct phy_driver qs6612_driver = {
119 .phy_id = 0x00181440,
120 .name = "QS6612",
121 .phy_id_mask = 0xfffffff0,
122 .features = PHY_BASIC_FEATURES,
123 .flags = PHY_HAS_INTERRUPT,
124 .config_init = qs6612_config_init,
125 .config_aneg = genphy_config_aneg,
126 .read_status = genphy_read_status,
127 .ack_interrupt = qs6612_ack_interrupt,
128 .config_intr = qs6612_config_intr,
129	.driver		= { .owner = THIS_MODULE, },
130};
131
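/*
 * Editor's sketch, not part of the patch: the MDIO bus presumably
 * binds this driver by masked ID comparison.  The hypothetical helper
 * below illustrates the effect of .phy_id_mask above: 0xfffffff0
 * ignores the low four ID bits, which conventionally carry the
 * silicon revision.
 */
static inline int qs6612_id_matches(u32 phy_id)
{
	return (phy_id & 0xfffffff0) == (0x00181440 & 0xfffffff0);
}
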
132static int __init qs6612_init(void)
133{
134 return phy_driver_register(&qs6612_driver);
135}
136
137static void __exit qs6612_exit(void)
138{
139 phy_driver_unregister(&qs6612_driver);
140}
141
142module_init(qs6612_init);
143module_exit(qs6612_exit);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index a32668e88e09..bb71638a7c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1657,7 +1657,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		skb->dev = ppp->dev;
 		skb->protocol = htons(npindex_to_ethertype[npi]);
 		skb->mac.raw = skb->data;
-		skb->input_dev = ppp->dev;
 		netif_rx(skb);
 		ppp->dev->last_rx = jiffies;
 	}
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index ce1a9bf7b9a7..82f236cc3b9b 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -377,7 +377,8 @@ abort_kfree:
  ***********************************************************************/
 static int pppoe_rcv(struct sk_buff *skb,
 		     struct net_device *dev,
-		     struct packet_type *pt)
+		     struct packet_type *pt,
+		     struct net_device *orig_dev)
 
 {
@@ -426,7 +427,8 @@ out:
  ***********************************************************************/
 static int pppoe_disc_rcv(struct sk_buff *skb,
 			  struct net_device *dev,
-			  struct packet_type *pt)
+			  struct packet_type *pt,
+			  struct net_device *orig_dev)
 
 {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d5afe05cd826..f0471d102e3c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,6 +187,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), },
 	{ PCI_DEVICE(0x16ec,			0x0116), },
+	{ PCI_VENDOR_ID_LINKSYS,		0x1032, PCI_ANY_ID, 0x0024, },
 	{0,},
 };
 
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 12a86f96d973..ec1a18d189a1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1429,6 +1429,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct rr_private *rrpriv = netdev_priv(dev);
 	struct rr_regs __iomem *regs = rrpriv->regs;
+	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
 	struct ring_ctrl *txctrl;
 	unsigned long flags;
 	u32 index, len = skb->len;
@@ -1460,7 +1461,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ifield = (u32 *)skb_push(skb, 8);
 
 		ifield[0] = 0;
-		ifield[1] = skb->private.ifield;
+		ifield[1] = hcb->ifield;
 
 		/*
 		 * We don't need the lock before we are actually going to start
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7092ca6b277e..2234a8f05eb2 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -62,6 +62,7 @@ typedef struct _XENA_dev_config {
 #define ADAPTER_STATUS_RMAC_REMOTE_FAULT   BIT(6)
 #define ADAPTER_STATUS_RMAC_LOCAL_FAULT    BIT(7)
 #define ADAPTER_STATUS_RMAC_PCC_IDLE       vBIT(0xFF,8,8)
+#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE  vBIT(0x0F,8,8)
 #define ADAPTER_STATUS_RC_PRC_QUIESCENT    vBIT(0xFF,16,8)
 #define ADAPTER_STATUS_MC_DRAM_READY       BIT(24)
 #define ADAPTER_STATUS_MC_QUEUES_READY     BIT(25)
@@ -77,21 +78,34 @@ typedef struct _XENA_dev_config {
 #define ADAPTER_ECC_EN             BIT(55)
 
 	u64 serr_source;
 #define SERR_SOURCE_PIC            BIT(0)
 #define SERR_SOURCE_TXDMA          BIT(1)
 #define SERR_SOURCE_RXDMA          BIT(2)
 #define SERR_SOURCE_MAC            BIT(3)
 #define SERR_SOURCE_MC             BIT(4)
 #define SERR_SOURCE_XGXS           BIT(5)
 #define SERR_SOURCE_ANY            (SERR_SOURCE_PIC | \
 				    SERR_SOURCE_TXDMA | \
 				    SERR_SOURCE_RXDMA | \
 				    SERR_SOURCE_MAC | \
 				    SERR_SOURCE_MC | \
 				    SERR_SOURCE_XGXS)
 
-
-	u8 unused_0[0x800 - 0x120];
+	u64 pci_mode;
+#define GET_PCI_MODE(val)	((val & vBIT(0xF, 0, 4)) >> 60)
+#define PCI_MODE_PCI_33		0
+#define PCI_MODE_PCI_66		0x1
+#define PCI_MODE_PCIX_M1_66	0x2
+#define PCI_MODE_PCIX_M1_100	0x3
+#define PCI_MODE_PCIX_M1_133	0x4
+#define PCI_MODE_PCIX_M2_66	0x5
+#define PCI_MODE_PCIX_M2_100	0x6
+#define PCI_MODE_PCIX_M2_133	0x7
+#define PCI_MODE_UNSUPPORTED	BIT(0)
+#define PCI_MODE_32_BITS	BIT(8)
+#define PCI_MODE_UNKNOWN_MODE	BIT(9)
+
+	u8 unused_0[0x800 - 0x128];
 
 /* PCI-X Controller registers */
 	u64 pic_int_status;
@@ -153,7 +167,11 @@ typedef struct _XENA_dev_config {
 	u8 unused4[0x08];
 
 	u64 gpio_int_reg;
+#define GPIO_INT_REG_LINK_DOWN		BIT(1)
+#define GPIO_INT_REG_LINK_UP		BIT(2)
 	u64 gpio_int_mask;
+#define GPIO_INT_MASK_LINK_DOWN		BIT(1)
+#define GPIO_INT_MASK_LINK_UP		BIT(2)
 	u64 gpio_alarms;
 
 	u8 unused5[0x38];
@@ -223,19 +241,16 @@ typedef struct _XENA_dev_config {
 	u64 xmsi_data;
 
 	u64 rx_mat;
+#define RX_MAT_SET(ring, msi)	vBIT(msi, (8 * ring), 8)
 
 	u8 unused6[0x8];
 
-	u64 tx_mat0_7;
-	u64 tx_mat8_15;
-	u64 tx_mat16_23;
-	u64 tx_mat24_31;
-	u64 tx_mat32_39;
-	u64 tx_mat40_47;
-	u64 tx_mat48_55;
-	u64 tx_mat56_63;
+	u64 tx_mat0_n[0x8];
+#define TX_MAT_SET(fifo, msi)	vBIT(msi, (8 * fifo), 8)
 
-	u8 unused_1[0x10];
+	u8 unused_1[0x8];
+	u64 stat_byte_cnt;
+#define STAT_BC(n)		vBIT(n,4,12)
 
 	/* Automated statistics collection */
 	u64 stat_cfg;
@@ -246,6 +261,7 @@ typedef struct _XENA_dev_config {
 #define STAT_TRSF_PER(n)           TBD
 #define PER_SEC                    0x208d5
 #define SET_UPDT_PERIOD(n)         vBIT((PER_SEC*n),32,32)
+#define SET_UPDT_CLICKS(val)       vBIT(val, 32, 32)
 
 	u64 stat_addr;
 
@@ -267,8 +283,15 @@ typedef struct _XENA_dev_config {
 
 	u64 gpio_control;
 #define GPIO_CTRL_GPIO_0	BIT(8)
+	u64 misc_control;
+#define MISC_LINK_STABILITY_PRD(val)	vBIT(val,29,3)
+
+	u8 unused7_1[0x240 - 0x208];
+
+	u64 wreq_split_mask;
+#define WREQ_SPLIT_MASK_SET_MASK(val)	vBIT(val, 52, 12)
 
-	u8 unused7[0x600];
+	u8 unused7_2[0x800 - 0x248];
 
 /* TxDMA registers */
 	u64 txdma_int_status;
@@ -290,6 +313,7 @@ typedef struct _XENA_dev_config {
 
 	u64 pcc_err_reg;
 #define PCC_FB_ECC_DB_ERR	vBIT(0xFF, 16, 8)
+#define PCC_ENABLE_FOUR		vBIT(0x0F,0,8)
 
 	u64 pcc_err_mask;
 	u64 pcc_err_alarm;
@@ -468,6 +492,7 @@ typedef struct _XENA_dev_config {
 #define PRC_CTRL_NO_SNOOP       (BIT(22)|BIT(23))
 #define PRC_CTRL_NO_SNOOP_DESC  BIT(22)
 #define PRC_CTRL_NO_SNOOP_BUFF  BIT(23)
+#define PRC_CTRL_BIMODAL_INTERRUPT	BIT(37)
 #define PRC_CTRL_RXD_BACKOFF_INTERVAL(val)	vBIT(val,40,24)
 
 	u64 prc_alarm_action;
@@ -691,6 +716,10 @@ typedef struct _XENA_dev_config {
 #define MC_ERR_REG_MIRI_CRI_ERR_0	BIT(22)
 #define MC_ERR_REG_MIRI_CRI_ERR_1	BIT(23)
 #define MC_ERR_REG_SM_ERR		BIT(31)
+#define MC_ERR_REG_ECC_ALL_SNG		(BIT(6) | \
+					 BIT(7) | BIT(17) | BIT(19))
+#define MC_ERR_REG_ECC_ALL_DBL		(BIT(14) | \
+					 BIT(15) | BIT(18) | BIT(20))
 	u64 mc_err_mask;
 	u64 mc_err_alarm;
 
@@ -736,7 +765,19 @@ typedef struct _XENA_dev_config {
 	u64 mc_rldram_test_d1;
 	u8 unused24[0x300 - 0x288];
 	u64 mc_rldram_test_d2;
-	u8 unused25[0x700 - 0x308];
+
+	u8 unused24_1[0x360 - 0x308];
+	u64 mc_rldram_ctrl;
+#define MC_RLDRAM_ENABLE_ODT		BIT(7)
+
+	u8 unused24_2[0x640 - 0x368];
+	u64 mc_rldram_ref_per_herc;
+#define MC_RLDRAM_SET_REF_PERIOD(val)	vBIT(val, 0, 16)
+
+	u8 unused24_3[0x660 - 0x648];
+	u64 mc_rldram_mrs_herc;
+
+	u8 unused25[0x700 - 0x668];
 	u64 mc_debug_ctrl;
 
 	u8 unused26[0x3000 - 0x2f08];
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ea638b162d3f..7ca78228b104 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -11,29 +11,28 @@
  * See the file COPYING in this distribution for more information.
  *
  * Credits:
  * Jeff Garzik		: For pointing out the improper error condition
  *			  check in the s2io_xmit routine and also some
  *			  issues in the Tx watch dog function. Also for
  *			  patiently answering all those innumerable
  *			  questions regarding the 2.6 porting issues.
  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  *			  macros available only in 2.6 Kernel.
  * Francois Romieu	: For pointing out all code parts that were
  *			  deprecated and also styling related comments.
  * Grant Grundler	: For helping me get rid of some Architecture
  *			  dependent code.
  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
  *
  * The module loadable parameters that are supported by the driver and a brief
  * explanation of all the variables.
  * rx_ring_num : This can be used to program the number of receive rings used
  * in the driver.
  * rx_ring_len: This defines the number of descriptors each ring can have. This
  * is also an array of size 8.
  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  * Tx descriptors that can be associated with each corresponding FIFO.
- * in PCI Configuration space.
  ************************************************************************/
 
 #include <linux/config.h>
@@ -56,27 +55,39 @@
 #include <linux/ethtool.h>
 #include <linux/version.h>
 #include <linux/workqueue.h>
+#include <linux/if_vlan.h>
 
-#include <asm/io.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/io.h>
 
 /* local include */
 #include "s2io.h"
 #include "s2io-regs.h"
 
 /* S2io Driver name & version. */
-static char s2io_driver_name[] = "s2io";
-static char s2io_driver_version[] = "Version 1.7.7.1";
+static char s2io_driver_name[] = "Neterion";
+static char s2io_driver_version[] = "Version 2.0.3.1";
+
+static inline int RXD_IS_UP2DT(RxD_t *rxdp)
+{
+	int ret;
+
+	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
+
+	return ret;
+}
 
 /*
  * Cards with following subsystem_id have a link state indication
  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
  * macro below identifies these cards given the subsystem_id.
  */
-#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
-		(((subid >= 0x600B) && (subid <= 0x600D)) || \
-		 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
+	(dev_type == XFRAME_I_DEVICE) ?			\
+		((((subid >= 0x600B) && (subid <= 0x600D)) || \
+		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
 
 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
@@ -86,9 +97,12 @@ static char s2io_driver_version[] = "Version 1.7.7.1";
 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
 {
 	int level = 0;
-	if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
+	mac_info_t *mac_control;
+
+	mac_control = &sp->mac_control;
+	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
 		level = LOW;
-		if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
+		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
 			level = PANIC;
 		}
 	}
@@ -145,6 +159,9 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
145 {"rmac_pause_cnt"}, 159 {"rmac_pause_cnt"},
146 {"rmac_accepted_ip"}, 160 {"rmac_accepted_ip"},
147 {"rmac_err_tcp"}, 161 {"rmac_err_tcp"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
148}; 165};
149 166
150#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -153,8 +170,37 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 #define S2IO_TEST_LEN	sizeof(s2io_gstrings) / ETH_GSTRING_LEN
 #define S2IO_STRINGS_LEN	S2IO_TEST_LEN * ETH_GSTRING_LEN
 
+#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
+			init_timer(&timer);		\
+			timer.function = handle;	\
+			timer.data = (unsigned long) arg;	\
+			mod_timer(&timer, (jiffies + exp))	\
+
+/* Add the vlan */
+static void s2io_vlan_rx_register(struct net_device *dev,
+				  struct vlan_group *grp)
+{
+	nic_t *nic = dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nic->tx_lock, flags);
+	nic->vlgrp = grp;
+	spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
+
+/* Unregister the vlan */
+static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
+{
+	nic_t *nic = dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nic->tx_lock, flags);
+	if (nic->vlgrp)
+		nic->vlgrp->vlan_devices[vid] = NULL;
+	spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
 
 /*
  * Constants to be programmed into the Xena's registers, to configure
  * the XAUI.
  */
@@ -162,7 +208,28 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 #define	SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
 #define	END_SIGN	0x0
 
-static u64 default_mdio_cfg[] = {
+static u64 herc_act_dtx_cfg[] = {
+	/* Set address */
+	0x8000051536750000ULL, 0x80000515367500E0ULL,
+	/* Write data */
+	0x8000051536750004ULL, 0x80000515367500E4ULL,
+	/* Set address */
+	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+	/* Write data */
+	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+	/* Set address */
+	0x801205150D440000ULL, 0x801205150D4400E0ULL,
+	/* Write data */
+	0x801205150D440004ULL, 0x801205150D4400E4ULL,
+	/* Set address */
+	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+	/* Write data */
+	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+	/* Done */
+	END_SIGN
+};
+
+static u64 xena_mdio_cfg[] = {
 	/* Reset PMA PLL */
 	0xC001010000000000ULL, 0xC0010100000000E0ULL,
 	0xC0010100008000E4ULL,
@@ -172,7 +239,7 @@ static u64 default_mdio_cfg[] = {
 	END_SIGN
 };
 
-static u64 default_dtx_cfg[] = {
+static u64 xena_dtx_cfg[] = {
 	0x8000051500000000ULL, 0x80000515000000E0ULL,
 	0x80000515D93500E4ULL, 0x8001051500000000ULL,
 	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -196,8 +263,7 @@ static u64 default_dtx_cfg[] = {
 	END_SIGN
 };
 
-
 /*
  * Constants for Fixing the MacAddress problem seen mostly on
  * Alpha machines.
  */
@@ -226,20 +292,25 @@ static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
 static unsigned int rx_ring_num = 1;
 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int Stats_refresh_time = 4;
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int use_continuous_tx_intrs = 1;
 static unsigned int rmac_pause_time = 65535;
 static unsigned int mc_pause_threshold_q0q3 = 187;
 static unsigned int mc_pause_threshold_q4q7 = 187;
 static unsigned int shared_splits;
 static unsigned int tmac_util_period = 5;
 static unsigned int rmac_util_period = 5;
+static unsigned int bimodal = 0;
 #ifndef CONFIG_S2IO_NAPI
 static unsigned int indicate_max_pkts;
 #endif
+/* Frequency of Rx desc syncs expressed as power of 2 */
+static unsigned int rxsync_frequency = 3;
 
 /*
  * S2IO device table.
  * This table lists all the devices that this driver supports.
  */
 static struct pci_device_id s2io_tbl[] __devinitdata = {
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
@@ -247,9 +318,9 @@ static struct pci_device_id s2io_tbl[] __devinitdata = {
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
 	 PCI_ANY_ID, PCI_ANY_ID},
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
 	 PCI_ANY_ID, PCI_ANY_ID},
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
 	 PCI_ANY_ID, PCI_ANY_ID},
 	{0,}
 };
 
@@ -268,8 +339,8 @@ static struct pci_driver s2io_driver = {
 /**
  * init_shared_mem - Allocation and Initialization of Memory
  * @nic: Device private variable.
  * Description: The function allocates all the memory areas shared
  * between the NIC and the driver. This includes Tx descriptors,
  * Rx descriptors and the statistics block.
  */
 
@@ -279,11 +350,11 @@ static int init_shared_mem(struct s2io_nic *nic)
 	void *tmp_v_addr, *tmp_v_addr_next;
 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
 	RxD_block_t *pre_rxd_blk = NULL;
-	int i, j, blk_cnt;
+	int i, j, blk_cnt, rx_sz, tx_sz;
 	int lst_size, lst_per_page;
 	struct net_device *dev = nic->dev;
 #ifdef CONFIG_2BUFF_MODE
-	unsigned long tmp;
+	u64 tmp;
 	buffAdd_t *ba;
 #endif
 
@@ -300,36 +371,41 @@ static int init_shared_mem(struct s2io_nic *nic)
 		size += config->tx_cfg[i].fifo_len;
 	}
 	if (size > MAX_AVAILABLE_TXDS) {
-		DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
-			  dev->name);
-		DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
-		DBG_PRINT(ERR_DBG, "that can be used\n");
+		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
+			  __FUNCTION__);
+		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
 		return FAILURE;
 	}
 
 	lst_size = (sizeof(TxD_t) * config->max_txds);
+	tx_sz = lst_size * size;
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int fifo_len = config->tx_cfg[i].fifo_len;
 		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
-		nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
-		if (!nic->list_info[i]) {
+		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
+							  GFP_KERNEL);
+		if (!mac_control->fifos[i].list_info) {
 			DBG_PRINT(ERR_DBG,
 				  "Malloc failed for list_info\n");
 			return -ENOMEM;
 		}
-		memset(nic->list_info[i], 0, list_holder_size);
+		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
 	}
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 						lst_per_page);
-		mac_control->tx_curr_put_info[i].offset = 0;
-		mac_control->tx_curr_put_info[i].fifo_len =
+		mac_control->fifos[i].tx_curr_put_info.offset = 0;
+		mac_control->fifos[i].tx_curr_put_info.fifo_len =
 		    config->tx_cfg[i].fifo_len - 1;
-		mac_control->tx_curr_get_info[i].offset = 0;
-		mac_control->tx_curr_get_info[i].fifo_len =
+		mac_control->fifos[i].tx_curr_get_info.offset = 0;
+		mac_control->fifos[i].tx_curr_get_info.fifo_len =
 		    config->tx_cfg[i].fifo_len - 1;
+		mac_control->fifos[i].fifo_no = i;
+		mac_control->fifos[i].nic = nic;
+		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
+
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
 			dma_addr_t tmp_p;
@@ -345,16 +421,15 @@ static int init_shared_mem(struct s2io_nic *nic)
 			while (k < lst_per_page) {
 				int l = (j * lst_per_page) + k;
 				if (l == config->tx_cfg[i].fifo_len)
-					goto end_txd_alloc;
-				nic->list_info[i][l].list_virt_addr =
+					break;
+				mac_control->fifos[i].list_info[l].list_virt_addr =
 				    tmp_v + (k * lst_size);
-				nic->list_info[i][l].list_phy_addr =
+				mac_control->fifos[i].list_info[l].list_phy_addr =
 				    tmp_p + (k * lst_size);
 				k++;
 			}
 		}
 	}
-      end_txd_alloc:
 
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
@@ -367,21 +442,26 @@ static int init_shared_mem(struct s2io_nic *nic)
 			return FAILURE;
 		}
 		size += config->rx_cfg[i].num_rxd;
-		nic->block_count[i] =
+		mac_control->rings[i].block_count =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		nic->pkt_cnt[i] =
-		    config->rx_cfg[i].num_rxd - nic->block_count[i];
+		mac_control->rings[i].pkt_cnt =
+		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
 	}
+	size = (size * (sizeof(RxD_t)));
+	rx_sz = size;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rx_curr_get_info[i].block_index = 0;
-		mac_control->rx_curr_get_info[i].offset = 0;
-		mac_control->rx_curr_get_info[i].ring_len =
+		mac_control->rings[i].rx_curr_get_info.block_index = 0;
+		mac_control->rings[i].rx_curr_get_info.offset = 0;
+		mac_control->rings[i].rx_curr_get_info.ring_len =
 		    config->rx_cfg[i].num_rxd - 1;
-		mac_control->rx_curr_put_info[i].block_index = 0;
-		mac_control->rx_curr_put_info[i].offset = 0;
-		mac_control->rx_curr_put_info[i].ring_len =
+		mac_control->rings[i].rx_curr_put_info.block_index = 0;
+		mac_control->rings[i].rx_curr_put_info.offset = 0;
+		mac_control->rings[i].rx_curr_put_info.ring_len =
 		    config->rx_cfg[i].num_rxd - 1;
+		mac_control->rings[i].nic = nic;
+		mac_control->rings[i].ring_no = i;
+
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
 		/* Allocating all the Rx blocks */
@@ -395,32 +475,36 @@ static int init_shared_mem(struct s2io_nic *nic)
 						  &tmp_p_addr);
 			if (tmp_v_addr == NULL) {
 				/*
 				 * In case of failure, free_shared_mem()
 				 * is called, which should free any
 				 * memory that was alloced till the
 				 * failure happened.
 				 */
-				nic->rx_blocks[i][j].block_virt_addr =
+				mac_control->rings[i].rx_blocks[j].block_virt_addr =
 				    tmp_v_addr;
 				return -ENOMEM;
 			}
 			memset(tmp_v_addr, 0, size);
-			nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
-			nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
+			mac_control->rings[i].rx_blocks[j].block_virt_addr =
+				tmp_v_addr;
+			mac_control->rings[i].rx_blocks[j].block_dma_addr =
+				tmp_p_addr;
 		}
 		/* Interlinking all Rx Blocks */
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
+			tmp_v_addr =
+				mac_control->rings[i].rx_blocks[j].block_virt_addr;
 			tmp_v_addr_next =
-			    nic->rx_blocks[i][(j + 1) %
+				mac_control->rings[i].rx_blocks[(j + 1) %
 					      blk_cnt].block_virt_addr;
-			tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+			tmp_p_addr =
+				mac_control->rings[i].rx_blocks[j].block_dma_addr;
 			tmp_p_addr_next =
-			    nic->rx_blocks[i][(j + 1) %
+				mac_control->rings[i].rx_blocks[(j + 1) %
 					      blk_cnt].block_dma_addr;
 
 			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
 			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
 								 * marker.
 								 */
 #ifndef CONFIG_2BUFF_MODE
@@ -433,43 +517,43 @@ static int init_shared_mem(struct s2io_nic *nic)
 	}
 
 #ifdef CONFIG_2BUFF_MODE
 	/*
 	 * Allocation of Storages for buffer addresses in 2BUFF mode
 	 * and the buffers as well.
 	 */
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
 				     GFP_KERNEL);
-		if (!nic->ba[i])
+		if (!mac_control->rings[i].ba)
 			return -ENOMEM;
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
+			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
 						 (MAX_RXDS_PER_BLOCK + 1)),
 						GFP_KERNEL);
-			if (!nic->ba[i][j])
+			if (!mac_control->rings[i].ba[j])
 				return -ENOMEM;
 			while (k != MAX_RXDS_PER_BLOCK) {
-				ba = &nic->ba[i][j][k];
+				ba = &mac_control->rings[i].ba[j][k];
 
-				ba->ba_0_org = kmalloc
+				ba->ba_0_org = (void *) kmalloc
 				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
 				if (!ba->ba_0_org)
 					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_0_org;
+				tmp = (u64) ba->ba_0_org;
 				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
+				tmp &= ~((u64) ALIGN_SIZE);
 				ba->ba_0 = (void *) tmp;
 
-				ba->ba_1_org = kmalloc
+				ba->ba_1_org = (void *) kmalloc
 				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
 				if (!ba->ba_1_org)
 					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_1_org;
+				tmp = (u64) ba->ba_1_org;
 				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
+				tmp &= ~((u64) ALIGN_SIZE);
 				ba->ba_1 = (void *) tmp;
 				k++;
 			}
@@ -483,9 +567,9 @@ static int init_shared_mem(struct s2io_nic *nic)
 	    (nic->pdev, size, &mac_control->stats_mem_phy);
 
 	if (!mac_control->stats_mem) {
 		/*
 		 * In case of failure, free_shared_mem() is called, which
 		 * should free any memory that was alloced till the
 		 * failure happened.
 		 */
 		return -ENOMEM;
@@ -495,15 +579,14 @@ static int init_shared_mem(struct s2io_nic *nic)
 	tmp_v_addr = mac_control->stats_mem;
 	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
 	memset(tmp_v_addr, 0, size);
-
 	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
 		  (unsigned long long) tmp_p_addr);
 
 	return SUCCESS;
 }
 
 /**
  * free_shared_mem - Free the allocated Memory
  * @nic: Device private variable.
  * Description: This function is to free all memory locations allocated by
  * the init_shared_mem() function and return it to the kernel.
@@ -533,15 +616,19 @@ static void free_shared_mem(struct s2io_nic *nic)
 						lst_per_page);
 		for (j = 0; j < page_num; j++) {
 			int mem_blks = (j * lst_per_page);
-			if (!nic->list_info[i][mem_blks].list_virt_addr)
+			if ((!mac_control->fifos[i].list_info) ||
+			    (!mac_control->fifos[i].list_info[mem_blks].
+			     list_virt_addr))
 				break;
 			pci_free_consistent(nic->pdev, PAGE_SIZE,
-					    nic->list_info[i][mem_blks].
+					    mac_control->fifos[i].
+					    list_info[mem_blks].
 					    list_virt_addr,
-					    nic->list_info[i][mem_blks].
+					    mac_control->fifos[i].
+					    list_info[mem_blks].
 					    list_phy_addr);
 		}
-		kfree(nic->list_info[i]);
+		kfree(mac_control->fifos[i].list_info);
 	}
 
 #ifndef CONFIG_2BUFF_MODE
@@ -550,10 +637,12 @@ static void free_shared_mem(struct s2io_nic *nic)
 	size = SIZE_OF_BLOCK;
 #endif
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = nic->block_count[i];
+		blk_cnt = mac_control->rings[i].block_count;
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
-			tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
+				block_virt_addr;
+			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
+				block_dma_addr;
 			if (tmp_v_addr == NULL)
 				break;
 			pci_free_consistent(nic->pdev, size,
@@ -566,35 +655,21 @@ static void free_shared_mem(struct s2io_nic *nic)
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		if (!nic->ba[i])
-			goto end_free;
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			if (!nic->ba[i][j]) {
-				kfree(nic->ba[i]);
-				goto end_free;
-			}
+			if (!mac_control->rings[i].ba[j])
+				continue;
 			while (k != MAX_RXDS_PER_BLOCK) {
-				buffAdd_t *ba = &nic->ba[i][j][k];
-				if (!ba || !ba->ba_0_org || !ba->ba_1_org)
-				{
-					kfree(nic->ba[i]);
-					kfree(nic->ba[i][j]);
-					if(ba->ba_0_org)
-						kfree(ba->ba_0_org);
-					if(ba->ba_1_org)
-						kfree(ba->ba_1_org);
-					goto end_free;
-				}
+				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
 				kfree(ba->ba_0_org);
 				kfree(ba->ba_1_org);
 				k++;
 			}
-			kfree(nic->ba[i][j]);
+			kfree(mac_control->rings[i].ba[j]);
 		}
-		kfree(nic->ba[i]);
+		if (mac_control->rings[i].ba)
+			kfree(mac_control->rings[i].ba);
 	}
-end_free:
 #endif
 
 	if (mac_control->stats_mem) {
@@ -605,12 +680,93 @@ end_free:
 	}
 }
 
 /**
- * init_nic - Initialization of hardware
+ * s2io_verify_pci_mode -
+ */
+
+static int s2io_verify_pci_mode(nic_t *nic)
+{
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	register u64 val64 = 0;
+	int mode;
+
+	val64 = readq(&bar0->pci_mode);
+	mode = (u8) GET_PCI_MODE(val64);
+
+	if (val64 & PCI_MODE_UNKNOWN_MODE)
+		return -1;	/* Unknown PCI mode */
+	return mode;
+}
+
+
+/**
+ * s2io_print_pci_mode -
+ */
+static int s2io_print_pci_mode(nic_t *nic)
+{
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	register u64 val64 = 0;
+	int mode;
+	struct config_param *config = &nic->config;
+
+	val64 = readq(&bar0->pci_mode);
+	mode = (u8) GET_PCI_MODE(val64);
+
+	if (val64 & PCI_MODE_UNKNOWN_MODE)
+		return -1;	/* Unknown PCI mode */
+
+	if (val64 & PCI_MODE_32_BITS) {
+		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
+	} else {
+		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
+	}
+
+	switch (mode) {
+	case PCI_MODE_PCI_33:
+		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
+		config->bus_speed = 33;
+		break;
+	case PCI_MODE_PCI_66:
+		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
+		config->bus_speed = 133;
+		break;
+	case PCI_MODE_PCIX_M1_66:
+		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
+		config->bus_speed = 133;	/* Herc doubles the clock rate */
+		break;
+	case PCI_MODE_PCIX_M1_100:
+		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
+		config->bus_speed = 200;
+		break;
+	case PCI_MODE_PCIX_M1_133:
+		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
+		config->bus_speed = 266;
+		break;
+	case PCI_MODE_PCIX_M2_66:
+		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
+		config->bus_speed = 133;
+		break;
+	case PCI_MODE_PCIX_M2_100:
+		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
+		config->bus_speed = 200;
+		break;
+	case PCI_MODE_PCIX_M2_133:
+		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
+		config->bus_speed = 266;
+		break;
+	default:
+		return -1;	/* Unsupported bus speed */
+	}
+
+	return mode;
+}
+
+/**
+ * init_nic - Initialization of hardware
  * @nic: device private variable
  * Description: The function sequentially configures every block
  * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
 
@@ -626,21 +782,32 @@ static int init_nic(struct s2io_nic *nic)
 	struct config_param *config;
 	int mdio_cnt = 0, dtx_cnt = 0;
 	unsigned long long mem_share;
+	int mem_size;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-	/* Initialize swapper control register */
+	/* to set the swapper control on the card */
 	if (s2io_set_swapper(nic)) {
 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
 		return -1;
 	}
 
+	/*
+	 * Herc requires EOI to be removed from reset before XGXS, so..
+	 */
+	if (nic->device_type & XFRAME_II_DEVICE) {
+		val64 = 0xA500000000ULL;
+		writeq(val64, &bar0->sw_reset);
+		msleep(500);
+		val64 = readq(&bar0->sw_reset);
+	}
+
 	/* Remove XGXS from reset state */
 	val64 = 0;
 	writeq(val64, &bar0->sw_reset);
-	val64 = readq(&bar0->sw_reset);
 	msleep(500);
+	val64 = readq(&bar0->sw_reset);
 
 	/* Enable Receiving broadcasts */
 	add = &bar0->mac_cfg;
@@ -660,48 +827,58 @@ static int init_nic(struct s2io_nic *nic)
 	val64 = dev->mtu;
 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
 
 	/*
 	 * Configuring the XAUI Interface of Xena.
 	 * ***************************************
 	 * To Configure the Xena's XAUI, one has to write a series
 	 * of 64 bit values into two registers in a particular
 	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
 	 * which will be defined in the array of configuration values
-	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
+	 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
 	 * to switch writing from one register to another. We continue
 	 * writing these values until we encounter the 'END_SIGN' macro.
 	 * For example, After making a series of 21 writes into
 	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
 	 * start writing into mdio_control until we encounter END_SIGN.
 	 */
-	while (1) {
-	      dtx_cfg:
-		while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
-			if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
-				dtx_cnt++;
-				goto mdio_cfg;
-			}
-			SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
-					  &bar0->dtx_control, UF);
-			val64 = readq(&bar0->dtx_control);
-			dtx_cnt++;
-		}
-	      mdio_cfg:
-		while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
-			if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
-				mdio_cnt++;
-				goto dtx_cfg;
-			}
-			SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
-					  &bar0->mdio_control, UF);
-			val64 = readq(&bar0->mdio_control);
-			mdio_cnt++;
-		}
-		if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
-		    (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
-			break;
-		} else {
-			goto dtx_cfg;
+	if (nic->device_type & XFRAME_II_DEVICE) {
+		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
+			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
+					  &bar0->dtx_control, UF);
+			if (dtx_cnt & 0x1)
+				msleep(1);	/* Necessary!! */
+			dtx_cnt++;
+		}
+	} else {
+		while (1) {
+		      dtx_cfg:
+			while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+				if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
+					dtx_cnt++;
+					goto mdio_cfg;
+				}
+				SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+						  &bar0->dtx_control, UF);
+				val64 = readq(&bar0->dtx_control);
+				dtx_cnt++;
+			}
+		      mdio_cfg:
+			while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
+				if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+					mdio_cnt++;
+					goto dtx_cfg;
+				}
+				SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
+						  &bar0->mdio_control, UF);
+				val64 = readq(&bar0->mdio_control);
+				mdio_cnt++;
+			}
+			if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
+			    (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
+				break;
+			} else {
+				goto dtx_cfg;
+			}
 		}
 	}
 
@@ -748,12 +925,20 @@ static int init_nic(struct s2io_nic *nic)
 	val64 |= BIT(0);	/* To enable the FIFO partition. */
 	writeq(val64, &bar0->tx_fifo_partition_0);
 
+	/*
+	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
+	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
+	 */
+	if ((nic->device_type == XFRAME_I_DEVICE) &&
+	    (get_xena_rev_id(nic->pdev) < 4))
+		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
+
 	val64 = readq(&bar0->tx_fifo_partition_0);
 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
 		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);
 
 	/*
 	 * Initialization of Tx_PA_CONFIG register to ignore packet
 	 * integrity checking.
 	 */
 	val64 = readq(&bar0->tx_pa_cfg);
@@ -770,85 +955,304 @@ static int init_nic(struct s2io_nic *nic)
770 } 955 }
771 writeq(val64, &bar0->rx_queue_priority); 956 writeq(val64, &bar0->rx_queue_priority);
772 957
773 /* 958 /*
774 * Allocating equal share of memory to all the 959 * Allocating equal share of memory to all the
775 * configured Rings. 960 * configured Rings.
776 */ 961 */
777 val64 = 0; 962 val64 = 0;
963 if (nic->device_type & XFRAME_II_DEVICE)
964 mem_size = 32;
965 else
966 mem_size = 64;
967
778 for (i = 0; i < config->rx_ring_num; i++) { 968 for (i = 0; i < config->rx_ring_num; i++) {
779 switch (i) { 969 switch (i) {
780 case 0: 970 case 0:
781 mem_share = (64 / config->rx_ring_num + 971 mem_share = (mem_size / config->rx_ring_num +
782 64 % config->rx_ring_num); 972 mem_size % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share); 973 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
784 continue; 974 continue;
785 case 1: 975 case 1:
786 mem_share = (64 / config->rx_ring_num); 976 mem_share = (mem_size / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share); 977 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
788 continue; 978 continue;
789 case 2: 979 case 2:
790 mem_share = (64 / config->rx_ring_num); 980 mem_share = (mem_size / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share); 981 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
792 continue; 982 continue;
793 case 3: 983 case 3:
794 mem_share = (64 / config->rx_ring_num); 984 mem_share = (mem_size / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share); 985 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
796 continue; 986 continue;
797 case 4: 987 case 4:
798 mem_share = (64 / config->rx_ring_num); 988 mem_share = (mem_size / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share); 989 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
800 continue; 990 continue;
801 case 5: 991 case 5:
802 mem_share = (64 / config->rx_ring_num); 992 mem_share = (mem_size / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share); 993 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
804 continue; 994 continue;
805 case 6: 995 case 6:
806 mem_share = (64 / config->rx_ring_num); 996 mem_share = (mem_size / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share); 997 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
808 continue; 998 continue;
809 case 7: 999 case 7:
810 mem_share = (64 / config->rx_ring_num); 1000 mem_share = (mem_size / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share); 1001 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
812 continue; 1002 continue;
813 } 1003 }
814 } 1004 }
815 writeq(val64, &bar0->rx_queue_cfg); 1005 writeq(val64, &bar0->rx_queue_cfg);
816 1006
817 /* 1007 /*
818 * Initializing the Tx round robin registers to 0. 1008 * Filling Tx round robin registers
819 * Filling Tx and Rx round robin registers as per the 1009 * as per the number of FIFOs
820 * number of FIFOs and Rings is still TODO.
821 */
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
827
828 /*
829 * TODO
830 * Disable Rx steering. Hard coding all packets be steered to
831 * Queue 0 for now.
832 */ 1010 */
833 val64 = 0x8080808080808080ULL; 1011 switch (config->tx_fifo_num) {
834 writeq(val64, &bar0->rts_qos_steering); 1012 case 1:
1013 val64 = 0x0000000000000000ULL;
1014 writeq(val64, &bar0->tx_w_round_robin_0);
1015 writeq(val64, &bar0->tx_w_round_robin_1);
1016 writeq(val64, &bar0->tx_w_round_robin_2);
1017 writeq(val64, &bar0->tx_w_round_robin_3);
1018 writeq(val64, &bar0->tx_w_round_robin_4);
1019 break;
1020 case 2:
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_0);
1023 val64 = 0x0100000100000100ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_1);
1025 val64 = 0x0001000001000001ULL;
1026 writeq(val64, &bar0->tx_w_round_robin_2);
1027 val64 = 0x0000010000010000ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_3);
1029 val64 = 0x0100000000000000ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_4);
1031 break;
1032 case 3:
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_0);
1035 val64 = 0x0001020000010001ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_1);
1037 val64 = 0x0200000100010200ULL;
1038 writeq(val64, &bar0->tx_w_round_robin_2);
1039 val64 = 0x0001000102000001ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_3);
1041 val64 = 0x0001020000000000ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_4);
1043 break;
1044 case 4:
1045 val64 = 0x0001020300010200ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_0);
1047 val64 = 0x0100000102030001ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_1);
1049 val64 = 0x0200010000010203ULL;
1050 writeq(val64, &bar0->tx_w_round_robin_2);
1051 val64 = 0x0001020001000001ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_3);
1053 val64 = 0x0203000100000000ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_4);
1055 break;
1056 case 5:
1057 val64 = 0x0001000203000102ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_0);
1059 val64 = 0x0001020001030004ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_1);
1061 val64 = 0x0001000203000102ULL;
1062 writeq(val64, &bar0->tx_w_round_robin_2);
1063 val64 = 0x0001020001030004ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_3);
1065 val64 = 0x0001000000000000ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_4);
1067 break;
1068 case 6:
1069 val64 = 0x0001020304000102ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_0);
1071 val64 = 0x0304050001020001ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_1);
1073 val64 = 0x0203000100000102ULL;
1074 writeq(val64, &bar0->tx_w_round_robin_2);
1075 val64 = 0x0304000102030405ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_3);
1077 val64 = 0x0001000200000000ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_4);
1079 break;
1080 case 7:
1081 val64 = 0x0001020001020300ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_0);
1083 val64 = 0x0102030400010203ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_1);
1085 val64 = 0x0405060001020001ULL;
1086 writeq(val64, &bar0->tx_w_round_robin_2);
1087 val64 = 0x0304050000010200ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_3);
1089 val64 = 0x0102030000000000ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_4);
1091 break;
1092 case 8:
1093 val64 = 0x0001020300040105ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_0);
1095 val64 = 0x0200030106000204ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_1);
1097 val64 = 0x0103000502010007ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_2);
1099 val64 = 0x0304010002060500ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_3);
1101 val64 = 0x0103020400000000ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_4);
1103 break;
1104 }
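
Each tx_w_round_robin register appears to hold eight one-byte slots, so the five registers together describe a 40-slot service schedule, one FIFO index per byte, most significant byte first. The per-case constants above are hand-weighted (note how the 2- and 3-FIFO tables favor FIFO 0) rather than generated from a formula; the sketch below only illustrates the byte-per-slot packing, under that assumption:

#include <stdint.h>
#include <stdio.h>

static void pack_schedule(const uint8_t slots[40], uint64_t regs[5])
{
	int r, b;

	for (r = 0; r < 5; r++) {
		regs[r] = 0;
		for (b = 0; b < 8; b++)	/* MSB-first, one FIFO per byte */
			regs[r] = (regs[r] << 8) | slots[r * 8 + b];
	}
}

int main(void)
{
	uint8_t slots[40];
	uint64_t regs[5];
	int i;

	for (i = 0; i < 40; i++)
		slots[i] = i % 3;	/* naive 3-FIFO rotation */
	pack_schedule(slots, regs);
	for (i = 0; i < 5; i++)
		printf("tx_w_round_robin_%d = 0x%016llx\n",
		       i, (unsigned long long) regs[i]);
	return 0;
}

A plain i % 3 rotation does not reproduce the case-3 constants above, which is consistent with the tables being tuned by hand.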
1105
1106 /* Filling the Rx round robin registers as per the
1107 * number of Rings and steering based on QoS.
1108 */
1109 switch (config->rx_ring_num) {
1110 case 1:
1111 val64 = 0x8080808080808080ULL;
1112 writeq(val64, &bar0->rts_qos_steering);
1113 break;
1114 case 2:
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_0);
1117 val64 = 0x0100000100000100ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_1);
1119 val64 = 0x0001000001000001ULL;
1120 writeq(val64, &bar0->rx_w_round_robin_2);
1121 val64 = 0x0000010000010000ULL;
1122 writeq(val64, &bar0->rx_w_round_robin_3);
1123 val64 = 0x0100000000000000ULL;
1124 writeq(val64, &bar0->rx_w_round_robin_4);
1125
1126 val64 = 0x8080808040404040ULL;
1127 writeq(val64, &bar0->rts_qos_steering);
1128 break;
1129 case 3:
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_0);
1132 val64 = 0x0001020000010001ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_1);
1134 val64 = 0x0200000100010200ULL;
1135 writeq(val64, &bar0->rx_w_round_robin_2);
1136 val64 = 0x0001000102000001ULL;
1137 writeq(val64, &bar0->rx_w_round_robin_3);
1138 val64 = 0x0001020000000000ULL;
1139 writeq(val64, &bar0->rx_w_round_robin_4);
1140
1141 val64 = 0x8080804040402020ULL;
1142 writeq(val64, &bar0->rts_qos_steering);
1143 break;
1144 case 4:
1145 val64 = 0x0001020300010200ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_0);
1147 val64 = 0x0100000102030001ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_1);
1149 val64 = 0x0200010000010203ULL;
1150 writeq(val64, &bar0->rx_w_round_robin_2);
1151 val64 = 0x0001020001000001ULL;
1152 writeq(val64, &bar0->rx_w_round_robin_3);
1153 val64 = 0x0203000100000000ULL;
1154 writeq(val64, &bar0->rx_w_round_robin_4);
1155
1156 val64 = 0x8080404020201010ULL;
1157 writeq(val64, &bar0->rts_qos_steering);
1158 break;
1159 case 5:
1160 val64 = 0x0001000203000102ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_0);
1162 val64 = 0x0001020001030004ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_1);
1164 val64 = 0x0001000203000102ULL;
1165 writeq(val64, &bar0->rx_w_round_robin_2);
1166 val64 = 0x0001020001030004ULL;
1167 writeq(val64, &bar0->rx_w_round_robin_3);
1168 val64 = 0x0001000000000000ULL;
1169 writeq(val64, &bar0->rx_w_round_robin_4);
1170
1171 val64 = 0x8080404020201008ULL;
1172 writeq(val64, &bar0->rts_qos_steering);
1173 break;
1174 case 6:
1175 val64 = 0x0001020304000102ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_0);
1177 val64 = 0x0304050001020001ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_1);
1179 val64 = 0x0203000100000102ULL;
1180 writeq(val64, &bar0->rx_w_round_robin_2);
1181 val64 = 0x0304000102030405ULL;
1182 writeq(val64, &bar0->rx_w_round_robin_3);
1183 val64 = 0x0001000200000000ULL;
1184 writeq(val64, &bar0->rx_w_round_robin_4);
1185
1186 val64 = 0x8080404020100804ULL;
1187 writeq(val64, &bar0->rts_qos_steering);
1188 break;
1189 case 7:
1190 val64 = 0x0001020001020300ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_0);
1192 val64 = 0x0102030400010203ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_1);
1194 val64 = 0x0405060001020001ULL;
1195 writeq(val64, &bar0->rx_w_round_robin_2);
1196 val64 = 0x0304050000010200ULL;
1197 writeq(val64, &bar0->rx_w_round_robin_3);
1198 val64 = 0x0102030000000000ULL;
1199 writeq(val64, &bar0->rx_w_round_robin_4);
1200
1201 val64 = 0x8080402010080402ULL;
1202 writeq(val64, &bar0->rts_qos_steering);
1203 break;
1204 case 8:
1205 val64 = 0x0001020300040105ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_0);
1207 val64 = 0x0200030106000204ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_1);
1209 val64 = 0x0103000502010007ULL;
1210 writeq(val64, &bar0->rx_w_round_robin_2);
1211 val64 = 0x0304010002060500ULL;
1212 writeq(val64, &bar0->rx_w_round_robin_3);
1213 val64 = 0x0103020400000000ULL;
1214 writeq(val64, &bar0->rx_w_round_robin_4);
1215
1216 val64 = 0x8040201008040201ULL;
1217 writeq(val64, &bar0->rts_qos_steering);
1218 break;
1219 }
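
The rts_qos_steering constants decompose as one byte per QoS priority (MSB first), each byte a one-hot ring mask with bit 7 selecting ring 0. Under that reading, most of the tables above spread the eight priorities evenly across the rings; the 5- and 6-ring cases are hand-adjusted and deviate slightly from the closed form. A sketch of the even-spread rule (assumed interpretation, not taken from the hardware manual):

#include <stdint.h>
#include <stdio.h>

static uint64_t qos_steering(unsigned int rings)
{
	uint64_t val = 0;
	unsigned int prio;

	for (prio = 0; prio < 8; prio++) {
		unsigned int ring = prio * rings / 8;	/* even spread */
		val = (val << 8) | (0x80u >> ring);	/* bit 7 == ring 0 */
	}
	return val;
}

int main(void)
{
	/* prints 0x8080404020201010, matching the 4-ring case above */
	printf("4 rings: 0x%016llx\n", (unsigned long long) qos_steering(4));
	return 0;
}

The same function reproduces the 1-, 2-, 3-, 7- and 8-ring constants as well.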
835 1220
836 /* UDP Fix */ 1221 /* UDP Fix */
837 val64 = 0; 1222 val64 = 0;
838 for (i = 1; i < 8; i++) 1223 for (i = 0; i < 8; i++)
1224 writeq(val64, &bar0->rts_frm_len_n[i]);
1225
1226 /* Set the default rts frame length for the rings configured */
1227 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1228 for (i = 0 ; i < config->rx_ring_num ; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]); 1229 writeq(val64, &bar0->rts_frm_len_n[i]);
840 1230
841 /* Set rts_frm_len register for fifo 0 */ 1231 /* Set the frame length for the configured rings
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22), 1232 * as desired by the user
843 &bar0->rts_frm_len_n[0]); 1233 */
1234 for (i = 0; i < config->rx_ring_num; i++) {
1235 /* If rts_frm_len[i] == 0 it is assumed that the user has not
1236 * specified frame-length steering.
1237 * If the user provides a frame length, program
1238 * the rts_frm_len register with that value; otherwise
1239 * leave it as it is.
1240 */
1241 if (rts_frm_len[i] != 0) {
1242 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1243 &bar0->rts_frm_len_n[i]);
1244 }
1245 }
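
The +22 overhead on top of the MTU is presumably the 14-byte Ethernet header, a 4-byte 802.1Q VLAN tag and the 4-byte FCS, so the register carries the largest on-wire frame the ring should see. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int overhead = 14 + 4 + 4;	/* eth hdr + VLAN tag + FCS */

	printf("rts_frm_len = %u\n", mtu + overhead);	/* 1522 */
	return 0;
}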
844 1246
845 /* Enable statistics */ 1247 /* Program statistics memory */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1248 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
850 1249
851 /* 1250 if (nic->device_type == XFRAME_II_DEVICE) {
1251 val64 = STAT_BC(0x320);
1252 writeq(val64, &bar0->stat_byte_cnt);
1253 }
1254
1255 /*
852 * Initializing the sampling rate for the device to calculate the 1256 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization. 1257 * bandwidth utilization.
854 */ 1258 */
@@ -857,30 +1261,38 @@ static int init_nic(struct s2io_nic *nic)
857 writeq(val64, &bar0->mac_link_util); 1261 writeq(val64, &bar0->mac_link_util);
858 1262
859 1263
860 /* 1264 /*
861 * Initializing the Transmit and Receive Traffic Interrupt 1265 * Initializing the Transmit and Receive Traffic Interrupt
862 * Scheme. 1266 * Scheme.
863 */ 1267 */
864 /* TTI Initialization. Default Tx timer gets us about 1268 /*
1269 * TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled 1270 * 250 interrupts per sec. Continuous interrupts are enabled
866 * by default. 1271 * by default.
867 */ 1272 */
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) | 1273 if (nic->device_type == XFRAME_II_DEVICE) {
869 TTI_DATA1_MEM_TX_URNG_A(0xA) | 1274 int count = (nic->config.bus_speed * 125)/2;
1275 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1276 } else {
1277
1278 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1279 }
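
On Xframe II the Tx timer is derived from the bus clock rather than hard-coded. Assuming config.bus_speed is in MHz, (133 * 125) / 2 = 8312 = 0x2078 — exactly the Xframe I default in the else branch — so the formula and the constant agree for a 133 MHz PCI-X bus; the RTI path further down uses /4 for roughly twice the interrupt rate:

#include <stdio.h>

int main(void)
{
	unsigned int bus_speed = 133;	/* MHz, assumed unit */
	unsigned int tti = (bus_speed * 125) / 2; /* ~250 Tx intrs/sec */
	unsigned int rti = (bus_speed * 125) / 4; /* ~500 Rx intrs/sec */

	printf("TTI timer 0x%x (Xframe I default is 0x2078)\n", tti);
	printf("RTI timer 0x%x\n", rti);
	return 0;
}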
1280 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1281 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN | 1282 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
872 TTI_DATA1_MEM_TX_TIMER_CI_EN; 1283 if (use_continuous_tx_intrs)
1284 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem); 1285 writeq(val64, &bar0->tti_data1_mem);
874 1286
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1287 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1288 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1289 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem); 1290 writeq(val64, &bar0->tti_data2_mem);
879 1291
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1292 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem); 1293 writeq(val64, &bar0->tti_command_mem);
882 1294
883 /* 1295 /*
884 * Once the operation completes, the Strobe bit of the command 1296 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition. 1297 * register will be reset. We poll for this particular condition.
886 * We wait for a maximum of 500ms for the operation to complete; 1298 * We wait for a maximum of 500ms for the operation to complete;
@@ -901,52 +1313,97 @@ static int init_nic(struct s2io_nic *nic)
901 time++; 1313 time++;
902 } 1314 }
903 1315
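
The strobe-poll idiom (read the command register, wait for the NEW_CMD bit to clear, give up after ten 50 ms sleeps, i.e. ~500 ms) recurs for both the TTI and RTI programming in this patch. A hypothetical helper it could be factored into (kernel context assumed — readq(), msleep() and u64 from the usual kernel headers; not part of the patch):

static int wait_for_cmd_complete_sketch(u64 __iomem *addr, u64 busy_bit)
{
	int time = 0;
	u64 val64;

	while (1) {
		val64 = readq(addr);
		if (!(val64 & busy_bit))
			return 0;	/* strobe cleared, command done */
		if (time > 10)
			return -1;	/* ~500 ms elapsed, give up */
		time++;
		msleep(50);
	}
}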
904 /* RTI Initialization */ 1316 if (nic->config.bimodal) {
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) | 1317 int k = 0;
906 RTI_DATA1_MEM_RX_URNG_A(0xA) | 1318 for (k = 0; k < config->rx_ring_num; k++) {
907 RTI_DATA1_MEM_RX_URNG_B(0x10) | 1319 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; 1320 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1321 writeq(val64, &bar0->tti_command_mem);
1322
1323 /*
1324 * Once the operation completes, the Strobe bit of the command
1325 * register will be reset. We poll for this particular condition.
1326 * We wait for a maximum of 500ms for the operation to complete;
1327 * if it's not complete by then we return error.
1328 */
1329 time = 0;
1330 while (TRUE) {
1331 val64 = readq(&bar0->tti_command_mem);
1332 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1333 break;
1334 }
1335 if (time > 10) {
1336 DBG_PRINT(ERR_DBG,
1337 "%s: TTI init Failed\n",
1338 dev->name);
1339 return -1;
1340 }
1341 time++;
1342 msleep(50);
1343 }
1344 }
1345 } else {
909 1346
910 writeq(val64, &bar0->rti_data1_mem); 1347 /* RTI Initialization */
1348 if (nic->device_type == XFRAME_II_DEVICE) {
1349 /*
1350 * Programmed to generate approx. 500 interrupts per
1351 * second
1352 */
1353 int count = (nic->config.bus_speed * 125)/4;
1354 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1355 } else {
1356 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1357 }
1358 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1359 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1360 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
911 1361
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | 1362 writeq(val64, &bar0->rti_data1_mem);
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
916 1363
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD; 1364 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
918 writeq(val64, &bar0->rti_command_mem); 1365 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1366 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1367 writeq(val64, &bar0->rti_data2_mem);
919 1368
920 /* 1369 for (i = 0; i < config->rx_ring_num; i++) {
921 * Once the operation completes, the Strobe bit of the command 1370 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
922 * register will be reset. We poll for this particular condition 1371 | RTI_CMD_MEM_OFFSET(i);
923 * We wait for a maximum of 500ms for the operation to complete, 1372 writeq(val64, &bar0->rti_command_mem);
924 * if it's not complete by then we return error. 1373
925 */ 1374 /*
926 time = 0; 1375 * Once the operation completes, the Strobe bit of the
927 while (TRUE) { 1376 * command register will be reset. We poll for this
928 val64 = readq(&bar0->rti_command_mem); 1377 * particular condition. We wait for a maximum of 500ms
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) { 1378 * for the operation to complete; if it's not complete
930 break; 1379 * by then we return error.
931 } 1380 */
932 if (time > 10) { 1381 time = 0;
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", 1382 while (TRUE) {
934 dev->name); 1383 val64 = readq(&bar0->rti_command_mem);
935 return -1; 1384 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1385 break;
1386 }
1387 if (time > 10) {
1388 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1389 dev->name);
1390 return -1;
1391 }
1392 time++;
1393 msleep(50);
1394 }
936 } 1395 }
937 time++;
938 msleep(50);
939 } 1396 }
940 1397
941 /* 1398 /*
942 * Initializing proper values as Pause threshold into all 1399 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side. 1400 * the 8 Queues on Rx side.
944 */ 1401 */
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); 1402 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); 1403 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947 1404
948 /* Disable RMAC PAD STRIPPING */ 1405 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg; 1406 add = (void *) &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg); 1407 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); 1408 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1409 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -955,8 +1412,8 @@ static int init_nic(struct s2io_nic *nic)
955 writel((u32) (val64 >> 32), (add + 4)); 1412 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg); 1413 val64 = readq(&bar0->mac_cfg);
957 1414
958 /* 1415 /*
959 * Set the time value to be inserted in the pause frame 1416 * Set the time value to be inserted in the pause frame
960 * generated by xena. 1417 * generated by xena.
961 */ 1418 */
962 val64 = readq(&bar0->rmac_pause_cfg); 1419 val64 = readq(&bar0->rmac_pause_cfg);
@@ -964,7 +1421,7 @@ static int init_nic(struct s2io_nic *nic)
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); 1421 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg); 1422 writeq(val64, &bar0->rmac_pause_cfg);
966 1423
967 /* 1424 /*
968 * Set the Threshold Limit for Generating the pause frame 1425 * Set the Threshold Limit for Generating the pause frame
969 * If the amount of data in any Queue exceeds ratio of 1426 * If the amount of data in any Queue exceeds ratio of
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 1427 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
@@ -988,25 +1445,54 @@ static int init_nic(struct s2io_nic *nic)
988 } 1445 }
989 writeq(val64, &bar0->mc_pause_thresh_q4q7); 1446 writeq(val64, &bar0->mc_pause_thresh_q4q7);
990 1447
991 /* 1448 /*
992 * TxDMA will stop issuing read requests if the number of read 1449 * TxDMA will stop issuing read requests if the number of read
993 * splits has exceeded the limit set by shared_splits 1450 * splits has exceeded the limit set by shared_splits
994 */ 1451 */
995 val64 = readq(&bar0->pic_control); 1452 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); 1453 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control); 1454 writeq(val64, &bar0->pic_control);
998 1455
1456 /*
1457 * Programming the Herc to split every write transaction
1458 * that does not start on an ADB to reduce disconnects.
1459 */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1462 writeq(val64, &bar0->wreq_split_mask);
1463 }
1464
1465 /* Setting Link stability period to 64 ms */
1466 if (nic->device_type == XFRAME_II_DEVICE) {
1467 val64 = MISC_LINK_STABILITY_PRD(3);
1468 writeq(val64, &bar0->misc_control);
1469 }
1470
999 return SUCCESS; 1471 return SUCCESS;
1000} 1472}
1473#define LINK_UP_DOWN_INTERRUPT 1
1474#define MAC_RMAC_ERR_TIMER 2
1001 1475
1002/** 1476#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts 1477#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1478#else
1479int s2io_link_fault_indication(nic_t *nic)
1480{
1481 if (nic->device_type == XFRAME_II_DEVICE)
1482 return LINK_UP_DOWN_INTERRUPT;
1483 else
1484 return MAC_RMAC_ERR_TIMER;
1485}
1486#endif
1487
1488/**
1489 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable, 1490 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and, 1491 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs. 1492 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts 1493 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to 1494 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block. 1495 * enable/disable any Intr block.
1010 * Return Value: NONE. 1496 * Return Value: NONE.
1011 */ 1497 */
1012 1498
@@ -1024,20 +1510,31 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1024 temp64 = readq(&bar0->general_int_mask); 1510 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64); 1511 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask); 1512 writeq(temp64, &bar0->general_int_mask);
1027 /* 1513 /*
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO 1514 * If this is a Hercules adapter, enable GPIO;
1029 * interrupts for now. 1515 * otherwise keep all PCIX, Flash, MDIO, IIC and
1030 * TODO 1516 * GPIO interrupts disabled for now.
1517 * TODO
1031 */ 1518 */
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1519 if (s2io_link_fault_indication(nic) ==
1033 /* 1520 LINK_UP_DOWN_INTERRUPT ) {
1521 temp64 = readq(&bar0->pic_int_mask);
1522 temp64 &= ~((u64) PIC_INT_GPIO);
1523 writeq(temp64, &bar0->pic_int_mask);
1524 temp64 = readq(&bar0->gpio_int_mask);
1525 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1526 writeq(temp64, &bar0->gpio_int_mask);
1527 } else {
1528 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1529 }
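
When the link-fault indication arrives via GPIO, the branch above enables just the GPIO and link-up interrupts by clearing their mask bits; in these registers a 0 bit means enabled. The read-modify-write idiom, as a hypothetical helper (kernel context assumed; not part of the patch):

static void unmask_bits_sketch(u64 __iomem *mask_reg, u64 bits)
{
	u64 tmp = readq(mask_reg);

	tmp &= ~bits;		/* 0 in a mask register enables the intr */
	writeq(tmp, mask_reg);
}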
1530 /*
1034 * No MSI Support is available presently, so TTI and 1531 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled. 1532 * RTI interrupts are also disabled.
1036 */ 1533 */
1037 } else if (flag == DISABLE_INTRS) { 1534 } else if (flag == DISABLE_INTRS) {
1038 /* 1535 /*
1039 * Disable PIC Intrs in the general 1536 * Disable PIC Intrs in the general
1040 * intr mask register 1537 * intr mask register
1041 */ 1538 */
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 1539 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask); 1540 temp64 = readq(&bar0->general_int_mask);
@@ -1055,27 +1552,27 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1055 temp64 = readq(&bar0->general_int_mask); 1552 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64); 1553 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask); 1554 writeq(temp64, &bar0->general_int_mask);
1058 /* 1555 /*
1059 * Keep all interrupts other than PFC interrupt 1556 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level. 1557 * and PCC interrupt disabled in DMA level.
1061 */ 1558 */
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M | 1559 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 TXDMA_PCC_INT_M); 1560 TXDMA_PCC_INT_M);
1064 writeq(val64, &bar0->txdma_int_mask); 1561 writeq(val64, &bar0->txdma_int_mask);
1065 /* 1562 /*
1066 * Enable only the MISC error 1 interrupt in PFC block 1563 * Enable only the MISC error 1 interrupt in PFC block
1067 */ 1564 */
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1); 1565 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask); 1566 writeq(val64, &bar0->pfc_err_mask);
1070 /* 1567 /*
1071 * Enable only the FB_ECC error interrupt in PCC block 1568 * Enable only the FB_ECC error interrupt in PCC block
1072 */ 1569 */
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR); 1570 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask); 1571 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) { 1572 } else if (flag == DISABLE_INTRS) {
1076 /* 1573 /*
1077 * Disable TxDMA Intrs in the general intr mask 1574 * Disable TxDMA Intrs in the general intr mask
1078 * register 1575 * register
1079 */ 1576 */
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask); 1577 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask); 1578 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
@@ -1093,15 +1590,15 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1093 temp64 = readq(&bar0->general_int_mask); 1590 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64); 1591 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask); 1592 writeq(temp64, &bar0->general_int_mask);
1096 /* 1593 /*
1097 * All RxDMA block interrupts are disabled for now 1594 * All RxDMA block interrupts are disabled for now
1098 * TODO 1595 * TODO
1099 */ 1596 */
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) { 1598 } else if (flag == DISABLE_INTRS) {
1102 /* 1599 /*
1103 * Disable RxDMA Intrs in the general intr mask 1600 * Disable RxDMA Intrs in the general intr mask
1104 * register 1601 * register
1105 */ 1602 */
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask); 1603 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask); 1604 temp64 = readq(&bar0->general_int_mask);
@@ -1118,22 +1615,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1118 temp64 = readq(&bar0->general_int_mask); 1615 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64); 1616 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask); 1617 writeq(temp64, &bar0->general_int_mask);
1121 /* 1618 /*
1122 * All MAC block error interrupts are disabled for now 1619 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1124 * TODO 1620 * TODO
1125 */ 1621 */
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1130
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) { 1622 } else if (flag == DISABLE_INTRS) {
1135 /* 1623 /*
1136 * Disable MAC Intrs in the general intr mask register 1624 * Disable MAC Intrs in the general intr mask register
1137 */ 1625 */
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask); 1626 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS, 1627 writeq(DISABLE_ALL_INTRS,
@@ -1152,14 +1640,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1152 temp64 = readq(&bar0->general_int_mask); 1640 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64); 1641 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask); 1642 writeq(temp64, &bar0->general_int_mask);
1155 /* 1643 /*
1156 * All XGXS block error interrupts are disabled for now 1644 * All XGXS block error interrupts are disabled for now
1157 * TODO 1645 * TODO
1158 */ 1646 */
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1647 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) { 1648 } else if (flag == DISABLE_INTRS) {
1161 /* 1649 /*
1162 * Disable MC Intrs in the general intr mask register 1650 * Disable MC Intrs in the general intr mask register
1163 */ 1651 */
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask); 1652 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask); 1653 temp64 = readq(&bar0->general_int_mask);
@@ -1175,11 +1663,11 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1175 temp64 = readq(&bar0->general_int_mask); 1663 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64); 1664 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask); 1665 writeq(temp64, &bar0->general_int_mask);
1178 /* 1666 /*
1179 * All MC block error interrupts are disabled for now 1667 * Enable all MC Intrs.
1180 * TODO
1181 */ 1668 */
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask); 1669 writeq(0x0, &bar0->mc_int_mask);
1670 writeq(0x0, &bar0->mc_err_mask);
1183 } else if (flag == DISABLE_INTRS) { 1671 } else if (flag == DISABLE_INTRS) {
1184 /* 1672 /*
1185 * Disable MC Intrs in the general intr mask register 1673 * Disable MC Intrs in the general intr mask register
@@ -1199,14 +1687,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1199 temp64 = readq(&bar0->general_int_mask); 1687 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64); 1688 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask); 1689 writeq(temp64, &bar0->general_int_mask);
1202 /* 1690 /*
1203 * Enable all the Tx side interrupts 1691 * Enable all the Tx side interrupts
1204 * writing 0 Enables all 64 TX interrupt levels 1692 * writing 0 Enables all 64 TX interrupt levels
1205 */ 1693 */
1206 writeq(0x0, &bar0->tx_traffic_mask); 1694 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) { 1695 } else if (flag == DISABLE_INTRS) {
1208 /* 1696 /*
1209 * Disable Tx Traffic Intrs in the general intr mask 1697 * Disable Tx Traffic Intrs in the general intr mask
1210 * register. 1698 * register.
1211 */ 1699 */
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); 1700 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
@@ -1226,8 +1714,8 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1226 /* writing 0 Enables all 8 RX interrupt levels */ 1714 /* writing 0 Enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask); 1715 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) { 1716 } else if (flag == DISABLE_INTRS) {
1229 /* 1717 /*
1230 * Disable Rx Traffic Intrs in the general intr mask 1718 * Disable Rx Traffic Intrs in the general intr mask
1231 * register. 1719 * register.
1232 */ 1720 */
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); 1721 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
@@ -1238,24 +1726,66 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1238 } 1726 }
1239} 1727}
1240 1728
1241/** 1729static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1242 * verify_xena_quiescence - Checks whether the H/W is ready 1730{
1731 int ret = 0;
1732
1733 if (flag == FALSE) {
1734 if ((!herc && (rev_id >= 4)) || herc) {
1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1738 ret = 1;
1739 }
1740 } else {
1741 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1742 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1743 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1744 ret = 1;
1745 }
1746 }
1747 } else {
1748 if ((!herc && (rev_id >= 4)) || herc) {
1749 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1750 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1751 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1752 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1753 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1754 ret = 1;
1755 }
1756 } else {
1757 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1758 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1759 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1760 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1761 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1762 ret = 1;
1763 }
1764 }
1765 }
1766
1767 return ret;
1768}
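
check_prc_pcc_state() folds four near-identical branches across two axes: the silicon (Herc, or Xena rev >= 4, reports a single RMAC_PCC_IDLE bit; older Xena reports RMAC_PCC_FOUR_IDLE) and whether the adapter enable bit was ever written (flag), which relaxes the PRC-quiescent requirement. A condensed restatement with the same truth table (sketch only; relies on the driver's macros and types):

static int check_prc_pcc_state_sketch(u64 val64, int flag, int rev_id, int herc)
{
	u64 pcc_idle = (herc || rev_id >= 4) ?
		ADAPTER_STATUS_RMAC_PCC_IDLE :
		ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE;
	int prc_all = (val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		ADAPTER_STATUS_RC_PRC_QUIESCENT;

	if (flag == FALSE)
		/* before first enable: PCC idle bits clear, PRC fully set */
		return !(val64 & pcc_idle) && prc_all;
	/* after an enable: PCC fully idle, PRC either untouched or fully set */
	return ((val64 & pcc_idle) == pcc_idle) &&
	       (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) || prc_all);
}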
1769/**
1770 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register. 1771 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once 1772 * @flag : indicates if the adapter enable bit was ever written once
1245 * before. 1773 * before.
1246 * Description: Returns whether the H/W is ready to go or not. Depending 1774 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether the adapter enable bit was written or not, the comparison 1775 * on whether the adapter enable bit was written or not, the comparison
1248 * differs and the calling function passes the input argument flag to 1776 * differs and the calling function passes the input argument flag to
1249 * indicate this. 1777 * indicate this.
1250 * Return: 1 if Xena is quiescent 1778 * Return: 1 if Xena is quiescent
1251 * 0 if Xena is not quiescent 1779 * 0 if Xena is not quiescent
1252 */ 1780 */
1253 1781
1254static int verify_xena_quiescence(u64 val64, int flag) 1782static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1255{ 1783{
1256 int ret = 0; 1784 int ret = 0, herc;
1257 u64 tmp64 = ~((u64) val64); 1785 u64 tmp64 = ~((u64) val64);
1786 int rev_id = get_xena_rev_id(sp->pdev);
1258 1787
1788 herc = (sp->device_type == XFRAME_II_DEVICE);
1259 if (! 1789 if (!
1260 (tmp64 & 1790 (tmp64 &
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1791 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
@@ -1263,25 +1793,7 @@ static int verify_xena_quiescence(u64 val64, int flag)
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1793 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1794 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) { 1795 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) { 1796 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1270
1271 ret = 1;
1272
1273 }
1274 } else {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1280
1281 ret = 1;
1282
1283 }
1284 }
1285 } 1797 }
1286 1798
1287 return ret; 1799 return ret;
@@ -1290,12 +1802,12 @@ static int verify_xena_quiescence(u64 val64, int flag)
1290/** 1802/**
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms 1803 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specifc structure 1804 * @sp: Pointer to device specifc structure
1293 * Description : 1805 * Description :
1294 * New procedure to clear mac address reading problems on Alpha platforms 1806 * New procedure to clear mac address reading problems on Alpha platforms
1295 * 1807 *
1296 */ 1808 */
1297 1809
1298static void fix_mac_address(nic_t * sp) 1810void fix_mac_address(nic_t * sp)
1299{ 1811{
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1812 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1301 u64 val64; 1813 u64 val64;
@@ -1303,20 +1815,21 @@ static void fix_mac_address(nic_t * sp)
1303 1815
1304 while (fix_mac[i] != END_SIGN) { 1816 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control); 1817 writeq(fix_mac[i++], &bar0->gpio_control);
1818 udelay(10);
1306 val64 = readq(&bar0->gpio_control); 1819 val64 = readq(&bar0->gpio_control);
1307 } 1820 }
1308} 1821}
1309 1822
1310/** 1823/**
1311 * start_nic - Turns the device on 1824 * start_nic - Turns the device on
1312 * @nic : device private variable. 1825 * @nic : device private variable.
1313 * Description: 1826 * Description:
1314 * This function actually turns the device on. Before this function is 1827 * This function actually turns the device on. Before this function is
1315 * called, all registers are configured from their reset states 1828 * called, all registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On 1829 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is 1830 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register. 1831 * literally switched on by writing into the adapter control register.
1319 * Return Value: 1832 * Return Value:
1320 * SUCCESS on success and -1 on failure. 1833 * SUCCESS on success and -1 on failure.
1321 */ 1834 */
1322 1835
@@ -1325,8 +1838,8 @@ static int start_nic(struct s2io_nic *nic)
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1838 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev; 1839 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0; 1840 register u64 val64 = 0;
1328 u16 interruptible, i; 1841 u16 interruptible;
1329 u16 subid; 1842 u16 subid, i;
1330 mac_info_t *mac_control; 1843 mac_info_t *mac_control;
1331 struct config_param *config; 1844 struct config_param *config;
1332 1845
@@ -1335,10 +1848,12 @@ static int start_nic(struct s2io_nic *nic)
1335 1848
1336 /* PRC Initialization and configuration */ 1849 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) { 1850 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr, 1851 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]); 1852 &bar0->prc_rxd0_n[i]);
1340 1853
1341 val64 = readq(&bar0->prc_ctrl_n[i]); 1854 val64 = readq(&bar0->prc_ctrl_n[i]);
1855 if (nic->config.bimodal)
1856 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1342#ifndef CONFIG_2BUFF_MODE 1857#ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED; 1858 val64 |= PRC_CTRL_RC_ENABLED;
1344#else 1859#else
@@ -1354,7 +1869,7 @@ static int start_nic(struct s2io_nic *nic)
1354 writeq(val64, &bar0->rx_pa_cfg); 1869 writeq(val64, &bar0->rx_pa_cfg);
1355#endif 1870#endif
1356 1871
1357 /* 1872 /*
1358 * Enabling MC-RLDRAM. After enabling the device, we wait 1873 * Enabling MC-RLDRAM. After enabling the device, we wait
1359 * for around 100ms, which is approximately the time required 1874 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation. 1875 * for the device to be ready for operation.
@@ -1364,27 +1879,27 @@ static int start_nic(struct s2io_nic *nic)
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 1879 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs); 1880 val64 = readq(&bar0->mc_rldram_mrs);
1366 1881
1367 msleep(100); /* Delay by around 100 ms. */ 1882 msleep(100); /* Delay by around 100 ms. */
1368 1883
1369 /* Enabling ECC Protection. */ 1884 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control); 1885 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN; 1886 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control); 1887 writeq(val64, &bar0->adapter_control);
1373 1888
1374 /* 1889 /*
1375 * Clearing any possible Link state change interrupts that 1890 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card. 1891 * could have popped up just before Enabling the card.
1377 */ 1892 */
1378 val64 = readq(&bar0->mac_rmac_err_reg); 1893 val64 = readq(&bar0->mac_rmac_err_reg);
1379 if (val64) 1894 if (val64)
1380 writeq(val64, &bar0->mac_rmac_err_reg); 1895 writeq(val64, &bar0->mac_rmac_err_reg);
1381 1896
1382 /* 1897 /*
1383 * Verify if the device is ready to be enabled, if so enable 1898 * Verify if the device is ready to be enabled, if so enable
1384 * it. 1899 * it.
1385 */ 1900 */
1386 val64 = readq(&bar0->adapter_status); 1901 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) { 1902 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1903 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1904 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64); 1905 (unsigned long long) val64);
@@ -1392,16 +1907,18 @@ static int start_nic(struct s2io_nic *nic)
1392 } 1907 }
1393 1908
1394 /* Enable select interrupts */ 1909 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 1910 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1396 RX_MAC_INTR; 1911 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1912 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1913
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); 1914 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1398 1915
1399 /* 1916 /*
1400 * With some switches, link might be already up at this point. 1917 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser, 1918 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot 1919 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to 1920 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change. 1921 * make a global change.
1405 */ 1922 */
1406 1923
1407 /* Enabling Laser. */ 1924 /* Enabling Laser. */
@@ -1411,44 +1928,30 @@ static int start_nic(struct s2io_nic *nic)
1411 1928
1412 /* SXE-002: Initialize link and activity LED */ 1929 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device; 1930 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) { 1931 if (((subid & 0xFF) >= 0x07) &&
1932 (nic->device_type == XFRAME_I_DEVICE)) {
1415 val64 = readq(&bar0->gpio_control); 1933 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL; 1934 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control); 1935 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL; 1936 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700); 1937 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1420 } 1938 }
1421 1939
1422 /* 1940 /*
1423 * Don't see link state interrupts on certain switches, so 1941 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here. 1942 * directly scheduling a link state task from here.
1425 */ 1943 */
1426 schedule_work(&nic->set_link_task); 1944 schedule_work(&nic->set_link_task);
1427 1945
1428 /*
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1432 */
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1435 udelay(50);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1438 udelay(50);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1441 udelay(50);
1442
1443 return SUCCESS; 1946 return SUCCESS;
1444} 1947}
1445 1948
1446/** 1949/**
1447 * free_tx_buffers - Free all queued Tx buffers 1950 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable. 1951 * @nic : device private variable.
1449 * Description: 1952 * Description:
1450 * Free all queued Tx buffers. 1953 * Free all queued Tx buffers.
1451 * Return Value: void 1954 * Return Value: void
1452*/ 1955*/
1453 1956
1454static void free_tx_buffers(struct s2io_nic *nic) 1957static void free_tx_buffers(struct s2io_nic *nic)
@@ -1459,39 +1962,61 @@ static void free_tx_buffers(struct s2io_nic *nic)
1459 int i, j; 1962 int i, j;
1460 mac_info_t *mac_control; 1963 mac_info_t *mac_control;
1461 struct config_param *config; 1964 struct config_param *config;
1462 int cnt = 0; 1965 int cnt = 0, frg_cnt;
1463 1966
1464 mac_control = &nic->mac_control; 1967 mac_control = &nic->mac_control;
1465 config = &nic->config; 1968 config = &nic->config;
1466 1969
1467 for (i = 0; i < config->tx_fifo_num; i++) { 1970 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 1971 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j]. 1972 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1470 list_virt_addr; 1973 list_virt_addr;
1471 skb = 1974 skb =
1472 (struct sk_buff *) ((unsigned long) txdp-> 1975 (struct sk_buff *) ((unsigned long) txdp->
1473 Host_Control); 1976 Host_Control);
1474 if (skb == NULL) { 1977 if (skb == NULL) {
1475 memset(txdp, 0, sizeof(TxD_t)); 1978 memset(txdp, 0, sizeof(TxD_t) *
1979 config->max_txds);
1476 continue; 1980 continue;
1477 } 1981 }
1982 frg_cnt = skb_shinfo(skb)->nr_frags;
1983 pci_unmap_single(nic->pdev, (dma_addr_t)
1984 txdp->Buffer_Pointer,
1985 skb->len - skb->data_len,
1986 PCI_DMA_TODEVICE);
1987 if (frg_cnt) {
1988 TxD_t *temp;
1989 temp = txdp;
1990 txdp++;
1991 for (j = 0; j < frg_cnt; j++, txdp++) {
1992 skb_frag_t *frag =
1993 &skb_shinfo(skb)->frags[j];
1994 pci_unmap_page(nic->pdev,
1995 (dma_addr_t)
1996 txdp->
1997 Buffer_Pointer,
1998 frag->size,
1999 PCI_DMA_TODEVICE);
2000 }
2001 txdp = temp;
2002 }
1478 dev_kfree_skb(skb); 2003 dev_kfree_skb(skb);
1479 memset(txdp, 0, sizeof(TxD_t)); 2004 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1480 cnt++; 2005 cnt++;
1481 } 2006 }
1482 DBG_PRINT(INTR_DBG, 2007 DBG_PRINT(INTR_DBG,
1483 "%s:forcibly freeing %d skbs on FIFO%d\n", 2008 "%s:forcibly freeing %d skbs on FIFO%d\n",
1484 dev->name, cnt, i); 2009 dev->name, cnt, i);
1485 mac_control->tx_curr_get_info[i].offset = 0; 2010 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0; 2011 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1487 } 2012 }
1488} 2013}
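
The reworked teardown unmaps the skb's linear part from the first TxD and each page fragment from the TxDs that follow it. Note that the fragment loop above reuses the outer counter j, which clobbers the per-FIFO descriptor index; the sketch below (kernel context and the driver's TxD_t layout assumed) uses its own index:

static void unmap_txds_sketch(struct pci_dev *pdev, TxD_t *txdp,
			      struct sk_buff *skb)
{
	int i, frg_cnt = skb_shinfo(skb)->nr_frags;

	/* the linear part lives in the first TxD */
	pci_unmap_single(pdev, (dma_addr_t) txdp->Buffer_Pointer,
			 skb->len - skb->data_len, PCI_DMA_TODEVICE);
	/* one page fragment per following TxD */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		txdp++;
		pci_unmap_page(pdev, (dma_addr_t) txdp->Buffer_Pointer,
			       frag->size, PCI_DMA_TODEVICE);
	}
}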
1489 2014
1490/** 2015/**
1491 * stop_nic - To stop the nic 2016 * stop_nic - To stop the nic
1492 * @nic : device private variable. 2017 * @nic : device private variable.
1493 * Description: 2018 * Description:
1494 * This function does exactly the opposite of what the start_nic() 2019 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device. 2020 * function does. This function is called to stop the device.
1496 * Return Value: 2021 * Return Value:
1497 * void. 2022 * void.
@@ -1509,8 +2034,9 @@ static void stop_nic(struct s2io_nic *nic)
1509 config = &nic->config; 2034 config = &nic->config;
1510 2035
1511 /* Disable all interrupts */ 2036 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR | 2037 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1513 RX_MAC_INTR; 2038 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2039 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); 2040 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1515 2041
1516 /* Disable PRCs */ 2042 /* Disable PRCs */
@@ -1521,11 +2047,11 @@ static void stop_nic(struct s2io_nic *nic)
1521 } 2047 }
1522} 2048}
1523 2049
1524/** 2050/**
1525 * fill_rx_buffers - Allocates the Rx side skbs 2051 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable 2052 * @nic: device private variable
1527 * @ring_no: ring number 2053 * @ring_no: ring number
1528 * Description: 2054 * Description:
1529 * The function allocates Rx side skbs and puts the physical 2055 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC 2056 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations. 2057 * can DMA the received frame into these locations.
@@ -1533,8 +2059,8 @@ static void stop_nic(struct s2io_nic *nic)
1533 * 1. single buffer, 2059 * 1. single buffer,
1534 * 2. three buffer and 2060 * 2. three buffer and
1535 * 3. five buffer modes. 2061 * 3. five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split 2062 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header, 2063 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself 2064 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. As of now only single buffer mode is 2065 * is split into 3 fragments. As of now only single buffer mode is
1540 * supported. 2066 * supported.
@@ -1542,7 +2068,7 @@ static void stop_nic(struct s2io_nic *nic)
1542 * SUCCESS on success or an appropriate -ve value on failure. 2068 * SUCCESS on success or an appropriate -ve value on failure.
1543 */ 2069 */
1544 2070
1545static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2071int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546{ 2072{
1547 struct net_device *dev = nic->dev; 2073 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb; 2074 struct sk_buff *skb;
@@ -1550,34 +2076,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1550 int off, off1, size, block_no, block_no1; 2076 int off, off1, size, block_no, block_no1;
1551 int offset, offset1; 2077 int offset, offset1;
1552 u32 alloc_tab = 0; 2078 u32 alloc_tab = 0;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] - 2079 u32 alloc_cnt;
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control; 2080 mac_info_t *mac_control;
1556 struct config_param *config; 2081 struct config_param *config;
1557#ifdef CONFIG_2BUFF_MODE 2082#ifdef CONFIG_2BUFF_MODE
1558 RxD_t *rxdpnext; 2083 RxD_t *rxdpnext;
1559 int nextblk; 2084 int nextblk;
1560 unsigned long tmp; 2085 u64 tmp;
1561 buffAdd_t *ba; 2086 buffAdd_t *ba;
1562 dma_addr_t rxdpphys; 2087 dma_addr_t rxdpphys;
1563#endif 2088#endif
1564#ifndef CONFIG_S2IO_NAPI 2089#ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags; 2090 unsigned long flags;
1566#endif 2091#endif
2092 RxD_t *first_rxdp = NULL;
1567 2093
1568 mac_control = &nic->mac_control; 2094 mac_control = &nic->mac_control;
1569 config = &nic->config; 2095 config = &nic->config;
1570 2096 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2097 atomic_read(&nic->rx_bufs_left[ring_no]);
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2098 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2099 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573 2100
1574 while (alloc_tab < alloc_cnt) { 2101 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no]. 2102 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1576 block_index; 2103 block_index;
1577 block_no1 = mac_control->rx_curr_get_info[ring_no]. 2104 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1578 block_index; 2105 block_index;
1579 off = mac_control->rx_curr_put_info[ring_no].offset; 2106 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset; 2107 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1581#ifndef CONFIG_2BUFF_MODE 2108#ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off; 2109 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1; 2110 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
@@ -1586,7 +2113,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1; 2113 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1587#endif 2114#endif
1588 2115
1589 rxdp = nic->rx_blocks[ring_no][block_no]. 2116 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1590 block_virt_addr + off; 2117 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) { 2118 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2119 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
@@ -1595,15 +2122,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1595 } 2122 }
1596#ifndef CONFIG_2BUFF_MODE 2123#ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) { 2124 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no]. 2125 mac_control->rings[ring_no].rx_curr_put_info.
1599 block_index++; 2126 block_index++;
1600 mac_control->rx_curr_put_info[ring_no]. 2127 mac_control->rings[ring_no].rx_curr_put_info.
1601 block_index %= nic->block_count[ring_no]; 2128 block_index %= mac_control->rings[ring_no].block_count;
1602 block_no = mac_control->rx_curr_put_info 2129 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1603 [ring_no].block_index; 2130 block_index;
1604 off++; 2131 off++;
1605 off %= (MAX_RXDS_PER_BLOCK + 1); 2132 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset = 2133 mac_control->rings[ring_no].rx_curr_put_info.offset =
1607 off; 2134 off;
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2); 2135 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2136 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
@@ -1611,30 +2138,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1611 } 2138 }
1612#ifndef CONFIG_S2IO_NAPI 2139#ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags); 2140 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] = 2141 mac_control->rings[ring_no].put_pos =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2142 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags); 2143 spin_unlock_irqrestore(&nic->put_lock, flags);
1617#endif 2144#endif
1618#else 2145#else
1619 if (rxdp->Host_Control == END_OF_BLOCK) { 2146 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no]. 2147 mac_control->rings[ring_no].rx_curr_put_info.
1621 block_index++; 2148 block_index++;
1622 mac_control->rx_curr_put_info[ring_no]. 2149 mac_control->rings[ring_no].rx_curr_put_info.block_index
1623 block_index %= nic->block_count[ring_no]; 2150 %= mac_control->rings[ring_no].block_count;
1624 block_no = mac_control->rx_curr_put_info 2151 block_no = mac_control->rings[ring_no].rx_curr_put_info
1625 [ring_no].block_index; 2152 .block_index;
1626 off = 0; 2153 off = 0;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n", 2154 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no, 2155 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1); 2156 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset = 2157 mac_control->rings[ring_no].rx_curr_put_info.offset =
1631 off; 2158 off;
1632 rxdp = nic->rx_blocks[ring_no][block_no]. 2159 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1633 block_virt_addr; 2160 block_virt_addr;
1634 } 2161 }
1635#ifndef CONFIG_S2IO_NAPI 2162#ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags); 2163 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no * 2164 mac_control->rings[ring_no].put_pos = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off; 2165 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags); 2166 spin_unlock_irqrestore(&nic->put_lock, flags);
1640#endif 2167#endif
@@ -1646,27 +2173,27 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1646 if (rxdp->Control_2 & BIT(0)) 2173 if (rxdp->Control_2 & BIT(0))
1647#endif 2174#endif
1648 { 2175 {
1649 mac_control->rx_curr_put_info[ring_no]. 2176 mac_control->rings[ring_no].rx_curr_put_info.
1650 offset = off; 2177 offset = off;
1651 goto end; 2178 goto end;
1652 } 2179 }
1653#ifdef CONFIG_2BUFF_MODE 2180#ifdef CONFIG_2BUFF_MODE
1654 /* 2181 /*
1655 * RxDs Spanning cache lines will be replenished only 2182 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It 2183 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6) 2184 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending 2185 * descriptors for the 48 byte descriptor. The offending
1659 * descriptor is of course the 3rd descriptor. 2186 * descriptor is of course the 3rd descriptor.
1660 */ 2187 */
1661 rxdpphys = nic->rx_blocks[ring_no][block_no]. 2188 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1662 block_dma_addr + (off * sizeof(RxD_t)); 2189 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) { 2190 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no]. 2191 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1665 block_virt_addr + (off + 1); 2192 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) { 2193 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) % 2194 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]); 2195 (mac_control->rings[ring_no].block_count);
1669 rxdpnext = nic->rx_blocks[ring_no] 2196 rxdpnext = mac_control->rings[ring_no].rx_blocks
1670 [nextblk].block_virt_addr; 2197 [nextblk].block_virt_addr;
1671 } 2198 }
1672 if (rxdpnext->Control_2 & BIT(0)) 2199 if (rxdpnext->Control_2 & BIT(0))
@@ -1682,6 +2209,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1682 if (!skb) { 2209 if (!skb) {
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2210 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2211 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2212 if (first_rxdp) {
2213 wmb();
2214 first_rxdp->Control_1 |= RXD_OWN_XENA;
2215 }
1685 return -ENOMEM; 2216 return -ENOMEM;
1686 } 2217 }
1687#ifndef CONFIG_2BUFF_MODE 2218#ifndef CONFIG_2BUFF_MODE
@@ -1692,12 +2223,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE); 2223 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size); 2224 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb); 2225 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA; 2226 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2227 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off++; 2228 off++;
1697 off %= (MAX_RXDS_PER_BLOCK + 1); 2229 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off; 2230 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1699#else 2231#else
1700 ba = &nic->ba[ring_no][block_no][off]; 2232 ba = &mac_control->rings[ring_no].ba[block_no][off];
1701 skb_reserve(skb, BUF0_LEN); 2233 skb_reserve(skb, BUF0_LEN);
1702 tmp = ((unsigned long) skb->data & ALIGN_SIZE); 2234 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1703 if (tmp) 2235 if (tmp)
@@ -1719,22 +2251,41 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1719 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */ 2251 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1720 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */ 2252 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1721 rxdp->Host_Control = (u64) ((unsigned long) (skb)); 2253 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1722 rxdp->Control_1 |= RXD_OWN_XENA; 2254 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2255 rxdp->Control_1 |= RXD_OWN_XENA;
1723 off++; 2256 off++;
1724 mac_control->rx_curr_put_info[ring_no].offset = off; 2257 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1725#endif 2258#endif
2259 rxdp->Control_2 |= SET_RXD_MARKER;
2260
2261 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2262 if (first_rxdp) {
2263 wmb();
2264 first_rxdp->Control_1 |= RXD_OWN_XENA;
2265 }
2266 first_rxdp = rxdp;
2267 }
1726 atomic_inc(&nic->rx_bufs_left[ring_no]); 2268 atomic_inc(&nic->rx_bufs_left[ring_no]);
1727 alloc_tab++; 2269 alloc_tab++;
1728 } 2270 }
1729 2271
1730 end: 2272 end:
2273 /* Transfer ownership of first descriptor to adapter just before
2274 * exiting. Before that, use memory barrier so that ownership
2275 * and other fields are seen by adapter correctly.
2276 */
2277 if (first_rxdp) {
2278 wmb();
2279 first_rxdp->Control_1 |= RXD_OWN_XENA;
2280 }
2281
1731 return SUCCESS; 2282 return SUCCESS;
1732} 2283}
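The first_rxdp/wmb() pattern used throughout fill_rx_buffers boils down to: populate every field of a descriptor, issue a write barrier, and only then set the ownership bit. A minimal sketch of that ordering rule follows; the RXD_OWN bit position and the field layout are assumptions for illustration, and __sync_synchronize() stands in for the kernel's wmb():

    #include <stdint.h>

    struct rxd {                 /* simplified stand-in for RxD_t */
        uint64_t control_1;
        uint64_t buffer0_ptr;
    };

    #define RXD_OWN (1ULL << 63) /* assumed ownership-bit position */

    /* Fill the descriptor first, then barrier, then hand it over, so
     * the device can never observe RXD_OWN with a stale buffer pointer. */
    static void post_rxd(volatile struct rxd *d, uint64_t dma_addr)
    {
        d->buffer0_ptr = dma_addr;
        __sync_synchronize();    /* stand-in for the kernel's wmb() */
        d->control_1 |= RXD_OWN;
    }

    int main(void)
    {
        struct rxd d = { 0, 0 };
        post_rxd(&d, 0x1000);
        return d.control_1 == RXD_OWN ? 0 : 1;
    }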
1733 2284
1734/** 2285/**
1735 * free_rx_buffers - Frees all Rx buffers 2286 * free_rx_buffers - Frees all Rx buffers
1736 * @sp: device private variable. 2287 * @sp: device private variable.
1737 * Description: 2288 * Description:
1738 * This function will free all Rx buffers allocated by host. 2289 * This function will free all Rx buffers allocated by host.
1739 * Return Value: 2290 * Return Value:
1740 * NONE. 2291 * NONE.
@@ -1758,7 +2309,8 @@ static void free_rx_buffers(struct s2io_nic *sp)
1758 for (i = 0; i < config->rx_ring_num; i++) { 2309 for (i = 0; i < config->rx_ring_num; i++) {
1759 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2310 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1760 off = j % (MAX_RXDS_PER_BLOCK + 1); 2311 off = j % (MAX_RXDS_PER_BLOCK + 1);
1761 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off; 2312 rxdp = mac_control->rings[i].rx_blocks[blk].
2313 block_virt_addr + off;
1762 2314
1763#ifndef CONFIG_2BUFF_MODE 2315#ifndef CONFIG_2BUFF_MODE
1764 if (rxdp->Control_1 == END_OF_BLOCK) { 2316 if (rxdp->Control_1 == END_OF_BLOCK) {
@@ -1793,7 +2345,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1793 HEADER_SNAP_SIZE, 2345 HEADER_SNAP_SIZE,
1794 PCI_DMA_FROMDEVICE); 2346 PCI_DMA_FROMDEVICE);
1795#else 2347#else
1796 ba = &sp->ba[i][blk][off]; 2348 ba = &mac_control->rings[i].ba[blk][off];
1797 pci_unmap_single(sp->pdev, (dma_addr_t) 2349 pci_unmap_single(sp->pdev, (dma_addr_t)
1798 rxdp->Buffer0_ptr, 2350 rxdp->Buffer0_ptr,
1799 BUF0_LEN, 2351 BUF0_LEN,
@@ -1813,10 +2365,10 @@ static void free_rx_buffers(struct s2io_nic *sp)
1813 } 2365 }
1814 memset(rxdp, 0, sizeof(RxD_t)); 2366 memset(rxdp, 0, sizeof(RxD_t));
1815 } 2367 }
1816 mac_control->rx_curr_put_info[i].block_index = 0; 2368 mac_control->rings[i].rx_curr_put_info.block_index = 0;
1817 mac_control->rx_curr_get_info[i].block_index = 0; 2369 mac_control->rings[i].rx_curr_get_info.block_index = 0;
1818 mac_control->rx_curr_put_info[i].offset = 0; 2370 mac_control->rings[i].rx_curr_put_info.offset = 0;
1819 mac_control->rx_curr_get_info[i].offset = 0; 2371 mac_control->rings[i].rx_curr_get_info.offset = 0;
1820 atomic_set(&sp->rx_bufs_left[i], 0); 2372 atomic_set(&sp->rx_bufs_left[i], 0);
1821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2373 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1822 dev->name, buf_cnt, i); 2374 dev->name, buf_cnt, i);
@@ -1826,7 +2378,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
1826/** 2378/**
1827 * s2io_poll - Rx interrupt handler for NAPI support 2379 * s2io_poll - Rx interrupt handler for NAPI support
1828 * @dev : pointer to the device structure. 2380 * @dev : pointer to the device structure.
1829 * @budget : The number of packets that were budgeted to be processed 2381 * @budget : The number of packets that were budgeted to be processed
1830 * during one pass through the 'Poll' function. 2382 * during one pass through the 'Poll' function.
1831 * Description: 2383 * Description:
1832 * Comes into picture only if NAPI support has been incorporated. It does 2384 * Comes into picture only if NAPI support has been incorporated. It does
@@ -1836,160 +2388,36 @@ static void free_rx_buffers(struct s2io_nic *sp)
1836 * 0 on success and 1 if there are No Rx packets to be processed. 2388 * 0 on success and 1 if there are No Rx packets to be processed.
1837 */ 2389 */
1838 2390
1839#ifdef CONFIG_S2IO_NAPI 2391#if defined(CONFIG_S2IO_NAPI)
1840static int s2io_poll(struct net_device *dev, int *budget) 2392static int s2io_poll(struct net_device *dev, int *budget)
1841{ 2393{
1842 nic_t *nic = dev->priv; 2394 nic_t *nic = dev->priv;
1843 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2395 int pkt_cnt = 0, org_pkts_to_process;
1844 int pkts_to_process = *budget, pkt_cnt = 0;
1845 register u64 val64 = 0;
1846 rx_curr_get_info_t get_info, put_info;
1847 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1848#ifndef CONFIG_2BUFF_MODE
1849 u16 val16, cksum;
1850#endif
1851 struct sk_buff *skb;
1852 RxD_t *rxdp;
1853 mac_info_t *mac_control; 2396 mac_info_t *mac_control;
1854 struct config_param *config; 2397 struct config_param *config;
1855#ifdef CONFIG_2BUFF_MODE 2398 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
1856 buffAdd_t *ba; 2399 u64 val64;
1857#endif 2400 int i;
1858 2401
2402 atomic_inc(&nic->isr_cnt);
1859 mac_control = &nic->mac_control; 2403 mac_control = &nic->mac_control;
1860 config = &nic->config; 2404 config = &nic->config;
1861 2405
1862 if (pkts_to_process > dev->quota) 2406 nic->pkts_to_process = *budget;
1863 pkts_to_process = dev->quota; 2407 if (nic->pkts_to_process > dev->quota)
2408 nic->pkts_to_process = dev->quota;
2409 org_pkts_to_process = nic->pkts_to_process;
1864 2410
1865 val64 = readq(&bar0->rx_traffic_int); 2411 val64 = readq(&bar0->rx_traffic_int);
1866 writeq(val64, &bar0->rx_traffic_int); 2412 writeq(val64, &bar0->rx_traffic_int);
1867 2413
1868 for (i = 0; i < config->rx_ring_num; i++) { 2414 for (i = 0; i < config->rx_ring_num; i++) {
1869 get_info = mac_control->rx_curr_get_info[i]; 2415 rx_intr_handler(&mac_control->rings[i]);
1870 get_block = get_info.block_index; 2416 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
1871 put_info = mac_control->rx_curr_put_info[i]; 2417 if (!nic->pkts_to_process) {
1872 put_block = put_info.block_index; 2418 /* Quota for the current iteration has been met */
1873 ring_bufs = config->rx_cfg[i].num_rxd; 2419 goto no_rx;
1874 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1875 get_info.offset;
1876#ifndef CONFIG_2BUFF_MODE
1877 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1878 get_info.offset;
1879 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 put_info.offset;
1881 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1882 (((get_offset + 1) % ring_bufs) != put_offset)) {
1883 if (--pkts_to_process < 0) {
1884 goto no_rx;
1885 }
1886 if (rxdp->Control_1 == END_OF_BLOCK) {
1887 rxdp =
1888 (RxD_t *) ((unsigned long) rxdp->
1889 Control_2);
1890 get_info.offset++;
1891 get_info.offset %=
1892 (MAX_RXDS_PER_BLOCK + 1);
1893 get_block++;
1894 get_block %= nic->block_count[i];
1895 mac_control->rx_curr_get_info[i].
1896 offset = get_info.offset;
1897 mac_control->rx_curr_get_info[i].
1898 block_index = get_block;
1899 continue;
1900 }
1901 get_offset =
1902 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1903 get_info.offset;
1904 skb =
1905 (struct sk_buff *) ((unsigned long) rxdp->
1906 Host_Control);
1907 if (skb == NULL) {
1908 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1909 dev->name);
1910 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1911 goto no_rx;
1912 }
1913 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1914 val16 = (u16) (val64 >> 48);
1915 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1917 rxdp->Buffer0_ptr,
1918 dev->mtu +
1919 HEADER_ETHERNET_II_802_3_SIZE +
1920 HEADER_802_2_SIZE +
1921 HEADER_SNAP_SIZE,
1922 PCI_DMA_FROMDEVICE);
1923 rx_osm_handler(nic, val16, rxdp, i);
1924 pkt_cnt++;
1925 get_info.offset++;
1926 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1927 rxdp =
1928 nic->rx_blocks[i][get_block].block_virt_addr +
1929 get_info.offset;
1930 mac_control->rx_curr_get_info[i].offset =
1931 get_info.offset;
1932 } 2420 }
1933#else
1934 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1935 get_info.offset;
1936 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 put_info.offset;
1938 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1939 !(rxdp->Control_2 & BIT(0))) &&
1940 (((get_offset + 1) % ring_bufs) != put_offset)) {
1941 if (--pkts_to_process < 0) {
1942 goto no_rx;
1943 }
1944 skb = (struct sk_buff *) ((unsigned long)
1945 rxdp->Host_Control);
1946 if (skb == NULL) {
1947 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1948 dev->name);
1949 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1950 goto no_rx;
1951 }
1952
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1954 rxdp->Buffer0_ptr,
1955 BUF0_LEN, PCI_DMA_FROMDEVICE);
1956 pci_unmap_single(nic->pdev, (dma_addr_t)
1957 rxdp->Buffer1_ptr,
1958 BUF1_LEN, PCI_DMA_FROMDEVICE);
1959 pci_unmap_single(nic->pdev, (dma_addr_t)
1960 rxdp->Buffer2_ptr,
1961 dev->mtu + BUF0_LEN + 4,
1962 PCI_DMA_FROMDEVICE);
1963 ba = &nic->ba[i][get_block][get_info.offset];
1964
1965 rx_osm_handler(nic, rxdp, i, ba);
1966
1967 get_info.offset++;
1968 mac_control->rx_curr_get_info[i].offset =
1969 get_info.offset;
1970 rxdp =
1971 nic->rx_blocks[i][get_block].block_virt_addr +
1972 get_info.offset;
1973
1974 if (get_info.offset &&
1975 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1976 get_info.offset = 0;
1977 mac_control->rx_curr_get_info[i].
1978 offset = get_info.offset;
1979 get_block++;
1980 get_block %= nic->block_count[i];
1981 mac_control->rx_curr_get_info[i].
1982 block_index = get_block;
1983 rxdp =
1984 nic->rx_blocks[i][get_block].
1985 block_virt_addr;
1986 }
1987 get_offset =
1988 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1989 get_info.offset;
1990 pkt_cnt++;
1991 }
1992#endif
1993 } 2421 }
1994 if (!pkt_cnt) 2422 if (!pkt_cnt)
1995 pkt_cnt = 1; 2423 pkt_cnt = 1;
@@ -2007,9 +2435,10 @@ static int s2io_poll(struct net_device *dev, int *budget)
2007 } 2435 }
2009 /* Re-enable the Rx interrupts. */ 2436 /* Re-enable the Rx interrupts. */
2009 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS); 2437 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2438 atomic_dec(&nic->isr_cnt);
2010 return 0; 2439 return 0;
2011 2440
2012 no_rx: 2441no_rx:
2013 dev->quota -= pkt_cnt; 2442 dev->quota -= pkt_cnt;
2014 *budget -= pkt_cnt; 2443 *budget -= pkt_cnt;
2015 2444
@@ -2020,279 +2449,204 @@ static int s2io_poll(struct net_device *dev, int *budget)
2020 break; 2449 break;
2021 } 2450 }
2022 } 2451 }
2452 atomic_dec(&nic->isr_cnt);
2023 return 1; 2453 return 1;
2024} 2454}
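For reference, the 2.6-era NAPI contract that s2io_poll implements is: process at most min(*budget, dev->quota) packets, charge what was consumed to both counters, and return non-zero only while work remains. Below is a self-contained userspace model of that accounting; the names, helper, and values are illustrative, not the driver's:

    #include <stdio.h>

    struct net_device { int quota; };  /* minimal stand-in */

    static int rx_backlog = 300;       /* pretend 300 frames are queued */

    static int process_rx(int todo)    /* hypothetical helper */
    {
        int done = rx_backlog < todo ? rx_backlog : todo;
        rx_backlog -= done;
        return done;
    }

    /* Charge consumed packets to both dev->quota and *budget; return 0
     * when drained, 1 while work remains. */
    static int example_poll(struct net_device *dev, int *budget)
    {
        int todo = *budget < dev->quota ? *budget : dev->quota;
        int done = process_rx(todo);

        dev->quota -= done;
        *budget -= done;
        return done < todo ? 0 : 1;
    }

    int main(void)
    {
        struct net_device dev = { .quota = 64 };
        int budget = 512;

        while (example_poll(&dev, &budget))
            dev.quota = 64;            /* the core would refresh quota */
        printf("backlog drained, budget left: %d\n", budget);
        return 0;
    }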
2025#else 2455#endif
2026/** 2456
2457/**
2027 * rx_intr_handler - Rx interrupt handler 2458 * rx_intr_handler - Rx interrupt handler
2028 * @nic: device private variable. 2459 * @nic: device private variable.
2029 * Description: 2460 * Description:
2030 * If the interrupt is because of a received frame or if the 2461 * If the interrupt is because of a received frame or if the
2031 * receive ring contains fresh, as yet unprocessed frames, this function is 2462 * receive ring contains fresh, as yet unprocessed frames, this function is
2032 * called. It picks out the RxD at which the last Rx processing had 2463 * called. It picks out the RxD at which the last Rx processing had
2033 * stopped and sends the skb to the OSM's Rx handler and then increments 2464 * stopped and sends the skb to the OSM's Rx handler and then increments
2034 * the offset. 2465 * the offset.
2035 * Return Value: 2466 * Return Value:
2036 * NONE. 2467 * NONE.
2037 */ 2468 */
2038 2469static void rx_intr_handler(ring_info_t *ring_data)
2039static void rx_intr_handler(struct s2io_nic *nic)
2040{ 2470{
2471 nic_t *nic = ring_data->nic;
2041 struct net_device *dev = (struct net_device *) nic->dev; 2472 struct net_device *dev = (struct net_device *) nic->dev;
2042 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 2473 int get_block, get_offset, put_block, put_offset, ring_bufs;
2043 rx_curr_get_info_t get_info, put_info; 2474 rx_curr_get_info_t get_info, put_info;
2044 RxD_t *rxdp; 2475 RxD_t *rxdp;
2045 struct sk_buff *skb; 2476 struct sk_buff *skb;
2046#ifndef CONFIG_2BUFF_MODE 2477#ifndef CONFIG_S2IO_NAPI
2047 u16 val16, cksum; 2478 int pkt_cnt = 0;
2048#endif
2049 register u64 val64 = 0;
2050 int get_block, get_offset, put_block, put_offset, ring_bufs;
2051 int i, pkt_cnt = 0;
2052 mac_info_t *mac_control;
2053 struct config_param *config;
2054#ifdef CONFIG_2BUFF_MODE
2055 buffAdd_t *ba;
2056#endif 2479#endif
2480 spin_lock(&nic->rx_lock);
2481 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2482 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2483 __FUNCTION__, dev->name);
2484 spin_unlock(&nic->rx_lock);
2485 }
2057 2486
2058 mac_control = &nic->mac_control; 2487 get_info = ring_data->rx_curr_get_info;
2059 config = &nic->config; 2488 get_block = get_info.block_index;
2060 2489 put_info = ring_data->rx_curr_put_info;
2061 /* 2490 put_block = put_info.block_index;
2062 * rx_traffic_int reg is an R1 register, hence we read and write back 2491 ring_bufs = get_info.ring_len+1;
2063 * the same value in the register to clear it. 2492 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2064 */
2065 val64 = readq(&bar0->rx_traffic_int);
2066 writeq(val64, &bar0->rx_traffic_int);
2067
2068 for (i = 0; i < config->rx_ring_num; i++) {
2069 get_info = mac_control->rx_curr_get_info[i];
2070 get_block = get_info.block_index;
2071 put_info = mac_control->rx_curr_put_info[i];
2072 put_block = put_info.block_index;
2073 ring_bufs = config->rx_cfg[i].num_rxd;
2074 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2075 get_info.offset;
2076#ifndef CONFIG_2BUFF_MODE
2077 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2078 get_info.offset; 2493 get_info.offset;
2079 spin_lock(&nic->put_lock); 2494 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2080 put_offset = nic->put_pos[i]; 2495 get_info.offset;
2081 spin_unlock(&nic->put_lock); 2496#ifndef CONFIG_S2IO_NAPI
2082 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2497 spin_lock(&nic->put_lock);
2083 (((get_offset + 1) % ring_bufs) != put_offset)) { 2498 put_offset = ring_data->put_pos;
2084 if (rxdp->Control_1 == END_OF_BLOCK) { 2499 spin_unlock(&nic->put_lock);
2085 rxdp = (RxD_t *) ((unsigned long) 2500#else
2086 rxdp->Control_2); 2501 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2087 get_info.offset++; 2502 put_info.offset;
2088 get_info.offset %= 2503#endif
2089 (MAX_RXDS_PER_BLOCK + 1); 2504 while (RXD_IS_UP2DT(rxdp) &&
2090 get_block++; 2505 (((get_offset + 1) % ring_bufs) != put_offset)) {
2091 get_block %= nic->block_count[i]; 2506 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2092 mac_control->rx_curr_get_info[i]. 2507 if (skb == NULL) {
2093 offset = get_info.offset; 2508 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2094 mac_control->rx_curr_get_info[i]. 2509 dev->name);
2095 block_index = get_block; 2510 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2096 continue; 2511 spin_unlock(&nic->rx_lock);
2097 } 2512 return;
2098 get_offset =
2099 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2100 get_info.offset;
2101 skb = (struct sk_buff *) ((unsigned long)
2102 rxdp->Host_Control);
2103 if (skb == NULL) {
2104 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2105 dev->name);
2106 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2107 return;
2108 }
2109 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2110 val16 = (u16) (val64 >> 48);
2111 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2112 pci_unmap_single(nic->pdev, (dma_addr_t)
2113 rxdp->Buffer0_ptr,
2114 dev->mtu +
2115 HEADER_ETHERNET_II_802_3_SIZE +
2116 HEADER_802_2_SIZE +
2117 HEADER_SNAP_SIZE,
2118 PCI_DMA_FROMDEVICE);
2119 rx_osm_handler(nic, val16, rxdp, i);
2120 get_info.offset++;
2121 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2122 rxdp =
2123 nic->rx_blocks[i][get_block].block_virt_addr +
2124 get_info.offset;
2125 mac_control->rx_curr_get_info[i].offset =
2126 get_info.offset;
2127 pkt_cnt++;
2128 if ((indicate_max_pkts)
2129 && (pkt_cnt > indicate_max_pkts))
2130 break;
2131 } 2513 }
2514#ifndef CONFIG_2BUFF_MODE
2515 pci_unmap_single(nic->pdev, (dma_addr_t)
2516 rxdp->Buffer0_ptr,
2517 dev->mtu +
2518 HEADER_ETHERNET_II_802_3_SIZE +
2519 HEADER_802_2_SIZE +
2520 HEADER_SNAP_SIZE,
2521 PCI_DMA_FROMDEVICE);
2132#else 2522#else
2133 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) + 2523 pci_unmap_single(nic->pdev, (dma_addr_t)
2524 rxdp->Buffer0_ptr,
2525 BUF0_LEN, PCI_DMA_FROMDEVICE);
2526 pci_unmap_single(nic->pdev, (dma_addr_t)
2527 rxdp->Buffer1_ptr,
2528 BUF1_LEN, PCI_DMA_FROMDEVICE);
2529 pci_unmap_single(nic->pdev, (dma_addr_t)
2530 rxdp->Buffer2_ptr,
2531 dev->mtu + BUF0_LEN + 4,
2532 PCI_DMA_FROMDEVICE);
2533#endif
2534 rx_osm_handler(ring_data, rxdp);
2535 get_info.offset++;
2536 ring_data->rx_curr_get_info.offset =
2134 get_info.offset; 2537 get_info.offset;
2135 spin_lock(&nic->put_lock); 2538 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2136 put_offset = nic->put_pos[i]; 2539 get_info.offset;
2137 spin_unlock(&nic->put_lock); 2540 if (get_info.offset &&
2138 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) && 2541 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2139 !(rxdp->Control_2 & BIT(0))) && 2542 get_info.offset = 0;
2140 (((get_offset + 1) % ring_bufs) != put_offset)) { 2543 ring_data->rx_curr_get_info.offset
2141 skb = (struct sk_buff *) ((unsigned long) 2544 = get_info.offset;
2142 rxdp->Host_Control); 2545 get_block++;
2143 if (skb == NULL) { 2546 get_block %= ring_data->block_count;
2144 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2547 ring_data->rx_curr_get_info.block_index
2145 dev->name); 2548 = get_block;
2146 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2549 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2147 return; 2550 }
2148 }
2149
2150 pci_unmap_single(nic->pdev, (dma_addr_t)
2151 rxdp->Buffer0_ptr,
2152 BUF0_LEN, PCI_DMA_FROMDEVICE);
2153 pci_unmap_single(nic->pdev, (dma_addr_t)
2154 rxdp->Buffer1_ptr,
2155 BUF1_LEN, PCI_DMA_FROMDEVICE);
2156 pci_unmap_single(nic->pdev, (dma_addr_t)
2157 rxdp->Buffer2_ptr,
2158 dev->mtu + BUF0_LEN + 4,
2159 PCI_DMA_FROMDEVICE);
2160 ba = &nic->ba[i][get_block][get_info.offset];
2161
2162 rx_osm_handler(nic, rxdp, i, ba);
2163
2164 get_info.offset++;
2165 mac_control->rx_curr_get_info[i].offset =
2166 get_info.offset;
2167 rxdp =
2168 nic->rx_blocks[i][get_block].block_virt_addr +
2169 get_info.offset;
2170 2551
2171 if (get_info.offset && 2552 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2172 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2173 get_info.offset = 0;
2174 mac_control->rx_curr_get_info[i].
2175 offset = get_info.offset;
2176 get_block++;
2177 get_block %= nic->block_count[i];
2178 mac_control->rx_curr_get_info[i].
2179 block_index = get_block;
2180 rxdp =
2181 nic->rx_blocks[i][get_block].
2182 block_virt_addr;
2183 }
2184 get_offset =
2185 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2186 get_info.offset; 2553 get_info.offset;
2187 pkt_cnt++; 2554#ifdef CONFIG_S2IO_NAPI
2188 if ((indicate_max_pkts) 2555 nic->pkts_to_process -= 1;
2189 && (pkt_cnt > indicate_max_pkts)) 2556 if (!nic->pkts_to_process)
2190 break; 2557 break;
2191 } 2558#else
2192#endif 2559 pkt_cnt++;
2193 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2560 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2194 break; 2561 break;
2562#endif
2195 } 2563 }
2564 spin_unlock(&nic->rx_lock);
2196} 2565}
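The cursor arithmetic in rx_intr_handler linearizes a (block, offset) pair as block * (MAX_RXDS_PER_BLOCK + 1) + offset and always stops one slot short of the put position. A small standalone model of that arithmetic, assuming MAX_RXDS_PER_BLOCK is 127 (an assumption for illustration):

    #include <stdio.h>

    #define RXDS_PER_BLOCK 127  /* assumed MAX_RXDS_PER_BLOCK */

    int main(void)
    {
        int block_count = 4;
        int ring_bufs = block_count * (RXDS_PER_BLOCK + 1);
        int get = 0, put = 300;  /* example cursor positions */
        int processed = 0;

        /* Advance the get cursor exactly as the handler does, stopping
         * one slot short of put. */
        while (((get + 1) % ring_bufs) != put) {
            get = (get + 1) % ring_bufs;
            processed++;
        }
        printf("consumed %d descriptors, stopped at slot %d (put=%d)\n",
               processed, get, put);
        return 0;
    }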
2197#endif 2566
2198/** 2567/**
2199 * tx_intr_handler - Transmit interrupt handler 2568 * tx_intr_handler - Transmit interrupt handler
2200 * @nic : device private variable 2569 * @nic : device private variable
2201 * Description: 2570 * Description:
2202 * If an interrupt was raised to indicate DMA complete of the 2571 * If an interrupt was raised to indicate DMA complete of the
2203 * Tx packet, this function is called. It identifies the last TxD 2572 * Tx packet, this function is called. It identifies the last TxD
2204 * whose buffer was freed and frees all skbs whose data have already 2573 * whose buffer was freed and frees all skbs whose data have already
2205 * been DMA'ed into the NIC's internal memory. 2574 * been DMA'ed into the NIC's internal memory.
2206 * Return Value: 2575 * Return Value:
2207 * NONE 2576 * NONE
2208 */ 2577 */
2209 2578
2210static void tx_intr_handler(struct s2io_nic *nic) 2579static void tx_intr_handler(fifo_info_t *fifo_data)
2211{ 2580{
2212 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2581 nic_t *nic = fifo_data->nic;
2213 struct net_device *dev = (struct net_device *) nic->dev; 2582 struct net_device *dev = (struct net_device *) nic->dev;
2214 tx_curr_get_info_t get_info, put_info; 2583 tx_curr_get_info_t get_info, put_info;
2215 struct sk_buff *skb; 2584 struct sk_buff *skb;
2216 TxD_t *txdlp; 2585 TxD_t *txdlp;
2217 register u64 val64 = 0;
2218 int i;
2219 u16 j, frg_cnt; 2586 u16 j, frg_cnt;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2222 2587
2223 mac_control = &nic->mac_control; 2588 get_info = fifo_data->tx_curr_get_info;
2224 config = &nic->config; 2589 put_info = fifo_data->tx_curr_put_info;
2225 2590 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2226 /* 2591 list_virt_addr;
2227 * tx_traffic_int reg is an R1 register, hence we read and write 2592 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2228 * back the same value in the register to clear it. 2593 (get_info.offset != put_info.offset) &&
2229 */ 2594 (txdlp->Host_Control)) {
2230 val64 = readq(&bar0->tx_traffic_int); 2595 /* Check for TxD errors */
2231 writeq(val64, &bar0->tx_traffic_int); 2596 if (txdlp->Control_1 & TXD_T_CODE) {
2597 unsigned long long err;
2598 err = txdlp->Control_1 & TXD_T_CODE;
2599 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2600 err);
2601 }
2232 2602
2233 for (i = 0; i < config->tx_fifo_num; i++) { 2603 skb = (struct sk_buff *) ((unsigned long)
2234 get_info = mac_control->tx_curr_get_info[i]; 2604 txdlp->Host_Control);
2235 put_info = mac_control->tx_curr_put_info[i]; 2605 if (skb == NULL) {
2236 txdlp = (TxD_t *) nic->list_info[i][get_info.offset]. 2606 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2237 list_virt_addr; 2607 __FUNCTION__);
2238 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 2608 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2239 (get_info.offset != put_info.offset) && 2609 return;
2240 (txdlp->Host_Control)) { 2610 }
2241 /* Check for TxD errors */
2242 if (txdlp->Control_1 & TXD_T_CODE) {
2243 unsigned long long err;
2244 err = txdlp->Control_1 & TXD_T_CODE;
2245 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2246 err);
2247 }
2248 2611
2249 skb = (struct sk_buff *) ((unsigned long) 2612 frg_cnt = skb_shinfo(skb)->nr_frags;
2250 txdlp->Host_Control); 2613 nic->tx_pkt_count++;
2251 if (skb == NULL) { 2614
2252 DBG_PRINT(ERR_DBG, "%s: Null skb ", 2615 pci_unmap_single(nic->pdev, (dma_addr_t)
2253 dev->name); 2616 txdlp->Buffer_Pointer,
2254 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 2617 skb->len - skb->data_len,
2255 return; 2618 PCI_DMA_TODEVICE);
2619 if (frg_cnt) {
2620 TxD_t *temp;
2621 temp = txdlp;
2622 txdlp++;
2623 for (j = 0; j < frg_cnt; j++, txdlp++) {
2624 skb_frag_t *frag =
2625 &skb_shinfo(skb)->frags[j];
2626 if (!txdlp->Buffer_Pointer)
2627 break;
2628 pci_unmap_page(nic->pdev,
2629 (dma_addr_t)
2630 txdlp->
2631 Buffer_Pointer,
2632 frag->size,
2633 PCI_DMA_TODEVICE);
2256 } 2634 }
2257 nic->tx_pkt_count++; 2635 txdlp = temp;
2258
2259 frg_cnt = skb_shinfo(skb)->nr_frags;
2260
2261 /* For unfragmented skb */
2262 pci_unmap_single(nic->pdev, (dma_addr_t)
2263 txdlp->Buffer_Pointer,
2264 skb->len - skb->data_len,
2265 PCI_DMA_TODEVICE);
2266 if (frg_cnt) {
2267 TxD_t *temp = txdlp;
2268 txdlp++;
2269 for (j = 0; j < frg_cnt; j++, txdlp++) {
2270 skb_frag_t *frag =
2271 &skb_shinfo(skb)->frags[j];
2272 pci_unmap_page(nic->pdev,
2273 (dma_addr_t)
2274 txdlp->
2275 Buffer_Pointer,
2276 frag->size,
2277 PCI_DMA_TODEVICE);
2278 }
2279 txdlp = temp;
2280 }
2281 memset(txdlp, 0,
2282 (sizeof(TxD_t) * config->max_txds));
2283
2284 /* Updating the statistics block */
2285 nic->stats.tx_packets++;
2286 nic->stats.tx_bytes += skb->len;
2287 dev_kfree_skb_irq(skb);
2288
2289 get_info.offset++;
2290 get_info.offset %= get_info.fifo_len + 1;
2291 txdlp = (TxD_t *) nic->list_info[i]
2292 [get_info.offset].list_virt_addr;
2293 mac_control->tx_curr_get_info[i].offset =
2294 get_info.offset;
2295 } 2636 }
2637 memset(txdlp, 0,
2638 (sizeof(TxD_t) * fifo_data->max_txds));
2639
2640 /* Updating the statistics block */
2641 nic->stats.tx_bytes += skb->len;
2642 dev_kfree_skb_irq(skb);
2643
2644 get_info.offset++;
2645 get_info.offset %= get_info.fifo_len + 1;
2646 txdlp = (TxD_t *) fifo_data->list_info
2647 [get_info.offset].list_virt_addr;
2648 fifo_data->tx_curr_get_info.offset =
2649 get_info.offset;
2296 } 2650 }
2297 2651
2298 spin_lock(&nic->tx_lock); 2652 spin_lock(&nic->tx_lock);
@@ -2301,13 +2655,13 @@ static void tx_intr_handler(struct s2io_nic *nic)
2301 spin_unlock(&nic->tx_lock); 2655 spin_unlock(&nic->tx_lock);
2302} 2656}
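tx_intr_handler's reclaim loop has three independent stop conditions: the descriptor is still owned by the NIC, the get cursor has caught up with put, or the slot carries no skb pointer. A standalone model of the loop (the OWN bit position and fifo length are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define OWN      (1ULL << 7)  /* illustrative ownership flag */
    #define FIFO_LEN 8

    struct txd { uint64_t control; uintptr_t host_control; };

    /* Reclaim descriptors until one is still owned by the NIC, get
     * meets put, or the slot carries no skb pointer. */
    static int reclaim(struct txd *ring, int *get, int put)
    {
        int freed = 0;

        while (!(ring[*get].control & OWN) &&
               *get != put &&
               ring[*get].host_control) {
            memset(&ring[*get], 0, sizeof(ring[*get]));
            *get = (*get + 1) % (FIFO_LEN + 1);
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        struct txd ring[FIFO_LEN + 1] = { 0 };
        int get = 0, put = 5;

        for (int i = 0; i < put; i++)
            ring[i].host_control = 1;  /* pretend an skb is attached */
        ring[3].control |= OWN;        /* NIC still owns slot 3 */

        printf("reclaimed %d TxDs\n", reclaim(ring, &get, put));
        return 0;
    }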
2303 2657
2304/** 2658/**
2305 * alarm_intr_handler - Alarm Interrupt handler 2659 * alarm_intr_handler - Alarm Interrupt handler
2306 * @nic: device private variable 2660 * @nic: device private variable
2307 * Description: If the interrupt was neither because of Rx packet nor Tx 2661 * Description: If the interrupt was neither because of Rx packet nor Tx
2308 * complete, this function is called. If the interrupt was to indicate 2662 * complete, this function is called. If the interrupt was to indicate
2309 * a loss of link, the OSM link status handler is invoked; for any other 2663 * a loss of link, the OSM link status handler is invoked; for any other
2310 * alarm interrupt, the block that raised the interrupt is displayed 2664 * alarm interrupt, the block that raised the interrupt is displayed
2311 * and a H/W reset is issued. 2665 * and a H/W reset is issued.
2312 * Return Value: 2666 * Return Value:
2313 * NONE 2667 * NONE
@@ -2320,10 +2674,32 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2320 register u64 val64 = 0, err_reg = 0; 2674 register u64 val64 = 0, err_reg = 0;
2321 2675
2322 /* Handling link status change error Intr */ 2676 /* Handling link status change error Intr */
2323 err_reg = readq(&bar0->mac_rmac_err_reg); 2677 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2324 writeq(err_reg, &bar0->mac_rmac_err_reg); 2678 err_reg = readq(&bar0->mac_rmac_err_reg);
2325 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) { 2679 writeq(err_reg, &bar0->mac_rmac_err_reg);
2326 schedule_work(&nic->set_link_task); 2680 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2681 schedule_work(&nic->set_link_task);
2682 }
2683 }
2684
2685 /* Handling Ecc errors */
2686 val64 = readq(&bar0->mc_err_reg);
2687 writeq(val64, &bar0->mc_err_reg);
2688 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2689 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2690 nic->mac_control.stats_info->sw_stat.
2691 double_ecc_errs++;
2692 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2693 dev->name);
2694 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2695 if (nic->device_type != XFRAME_II_DEVICE) {
2696 netif_stop_queue(dev);
2697 schedule_work(&nic->rst_timer_task);
2698 }
2699 } else {
2700 nic->mac_control.stats_info->sw_stat.
2701 single_ecc_errs++;
2702 }
2327 } 2703 }
2328 2704
2329 /* In case of a serious error, the device will be Reset. */ 2705 /* In case of a serious error, the device will be Reset. */
@@ -2338,7 +2714,7 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2338 /* 2714 /*
2339 * Also, as mentioned in the latest Errata sheets, if the PCC_FB_ECC 2715 * Also, as mentioned in the latest Errata sheets, if the PCC_FB_ECC
2340 * Error occurs, the adapter will be recycled by disabling the 2716 * Error occurs, the adapter will be recycled by disabling the
2341 * adapter enable bit and enabling it again after the device 2717 * adapter enable bit and enabling it again after the device
2342 * becomes Quiescent. 2718 * becomes Quiescent.
2343 */ 2719 */
2344 val64 = readq(&bar0->pcc_err_reg); 2720 val64 = readq(&bar0->pcc_err_reg);
@@ -2354,18 +2730,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2354 /* Other type of interrupts are not being handled now, TODO */ 2730 /* Other type of interrupts are not being handled now, TODO */
2355} 2731}
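The ECC branch added above encodes a simple policy: single-bit errors are corrected by the hardware and merely counted, while double-bit errors are treated as fatal on Xframe I, stopping the queue and scheduling a reset. A compact standalone model of that classification (the flag bit positions are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ECC_SNG (1ULL << 0)  /* illustrative single-bit error flag */
    #define ECC_DBL (1ULL << 1)  /* illustrative double-bit error flag */

    static unsigned long single_errs, double_errs;

    /* Single-bit errors are counted; double-bit errors trigger a
     * queue stop and a scheduled reset. */
    static void handle_ecc(uint64_t err_reg)
    {
        if (!(err_reg & (ECC_SNG | ECC_DBL)))
            return;
        if (err_reg & ECC_DBL) {
            double_errs++;
            printf("double ECC error: stop queue, schedule reset\n");
        } else {
            single_errs++;
        }
    }

    int main(void)
    {
        handle_ecc(ECC_SNG);
        handle_ecc(ECC_DBL);
        printf("single=%lu double=%lu\n", single_errs, double_errs);
        return 0;
    }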
2356 2732
2357/** 2733/**
2358 * wait_for_cmd_complete - waits for a command to complete. 2734 * wait_for_cmd_complete - waits for a command to complete.
2359 * @sp : private member of the device structure, which is a pointer to the 2735 * @sp : private member of the device structure, which is a pointer to the
2360 * s2io_nic structure. 2736 * s2io_nic structure.
2361 * Description: Function that waits for a command to write into RMAC 2737 * Description: Function that waits for a command to write into RMAC
2362 * ADDR DATA registers to be completed and returns either success or 2738 * ADDR DATA registers to be completed and returns either success or
2363 * error depending on whether the command was complete or not. 2739 * error depending on whether the command was complete or not.
2364 * Return value: 2740 * Return value:
2365 * SUCCESS on success and FAILURE on failure. 2741 * SUCCESS on success and FAILURE on failure.
2366 */ 2742 */
2367 2743
2368static int wait_for_cmd_complete(nic_t * sp) 2744int wait_for_cmd_complete(nic_t * sp)
2369{ 2745{
2370 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2746 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2371 int ret = FAILURE, cnt = 0; 2747 int ret = FAILURE, cnt = 0;
@@ -2385,29 +2761,32 @@ static int wait_for_cmd_complete(nic_t * sp)
2385 return ret; 2761 return ret;
2386} 2762}
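wait_for_cmd_complete is an instance of the bounded-poll idiom: probe a status register a fixed number of times with a delay between probes, and return FAILURE instead of hanging if the hardware never responds. A runnable userspace sketch of the idiom (the retry count, delay, and readiness probe are illustrative):

    #include <stdio.h>
    #include <unistd.h>

    #define SUCCESS 0
    #define FAILURE 1

    static int command_done(int tries)  /* hypothetical readiness probe */
    {
        return tries >= 3;              /* pretend it completes on try 3 */
    }

    /* Poll a bounded number of times with a fixed delay, returning
     * FAILURE rather than hanging if the hardware never responds. */
    static int wait_for_complete(void)
    {
        for (int cnt = 0; cnt < 10; cnt++) {
            if (command_done(cnt))
                return SUCCESS;
            usleep(50 * 1000);          /* stand-in for msleep(50) */
        }
        return FAILURE;
    }

    int main(void)
    {
        printf("result: %s\n",
               wait_for_complete() == SUCCESS ? "SUCCESS" : "FAILURE");
        return 0;
    }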
2387 2763
2388/** 2764/**
2389 * s2io_reset - Resets the card. 2765 * s2io_reset - Resets the card.
2390 * @sp : private member of the device structure. 2766 * @sp : private member of the device structure.
2391 * Description: Function to Reset the card. This function then also 2767 * Description: Function to Reset the card. This function then also
2392 * restores the previously saved PCI configuration space registers as 2768 * restores the previously saved PCI configuration space registers as
2393 * the card reset also resets the configuration space. 2769 * the card reset also resets the configuration space.
2394 * Return value: 2770 * Return value:
2395 * void. 2771 * void.
2396 */ 2772 */
2397 2773
2398static void s2io_reset(nic_t * sp) 2774void s2io_reset(nic_t * sp)
2399{ 2775{
2400 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2776 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2401 u64 val64; 2777 u64 val64;
2402 u16 subid; 2778 u16 subid, pci_cmd;
2779
2780 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2781 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2403 2782
2404 val64 = SW_RESET_ALL; 2783 val64 = SW_RESET_ALL;
2405 writeq(val64, &bar0->sw_reset); 2784 writeq(val64, &bar0->sw_reset);
2406 2785
2407 /* 2786 /*
2408 * At this stage, if the PCI write is indeed completed, the 2787 * At this stage, if the PCI write is indeed completed, the
2409 * card is reset and so is the PCI Config space of the device. 2788 * card is reset and so is the PCI Config space of the device.
2410 * So a read cannot be issued at this stage on any of the 2789 * So a read cannot be issued at this stage on any of the
2411 * registers to ensure the write into "sw_reset" register 2790 * registers to ensure the write into "sw_reset" register
2412 * has gone through. 2791 * has gone through.
2413 * Question: Is there any system call that will explicitly force 2792 * Question: Is there any system call that will explicitly force
@@ -2418,42 +2797,72 @@ static void s2io_reset(nic_t * sp)
2418 */ 2797 */
2419 msleep(250); 2798 msleep(250);
2420 2799
2421 /* Restore the PCI state saved during initializarion. */ 2800 /* Restore the PCI state saved during initialization. */
2422 pci_restore_state(sp->pdev); 2801 pci_restore_state(sp->pdev);
2802 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2803 pci_cmd);
2423 s2io_init_pci(sp); 2804 s2io_init_pci(sp);
2424 2805
2425 msleep(250); 2806 msleep(250);
2426 2807
2808 /* Set swapper to enable I/O register access */
2809 s2io_set_swapper(sp);
2810
2811 /* Clear certain PCI/PCI-X fields after reset */
2812 if (sp->device_type == XFRAME_II_DEVICE) {
2813 /* Clear parity err detect bit */
2814 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2815
2816 /* Clearing PCIX Ecc status register */
2817 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2818
2819 /* Clearing PCI_STATUS error reflected here */
2820 writeq(BIT(62), &bar0->txpic_int_reg);
2821 }
2822
2823 /* Reset device statistics maintained by OS */
2824 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2825
2427 /* SXE-002: Configure link and activity LED to turn it off */ 2826 /* SXE-002: Configure link and activity LED to turn it off */
2428 subid = sp->pdev->subsystem_device; 2827 subid = sp->pdev->subsystem_device;
2429 if ((subid & 0xFF) >= 0x07) { 2828 if (((subid & 0xFF) >= 0x07) &&
2829 (sp->device_type == XFRAME_I_DEVICE)) {
2430 val64 = readq(&bar0->gpio_control); 2830 val64 = readq(&bar0->gpio_control);
2431 val64 |= 0x0000800000000000ULL; 2831 val64 |= 0x0000800000000000ULL;
2432 writeq(val64, &bar0->gpio_control); 2832 writeq(val64, &bar0->gpio_control);
2433 val64 = 0x0411040400000000ULL; 2833 val64 = 0x0411040400000000ULL;
2434 writeq(val64, (void __iomem *) bar0 + 0x2700); 2834 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2835 }
2836
2837 /*
2838 * Clear spurious ECC interrupts that would have occurred on
2839 * XFRAME II cards after reset.
2840 */
2841 if (sp->device_type == XFRAME_II_DEVICE) {
2842 val64 = readq(&bar0->pcc_err_reg);
2843 writeq(val64, &bar0->pcc_err_reg);
2435 } 2844 }
2436 2845
2437 sp->device_enabled_once = FALSE; 2846 sp->device_enabled_once = FALSE;
2438} 2847}
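The PCI-X command register handling added to s2io_reset follows a general rule: anything in config space that pci_restore_state() does not cover must be saved by hand before the software reset and written back afterwards. A minimal model of that save/restore bracket (the register layout here is a stand-in, not real PCI access):

    #include <stdint.h>
    #include <stdio.h>

    struct fake_cfg_space { uint16_t pcix_cmd; };  /* illustrative */

    /* Save the register that the generic restore path misses, reset,
     * then write it back so MMRBC/OST-style settings survive. */
    static void reset_with_cfg_restore(struct fake_cfg_space *cfg)
    {
        uint16_t saved = cfg->pcix_cmd;  /* pci_read_config_word() */

        cfg->pcix_cmd = 0;               /* chip reset clears it */
        /* ... wait for the device to come back ... */

        cfg->pcix_cmd = saved;           /* pci_write_config_word() */
    }

    int main(void)
    {
        struct fake_cfg_space cfg = { .pcix_cmd = 0x1f };

        reset_with_cfg_restore(&cfg);
        printf("PCI-X cmd after reset: %#x\n", cfg.pcix_cmd);
        return 0;
    }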
2439 2848
2440/** 2849/**
2441 * s2io_set_swapper - to set the swapper control on the card 2850 * s2io_set_swapper - to set the swapper control on the card
2442 * @sp : private member of the device structure, 2851 * @sp : private member of the device structure,
2443 * pointer to the s2io_nic structure. 2852 * pointer to the s2io_nic structure.
2444 * Description: Function to set the swapper control on the card 2853 * Description: Function to set the swapper control on the card
2445 * correctly depending on the 'endianness' of the system. 2854 * correctly depending on the 'endianness' of the system.
2446 * Return value: 2855 * Return value:
2447 * SUCCESS on success and FAILURE on failure. 2856 * SUCCESS on success and FAILURE on failure.
2448 */ 2857 */
2449 2858
2450static int s2io_set_swapper(nic_t * sp) 2859int s2io_set_swapper(nic_t * sp)
2451{ 2860{
2452 struct net_device *dev = sp->dev; 2861 struct net_device *dev = sp->dev;
2453 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2862 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2454 u64 val64, valt, valr; 2863 u64 val64, valt, valr;
2455 2864
2456 /* 2865 /*
2457 * Set proper endian settings and verify the same by reading 2866 * Set proper endian settings and verify the same by reading
2458 * the PIF Feed-back register. 2867 * the PIF Feed-back register.
2459 */ 2868 */
@@ -2505,8 +2914,9 @@ static int s2io_set_swapper(nic_t * sp)
2505 i++; 2914 i++;
2506 } 2915 }
2507 if(i == 4) { 2916 if(i == 4) {
2917 unsigned long long x = val64;
2508 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr "); 2918 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2509 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64); 2919 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2510 return FAILURE; 2920 return FAILURE;
2511 } 2921 }
2512 } 2922 }
@@ -2514,8 +2924,8 @@ static int s2io_set_swapper(nic_t * sp)
2514 val64 &= 0xFFFF000000000000ULL; 2924 val64 &= 0xFFFF000000000000ULL;
2515 2925
2516#ifdef __BIG_ENDIAN 2926#ifdef __BIG_ENDIAN
2517 /* 2927 /*
2518 * The device is set to a big endian format by default, so a 2928 * The device is set to a big endian format by default, so a
2519 * big endian driver need not set anything. 2929 * big endian driver need not set anything.
2520 */ 2930 */
2521 val64 |= (SWAPPER_CTRL_TXP_FE | 2931 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2531,9 +2941,9 @@ static int s2io_set_swapper(nic_t * sp)
2531 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); 2941 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2532 writeq(val64, &bar0->swapper_ctrl); 2942 writeq(val64, &bar0->swapper_ctrl);
2533#else 2943#else
2534 /* 2944 /*
2535 * Initially we enable all bits to make it accessible by the 2945 * Initially we enable all bits to make it accessible by the
2536 * driver, then we selectively enable only those bits that 2946 * driver, then we selectively enable only those bits that
2537 * we want to set. 2947 * we want to set.
2538 */ 2948 */
2539 val64 |= (SWAPPER_CTRL_TXP_FE | 2949 val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2555,8 +2965,8 @@ static int s2io_set_swapper(nic_t * sp)
2555#endif 2965#endif
2556 val64 = readq(&bar0->swapper_ctrl); 2966 val64 = readq(&bar0->swapper_ctrl);
2557 2967
2558 /* 2968 /*
2559 * Verifying if endian settings are accurate by reading a 2969 * Verifying if endian settings are accurate by reading a
2560 * feedback register. 2970 * feedback register.
2561 */ 2971 */
2562 val64 = readq(&bar0->pif_rd_swapper_fb); 2972 val64 = readq(&bar0->pif_rd_swapper_fb);
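The feedback check that follows works because a fixed 64-bit pattern read through a mis-programmed swapper comes back byte-reversed. A standalone sketch of the verification (the expected pattern here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FEEDBACK_OK 0x0123456789ABCDEFULL  /* assumed pattern */

    /* A wrong swapper setting returns the pattern byte-reversed. */
    static int verify_swapper(uint64_t readback)
    {
        if (readback == FEEDBACK_OK)
            return 0;                           /* SUCCESS */
        if (readback == __builtin_bswap64(FEEDBACK_OK))
            printf("byte order reversed: fix swapper_ctrl\n");
        return 1;                               /* FAILURE */
    }

    int main(void)
    {
        printf("good path: %d\n", verify_swapper(FEEDBACK_OK));
        printf("bad path:  %d\n",
               verify_swapper(__builtin_bswap64(FEEDBACK_OK)));
        return 0;
    }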
@@ -2576,55 +2986,63 @@ static int s2io_set_swapper(nic_t * sp)
2576 * Functions defined below concern the OS part of the driver * 2986 * Functions defined below concern the OS part of the driver *
2577 * ********************************************************* */ 2987 * ********************************************************* */
2578 2988
2579/** 2989/**
2580 * s2io_open - open entry point of the driver 2990 * s2io_open - open entry point of the driver
2581 * @dev : pointer to the device structure. 2991 * @dev : pointer to the device structure.
2582 * Description: 2992 * Description:
2583 * This function is the open entry point of the driver. It mainly calls a 2993 * This function is the open entry point of the driver. It mainly calls a
2584 * function to allocate Rx buffers and inserts them into the buffer 2994 * function to allocate Rx buffers and inserts them into the buffer
2585 * descriptors and then enables the Rx part of the NIC. 2995 * descriptors and then enables the Rx part of the NIC.
2586 * Return value: 2996 * Return value:
2587 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2588 * file on failure. 2998 * file on failure.
2589 */ 2999 */
2590 3000
2591static int s2io_open(struct net_device *dev) 3001int s2io_open(struct net_device *dev)
2592{ 3002{
2593 nic_t *sp = dev->priv; 3003 nic_t *sp = dev->priv;
2594 int err = 0; 3004 int err = 0;
2595 3005
2596 /* 3006 /*
2597 * Make sure you have link off by default every time 3007 * Make sure you have link off by default every time
2598 * the NIC is initialized 3008 * the NIC is initialized
2599 */ 3009 */
2600 netif_carrier_off(dev); 3010 netif_carrier_off(dev);
2601 sp->last_link_state = LINK_DOWN; 3011 sp->last_link_state = 0;
2602 3012
2603 /* Initialize H/W and enable interrupts */ 3013 /* Initialize H/W and enable interrupts */
2604 if (s2io_card_up(sp)) { 3014 if (s2io_card_up(sp)) {
2605 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 3015 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2606 dev->name); 3016 dev->name);
2607 return -ENODEV; 3017 err = -ENODEV;
3018 goto hw_init_failed;
2608 } 3019 }
2609 3020
2610 /* After proper initialization of H/W, register ISR */ 3021 /* After proper initialization of H/W, register ISR */
2611 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ, 3022 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2612 sp->name, dev); 3023 sp->name, dev);
2613 if (err) { 3024 if (err) {
2614 s2io_reset(sp);
2615 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 3025 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2616 dev->name); 3026 dev->name);
2617 return err; 3027 goto isr_registration_failed;
2618 } 3028 }
2619 3029
2620 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { 3030 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2621 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); 3031 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2622 s2io_reset(sp); 3032 err = -ENODEV;
2623 return -ENODEV; 3033 goto setting_mac_address_failed;
2624 } 3034 }
2625 3035
2626 netif_start_queue(dev); 3036 netif_start_queue(dev);
2627 return 0; 3037 return 0;
3038
3039setting_mac_address_failed:
3040 free_irq(sp->pdev->irq, dev);
3041isr_registration_failed:
3042 del_timer_sync(&sp->alarm_timer);
3043 s2io_reset(sp);
3044hw_init_failed:
3045 return err;
2628} 3046}
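The reworked error path above uses the kernel's goto-unwind idiom: each failure jumps to a label that undoes exactly the steps already completed, in reverse order of acquisition, so nothing leaks regardless of where the failure occurs. A minimal standalone example of the idiom (the step and undo functions are placeholders):

    #include <stdio.h>

    static int step_a(void) { return 0; }   /* e.g. bring hardware up */
    static int step_b(void) { return -1; }  /* e.g. request_irq(); fails */
    static void undo_a(void) { printf("undo a\n"); }

    /* Each failure jumps to a label that unwinds only what was done. */
    static int example_open(void)
    {
        int err;

        err = step_a();
        if (err)
            goto a_failed;
        err = step_b();
        if (err)
            goto b_failed;
        return 0;

    b_failed:
        undo_a();
    a_failed:
        return err;
    }

    int main(void)
    {
        printf("open returned %d\n", example_open());
        return 0;
    }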
2629 3047
2630/** 3048/**
@@ -2640,16 +3058,15 @@ static int s2io_open(struct net_device *dev)
2640 * file on failure. 3058 * file on failure.
2641 */ 3059 */
2642 3060
2643static int s2io_close(struct net_device *dev) 3061int s2io_close(struct net_device *dev)
2644{ 3062{
2645 nic_t *sp = dev->priv; 3063 nic_t *sp = dev->priv;
2646
2647 flush_scheduled_work(); 3064 flush_scheduled_work();
2648 netif_stop_queue(dev); 3065 netif_stop_queue(dev);
2649 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3066 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2650 s2io_card_down(sp); 3067 s2io_card_down(sp);
2651 3068
2652 free_irq(dev->irq, dev); 3069 free_irq(sp->pdev->irq, dev);
2653 sp->device_close_flag = TRUE; /* Device is shut down. */ 3070 sp->device_close_flag = TRUE; /* Device is shut down. */
2654 return 0; 3071 return 0;
2655} 3072}
@@ -2667,7 +3084,7 @@ static int s2io_close(struct net_device *dev)
2667 * 0 on success & 1 on failure. 3084 * 0 on success & 1 on failure.
2668 */ 3085 */
2669 3086
2670static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3087int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2671{ 3088{
2672 nic_t *sp = dev->priv; 3089 nic_t *sp = dev->priv;
2673 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3090 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
@@ -2678,29 +3095,39 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2678#ifdef NETIF_F_TSO 3095#ifdef NETIF_F_TSO
2679 int mss; 3096 int mss;
2680#endif 3097#endif
3098 u16 vlan_tag = 0;
3099 int vlan_priority = 0;
2681 mac_info_t *mac_control; 3100 mac_info_t *mac_control;
2682 struct config_param *config; 3101 struct config_param *config;
2683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2684 3102
2685 mac_control = &sp->mac_control; 3103 mac_control = &sp->mac_control;
2686 config = &sp->config; 3104 config = &sp->config;
2687 3105
2688 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name); 3106 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2689 spin_lock_irqsave(&sp->tx_lock, flags); 3107 spin_lock_irqsave(&sp->tx_lock, flags);
2690
2691 if (atomic_read(&sp->card_state) == CARD_DOWN) { 3108 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2692 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n", 3109 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2693 dev->name); 3110 dev->name);
2694 spin_unlock_irqrestore(&sp->tx_lock, flags); 3111 spin_unlock_irqrestore(&sp->tx_lock, flags);
2695 return 1; 3112 dev_kfree_skb(skb);
3113 return 0;
2696 } 3114 }
2697 3115
2698 queue = 0; 3116 queue = 0;
2699 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2700 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2701 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2702 3117
2703 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1; 3118 /* Get Fifo number to Transmit based on vlan priority */
3119 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3120 vlan_tag = vlan_tx_tag_get(skb);
3121 vlan_priority = vlan_tag >> 13;
3122 queue = config->fifo_mapping[vlan_priority];
3123 }
3124
3125 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3126 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3127 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3128 list_virt_addr;
3129
3130 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2704 /* Avoid "put" pointer going beyond "get" pointer */ 3131 /* Avoid "put" pointer going beyond "get" pointer */
2705 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { 3132 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2706 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n"); 3133 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
@@ -2709,6 +3136,15 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2709 spin_unlock_irqrestore(&sp->tx_lock, flags); 3136 spin_unlock_irqrestore(&sp->tx_lock, flags);
2710 return 0; 3137 return 0;
2711 } 3138 }
3139
3140 /* A buffer with no data will be dropped */
3141 if (!skb->len) {
3142 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3143 dev_kfree_skb(skb);
3144 spin_unlock_irqrestore(&sp->tx_lock, flags);
3145 return 0;
3146 }
3147
2712#ifdef NETIF_F_TSO 3148#ifdef NETIF_F_TSO
2713 mss = skb_shinfo(skb)->tso_size; 3149 mss = skb_shinfo(skb)->tso_size;
2714 if (mss) { 3150 if (mss) {
@@ -2720,9 +3156,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2720 frg_cnt = skb_shinfo(skb)->nr_frags; 3156 frg_cnt = skb_shinfo(skb)->nr_frags;
2721 frg_len = skb->len - skb->data_len; 3157 frg_len = skb->len - skb->data_len;
2722 3158
2723 txdp->Host_Control = (unsigned long) skb;
2724 txdp->Buffer_Pointer = pci_map_single 3159 txdp->Buffer_Pointer = pci_map_single
2725 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3160 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3161 txdp->Host_Control = (unsigned long) skb;
2726 if (skb->ip_summed == CHECKSUM_HW) { 3162 if (skb->ip_summed == CHECKSUM_HW) {
2727 txdp->Control_2 |= 3163 txdp->Control_2 |=
2728 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3164 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -2731,6 +3167,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2731 3167
2732 txdp->Control_2 |= config->tx_intr_type; 3168 txdp->Control_2 |= config->tx_intr_type;
2733 3169
3170 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3171 txdp->Control_2 |= TXD_VLAN_ENABLE;
3172 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3173 }
3174
2734 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) | 3175 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2735 TXD_GATHER_CODE_FIRST); 3176 TXD_GATHER_CODE_FIRST);
2736 txdp->Control_1 |= TXD_LIST_OWN_XENA; 3177 txdp->Control_1 |= TXD_LIST_OWN_XENA;
@@ -2738,6 +3179,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2738 /* For fragmented SKB. */ 3179 /* For fragmented SKB. */
2739 for (i = 0; i < frg_cnt; i++) { 3180 for (i = 0; i < frg_cnt; i++) {
2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3181 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3182 /* A '0' length fragment will be ignored */
3183 if (!frag->size)
3184 continue;
2741 txdp++; 3185 txdp++;
2742 txdp->Buffer_Pointer = (u64) pci_map_page 3186 txdp->Buffer_Pointer = (u64) pci_map_page
2743 (sp->pdev, frag->page, frag->page_offset, 3187 (sp->pdev, frag->page, frag->page_offset,
@@ -2747,23 +3191,23 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2747 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3191 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2748 3192
2749 tx_fifo = mac_control->tx_FIFO_start[queue]; 3193 tx_fifo = mac_control->tx_FIFO_start[queue];
2750 val64 = sp->list_info[queue][put_off].list_phy_addr; 3194 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2751 writeq(val64, &tx_fifo->TxDL_Pointer); 3195 writeq(val64, &tx_fifo->TxDL_Pointer);
2752 3196
2753 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3197 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2754 TX_FIFO_LAST_LIST); 3198 TX_FIFO_LAST_LIST);
3199
2755#ifdef NETIF_F_TSO 3200#ifdef NETIF_F_TSO
2756 if (mss) 3201 if (mss)
2757 val64 |= TX_FIFO_SPECIAL_FUNC; 3202 val64 |= TX_FIFO_SPECIAL_FUNC;
2758#endif 3203#endif
2759 writeq(val64, &tx_fifo->List_Control); 3204 writeq(val64, &tx_fifo->List_Control);
2760 3205
2761 /* Perform a PCI read to flush previous writes */ 3206 mmiowb();
2762 val64 = readq(&bar0->general_int_status);
2763 3207
2764 put_off++; 3208 put_off++;
2765 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1; 3209 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2766 mac_control->tx_curr_put_info[queue].offset = put_off; 3210 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2767 3211
2768 /* Avoid "put" pointer going beyond "get" pointer */ 3212 /* Avoid "put" pointer going beyond "get" pointer */
2769 if (((put_off + 1) % queue_len) == get_off) { 3213 if (((put_off + 1) % queue_len) == get_off) {
@@ -2779,18 +3223,74 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2779 return 0; 3223 return 0;
2780} 3224}
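The FIFO selection near the top of s2io_xmit relies on the 802.1Q TCI layout: the 3-bit priority (PCP) occupies bits 15..13 of the tag, so vlan_tag >> 13 extracts it and indexes a priority-to-FIFO table. A standalone illustration (the mapping table and tag value are examples, not the driver's defaults):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example priority-to-FIFO table, 8 priorities to 4 FIFOs. */
        int fifo_mapping[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
        uint16_t vlan_tag = 0xA064;  /* PCP = 0xA064 >> 13 = 5 */
        int priority = vlan_tag >> 13;

        printf("priority %d -> fifo %d\n", priority,
               fifo_mapping[priority]);
        return 0;
    }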
2781 3225
3226static void
3227s2io_alarm_handle(unsigned long data)
3228{
3229 nic_t *sp = (nic_t *)data;
3230
3231 alarm_intr_handler(sp);
3232 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3233}
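s2io_alarm_handle uses the self-rearming timer idiom: do the work, then mod_timer() the same timer HZ/2 ticks ahead, yielding a periodic half-second poll without a dedicated thread. A standalone model of the rearm pattern (the tick rate and duration are illustrative):

    #include <stdio.h>

    #define HZ 1000  /* illustrative tick rate */

    static unsigned long jiffies;
    static unsigned long expires;

    /* Do the work, then re-queue HZ/2 ticks into the future. */
    static void alarm_handle(void)
    {
        /* ... check alarm sources here ... */
        expires = jiffies + HZ / 2;  /* mod_timer() equivalent */
    }

    int main(void)
    {
        int fired = 0;

        alarm_handle();              /* armed at init */
        while (jiffies++ < 3 * HZ) {
            if (jiffies >= expires) {
                alarm_handle();
                fired++;
            }
        }
        printf("handler fired %d times in 3 seconds\n", fired);
        return 0;
    }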
3234
3235static void s2io_txpic_intr_handle(nic_t *sp)
3236{
3237 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3238 u64 val64;
3239
3240 val64 = readq(&bar0->pic_int_status);
3241 if (val64 & PIC_INT_GPIO) {
3242 val64 = readq(&bar0->gpio_int_reg);
3243 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3244 (val64 & GPIO_INT_REG_LINK_UP)) {
3245 val64 |= GPIO_INT_REG_LINK_DOWN;
3246 val64 |= GPIO_INT_REG_LINK_UP;
3247 writeq(val64, &bar0->gpio_int_reg);
3248 goto masking;
3249 }
3250
3251 if (((sp->last_link_state == LINK_UP) &&
3252 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3253 ((sp->last_link_state == LINK_DOWN) &&
3254 (val64 & GPIO_INT_REG_LINK_UP))) {
3255 val64 = readq(&bar0->gpio_int_mask);
3256 val64 |= GPIO_INT_MASK_LINK_DOWN;
3257 val64 |= GPIO_INT_MASK_LINK_UP;
3258 writeq(val64, &bar0->gpio_int_mask);
3259 s2io_set_link((unsigned long)sp);
3260 }
3261masking:
3262 if (sp->last_link_state == LINK_UP) {
3263 /*enable down interrupt */
3264 val64 = readq(&bar0->gpio_int_mask);
3265 /* unmasks link down intr */
3266 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3267 /* masks link up intr */
3268 val64 |= GPIO_INT_MASK_LINK_UP;
3269 writeq(val64, &bar0->gpio_int_mask);
3270 } else {
3271 /*enable UP Interrupt */
3272 val64 = readq(&bar0->gpio_int_mask);
3273 /* unmasks link up interrupt */
3274 val64 &= ~GPIO_INT_MASK_LINK_UP;
3275 /* masks link down interrupt */
3276 val64 |= GPIO_INT_MASK_LINK_DOWN;
3277 writeq(val64, &bar0->gpio_int_mask);
3278 }
3279 }
3280}
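The masking logic in s2io_txpic_intr_handle leaves only the transition that can actually happen next unmasked: with the link up it watches for link-down, and vice versa, so a bouncing link cannot raise both edges at once. A compact standalone model (the mask bit positions are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_LINK_DOWN (1ULL << 0)  /* illustrative mask bits */
    #define MASK_LINK_UP   (1ULL << 1)

    /* Unmask only the transition that can occur next. */
    static uint64_t rearm(int link_is_up, uint64_t mask)
    {
        if (link_is_up) {
            mask &= ~MASK_LINK_DOWN;    /* watch for down */
            mask |= MASK_LINK_UP;
        } else {
            mask &= ~MASK_LINK_UP;      /* watch for up */
            mask |= MASK_LINK_DOWN;
        }
        return mask;
    }

    int main(void)
    {
        printf("up:   mask=%#llx\n", (unsigned long long)rearm(1, 0));
        printf("down: mask=%#llx\n", (unsigned long long)rearm(0, 0));
        return 0;
    }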
3281
2782/** 3282/**
2783 * s2io_isr - ISR handler of the device . 3283 * s2io_isr - ISR handler of the device .
2784 * @irq: the irq of the device. 3284 * @irq: the irq of the device.
2785 * @dev_id: a void pointer to the dev structure of the NIC. 3285 * @dev_id: a void pointer to the dev structure of the NIC.
2786 * @pt_regs: pointer to the registers pushed on the stack. 3286 * @pt_regs: pointer to the registers pushed on the stack.
2787 * Description: This function is the ISR handler of the device. It 3287 * Description: This function is the ISR handler of the device. It
2788 * identifies the reason for the interrupt and calls the relevant 3288 * identifies the reason for the interrupt and calls the relevant
2789 * service routines. As a contingency measure, this ISR allocates the 3289 * service routines. As a contingency measure, this ISR allocates the
2790 * recv buffers, if their number is below the panic value, which is 3290 * recv buffers, if their number is below the panic value, which is
2791 * presently set to 25% of the original number of rcv buffers allocated. 3291 * presently set to 25% of the original number of rcv buffers allocated.
2792 * Return value: 3292 * Return value:
2793 * IRQ_HANDLED: will be returned if IRQ was handled by this routine 3293 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2794 * IRQ_NONE: will be returned if interrupt is not from our device 3294 * IRQ_NONE: will be returned if interrupt is not from our device
2795 */ 3295 */
2796static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) 3296static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2798,40 +3298,31 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2798 struct net_device *dev = (struct net_device *) dev_id; 3298 struct net_device *dev = (struct net_device *) dev_id;
2799 nic_t *sp = dev->priv; 3299 nic_t *sp = dev->priv;
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801#ifndef CONFIG_S2IO_NAPI 3301 int i;
2802 int i, ret; 3302 u64 reason = 0, val64;
2803#endif
2804 u64 reason = 0;
2805 mac_info_t *mac_control; 3303 mac_info_t *mac_control;
2806 struct config_param *config; 3304 struct config_param *config;
2807 3305
3306 atomic_inc(&sp->isr_cnt);
2808 mac_control = &sp->mac_control; 3307 mac_control = &sp->mac_control;
2809 config = &sp->config; 3308 config = &sp->config;
2810 3309
2811 /* 3310 /*
2812 * Identify the cause for interrupt and call the appropriate 3311 * Identify the cause for interrupt and call the appropriate
2813 * interrupt handler. Causes for the interrupt could be; 3312 * interrupt handler. Causes for the interrupt could be;
2814 * 1. Rx of packet. 3313 * 1. Rx of packet.
2815 * 2. Tx complete. 3314 * 2. Tx complete.
2816 * 3. Link down. 3315 * 3. Link down.
2817 * 4. Error in any functional blocks of the NIC. 3316 * 4. Error in any functional blocks of the NIC.
2818 */ 3317 */
2819 reason = readq(&bar0->general_int_status); 3318 reason = readq(&bar0->general_int_status);
2820 3319
2821 if (!reason) { 3320 if (!reason) {
2822 /* The interrupt was not raised by Xena. */ 3321 /* The interrupt was not raised by Xena. */
3322 atomic_dec(&sp->isr_cnt);
2823 return IRQ_NONE; 3323 return IRQ_NONE;
2824 } 3324 }
2825 3325
2826 /* If Intr is because of Tx Traffic */
2827 if (reason & GEN_INTR_TXTRAFFIC) {
2828 tx_intr_handler(sp);
2829 }
2830
2831 /* If Intr is because of an error */
2832 if (reason & (GEN_ERROR_INTR))
2833 alarm_intr_handler(sp);
2834
2835#ifdef CONFIG_S2IO_NAPI 3326#ifdef CONFIG_S2IO_NAPI
2836 if (reason & GEN_INTR_RXTRAFFIC) { 3327 if (reason & GEN_INTR_RXTRAFFIC) {
2837 if (netif_rx_schedule_prep(dev)) { 3328 if (netif_rx_schedule_prep(dev)) {
@@ -2843,17 +3334,43 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2843#else 3334#else
2844 /* If Intr is because of Rx Traffic */ 3335 /* If Intr is because of Rx Traffic */
2845 if (reason & GEN_INTR_RXTRAFFIC) { 3336 if (reason & GEN_INTR_RXTRAFFIC) {
2846 rx_intr_handler(sp); 3337 /*
3338 * rx_traffic_int reg is an R1 register, writing all 1's
3339 * will ensure that the actual interrupt causing bit get's
3340 * cleared and hence a read can be avoided.
3341 */
3342 val64 = 0xFFFFFFFFFFFFFFFFULL;
3343 writeq(val64, &bar0->rx_traffic_int);
3344 for (i = 0; i < config->rx_ring_num; i++) {
3345 rx_intr_handler(&mac_control->rings[i]);
3346 }
2847 } 3347 }
2848#endif 3348#endif
2849 3349
2850 /* 3350 /* If Intr is because of Tx Traffic */
2851 * If the Rx buffer count is below the panic threshold then 3351 if (reason & GEN_INTR_TXTRAFFIC) {
2852 * reallocate the buffers from the interrupt handler itself, 3352 /*
3353 * tx_traffic_int reg is an R1 register, writing all 1's
3354 * will ensure that the actual interrupt causing bit gets
3355 * cleared and hence a read can be avoided.
3356 */
3357 val64 = 0xFFFFFFFFFFFFFFFFULL;
3358 writeq(val64, &bar0->tx_traffic_int);
3359
3360 for (i = 0; i < config->tx_fifo_num; i++)
3361 tx_intr_handler(&mac_control->fifos[i]);
3362 }
3363
3364 if (reason & GEN_INTR_TXPIC)
3365 s2io_txpic_intr_handle(sp);
3366 /*
3367 * If the Rx buffer count is below the panic threshold then
3368 * reallocate the buffers from the interrupt handler itself,
2853 * else schedule a tasklet to reallocate the buffers. 3369 * else schedule a tasklet to reallocate the buffers.
2854 */ 3370 */
2855#ifndef CONFIG_S2IO_NAPI 3371#ifndef CONFIG_S2IO_NAPI
2856 for (i = 0; i < config->rx_ring_num; i++) { 3372 for (i = 0; i < config->rx_ring_num; i++) {
3373 int ret;
2857 int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 3374 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2858 int level = rx_buffer_level(sp, rxb_size, i); 3375 int level = rx_buffer_level(sp, rxb_size, i);
2859 3376
@@ -2865,6 +3382,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2865 dev->name); 3382 dev->name);
2866 DBG_PRINT(ERR_DBG, " in ISR!!\n"); 3383 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2867 clear_bit(0, (&sp->tasklet_status)); 3384 clear_bit(0, (&sp->tasklet_status));
3385 atomic_dec(&sp->isr_cnt);
2868 return IRQ_HANDLED; 3386 return IRQ_HANDLED;
2869 } 3387 }
2870 clear_bit(0, (&sp->tasklet_status)); 3388 clear_bit(0, (&sp->tasklet_status));
@@ -2874,33 +3392,69 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2874 } 3392 }
2875#endif 3393#endif
2876 3394
3395 atomic_dec(&sp->isr_cnt);
2877 return IRQ_HANDLED; 3396 return IRQ_HANDLED;
2878} 3397}
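The handler above follows a read-reason / write-1-to-clear pattern: read the consolidated general_int_status once, acknowledge each traffic cause by writing all 1's to its R1 register, then walk every ring or FIFO that may have work, with isr_cnt bracketing the whole handler so teardown can wait out in-flight ISRs. A minimal model of that control flow in plain C; the register layout, bit values, and callbacks below are stand-ins, not the driver's API:

    #include <stdint.h>

    #define INTR_TX  (1u << 0)   /* stand-ins for GEN_INTR_TXTRAFFIC etc. */
    #define INTR_RX  (1u << 1)

    struct fake_bar {
        volatile uint64_t general_int_status;
        volatile uint64_t tx_int;   /* write-1-to-clear (R1) */
        volatile uint64_t rx_int;   /* write-1-to-clear (R1) */
    };

    /* Returns 1 if the device raised the interrupt, 0 otherwise
     * (mirrors IRQ_HANDLED vs IRQ_NONE). */
    static int model_isr(struct fake_bar *bar, int nrings, int nfifos,
                         void (*rx_work)(int), void (*tx_work)(int))
    {
        uint64_t reason = bar->general_int_status;
        int i;

        if (!reason)
            return 0;                 /* not ours: IRQ_NONE */

        if (reason & INTR_RX) {
            /* Writing all 1's clears exactly the asserted cause bits,
             * so no read-back of rx_int is needed. */
            bar->rx_int = ~0ULL;
            for (i = 0; i < nrings; i++)
                rx_work(i);
        }
        if (reason & INTR_TX) {
            bar->tx_int = ~0ULL;
            for (i = 0; i < nfifos; i++)
                tx_work(i);
        }
        return 1;                     /* IRQ_HANDLED */
    }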
2879 3398
2880/** 3399/**
2881 * s2io_get_stats - Updates the device statistics structure. 3400 * s2io_updt_stats - Triggers a one-shot hardware statistics update.
3401 */
3402static void s2io_updt_stats(nic_t *sp)
3403{
3404 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3405 u64 val64;
3406 int cnt = 0;
3407
3408 if (atomic_read(&sp->card_state) == CARD_UP) {
3409 /* Approx. 30us on a 133 MHz bus */
3410 val64 = SET_UPDT_CLICKS(10) |
3411 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3412 writeq(val64, &bar0->stat_cfg);
3413 do {
3414 udelay(100);
3415 val64 = readq(&bar0->stat_cfg);
3416 if (!(val64 & BIT(0)))
3417 break;
3418 cnt++;
3419 if (cnt == 5)
3420 break; /* Updt failed */
3421 } while(1);
3422 }
3423}
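s2io_updt_stats() is a bounded busy-wait: it kicks a one-shot statistics DMA and then polls a completion bit at most five times with a 100us delay between polls, so a wedged card costs the caller at most roughly 500us. A generic sketch of the same shape, with hypothetical callbacks standing in for the register read and udelay():

    #include <stdbool.h>

    /* Poll 'done' up to 'tries' times, waiting 'delay_us' between
     * polls. Mirrors the udelay(100) x 5 loop above; returns false
     * on timeout, in which case the caller keeps the stale snapshot. */
    static bool poll_until_done(bool (*done)(void *), void *ctx,
                                unsigned int tries, unsigned int delay_us,
                                void (*wait_us)(unsigned int))
    {
        unsigned int cnt;

        for (cnt = 0; cnt < tries; cnt++) {
            if (done(ctx))
                return true;
            wait_us(delay_us);
        }
        return false;
    }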
3424
3425/**
3426 * s2io_get_stats - Updates the device statistics structure.
2882 * @dev : pointer to the device structure. 3427 * @dev : pointer to the device structure.
2883 * Description: 3428 * Description:
2884 * This function updates the device statistics structure in the s2io_nic 3429 * This function updates the device statistics structure in the s2io_nic
2885 * structure and returns a pointer to the same. 3430 * structure and returns a pointer to the same.
2886 * Return value: 3431 * Return value:
2887 * pointer to the updated net_device_stats structure. 3432 * pointer to the updated net_device_stats structure.
2888 */ 3433 */
2889 3434
2890static struct net_device_stats *s2io_get_stats(struct net_device *dev) 3435struct net_device_stats *s2io_get_stats(struct net_device *dev)
2891{ 3436{
2892 nic_t *sp = dev->priv; 3437 nic_t *sp = dev->priv;
2893 mac_info_t *mac_control; 3438 mac_info_t *mac_control;
2894 struct config_param *config; 3439 struct config_param *config;
2895 3440
3441
2896 mac_control = &sp->mac_control; 3442 mac_control = &sp->mac_control;
2897 config = &sp->config; 3443 config = &sp->config;
2898 3444
2899 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms; 3445 /* Configure Stats for immediate updt */
2900 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms; 3446 s2io_updt_stats(sp);
2901 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms; 3447
3448 sp->stats.tx_packets =
3449 le32_to_cpu(mac_control->stats_info->tmac_frms);
3450 sp->stats.tx_errors =
3451 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3452 sp->stats.rx_errors =
3453 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3454 sp->stats.multicast =
3455 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
2902 sp->stats.rx_length_errors = 3456 sp->stats.rx_length_errors =
2903 mac_control->stats_info->rmac_long_frms; 3457 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
2904 3458
2905 return (&sp->stats); 3459 return (&sp->stats);
2906} 3460}
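Every 32-bit counter in the DMA'd stats block is little-endian on the wire, hence the le32_to_cpu() wrappers added above. For reference, a portable standalone model of what that conversion computes (le32_to_cpu itself is the kernel's macro):

    #include <stdint.h>

    /* Assemble a little-endian 32-bit counter from its bytes,
     * independent of host endianness; equivalent in effect to
     * le32_to_cpu() applied to an in-memory counter. */
    static uint32_t le32_load(const uint8_t *p)
    {
        return (uint32_t)p[0] |
               ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
    }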
@@ -2909,8 +3463,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2909 * s2io_set_multicast - entry point for multicast address enable/disable. 3463 * s2io_set_multicast - entry point for multicast address enable/disable.
2910 * @dev : pointer to the device structure 3464 * @dev : pointer to the device structure
2911 * Description: 3465 * Description:
2912 * This function is a driver entry point which gets called by the kernel 3466 * This function is a driver entry point which gets called by the kernel
2913 * whenever multicast addresses must be enabled/disabled. This also gets 3467 * whenever multicast addresses must be enabled/disabled. This also gets
2914 * called to set/reset promiscuous mode. Depending on the device flag, we 3468 * called to set/reset promiscuous mode. Depending on the device flag, we
2915 * determine if multicast addresses must be enabled or if promiscuous mode 3469 * determine if multicast addresses must be enabled or if promiscuous mode
2916 * is to be disabled etc. 3470 * is to be disabled etc.
@@ -2948,6 +3502,8 @@ static void s2io_set_multicast(struct net_device *dev)
2948 /* Disable all Multicast addresses */ 3502 /* Disable all Multicast addresses */
2949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3503 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2950 &bar0->rmac_addr_data0_mem); 3504 &bar0->rmac_addr_data0_mem);
3505 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3506 &bar0->rmac_addr_data1_mem);
2951 val64 = RMAC_ADDR_CMD_MEM_WE | 3507 val64 = RMAC_ADDR_CMD_MEM_WE |
2952 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3508 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2953 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); 3509 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
@@ -3010,7 +3566,7 @@ static void s2io_set_multicast(struct net_device *dev)
3010 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 3566 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3011 &bar0->rmac_addr_data0_mem); 3567 &bar0->rmac_addr_data0_mem);
3012 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3568 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3013 &bar0->rmac_addr_data1_mem); 3569 &bar0->rmac_addr_data1_mem);
3014 val64 = RMAC_ADDR_CMD_MEM_WE | 3570 val64 = RMAC_ADDR_CMD_MEM_WE |
3015 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3571 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3016 RMAC_ADDR_CMD_MEM_OFFSET 3572 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3039,8 +3595,7 @@ static void s2io_set_multicast(struct net_device *dev)
3039 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 3595 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3040 &bar0->rmac_addr_data0_mem); 3596 &bar0->rmac_addr_data0_mem);
3041 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 3597 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3042 &bar0->rmac_addr_data1_mem); 3598 &bar0->rmac_addr_data1_mem);
3043
3044 val64 = RMAC_ADDR_CMD_MEM_WE | 3599 val64 = RMAC_ADDR_CMD_MEM_WE |
3045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3600 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3046 RMAC_ADDR_CMD_MEM_OFFSET 3601 RMAC_ADDR_CMD_MEM_OFFSET
@@ -3059,12 +3614,12 @@ static void s2io_set_multicast(struct net_device *dev)
3059} 3614}
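Each filter-slot update in this function repeats one indirect-access handshake: load the address into rmac_addr_data0_mem and the mask into rmac_addr_data1_mem, then issue a command word carrying write-enable, a new-command strobe, and the slot offset, and finally wait for the strobe to clear. A condensed sketch of the handshake; the bit positions below are made up for illustration (the driver's real macros are the RMAC_ADDR_CMD_MEM_* family):

    #include <stdint.h>
    #include <stdbool.h>

    struct filter_regs {
        volatile uint64_t data0;   /* MAC address to program */
        volatile uint64_t data1;   /* per-byte mask           */
        volatile uint64_t cmd;     /* WE | STROBE | OFFSET    */
    };

    #define CMD_WE        (1ULL << 63)     /* illustrative positions */
    #define CMD_STROBE    (1ULL << 62)
    #define CMD_OFFSET(n) ((uint64_t)(n) & 0xff)

    /* Program one unicast/multicast filter slot; returns false if
     * the strobe never clears (bounded wait, in the spirit of
     * wait_for_cmd_complete()). */
    static bool program_filter_slot(struct filter_regs *r, int slot,
                                    uint64_t mac, uint64_t mask,
                                    int tries)
    {
        r->data0 = mac;
        r->data1 = mask;
        r->cmd   = CMD_WE | CMD_STROBE | CMD_OFFSET(slot);
        while (tries--) {
            if (!(r->cmd & CMD_STROBE))
                return true;
        }
        return false;
    }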
3060 3615
3061/** 3616/**
3062 * s2io_set_mac_addr - Programs the Xframe mac address 3617 * s2io_set_mac_addr - Programs the Xframe mac address
3063 * @dev : pointer to the device structure. 3618 * @dev : pointer to the device structure.
3064 * @addr: a uchar pointer to the new mac address which is to be set. 3619 * @addr: a uchar pointer to the new mac address which is to be set.
3065 * Description : This procedure will program the Xframe to receive 3620 * Description : This procedure will program the Xframe to receive
3066 * frames with new Mac Address 3621 * frames with new Mac Address
3067 * Return value: SUCCESS on success and an appropriate (-)ve integer 3622 * Return value: SUCCESS on success and an appropriate (-)ve integer
3068 * as defined in errno.h file on failure. 3623 * as defined in errno.h file on failure.
3069 */ 3624 */
3070 3625
@@ -3075,10 +3630,10 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3075 register u64 val64, mac_addr = 0; 3630 register u64 val64, mac_addr = 0;
3076 int i; 3631 int i;
3077 3632
3078 /* 3633 /*
3079 * Set the new MAC address as the new unicast filter and reflect this 3634 * Set the new MAC address as the new unicast filter and reflect this
3080 * change on the device address registered with the OS. It will be 3635 * change on the device address registered with the OS. It will be
3081 * at offset 0. 3636 * at offset 0.
3082 */ 3637 */
3083 for (i = 0; i < ETH_ALEN; i++) { 3638 for (i = 0; i < ETH_ALEN; i++) {
3084 mac_addr <<= 8; 3639 mac_addr <<= 8;
@@ -3102,12 +3657,12 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3102} 3657}
3103 3658
3104/** 3659/**
3105 * s2io_ethtool_sset - Sets different link parameters. 3660 * s2io_ethtool_sset - Sets different link parameters.
3106 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure. 3661 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3107 * @info: pointer to the structure with parameters given by ethtool to set 3662 * @info: pointer to the structure with parameters given by ethtool to set
3108 * link information. 3663 * link information.
3109 * Description: 3664 * Description:
3110 * The function sets different link parameters provided by the user onto 3665 * The function sets different link parameters provided by the user onto
3111 * the NIC. 3666 * the NIC.
3112 * Return value: 3667 * Return value:
3113 * 0 on success. 3668 * 0 on success.
@@ -3129,7 +3684,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
3129} 3684}
3130 3685
3131/** 3686/**
3132 * s2io_ethtool_gset - Return link specific information. 3687 * s2io_ethtool_gset - Return link specific information.
3133 * @sp : private member of the device structure, pointer to the 3688 * @sp : private member of the device structure, pointer to the
3134 * s2io_nic structure. 3689 * s2io_nic structure.
3135 * @info : pointer to the structure with parameters given by ethtool 3690 * @info : pointer to the structure with parameters given by ethtool
@@ -3161,8 +3716,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3161} 3716}
3162 3717
3163/** 3718/**
3164 * s2io_ethtool_gdrvinfo - Returns driver specific information. 3719 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3165 * @sp : private member of the device structure, which is a pointer to the 3720 * @sp : private member of the device structure, which is a pointer to the
3166 * s2io_nic structure. 3721 * s2io_nic structure.
3167 * @info : pointer to the structure with parameters given by ethtool to 3722 * @info : pointer to the structure with parameters given by ethtool to
3168 * return driver information. 3723 * return driver information.
@@ -3190,9 +3745,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3190 3745
3191/** 3746/**
3192 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer. 3747 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
3193 * @sp: private member of the device structure, which is a pointer to the 3748 * @sp: private member of the device structure, which is a pointer to the
3194 * s2io_nic structure. 3749 * s2io_nic structure.
3195 * @regs : pointer to the structure with parameters given by ethtool for 3750 * @regs : pointer to the structure with parameters given by ethtool for
3196 * dumping the registers. 3751 * dumping the registers.
3197 * @reg_space: The input argument into which all the registers are dumped. 3752 * @reg_space: The input argument into which all the registers are dumped.
3198 * Description: 3753 * Description:
@@ -3221,11 +3776,11 @@ static void s2io_ethtool_gregs(struct net_device *dev,
3221 3776
3222/** 3777/**
3223 * s2io_phy_id - timer function that alternates adapter LED. 3778 * s2io_phy_id - timer function that alternates adapter LED.
3224 * @data : address of the private member of the device structure, which 3779 * @data : address of the private member of the device structure, which
3225 * is a pointer to the s2io_nic structure, provided as an u32. 3780 * is a pointer to the s2io_nic structure, provided as an u32.
3226 * Description: This is actually the timer function that alternates the 3781 * Description: This is actually the timer function that alternates the
3227 * adapter LED bit of the adapter control bit to set/reset every time on 3782 * adapter LED bit of the adapter control bit to set/reset every time on
3228 * invocation. The timer is set for 1/2 a second, hence the NIC blinks 3783 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3229 * once every second. 3784 * once every second.
3230*/ 3785*/
3231static void s2io_phy_id(unsigned long data) 3786static void s2io_phy_id(unsigned long data)
@@ -3236,7 +3791,8 @@ static void s2io_phy_id(unsigned long data)
3236 u16 subid; 3791 u16 subid;
3237 3792
3238 subid = sp->pdev->subsystem_device; 3793 subid = sp->pdev->subsystem_device;
3239 if ((subid & 0xFF) >= 0x07) { 3794 if ((sp->device_type == XFRAME_II_DEVICE) ||
3795 ((subid & 0xFF) >= 0x07)) {
3240 val64 = readq(&bar0->gpio_control); 3796 val64 = readq(&bar0->gpio_control);
3241 val64 ^= GPIO_CTRL_GPIO_0; 3797 val64 ^= GPIO_CTRL_GPIO_0;
3242 writeq(val64, &bar0->gpio_control); 3798 writeq(val64, &bar0->gpio_control);
@@ -3253,12 +3809,12 @@ static void s2io_phy_id(unsigned long data)
3253 * s2io_ethtool_idnic - To physically identify the nic on the system. 3809 * s2io_ethtool_idnic - To physically identify the nic on the system.
3254 * @sp : private member of the device structure, which is a pointer to the 3810 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure. 3811 * s2io_nic structure.
3256 * @id : pointer to the structure with identification parameters given by 3812 * @id : pointer to the structure with identification parameters given by
3257 * ethtool. 3813 * ethtool.
3258 * Description: Used to physically identify the NIC on the system. 3814 * Description: Used to physically identify the NIC on the system.
3259 * The Link LED will blink for a time specified by the user for 3815 * The Link LED will blink for a time specified by the user for
3260 * identification. 3816 * identification.
3261 * NOTE: The Link has to be Up to be able to blink the LED. Hence 3817 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3262 * identification is possible only if it's link is up. 3818 * identification is possible only if it's link is up.
3263 * Return value: 3819 * Return value:
3264 * int , returns 0 on success 3820 * int , returns 0 on success
@@ -3273,7 +3829,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3273 3829
3274 subid = sp->pdev->subsystem_device; 3830 subid = sp->pdev->subsystem_device;
3275 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3831 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3276 if ((subid & 0xFF) < 0x07) { 3832 if ((sp->device_type == XFRAME_I_DEVICE) &&
3833 ((subid & 0xFF) < 0x07)) {
3277 val64 = readq(&bar0->adapter_control); 3834 val64 = readq(&bar0->adapter_control);
3278 if (!(val64 & ADAPTER_CNTL_EN)) { 3835 if (!(val64 & ADAPTER_CNTL_EN)) {
3279 printk(KERN_ERR 3836 printk(KERN_ERR
@@ -3288,12 +3845,12 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3288 } 3845 }
3289 mod_timer(&sp->id_timer, jiffies); 3846 mod_timer(&sp->id_timer, jiffies);
3290 if (data) 3847 if (data)
3291 msleep(data * 1000); 3848 msleep_interruptible(data * HZ);
3292 else 3849 else
3293 msleep(0xFFFFFFFF); 3850 msleep_interruptible(MAX_FLICKER_TIME);
3294 del_timer_sync(&sp->id_timer); 3851 del_timer_sync(&sp->id_timer);
3295 3852
3296 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 3853 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3297 writeq(last_gpio_ctrl_val, &bar0->gpio_control); 3854 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3298 last_gpio_ctrl_val = readq(&bar0->gpio_control); 3855 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3299 } 3856 }
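The identify path arms a half-second timer whose handler (s2io_phy_id, above) simply XORs the LED GPIO bit, giving one full on/off blink per second, and now sleeps interruptibly so the wait can be cut short. The per-tick work reduces to a read-modify-write toggle; a standalone model, where bit 0 stands in for GPIO_CTRL_GPIO_0:

    #include <stdint.h>

    #define GPIO_LED_BIT (1ULL << 0)   /* stand-in for GPIO_CTRL_GPIO_0 */

    /* One timer tick of the identify blink: flip the LED GPIO bit.
     * Re-armed every HZ/2 jiffies, so the LED completes one full
     * on/off cycle per second. */
    static void led_toggle_tick(volatile uint64_t *gpio_control)
    {
        uint64_t v = *gpio_control;
        v ^= GPIO_LED_BIT;
        *gpio_control = v;
    }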
@@ -3303,7 +3860,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3303 3860
3304/** 3861/**
3305 * s2io_ethtool_getpause_data - Pause frame generation and reception. 3862 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3306 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. 3863 * @sp : private member of the device structure, which is a pointer to the
3864 * s2io_nic structure.
3307 * @ep : pointer to the structure with pause parameters given by ethtool. 3865 * @ep : pointer to the structure with pause parameters given by ethtool.
3308 * Description: 3866 * Description:
3309 * Returns the Pause frame generation and reception capability of the NIC. 3867 * Returns the Pause frame generation and reception capability of the NIC.
@@ -3327,7 +3885,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3327 3885
3328/** 3886/**
3329 * s2io_ethtool_setpause_data - set/reset pause frame generation. 3887 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3330 * @sp : private member of the device structure, which is a pointer to the 3888 * @sp : private member of the device structure, which is a pointer to the
3331 * s2io_nic structure. 3889 * s2io_nic structure.
3332 * @ep : pointer to the structure with pause parameters given by ethtool. 3890 * @ep : pointer to the structure with pause parameters given by ethtool.
3333 * Description: 3891 * Description:
@@ -3338,7 +3896,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
3338 */ 3896 */
3339 3897
3340static int s2io_ethtool_setpause_data(struct net_device *dev, 3898static int s2io_ethtool_setpause_data(struct net_device *dev,
3341 struct ethtool_pauseparam *ep) 3899 struct ethtool_pauseparam *ep)
3342{ 3900{
3343 u64 val64; 3901 u64 val64;
3344 nic_t *sp = dev->priv; 3902 nic_t *sp = dev->priv;
@@ -3359,13 +3917,13 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
3359 3917
3360/** 3918/**
3361 * read_eeprom - reads 4 bytes of data from user given offset. 3919 * read_eeprom - reads 4 bytes of data from user given offset.
3362 * @sp : private member of the device structure, which is a pointer to the 3920 * @sp : private member of the device structure, which is a pointer to the
3363 * s2io_nic structure. 3921 * s2io_nic structure.
3364 * @off : offset from which the data is to be read 3922 * @off : offset from which the data is to be read
3365 * @data : It's an output parameter where the data read at the given 3923 * @data : It's an output parameter where the data read at the given
3366 * offset is stored. 3924 * offset is stored.
3367 * Description: 3925 * Description:
3368 * Will read 4 bytes of data from the user given offset and return the 3926 * Will read 4 bytes of data from the user given offset and return the
3369 * read data. 3927 * read data.
3370 * NOTE: Will allow to read only part of the EEPROM visible through the 3928 * NOTE: Will allow to read only part of the EEPROM visible through the
3371 * I2C bus. 3929 * I2C bus.
@@ -3406,7 +3964,7 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
3406 * s2io_nic structure. 3964 * s2io_nic structure.
3407 * @off : offset at which the data must be written 3965 * @off : offset at which the data must be written
3408 * @data : The data that is to be written 3966 * @data : The data that is to be written
3409 * @cnt : Number of bytes of the data that are actually to be written into 3967 * @cnt : Number of bytes of the data that are actually to be written into
3410 * the Eeprom. (max of 3) 3968 * the Eeprom. (max of 3)
3411 * Description: 3969 * Description:
3412 * Actually writes the relevant part of the data value into the Eeprom 3970 * Actually writes the relevant part of the data value into the Eeprom
@@ -3443,7 +4001,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3443/** 4001/**
3444 * s2io_ethtool_geeprom - reads the value stored in the Eeprom. 4002 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3445 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure. 4003 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3446 * @eeprom : pointer to the user level structure provided by ethtool, 4004 * @eeprom : pointer to the user level structure provided by ethtool,
3447 * containing all relevant information. 4005 * containing all relevant information.
3448 * @data_buf : user defined value to be written into Eeprom. 4006 * @data_buf : user defined value to be written into Eeprom.
3449 * Description: Reads the values stored in the Eeprom at given offset 4007 * Description: Reads the values stored in the Eeprom at given offset
@@ -3454,7 +4012,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3454 */ 4012 */
3455 4013
3456static int s2io_ethtool_geeprom(struct net_device *dev, 4014static int s2io_ethtool_geeprom(struct net_device *dev,
3457 struct ethtool_eeprom *eeprom, u8 * data_buf) 4015 struct ethtool_eeprom *eeprom, u8 * data_buf)
3458{ 4016{
3459 u32 data, i, valid; 4017 u32 data, i, valid;
3460 nic_t *sp = dev->priv; 4018 nic_t *sp = dev->priv;
@@ -3479,7 +4037,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
3479 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom 4037 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3480 * @sp : private member of the device structure, which is a pointer to the 4038 * @sp : private member of the device structure, which is a pointer to the
3481 * s2io_nic structure. 4039 * s2io_nic structure.
3482 * @eeprom : pointer to the user level structure provided by ethtool, 4040 * @eeprom : pointer to the user level structure provided by ethtool,
3483 * containing all relevant information. 4041 * containing all relevant information.
3484 * @data_buf : user-defined value to be written into Eeprom. 4042 * @data_buf : user-defined value to be written into Eeprom.
3485 * Description: 4043 * Description:
@@ -3527,8 +4085,8 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
3527} 4085}
3528 4086
3529/** 4087/**
3530 * s2io_register_test - reads and writes into all clock domains. 4088 * s2io_register_test - reads and writes into all clock domains.
3531 * @sp : private member of the device structure, which is a pointer to the 4089 * @sp : private member of the device structure, which is a pointer to the
3532 * s2io_nic structure. 4090 * s2io_nic structure.
3533 * @data : variable that returns the result of each of the tests conducted 4091 * @data : variable that returns the result of each of the tests conducted
3534 * by the driver. 4092 * by the driver.
@@ -3545,8 +4103,8 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3545 u64 val64 = 0; 4103 u64 val64 = 0;
3546 int fail = 0; 4104 int fail = 0;
3547 4105
3548 val64 = readq(&bar0->pcc_enable); 4106 val64 = readq(&bar0->pif_rd_swapper_fb);
3549 if (val64 != 0xff00000000000000ULL) { 4107 if (val64 != 0x123456789abcdefULL) {
3550 fail = 1; 4108 fail = 1;
3551 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n"); 4109 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3552 } 4110 }
@@ -3590,13 +4148,13 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
3590} 4148}
3591 4149
3592/** 4150/**
3593 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed. 4151 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
3594 * @sp : private member of the device structure, which is a pointer to the 4152 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure. 4153 * s2io_nic structure.
3596 * @data: variable that returns the result of each of the tests conducted by 4154 * @data: variable that returns the result of each of the tests conducted by
3597 * the driver. 4155 * the driver.
3598 * Description: 4156 * Description:
3599 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL 4157 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3600 * register. 4158 * register.
3601 * Return value: 4159 * Return value:
3602 * 0 on success. 4160 * 0 on success.
@@ -3661,14 +4219,14 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3661 4219
3662/** 4220/**
3663 * s2io_bist_test - invokes the MemBist test of the card. 4221 * s2io_bist_test - invokes the MemBist test of the card.
3664 * @sp : private member of the device structure, which is a pointer to the 4222 * @sp : private member of the device structure, which is a pointer to the
3665 * s2io_nic structure. 4223 * s2io_nic structure.
3666 * @data: variable that returns the result of each of the tests conducted by 4224 * @data: variable that returns the result of each of the tests conducted by
3667 * the driver. 4225 * the driver.
3668 * Description: 4226 * Description:
3669 * This invokes the MemBist test of the card. We give around 4227 * This invokes the MemBist test of the card. We give around
3670 * 2 secs time for the Test to complete. If it's still not complete 4228 * 2 secs time for the Test to complete. If it's still not complete
3671 * within this period, we consider that the test failed. 4229 * within this period, we consider that the test failed.
3672 * Return value: 4230 * Return value:
3673 * 0 on success and -1 on failure. 4231 * 0 on success and -1 on failure.
3674 */ 4232 */
@@ -3697,13 +4255,13 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
3697} 4255}
3698 4256
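s2io_bist_test(), whose tail is shown above, drives the card's built-in self test through the standard PCI BIST configuration byte and gives it roughly two seconds to finish. A sketch of that protocol under the PCI spec's register layout (bit 6 starts BIST, the low nibble is the completion code); the config-space accessors are hypothetical callbacks:

    #include <stdint.h>
    #include <stdbool.h>

    #define PCI_BIST_START 0x40   /* per PCI spec: write to start BIST */
    #define PCI_BIST_CODE  0x0f   /* low nibble: 0 == pass             */

    /* Returns true on BIST pass within 'tries' polls of ~100ms each;
     * 20 tries approximates the driver's ~2 second budget. */
    static bool bist_model(uint8_t (*rd)(void), void (*wr)(uint8_t),
                           void (*wait_100ms)(void), int tries)
    {
        wr(rd() | PCI_BIST_START);
        while (tries--) {
            uint8_t v = rd();
            if (!(v & PCI_BIST_START))     /* device cleared start bit */
                return (v & PCI_BIST_CODE) == 0;
            wait_100ms();
        }
        return false;                      /* still running: declare failure */
    }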
3699/** 4257/**
3700 * s2io_link_test - verifies the link state of the NIC 4258 * s2io_link_test - verifies the link state of the NIC
3701 * @sp : private member of the device structure, which is a pointer to the 4259 * @sp : private member of the device structure, which is a pointer to the
3702 * s2io_nic structure. 4260 * s2io_nic structure.
3703 * @data: variable that returns the result of each of the tests conducted by 4261 * @data: variable that returns the result of each of the tests conducted by
3704 * the driver. 4262 * the driver.
3705 * Description: 4263 * Description:
3706 * The function verifies the link state of the NIC and updates the input 4264 * The function verifies the link state of the NIC and updates the input
3707 * argument 'data' appropriately. 4265 * argument 'data' appropriately.
3708 * Return value: 4266 * Return value:
3709 * 0 on success. 4267 * 0 on success.
@@ -3722,13 +4280,13 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
3722} 4280}
3723 4281
3724/** 4282/**
3725 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 4283 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3726 * @sp - private member of the device structure, which is a pointer to the 4284 * @sp - private member of the device structure, which is a pointer to the
3727 * s2io_nic structure. 4285 * s2io_nic structure.
3728 * @data - variable that returns the result of each of the test 4286 * @data - variable that returns the result of each of the test
3729 * conducted by the driver. 4287 * conducted by the driver.
3730 * Description: 4288 * Description:
3731 * This is one of the offline tests that checks the read and write 4289 * This is one of the offline tests that checks the read and write
3732 * access to the RldRam chip on the NIC. 4290 * access to the RldRam chip on the NIC.
3733 * Return value: 4291 * Return value:
3734 * 0 on success. 4292 * 0 on success.
@@ -3833,7 +4391,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3833 * s2io_nic structure. 4391 * s2io_nic structure.
3834 * @ethtest : pointer to a ethtool command specific structure that will be 4392 * @ethtest : pointer to a ethtool command specific structure that will be
3835 * returned to the user. 4393 * returned to the user.
3836 * @data : variable that returns the result of each of the test 4394 * @data : variable that returns the result of each of the test
3837 * conducted by the driver. 4395 * conducted by the driver.
3838 * Description: 4396 * Description:
3839 * This function conducts 6 tests (4 offline and 2 online) to determine 4397 * This function conducts 6 tests (4 offline and 2 online) to determine
@@ -3851,23 +4409,18 @@ static void s2io_ethtool_test(struct net_device *dev,
3851 4409
3852 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 4410 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3853 /* Offline Tests. */ 4411 /* Offline Tests. */
3854 if (orig_state) { 4412 if (orig_state)
3855 s2io_close(sp->dev); 4413 s2io_close(sp->dev);
3856 s2io_set_swapper(sp);
3857 } else
3858 s2io_set_swapper(sp);
3859 4414
3860 if (s2io_register_test(sp, &data[0])) 4415 if (s2io_register_test(sp, &data[0]))
3861 ethtest->flags |= ETH_TEST_FL_FAILED; 4416 ethtest->flags |= ETH_TEST_FL_FAILED;
3862 4417
3863 s2io_reset(sp); 4418 s2io_reset(sp);
3864 s2io_set_swapper(sp);
3865 4419
3866 if (s2io_rldram_test(sp, &data[3])) 4420 if (s2io_rldram_test(sp, &data[3]))
3867 ethtest->flags |= ETH_TEST_FL_FAILED; 4421 ethtest->flags |= ETH_TEST_FL_FAILED;
3868 4422
3869 s2io_reset(sp); 4423 s2io_reset(sp);
3870 s2io_set_swapper(sp);
3871 4424
3872 if (s2io_eeprom_test(sp, &data[1])) 4425 if (s2io_eeprom_test(sp, &data[1]))
3873 ethtest->flags |= ETH_TEST_FL_FAILED; 4426 ethtest->flags |= ETH_TEST_FL_FAILED;
@@ -3910,61 +4463,111 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
3910 nic_t *sp = dev->priv; 4463 nic_t *sp = dev->priv;
3911 StatInfo_t *stat_info = sp->mac_control.stats_info; 4464 StatInfo_t *stat_info = sp->mac_control.stats_info;
3912 4465
3913 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms); 4466 s2io_updt_stats(sp);
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets); 4467 tmp_stats[i++] =
4468 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4469 le32_to_cpu(stat_info->tmac_frms);
4470 tmp_stats[i++] =
4471 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4472 le32_to_cpu(stat_info->tmac_data_octets);
3915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms); 4473 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms); 4474 tmp_stats[i++] =
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms); 4475 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->tmac_mcst_frms);
4477 tmp_stats[i++] =
4478 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4479 le32_to_cpu(stat_info->tmac_bcst_frms);
3918 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); 4480 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms); 4481 tmp_stats[i++] =
4482 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4483 le32_to_cpu(stat_info->tmac_any_err_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); 4484 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip); 4485 tmp_stats[i++] =
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip); 4486 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp); 4487 le32_to_cpu(stat_info->tmac_vld_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp); 4488 tmp_stats[i++] =
4489 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4490 le32_to_cpu(stat_info->tmac_drop_ip);
4491 tmp_stats[i++] =
4492 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4493 le32_to_cpu(stat_info->tmac_icmp);
4494 tmp_stats[i++] =
4495 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4496 le32_to_cpu(stat_info->tmac_rst_tcp);
3925 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp); 4497 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp); 4498 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
3927 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms); 4499 le32_to_cpu(stat_info->tmac_udp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets); 4500 tmp_stats[i++] =
4501 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_vld_frms);
4503 tmp_stats[i++] =
4504 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_data_octets);
3929 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms); 4506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms); 4507 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3931 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms); 4508 tmp_stats[i++] =
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms); 4509 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4510 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4511 tmp_stats[i++] =
4512 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4513 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms); 4514 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); 4515 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); 4516 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms); 4517 tmp_stats[i++] =
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms); 4518 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms); 4519 le32_to_cpu(stat_info->rmac_discarded_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms); 4520 tmp_stats[i++] =
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms); 4521 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip); 4522 le32_to_cpu(stat_info->rmac_usized_frms);
4523 tmp_stats[i++] =
4524 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4525 le32_to_cpu(stat_info->rmac_osized_frms);
4526 tmp_stats[i++] =
4527 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4528 le32_to_cpu(stat_info->rmac_frag_frms);
4529 tmp_stats[i++] =
4530 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4531 le32_to_cpu(stat_info->rmac_jabber_frms);
4532 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4533 le32_to_cpu(stat_info->rmac_ip);
3942 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets); 4534 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip); 4535 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip); 4536 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp); 4537 le32_to_cpu(stat_info->rmac_drop_ip);
4538 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4539 le32_to_cpu(stat_info->rmac_icmp);
3946 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp); 4540 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp); 4541 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp); 4542 le32_to_cpu(stat_info->rmac_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt); 4543 tmp_stats[i++] =
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip); 4544 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4545 le32_to_cpu(stat_info->rmac_err_drp_udp);
4546 tmp_stats[i++] =
4547 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4548 le32_to_cpu(stat_info->rmac_pause_cnt);
4549 tmp_stats[i++] =
4550 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4551 le32_to_cpu(stat_info->rmac_accepted_ip);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp); 4552 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4553 tmp_stats[i++] = 0;
4554 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4555 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
3952} 4556}
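The repeated "(u64)le32_to_cpu(..._oflow) << 32 | le32_to_cpu(...)" expression widens each 32-bit hardware counter with its overflow register into a single 64-bit value. Written once as a helper, it reads as follows (a sketch; the driver inlines this at every call site rather than defining it):

    #include <stdint.h>

    /* Combine a 32-bit counter with its overflow (carry) register
     * into one monotonically growing 64-bit value, as the
     * get_ethtool_stats path above does for every *_oflow pair. */
    static inline uint64_t widen_counter(uint32_t oflow, uint32_t lo)
    {
        return ((uint64_t)oflow << 32) | lo;
    }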
3953 4557
3954static int s2io_ethtool_get_regs_len(struct net_device *dev) 4558int s2io_ethtool_get_regs_len(struct net_device *dev)
3955{ 4559{
3956 return (XENA_REG_SPACE); 4560 return (XENA_REG_SPACE);
3957} 4561}
3958 4562
3959 4563
3960static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 4564u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3961{ 4565{
3962 nic_t *sp = dev->priv; 4566 nic_t *sp = dev->priv;
3963 4567
3964 return (sp->rx_csum); 4568 return (sp->rx_csum);
3965} 4569}
3966 4570int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3967static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3968{ 4571{
3969 nic_t *sp = dev->priv; 4572 nic_t *sp = dev->priv;
3970 4573
@@ -3975,19 +4578,17 @@ static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3975 4578
3976 return 0; 4579 return 0;
3977} 4580}
3978 4581int s2io_get_eeprom_len(struct net_device *dev)
3979static int s2io_get_eeprom_len(struct net_device *dev)
3980{ 4582{
3981 return (XENA_EEPROM_SPACE); 4583 return (XENA_EEPROM_SPACE);
3982} 4584}
3983 4585
3984static int s2io_ethtool_self_test_count(struct net_device *dev) 4586int s2io_ethtool_self_test_count(struct net_device *dev)
3985{ 4587{
3986 return (S2IO_TEST_LEN); 4588 return (S2IO_TEST_LEN);
3987} 4589}
3988 4590void s2io_ethtool_get_strings(struct net_device *dev,
3989static void s2io_ethtool_get_strings(struct net_device *dev, 4591 u32 stringset, u8 * data)
3990 u32 stringset, u8 * data)
3991{ 4592{
3992 switch (stringset) { 4593 switch (stringset) {
3993 case ETH_SS_TEST: 4594 case ETH_SS_TEST:
@@ -3998,13 +4599,12 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
3998 sizeof(ethtool_stats_keys)); 4599 sizeof(ethtool_stats_keys));
3999 } 4600 }
4000} 4601}
4001
4002static int s2io_ethtool_get_stats_count(struct net_device *dev) 4602static int s2io_ethtool_get_stats_count(struct net_device *dev)
4003{ 4603{
4004 return (S2IO_STAT_LEN); 4604 return (S2IO_STAT_LEN);
4005} 4605}
4006 4606
4007static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 4607int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4008{ 4608{
4009 if (data) 4609 if (data)
4010 dev->features |= NETIF_F_IP_CSUM; 4610 dev->features |= NETIF_F_IP_CSUM;
@@ -4046,21 +4646,18 @@ static struct ethtool_ops netdev_ethtool_ops = {
4046}; 4646};
4047 4647
4048/** 4648/**
4049 * s2io_ioctl - Entry point for the Ioctl 4649 * s2io_ioctl - Entry point for the Ioctl
4050 * @dev : Device pointer. 4650 * @dev : Device pointer.
4051 * @ifr : An IOCTL-specific structure that can contain a pointer to 4651 * @ifr : An IOCTL-specific structure that can contain a pointer to
4052 * a proprietary structure used to pass information to the driver. 4652 * a proprietary structure used to pass information to the driver.
4053 * @cmd : This is used to distinguish between the different commands that 4653 * @cmd : This is used to distinguish between the different commands that
4054 * can be passed to the IOCTL functions. 4654 * can be passed to the IOCTL functions.
4055 * Description: 4655 * Description:
4056 * This function has support for ethtool, adding multiple MAC addresses on 4656 * Currently no special functionality is supported in IOCTL, hence the
4057 * the NIC and some DBG commands for the util tool. 4657 * function always returns -EOPNOTSUPP.
4058 * Return value:
4059 * Currently the IOCTL supports no operations, hence by default this
4060 * function returns OP NOT SUPPORTED value.
4061 */ 4658 */
4062 4659
4063static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 4660int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4064{ 4661{
4065 return -EOPNOTSUPP; 4662 return -EOPNOTSUPP;
4066} 4663}
@@ -4076,17 +4673,9 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4076 * file on failure. 4673 * file on failure.
4077 */ 4674 */
4078 4675
4079static int s2io_change_mtu(struct net_device *dev, int new_mtu) 4676int s2io_change_mtu(struct net_device *dev, int new_mtu)
4080{ 4677{
4081 nic_t *sp = dev->priv; 4678 nic_t *sp = dev->priv;
4082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4083 register u64 val64;
4084
4085 if (netif_running(dev)) {
4086 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4087 DBG_PRINT(ERR_DBG, "change its MTU \n");
4088 return -EBUSY;
4089 }
4090 4679
4091 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 4680 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4092 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 4681 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -4094,11 +4683,22 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4094 return -EPERM; 4683 return -EPERM;
4095 } 4684 }
4096 4685
4097 /* Set the new MTU into the PYLD register of the NIC */
4098 val64 = new_mtu;
4099 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4100
4101 dev->mtu = new_mtu; 4686 dev->mtu = new_mtu;
4687 if (netif_running(dev)) {
4688 s2io_card_down(sp);
4689 netif_stop_queue(dev);
4690 if (s2io_card_up(sp)) {
4691 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4692 __FUNCTION__);
4693 }
4694 if (netif_queue_stopped(dev))
4695 netif_wake_queue(dev);
4696 } else { /* Device is down */
4697 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4698 u64 val64 = new_mtu;
4699
4700 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4701 }
4102 4702
4103 return 0; 4703 return 0;
4104} 4704}
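The rewritten MTU path drops the old must-be-stopped -EBUSY rule: a running interface is torn down and brought back up so every ring is re-sized for the new frame length, while a stopped interface takes the fast path of writing rmac_max_pyld_len directly. The decision skeleton, with hypothetical callbacks in place of s2io_card_down/s2io_card_up and illustrative MTU bounds (the driver's real limits are MIN_MTU and S2IO_JUMBO_SIZE):

    #include <stdbool.h>

    /* Model of the running / not-running split in s2io_change_mtu. */
    static int change_mtu_model(bool running, int new_mtu,
                                int (*card_down)(void),
                                int (*card_up)(void),
                                void (*write_pyld_len)(int))
    {
        if (new_mtu < 46 || new_mtu > 9600)   /* illustrative bounds */
            return -1;                        /* driver returns -EPERM */

        if (running) {
            card_down();                      /* quiesce, free buffers  */
            if (card_up())                    /* re-alloc at new size   */
                return -1;                    /* bring-up failed        */
        } else {
            write_pyld_len(new_mtu);          /* device down: poke reg  */
        }
        return 0;
    }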
@@ -4108,9 +4708,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4108 * @dev_adr : address of the device structure in dma_addr_t format. 4708 * @dev_adr : address of the device structure in dma_addr_t format.
4109 * Description: 4709 * Description:
4110 * This is the tasklet or the bottom half of the ISR. This is 4710 * This is the tasklet or the bottom half of the ISR. This is
4111 * an extension of the ISR which is scheduled by the scheduler to be run 4711 * an extension of the ISR which is scheduled by the scheduler to be run
4112 * when the load on the CPU is low. All low priority tasks of the ISR can 4712 * when the load on the CPU is low. All low priority tasks of the ISR can
4113 * be pushed into the tasklet. For now the tasklet is used only to 4713 * be pushed into the tasklet. For now the tasklet is used only to
4114 * replenish the Rx buffers in the Rx buffer descriptors. 4714 * replenish the Rx buffers in the Rx buffer descriptors.
4115 * Return value: 4715 * Return value:
4116 * void. 4716 * void.
@@ -4166,19 +4766,22 @@ static void s2io_set_link(unsigned long data)
4166 } 4766 }
4167 4767
4168 subid = nic->pdev->subsystem_device; 4768 subid = nic->pdev->subsystem_device;
4169 /* 4769 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4170 * Allow a small delay for the NICs self initiated 4770 /*
4171 * cleanup to complete. 4771 * Allow a small delay for the NICs self initiated
4172 */ 4772 * cleanup to complete.
4173 msleep(100); 4773 */
4774 msleep(100);
4775 }
4174 4776
4175 val64 = readq(&bar0->adapter_status); 4777 val64 = readq(&bar0->adapter_status);
4176 if (verify_xena_quiescence(val64, nic->device_enabled_once)) { 4778 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4177 if (LINK_IS_UP(val64)) { 4779 if (LINK_IS_UP(val64)) {
4178 val64 = readq(&bar0->adapter_control); 4780 val64 = readq(&bar0->adapter_control);
4179 val64 |= ADAPTER_CNTL_EN; 4781 val64 |= ADAPTER_CNTL_EN;
4180 writeq(val64, &bar0->adapter_control); 4782 writeq(val64, &bar0->adapter_control);
4181 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4783 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4784 subid)) {
4182 val64 = readq(&bar0->gpio_control); 4785 val64 = readq(&bar0->gpio_control);
4183 val64 |= GPIO_CTRL_GPIO_0; 4786 val64 |= GPIO_CTRL_GPIO_0;
4184 writeq(val64, &bar0->gpio_control); 4787 writeq(val64, &bar0->gpio_control);
@@ -4187,20 +4790,24 @@ static void s2io_set_link(unsigned long data)
4187 val64 |= ADAPTER_LED_ON; 4790 val64 |= ADAPTER_LED_ON;
4188 writeq(val64, &bar0->adapter_control); 4791 writeq(val64, &bar0->adapter_control);
4189 } 4792 }
4190 val64 = readq(&bar0->adapter_status); 4793 if (s2io_link_fault_indication(nic) ==
4191 if (!LINK_IS_UP(val64)) { 4794 MAC_RMAC_ERR_TIMER) {
4192 DBG_PRINT(ERR_DBG, "%s:", dev->name); 4795 val64 = readq(&bar0->adapter_status);
4193 DBG_PRINT(ERR_DBG, " Link down"); 4796 if (!LINK_IS_UP(val64)) {
4194 DBG_PRINT(ERR_DBG, "after "); 4797 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4195 DBG_PRINT(ERR_DBG, "enabling "); 4798 DBG_PRINT(ERR_DBG, " Link down");
4196 DBG_PRINT(ERR_DBG, "device \n"); 4799 DBG_PRINT(ERR_DBG, "after ");
4800 DBG_PRINT(ERR_DBG, "enabling ");
4801 DBG_PRINT(ERR_DBG, "device \n");
4802 }
4197 } 4803 }
4198 if (nic->device_enabled_once == FALSE) { 4804 if (nic->device_enabled_once == FALSE) {
4199 nic->device_enabled_once = TRUE; 4805 nic->device_enabled_once = TRUE;
4200 } 4806 }
4201 s2io_link(nic, LINK_UP); 4807 s2io_link(nic, LINK_UP);
4202 } else { 4808 } else {
4203 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) { 4809 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4810 subid)) {
4204 val64 = readq(&bar0->gpio_control); 4811 val64 = readq(&bar0->gpio_control);
4205 val64 &= ~GPIO_CTRL_GPIO_0; 4812 val64 &= ~GPIO_CTRL_GPIO_0;
4206 writeq(val64, &bar0->gpio_control); 4813 writeq(val64, &bar0->gpio_control);
@@ -4223,9 +4830,11 @@ static void s2io_card_down(nic_t * sp)
4223 unsigned long flags; 4830 unsigned long flags;
4224 register u64 val64 = 0; 4831 register u64 val64 = 0;
4225 4832
4833 del_timer_sync(&sp->alarm_timer);
4226 /* If s2io_set_link task is executing, wait till it completes. */ 4834 /* If s2io_set_link task is executing, wait till it completes. */
4227 while (test_and_set_bit(0, &(sp->link_state))) 4835 while (test_and_set_bit(0, &(sp->link_state))) {
4228 msleep(50); 4836 msleep(50);
4837 }
4229 atomic_set(&sp->card_state, CARD_DOWN); 4838 atomic_set(&sp->card_state, CARD_DOWN);
4230 4839
4231 /* disable Tx and Rx traffic on the NIC */ 4840 /* disable Tx and Rx traffic on the NIC */
@@ -4237,7 +4846,7 @@ static void s2io_card_down(nic_t * sp)
4237 /* Check if the device is Quiescent and then Reset the NIC */ 4846 /* Check if the device is Quiescent and then Reset the NIC */
4238 do { 4847 do {
4239 val64 = readq(&bar0->adapter_status); 4848 val64 = readq(&bar0->adapter_status);
4240 if (verify_xena_quiescence(val64, sp->device_enabled_once)) { 4849 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4241 break; 4850 break;
4242 } 4851 }
4243 4852
@@ -4251,14 +4860,27 @@ static void s2io_card_down(nic_t * sp)
4251 break; 4860 break;
4252 } 4861 }
4253 } while (1); 4862 } while (1);
4254 spin_lock_irqsave(&sp->tx_lock, flags);
4255 s2io_reset(sp); 4863 s2io_reset(sp);
4256 4864
4257 /* Free all unused Tx and Rx buffers */ 4865 /* Wait till all interrupt handlers have completed */
4866 cnt = 0;
4867 do {
4868 msleep(10);
4869 if (!atomic_read(&sp->isr_cnt))
4870 break;
4871 cnt++;
4872 } while(cnt < 5);
4873
4874 spin_lock_irqsave(&sp->tx_lock, flags);
4875 /* Free all Tx buffers */
4258 free_tx_buffers(sp); 4876 free_tx_buffers(sp);
4877 spin_unlock_irqrestore(&sp->tx_lock, flags);
4878
4879 /* Free all Rx buffers */
4880 spin_lock_irqsave(&sp->rx_lock, flags);
4259 free_rx_buffers(sp); 4881 free_rx_buffers(sp);
4882 spin_unlock_irqrestore(&sp->rx_lock, flags);
4260 4883
4261 spin_unlock_irqrestore(&sp->tx_lock, flags);
4262 clear_bit(0, &(sp->link_state)); 4884 clear_bit(0, &(sp->link_state));
4263} 4885}
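The teardown order here is deliberate: timer off, link task drained, card marked down, traffic disabled, quiescence polled, reset, and only after the new isr_cnt drain are the Tx and Rx buffers freed under their own locks, since an interrupt handler still in flight could otherwise touch freed buffers. The drain step itself, modeled as standalone C11 (msleep_model stands in for msleep; the struct is a stub):

    #include <stdatomic.h>

    struct nic_model {
        atomic_int isr_cnt;   /* incremented/decremented around the ISR */
    };

    static void msleep_model(int ms) { (void)ms; /* stand-in for msleep() */ }

    /* Drain in-flight interrupt handlers: mirrors the isr_cnt poll
     * loop added to s2io_card_down() (up to 5 x 10ms, best effort). */
    static void wait_isrs_drained(struct nic_model *sp)
    {
        int cnt;

        for (cnt = 0; cnt < 5; cnt++) {
            if (atomic_load(&sp->isr_cnt) == 0)
                break;
            msleep_model(10);
        }
    }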
4264 4886
@@ -4276,8 +4898,8 @@ static int s2io_card_up(nic_t * sp)
4276 return -ENODEV; 4898 return -ENODEV;
4277 } 4899 }
4278 4900
4279 /* 4901 /*
4280 * Initializing the Rx buffers. For now we are considering only 1 4902 * Initializing the Rx buffers. For now we are considering only 1
4281 * Rx ring and initializing buffers into 30 Rx blocks 4903 * Rx ring and initializing buffers into 30 Rx blocks
4282 */ 4904 */
4283 mac_control = &sp->mac_control; 4905 mac_control = &sp->mac_control;
@@ -4311,16 +4933,18 @@ static int s2io_card_up(nic_t * sp)
4311 return -ENODEV; 4933 return -ENODEV;
4312 } 4934 }
4313 4935
4936 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4937
4314 atomic_set(&sp->card_state, CARD_UP); 4938 atomic_set(&sp->card_state, CARD_UP);
4315 return 0; 4939 return 0;
4316} 4940}
4317 4941
4318/** 4942/**
4319 * s2io_restart_nic - Resets the NIC. 4943 * s2io_restart_nic - Resets the NIC.
4320 * @data : long pointer to the device private structure 4944 * @data : long pointer to the device private structure
4321 * Description: 4945 * Description:
4322 * This function is scheduled to be run by the s2io_tx_watchdog 4946 * This function is scheduled to be run by the s2io_tx_watchdog
4323 * function after 0.5 secs to reset the NIC. The idea is to reduce 4947 * function after 0.5 secs to reset the NIC. The idea is to reduce
4324 * the run time of the watch dog routine which is run holding a 4948 * the run time of the watch dog routine which is run holding a
4325 * spin lock. 4949 * spin lock.
4326 */ 4950 */
@@ -4338,10 +4962,11 @@ static void s2io_restart_nic(unsigned long data)
4338 netif_wake_queue(dev); 4962 netif_wake_queue(dev);
4339 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", 4963 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4340 dev->name); 4964 dev->name);
4965
4341} 4966}
4342 4967
4343/** 4968/**
4344 * s2io_tx_watchdog - Watchdog for transmit side. 4969 * s2io_tx_watchdog - Watchdog for transmit side.
4345 * @dev : Pointer to net device structure 4970 * @dev : Pointer to net device structure
4346 * Description: 4971 * Description:
4347 * This function is triggered if the Tx Queue is stopped 4972 * This function is triggered if the Tx Queue is stopped
@@ -4369,7 +4994,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
4369 * @len : length of the packet 4994 * @len : length of the packet
4370 * @cksum : FCS checksum of the frame. 4995 * @cksum : FCS checksum of the frame.
4371 * @ring_no : the ring from which this RxD was extracted. 4996 * @ring_no : the ring from which this RxD was extracted.
4372 * Description: 4997 * Description:
4373 * This function is called by the Rx interrupt service routine to perform 4998 * This function is called by the Rx interrupt service routine to perform
4374 * some OS related operations on the SKB before passing it to the upper 4999 * some OS related operations on the SKB before passing it to the upper
4375 * layers. It mainly checks if the checksum is OK, if so adds it to the 5000 * layers. It mainly checks if the checksum is OK, if so adds it to the
@@ -4379,35 +5004,68 @@ static void s2io_tx_watchdog(struct net_device *dev)
4379 * Return value: 5004 * Return value:
4380 * SUCCESS on success and -1 on failure. 5005 * SUCCESS on success and -1 on failure.
4381 */ 5006 */
4382#ifndef CONFIG_2BUFF_MODE 5007static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4383static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4384#else
4385static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4386 buffAdd_t * ba)
4387#endif
4388{ 5008{
5009 nic_t *sp = ring_data->nic;
4389 struct net_device *dev = (struct net_device *) sp->dev; 5010 struct net_device *dev = (struct net_device *) sp->dev;
4390 struct sk_buff *skb = 5011 struct sk_buff *skb = (struct sk_buff *)
4391 (struct sk_buff *) ((unsigned long) rxdp->Host_Control); 5012 ((unsigned long) rxdp->Host_Control);
5013 int ring_no = ring_data->ring_no;
4392 u16 l3_csum, l4_csum; 5014 u16 l3_csum, l4_csum;
4393#ifdef CONFIG_2BUFF_MODE 5015#ifdef CONFIG_2BUFF_MODE
4394 int buf0_len, buf2_len; 5016 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5017 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5018 int get_block = ring_data->rx_curr_get_info.block_index;
5019 int get_off = ring_data->rx_curr_get_info.offset;
5020 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4395 unsigned char *buff; 5021 unsigned char *buff;
5022#else
5023 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4396#endif 5024#endif
5025 skb->dev = dev;
5026 if (rxdp->Control_1 & RXD_T_CODE) {
5027 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5028 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5029 dev->name, err);
5030 dev_kfree_skb(skb);
5031 sp->stats.rx_crc_errors++;
5032 atomic_dec(&sp->rx_bufs_left[ring_no]);
5033 rxdp->Host_Control = 0;
5034 return 0;
5035 }
4397 5036
4398 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 5037 /* Updating statistics */
4399 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) { 5038 rxdp->Host_Control = 0;
5039 sp->rx_pkt_count++;
5040 sp->stats.rx_packets++;
5041#ifndef CONFIG_2BUFF_MODE
5042 sp->stats.rx_bytes += len;
5043#else
5044 sp->stats.rx_bytes += buf0_len + buf2_len;
5045#endif
5046
5047#ifndef CONFIG_2BUFF_MODE
5048 skb_put(skb, len);
5049#else
5050 buff = skb_push(skb, buf0_len);
5051 memcpy(buff, ba->ba_0, buf0_len);
5052 skb_put(skb, buf2_len);
5053#endif
5054
5055 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5056 (sp->rx_csum)) {
5057 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4400 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 5058 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4401 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { 5059 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4402 /* 5060 /*
4403 * NIC verifies if the Checksum of the received 5061 * NIC verifies if the Checksum of the received
4404 * frame is Ok or not and accordingly returns 5062 * frame is Ok or not and accordingly returns
4405 * a flag in the RxD. 5063 * a flag in the RxD.
4406 */ 5064 */
4407 skb->ip_summed = CHECKSUM_UNNECESSARY; 5065 skb->ip_summed = CHECKSUM_UNNECESSARY;
4408 } else { 5066 } else {
4409 /* 5067 /*
4410 * Packet with erroneous checksum, let the 5068 * Packet with erroneous checksum, let the
4411 * upper layers deal with it. 5069 * upper layers deal with it.
4412 */ 5070 */
4413 skb->ip_summed = CHECKSUM_NONE; 5071 skb->ip_summed = CHECKSUM_NONE;
@@ -4416,44 +5074,26 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4416 skb->ip_summed = CHECKSUM_NONE; 5074 skb->ip_summed = CHECKSUM_NONE;
4417 } 5075 }
4418 5076
4419 if (rxdp->Control_1 & RXD_T_CODE) {
4420 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4421 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4422 dev->name, err);
4423 }
4424#ifdef CONFIG_2BUFF_MODE
4425 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4426 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4427#endif
4428
4429 skb->dev = dev;
4430#ifndef CONFIG_2BUFF_MODE
4431 skb_put(skb, len);
4432 skb->protocol = eth_type_trans(skb, dev);
4433#else
4434 buff = skb_push(skb, buf0_len);
4435 memcpy(buff, ba->ba_0, buf0_len);
4436 skb_put(skb, buf2_len);
4437 skb->protocol = eth_type_trans(skb, dev); 5077 skb->protocol = eth_type_trans(skb, dev);
4438#endif
4439
4440#ifdef CONFIG_S2IO_NAPI 5078#ifdef CONFIG_S2IO_NAPI
4441 netif_receive_skb(skb); 5079 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5080 /* Queueing the vlan frame to the upper layer */
5081 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5082 RXD_GET_VLAN_TAG(rxdp->Control_2));
5083 } else {
5084 netif_receive_skb(skb);
5085 }
4442#else 5086#else
4443 netif_rx(skb); 5087 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5088 /* Queueing the vlan frame to the upper layer */
5089 vlan_hwaccel_rx(skb, sp->vlgrp,
5090 RXD_GET_VLAN_TAG(rxdp->Control_2));
5091 } else {
5092 netif_rx(skb);
5093 }
4444#endif 5094#endif
4445
4446 dev->last_rx = jiffies; 5095 dev->last_rx = jiffies;
4447 sp->rx_pkt_count++;
4448 sp->stats.rx_packets++;
4449#ifndef CONFIG_2BUFF_MODE
4450 sp->stats.rx_bytes += len;
4451#else
4452 sp->stats.rx_bytes += buf0_len + buf2_len;
4453#endif
4454
4455 atomic_dec(&sp->rx_bufs_left[ring_no]); 5096 atomic_dec(&sp->rx_bufs_left[ring_no]);
4456 rxdp->Host_Control = 0;
4457 return SUCCESS; 5097 return SUCCESS;
4458} 5098}
4459 5099
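The reworked receive completion path above fails fast on the RxD transfer code, updates statistics before the checksum check, and only then picks a delivery routine. A minimal sketch of that last step, reusing only names visible in the hunk (sp->vlgrp, RXD_GET_VLAN_TAG) and assuming a NAPI build; the non-NAPI branch has the same shape with vlan_hwaccel_rx()/netif_rx():

	/* Sketch, not driver code: hand a completed skb to the stack,
	 * preferring the VLAN hw-accel path when a tag is present. */
	static void deliver_rx(nic_t *sp, struct sk_buff *skb, RxD_t *rxdp)
	{
		u16 tag = RXD_GET_VLAN_TAG(rxdp->Control_2);

		if (sp->vlgrp && tag)
			vlan_hwaccel_receive_skb(skb, sp->vlgrp, tag);
		else
			netif_receive_skb(skb);
	}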
@@ -4464,13 +5104,13 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4464 * @link : indicates whether link is UP/DOWN. 5104 * @link : indicates whether link is UP/DOWN.
4465 * Description: 5105 * Description:
4466 * This function stops/starts the Tx queue depending on whether the link 5106 * This function stops/starts the Tx queue depending on whether the link
4467 * status of the NIC is down or up. This is called by the Alarm 5107 * status of the NIC is down or up. This is called by the Alarm
4468 * interrupt handler whenever a link change interrupt comes up. 5108 * interrupt handler whenever a link change interrupt comes up.
4469 * Return value: 5109 * Return value:
4470 * void. 5110 * void.
4471 */ 5111 */
4472 5112
4473static void s2io_link(nic_t * sp, int link) 5113void s2io_link(nic_t * sp, int link)
4474{ 5114{
4475 struct net_device *dev = (struct net_device *) sp->dev; 5115 struct net_device *dev = (struct net_device *) sp->dev;
4476 5116
@@ -4487,8 +5127,25 @@ static void s2io_link(nic_t * sp, int link)
4487} 5127}
4488 5128
4489/** 5129/**
4490 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 5130 * get_xena_rev_id - to identify revision ID of xena.
4491 * @sp : private member of the device structure, which is a pointer to the 5131 * @pdev : PCI Dev structure
5132 * Description:
5133 * Function to identify the Revision ID of xena.
5134 * Return value:
5135 * returns the revision ID of the device.
5136 */
5137
5138int get_xena_rev_id(struct pci_dev *pdev)
5139{
5140 u8 id = 0;
5141 int ret;
5142 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *)&id);
5143 return id;
5144}
5145
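get_xena_rev_id() above captures the return value of pci_read_config_byte() in ret but never checks it; the revision byte is handed back regardless. A hypothetical variant that propagates the error instead (not what the driver does):

	/* Hypothetical checked variant; pci_read_config_byte() returns
	 * 0 on success, so callers can tell failure apart from rev 0. */
	static int get_xena_rev_id_checked(struct pci_dev *pdev, u8 *rev)
	{
		return pci_read_config_byte(pdev, PCI_REVISION_ID, rev);
	}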
5146/**
5147 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
5148 * @sp : private member of the device structure, which is a pointer to the
4492 * s2io_nic structure. 5149 * s2io_nic structure.
4493 * Description: 5150 * Description:
4494 * This function initializes a few of the PCI and PCI-X configuration registers 5151 * This function initializes a few of the PCI and PCI-X configuration registers
@@ -4499,15 +5156,15 @@ static void s2io_link(nic_t * sp, int link)
4499 5156
4500static void s2io_init_pci(nic_t * sp) 5157static void s2io_init_pci(nic_t * sp)
4501{ 5158{
4502 u16 pci_cmd = 0; 5159 u16 pci_cmd = 0, pcix_cmd = 0;
4503 5160
4504 /* Enable Data Parity Error Recovery in PCI-X command register. */ 5161 /* Enable Data Parity Error Recovery in PCI-X command register. */
4505 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5162 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4506 &(sp->pcix_cmd)); 5163 &(pcix_cmd));
4507 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5164 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 (sp->pcix_cmd | 1)); 5165 (pcix_cmd | 1));
4509 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5166 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4510 &(sp->pcix_cmd)); 5167 &(pcix_cmd));
4511 5168
4512 /* Set the PErr Response bit in PCI command register. */ 5169 /* Set the PErr Response bit in PCI command register. */
4513 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5170 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
@@ -4515,53 +5172,43 @@ static void s2io_init_pci(nic_t * sp)
4515 (pci_cmd | PCI_COMMAND_PARITY)); 5172 (pci_cmd | PCI_COMMAND_PARITY));
4516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 5173 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4517 5174
4518 /* Set MMRB count to 1024 in PCI-X Command register. */
4519 sp->pcix_cmd &= 0xFFF3;
4520 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4521 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4522 &(sp->pcix_cmd));
4523
4524 /* Setting Maximum outstanding splits based on system type. */
4525 sp->pcix_cmd &= 0xFF8F;
4526
4527 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4529 sp->pcix_cmd);
4530 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4531 &(sp->pcix_cmd));
4532 /* Forcibly disabling relaxed ordering capability of the card. */ 5175 /* Forcibly disabling relaxed ordering capability of the card. */
4533 sp->pcix_cmd &= 0xfffd; 5176 pcix_cmd &= 0xfffd;
4534 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5177 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4535 sp->pcix_cmd); 5178 pcix_cmd);
4536 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 5179 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4537 &(sp->pcix_cmd)); 5180 &(pcix_cmd));
4538} 5181}
4539 5182
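s2io_init_pci() now works on a local pcix_cmd instead of caching the value in sp->pcix_cmd, and every write is followed by a read of the same word. The pattern is plain config-space read-modify-write with a read-back to post the write; condensed from the hunk above (sketch only):

	u16 cmd;

	/* read-modify-write PCIX_COMMAND_REGISTER, then read back */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &cmd);
	cmd &= 0xfffd;			/* clear the relaxed-ordering bit */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, cmd);
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &cmd);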
4540MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 5183MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4541MODULE_LICENSE("GPL"); 5184MODULE_LICENSE("GPL");
4542module_param(tx_fifo_num, int, 0); 5185module_param(tx_fifo_num, int, 0);
4543module_param_array(tx_fifo_len, int, NULL, 0);
4544module_param(rx_ring_num, int, 0); 5186module_param(rx_ring_num, int, 0);
4545module_param_array(rx_ring_sz, int, NULL, 0); 5187module_param_array(tx_fifo_len, uint, NULL, 0);
4546module_param(Stats_refresh_time, int, 0); 5188module_param_array(rx_ring_sz, uint, NULL, 0);
5189module_param_array(rts_frm_len, uint, NULL, 0);
5190module_param(use_continuous_tx_intrs, int, 1);
4547module_param(rmac_pause_time, int, 0); 5191module_param(rmac_pause_time, int, 0);
4548module_param(mc_pause_threshold_q0q3, int, 0); 5192module_param(mc_pause_threshold_q0q3, int, 0);
4549module_param(mc_pause_threshold_q4q7, int, 0); 5193module_param(mc_pause_threshold_q4q7, int, 0);
4550module_param(shared_splits, int, 0); 5194module_param(shared_splits, int, 0);
4551module_param(tmac_util_period, int, 0); 5195module_param(tmac_util_period, int, 0);
4552module_param(rmac_util_period, int, 0); 5196module_param(rmac_util_period, int, 0);
5197module_param(bimodal, bool, 0);
4553#ifndef CONFIG_S2IO_NAPI 5198#ifndef CONFIG_S2IO_NAPI
4554module_param(indicate_max_pkts, int, 0); 5199module_param(indicate_max_pkts, int, 0);
4555#endif 5200#endif
5201module_param(rxsync_frequency, int, 0);
5202
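The array parameters above (tx_fifo_len, rx_ring_sz, rts_frm_len) take comma-separated lists at load time, one entry per FIFO or ring. An illustrative invocation (values are examples, not recommendations):

	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2 rx_ring_sz=30,30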
4556/** 5203/**
4557 * s2io_init_nic - Initialization of the adapter . 5204 * s2io_init_nic - Initialization of the adapter .
4558 * @pdev : structure containing the PCI related information of the device. 5205 * @pdev : structure containing the PCI related information of the device.
4559 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 5206 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4560 * Description: 5207 * Description:
4561 * The function initializes an adapter identified by the pci_dev structure. 5208 * The function initializes an adapter identified by the pci_dev structure.
4562 * All OS related initialization including memory and device structure and 5209 * All OS related initialization including memory and device structure and
4563 * initialization of the device private variable is done. Also the swapper 5210 * initialization of the device private variable is done. Also the swapper
4564 * control register is initialized to enable read and write into the I/O 5211 * control register is initialized to enable read and write into the I/O
4565 * registers of the device. 5212 * registers of the device.
4566 * Return value: 5213 * Return value:
4567 * returns 0 on success and negative on failure. 5214 * returns 0 on success and negative on failure.
@@ -4572,7 +5219,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4572{ 5219{
4573 nic_t *sp; 5220 nic_t *sp;
4574 struct net_device *dev; 5221 struct net_device *dev;
4575 char *dev_name = "S2IO 10GE NIC";
4576 int i, j, ret; 5222 int i, j, ret;
4577 int dma_flag = FALSE; 5223 int dma_flag = FALSE;
4578 u32 mac_up, mac_down; 5224 u32 mac_up, mac_down;
@@ -4581,10 +5227,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4581 u16 subid; 5227 u16 subid;
4582 mac_info_t *mac_control; 5228 mac_info_t *mac_control;
4583 struct config_param *config; 5229 struct config_param *config;
5230 int mode;
4584 5231
4585 5232#ifdef CONFIG_S2IO_NAPI
4586 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n", 5233 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4587 s2io_driver_version); 5234#endif
4588 5235
4589 if ((ret = pci_enable_device(pdev))) { 5236 if ((ret = pci_enable_device(pdev))) {
4590 DBG_PRINT(ERR_DBG, 5237 DBG_PRINT(ERR_DBG,
@@ -4595,7 +5242,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4595 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5242 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4596 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); 5243 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4597 dma_flag = TRUE; 5244 dma_flag = TRUE;
4598
4599 if (pci_set_consistent_dma_mask 5245 if (pci_set_consistent_dma_mask
4600 (pdev, DMA_64BIT_MASK)) { 5246 (pdev, DMA_64BIT_MASK)) {
4601 DBG_PRINT(ERR_DBG, 5247 DBG_PRINT(ERR_DBG,
@@ -4635,34 +5281,41 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4635 memset(sp, 0, sizeof(nic_t)); 5281 memset(sp, 0, sizeof(nic_t));
4636 sp->dev = dev; 5282 sp->dev = dev;
4637 sp->pdev = pdev; 5283 sp->pdev = pdev;
4638 sp->vendor_id = pdev->vendor;
4639 sp->device_id = pdev->device;
4640 sp->high_dma_flag = dma_flag; 5284 sp->high_dma_flag = dma_flag;
4641 sp->irq = pdev->irq;
4642 sp->device_enabled_once = FALSE; 5285 sp->device_enabled_once = FALSE;
4643 strcpy(sp->name, dev_name); 5286
5287 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5288 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5289 sp->device_type = XFRAME_II_DEVICE;
5290 else
5291 sp->device_type = XFRAME_I_DEVICE;
4644 5292
4645 /* Initialize some PCI/PCI-X fields of the NIC. */ 5293 /* Initialize some PCI/PCI-X fields of the NIC. */
4646 s2io_init_pci(sp); 5294 s2io_init_pci(sp);
4647 5295
4648 /* 5296 /*
4649 * Setting the device configuration parameters. 5297 * Setting the device configuration parameters.
4650 * Most of these parameters can be specified by the user during 5298 * Most of these parameters can be specified by the user during
4651 * module insertion as they are module loadable parameters. If 5299 * module insertion as they are module loadable parameters. If
4652 * these parameters are not specified during load time, they 5300 * these parameters are not specified during load time, they
4653 * are initialized with default values. 5301 * are initialized with default values.
4654 */ 5302 */
4655 mac_control = &sp->mac_control; 5303 mac_control = &sp->mac_control;
4656 config = &sp->config; 5304 config = &sp->config;
4657 5305
4658 /* Tx side parameters. */ 5306 /* Tx side parameters. */
4659 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */ 5307 if (tx_fifo_len[0] == 0)
5308 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4660 config->tx_fifo_num = tx_fifo_num; 5309 config->tx_fifo_num = tx_fifo_num;
4661 for (i = 0; i < MAX_TX_FIFOS; i++) { 5310 for (i = 0; i < MAX_TX_FIFOS; i++) {
4662 config->tx_cfg[i].fifo_len = tx_fifo_len[i]; 5311 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4663 config->tx_cfg[i].fifo_priority = i; 5312 config->tx_cfg[i].fifo_priority = i;
4664 } 5313 }
4665 5314
5315 /* mapping the QoS priority to the configured fifos */
5316 for (i = 0; i < MAX_TX_FIFOS; i++)
5317 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5318
4666 config->tx_intr_type = TXD_INT_TYPE_UTILZ; 5319 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4667 for (i = 0; i < config->tx_fifo_num; i++) { 5320 for (i = 0; i < config->tx_fifo_num; i++) {
4668 config->tx_cfg[i].f_no_snoop = 5321 config->tx_cfg[i].f_no_snoop =
@@ -4675,7 +5328,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4675 config->max_txds = MAX_SKB_FRAGS; 5328 config->max_txds = MAX_SKB_FRAGS;
4676 5329
4677 /* Rx side parameters. */ 5330 /* Rx side parameters. */
4678 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */ 5331 if (rx_ring_sz[0] == 0)
5332 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4679 config->rx_ring_num = rx_ring_num; 5333 config->rx_ring_num = rx_ring_num;
4680 for (i = 0; i < MAX_RX_RINGS; i++) { 5334 for (i = 0; i < MAX_RX_RINGS; i++) {
4681 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5335 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -4699,10 +5353,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4699 for (i = 0; i < config->rx_ring_num; i++) 5353 for (i = 0; i < config->rx_ring_num; i++)
4700 atomic_set(&sp->rx_bufs_left[i], 0); 5354 atomic_set(&sp->rx_bufs_left[i], 0);
4701 5355
5356 /* Initialize the number of ISRs currently running */
5357 atomic_set(&sp->isr_cnt, 0);
5358
4702 /* initialize the shared memory used by the NIC and the host */ 5359 /* initialize the shared memory used by the NIC and the host */
4703 if (init_shared_mem(sp)) { 5360 if (init_shared_mem(sp)) {
4704 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 5361 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4705 dev->name); 5362 __FUNCTION__);
4706 ret = -ENOMEM; 5363 ret = -ENOMEM;
4707 goto mem_alloc_failed; 5364 goto mem_alloc_failed;
4708 } 5365 }
@@ -4743,13 +5400,17 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4743 dev->do_ioctl = &s2io_ioctl; 5400 dev->do_ioctl = &s2io_ioctl;
4744 dev->change_mtu = &s2io_change_mtu; 5401 dev->change_mtu = &s2io_change_mtu;
4745 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 5402 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5403 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5404 dev->vlan_rx_register = s2io_vlan_rx_register;
5405 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5406
4746 /* 5407 /*
4747 * will use eth_mac_addr() for dev->set_mac_address 5408 * will use eth_mac_addr() for dev->set_mac_address
4748 * mac address will be set every time dev->open() is called 5409 * mac address will be set every time dev->open() is called
4749 */ 5410 */
4750#ifdef CONFIG_S2IO_NAPI 5411#if defined(CONFIG_S2IO_NAPI)
4751 dev->poll = s2io_poll; 5412 dev->poll = s2io_poll;
4752 dev->weight = 90; 5413 dev->weight = 32;
4753#endif 5414#endif
4754 5415
4755 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 5416 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -4776,22 +5437,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4776 goto set_swap_failed; 5437 goto set_swap_failed;
4777 } 5438 }
4778 5439
4779 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */ 5440 /* Verify if the Herc works on the slot its placed into */
4780 fix_mac_address(sp); 5441 if (sp->device_type & XFRAME_II_DEVICE) {
4781 s2io_reset(sp); 5442 mode = s2io_verify_pci_mode(sp);
5443 if (mode < 0) {
5444 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5445 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5446 ret = -EBADSLT;
5447 goto set_swap_failed;
5448 }
5449 }
4782 5450
4783 /* 5451 /* Not needed for Herc */
4784 * Setting swapper control on the NIC, so the MAC address can be read. 5452 if (sp->device_type & XFRAME_I_DEVICE) {
4785 */ 5453 /*
4786 if (s2io_set_swapper(sp)) { 5454 * Fix for all "FFs" MAC address problems observed on
4787 DBG_PRINT(ERR_DBG, 5455 * Alpha platforms
4788 "%s: S2IO: swapper settings are wrong\n", 5456 */
4789 dev->name); 5457 fix_mac_address(sp);
4790 ret = -EAGAIN; 5458 s2io_reset(sp);
4791 goto set_swap_failed;
4792 } 5459 }
4793 5460
4794 /* 5461 /*
4795 * MAC address initialization. 5462 * MAC address initialization.
4796 * For now only one mac address will be read and used. 5463 * For now only one mac address will be read and used.
4797 */ 5464 */
@@ -4814,37 +5481,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4814 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); 5481 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4815 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); 5482 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4816 5483
4817 DBG_PRINT(INIT_DBG,
4818 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4819 sp->def_mac_addr[0].mac_addr[0],
4820 sp->def_mac_addr[0].mac_addr[1],
4821 sp->def_mac_addr[0].mac_addr[2],
4822 sp->def_mac_addr[0].mac_addr[3],
4823 sp->def_mac_addr[0].mac_addr[4],
4824 sp->def_mac_addr[0].mac_addr[5]);
4825
4826 /* Set the factory defined MAC address initially */ 5484 /* Set the factory defined MAC address initially */
4827 dev->addr_len = ETH_ALEN; 5485 dev->addr_len = ETH_ALEN;
4828 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 5486 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4829 5487
4830 /* 5488 /*
4831 * Initialize the tasklet status and link state flags 5489 * Initialize the tasklet status and link state flags
4832 * and the card statte parameter 5490 * and the card state parameter
4833 */ 5491 */
4834 atomic_set(&(sp->card_state), 0); 5492 atomic_set(&(sp->card_state), 0);
4835 sp->tasklet_status = 0; 5493 sp->tasklet_status = 0;
4836 sp->link_state = 0; 5494 sp->link_state = 0;
4837 5495
4838
4839 /* Initialize spinlocks */ 5496 /* Initialize spinlocks */
4840 spin_lock_init(&sp->tx_lock); 5497 spin_lock_init(&sp->tx_lock);
4841#ifndef CONFIG_S2IO_NAPI 5498#ifndef CONFIG_S2IO_NAPI
4842 spin_lock_init(&sp->put_lock); 5499 spin_lock_init(&sp->put_lock);
4843#endif 5500#endif
5501 spin_lock_init(&sp->rx_lock);
4844 5502
4845 /* 5503 /*
4846 * SXE-002: Configure link and activity LED to init state 5504 * SXE-002: Configure link and activity LED to init state
4847 * on driver load. 5505 * on driver load.
4848 */ 5506 */
4849 subid = sp->pdev->subsystem_device; 5507 subid = sp->pdev->subsystem_device;
4850 if ((subid & 0xFF) >= 0x07) { 5508 if ((subid & 0xFF) >= 0x07) {
@@ -4864,13 +5522,61 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4864 goto register_failed; 5522 goto register_failed;
4865 } 5523 }
4866 5524
4867 /* 5525 if (sp->device_type & XFRAME_II_DEVICE) {
4868 * Make Link state as off at this point, when the Link change 5526 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
4869 * interrupt comes the state will be automatically changed to 5527 dev->name);
5528 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5529 get_xena_rev_id(sp->pdev),
5530 s2io_driver_version);
5531 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5532 sp->def_mac_addr[0].mac_addr[0],
5533 sp->def_mac_addr[0].mac_addr[1],
5534 sp->def_mac_addr[0].mac_addr[2],
5535 sp->def_mac_addr[0].mac_addr[3],
5536 sp->def_mac_addr[0].mac_addr[4],
5537 sp->def_mac_addr[0].mac_addr[5]);
5538 mode = s2io_print_pci_mode(sp);
5539 if (mode < 0) {
5540 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5541 ret = -EBADSLT;
5542 goto set_swap_failed;
5543 }
5544 } else {
5545 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5546 dev->name);
5547 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5548 get_xena_rev_id(sp->pdev),
5549 s2io_driver_version);
5550 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5551 sp->def_mac_addr[0].mac_addr[0],
5552 sp->def_mac_addr[0].mac_addr[1],
5553 sp->def_mac_addr[0].mac_addr[2],
5554 sp->def_mac_addr[0].mac_addr[3],
5555 sp->def_mac_addr[0].mac_addr[4],
5556 sp->def_mac_addr[0].mac_addr[5]);
5557 }
5558
5559 /* Initialize device name */
5560 strcpy(sp->name, dev->name);
5561 if (sp->device_type & XFRAME_II_DEVICE)
5562 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5563 else
5564 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5565
5566 /* Initialize bimodal Interrupts */
5567 sp->config.bimodal = bimodal;
5568 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5569 sp->config.bimodal = 0;
5570 DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
5571 dev->name);
5572 }
5573
5574 /*
5575 * Make Link state as off at this point, when the Link change
5576 * interrupt comes the state will be automatically changed to
4870 * the right state. 5577 * the right state.
4871 */ 5578 */
4872 netif_carrier_off(dev); 5579 netif_carrier_off(dev);
4873 sp->last_link_state = LINK_DOWN;
4874 5580
4875 return 0; 5581 return 0;
4876 5582
@@ -4891,11 +5597,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4891} 5597}
4892 5598
4893/** 5599/**
4894 * s2io_rem_nic - Free the PCI device 5600 * s2io_rem_nic - Free the PCI device
4895 * @pdev: structure containing the PCI related information of the device. 5601 * @pdev: structure containing the PCI related information of the device.
4896 * Description: This function is called by the PCI subsystem to release a 5602 * Description: This function is called by the PCI subsystem to release a
4897 * PCI device and free up all resources held by the device. This could 5603 * PCI device and free up all resources held by the device. This could
4898 * be in response to a Hot plug event or when the driver is to be removed 5604 * be in response to a Hot plug event or when the driver is to be removed
4899 * from memory. 5605 * from memory.
4900 */ 5606 */
4901 5607
@@ -4919,7 +5625,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4919 pci_disable_device(pdev); 5625 pci_disable_device(pdev);
4920 pci_release_regions(pdev); 5626 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL); 5627 pci_set_drvdata(pdev, NULL);
4922
4923 free_netdev(dev); 5628 free_netdev(dev);
4924} 5629}
4925 5630
@@ -4935,11 +5640,11 @@ int __init s2io_starter(void)
4935} 5640}
4936 5641
4937/** 5642/**
4938 * s2io_closer - Cleanup routine for the driver 5643 * s2io_closer - Cleanup routine for the driver
4939 * Description: This function is the cleanup routine for the driver. It unregisters the driver. 5644 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4940 */ 5645 */
4941 5646
4942static void s2io_closer(void) 5647void s2io_closer(void)
4943{ 5648{
4944 pci_unregister_driver(&s2io_driver); 5649 pci_unregister_driver(&s2io_driver);
4945 DBG_PRINT(INIT_DBG, "cleanup done\n"); 5650 DBG_PRINT(INIT_DBG, "cleanup done\n");
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1711c8c3dc99..bc64d967f080 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,9 @@
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33 33
34/* Maximum time to flicker LED when asked to identify NIC using ethtool */
35#define MAX_FLICKER_TIME 60000 /* 60 Secs */
36
34/* Maximum outstanding splits to be configured into xena. */ 37/* Maximum outstanding splits to be configured into xena. */
35typedef enum xena_max_outstanding_splits { 38typedef enum xena_max_outstanding_splits {
36 XENA_ONE_SPLIT_TRANSACTION = 0, 39 XENA_ONE_SPLIT_TRANSACTION = 0,
@@ -45,10 +48,10 @@ typedef enum xena_max_outstanding_splits {
45#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 48#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
46 49
47/* OS concerned variables and constants */ 50/* OS concerned variables and constants */
48#define WATCH_DOG_TIMEOUT 5*HZ 51#define WATCH_DOG_TIMEOUT 15*HZ
49#define EFILL 0x1234 52#define EFILL 0x1234
50#define ALIGN_SIZE 127 53#define ALIGN_SIZE 127
51#define PCIX_COMMAND_REGISTER 0x62 54#define PCIX_COMMAND_REGISTER 0x62
52 55
53/* 56/*
54 * Debug related variables. 57 * Debug related variables.
@@ -61,7 +64,7 @@ typedef enum xena_max_outstanding_splits {
61#define INTR_DBG 4 64#define INTR_DBG 4
62 65
63/* Global variable that defines the present debug level of the driver. */ 66/* Global variable that defines the present debug level of the driver. */
64static int debug_level = ERR_DBG; /* Default level. */ 67int debug_level = ERR_DBG; /* Default level. */
65 68
66/* DEBUG message print. */ 69/* DEBUG message print. */
67#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) 70#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
@@ -71,6 +74,12 @@ static int debug_level = ERR_DBG; /* Default level. */
71#define L4_CKSUM_OK 0xFFFF 74#define L4_CKSUM_OK 0xFFFF
72#define S2IO_JUMBO_SIZE 9600 75#define S2IO_JUMBO_SIZE 9600
73 76
77/* Driver statistics maintained by driver */
78typedef struct {
79 unsigned long long single_ecc_errs;
80 unsigned long long double_ecc_errs;
81} swStat_t;
82
74/* The statistics block of Xena */ 83/* The statistics block of Xena */
75typedef struct stat_block { 84typedef struct stat_block {
76/* Tx MAC statistics counters. */ 85/* Tx MAC statistics counters. */
@@ -186,12 +195,90 @@ typedef struct stat_block {
186 u32 rxd_rd_cnt; 195 u32 rxd_rd_cnt;
187 u32 rxf_wr_cnt; 196 u32 rxf_wr_cnt;
188 u32 txf_rd_cnt; 197 u32 txf_rd_cnt;
198
199/* Tx MAC statistics overflow counters. */
200 u32 tmac_data_octets_oflow;
201 u32 tmac_frms_oflow;
202 u32 tmac_bcst_frms_oflow;
203 u32 tmac_mcst_frms_oflow;
204 u32 tmac_ucst_frms_oflow;
205 u32 tmac_ttl_octets_oflow;
206 u32 tmac_any_err_frms_oflow;
207 u32 tmac_nucst_frms_oflow;
208 u64 tmac_vlan_frms;
209 u32 tmac_drop_ip_oflow;
210 u32 tmac_vld_ip_oflow;
211 u32 tmac_rst_tcp_oflow;
212 u32 tmac_icmp_oflow;
213 u32 tpa_unknown_protocol;
214 u32 tmac_udp_oflow;
215 u32 reserved_10;
216 u32 tpa_parse_failure;
217
218/* Rx MAC Statistics overflow counters. */
219 u32 rmac_data_octets_oflow;
220 u32 rmac_vld_frms_oflow;
221 u32 rmac_vld_bcst_frms_oflow;
222 u32 rmac_vld_mcst_frms_oflow;
223 u32 rmac_accepted_ucst_frms_oflow;
224 u32 rmac_ttl_octets_oflow;
225 u32 rmac_discarded_frms_oflow;
226 u32 rmac_accepted_nucst_frms_oflow;
227 u32 rmac_usized_frms_oflow;
228 u32 rmac_drop_events_oflow;
229 u32 rmac_frag_frms_oflow;
230 u32 rmac_osized_frms_oflow;
231 u32 rmac_ip_oflow;
232 u32 rmac_jabber_frms_oflow;
233 u32 rmac_icmp_oflow;
234 u32 rmac_drop_ip_oflow;
235 u32 rmac_err_drp_udp_oflow;
236 u32 rmac_udp_oflow;
237 u32 reserved_11;
238 u32 rmac_pause_cnt_oflow;
239 u64 rmac_ttl_1519_4095_frms;
240 u64 rmac_ttl_4096_8191_frms;
241 u64 rmac_ttl_8192_max_frms;
242 u64 rmac_ttl_gt_max_frms;
243 u64 rmac_osized_alt_frms;
244 u64 rmac_jabber_alt_frms;
245 u64 rmac_gt_max_alt_frms;
246 u64 rmac_vlan_frms;
247 u32 rmac_len_discard;
248 u32 rmac_fcs_discard;
249 u32 rmac_pf_discard;
250 u32 rmac_da_discard;
251 u32 rmac_red_discard;
252 u32 rmac_rts_discard;
253 u32 reserved_12;
254 u32 rmac_ingm_full_discard;
255 u32 reserved_13;
256 u32 rmac_accepted_ip_oflow;
257 u32 reserved_14;
258 u32 link_fault_cnt;
259 swStat_t sw_stat;
189} StatInfo_t; 260} StatInfo_t;
190 261
191/* Structures representing different init time configuration 262/*
263 * Structures representing different init time configuration
192 * parameters of the NIC. 264 * parameters of the NIC.
193 */ 265 */
194 266
267#define MAX_TX_FIFOS 8
268#define MAX_RX_RINGS 8
269
270/* FIFO mappings for all possible number of fifos configured */
271int fifo_map[][MAX_TX_FIFOS] = {
272 {0, 0, 0, 0, 0, 0, 0, 0},
273 {0, 0, 0, 0, 1, 1, 1, 1},
274 {0, 0, 0, 1, 1, 1, 2, 2},
275 {0, 0, 1, 1, 2, 2, 3, 3},
276 {0, 0, 1, 1, 2, 2, 3, 4},
277 {0, 0, 1, 1, 2, 3, 4, 5},
278 {0, 0, 1, 2, 3, 4, 5, 6},
279 {0, 1, 2, 3, 4, 5, 6, 7},
280};
281
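fifo_map is indexed first by the configured FIFO count, so each row spreads the eight QoS priorities as evenly as the available FIFOs allow; the probe path copies the selected row into config->fifo_mapping[]. A worked example, with the lookup by priority shown as an assumption (the transmit-side consumer is not in these hunks):

	/* tx_fifo_num == 3 selects {0, 0, 0, 1, 1, 1, 2, 2}:
	 * priorities 0-2 -> FIFO 0, 3-5 -> FIFO 1, 6-7 -> FIFO 2. */
	u8 fifo = config->fifo_mapping[priority & (MAX_TX_FIFOS - 1)];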
195/* Maintains Per FIFO related information. */ 282/* Maintains Per FIFO related information. */
196typedef struct tx_fifo_config { 283typedef struct tx_fifo_config {
197#define MAX_AVAILABLE_TXDS 8192 284#define MAX_AVAILABLE_TXDS 8192
@@ -237,14 +324,14 @@ typedef struct rx_ring_config {
237#define NO_SNOOP_RXD_BUFFER 0x02 324#define NO_SNOOP_RXD_BUFFER 0x02
238} rx_ring_config_t; 325} rx_ring_config_t;
239 326
240/* This structure contains values of the tunable parameters 327/* This structure contains values of the tunable parameters
241 * of the H/W 328 * of the H/W
242 */ 329 */
243struct config_param { 330struct config_param {
244/* Tx Side */ 331/* Tx Side */
245 u32 tx_fifo_num; /*Number of Tx FIFOs */ 332 u32 tx_fifo_num; /*Number of Tx FIFOs */
246#define MAX_TX_FIFOS 8
247 333
334 u8 fifo_mapping[MAX_TX_FIFOS];
248 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 335 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
249 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 336 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
250 u64 tx_intr_type; 337 u64 tx_intr_type;
@@ -252,10 +339,10 @@ struct config_param {
252 339
253/* Rx Side */ 340/* Rx Side */
254 u32 rx_ring_num; /*Number of receive rings */ 341 u32 rx_ring_num; /*Number of receive rings */
255#define MAX_RX_RINGS 8
256#define MAX_RX_BLOCKS_PER_RING 150 342#define MAX_RX_BLOCKS_PER_RING 150
257 343
258 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 344 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
345 u8 bimodal; /*Flag for setting bimodal interrupts*/
259 346
260#define HEADER_ETHERNET_II_802_3_SIZE 14 347#define HEADER_ETHERNET_II_802_3_SIZE 14
261#define HEADER_802_2_SIZE 3 348#define HEADER_802_2_SIZE 3
@@ -269,6 +356,7 @@ struct config_param {
269#define MAX_PYLD_JUMBO 9600 356#define MAX_PYLD_JUMBO 9600
270#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18) 357#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
271#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22) 358#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
359 u16 bus_speed;
272}; 360};
273 361
274/* Structure representing MAC Addrs */ 362/* Structure representing MAC Addrs */
@@ -277,7 +365,7 @@ typedef struct mac_addr {
277} macaddr_t; 365} macaddr_t;
278 366
279/* Structure that represent every FIFO element in the BAR1 367/* Structure that represent every FIFO element in the BAR1
280 * Address location. 368 * Address location.
281 */ 369 */
282typedef struct _TxFIFO_element { 370typedef struct _TxFIFO_element {
283 u64 TxDL_Pointer; 371 u64 TxDL_Pointer;
@@ -339,6 +427,7 @@ typedef struct _RxD_t {
339#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8) 427#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
340#define RXD_FRAME_PROTO_IPV4 BIT(27) 428#define RXD_FRAME_PROTO_IPV4 BIT(27)
341#define RXD_FRAME_PROTO_IPV6 BIT(28) 429#define RXD_FRAME_PROTO_IPV6 BIT(28)
430#define RXD_FRAME_IP_FRAG BIT(29)
342#define RXD_FRAME_PROTO_TCP BIT(30) 431#define RXD_FRAME_PROTO_TCP BIT(30)
343#define RXD_FRAME_PROTO_UDP BIT(31) 432#define RXD_FRAME_PROTO_UDP BIT(31)
344#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP) 433#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
@@ -346,11 +435,15 @@ typedef struct _RxD_t {
346#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF) 435#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
347 436
348 u64 Control_2; 437 u64 Control_2;
438#define THE_RXD_MARK 0x3
439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
441
349#ifndef CONFIG_2BUFF_MODE 442#ifndef CONFIG_2BUFF_MODE
350#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16) 443#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
351#define SET_BUFFER0_SIZE(val) vBIT(val,0,16) 444#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
352#else 445#else
353#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16) 446#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
354#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) 447#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
355#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) 448#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
356#define SET_BUFFER0_SIZE(val) vBIT(val,8,8) 449#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
@@ -363,7 +456,7 @@ typedef struct _RxD_t {
363#define SET_NUM_TAG(val) vBIT(val,16,32) 456#define SET_NUM_TAG(val) vBIT(val,16,32)
364 457
365#ifndef CONFIG_2BUFF_MODE 458#ifndef CONFIG_2BUFF_MODE
366#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16))) 459#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
367#else 460#else
368#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 461#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
369 >> 48) 462 >> 48)
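SET_RXD_MARKER places THE_RXD_MARK in the two most significant bits of Control_2 (GET_RXD_MARKER shifts down by 62), which is why the buffer-0 size field had to move: it now starts at bit 2 and is 14 bits wide. Assuming the usual s2io convention vBIT(v, loc, sz) == (u64)(v) << (64 - loc - sz) (defined outside this hunk), the layout works out as:

	SET_RXD_MARKER			/* 0x3ULL << 62: bits 63:62	*/
	GET_RXD_MARKER(ctrl)		/* (ctrl & marker) >> 62 == 0x3	*/
	vBIT(0x3FFF, 2, 14)		/* 0x3FFF << 48: bits 61:48	*/
	/* ...matching the ">> 48" used to extract the length in the
	 * 2-buffer receive path earlier in this diff. */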
@@ -382,7 +475,7 @@ typedef struct _RxD_t {
382#endif 475#endif
383} RxD_t; 476} RxD_t;
384 477
385/* Structure that represents the Rx descriptor block which contains 478/* Structure that represents the Rx descriptor block which contains
386 * 128 Rx descriptors. 479 * 128 Rx descriptors.
387 */ 480 */
388#ifndef CONFIG_2BUFF_MODE 481#ifndef CONFIG_2BUFF_MODE
@@ -392,11 +485,11 @@ typedef struct _RxD_block {
392 485
393 u64 reserved_0; 486 u64 reserved_0;
394#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 487#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
395 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last 488 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
396 * Rxd in this blk */ 489 * Rxd in this blk */
397 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */ 490 u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
398 u64 pNext_RxD_Blk_physical; /* Buff0_ptr. In a 32 bit arch 491 u64 pNext_RxD_Blk_physical; /* Buff0_ptr. In a 32 bit arch
399 * the upper 32 bits should 492 * the upper 32 bits should
400 * be 0 */ 493 * be 0 */
401} RxD_block_t; 494} RxD_block_t;
402#else 495#else
@@ -405,13 +498,13 @@ typedef struct _RxD_block {
405 RxD_t rxd[MAX_RXDS_PER_BLOCK]; 498 RxD_t rxd[MAX_RXDS_PER_BLOCK];
406 499
407#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 500#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
408 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd 501 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
409 * in this blk */ 502 * in this blk */
410 u64 pNext_RxD_Blk_physical; /* Phy pointer to next blk. */ 503 u64 pNext_RxD_Blk_physical; /* Phy pointer to next blk. */
411} RxD_block_t; 504} RxD_block_t;
412#define SIZE_OF_BLOCK 4096 505#define SIZE_OF_BLOCK 4096
413 506
414/* Structure to hold virtual addresses of Buf0 and Buf1 in 507/* Structure to hold virtual addresses of Buf0 and Buf1 in
415 * 2buf mode. */ 508 * 2buf mode. */
416typedef struct bufAdd { 509typedef struct bufAdd {
417 void *ba_0_org; 510 void *ba_0_org;
@@ -423,8 +516,8 @@ typedef struct bufAdd {
423 516
424/* Structure which stores all the MAC control parameters */ 517/* Structure which stores all the MAC control parameters */
425 518
426/* This structure stores the offset of the RxD in the ring 519/* This structure stores the offset of the RxD in the ring
427 * from which the Rx Interrupt processor can start picking 520 * from which the Rx Interrupt processor can start picking
428 * up the RxDs for processing. 521 * up the RxDs for processing.
429 */ 522 */
430typedef struct _rx_curr_get_info_t { 523typedef struct _rx_curr_get_info_t {
@@ -436,7 +529,7 @@ typedef struct _rx_curr_get_info_t {
436typedef rx_curr_get_info_t rx_curr_put_info_t; 529typedef rx_curr_get_info_t rx_curr_put_info_t;
437 530
438/* This structure stores the offset of the TxDl in the FIFO 531/* This structure stores the offset of the TxDl in the FIFO
439 * from which the Tx Interrupt processor can start picking 532 * from which the Tx Interrupt processor can start picking
440 * up the TxDLs for send complete interrupt processing. 533 * up the TxDLs for send complete interrupt processing.
441 */ 534 */
442typedef struct { 535typedef struct {
@@ -446,32 +539,96 @@ typedef struct {
446 539
447typedef tx_curr_get_info_t tx_curr_put_info_t; 540typedef tx_curr_get_info_t tx_curr_put_info_t;
448 541
449/* Infomation related to the Tx and Rx FIFOs and Rings of Xena 542/* Structure that holds the Phy and virt addresses of the Blocks */
450 * is maintained in this structure. 543typedef struct rx_block_info {
451 */ 544 RxD_t *block_virt_addr;
452typedef struct mac_info { 545 dma_addr_t block_dma_addr;
453/* rx side stuff */ 546} rx_block_info_t;
454 /* Put pointer info which indictes which RxD has to be replenished 547
548/* pre declaration of the nic structure */
549typedef struct s2io_nic nic_t;
550
551/* Ring specific structure */
552typedef struct ring_info {
553 /* The ring number */
554 int ring_no;
555
556 /*
557 * Place holders for the virtual and physical addresses of
558 * all the Rx Blocks
559 */
560 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
561 int block_count;
562 int pkt_cnt;
563
564 /*
565 * Put pointer info which indicates which RxD has to be replenished
455 * with a new buffer. 566 * with a new buffer.
456 */ 567 */
457 rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS]; 568 rx_curr_put_info_t rx_curr_put_info;
458 569
459 /* Get pointer info which indictes which is the last RxD that was 570 /*
571 * Get pointer info which indicates which is the last RxD that was
460 * processed by the driver. 572 * processed by the driver.
461 */ 573 */
462 rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS]; 574 rx_curr_get_info_t rx_curr_get_info;
463 575
464 u16 rmac_pause_time; 576#ifndef CONFIG_S2IO_NAPI
465 u16 mc_pause_threshold_q0q3; 577 /* Index to the absolute position of the put pointer of Rx ring */
466 u16 mc_pause_threshold_q4q7; 578 int put_pos;
579#endif
580
581#ifdef CONFIG_2BUFF_MODE
582 /* Buffer Address store. */
583 buffAdd_t **ba;
584#endif
585 nic_t *nic;
586} ring_info_t;
467 587
588/* Fifo specific structure */
589typedef struct fifo_info {
590 /* FIFO number */
591 int fifo_no;
592
593 /* Maximum TxDs per TxDL */
594 int max_txds;
595
596 /* Place holder of all the TX List's Phy and Virt addresses. */
597 list_info_hold_t *list_info;
598
599 /*
600 * Current offset within the tx FIFO where driver would write
601 * new Tx frame
602 */
603 tx_curr_put_info_t tx_curr_put_info;
604
605 /*
606 * Current offset within tx FIFO from where the driver would start freeing
607 * the buffers
608 */
609 tx_curr_get_info_t tx_curr_get_info;
610
611 nic_t *nic;
612}fifo_info_t;
613
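The ring_info/fifo_info split above moves all per-ring and per-FIFO state out of nic_t, so the interrupt paths can be handed one unit at a time (see the rx_intr_handler()/tx_intr_handler() prototypes near the end of this header). A sketch of the resulting ISR shape, under the assumption that the ISR simply iterates the configured units:

	/* Sketch: each handler gets self-contained per-unit state. */
	for (i = 0; i < sp->config.rx_ring_num; i++)
		rx_intr_handler(&sp->mac_control.rings[i]);
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		tx_intr_handler(&sp->mac_control.fifos[i]);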
614/* Information related to the Tx and Rx FIFOs and Rings of Xena
615 * is maintained in this structure.
616 */
617typedef struct mac_info {
468/* tx side stuff */ 618/* tx side stuff */
469 /* logical pointer of start of each Tx FIFO */ 619 /* logical pointer of start of each Tx FIFO */
470 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 620 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
471 621
472/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/ 622 /* Fifo specific structure */
473 tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS]; 623 fifo_info_t fifos[MAX_TX_FIFOS];
474 tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS]; 624
625/* rx side stuff */
626 /* Ring specific structure */
627 ring_info_t rings[MAX_RX_RINGS];
628
629 u16 rmac_pause_time;
630 u16 mc_pause_threshold_q0q3;
631 u16 mc_pause_threshold_q4q7;
475 632
476 void *stats_mem; /* original pointer to allocated mem */ 633 void *stats_mem; /* original pointer to allocated mem */
477 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 634 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
@@ -485,12 +642,6 @@ typedef struct {
485 int usage_cnt; 642 int usage_cnt;
486} usr_addr_t; 643} usr_addr_t;
487 644
488/* Structure that holds the Phy and virt addresses of the Blocks */
489typedef struct rx_block_info {
490 RxD_t *block_virt_addr;
491 dma_addr_t block_dma_addr;
492} rx_block_info_t;
493
494/* Default Tunable parameters of the NIC. */ 645/* Default Tunable parameters of the NIC. */
495#define DEFAULT_FIFO_LEN 4096 646#define DEFAULT_FIFO_LEN 4096
496#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1) 647#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
@@ -499,7 +650,20 @@ typedef struct rx_block_info {
499#define LARGE_BLK_CNT 100 650#define LARGE_BLK_CNT 100
500 651
501/* Structure representing one instance of the NIC */ 652/* Structure representing one instance of the NIC */
502typedef struct s2io_nic { 653struct s2io_nic {
654#ifdef CONFIG_S2IO_NAPI
655 /*
656 * Count of packets to be processed in a given iteration, it will be indicated
657 * by the quota field of the device structure when NAPI is enabled.
658 */
659 int pkts_to_process;
660#endif
661 struct net_device *dev;
662 mac_info_t mac_control;
663 struct config_param config;
664 struct pci_dev *pdev;
665 void __iomem *bar0;
666 void __iomem *bar1;
503#define MAX_MAC_SUPPORTED 16 667#define MAX_MAC_SUPPORTED 16
504#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 668#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
505 669
@@ -507,33 +671,20 @@ typedef struct s2io_nic {
507 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 671 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
508 672
509 struct net_device_stats stats; 673 struct net_device_stats stats;
510 void __iomem *bar0;
511 void __iomem *bar1;
512 struct config_param config;
513 mac_info_t mac_control;
514 int high_dma_flag; 674 int high_dma_flag;
515 int device_close_flag; 675 int device_close_flag;
516 int device_enabled_once; 676 int device_enabled_once;
517 677
518 char name[32]; 678 char name[50];
519 struct tasklet_struct task; 679 struct tasklet_struct task;
520 volatile unsigned long tasklet_status; 680 volatile unsigned long tasklet_status;
521 struct timer_list timer;
522 struct net_device *dev;
523 struct pci_dev *pdev;
524 681
525 u16 vendor_id; 682 /* Timer that handles I/O errors/exceptions */
526 u16 device_id; 683 struct timer_list alarm_timer;
527 u16 ccmd; 684
528 u32 cbar0_1; 685 /* Space to back up the PCI config space */
529 u32 cbar0_2; 686 u32 config_space[256 / sizeof(u32)];
530 u32 cbar1_1; 687
531 u32 cbar1_2;
532 u32 cirq;
533 u8 cache_line;
534 u32 rom_expansion;
535 u16 pcix_cmd;
536 u32 irq;
537 atomic_t rx_bufs_left[MAX_RX_RINGS]; 688 atomic_t rx_bufs_left[MAX_RX_RINGS];
538 689
539 spinlock_t tx_lock; 690 spinlock_t tx_lock;
@@ -558,27 +709,11 @@ typedef struct s2io_nic {
558 u16 tx_err_count; 709 u16 tx_err_count;
559 u16 rx_err_count; 710 u16 rx_err_count;
560 711
561#ifndef CONFIG_S2IO_NAPI
562 /* Index to the absolute position of the put pointer of Rx ring. */
563 int put_pos[MAX_RX_RINGS];
564#endif
565
566 /*
567 * Place holders for the virtual and physical addresses of
568 * all the Rx Blocks
569 */
570 rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
571 int block_count[MAX_RX_RINGS];
572 int pkt_cnt[MAX_RX_RINGS];
573
574 /* Place holder of all the TX List's Phy and Virt addresses. */
575 list_info_hold_t *list_info[MAX_TX_FIFOS];
576
577 /* Id timer, used to blink NIC to physically identify NIC. */ 712 /* Id timer, used to blink NIC to physically identify NIC. */
578 struct timer_list id_timer; 713 struct timer_list id_timer;
579 714
580 /* Restart timer, used to restart NIC if the device is stuck and 715 /* Restart timer, used to restart NIC if the device is stuck and
581 * a schedule task that will set the correct Link state once the 716 * a schedule task that will set the correct Link state once the
582 * NIC's PHY has stabilized after a state change. 717 * NIC's PHY has stabilized after a state change.
583 */ 718 */
584#ifdef INIT_TQUEUE 719#ifdef INIT_TQUEUE
@@ -589,12 +724,12 @@ typedef struct s2io_nic {
589 struct work_struct set_link_task; 724 struct work_struct set_link_task;
590#endif 725#endif
591 726
592 /* Flag that can be used to turn on or turn off the Rx checksum 727 /* Flag that can be used to turn on or turn off the Rx checksum
593 * offload feature. 728 * offload feature.
594 */ 729 */
595 int rx_csum; 730 int rx_csum;
596 731
597 /* after blink, the adapter must be restored with original 732 /* after blink, the adapter must be restored with original
598 * values. 733 * values.
599 */ 734 */
600 u64 adapt_ctrl_org; 735 u64 adapt_ctrl_org;
@@ -604,16 +739,19 @@ typedef struct s2io_nic {
604#define LINK_DOWN 1 739#define LINK_DOWN 1
605#define LINK_UP 2 740#define LINK_UP 2
606 741
607#ifdef CONFIG_2BUFF_MODE
608 /* Buffer Address store. */
609 buffAdd_t **ba[MAX_RX_RINGS];
610#endif
611 int task_flag; 742 int task_flag;
612#define CARD_DOWN 1 743#define CARD_DOWN 1
613#define CARD_UP 2 744#define CARD_UP 2
614 atomic_t card_state; 745 atomic_t card_state;
615 volatile unsigned long link_state; 746 volatile unsigned long link_state;
616} nic_t; 747 struct vlan_group *vlgrp;
748#define XFRAME_I_DEVICE 1
749#define XFRAME_II_DEVICE 2
750 u8 device_type;
751
752 spinlock_t rx_lock;
753 atomic_t isr_cnt;
754};
617 755
618#define RESET_ERROR 1; 756#define RESET_ERROR 1;
619#define CMD_ERROR 2; 757#define CMD_ERROR 2;
@@ -622,7 +760,8 @@ typedef struct s2io_nic {
622#ifndef readq 760#ifndef readq
623static inline u64 readq(void __iomem *addr) 761static inline u64 readq(void __iomem *addr)
624{ 762{
625 u64 ret = readl(addr + 4); 763 u64 ret = 0;
764 ret = readl(addr + 4);
626 ret <<= 32; 765 ret <<= 32;
627 ret |= readl(addr); 766 ret |= readl(addr);
628 767
@@ -637,10 +776,10 @@ static inline void writeq(u64 val, void __iomem *addr)
637 writel((u32) (val >> 32), (addr + 4)); 776 writel((u32) (val >> 32), (addr + 4));
638} 777}
639 778
640/* In 32 bit modes, some registers have to be written in a 779/* In 32 bit modes, some registers have to be written in a
641 * particular order for correct hardware operation. The 780 * particular order for correct hardware operation. The
642 * macro SPECIAL_REG_WRITE is used to perform such ordered 781 * macro SPECIAL_REG_WRITE is used to perform such ordered
643 * writes. Defines UF (Upper First) and LF (Lower First) will 782 * writes. Defines UF (Upper First) and LF (Lower First) will
644 * be used to specify the required write order. 783 * be used to specify the required write order.
645 */ 784 */
646#define UF 1 785#define UF 1
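The UF/LF defines select which half of a 64-bit register is written first on 32-bit platforms. The driver's SPECIAL_REG_WRITE itself lies outside this hunk; a hedged sketch of what such an ordered write looks like, assuming LF is the complementary define:

	/* Sketch only: write halves of a 64-bit register in a fixed order. */
	static inline void ordered_write64(u64 val, void __iomem *addr, int order)
	{
		if (order == LF) {		/* Lower First */
			writel((u32) val, addr);
			writel((u32) (val >> 32), addr + 4);
		} else {			/* UF: Upper First */
			writel((u32) (val >> 32), addr + 4);
			writel((u32) val, addr);
		}
	}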
@@ -716,6 +855,7 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
716#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate 855#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
717 PCC_FB_ECC Error. */ 856 PCC_FB_ECC Error. */
718 857
858#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
719/* 859/*
720 * Prototype declaration. 860 * Prototype declaration.
721 */ 861 */
@@ -725,36 +865,30 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
725static int init_shared_mem(struct s2io_nic *sp); 865static int init_shared_mem(struct s2io_nic *sp);
726static void free_shared_mem(struct s2io_nic *sp); 866static void free_shared_mem(struct s2io_nic *sp);
727static int init_nic(struct s2io_nic *nic); 867static int init_nic(struct s2io_nic *nic);
728#ifndef CONFIG_S2IO_NAPI 868static void rx_intr_handler(ring_info_t *ring_data);
729static void rx_intr_handler(struct s2io_nic *sp); 869static void tx_intr_handler(fifo_info_t *fifo_data);
730#endif
731static void tx_intr_handler(struct s2io_nic *sp);
732static void alarm_intr_handler(struct s2io_nic *sp); 870static void alarm_intr_handler(struct s2io_nic *sp);
733 871
734static int s2io_starter(void); 872static int s2io_starter(void);
735static void s2io_closer(void); 873void s2io_closer(void);
736static void s2io_tx_watchdog(struct net_device *dev); 874static void s2io_tx_watchdog(struct net_device *dev);
737static void s2io_tasklet(unsigned long dev_addr); 875static void s2io_tasklet(unsigned long dev_addr);
738static void s2io_set_multicast(struct net_device *dev); 876static void s2io_set_multicast(struct net_device *dev);
739#ifndef CONFIG_2BUFF_MODE 877static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
740static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no); 878void s2io_link(nic_t * sp, int link);
741#else 879void s2io_reset(nic_t * sp);
742static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no, 880#if defined(CONFIG_S2IO_NAPI)
743 buffAdd_t * ba);
744#endif
745static void s2io_link(nic_t * sp, int link);
746static void s2io_reset(nic_t * sp);
747#ifdef CONFIG_S2IO_NAPI
748static int s2io_poll(struct net_device *dev, int *budget); 881static int s2io_poll(struct net_device *dev, int *budget);
749#endif 882#endif
750static void s2io_init_pci(nic_t * sp); 883static void s2io_init_pci(nic_t * sp);
751static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 884int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
885static void s2io_alarm_handle(unsigned long data);
752static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); 886static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
753static int verify_xena_quiescence(u64 val64, int flag); 887static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
754static struct ethtool_ops netdev_ethtool_ops; 888static struct ethtool_ops netdev_ethtool_ops;
755static void s2io_set_link(unsigned long data); 889static void s2io_set_link(unsigned long data);
756static int s2io_set_swapper(nic_t * sp); 890int s2io_set_swapper(nic_t * sp);
757static void s2io_card_down(nic_t * nic); 891static void s2io_card_down(nic_t *nic);
758static int s2io_card_up(nic_t * nic); 892static int s2io_card_up(nic_t *nic);
759 893int get_xena_rev_id(struct pci_dev *pdev);
760#endif /* _S2IO_H */ 894#endif /* _S2IO_H */
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6f..221354eea21f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
156 156
157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); 157 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
158 158
159#ifdef SHAPER_COMPLEX /* and broken.. */
160
161 while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
162 {
163 if(ptr->pri<skb->pri
164 && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
165 {
166 struct sk_buff *tmp=ptr->prev;
167
168 /*
169 * It goes before us therefore we slip the length
170 * of the new frame.
171 */
172
173 SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
174 SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
175
176 /*
177 * The packet may have slipped so far back it
178 * fell off.
179 */
180 if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
181 {
182 skb_unlink(ptr);
183 dev_kfree_skb(ptr);
184 }
185 ptr=tmp;
186 }
187 else
188 break;
189 }
190 if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
191 skb_queue_head(&shaper->sendq,skb);
192 else
193 {
194 struct sk_buff *tmp;
195 /*
196 * Set the packet clock out time according to the
197 * frames ahead. Im sure a bit of thought could drop
198 * this loop.
199 */
200 for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
201 SHAPERCB(skb)->shapeclock+=tmp->shapelen;
202 skb_append(ptr,skb);
203 }
204#else
205 { 159 {
206 struct sk_buff *tmp; 160 struct sk_buff *tmp;
207 /* 161 /*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
220 } else 174 } else
221 skb_queue_tail(&shaper->sendq, skb); 175 skb_queue_tail(&shaper->sendq, skb);
222 } 176 }
223#endif 177
224 if(sh_debug) 178 if(sh_debug)
225 printk("Frame queued.\n"); 179 printk("Frame queued.\n");
226 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) 180 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
302 * Pull the frame and get interrupts back on. 256 * Pull the frame and get interrupts back on.
303 */ 257 */
304 258
305 skb_unlink(skb); 259 skb_unlink(skb, &shaper->sendq);
306 if (shaper->recovery < 260 if (shaper->recovery <
307 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) 261 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
308 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; 262 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
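The change above tracks an skb API update: skb_unlink() now takes the owning queue explicitly, so it can take that queue's lock itself rather than trusting the skb to remember its list. In short:

	/* old: skb_unlink(skb);                 list inferred from the skb
	 * new: skb_unlink(skb, &shaper->sendq); list passed, its lock taken */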
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
index 000000000000..bf3440aa6c24
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1843 @@
1/*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/pci.h>
29#include <linux/mii.h>
30#include <linux/delay.h>
31#include <linux/crc32.h>
32#include <linux/dma-mapping.h>
33#include <asm/irq.h>
34
35#define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37#define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39#define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41#define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
45
46#define PHY_MAX_ADDR 32
47#define PHY_ID_ANY 0x1f
48#define MII_REG_ANY 0x1f
49
50#ifdef CONFIG_SIS190_NAPI
51#define NAPI_SUFFIX "-NAPI"
52#else
53#define NAPI_SUFFIX ""
54#endif
55
56#define DRV_VERSION "1.2" NAPI_SUFFIX
57#define DRV_NAME "sis190"
58#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59#define PFX DRV_NAME ": "
60
61#ifdef CONFIG_SIS190_NAPI
62#define sis190_rx_skb netif_receive_skb
63#define sis190_rx_quota(count, quota) min(count, quota)
64#else
65#define sis190_rx_skb netif_rx
66#define sis190_rx_quota(count, quota) count
67#endif
68
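The sis190_rx_skb/sis190_rx_quota pair lets one receive loop serve both builds: under NAPI the budget caps the work and frames go through netif_receive_skb(), otherwise the count is used as-is and frames go through netif_rx(). A plausible shape for that loop (sketch; dequeue_completed_rx() is hypothetical):

	unsigned int count = sis190_rx_quota(pending, (u32) dev->quota);

	while (count--) {
		struct sk_buff *skb = dequeue_completed_rx();	/* hypothetical */
		sis190_rx_skb(skb);	/* netif_receive_skb or netif_rx */
	}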
69#define MAC_ADDR_LEN 6
70
71#define NUM_TX_DESC 64 /* [8..1024] */
72#define NUM_RX_DESC 64 /* [8..8192] */
73#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75#define RX_BUF_SIZE 1536
76#define RX_BUF_MASK 0xfff8
77
78#define SIS190_REGS_SIZE 0x80
79#define SIS190_TX_TIMEOUT (6*HZ)
80#define SIS190_PHY_TIMEOUT (10*HZ)
81#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
84
85/* Enhanced PHY access register bit definitions */
86#define EhnMIIread 0x0000
87#define EhnMIIwrite 0x0020
88#define EhnMIIdataShift 16
89#define EhnMIIpmdShift 6 /* 7016 only */
90#define EhnMIIregShift 11
91#define EhnMIIreq 0x0010
92#define EhnMIInotDone 0x0010
93
94/* Write/read MMIO register */
95#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98#define SIS_R8(reg) readb (ioaddr + (reg))
99#define SIS_R16(reg) readw (ioaddr + (reg))
100#define SIS_R32(reg) readl (ioaddr + (reg))
101
102#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
103
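SIS_PCI_COMMIT() is the usual posted-write flush: MMIO writes issued through writel() may sit in bridge buffers, and a read from any register on the same device forces them to complete. Typical use (the written value is illustrative, not taken from this file):

	SIS_W32(IntrControl, 0x8000);	/* illustrative control write	*/
	SIS_PCI_COMMIT();		/* read back to flush the write	*/
	udelay(50);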
104enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
137};
138
139enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
155
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
161
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
165
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
173
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
177
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
181
182 /* StationControl */
183 _1000bpsF = 0x1c00,
184 _1000bpsH = 0x0c00,
185 _100bpsF = 0x1800,
186 _100bpsH = 0x0800,
187 _10bpsF = 0x1400,
188 _10bpsH = 0x0400,
189
190 LinkStatus = 0x02, // unused
191 FullDup = 0x01, // unused
192
193 /* TBICSRBit */
194 TBILinkOK = 0x02000000, // unused
195};
196
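/*
 * Illustrative sketch, not part of the original driver: decode the
 * StationControl speed/duplex field defined above into a readable
 * string. The 0x1c00 mask is an assumption read off the _10bps/_100bps/
 * _1000bps values; no public vendor documentation confirms it.
 */
static inline const char *sis190_speed_str(u16 stctl)
{
	switch (stctl & 0x1c00) {
	case _1000bpsF: return "1000 Mbps Full Duplex";
	case _1000bpsH: return "1000 Mbps Half Duplex";
	case _100bpsF: return "100 Mbps Full Duplex";
	case _100bpsH: return "100 Mbps Half Duplex";
	case _10bpsF: return "10 Mbps Full Duplex";
	case _10bpsH: return "10 Mbps Half Duplex";
	default: return "unknown";
	}
}
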
197struct TxDesc {
198 __le32 PSize;
199 __le32 status;
200 __le32 addr;
201 __le32 size;
202};
203
204struct RxDesc {
205 __le32 PSize;
206 __le32 status;
207 __le32 addr;
208 __le32 size;
209};
210
211enum _DescStatusBit {
212 /* _Desc.status */
213 OWNbit = 0x80000000, // RXOWN/TXOWN
214 INTbit = 0x40000000, // RXINT/TXINT
215 CRCbit = 0x00020000, // CRCOFF/CRCEN
216 PADbit = 0x00010000, // PREADD/PADEN
217 /* _Desc.size */
218 RingEnd = 0x80000000,
219 /* TxDesc.status */
220 LSEN = 0x08000000, // TSO ? -- FR
221 IPCS = 0x04000000,
222 TCPCS = 0x02000000,
223 UDPCS = 0x01000000,
224 BSTEN = 0x00800000,
225 EXTEN = 0x00400000,
226 DEFEN = 0x00200000,
227 BKFEN = 0x00100000,
228 CRSEN = 0x00080000,
229 COLEN = 0x00040000,
230 THOL3 = 0x30000000,
231 THOL2 = 0x20000000,
232 THOL1 = 0x10000000,
233 THOL0 = 0x00000000,
234 /* RxDesc.status */
235 IPON = 0x20000000,
236 TCPON = 0x10000000,
237 UDPON = 0x08000000,
238 Wakup = 0x00400000,
239 Magic = 0x00200000,
240 Pause = 0x00100000,
241 DEFbit = 0x00200000,
242 BCAST = 0x000c0000,
243 MCAST = 0x00080000,
244 UCAST = 0x00040000,
245 /* RxDesc.PSize */
246 TAGON = 0x80000000,
247 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
248 ABORT = 0x00800000,
249 SHORT = 0x00400000,
250 LIMIT = 0x00200000,
251 MIIER = 0x00100000,
252 OVRUN = 0x00080000,
253 NIBON = 0x00040000,
254 COLON = 0x00020000,
255 CRCOK = 0x00010000,
256 RxSizeMask = 0x0000ffff
257 /*
258 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
259 * provide two (unused with Linux) Tx queues. No publicly
260 * available documentation, alas.
261 */
262};
263
264enum sis190_eeprom_access_register_bits {
265 EECS = 0x00000001, // unused
266 EECLK = 0x00000002, // unused
267 EEDO = 0x00000008, // unused
268 EEDI = 0x00000004, // unused
269 EEREQ = 0x00000080,
270 EEROP = 0x00000200,
271 EEWOP = 0x00000100 // unused
272};
273
274/* EEPROM Addresses */
275enum sis190_eeprom_address {
276 EEPROMSignature = 0x00,
277 EEPROMCLK = 0x01, // unused
278 EEPROMInfo = 0x02,
279 EEPROMMACAddr = 0x03
280};
281
282struct sis190_private {
283 void __iomem *mmio_addr;
284 struct pci_dev *pci_dev;
285 struct net_device_stats stats;
286 spinlock_t lock;
287 u32 rx_buf_sz;
288 u32 cur_rx;
289 u32 cur_tx;
290 u32 dirty_rx;
291 u32 dirty_tx;
292 dma_addr_t rx_dma;
293 dma_addr_t tx_dma;
294 struct RxDesc *RxDescRing;
295 struct TxDesc *TxDescRing;
296 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
297 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
298 struct work_struct phy_task;
299 struct timer_list timer;
300 u32 msg_enable;
301 struct mii_if_info mii_if;
302 struct list_head first_phy;
303};
304
305struct sis190_phy {
306 struct list_head list;
307 int phy_id;
308 u16 id[2];
309 u16 status;
310 u8 type;
311};
312
313enum sis190_phy_type {
314 UNKNOWN = 0x00,
315 HOME = 0x01,
316 LAN = 0x02,
317 MIX = 0x03
318};
319
320static struct mii_chip_info {
321 const char *name;
322 u16 id[2];
323 unsigned int type;
324} mii_chip_table[] = {
325 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN },
326 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN },
327 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN },
328 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN },
329 { NULL, }
330};
331
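/*
 * Sketch of the table lookup performed later in sis190_init_phy()
 * (hypothetical helper, mirroring that code): a PHY is matched on
 * PHYSID1 plus PHYSID2 with the low 4 revision bits masked off.
 */
static struct mii_chip_info *sis190_lookup_phy(u16 id0, u16 id1)
{
	struct mii_chip_info *p;

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == id0) && (p->id[1] == (id1 & 0xfff0)))
			return p;
	}
	return NULL;
}
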
332static const struct {
333 const char *name;
334 u8 version; /* depend on docs */
335 u32 RxConfigMask; /* clear the bits supported by this chip */
336} sis_chip_info[] = {
337 { DRV_NAME, 0x00, 0xff7e1880, },
338};
339
340static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
342 { 0, },
343};
344
345MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
346
347static int rx_copybreak = 200;
348
349static struct {
350 u32 msg_enable;
351} debug = { -1 };
352
353MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
354module_param(rx_copybreak, int, 0);
355MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
356module_param_named(debug, debug.msg_enable, int, 0);
357MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
358MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
359MODULE_VERSION(DRV_VERSION);
360MODULE_LICENSE("GPL");
361
362static const u32 sis190_intr_mask =
363 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
364
365/*
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64 element hash table based on the Ethernet CRC.
368 */
369static int multicast_filter_limit = 32;
370
371static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
372{
373 unsigned int i;
374
375 SIS_W32(GMIIControl, ctl);
376
377 msleep(1);
378
379 for (i = 0; i < 100; i++) {
380 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
381 break;
382 msleep(1);
383 }
384
385 if (i > 99)
386 printk(KERN_ERR PFX "PHY command failed!\n");
387}
388
389static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
390{
391 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
392 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
393 (((u32) val) << EhnMIIdataShift));
394}
395
396static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
397{
398 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
399 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
400
401 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
402}
403
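/*
 * Sketch of the GMIIControl command word assembled above (the layout is
 * an assumption read off the Ehn* constants, not vendor documentation):
 * data in bits 31..16, register in 15..11, PHY address in 10..6, the
 * read/write flag in bit 5 and the request/busy flag in bit 4.
 */
static inline u32 sis190_mii_cmd(u32 rw, int phy_id, int reg, u16 val)
{
	return EhnMIIreq | rw | (((u32) reg) << EhnMIIregShift) |
	       (((u32) phy_id) << EhnMIIpmdShift) |
	       (((u32) val) << EhnMIIdataShift);
}
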
404static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
405{
406 struct sis190_private *tp = netdev_priv(dev);
407
408 mdio_write(tp->mmio_addr, phy_id, reg, val);
409}
410
411static int __mdio_read(struct net_device *dev, int phy_id, int reg)
412{
413 struct sis190_private *tp = netdev_priv(dev);
414
415 return mdio_read(tp->mmio_addr, phy_id, reg);
416}
417
418static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
419{
420 mdio_read(ioaddr, phy_id, reg);
421 return mdio_read(ioaddr, phy_id, reg);
422}
423
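/*
 * Why read twice: several MII status bits (e.g. BMSR_LSTATUS) are
 * latched-low, so the first read reports events recorded since the
 * previous read while the second read returns the current state.
 */
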
424static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
425{
426 u16 data = 0xffff;
427 unsigned int i;
428
429 if (!(SIS_R32(ROMControl) & 0x0002))
430 return 0;
431
432 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
433
434 for (i = 0; i < 200; i++) {
435 if (!(SIS_R32(ROMInterface) & EEREQ)) {
436 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
437 break;
438 }
439 msleep(1);
440 }
441
442 return data;
443}
444
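/*
 * Illustrative sketch, not part of the original driver: dump the
 * documented EEPROM words through sis190_read_eeprom(). Assumes ioaddr
 * maps the chip registers and that an EEPROM is present.
 */
static void __devinit sis190_dump_eeprom(void __iomem *ioaddr)
{
	u32 reg;

	for (reg = EEPROMSignature; reg <= EEPROMMACAddr + 2; reg++)
		printk(KERN_DEBUG PFX "EEPROM[%02x] = %04x.\n",
		       reg, sis190_read_eeprom(ioaddr, reg));
}
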
445static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
446{
447 SIS_W32(IntrMask, 0x00);
448 SIS_W32(IntrStatus, 0xffffffff);
449 SIS_PCI_COMMIT();
450}
451
452static void sis190_asic_down(void __iomem *ioaddr)
453{
454 /* Stop the chip's Tx and Rx DMA processes. */
455
456 SIS_W32(TxControl, 0x1a00);
457 SIS_W32(RxControl, 0x1a00);
458
459 sis190_irq_mask_and_ack(ioaddr);
460}
461
462static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
463{
464 desc->size |= cpu_to_le32(RingEnd);
465}
466
467static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
468{
469 u32 eor = le32_to_cpu(desc->size) & RingEnd;
470
471 desc->PSize = 0x0;
472 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 wmb();
474 desc->status = cpu_to_le32(OWNbit | INTbit);
475}
476
477static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
478 u32 rx_buf_sz)
479{
480 desc->addr = cpu_to_le32(mapping);
481 sis190_give_to_asic(desc, rx_buf_sz);
482}
483
484static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
485{
486 desc->PSize = 0x0;
487 desc->addr = 0xdeadbeef;
488 desc->size &= cpu_to_le32(RingEnd);
489 wmb();
490 desc->status = 0x0;
491}
492
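/*
 * The descriptor ownership protocol implied by the helpers above, as a
 * hypothetical one-liner: OWNbit hands a descriptor to the NIC, and the
 * driver may touch it again only once the NIC has cleared the bit. The
 * wmb() calls ensure the NIC never sees OWNbit set before the rest of
 * the descriptor is valid.
 */
static inline int sis190_owned_by_nic(const struct RxDesc *desc)
{
	return !!(le32_to_cpu(desc->status) & OWNbit);
}
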
493static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
494 struct RxDesc *desc, u32 rx_buf_sz)
495{
496 struct sk_buff *skb;
497 dma_addr_t mapping;
498 int ret = 0;
499
500 skb = dev_alloc_skb(rx_buf_sz);
501 if (!skb)
502 goto err_out;
503
504 *sk_buff = skb;
505
506 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
507 PCI_DMA_FROMDEVICE);
508
509 sis190_map_to_asic(desc, mapping, rx_buf_sz);
510out:
511 return ret;
512
513err_out:
514 ret = -ENOMEM;
515 sis190_make_unusable_by_asic(desc);
516 goto out;
517}
518
519static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
520 u32 start, u32 end)
521{
522 u32 cur;
523
524 for (cur = start; cur < end; cur++) {
525 int ret, i = cur % NUM_RX_DESC;
526
527 if (tp->Rx_skbuff[i])
528 continue;
529
530 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
531 tp->RxDescRing + i, tp->rx_buf_sz);
532 if (ret < 0)
533 break;
534 }
535 return cur - start;
536}
537
538static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
539 struct RxDesc *desc, int rx_buf_sz)
540{
541 int ret = -1;
542
543 if (pkt_size < rx_copybreak) {
544 struct sk_buff *skb;
545
546 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
547 if (skb) {
548 skb_reserve(skb, NET_IP_ALIGN);
549 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
550 *sk_buff = skb;
551 sis190_give_to_asic(desc, rx_buf_sz);
552 ret = 0;
553 }
554 }
555 return ret;
556}
557
558static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
559{
560#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
561
562 if ((status & CRCOK) && !(status & ErrMask))
563 return 0;
564
565 if (!(status & CRCOK))
566 stats->rx_crc_errors++;
567 else if (status & OVRUN)
568 stats->rx_over_errors++;
569 else if (status & (SHORT | LIMIT))
570 stats->rx_length_errors++;
571 else if (status & (MIIER | NIBON | COLON))
572 stats->rx_frame_errors++;
573
574 stats->rx_errors++;
575 return -1;
576}
577
578static int sis190_rx_interrupt(struct net_device *dev,
579 struct sis190_private *tp, void __iomem *ioaddr)
580{
581 struct net_device_stats *stats = &tp->stats;
582 u32 rx_left, cur_rx = tp->cur_rx;
583 u32 delta, count;
584
585 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
586 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
587
588 for (; rx_left > 0; rx_left--, cur_rx++) {
589 unsigned int entry = cur_rx % NUM_RX_DESC;
590 struct RxDesc *desc = tp->RxDescRing + entry;
591 u32 status;
592
593 if (le32_to_cpu(desc->status) & OWNbit)
594 break;
595
596 status = le32_to_cpu(desc->PSize);
597
598 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
599 // status);
600
601 if (sis190_rx_pkt_err(status, stats) < 0)
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
603 else {
604 struct sk_buff *skb = tp->Rx_skbuff[entry];
605 int pkt_size = (status & RxSizeMask) - 4;
606 void (*pci_action)(struct pci_dev *, dma_addr_t,
607 size_t, int) = pci_dma_sync_single_for_device;
608
609 if (unlikely(pkt_size > tp->rx_buf_sz)) {
610 net_intr(tp, KERN_INFO
611 "%s: (frag) status = %08x.\n",
612 dev->name, status);
613 stats->rx_dropped++;
614 stats->rx_length_errors++;
615 sis190_give_to_asic(desc, tp->rx_buf_sz);
616 continue;
617 }
618
619 pci_dma_sync_single_for_cpu(tp->pci_dev,
620 le32_to_cpu(desc->addr), tp->rx_buf_sz,
621 PCI_DMA_FROMDEVICE);
622
623 if (sis190_try_rx_copy(&skb, pkt_size, desc,
624 tp->rx_buf_sz)) {
625 pci_action = pci_unmap_single;
626 tp->Rx_skbuff[entry] = NULL;
627 sis190_make_unusable_by_asic(desc);
628 }
629
630 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
631 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
632
633 skb->dev = dev;
634 skb_put(skb, pkt_size);
635 skb->protocol = eth_type_trans(skb, dev);
636
637 sis190_rx_skb(skb);
638
639 dev->last_rx = jiffies;
640 stats->rx_packets++;
641 stats->rx_bytes += pkt_size;
642 if ((status & BCAST) == MCAST)
643 stats->multicast++;
644 }
645 }
646 count = cur_rx - tp->cur_rx;
647 tp->cur_rx = cur_rx;
648
649 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
650 if (!delta && count && netif_msg_intr(tp))
651 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
652 tp->dirty_rx += delta;
653
654 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
655 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
656
657 return count;
658}
659
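/*
 * Note on sis190_rx_interrupt() above: cur_rx and dirty_rx are
 * free-running u32 counters (reduced modulo NUM_RX_DESC only when
 * indexing the ring), so "NUM_RX_DESC + dirty_rx - cur_rx" is the
 * number of descriptors still posted to the NIC, i.e. the most packets
 * one pass can reap, and it stays correct across 32-bit wraparound.
 */
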
660static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
661 struct TxDesc *desc)
662{
663 unsigned int len;
664
665 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
666
667 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
668
669 memset(desc, 0x00, sizeof(*desc));
670}
671
672static void sis190_tx_interrupt(struct net_device *dev,
673 struct sis190_private *tp, void __iomem *ioaddr)
674{
675 u32 pending, dirty_tx = tp->dirty_tx;
676 /*
677 * The snapshot below would not be needed if waking the queue too
678 * early were harmless (hint: think preempt and unlocked SMP systems).
679 */
680 unsigned int queue_stopped;
681
682 smp_rmb();
683 pending = tp->cur_tx - dirty_tx;
684 queue_stopped = (pending == NUM_TX_DESC);
685
686 for (; pending; pending--, dirty_tx++) {
687 unsigned int entry = dirty_tx % NUM_TX_DESC;
688 struct TxDesc *txd = tp->TxDescRing + entry;
689 struct sk_buff *skb;
690
691 if (le32_to_cpu(txd->status) & OWNbit)
692 break;
693
694 skb = tp->Tx_skbuff[entry];
695
696 tp->stats.tx_packets++;
697 tp->stats.tx_bytes += skb->len;
698
699 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
700 tp->Tx_skbuff[entry] = NULL;
701 dev_kfree_skb_irq(skb);
702 }
703
704 if (tp->dirty_tx != dirty_tx) {
705 tp->dirty_tx = dirty_tx;
706 smp_wmb();
707 if (queue_stopped)
708 netif_wake_queue(dev);
709 }
710}
711
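/*
 * The same free-running counter scheme is used on the Tx side above:
 * "cur_tx - dirty_tx" is the number of outstanding descriptors, and
 * comparing it against NUM_TX_DESC detects a full (stopped) queue
 * without any explicit wraparound handling.
 */
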
712/*
713 * The interrupt handler does all of the Rx thread work and cleans up after
714 * the Tx thread.
715 */
716static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
717{
718 struct net_device *dev = __dev;
719 struct sis190_private *tp = netdev_priv(dev);
720 void __iomem *ioaddr = tp->mmio_addr;
721 unsigned int handled = 0;
722 u32 status;
723
724 status = SIS_R32(IntrStatus);
725
726 if ((status == 0xffffffff) || !status)
727 goto out;
728
729 handled = 1;
730
731 if (unlikely(!netif_running(dev))) {
732 sis190_asic_down(ioaddr);
733 goto out;
734 }
735
736 SIS_W32(IntrStatus, status);
737
738 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
739
740 if (status & LinkChange) {
741 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
742 schedule_work(&tp->phy_task);
743 }
744
745 if (status & RxQInt)
746 sis190_rx_interrupt(dev, tp, ioaddr);
747
748 if (status & TxQ0Int)
749 sis190_tx_interrupt(dev, tp, ioaddr);
750out:
751 return IRQ_RETVAL(handled);
752}
753
754#ifdef CONFIG_NET_POLL_CONTROLLER
755static void sis190_netpoll(struct net_device *dev)
756{
757 struct sis190_private *tp = netdev_priv(dev);
758 struct pci_dev *pdev = tp->pci_dev;
759
760 disable_irq(pdev->irq);
761 sis190_interrupt(pdev->irq, dev, NULL);
762 enable_irq(pdev->irq);
763}
764#endif
765
766static void sis190_free_rx_skb(struct sis190_private *tp,
767 struct sk_buff **sk_buff, struct RxDesc *desc)
768{
769 struct pci_dev *pdev = tp->pci_dev;
770
771 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
772 PCI_DMA_FROMDEVICE);
773 dev_kfree_skb(*sk_buff);
774 *sk_buff = NULL;
775 sis190_make_unusable_by_asic(desc);
776}
777
778static void sis190_rx_clear(struct sis190_private *tp)
779{
780 unsigned int i;
781
782 for (i = 0; i < NUM_RX_DESC; i++) {
783 if (!tp->Rx_skbuff[i])
784 continue;
785 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
786 }
787}
788
789static void sis190_init_ring_indexes(struct sis190_private *tp)
790{
791 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
792}
793
794static int sis190_init_ring(struct net_device *dev)
795{
796 struct sis190_private *tp = netdev_priv(dev);
797
798 sis190_init_ring_indexes(tp);
799
800 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
801 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
802
803 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
804 goto err_rx_clear;
805
806 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
807
808 return 0;
809
810err_rx_clear:
811 sis190_rx_clear(tp);
812 return -ENOMEM;
813}
814
815static void sis190_set_rx_mode(struct net_device *dev)
816{
817 struct sis190_private *tp = netdev_priv(dev);
818 void __iomem *ioaddr = tp->mmio_addr;
819 unsigned long flags;
820 u32 mc_filter[2]; /* Multicast hash filter */
821 u16 rx_mode;
822
823 if (dev->flags & IFF_PROMISC) {
824 /* Unconditionally log net taps. */
825 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
826 dev->name);
827 rx_mode =
828 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
829 AcceptAllPhys;
830 mc_filter[1] = mc_filter[0] = 0xffffffff;
831 } else if ((dev->mc_count > multicast_filter_limit) ||
832 (dev->flags & IFF_ALLMULTI)) {
833 /* Too many to filter perfectly -- accept all multicasts. */
834 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
835 mc_filter[1] = mc_filter[0] = 0xffffffff;
836 } else {
837 struct dev_mc_list *mclist;
838 unsigned int i;
839
840 rx_mode = AcceptBroadcast | AcceptMyPhys;
841 mc_filter[1] = mc_filter[0] = 0;
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) {
844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast;
848 }
849 }
850
851 spin_lock_irqsave(&tp->lock, flags);
852
853 SIS_W16(RxMacControl, rx_mode | 0x2);
854 SIS_W32(RxHashTable, mc_filter[0]);
855 SIS_W32(RxHashTable + 4, mc_filter[1]);
856
857 spin_unlock_irqrestore(&tp->lock, flags);
858}
859
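/*
 * The perfect-filter path above, condensed into a hypothetical helper:
 * each multicast address selects one of the 64 RxHashTable bits using
 * the top 6 bits of its Ethernet CRC (the same ether_crc() hashing as
 * the loop above).
 */
static inline void sis190_hash_one(u32 *mc_filter, const u8 *addr)
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
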
860static void sis190_soft_reset(void __iomem *ioaddr)
861{
862 SIS_W32(IntrControl, 0x8000);
863 SIS_PCI_COMMIT();
864 msleep(1);
865 SIS_W32(IntrControl, 0x0);
866 sis190_asic_down(ioaddr);
867 msleep(1);
868}
869
870static void sis190_hw_start(struct net_device *dev)
871{
872 struct sis190_private *tp = netdev_priv(dev);
873 void __iomem *ioaddr = tp->mmio_addr;
874
875 sis190_soft_reset(ioaddr);
876
877 SIS_W32(TxDescStartAddr, tp->tx_dma);
878 SIS_W32(RxDescStartAddr, tp->rx_dma);
879
880 SIS_W32(IntrStatus, 0xffffffff);
881 SIS_W32(IntrMask, 0x0);
882 /*
883 * Default is 100Mbps.
884 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
885 */
886 SIS_W16(StationControl, 0x1901);
887 SIS_W32(GMIIControl, 0x0);
888 SIS_W32(TxMacControl, 0x60);
889 SIS_W16(RxMacControl, 0x02);
890 SIS_W32(RxHashTable, 0x0);
891 SIS_W32(0x6c, 0x0);
892 SIS_W32(RxWolCtrl, 0x0);
893 SIS_W32(RxWolData, 0x0);
894
895 SIS_PCI_COMMIT();
896
897 sis190_set_rx_mode(dev);
898
899 /* Enable all known interrupts by setting the interrupt mask. */
900 SIS_W32(IntrMask, sis190_intr_mask);
901
902 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
903 SIS_W32(RxControl, 0x1a1d);
904
905 netif_start_queue(dev);
906}
907
908static void sis190_phy_task(void * data)
909{
910 struct net_device *dev = data;
911 struct sis190_private *tp = netdev_priv(dev);
912 void __iomem *ioaddr = tp->mmio_addr;
913 int phy_id = tp->mii_if.phy_id;
914 u16 val;
915
916 rtnl_lock();
917
918 val = mdio_read(ioaddr, phy_id, MII_BMCR);
919 if (val & BMCR_RESET) {
920 // FIXME: needlessly high ? -- FR 02/07/2005
921 mod_timer(&tp->timer, jiffies + HZ/10);
922 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
923 BMSR_ANEGCOMPLETE)) {
924 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
925 dev->name);
926 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
927 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
928 } else {
929 /* Rejoice ! */
930 struct {
931 int val;
932 const char *msg;
933 u16 ctl;
934 } reg31[] = {
935 { LPA_1000XFULL | LPA_SLCT,
936 "1000 Mbps Full Duplex",
937 0x01 | _1000bpsF },
938 { LPA_1000XHALF | LPA_SLCT,
939 "1000 Mbps Half Duplex",
940 0x01 | _1000bpsH },
941 { LPA_100FULL,
942 "100 Mbps Full Duplex",
943 0x01 | _100bpsF },
944 { LPA_100HALF,
945 "100 Mbps Half Duplex",
946 0x01 | _100bpsH },
947 { LPA_10FULL,
948 "10 Mbps Full Duplex",
949 0x01 | _10bpsF },
950 { LPA_10HALF,
951 "10 Mbps Half Duplex",
952 0x01 | _10bpsH },
953 { 0, "unknown", 0x0000 }
954 }, *p;
955 u16 adv;
956
957 val = mdio_read(ioaddr, phy_id, 0x1f);
958 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
959
960 val = mdio_read(ioaddr, phy_id, MII_LPA);
961 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
962 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
963 dev->name, val, adv);
964
965 val &= adv;
966
967 for (p = reg31; p->ctl; p++) {
968 if ((val & p->val) == p->val)
969 break;
970 }
971 if (p->ctl)
972 SIS_W16(StationControl, p->ctl);
973 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
974 p->msg);
975 netif_carrier_on(dev);
976 }
977
978 rtnl_unlock();
979}
980
981static void sis190_phy_timer(unsigned long __opaque)
982{
983 struct net_device *dev = (struct net_device *)__opaque;
984 struct sis190_private *tp = netdev_priv(dev);
985
986 if (likely(netif_running(dev)))
987 schedule_work(&tp->phy_task);
988}
989
990static inline void sis190_delete_timer(struct net_device *dev)
991{
992 struct sis190_private *tp = netdev_priv(dev);
993
994 del_timer_sync(&tp->timer);
995}
996
997static inline void sis190_request_timer(struct net_device *dev)
998{
999 struct sis190_private *tp = netdev_priv(dev);
1000 struct timer_list *timer = &tp->timer;
1001
1002 init_timer(timer);
1003 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1004 timer->data = (unsigned long)dev;
1005 timer->function = sis190_phy_timer;
1006 add_timer(timer);
1007}
1008
1009static void sis190_set_rxbufsize(struct sis190_private *tp,
1010 struct net_device *dev)
1011{
1012 unsigned int mtu = dev->mtu;
1013
1014 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1015 /* RxDesc->size has a licence to kill the lower bits */
1016 if (tp->rx_buf_sz & 0x07) {
1017 tp->rx_buf_sz += 8;
1018 tp->rx_buf_sz &= RX_BUF_MASK;
1019 }
1020}
1021
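/*
 * Worked example for the rounding above: a standard 1500 byte MTU fits
 * in RX_BUF_SIZE (1536) unchanged. For a hypothetical 7000 byte MTU,
 * 7000 + ETH_HLEN + 8 = 7022, which is not 8-byte aligned, so it is
 * bumped to 7030 and masked with RX_BUF_MASK (0xfff8), giving a final
 * rx_buf_sz of 7024 - a multiple of 8, as RxDesc->size requires.
 */
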
1022static int sis190_open(struct net_device *dev)
1023{
1024 struct sis190_private *tp = netdev_priv(dev);
1025 struct pci_dev *pdev = tp->pci_dev;
1026 int rc = -ENOMEM;
1027
1028 sis190_set_rxbufsize(tp, dev);
1029
1030 /*
1031 * Rx and Tx descriptors need 256 bytes alignment.
1032 * pci_alloc_consistent() guarantees a stronger alignment.
1033 */
1034 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1035 if (!tp->TxDescRing)
1036 goto out;
1037
1038 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1039 if (!tp->RxDescRing)
1040 goto err_free_tx_0;
1041
1042 rc = sis190_init_ring(dev);
1043 if (rc < 0)
1044 goto err_free_rx_1;
1045
1046 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1047
1048 sis190_request_timer(dev);
1049
1050 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
1051 if (rc < 0)
1052 goto err_release_timer_2;
1053
1054 sis190_hw_start(dev);
1055out:
1056 return rc;
1057
1058err_release_timer_2:
1059 sis190_delete_timer(dev);
1060 sis190_rx_clear(tp);
1061err_free_rx_1:
1062 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1063 tp->rx_dma);
1064err_free_tx_0:
1065 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1066 tp->tx_dma);
1067 goto out;
1068}
1069
1070static void sis190_tx_clear(struct sis190_private *tp)
1071{
1072 unsigned int i;
1073
1074 for (i = 0; i < NUM_TX_DESC; i++) {
1075 struct sk_buff *skb = tp->Tx_skbuff[i];
1076
1077 if (!skb)
1078 continue;
1079
1080 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1081 tp->Tx_skbuff[i] = NULL;
1082 dev_kfree_skb(skb);
1083
1084 tp->stats.tx_dropped++;
1085 }
1086 tp->cur_tx = tp->dirty_tx = 0;
1087}
1088
1089static void sis190_down(struct net_device *dev)
1090{
1091 struct sis190_private *tp = netdev_priv(dev);
1092 void __iomem *ioaddr = tp->mmio_addr;
1093 unsigned int poll_locked = 0;
1094
1095 sis190_delete_timer(dev);
1096
1097 netif_stop_queue(dev);
1098
1099 flush_scheduled_work();
1100
1101 do {
1102 spin_lock_irq(&tp->lock);
1103
1104 sis190_asic_down(ioaddr);
1105
1106 spin_unlock_irq(&tp->lock);
1107
1108 synchronize_irq(dev->irq);
1109
1110 if (!poll_locked) {
1111 netif_poll_disable(dev);
1112 poll_locked++;
1113 }
1114
1115 synchronize_sched();
1116
1117 } while (SIS_R32(IntrMask));
1118
1119 sis190_tx_clear(tp);
1120 sis190_rx_clear(tp);
1121}
1122
1123static int sis190_close(struct net_device *dev)
1124{
1125 struct sis190_private *tp = netdev_priv(dev);
1126 struct pci_dev *pdev = tp->pci_dev;
1127
1128 sis190_down(dev);
1129
1130 free_irq(dev->irq, dev);
1131
1132 netif_poll_enable(dev);
1133
1134 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1135 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1136
1137 tp->TxDescRing = NULL;
1138 tp->RxDescRing = NULL;
1139
1140 return 0;
1141}
1142
1143static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1144{
1145 struct sis190_private *tp = netdev_priv(dev);
1146 void __iomem *ioaddr = tp->mmio_addr;
1147 u32 len, entry, dirty_tx;
1148 struct TxDesc *desc;
1149 dma_addr_t mapping;
1150
1151 if (unlikely(skb->len < ETH_ZLEN)) {
1152 skb = skb_padto(skb, ETH_ZLEN);
1153 if (!skb) {
1154 tp->stats.tx_dropped++;
1155 goto out;
1156 }
1157 len = ETH_ZLEN;
1158 } else {
1159 len = skb->len;
1160 }
1161
1162 entry = tp->cur_tx % NUM_TX_DESC;
1163 desc = tp->TxDescRing + entry;
1164
1165 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1166 netif_stop_queue(dev);
1167 net_tx_err(tp, KERN_ERR PFX
1168 "%s: BUG! Tx Ring full when queue awake!\n",
1169 dev->name);
1170 return NETDEV_TX_BUSY;
1171 }
1172
1173 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1174
1175 tp->Tx_skbuff[entry] = skb;
1176
1177 desc->PSize = cpu_to_le32(len);
1178 desc->addr = cpu_to_le32(mapping);
1179
1180 desc->size = cpu_to_le32(len);
1181 if (entry == (NUM_TX_DESC - 1))
1182 desc->size |= cpu_to_le32(RingEnd);
1183
1184 wmb();
1185
1186 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1187
1188 tp->cur_tx++;
1189
1190 smp_wmb();
1191
1192 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1193
1194 dev->trans_start = jiffies;
1195
1196 dirty_tx = tp->dirty_tx;
1197 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1198 netif_stop_queue(dev);
1199 smp_rmb();
1200 if (dirty_tx != tp->dirty_tx)
1201 netif_wake_queue(dev);
1202 }
1203out:
1204 return NETDEV_TX_OK;
1205}
1206
1207static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1208{
1209 struct sis190_private *tp = netdev_priv(dev);
1210
1211 return &tp->stats;
1212}
1213
1214static void sis190_free_phy(struct list_head *first_phy)
1215{
1216 struct sis190_phy *cur, *next;
1217
1218 list_for_each_entry_safe(cur, next, first_phy, list) {
1219 kfree(cur);
1220 }
1221}
1222
1223/**
1224 * sis190_default_phy - Select default PHY for sis190 mac.
1225 * @dev: the net device to probe for
1226 *
1227 * Select the first detected PHY with its link up as the default.
1228 * If no PHY has its link up, select a PHY of type HOME as default.
1229 * If no HOME PHY exists, select a LAN PHY.
1230 */
1231static u16 sis190_default_phy(struct net_device *dev)
1232{
1233 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1234 struct sis190_private *tp = netdev_priv(dev);
1235 struct mii_if_info *mii_if = &tp->mii_if;
1236 void __iomem *ioaddr = tp->mmio_addr;
1237 u16 status;
1238
1239 phy_home = phy_default = phy_lan = NULL;
1240
1241 list_for_each_entry(phy, &tp->first_phy, list) {
1242 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1243
1244 // Link up, no default PHY selected yet and not a ghost PHY.
1245 if ((status & BMSR_LSTATUS) &&
1246 !phy_default &&
1247 (phy->type != UNKNOWN)) {
1248 phy_default = phy;
1249 } else {
1250 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1251 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1252 status | BMCR_ANENABLE | BMCR_ISOLATE);
1253 if (phy->type == HOME)
1254 phy_home = phy;
1255 else if (phy->type == LAN)
1256 phy_lan = phy;
1257 }
1258 }
1259
1260 if (!phy_default) {
1261 if (phy_home)
1262 phy_default = phy_home;
1263 else if (phy_lan)
1264 phy_default = phy_lan;
1265 else
1266 phy_default = list_entry(tp->first_phy.next,
1267 struct sis190_phy, list);
1268 }
1269
1270 if (mii_if->phy_id != phy_default->phy_id) {
1271 mii_if->phy_id = phy_default->phy_id;
1272 net_probe(tp, KERN_INFO
1273 "%s: Using transceiver at address %d as default.\n",
1274 pci_name(tp->pci_dev), mii_if->phy_id);
1275 }
1276
1277 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1278 status &= (~BMCR_ISOLATE);
1279
1280 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1281 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1282
1283 return status;
1284}
1285
1286static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1287 struct sis190_phy *phy, unsigned int phy_id,
1288 u16 mii_status)
1289{
1290 void __iomem *ioaddr = tp->mmio_addr;
1291 struct mii_chip_info *p;
1292
1293 INIT_LIST_HEAD(&phy->list);
1294 phy->status = mii_status;
1295 phy->phy_id = phy_id;
1296
1297 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1298 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1299
1300 for (p = mii_chip_table; p->type; p++) {
1301 if ((p->id[0] == phy->id[0]) &&
1302 (p->id[1] == (phy->id[1] & 0xfff0))) {
1303 break;
1304 }
1305 }
1306
1307 if (p->id[1]) {
1308 phy->type = (p->type == MIX) ?
1309 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1310 LAN : HOME) : p->type;
1311 } else
1312 phy->type = UNKNOWN;
1313
1314 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1315 pci_name(tp->pci_dev),
1316 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1317}
1318
1319/**
1320 * sis190_mii_probe - Probe MII PHY for sis190
1321 * @dev: the net device to probe for
1322 *
1323 * Search all 32 possible MII PHY addresses.
1324 * Identify and set the current PHY if one is found,
1325 * return an error if none is found.
1326 */
1327static int __devinit sis190_mii_probe(struct net_device *dev)
1328{
1329 struct sis190_private *tp = netdev_priv(dev);
1330 struct mii_if_info *mii_if = &tp->mii_if;
1331 void __iomem *ioaddr = tp->mmio_addr;
1332 int phy_id;
1333 int rc = 0;
1334
1335 INIT_LIST_HEAD(&tp->first_phy);
1336
1337 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1338 struct sis190_phy *phy;
1339 u16 status;
1340
1341 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1342
1343 // Try next mii if the current one is not accessible.
1344 if (status == 0xffff || status == 0x0000)
1345 continue;
1346
1347 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1348 if (!phy) {
1349 sis190_free_phy(&tp->first_phy);
1350 rc = -ENOMEM;
1351 goto out;
1352 }
1353
1354 sis190_init_phy(dev, tp, phy, phy_id, status);
1355
1356 list_add(&phy->list, &tp->first_phy);
1357 }
1358
1359 if (list_empty(&tp->first_phy)) {
1360 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1361 pci_name(tp->pci_dev));
1362 rc = -EIO;
1363 goto out;
1364 }
1365
1366 /* Select default PHY for mac */
1367 sis190_default_phy(dev);
1368
1369 mii_if->dev = dev;
1370 mii_if->mdio_read = __mdio_read;
1371 mii_if->mdio_write = __mdio_write;
1372 mii_if->phy_id_mask = PHY_ID_ANY;
1373 mii_if->reg_num_mask = MII_REG_ANY;
1374out:
1375 return rc;
1376}
1377
1378static void __devexit sis190_mii_remove(struct net_device *dev)
1379{
1380 struct sis190_private *tp = netdev_priv(dev);
1381
1382 sis190_free_phy(&tp->first_phy);
1383}
1384
1385static void sis190_release_board(struct pci_dev *pdev)
1386{
1387 struct net_device *dev = pci_get_drvdata(pdev);
1388 struct sis190_private *tp = netdev_priv(dev);
1389
1390 iounmap(tp->mmio_addr);
1391 pci_release_regions(pdev);
1392 pci_disable_device(pdev);
1393 free_netdev(dev);
1394}
1395
1396static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1397{
1398 struct sis190_private *tp;
1399 struct net_device *dev;
1400 void __iomem *ioaddr;
1401 int rc;
1402
1403 dev = alloc_etherdev(sizeof(*tp));
1404 if (!dev) {
1405 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1406 rc = -ENOMEM;
1407 goto err_out_0;
1408 }
1409
1410 SET_MODULE_OWNER(dev);
1411 SET_NETDEV_DEV(dev, &pdev->dev);
1412
1413 tp = netdev_priv(dev);
1414 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1415
1416 rc = pci_enable_device(pdev);
1417 if (rc < 0) {
1418 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1419 goto err_free_dev_1;
1420 }
1421
1422 rc = -ENODEV;
1423
1424 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1425 net_probe(tp, KERN_ERR "%s: region #0 is not an MMIO resource.\n",
1426 pci_name(pdev));
1427 goto err_pci_disable_2;
1428 }
1429 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1430 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1431 pci_name(pdev));
1432 goto err_pci_disable_2;
1433 }
1434
1435 rc = pci_request_regions(pdev, DRV_NAME);
1436 if (rc < 0) {
1437 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1438 pci_name(pdev));
1439 goto err_pci_disable_2;
1440 }
1441
1442 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1443 if (rc < 0) {
1444 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1445 pci_name(pdev));
1446 goto err_free_res_3;
1447 }
1448
1449 pci_set_master(pdev);
1450
1451 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1452 if (!ioaddr) {
1453 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1454 pci_name(pdev));
1455 rc = -EIO;
1456 goto err_free_res_3;
1457 }
1458
1459 tp->pci_dev = pdev;
1460 tp->mmio_addr = ioaddr;
1461
1462 sis190_irq_mask_and_ack(ioaddr);
1463
1464 sis190_soft_reset(ioaddr);
1465out:
1466 return dev;
1467
1468err_free_res_3:
1469 pci_release_regions(pdev);
1470err_pci_disable_2:
1471 pci_disable_device(pdev);
1472err_free_dev_1:
1473 free_netdev(dev);
1474err_out_0:
1475 dev = ERR_PTR(rc);
1476 goto out;
1477}
1478
1479static void sis190_tx_timeout(struct net_device *dev)
1480{
1481 struct sis190_private *tp = netdev_priv(dev);
1482 void __iomem *ioaddr = tp->mmio_addr;
1483 u8 tmp8;
1484
1485 /* Disable Tx, if not already */
1486 tmp8 = SIS_R8(TxControl);
1487 if (tmp8 & CmdTxEnb)
1488 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1489
1490
1491 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1492 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1493
1494 /* Disable interrupts by clearing the interrupt mask. */
1495 SIS_W32(IntrMask, 0x0000);
1496
1497 /* Stop a shared interrupt from scavenging while we are. */
1498 spin_lock_irq(&tp->lock);
1499 sis190_tx_clear(tp);
1500 spin_unlock_irq(&tp->lock);
1501
1502 /* ...and finally, reset everything. */
1503 sis190_hw_start(dev);
1504
1505 netif_wake_queue(dev);
1506}
1507
1508static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1509 struct net_device *dev)
1510{
1511 struct sis190_private *tp = netdev_priv(dev);
1512 void __iomem *ioaddr = tp->mmio_addr;
1513 u16 sig;
1514 int i;
1515
1516 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1517 pci_name(pdev));
1518
1519 /* Check to see if there is a sane EEPROM */
1520 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1521
1522 if ((sig == 0xffff) || (sig == 0x0000)) {
1523 net_probe(tp, KERN_INFO "%s: EEPROM read error %x.\n",
1524 pci_name(pdev), sig);
1525 return -EIO;
1526 }
1527
1528 /* Get MAC address from EEPROM */
1529 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1530 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1531
1532 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1533 }
1534
1535 return 0;
1536}
1537
1538/**
1539 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1540 * @pdev: PCI device
1541 * @dev: network device to get address for
1542 *
1543 * The SiS965 model stores its MAC address in APC CMOS RAM,
1544 * which is accessed through the ISA bridge.
1545 * The MAC address is read into @dev->dev_addr.
1546 */
1547static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1548 struct net_device *dev)
1549{
1550 struct sis190_private *tp = netdev_priv(dev);
1551 struct pci_dev *isa_bridge;
1552 u8 reg, tmp8;
1553 int i;
1554
1555 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1556 pci_name(pdev));
1557
1558 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1559 if (!isa_bridge) {
1560 net_probe(tp, KERN_INFO "%s: Cannot find ISA bridge.\n",
1561 pci_name(pdev));
1562 return -EIO;
1563 }
1564
1565 /* Enable port 78h & 79h to access APC Registers. */
1566 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1567 reg = (tmp8 & ~0x02);
1568 pci_write_config_byte(isa_bridge, 0x48, reg);
1569 udelay(50);
1570 pci_read_config_byte(isa_bridge, 0x48, &reg);
1571
1572 for (i = 0; i < MAC_ADDR_LEN; i++) {
1573 outb(0x9 + i, 0x78);
1574 dev->dev_addr[i] = inb(0x79);
1575 }
1576
1577 outb(0x12, 0x78);
1578 reg = inb(0x79);
1579
1580 /* Restore the value to ISA Bridge */
1581 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1582 pci_dev_put(isa_bridge);
1583
1584 return 0;
1585}
1586
1587/**
1588 * sis190_init_rxfilter - Initialize the Rx filter
1589 * @dev: network device to initialize
1590 *
1591 * Set receive filter address to our MAC address
1592 * and enable packet filtering.
1593 */
1594static inline void sis190_init_rxfilter(struct net_device *dev)
1595{
1596 struct sis190_private *tp = netdev_priv(dev);
1597 void __iomem *ioaddr = tp->mmio_addr;
1598 u16 ctl;
1599 int i;
1600
1601 ctl = SIS_R16(RxMacControl);
1602 /*
1603 * Disable packet filtering before setting filter.
1604 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1605 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1606 */
1607 SIS_W16(RxMacControl, ctl & ~0x0f00);
1608
1609 for (i = 0; i < MAC_ADDR_LEN; i++)
1610 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1611
1612 SIS_W16(RxMacControl, ctl);
1613 SIS_PCI_COMMIT();
1614}
1615
1616static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1617{
1618 u8 from;
1619
1620 pci_read_config_byte(pdev, 0x73, &from);
1621
1622 return (from & 0x00000001) ?
1623 sis190_get_mac_addr_from_apc(pdev, dev) :
1624 sis190_get_mac_addr_from_eeprom(pdev, dev);
1625}
1626
1627static void sis190_set_speed_auto(struct net_device *dev)
1628{
1629 struct sis190_private *tp = netdev_priv(dev);
1630 void __iomem *ioaddr = tp->mmio_addr;
1631 int phy_id = tp->mii_if.phy_id;
1632 int val;
1633
1634 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1635
1636 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1637
1638 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bits 4:0
1639 // unchanged.
1640 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1641 ADVERTISE_100FULL | ADVERTISE_10FULL |
1642 ADVERTISE_100HALF | ADVERTISE_10HALF);
1643
1644 // Enable 1000 Full Mode.
1645 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1646
1647 // Enable auto-negotiation and restart auto-negotiation.
1648 mdio_write(ioaddr, phy_id, MII_BMCR,
1649 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1650}
1651
1652static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1653{
1654 struct sis190_private *tp = netdev_priv(dev);
1655
1656 return mii_ethtool_gset(&tp->mii_if, cmd);
1657}
1658
1659static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1660{
1661 struct sis190_private *tp = netdev_priv(dev);
1662
1663 return mii_ethtool_sset(&tp->mii_if, cmd);
1664}
1665
1666static void sis190_get_drvinfo(struct net_device *dev,
1667 struct ethtool_drvinfo *info)
1668{
1669 struct sis190_private *tp = netdev_priv(dev);
1670
1671 strcpy(info->driver, DRV_NAME);
1672 strcpy(info->version, DRV_VERSION);
1673 strcpy(info->bus_info, pci_name(tp->pci_dev));
1674}
1675
1676static int sis190_get_regs_len(struct net_device *dev)
1677{
1678 return SIS190_REGS_SIZE;
1679}
1680
1681static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1682 void *p)
1683{
1684 struct sis190_private *tp = netdev_priv(dev);
1685 unsigned long flags;
1686
1687 if (regs->len > SIS190_REGS_SIZE)
1688 regs->len = SIS190_REGS_SIZE;
1689
1690 spin_lock_irqsave(&tp->lock, flags);
1691 memcpy_fromio(p, tp->mmio_addr, regs->len);
1692 spin_unlock_irqrestore(&tp->lock, flags);
1693}
1694
1695static int sis190_nway_reset(struct net_device *dev)
1696{
1697 struct sis190_private *tp = netdev_priv(dev);
1698
1699 return mii_nway_restart(&tp->mii_if);
1700}
1701
1702static u32 sis190_get_msglevel(struct net_device *dev)
1703{
1704 struct sis190_private *tp = netdev_priv(dev);
1705
1706 return tp->msg_enable;
1707}
1708
1709static void sis190_set_msglevel(struct net_device *dev, u32 value)
1710{
1711 struct sis190_private *tp = netdev_priv(dev);
1712
1713 tp->msg_enable = value;
1714}
1715
1716static struct ethtool_ops sis190_ethtool_ops = {
1717 .get_settings = sis190_get_settings,
1718 .set_settings = sis190_set_settings,
1719 .get_drvinfo = sis190_get_drvinfo,
1720 .get_regs_len = sis190_get_regs_len,
1721 .get_regs = sis190_get_regs,
1722 .get_link = ethtool_op_get_link,
1723 .get_msglevel = sis190_get_msglevel,
1724 .set_msglevel = sis190_set_msglevel,
1725 .nway_reset = sis190_nway_reset,
1726};
1727
1728static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1729{
1730 struct sis190_private *tp = netdev_priv(dev);
1731
1732 return !netif_running(dev) ? -EINVAL :
1733 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1734}
1735
1736static int __devinit sis190_init_one(struct pci_dev *pdev,
1737 const struct pci_device_id *ent)
1738{
1739 static int printed_version = 0;
1740 struct sis190_private *tp;
1741 struct net_device *dev;
1742 void __iomem *ioaddr;
1743 int rc;
1744
1745 if (!printed_version) {
1746 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1747 printed_version = 1;
1748 }
1749
1750 dev = sis190_init_board(pdev);
1751 if (IS_ERR(dev)) {
1752 rc = PTR_ERR(dev);
1753 goto out;
1754 }
1755
1756 tp = netdev_priv(dev);
1757 ioaddr = tp->mmio_addr;
1758
1759 rc = sis190_get_mac_addr(pdev, dev);
1760 if (rc < 0)
1761 goto err_release_board;
1762
1763 sis190_init_rxfilter(dev);
1764
1765 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1766
1767 dev->open = sis190_open;
1768 dev->stop = sis190_close;
1769 dev->do_ioctl = sis190_ioctl;
1770 dev->get_stats = sis190_get_stats;
1771 dev->tx_timeout = sis190_tx_timeout;
1772 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1773 dev->hard_start_xmit = sis190_start_xmit;
1774#ifdef CONFIG_NET_POLL_CONTROLLER
1775 dev->poll_controller = sis190_netpoll;
1776#endif
1777 dev->set_multicast_list = sis190_set_rx_mode;
1778 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1779 dev->irq = pdev->irq;
1780 dev->base_addr = (unsigned long) 0xdead;
1781
1782 spin_lock_init(&tp->lock);
1783
1784 rc = sis190_mii_probe(dev);
1785 if (rc < 0)
1786 goto err_release_board;
1787
1788 rc = register_netdev(dev);
1789 if (rc < 0)
1790 goto err_remove_mii;
1791
1792 pci_set_drvdata(pdev, dev);
1793
1794 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1795 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1796 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1797 ioaddr, dev->irq,
1798 dev->dev_addr[0], dev->dev_addr[1],
1799 dev->dev_addr[2], dev->dev_addr[3],
1800 dev->dev_addr[4], dev->dev_addr[5]);
1801
1802 netif_carrier_off(dev);
1803
1804 sis190_set_speed_auto(dev);
1805out:
1806 return rc;
1807
1808err_remove_mii:
1809 sis190_mii_remove(dev);
1810err_release_board:
1811 sis190_release_board(pdev);
1812 goto out;
1813}
1814
1815static void __devexit sis190_remove_one(struct pci_dev *pdev)
1816{
1817 struct net_device *dev = pci_get_drvdata(pdev);
1818
1819 sis190_mii_remove(dev);
1820 unregister_netdev(dev);
1821 sis190_release_board(pdev);
1822 pci_set_drvdata(pdev, NULL);
1823}
1824
1825static struct pci_driver sis190_pci_driver = {
1826 .name = DRV_NAME,
1827 .id_table = sis190_pci_tbl,
1828 .probe = sis190_init_one,
1829 .remove = __devexit_p(sis190_remove_one),
1830};
1831
1832static int __init sis190_init_module(void)
1833{
1834 return pci_module_init(&sis190_pci_driver);
1835}
1836
1837static void __exit sis190_cleanup_module(void)
1838{
1839 pci_unregister_driver(&sis190_pci_driver);
1840}
1841
1842module_init(sis190_init_module);
1843module_exit(sis190_cleanup_module);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f15739481d62..d7c98515fdfd 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.8" 45#define DRV_VERSION "0.9"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -79,8 +79,8 @@ static const struct pci_device_id skge_id_table[] = {
79 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
81 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 81 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
82 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
83 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
83 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
84 { 0 } 84 { 0 }
85}; 85};
86MODULE_DEVICE_TABLE(pci, skge_id_table); 86MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -189,7 +189,7 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
189{ 189{
190 u32 supported; 190 u32 supported;
191 191
192 if (iscopper(hw)) { 192 if (hw->copper) {
193 supported = SUPPORTED_10baseT_Half 193 supported = SUPPORTED_10baseT_Half
194 | SUPPORTED_10baseT_Full 194 | SUPPORTED_10baseT_Full
195 | SUPPORTED_100baseT_Half 195 | SUPPORTED_100baseT_Half
@@ -222,7 +222,7 @@ static int skge_get_settings(struct net_device *dev,
222 ecmd->transceiver = XCVR_INTERNAL; 222 ecmd->transceiver = XCVR_INTERNAL;
223 ecmd->supported = skge_supported_modes(hw); 223 ecmd->supported = skge_supported_modes(hw);
224 224
225 if (iscopper(hw)) { 225 if (hw->copper) {
226 ecmd->port = PORT_TP; 226 ecmd->port = PORT_TP;
227 ecmd->phy_address = hw->phy_addr; 227 ecmd->phy_address = hw->phy_addr;
228 } else 228 } else
@@ -876,6 +876,9 @@ static int skge_rx_fill(struct skge_port *skge)
876 876
877static void skge_link_up(struct skge_port *skge) 877static void skge_link_up(struct skge_port *skge)
878{ 878{
879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881
879 netif_carrier_on(skge->netdev); 882 netif_carrier_on(skge->netdev);
880 if (skge->tx_avail > MAX_SKB_FRAGS + 1) 883 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
881 netif_wake_queue(skge->netdev); 884 netif_wake_queue(skge->netdev);
@@ -894,6 +897,7 @@ static void skge_link_up(struct skge_port *skge)
894 897
895static void skge_link_down(struct skge_port *skge) 898static void skge_link_down(struct skge_port *skge)
896{ 899{
900 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
897 netif_carrier_off(skge->netdev); 901 netif_carrier_off(skge->netdev);
898 netif_stop_queue(skge->netdev); 902 netif_stop_queue(skge->netdev);
899 903
@@ -1599,7 +1603,7 @@ static void yukon_init(struct skge_hw *hw, int port)
1599 adv = PHY_AN_CSMA; 1603 adv = PHY_AN_CSMA;
1600 1604
1601 if (skge->autoneg == AUTONEG_ENABLE) { 1605 if (skge->autoneg == AUTONEG_ENABLE) {
1602 if (iscopper(hw)) { 1606 if (hw->copper) {
1603 if (skge->advertising & ADVERTISED_1000baseT_Full) 1607 if (skge->advertising & ADVERTISED_1000baseT_Full)
1604 ct1000 |= PHY_M_1000C_AFD; 1608 ct1000 |= PHY_M_1000C_AFD;
1605 if (skge->advertising & ADVERTISED_1000baseT_Half) 1609 if (skge->advertising & ADVERTISED_1000baseT_Half)
@@ -1691,7 +1695,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1691 /* Set hardware config mode */ 1695 /* Set hardware config mode */
1692 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1693 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; 1697 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
1694 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1698 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1695 1699
1696 /* Clear GMC reset */ 1700 /* Clear GMC reset */
1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1701 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
@@ -1780,7 +1784,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1780 reg &= ~GMF_RX_F_FL_ON; 1784 reg &= ~GMF_RX_F_FL_ON;
1781 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1782 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1783 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1787 /*
1788 * because Pause Packet Truncation in GMAC is not working
1789 * we have to increase the Flush Threshold to 64 bytes
1790 * in order to flush pause packets in Rx FIFO on Yukon-1
1791 */
1792 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
1784 1793
1785 /* Configure Tx MAC FIFO */ 1794 /* Configure Tx MAC FIFO */
1786 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1795 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
@@ -2670,18 +2679,6 @@ static void skge_error_irq(struct skge_hw *hw)
2670 /* Timestamp (unused) overflow */ 2679 /* Timestamp (unused) overflow */
2671 if (hwstatus & IS_IRQ_TIST_OV) 2680 if (hwstatus & IS_IRQ_TIST_OV)
2672 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 2681 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2673
2674 if (hwstatus & IS_IRQ_SENSOR) {
2675 /* no sensors on 32-bit Yukon */
2676 if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
2677 printk(KERN_ERR PFX "ignoring bogus sensor interrups\n");
2678 skge_write32(hw, B0_HWE_IMSK,
2679 IS_ERR_MSK & ~IS_IRQ_SENSOR);
2680 } else
2681 printk(KERN_WARNING PFX "sensor interrupt\n");
2682 }
2683
2684
2685 } 2682 }
2686 2683
2687 if (hwstatus & IS_RAM_RD_PAR) { 2684 if (hwstatus & IS_RAM_RD_PAR) {
@@ -2712,9 +2709,10 @@ static void skge_error_irq(struct skge_hw *hw)
2712 2709
2713 skge_pci_clear(hw); 2710 skge_pci_clear(hw);
2714 2711
2712 /* if error still set then just ignore it */
2715 hwstatus = skge_read32(hw, B0_HWE_ISRC); 2713 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2716 if (hwstatus & IS_IRQ_STAT) { 2714 if (hwstatus & IS_IRQ_STAT) {
2717 printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n", 2715 pr_debug("IRQ status %x: still set ignoring hardware errors\n",
2718 hwstatus); 2716 hwstatus);
2719 hw->intr_mask &= ~IS_HW_ERR; 2717 hw->intr_mask &= ~IS_HW_ERR;
2720 } 2718 }
@@ -2876,7 +2874,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
2876static int skge_reset(struct skge_hw *hw) 2874static int skge_reset(struct skge_hw *hw)
2877{ 2875{
2878 u16 ctst; 2876 u16 ctst;
2879 u8 t8, mac_cfg; 2877 u8 t8, mac_cfg, pmd_type, phy_type;
2880 int i; 2878 int i;
2881 2879
2882 ctst = skge_read16(hw, B0_CTST); 2880 ctst = skge_read16(hw, B0_CTST);
@@ -2895,18 +2893,19 @@ static int skge_reset(struct skge_hw *hw)
2895 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); 2893 ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
2896 2894
2897 hw->chip_id = skge_read8(hw, B2_CHIP_ID); 2895 hw->chip_id = skge_read8(hw, B2_CHIP_ID);
2898 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2896 phy_type = skge_read8(hw, B2_E_1) & 0xf;
2899 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2897 pmd_type = skge_read8(hw, B2_PMD_TYP);
2898 hw->copper = (pmd_type == 'T' || pmd_type == '1');
2900 2899
2901 switch (hw->chip_id) { 2900 switch (hw->chip_id) {
2902 case CHIP_ID_GENESIS: 2901 case CHIP_ID_GENESIS:
2903 switch (hw->phy_type) { 2902 switch (phy_type) {
2904 case SK_PHY_BCOM: 2903 case SK_PHY_BCOM:
2905 hw->phy_addr = PHY_ADDR_BCOM; 2904 hw->phy_addr = PHY_ADDR_BCOM;
2906 break; 2905 break;
2907 default: 2906 default:
2908 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 2907 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
2909 pci_name(hw->pdev), hw->phy_type); 2908 pci_name(hw->pdev), phy_type);
2910 return -EOPNOTSUPP; 2909 return -EOPNOTSUPP;
2911 } 2910 }
2912 break; 2911 break;
@@ -2914,13 +2913,10 @@ static int skge_reset(struct skge_hw *hw)
2914 case CHIP_ID_YUKON: 2913 case CHIP_ID_YUKON:
2915 case CHIP_ID_YUKON_LITE: 2914 case CHIP_ID_YUKON_LITE:
2916 case CHIP_ID_YUKON_LP: 2915 case CHIP_ID_YUKON_LP:
2917 if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S') 2916 if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
2918 hw->phy_type = SK_PHY_MARV_COPPER; 2917 hw->copper = 1;
2919 2918
2920 hw->phy_addr = PHY_ADDR_MARV; 2919 hw->phy_addr = PHY_ADDR_MARV;
2921 if (!iscopper(hw))
2922 hw->phy_type = SK_PHY_MARV_FIBER;
2923
2924 break; 2920 break;
2925 2921
2926 default: 2922 default:
@@ -2948,12 +2944,20 @@ static int skge_reset(struct skge_hw *hw)
2948 else 2944 else
2949 hw->ram_size = t8 * 4096; 2945 hw->ram_size = t8 * 4096;
2950 2946
2947 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2951 if (hw->chip_id == CHIP_ID_GENESIS) 2948 if (hw->chip_id == CHIP_ID_GENESIS)
2952 genesis_init(hw); 2949 genesis_init(hw);
2953 else { 2950 else {
2954 /* switch power to VCC (WA for VAUX problem) */ 2951 /* switch power to VCC (WA for VAUX problem) */
2955 skge_write8(hw, B0_POWER_CTRL, 2952 skge_write8(hw, B0_POWER_CTRL,
2956 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2953 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
2954 /* avoid boards with stuck Hardware error bits */
2955 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
2956 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
2957 printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
2958 hw->intr_mask &= ~IS_HW_ERR;
2959 }
2960
2957 for (i = 0; i < hw->ports; i++) { 2961 for (i = 0; i < hw->ports; i++) {
2958 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2962 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2959 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2963 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
@@ -2994,7 +2998,6 @@ static int skge_reset(struct skge_hw *hw)
2994 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 2998 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
2995 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 2999 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
2996 3000
2997 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
2998 skge_write32(hw, B0_IMSK, hw->intr_mask); 3001 skge_write32(hw, B0_IMSK, hw->intr_mask);
2999 3002
3000 if (hw->chip_id != CHIP_ID_GENESIS) 3003 if (hw->chip_id != CHIP_ID_GENESIS)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index b432f1bb8168..f1680beb8e68 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -214,8 +214,6 @@ enum {
214 214
215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ 215/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
216enum { 216enum {
217 IS_ERR_MSK = 0x00003fff,/* All Error bits */
218
219 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ 217 IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
220 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ 218 IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
221 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ 219 IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
@@ -230,6 +228,12 @@ enum {
230 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ 228 IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
231 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ 229 IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
232 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ 230 IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
231
232 IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
233 | IS_NO_STAT_M1 | IS_NO_STAT_M2
234 | IS_RAM_RD_PAR | IS_RAM_WR_PAR
235 | IS_M1_PAR_ERR | IS_M2_PAR_ERR
236 | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
233}; 237};
234 238
235/* B2_TST_CTRL1 8 bit Test Control Register 1 */ 239/* B2_TST_CTRL1 8 bit Test Control Register 1 */
@@ -2456,24 +2460,17 @@ struct skge_hw {
2456 2460
2457 u8 chip_id; 2461 u8 chip_id;
2458 u8 chip_rev; 2462 u8 chip_rev;
2459 u8 phy_type; 2463 u8 copper;
2460 u8 pmd_type;
2461 u16 phy_addr;
2462 u8 ports; 2464 u8 ports;
2463 2465
2464 u32 ram_size; 2466 u32 ram_size;
2465 u32 ram_offset; 2467 u32 ram_offset;
2468 u16 phy_addr;
2466 2469
2467 struct tasklet_struct ext_tasklet; 2470 struct tasklet_struct ext_tasklet;
2468 spinlock_t phy_lock; 2471 spinlock_t phy_lock;
2469}; 2472};
2470 2473
2471
2472static inline int iscopper(const struct skge_hw *hw)
2473{
2474 return (hw->pmd_type == 'T');
2475}
2476
2477enum { 2474enum {
2478 FLOW_MODE_NONE = 0, /* No Flow-Control */ 2475 FLOW_MODE_NONE = 0, /* No Flow-Control */
2479 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */ 2476 FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 6d9dae60a697..ba8593ac3f8a 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -68,6 +68,7 @@ static const char version[] =
68#include <linux/etherdevice.h> 68#include <linux/etherdevice.h>
69 69
70#include <asm/io.h> 70#include <asm/io.h>
71#include <asm/irq.h>
71#include <asm/system.h> 72#include <asm/system.h>
72 73
73#include "8390.h" 74#include "8390.h"
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index cdc9cc873e06..90b818a8de6e 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -1,6 +1,11 @@
1/* 1/*
2 * sonic.c 2 * sonic.c
3 * 3 *
4 * (C) 2005 Finn Thain
5 *
6 * Converted to DMA API, added zero-copy buffer handling, and
7 * (from the mac68k project) introduced dhd's support for 16-bit cards.
8 *
4 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) 9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
5 * 10 *
6 * This driver is based on work from Andreas Busse, but most of 11 * This driver is based on work from Andreas Busse, but most of
@@ -9,12 +14,23 @@
9 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) 14 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
10 * 15 *
11 * Core code included by system sonic drivers 16 * Core code included by system sonic drivers
17 *
18 * And... partially rewritten again by David Huggins-Daines in order
19 * to cope with screwed up Macintosh NICs that may or may not use
20 * 16-bit DMA.
21 *
22 * (C) 1999 David Huggins-Daines <dhd@debian.org>
23 *
12 */ 24 */
13 25
14/* 26/*
15 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook, 27 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
16 * National Semiconductors data sheet for the DP83932B Sonic Ethernet 28 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
17 * controller, and the files "8390.c" and "skeleton.c" in this directory. 29 * controller, and the files "8390.c" and "skeleton.c" in this directory.
30 *
31 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
32 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
33 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
18 */ 34 */
19 35
20 36
@@ -28,6 +44,9 @@
28 */ 44 */
29static int sonic_open(struct net_device *dev) 45static int sonic_open(struct net_device *dev)
30{ 46{
47 struct sonic_local *lp = netdev_priv(dev);
48 int i;
49
31 if (sonic_debug > 2) 50 if (sonic_debug > 2)
32 printk("sonic_open: initializing sonic driver.\n"); 51 printk("sonic_open: initializing sonic driver.\n");
33 52
@@ -40,14 +59,59 @@ static int sonic_open(struct net_device *dev)
 40 * This means that during execution of the handler interrupts are disabled 59 * This means that during execution of the handler interrupts are disabled
 41 * covering another bug otherwise corrupting data. This doesn't mean 60 * covering another bug otherwise corrupting data. This doesn't mean
 42 * this glue works ok under all situations. 61 * this glue works ok under all situations.
62 *
63 * Note (dhd): this also appears to prevent lockups on the Macintrash
64 * when more than one Ethernet card is installed (knock on wood)
65 *
 66 * Note (fthain): whether the above is still true is anyone's guess. Certainly
67 * the buffer handling algorithms will not tolerate re-entrance without some
68 * mutual exclusion added. Anyway, the memcpy has now been eliminated from the
69 * rx code to make this a faster "fast interrupt".
43 */ 70 */
44// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) { 71 if (request_irq(dev->irq, &sonic_interrupt, SONIC_IRQ_FLAG, "sonic", dev)) {
 45 if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT, 72 printk(KERN_ERR "\n%s: unable to get IRQ %d.\n", dev->name, dev->irq);
46 "sonic", dev)) {
47 printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
48 return -EAGAIN; 73 return -EAGAIN;
49 } 74 }
50 75
76 for (i = 0; i < SONIC_NUM_RRS; i++) {
77 struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
78 if (skb == NULL) {
79 while(i > 0) { /* free any that were allocated successfully */
80 i--;
81 dev_kfree_skb(lp->rx_skb[i]);
82 lp->rx_skb[i] = NULL;
83 }
84 printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
85 dev->name);
86 return -ENOMEM;
87 }
88 skb->dev = dev;
89 /* align IP header unless DMA requires otherwise */
90 if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
91 skb_reserve(skb, 2);
92 lp->rx_skb[i] = skb;
93 }
94
95 for (i = 0; i < SONIC_NUM_RRS; i++) {
96 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
97 SONIC_RBSIZE, DMA_FROM_DEVICE);
98 if (!laddr) {
99 while(i > 0) { /* free any that were mapped successfully */
100 i--;
101 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
102 lp->rx_laddr[i] = (dma_addr_t)0;
103 }
104 for (i = 0; i < SONIC_NUM_RRS; i++) {
105 dev_kfree_skb(lp->rx_skb[i]);
106 lp->rx_skb[i] = NULL;
107 }
108 printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
109 dev->name);
110 return -ENOMEM;
111 }
112 lp->rx_laddr[i] = laddr;
113 }
114
51 /* 115 /*
52 * Initialize the SONIC 116 * Initialize the SONIC
53 */ 117 */
@@ -67,7 +131,8 @@ static int sonic_open(struct net_device *dev)
67 */ 131 */
68static int sonic_close(struct net_device *dev) 132static int sonic_close(struct net_device *dev)
69{ 133{
70 unsigned int base_addr = dev->base_addr; 134 struct sonic_local *lp = netdev_priv(dev);
135 int i;
71 136
72 if (sonic_debug > 2) 137 if (sonic_debug > 2)
73 printk("sonic_close\n"); 138 printk("sonic_close\n");
@@ -77,20 +142,56 @@ static int sonic_close(struct net_device *dev)
77 /* 142 /*
78 * stop the SONIC, disable interrupts 143 * stop the SONIC, disable interrupts
79 */ 144 */
80 SONIC_WRITE(SONIC_ISR, 0x7fff);
81 SONIC_WRITE(SONIC_IMR, 0); 145 SONIC_WRITE(SONIC_IMR, 0);
146 SONIC_WRITE(SONIC_ISR, 0x7fff);
82 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); 147 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
83 148
84 sonic_free_irq(dev->irq, dev); /* release the IRQ */ 149 /* unmap and free skbs that haven't been transmitted */
150 for (i = 0; i < SONIC_NUM_TDS; i++) {
151 if(lp->tx_laddr[i]) {
152 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
153 lp->tx_laddr[i] = (dma_addr_t)0;
154 }
155 if(lp->tx_skb[i]) {
156 dev_kfree_skb(lp->tx_skb[i]);
157 lp->tx_skb[i] = NULL;
158 }
159 }
160
161 /* unmap and free the receive buffers */
162 for (i = 0; i < SONIC_NUM_RRS; i++) {
163 if(lp->rx_laddr[i]) {
164 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
165 lp->rx_laddr[i] = (dma_addr_t)0;
166 }
167 if(lp->rx_skb[i]) {
168 dev_kfree_skb(lp->rx_skb[i]);
169 lp->rx_skb[i] = NULL;
170 }
171 }
172
173 free_irq(dev->irq, dev); /* release the IRQ */
85 174
86 return 0; 175 return 0;
87} 176}
88 177
89static void sonic_tx_timeout(struct net_device *dev) 178static void sonic_tx_timeout(struct net_device *dev)
90{ 179{
91 struct sonic_local *lp = (struct sonic_local *) dev->priv; 180 struct sonic_local *lp = netdev_priv(dev);
92 printk("%s: transmit timed out.\n", dev->name); 181 int i;
 93 182 /* Stop the interrupts for this device */
183 SONIC_WRITE(SONIC_IMR, 0);
184 /* We could resend the original skbs. Easier to re-initialise. */
185 for (i = 0; i < SONIC_NUM_TDS; i++) {
186 if(lp->tx_laddr[i]) {
187 dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
188 lp->tx_laddr[i] = (dma_addr_t)0;
189 }
190 if(lp->tx_skb[i]) {
191 dev_kfree_skb(lp->tx_skb[i]);
192 lp->tx_skb[i] = NULL;
193 }
194 }
94 /* Try to restart the adaptor. */ 195 /* Try to restart the adaptor. */
95 sonic_init(dev); 196 sonic_init(dev);
96 lp->stats.tx_errors++; 197 lp->stats.tx_errors++;
@@ -100,60 +201,92 @@ static void sonic_tx_timeout(struct net_device *dev)
100 201
101/* 202/*
102 * transmit packet 203 * transmit packet
204 *
 205 * Appends a new TD during transmission, thus avoiding any TX interrupts
206 * until we run out of TDs.
207 * This routine interacts closely with the ISR in that it may,
208 * set tx_skb[i]
209 * reset the status flags of the new TD
210 * set and reset EOL flags
211 * stop the tx queue
212 * The ISR interacts with this routine in various ways. It may,
213 * reset tx_skb[i]
214 * test the EOL and status flags of the TDs
215 * wake the tx queue
216 * Concurrently with all of this, the SONIC is potentially writing to
217 * the status flags of the TDs.
218 * Until some mutual exclusion is added, this code will not work with SMP. However,
219 * MIPS Jazz machines and m68k Macs were all uni-processor machines.
103 */ 220 */
221
104static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) 222static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
105{ 223{
106 struct sonic_local *lp = (struct sonic_local *) dev->priv; 224 struct sonic_local *lp = netdev_priv(dev);
107 unsigned int base_addr = dev->base_addr; 225 dma_addr_t laddr;
108 unsigned int laddr; 226 int length;
109 int entry, length; 227 int entry = lp->next_tx;
110
111 netif_stop_queue(dev);
112 228
113 if (sonic_debug > 2) 229 if (sonic_debug > 2)
114 printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); 230 printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
115 231
232 length = skb->len;
233 if (length < ETH_ZLEN) {
234 skb = skb_padto(skb, ETH_ZLEN);
235 if (skb == NULL)
236 return 0;
237 length = ETH_ZLEN;
238 }
239
116 /* 240 /*
117 * Map the packet data into the logical DMA address space 241 * Map the packet data into the logical DMA address space
118 */ 242 */
119 if ((laddr = vdma_alloc(CPHYSADDR(skb->data), skb->len)) == ~0UL) { 243
120 printk("%s: no VDMA entry for transmit available.\n", 244 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
121 dev->name); 245 if (!laddr) {
246 printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
122 dev_kfree_skb(skb); 247 dev_kfree_skb(skb);
123 netif_start_queue(dev);
124 return 1; 248 return 1;
125 } 249 }
126 entry = lp->cur_tx & SONIC_TDS_MASK; 250
251 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
252 sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
253 sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
254 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
255 sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
256 sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
257 sonic_tda_put(dev, entry, SONIC_TD_LINK,
258 sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
259
260 /*
261 * Must set tx_skb[entry] only after clearing status, and
262 * before clearing EOL and before stopping queue
263 */
264 wmb();
265 lp->tx_len[entry] = length;
127 lp->tx_laddr[entry] = laddr; 266 lp->tx_laddr[entry] = laddr;
128 lp->tx_skb[entry] = skb; 267 lp->tx_skb[entry] = skb;
129 268
130 length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; 269 wmb();
131 flush_cache_all(); 270 sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
271 sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
272 lp->eol_tx = entry;
132 273
133 /* 274 lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
134 * Setup the transmit descriptor and issue the transmit command. 275 if (lp->tx_skb[lp->next_tx] != NULL) {
135 */ 276 /* The ring is full, the ISR has yet to process the next TD. */
136 lp->tda[entry].tx_status = 0; /* clear status */ 277 if (sonic_debug > 3)
137 lp->tda[entry].tx_frag_count = 1; /* single fragment */ 278 printk("%s: stopping queue\n", dev->name);
138 lp->tda[entry].tx_pktsize = length; /* length of packet */ 279 netif_stop_queue(dev);
139 lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff; 280 /* after this packet, wait for ISR to free up some TDAs */
140 lp->tda[entry].tx_frag_ptr_h = laddr >> 16; 281 } else netif_start_queue(dev);
141 lp->tda[entry].tx_frag_size = length;
142 lp->cur_tx++;
143 lp->stats.tx_bytes += length;
144 282
145 if (sonic_debug > 2) 283 if (sonic_debug > 2)
146 printk("sonic_send_packet: issueing Tx command\n"); 284 printk("sonic_send_packet: issuing Tx command\n");
147 285
148 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); 286 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
149 287
150 dev->trans_start = jiffies; 288 dev->trans_start = jiffies;
151 289
152 if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
153 netif_start_queue(dev);
154 else
155 lp->tx_full = 1;
156
157 return 0; 290 return 0;
158} 291}
159 292
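
The EOL handshake described in the comment at the top of sonic_send_packet() is easier to see in isolation. Below is a hedged sketch under the assumption of a plain array of descriptors; td[], kick_tx() and append_tx() are illustrative names, not the driver's, and the real driver additionally separates the publication steps with write barriers (wmb):

#include <stdio.h>
#include <stdint.h>

#define NUM_TDS  16
#define TDS_MASK (NUM_TDS - 1)
#define EOL      0x0001

/* A TD reduced to the fields the handshake needs. */
struct td { uint16_t status, link; void *skb; };

static struct td td[NUM_TDS];
static int next_tx, eol_tx = NUM_TDS - 1;

static void kick_tx(void) { /* stands in for SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP) */ }

/* Append one packet: mark the new TD as the list end first, then clear
 * EOL on the previous tail so the chip walks onto it.  The ordering is
 * the point -- the ISR must never see a tail without EOL set. */
static int append_tx(void *skb)
{
	int entry = next_tx;

	td[entry].status = 0;		/* clear status before publishing */
	td[entry].link |= EOL;		/* new tail terminates the list */
	td[entry].skb = skb;		/* ISR reaps only entries with skb set */
	td[eol_tx].link &= ~EOL;	/* expose the new TD to the chip */
	eol_tx = entry;

	next_tx = (entry + 1) & TDS_MASK;
	if (td[next_tx].skb != NULL)
		return -1;		/* ring full: caller stops the queue */
	kick_tx();
	return 0;
}

int main(void)
{
	char pkt;
	printf("append: %d\n", append_tx(&pkt));
	return 0;
}
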
@@ -164,175 +297,199 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
164static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs) 297static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
165{ 298{
166 struct net_device *dev = (struct net_device *) dev_id; 299 struct net_device *dev = (struct net_device *) dev_id;
167 unsigned int base_addr = dev->base_addr; 300 struct sonic_local *lp = netdev_priv(dev);
168 struct sonic_local *lp;
169 int status; 301 int status;
170 302
171 if (dev == NULL) { 303 if (dev == NULL) {
172 printk("sonic_interrupt: irq %d for unknown device.\n", irq); 304 printk(KERN_ERR "sonic_interrupt: irq %d for unknown device.\n", irq);
173 return IRQ_NONE; 305 return IRQ_NONE;
174 } 306 }
175 307
176 lp = (struct sonic_local *) dev->priv; 308 if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
177 309 return IRQ_NONE;
178 status = SONIC_READ(SONIC_ISR);
179 SONIC_WRITE(SONIC_ISR, 0x7fff); /* clear all bits */
180
181 if (sonic_debug > 2)
182 printk("sonic_interrupt: ISR=%x\n", status);
183
184 if (status & SONIC_INT_PKTRX) {
185 sonic_rx(dev); /* got packet(s) */
186 }
187
188 if (status & SONIC_INT_TXDN) {
189 int dirty_tx = lp->dirty_tx;
190
191 while (dirty_tx < lp->cur_tx) {
192 int entry = dirty_tx & SONIC_TDS_MASK;
193 int status = lp->tda[entry].tx_status;
194 310
195 if (sonic_debug > 3) 311 do {
196 printk 312 if (status & SONIC_INT_PKTRX) {
197 ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n", 313 if (sonic_debug > 2)
198 status, lp->cur_tx, lp->dirty_tx); 314 printk("%s: packet rx\n", dev->name);
315 sonic_rx(dev); /* got packet(s) */
316 SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
317 }
199 318
200 if (status == 0) { 319 if (status & SONIC_INT_TXDN) {
201 /* It still hasn't been Txed, kick the sonic again */ 320 int entry = lp->cur_tx;
202 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); 321 int td_status;
203 break; 322 int freed_some = 0;
204 }
205 323
206 /* put back EOL and free descriptor */ 324 /* At this point, cur_tx is the index of a TD that is one of:
207 lp->tda[entry].tx_frag_count = 0; 325 * unallocated/freed (status set & tx_skb[entry] clear)
208 lp->tda[entry].tx_status = 0; 326 * allocated and sent (status set & tx_skb[entry] set )
209 327 * allocated and not yet sent (status clear & tx_skb[entry] set )
210 if (status & 0x0001) 328 * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
211 lp->stats.tx_packets++; 329 */
212 else {
213 lp->stats.tx_errors++;
214 if (status & 0x0642)
215 lp->stats.tx_aborted_errors++;
216 if (status & 0x0180)
217 lp->stats.tx_carrier_errors++;
218 if (status & 0x0020)
219 lp->stats.tx_window_errors++;
220 if (status & 0x0004)
221 lp->stats.tx_fifo_errors++;
222 }
223 330
224 /* We must free the original skb */ 331 if (sonic_debug > 2)
225 if (lp->tx_skb[entry]) { 332 printk("%s: tx done\n", dev->name);
333
334 while (lp->tx_skb[entry] != NULL) {
335 if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
336 break;
337
338 if (td_status & 0x0001) {
339 lp->stats.tx_packets++;
340 lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
341 } else {
342 lp->stats.tx_errors++;
343 if (td_status & 0x0642)
344 lp->stats.tx_aborted_errors++;
345 if (td_status & 0x0180)
346 lp->stats.tx_carrier_errors++;
347 if (td_status & 0x0020)
348 lp->stats.tx_window_errors++;
349 if (td_status & 0x0004)
350 lp->stats.tx_fifo_errors++;
351 }
352
353 /* We must free the original skb */
226 dev_kfree_skb_irq(lp->tx_skb[entry]); 354 dev_kfree_skb_irq(lp->tx_skb[entry]);
227 lp->tx_skb[entry] = 0; 355 lp->tx_skb[entry] = NULL;
356 /* and unmap DMA buffer */
357 dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
358 lp->tx_laddr[entry] = (dma_addr_t)0;
359 freed_some = 1;
360
361 if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
362 entry = (entry + 1) & SONIC_TDS_MASK;
363 break;
364 }
365 entry = (entry + 1) & SONIC_TDS_MASK;
228 } 366 }
229 /* and the VDMA address */
230 vdma_free(lp->tx_laddr[entry]);
231 dirty_tx++;
232 }
233 367
234 if (lp->tx_full 368 if (freed_some || lp->tx_skb[entry] == NULL)
235 && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) { 369 netif_wake_queue(dev); /* The ring is no longer full */
236 /* The ring is no longer full, clear tbusy. */ 370 lp->cur_tx = entry;
237 lp->tx_full = 0; 371 SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
238 netif_wake_queue(dev);
239 } 372 }
240 373
241 lp->dirty_tx = dirty_tx; 374 /*
242 } 375 * check error conditions
376 */
377 if (status & SONIC_INT_RFO) {
378 if (sonic_debug > 1)
379 printk("%s: rx fifo overrun\n", dev->name);
380 lp->stats.rx_fifo_errors++;
381 SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
382 }
383 if (status & SONIC_INT_RDE) {
384 if (sonic_debug > 1)
385 printk("%s: rx descriptors exhausted\n", dev->name);
386 lp->stats.rx_dropped++;
387 SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
388 }
389 if (status & SONIC_INT_RBAE) {
390 if (sonic_debug > 1)
391 printk("%s: rx buffer area exceeded\n", dev->name);
392 lp->stats.rx_dropped++;
393 SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
394 }
243 395
244 /* 396 /* counter overruns; all counters are 16bit wide */
245 * check error conditions 397 if (status & SONIC_INT_FAE) {
246 */ 398 lp->stats.rx_frame_errors += 65536;
247 if (status & SONIC_INT_RFO) { 399 SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
248 printk("%s: receive fifo underrun\n", dev->name); 400 }
249 lp->stats.rx_fifo_errors++; 401 if (status & SONIC_INT_CRC) {
250 } 402 lp->stats.rx_crc_errors += 65536;
251 if (status & SONIC_INT_RDE) { 403 SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
252 printk("%s: receive descriptors exhausted\n", dev->name); 404 }
253 lp->stats.rx_dropped++; 405 if (status & SONIC_INT_MP) {
254 } 406 lp->stats.rx_missed_errors += 65536;
255 if (status & SONIC_INT_RBE) { 407 SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
256 printk("%s: receive buffer exhausted\n", dev->name); 408 }
257 lp->stats.rx_dropped++;
258 }
259 if (status & SONIC_INT_RBAE) {
260 printk("%s: receive buffer area exhausted\n", dev->name);
261 lp->stats.rx_dropped++;
262 }
263 409
264 /* counter overruns; all counters are 16bit wide */ 410 /* transmit error */
265 if (status & SONIC_INT_FAE) 411 if (status & SONIC_INT_TXER) {
266 lp->stats.rx_frame_errors += 65536; 412 if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
267 if (status & SONIC_INT_CRC) 413 printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
268 lp->stats.rx_crc_errors += 65536; 414 SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
269 if (status & SONIC_INT_MP) 415 }
270 lp->stats.rx_missed_errors += 65536;
271 416
272 /* transmit error */ 417 /* bus retry */
273 if (status & SONIC_INT_TXER) 418 if (status & SONIC_INT_BR) {
274 lp->stats.tx_errors++; 419 printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
420 dev->name);
421 /* ... to help debug DMA problems causing endless interrupts. */
422 /* Bounce the eth interface to turn on the interrupt again. */
423 SONIC_WRITE(SONIC_IMR, 0);
424 SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
425 }
275 426
276 /* 427 /* load CAM done */
277 * clear interrupt bits and return 428 if (status & SONIC_INT_LCD)
278 */ 429 SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
279 SONIC_WRITE(SONIC_ISR, status); 430 } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
280 return IRQ_HANDLED; 431 return IRQ_HANDLED;
281} 432}
282 433
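
sonic_interrupt() above replaces the old blanket SONIC_WRITE(SONIC_ISR, 0x7fff) with per-source acknowledgement and a re-read loop. The shape of that loop, as a self-contained sketch with the ISR latch simulated (isr_read()/isr_ack() are stand-ins for SONIC_READ/SONIC_WRITE on SONIC_ISR, and the mask is abbreviated to two bits):

#include <stdio.h>
#include <stdint.h>

#define INT_PKTRX 0x0400
#define INT_TXDN  0x0200
#define IMR_DEFAULT (INT_PKTRX | INT_TXDN)	/* abbreviated for the sketch */

/* Simulated interrupt status latch: one rx and one tx event pending. */
static uint16_t isr = INT_PKTRX | INT_TXDN;
static uint16_t isr_read(void)     { return isr; }
static void isr_ack(uint16_t bits) { isr &= ~bits; }
static void handle_rx(void)        { printf("rx\n"); }
static void handle_tx_done(void)   { printf("tx done\n"); }

/* Ack each source individually after servicing it, then re-read ISR
 * and loop while anything we unmasked is still pending. */
static int handle_irq(void)
{
	uint16_t status = isr_read() & IMR_DEFAULT;

	if (!status)
		return 0;			/* IRQ_NONE: not our interrupt */
	do {
		if (status & INT_PKTRX) {
			handle_rx();
			isr_ack(INT_PKTRX);
		}
		if (status & INT_TXDN) {
			handle_tx_done();
			isr_ack(INT_TXDN);
		}
	} while ((status = isr_read() & IMR_DEFAULT));
	return 1;				/* IRQ_HANDLED */
}

int main(void) { return !handle_irq(); }
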
283/* 434/*
284 * We have a good packet(s), get it/them out of the buffers. 435 * We have a good packet(s), pass it/them up the network stack.
285 */ 436 */
286static void sonic_rx(struct net_device *dev) 437static void sonic_rx(struct net_device *dev)
287{ 438{
288 unsigned int base_addr = dev->base_addr; 439 struct sonic_local *lp = netdev_priv(dev);
289 struct sonic_local *lp = (struct sonic_local *) dev->priv;
290 sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
291 int status; 440 int status;
292 441 int entry = lp->cur_rx;
293 while (rd->in_use == 0) { 442
294 struct sk_buff *skb; 443 while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
444 struct sk_buff *used_skb;
445 struct sk_buff *new_skb;
446 dma_addr_t new_laddr;
447 u16 bufadr_l;
448 u16 bufadr_h;
295 int pkt_len; 449 int pkt_len;
296 unsigned char *pkt_ptr;
297 450
298 status = rd->rx_status; 451 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
299 if (sonic_debug > 3)
300 printk("status %x, cur_rx %d, cur_rra %x\n",
301 status, lp->cur_rx, lp->cur_rra);
302 if (status & SONIC_RCR_PRX) { 452 if (status & SONIC_RCR_PRX) {
303 pkt_len = rd->rx_pktlen;
304 pkt_ptr =
305 (char *)
306 sonic_chiptomem((rd->rx_pktptr_h << 16) +
307 rd->rx_pktptr_l);
308
309 if (sonic_debug > 3)
310 printk
311 ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n",
312 pkt_ptr, lp->rba, rd->rx_pktptr_h,
313 rd->rx_pktptr_l,
314 SONIC_READ(SONIC_RBWC1),
315 SONIC_READ(SONIC_RBWC0));
316
317 /* Malloc up new buffer. */ 453 /* Malloc up new buffer. */
318 skb = dev_alloc_skb(pkt_len + 2); 454 new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
319 if (skb == NULL) { 455 if (new_skb == NULL) {
320 printk 456 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
321 ("%s: Memory squeeze, dropping packet.\n", 457 lp->stats.rx_dropped++;
322 dev->name); 458 break;
459 }
460 new_skb->dev = dev;
461 /* provide 16 byte IP header alignment unless DMA requires otherwise */
462 if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
463 skb_reserve(new_skb, 2);
464
465 new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
466 SONIC_RBSIZE, DMA_FROM_DEVICE);
467 if (!new_laddr) {
468 dev_kfree_skb(new_skb);
469 printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
323 lp->stats.rx_dropped++; 470 lp->stats.rx_dropped++;
324 break; 471 break;
325 } 472 }
326 skb->dev = dev; 473
327 skb_reserve(skb, 2); /* 16 byte align */ 474 /* now we have a new skb to replace it, pass the used one up the stack */
328 skb_put(skb, pkt_len); /* Make room */ 475 dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
329 eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0); 476 used_skb = lp->rx_skb[entry];
330 skb->protocol = eth_type_trans(skb, dev); 477 pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
331 netif_rx(skb); /* pass the packet to upper layers */ 478 skb_trim(used_skb, pkt_len);
479 used_skb->protocol = eth_type_trans(used_skb, dev);
480 netif_rx(used_skb);
332 dev->last_rx = jiffies; 481 dev->last_rx = jiffies;
333 lp->stats.rx_packets++; 482 lp->stats.rx_packets++;
334 lp->stats.rx_bytes += pkt_len; 483 lp->stats.rx_bytes += pkt_len;
335 484
485 /* and insert the new skb */
486 lp->rx_laddr[entry] = new_laddr;
487 lp->rx_skb[entry] = new_skb;
488
489 bufadr_l = (unsigned long)new_laddr & 0xffff;
490 bufadr_h = (unsigned long)new_laddr >> 16;
491 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
492 sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
336 } else { 493 } else {
337 /* This should only happen, if we enable accepting broken packets. */ 494 /* This should only happen, if we enable accepting broken packets. */
338 lp->stats.rx_errors++; 495 lp->stats.rx_errors++;
@@ -341,29 +498,35 @@ static void sonic_rx(struct net_device *dev)
341 if (status & SONIC_RCR_CRCR) 498 if (status & SONIC_RCR_CRCR)
342 lp->stats.rx_crc_errors++; 499 lp->stats.rx_crc_errors++;
343 } 500 }
344
345 rd->in_use = 1;
346 rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
347 /* now give back the buffer to the receive buffer area */
348 if (status & SONIC_RCR_LPKT) { 501 if (status & SONIC_RCR_LPKT) {
349 /* 502 /*
350 * this was the last packet out of the current receice buffer 503 * this was the last packet out of the current receive buffer
351 * give the buffer back to the SONIC 504 * give the buffer back to the SONIC
352 */ 505 */
353 lp->cur_rra += sizeof(sonic_rr_t); 506 lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
354 if (lp->cur_rra > 507 if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
355 (lp->rra_laddr + 508 SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
356 (SONIC_NUM_RRS - 509 if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
357 1) * sizeof(sonic_rr_t))) lp->cur_rra = 510 if (sonic_debug > 2)
358 lp->rra_laddr; 511 printk("%s: rx buffer exhausted\n", dev->name);
359 SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff); 512 SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
513 }
360 } else 514 } else
361 printk 515 printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
362 ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
363 dev->name); 516 dev->name);
517 /*
518 * give back the descriptor
519 */
520 sonic_rda_put(dev, entry, SONIC_RD_LINK,
521 sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
522 sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
523 sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
524 sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
525 lp->eol_rx = entry;
526 lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
364 } 527 }
365 /* 528 /*
366 * If any worth-while packets have been received, dev_rint() 529 * If any worth-while packets have been received, netif_rx()
367 * has done a mark_bh(NET_BH) for us and will work on them 530 * has done a mark_bh(NET_BH) for us and will work on them
368 * when we get to the bottom-half routine. 531 * when we get to the bottom-half routine.
369 */ 532 */
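
The rx path above is the zero-copy change mentioned in the header comment: the mapped buffer itself is passed up the stack, and a replacement is allocated and mapped first, so any failure only drops one packet instead of leaking a ring slot. A sketch of that buffer-swap discipline, with hypothetical helpers in place of dev_alloc_skb, dma_map_single, dma_unmap_single and netif_rx:

#include <stdio.h>
#include <stdlib.h>

#define RBSIZE 1520

/* Hypothetical stand-ins for the skb and DMA API. */
static void *alloc_buf(void)              { return malloc(RBSIZE); }
static unsigned long map_buf(void *buf)   { return (unsigned long)buf; }
static void unmap_buf(unsigned long addr) { (void)addr; }
static void pass_up(void *buf, int len)   { printf("rx %d bytes\n", len); free(buf); }

static void *rx_buf[16];
static unsigned long rx_addr[16];

/* Zero-copy receive: the filled buffer goes up the stack and a freshly
 * mapped one takes its ring slot.  On any failure the old buffer stays
 * in place and the packet is dropped -- the ring never loses a buffer. */
static int rx_one(int entry, int pkt_len)
{
	void *fresh = alloc_buf();
	unsigned long laddr;

	if (fresh == NULL)
		return -1;			/* drop, keep old buffer */
	laddr = map_buf(fresh);
	if (!laddr) {
		free(fresh);
		return -1;			/* drop, keep old buffer */
	}
	unmap_buf(rx_addr[entry]);
	pass_up(rx_buf[entry], pkt_len);	/* no memcpy anywhere */
	rx_buf[entry] = fresh;
	rx_addr[entry] = laddr;
	return 0;
}

int main(void)
{
	rx_buf[0] = alloc_buf();
	rx_addr[0] = map_buf(rx_buf[0]);
	return rx_one(0, 64) ? 1 : 0;
}
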
@@ -376,8 +539,7 @@ static void sonic_rx(struct net_device *dev)
376 */ 539 */
377static struct net_device_stats *sonic_get_stats(struct net_device *dev) 540static struct net_device_stats *sonic_get_stats(struct net_device *dev)
378{ 541{
379 struct sonic_local *lp = (struct sonic_local *) dev->priv; 542 struct sonic_local *lp = netdev_priv(dev);
380 unsigned int base_addr = dev->base_addr;
381 543
382 /* read the tally counter from the SONIC and reset them */ 544 /* read the tally counter from the SONIC and reset them */
383 lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT); 545 lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
@@ -396,8 +558,7 @@ static struct net_device_stats *sonic_get_stats(struct net_device *dev)
396 */ 558 */
397static void sonic_multicast_list(struct net_device *dev) 559static void sonic_multicast_list(struct net_device *dev)
398{ 560{
399 struct sonic_local *lp = (struct sonic_local *) dev->priv; 561 struct sonic_local *lp = netdev_priv(dev);
400 unsigned int base_addr = dev->base_addr;
401 unsigned int rcr; 562 unsigned int rcr;
402 struct dev_mc_list *dmi = dev->mc_list; 563 struct dev_mc_list *dmi = dev->mc_list;
403 unsigned char *addr; 564 unsigned char *addr;
@@ -413,20 +574,15 @@ static void sonic_multicast_list(struct net_device *dev)
413 rcr |= SONIC_RCR_AMC; 574 rcr |= SONIC_RCR_AMC;
414 } else { 575 } else {
415 if (sonic_debug > 2) 576 if (sonic_debug > 2)
416 printk 577 printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
417 ("sonic_multicast_list: mc_count %d\n", 578 sonic_set_cam_enable(dev, 1); /* always enable our own address */
418 dev->mc_count);
419 lp->cda.cam_enable = 1; /* always enable our own address */
420 for (i = 1; i <= dev->mc_count; i++) { 579 for (i = 1; i <= dev->mc_count; i++) {
421 addr = dmi->dmi_addr; 580 addr = dmi->dmi_addr;
422 dmi = dmi->next; 581 dmi = dmi->next;
423 lp->cda.cam_desc[i].cam_cap0 = 582 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
424 addr[1] << 8 | addr[0]; 583 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
425 lp->cda.cam_desc[i].cam_cap1 = 584 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
426 addr[3] << 8 | addr[2]; 585 sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
427 lp->cda.cam_desc[i].cam_cap2 =
428 addr[5] << 8 | addr[4];
429 lp->cda.cam_enable |= (1 << i);
430 } 586 }
431 SONIC_WRITE(SONIC_CDC, 16); 587 SONIC_WRITE(SONIC_CDC, 16);
432 /* issue Load CAM command */ 588 /* issue Load CAM command */
@@ -447,19 +603,16 @@ static void sonic_multicast_list(struct net_device *dev)
447 */ 603 */
448static int sonic_init(struct net_device *dev) 604static int sonic_init(struct net_device *dev)
449{ 605{
450 unsigned int base_addr = dev->base_addr;
451 unsigned int cmd; 606 unsigned int cmd;
452 struct sonic_local *lp = (struct sonic_local *) dev->priv; 607 struct sonic_local *lp = netdev_priv(dev);
453 unsigned int rra_start;
454 unsigned int rra_end;
455 int i; 608 int i;
456 609
457 /* 610 /*
458 * put the Sonic into software-reset mode and 611 * put the Sonic into software-reset mode and
459 * disable all interrupts 612 * disable all interrupts
460 */ 613 */
461 SONIC_WRITE(SONIC_ISR, 0x7fff);
462 SONIC_WRITE(SONIC_IMR, 0); 614 SONIC_WRITE(SONIC_IMR, 0);
615 SONIC_WRITE(SONIC_ISR, 0x7fff);
463 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); 616 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
464 617
465 /* 618 /*
@@ -475,34 +628,32 @@ static int sonic_init(struct net_device *dev)
475 if (sonic_debug > 2) 628 if (sonic_debug > 2)
476 printk("sonic_init: initialize receive resource area\n"); 629 printk("sonic_init: initialize receive resource area\n");
477 630
478 rra_start = lp->rra_laddr & 0xffff;
479 rra_end =
480 (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
481
482 for (i = 0; i < SONIC_NUM_RRS; i++) { 631 for (i = 0; i < SONIC_NUM_RRS; i++) {
483 lp->rra[i].rx_bufadr_l = 632 u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
484 (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff; 633 u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
485 lp->rra[i].rx_bufadr_h = 634 sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
486 (lp->rba_laddr + i * SONIC_RBSIZE) >> 16; 635 sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
487 lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1; 636 sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
488 lp->rra[i].rx_bufsize_h = 0; 637 sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
489 } 638 }
490 639
491 /* initialize all RRA registers */ 640 /* initialize all RRA registers */
492 SONIC_WRITE(SONIC_RSA, rra_start); 641 lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
493 SONIC_WRITE(SONIC_REA, rra_end); 642 SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
494 SONIC_WRITE(SONIC_RRP, rra_start); 643 lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
495 SONIC_WRITE(SONIC_RWP, rra_end); 644 SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
645
646 SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
647 SONIC_WRITE(SONIC_REA, lp->rra_end);
648 SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
649 SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
496 SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); 650 SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
497 SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE - 2) >> 1); 651 SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
498
499 lp->cur_rra =
500 lp->rra_laddr + (SONIC_NUM_RRS - 1) * sizeof(sonic_rr_t);
501 652
502 /* load the resource pointers */ 653 /* load the resource pointers */
503 if (sonic_debug > 3) 654 if (sonic_debug > 3)
504 printk("sonic_init: issueing RRRA command\n"); 655 printk("sonic_init: issuing RRRA command\n");
505 656
506 SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); 657 SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
507 i = 0; 658 i = 0;
508 while (i++ < 100) { 659 while (i++ < 100) {
@@ -511,27 +662,30 @@ static int sonic_init(struct net_device *dev)
511 } 662 }
512 663
513 if (sonic_debug > 2) 664 if (sonic_debug > 2)
514 printk("sonic_init: status=%x\n", SONIC_READ(SONIC_CMD)); 665 printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
515 666
516 /* 667 /*
517 * Initialize the receive descriptors so that they 668 * Initialize the receive descriptors so that they
518 * become a circular linked list, ie. let the last 669 * become a circular linked list, ie. let the last
519 * descriptor point to the first again. 670 * descriptor point to the first again.
520 */ 671 */
521 if (sonic_debug > 2) 672 if (sonic_debug > 2)
522 printk("sonic_init: initialize receive descriptors\n"); 673 printk("sonic_init: initialize receive descriptors\n");
523 for (i = 0; i < SONIC_NUM_RDS; i++) { 674 for (i=0; i<SONIC_NUM_RDS; i++) {
524 lp->rda[i].rx_status = 0; 675 sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
525 lp->rda[i].rx_pktlen = 0; 676 sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
526 lp->rda[i].rx_pktptr_l = 0; 677 sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
527 lp->rda[i].rx_pktptr_h = 0; 678 sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
528 lp->rda[i].rx_seqno = 0; 679 sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
529 lp->rda[i].in_use = 1; 680 sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
530 lp->rda[i].link = 681 sonic_rda_put(dev, i, SONIC_RD_LINK,
531 lp->rda_laddr + (i + 1) * sizeof(sonic_rd_t); 682 lp->rda_laddr +
683 ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
532 } 684 }
533 /* fix last descriptor */ 685 /* fix last descriptor */
534 lp->rda[SONIC_NUM_RDS - 1].link = lp->rda_laddr; 686 sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
687 (lp->rda_laddr & 0xffff) | SONIC_EOL);
688 lp->eol_rx = SONIC_NUM_RDS - 1;
535 lp->cur_rx = 0; 689 lp->cur_rx = 0;
536 SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16); 690 SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
537 SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff); 691 SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
@@ -542,34 +696,34 @@ static int sonic_init(struct net_device *dev)
542 if (sonic_debug > 2) 696 if (sonic_debug > 2)
543 printk("sonic_init: initialize transmit descriptors\n"); 697 printk("sonic_init: initialize transmit descriptors\n");
544 for (i = 0; i < SONIC_NUM_TDS; i++) { 698 for (i = 0; i < SONIC_NUM_TDS; i++) {
545 lp->tda[i].tx_status = 0; 699 sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
546 lp->tda[i].tx_config = 0; 700 sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
547 lp->tda[i].tx_pktsize = 0; 701 sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
548 lp->tda[i].tx_frag_count = 0; 702 sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
549 lp->tda[i].link = 703 sonic_tda_put(dev, i, SONIC_TD_LINK,
550 (lp->tda_laddr + 704 (lp->tda_laddr & 0xffff) +
551 (i + 1) * sizeof(sonic_td_t)) | SONIC_END_OF_LINKS; 705 (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
706 lp->tx_skb[i] = NULL;
552 } 707 }
553 lp->tda[SONIC_NUM_TDS - 1].link = 708 /* fix last descriptor */
554 (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS; 709 sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
710 (lp->tda_laddr & 0xffff));
555 711
556 SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16); 712 SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
557 SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); 713 SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
558 lp->cur_tx = lp->dirty_tx = 0; 714 lp->cur_tx = lp->next_tx = 0;
559 715 lp->eol_tx = SONIC_NUM_TDS - 1;
716
560 /* 717 /*
561 * put our own address to CAM desc[0] 718 * put our own address to CAM desc[0]
562 */ 719 */
563 lp->cda.cam_desc[0].cam_cap0 = 720 sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
564 dev->dev_addr[1] << 8 | dev->dev_addr[0]; 721 sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
565 lp->cda.cam_desc[0].cam_cap1 = 722 sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
566 dev->dev_addr[3] << 8 | dev->dev_addr[2]; 723 sonic_set_cam_enable(dev, 1);
567 lp->cda.cam_desc[0].cam_cap2 =
568 dev->dev_addr[5] << 8 | dev->dev_addr[4];
569 lp->cda.cam_enable = 1;
570 724
571 for (i = 0; i < 16; i++) 725 for (i = 0; i < 16; i++)
572 lp->cda.cam_desc[i].cam_entry_pointer = i; 726 sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
573 727
574 /* 728 /*
575 * initialize CAM registers 729 * initialize CAM registers
@@ -588,8 +742,8 @@ static int sonic_init(struct net_device *dev)
588 break; 742 break;
589 } 743 }
590 if (sonic_debug > 2) { 744 if (sonic_debug > 2) {
591 printk("sonic_init: CMD=%x, ISR=%x\n", 745 printk("sonic_init: CMD=%x, ISR=%x\n, i=%d",
592 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR)); 746 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
593 } 747 }
594 748
595 /* 749 /*
@@ -604,7 +758,7 @@ static int sonic_init(struct net_device *dev)
604 758
605 cmd = SONIC_READ(SONIC_CMD); 759 cmd = SONIC_READ(SONIC_CMD);
606 if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) 760 if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
607 printk("sonic_init: failed, status=%x\n", cmd); 761 printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
608 762
609 if (sonic_debug > 2) 763 if (sonic_debug > 2)
610 printk("sonic_init: new status=%x\n", 764 printk("sonic_init: new status=%x\n",
diff --git a/drivers/net/sonic.h b/drivers/net/sonic.h
index c4a6d58e4afb..cede969a8baa 100644
--- a/drivers/net/sonic.h
+++ b/drivers/net/sonic.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Helpfile for sonic.c 2 * Header file for sonic.c
3 * 3 *
4 * (C) Waldorf Electronics, Germany 4 * (C) Waldorf Electronics, Germany
5 * Written by Andreas Busse 5 * Written by Andreas Busse
@@ -9,10 +9,16 @@
9 * and pad structure members must be exchanged. Also, the structures 9 * and pad structure members must be exchanged. Also, the structures
10 * need to be changed accordingly to the bus size. 10 * need to be changed accordingly to the bus size.
11 * 11 *
12 * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian), 12 * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian)
13 * see CONFIG_MACSONIC branch below.
14 * 13 *
14 * 990611 David Huggins-Daines <dhd@debian.org>: This machine abstraction
15 * does not cope with 16-bit bus sizes very well. Therefore I have
16 * rewritten it with ugly macros and evil inlines.
17 *
18 * 050625 Finn Thain: introduced more 32-bit cards and dhd's support
19 * for 16-bit cards (from the mac68k project).
15 */ 20 */
21
16#ifndef SONIC_H 22#ifndef SONIC_H
17#define SONIC_H 23#define SONIC_H
18 24
@@ -83,6 +89,7 @@
83/* 89/*
84 * Error counters 90 * Error counters
85 */ 91 */
92
86#define SONIC_CRCT 0x2c 93#define SONIC_CRCT 0x2c
87#define SONIC_FAET 0x2d 94#define SONIC_FAET 0x2d
88#define SONIC_MPT 0x2e 95#define SONIC_MPT 0x2e
@@ -182,14 +189,14 @@
182 189
183#define SONIC_INT_BR 0x4000 190#define SONIC_INT_BR 0x4000
184#define SONIC_INT_HBL 0x2000 191#define SONIC_INT_HBL 0x2000
185#define SONIC_INT_LCD 0x1000 192#define SONIC_INT_LCD 0x1000
186#define SONIC_INT_PINT 0x0800 193#define SONIC_INT_PINT 0x0800
187#define SONIC_INT_PKTRX 0x0400 194#define SONIC_INT_PKTRX 0x0400
188#define SONIC_INT_TXDN 0x0200 195#define SONIC_INT_TXDN 0x0200
189#define SONIC_INT_TXER 0x0100 196#define SONIC_INT_TXER 0x0100
190#define SONIC_INT_TC 0x0080 197#define SONIC_INT_TC 0x0080
191#define SONIC_INT_RDE 0x0040 198#define SONIC_INT_RDE 0x0040
192#define SONIC_INT_RBE 0x0020 199#define SONIC_INT_RBE 0x0020
193#define SONIC_INT_RBAE 0x0010 200#define SONIC_INT_RBAE 0x0010
194#define SONIC_INT_CRC 0x0008 201#define SONIC_INT_CRC 0x0008
195#define SONIC_INT_FAE 0x0004 202#define SONIC_INT_FAE 0x0004
@@ -201,224 +208,61 @@
201 * The interrupts we allow. 208 * The interrupts we allow.
202 */ 209 */
203 210
204#define SONIC_IMR_DEFAULT (SONIC_INT_BR | \ 211#define SONIC_IMR_DEFAULT ( SONIC_INT_BR | \
205 SONIC_INT_LCD | \ 212 SONIC_INT_LCD | \
206 SONIC_INT_PINT | \ 213 SONIC_INT_RFO | \
207 SONIC_INT_PKTRX | \ 214 SONIC_INT_PKTRX | \
208 SONIC_INT_TXDN | \ 215 SONIC_INT_TXDN | \
209 SONIC_INT_TXER | \ 216 SONIC_INT_TXER | \
210 SONIC_INT_RDE | \ 217 SONIC_INT_RDE | \
211 SONIC_INT_RBE | \
212 SONIC_INT_RBAE | \ 218 SONIC_INT_RBAE | \
213 SONIC_INT_CRC | \ 219 SONIC_INT_CRC | \
214 SONIC_INT_FAE | \ 220 SONIC_INT_FAE | \
215 SONIC_INT_MP) 221 SONIC_INT_MP)
216 222
217 223
218#define SONIC_END_OF_LINKS 0x0001 224#define SONIC_EOL 0x0001
219
220
221#ifdef CONFIG_MACSONIC
222/*
223 * Big endian like structures on 680x0 Macs
224 */
225
226typedef struct {
227 u32 rx_bufadr_l; /* receive buffer ptr */
228 u32 rx_bufadr_h;
229
230 u32 rx_bufsize_l; /* no. of words in the receive buffer */
231 u32 rx_bufsize_h;
232} sonic_rr_t;
233
234/*
235 * Sonic receive descriptor. Receive descriptors are
236 * kept in a linked list of these structures.
237 */
238
239typedef struct {
240 SREGS_PAD(pad0);
241 u16 rx_status; /* status after reception of a packet */
242 SREGS_PAD(pad1);
243 u16 rx_pktlen; /* length of the packet incl. CRC */
244
245 /*
246 * Pointers to the location in the receive buffer area (RBA)
247 * where the packet resides. A packet is always received into
248 * a contiguous piece of memory.
249 */
250 SREGS_PAD(pad2);
251 u16 rx_pktptr_l;
252 SREGS_PAD(pad3);
253 u16 rx_pktptr_h;
254
255 SREGS_PAD(pad4);
256 u16 rx_seqno; /* sequence no. */
257
258 SREGS_PAD(pad5);
259 u16 link; /* link to next RDD (end if EOL bit set) */
260
261 /*
262 * Owner of this descriptor, 0= driver, 1=sonic
263 */
264
265 SREGS_PAD(pad6);
266 u16 in_use;
267
268 caddr_t rda_next; /* pointer to next RD */
269} sonic_rd_t;
270
271
272/*
273 * Describes a Transmit Descriptor
274 */
275typedef struct {
276 SREGS_PAD(pad0);
277 u16 tx_status; /* status after transmission of a packet */
278 SREGS_PAD(pad1);
279 u16 tx_config; /* transmit configuration for this packet */
280 SREGS_PAD(pad2);
281 u16 tx_pktsize; /* size of the packet to be transmitted */
282 SREGS_PAD(pad3);
283 u16 tx_frag_count; /* no. of fragments */
284
285 SREGS_PAD(pad4);
286 u16 tx_frag_ptr_l;
287 SREGS_PAD(pad5);
288 u16 tx_frag_ptr_h;
289 SREGS_PAD(pad6);
290 u16 tx_frag_size;
291
292 SREGS_PAD(pad7);
293 u16 link; /* ptr to next descriptor */
294} sonic_td_t;
295
296
297/*
298 * Describes an entry in the CAM Descriptor Area.
299 */
300
301typedef struct {
302 SREGS_PAD(pad0);
303 u16 cam_entry_pointer;
304 SREGS_PAD(pad1);
305 u16 cam_cap0;
306 SREGS_PAD(pad2);
307 u16 cam_cap1;
308 SREGS_PAD(pad3);
309 u16 cam_cap2;
310} sonic_cd_t;
311
312#define CAM_DESCRIPTORS 16 225#define CAM_DESCRIPTORS 16
313 226
314 227/* Offsets in the various DMA buffers accessed by the SONIC */
315typedef struct { 228
316 sonic_cd_t cam_desc[CAM_DESCRIPTORS]; 229#define SONIC_BITMODE16 0
317 SREGS_PAD(pad); 230#define SONIC_BITMODE32 1
318 u16 cam_enable; 231#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)
319} sonic_cda_t; 232/* Note! These are all measured in bus-size units, so use SONIC_BUS_SCALE */
320 233#define SIZEOF_SONIC_RR 4
321#else /* original declarations, little endian 32 bit */ 234#define SONIC_RR_BUFADR_L 0
322 235#define SONIC_RR_BUFADR_H 1
323/* 236#define SONIC_RR_BUFSIZE_L 2
324 * structure definitions 237#define SONIC_RR_BUFSIZE_H 3
325 */ 238
326 239#define SIZEOF_SONIC_RD 7
327typedef struct { 240#define SONIC_RD_STATUS 0
328 u32 rx_bufadr_l; /* receive buffer ptr */ 241#define SONIC_RD_PKTLEN 1
329 u32 rx_bufadr_h; 242#define SONIC_RD_PKTPTR_L 2
330 243#define SONIC_RD_PKTPTR_H 3
331 u32 rx_bufsize_l; /* no. of words in the receive buffer */ 244#define SONIC_RD_SEQNO 4
332 u32 rx_bufsize_h; 245#define SONIC_RD_LINK 5
333} sonic_rr_t; 246#define SONIC_RD_IN_USE 6
334 247
335/* 248#define SIZEOF_SONIC_TD 8
336 * Sonic receive descriptor. Receive descriptors are 249#define SONIC_TD_STATUS 0
337 * kept in a linked list of these structures. 250#define SONIC_TD_CONFIG 1
338 */ 251#define SONIC_TD_PKTSIZE 2
339 252#define SONIC_TD_FRAG_COUNT 3
340typedef struct { 253#define SONIC_TD_FRAG_PTR_L 4
341 u16 rx_status; /* status after reception of a packet */ 254#define SONIC_TD_FRAG_PTR_H 5
342 SREGS_PAD(pad0); 255#define SONIC_TD_FRAG_SIZE 6
343 u16 rx_pktlen; /* length of the packet incl. CRC */ 256#define SONIC_TD_LINK 7
344 SREGS_PAD(pad1); 257
345 258#define SIZEOF_SONIC_CD 4
346 /* 259#define SONIC_CD_ENTRY_POINTER 0
347 * Pointers to the location in the receive buffer area (RBA) 260#define SONIC_CD_CAP0 1
348 * where the packet resides. A packet is always received into 261#define SONIC_CD_CAP1 2
349 * a contiguous piece of memory. 262#define SONIC_CD_CAP2 3
350 */ 263
351 u16 rx_pktptr_l; 264#define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1)
352 SREGS_PAD(pad2); 265#define SONIC_CDA_CAM_ENABLE (CAM_DESCRIPTORS * SIZEOF_SONIC_CD)
353 u16 rx_pktptr_h;
354 SREGS_PAD(pad3);
355
356 u16 rx_seqno; /* sequence no. */
357 SREGS_PAD(pad4);
358
359 u16 link; /* link to next RDD (end if EOL bit set) */
360 SREGS_PAD(pad5);
361
362 /*
363 * Owner of this descriptor, 0= driver, 1=sonic
364 */
365
366 u16 in_use;
367 SREGS_PAD(pad6);
368
369 caddr_t rda_next; /* pointer to next RD */
370} sonic_rd_t;
371
372
373/*
374 * Describes a Transmit Descriptor
375 */
376typedef struct {
377 u16 tx_status; /* status after transmission of a packet */
378 SREGS_PAD(pad0);
379 u16 tx_config; /* transmit configuration for this packet */
380 SREGS_PAD(pad1);
381 u16 tx_pktsize; /* size of the packet to be transmitted */
382 SREGS_PAD(pad2);
383 u16 tx_frag_count; /* no. of fragments */
384 SREGS_PAD(pad3);
385
386 u16 tx_frag_ptr_l;
387 SREGS_PAD(pad4);
388 u16 tx_frag_ptr_h;
389 SREGS_PAD(pad5);
390 u16 tx_frag_size;
391 SREGS_PAD(pad6);
392
393 u16 link; /* ptr to next descriptor */
394 SREGS_PAD(pad7);
395} sonic_td_t;
396
397
398/*
399 * Describes an entry in the CAM Descriptor Area.
400 */
401
402typedef struct {
403 u16 cam_entry_pointer;
404 SREGS_PAD(pad0);
405 u16 cam_cap0;
406 SREGS_PAD(pad1);
407 u16 cam_cap1;
408 SREGS_PAD(pad2);
409 u16 cam_cap2;
410 SREGS_PAD(pad3);
411} sonic_cd_t;
412
413#define CAM_DESCRIPTORS 16
414
415
416typedef struct {
417 sonic_cd_t cam_desc[CAM_DESCRIPTORS];
418 u16 cam_enable;
419 SREGS_PAD(pad);
420} sonic_cda_t;
421#endif /* endianness */
422 266
423/* 267/*
424 * Some tunables for the buffer areas. Power of 2 is required 268 * Some tunables for the buffer areas. Power of 2 is required
@@ -426,44 +270,60 @@ typedef struct {
426 * 270 *
427 * MSch: use more buffer space for the slow m68k Macs! 271 * MSch: use more buffer space for the slow m68k Macs!
428 */ 272 */
429#ifdef CONFIG_MACSONIC 273#define SONIC_NUM_RRS 16 /* number of receive resources */
430#define SONIC_NUM_RRS 32 /* number of receive resources */ 274#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
431#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ 275#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
432#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
433#else
434#define SONIC_NUM_RRS 16 /* number of receive resources */
435#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
436#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
437#endif
438#define SONIC_RBSIZE 1520 /* size of one resource buffer */
439 276
440#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) 277#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
441#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) 278#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
442 279
280#define SONIC_RBSIZE 1520 /* size of one resource buffer */
281
282/* Again, measured in bus size units! */
283#define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \
284 + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \
285 + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \
286 + (SIZEOF_SONIC_RR * SONIC_NUM_RRS))
443 287
 444/* Information that needs to be kept for each board. */ 288/* Information that needs to be kept for each board. */
445struct sonic_local { 289struct sonic_local {
446 sonic_cda_t cda; /* virtual CPU address of CDA */ 290 /* Bus size. 0 == 16 bits, 1 == 32 bits. */
447 sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */ 291 int dma_bitmode;
448 sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */ 292 /* Register offset within the longword (independent of endianness,
449 sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */ 293 and varies from one type of Macintosh SONIC to another
450 struct sk_buff *tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */ 294 (Aarrgh)) */
 451 unsigned int tx_laddr[SONIC_NUM_TDS]; /* logical DMA address for skbuffs */ 295 int reg_offset;
452 unsigned char *rba; /* start of receive buffer areas */ 296 void *descriptors;
453 unsigned int cda_laddr; /* logical DMA address of CDA */ 297 /* Crud. These areas have to be within the same 64K. Therefore
 454 unsigned int tda_laddr; /* logical DMA address of TDA */ 298 we allocate a descriptors page, and point these to places within it. */
455 unsigned int rra_laddr; /* logical DMA address of RRA */ 299 void *cda; /* CAM descriptor area */
456 unsigned int rda_laddr; /* logical DMA address of RDA */ 300 void *tda; /* Transmit descriptor area */
457 unsigned int rba_laddr; /* logical DMA address of RBA */ 301 void *rra; /* Receive resource area */
458 unsigned int cur_rra; /* current indexes to resource areas */ 302 void *rda; /* Receive descriptor area */
303 struct sk_buff* volatile rx_skb[SONIC_NUM_RRS]; /* packets to be received */
304 struct sk_buff* volatile tx_skb[SONIC_NUM_TDS]; /* packets to be transmitted */
305 unsigned int tx_len[SONIC_NUM_TDS]; /* lengths of tx DMA mappings */
306 /* Logical DMA addresses on MIPS, bus addresses on m68k
307 * (so "laddr" is a bit misleading) */
308 dma_addr_t descriptors_laddr;
309 u32 cda_laddr; /* logical DMA address of CDA */
310 u32 tda_laddr; /* logical DMA address of TDA */
311 u32 rra_laddr; /* logical DMA address of RRA */
312 u32 rda_laddr; /* logical DMA address of RDA */
313 dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
314 dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
315 unsigned int rra_end;
316 unsigned int cur_rwp;
459 unsigned int cur_rx; 317 unsigned int cur_rx;
460 unsigned int cur_tx; 318 unsigned int cur_tx; /* first unacked transmit packet */
461 unsigned int dirty_tx; /* last unacked transmit packet */ 319 unsigned int eol_rx;
462 char tx_full; 320 unsigned int eol_tx; /* last unacked transmit packet */
321 unsigned int next_tx; /* next free TD */
322 struct device *device; /* generic device */
463 struct net_device_stats stats; 323 struct net_device_stats stats;
464}; 324};
465 325
466#define TX_TIMEOUT 6 326#define TX_TIMEOUT (3 * HZ)
467 327
468/* Index to functions, as function prototypes. */ 328/* Index to functions, as function prototypes. */
469 329
@@ -477,6 +337,114 @@ static void sonic_multicast_list(struct net_device *dev);
477static int sonic_init(struct net_device *dev); 337static int sonic_init(struct net_device *dev);
478static void sonic_tx_timeout(struct net_device *dev); 338static void sonic_tx_timeout(struct net_device *dev);
479 339
340/* Internal inlines for reading/writing DMA buffers. Note that bus
341 size and endianness matter here, whereas they don't for registers,
342 as far as we can tell. */
343/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
344 is a much better name. */
345static inline void sonic_buf_put(void* base, int bitmode,
346 int offset, __u16 val)
347{
348 if (bitmode)
349#ifdef __BIG_ENDIAN
350 ((__u16 *) base + (offset*2))[1] = val;
351#else
352 ((__u16 *) base + (offset*2))[0] = val;
353#endif
354 else
355 ((__u16 *) base)[offset] = val;
356}
357
358static inline __u16 sonic_buf_get(void* base, int bitmode,
359 int offset)
360{
361 if (bitmode)
362#ifdef __BIG_ENDIAN
363 return ((volatile __u16 *) base + (offset*2))[1];
364#else
365 return ((volatile __u16 *) base + (offset*2))[0];
366#endif
367 else
368 return ((volatile __u16 *) base)[offset];
369}
370
371/* Inlines that you should actually use for reading/writing DMA buffers */
372static inline void sonic_cda_put(struct net_device* dev, int entry,
373 int offset, __u16 val)
374{
375 struct sonic_local* lp = (struct sonic_local *) dev->priv;
376 sonic_buf_put(lp->cda, lp->dma_bitmode,
377 (entry * SIZEOF_SONIC_CD) + offset, val);
378}
379
380static inline __u16 sonic_cda_get(struct net_device* dev, int entry,
381 int offset)
382{
383 struct sonic_local* lp = (struct sonic_local *) dev->priv;
384 return sonic_buf_get(lp->cda, lp->dma_bitmode,
385 (entry * SIZEOF_SONIC_CD) + offset);
386}
387
388static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val)
389{
390 struct sonic_local* lp = (struct sonic_local *) dev->priv;
391 sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val);
392}
393
394static inline __u16 sonic_get_cam_enable(struct net_device* dev)
395{
396 struct sonic_local* lp = (struct sonic_local *) dev->priv;
397 return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE);
398}
399
400static inline void sonic_tda_put(struct net_device* dev, int entry,
401 int offset, __u16 val)
402{
403 struct sonic_local* lp = (struct sonic_local *) dev->priv;
404 sonic_buf_put(lp->tda, lp->dma_bitmode,
405 (entry * SIZEOF_SONIC_TD) + offset, val);
406}
407
408static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
409 int offset)
410{
411 struct sonic_local* lp = (struct sonic_local *) dev->priv;
412 return sonic_buf_get(lp->tda, lp->dma_bitmode,
413 (entry * SIZEOF_SONIC_TD) + offset);
414}
415
416static inline void sonic_rda_put(struct net_device* dev, int entry,
417 int offset, __u16 val)
418{
419 struct sonic_local* lp = (struct sonic_local *) dev->priv;
420 sonic_buf_put(lp->rda, lp->dma_bitmode,
421 (entry * SIZEOF_SONIC_RD) + offset, val);
422}
423
424static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
425 int offset)
426{
427 struct sonic_local* lp = (struct sonic_local *) dev->priv;
428 return sonic_buf_get(lp->rda, lp->dma_bitmode,
429 (entry * SIZEOF_SONIC_RD) + offset);
430}
431
432static inline void sonic_rra_put(struct net_device* dev, int entry,
433 int offset, __u16 val)
434{
435 struct sonic_local* lp = (struct sonic_local *) dev->priv;
436 sonic_buf_put(lp->rra, lp->dma_bitmode,
437 (entry * SIZEOF_SONIC_RR) + offset, val);
438}
439
440static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
441 int offset)
442{
443 struct sonic_local* lp = (struct sonic_local *) dev->priv;
444 return sonic_buf_get(lp->rra, lp->dma_bitmode,
445 (entry * SIZEOF_SONIC_RR) + offset);
446}
447
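
To make the addressing in sonic_buf_put()/sonic_buf_get() concrete: in 16-bit mode the fields are packed u16s, while in 32-bit mode each field occupies a full 32-bit slot and a big-endian host uses the second half of that slot. A small worked example of the resulting byte offsets (the +2 applies only under __BIG_ENDIAN; a little-endian host would use +0):

#include <stdio.h>
#include <stddef.h>

/* Mirror of the accessors' arithmetic, written out in bytes. */
static size_t field_byte_offset(int bitmode, int offset)
{
	if (bitmode)				/* SONIC_BITMODE32 */
		return (size_t)offset * 4 + 2;	/* +2: big-endian half of the slot */
	return (size_t)offset * 2;		/* SONIC_BITMODE16: packed u16s */
}

int main(void)
{
	/* e.g. SONIC_TD_LINK is field offset 7 within a TD */
	printf("16-bit bus: byte %zu\n", field_byte_offset(0, 7));	/* 14 */
	printf("32-bit bus: byte %zu\n", field_byte_offset(1, 7));	/* 30 */
	return 0;
}
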
480static const char *version = 448static const char *version =
481 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; 449 "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
482 450
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 01419aff333e..af8263a1580e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -66,8 +66,8 @@
66 66
67#define DRV_MODULE_NAME "tg3" 67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": " 68#define PFX DRV_MODULE_NAME ": "
69#define DRV_MODULE_VERSION "3.36" 69#define DRV_MODULE_VERSION "3.37"
70#define DRV_MODULE_RELDATE "August 19, 2005" 70#define DRV_MODULE_RELDATE "August 25, 2005"
71 71
72#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -340,41 +340,92 @@ static struct {
340 340
341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342{ 342{
343 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 343 unsigned long flags;
344 spin_lock_bh(&tp->indirect_lock); 344
345 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 345 spin_lock_irqsave(&tp->indirect_lock, flags);
346 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 346 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
347 spin_unlock_bh(&tp->indirect_lock); 347 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
348 } else { 348 spin_unlock_irqrestore(&tp->indirect_lock, flags);
349 writel(val, tp->regs + off); 349}
350 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0) 350
351 readl(tp->regs + off); 351static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
352{
353 writel(val, tp->regs + off);
354 readl(tp->regs + off);
355}
356
357static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
358{
359 unsigned long flags;
360 u32 val;
361
362 spin_lock_irqsave(&tp->indirect_lock, flags);
363 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
364 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
365 spin_unlock_irqrestore(&tp->indirect_lock, flags);
366 return val;
367}
368
369static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
370{
371 unsigned long flags;
372
373 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
374 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
375 TG3_64BIT_REG_LOW, val);
376 return;
377 }
378 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
379 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
380 TG3_64BIT_REG_LOW, val);
381 return;
382 }
383
384 spin_lock_irqsave(&tp->indirect_lock, flags);
385 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
386 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
387 spin_unlock_irqrestore(&tp->indirect_lock, flags);
388
389 /* In indirect mode when disabling interrupts, we also need
390 * to clear the interrupt bit in the GRC local ctrl register.
391 */
392 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
393 (val == 0x1)) {
394 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
395 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
352 } 396 }
353} 397}
354 398
399static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
400{
401 unsigned long flags;
402 u32 val;
403
404 spin_lock_irqsave(&tp->indirect_lock, flags);
405 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
406 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
407 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408 return val;
409}
410
355static void _tw32_flush(struct tg3 *tp, u32 off, u32 val) 411static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356{ 412{
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 413 tp->write32(tp, off, val);
358 spin_lock_bh(&tp->indirect_lock); 414 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); 415 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); 416 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
361 spin_unlock_bh(&tp->indirect_lock); 417 tp->read32(tp, off); /* flush */
362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367} 418}
368 419
369static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val) 420static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
370{ 421{
371 void __iomem *mbox = tp->regs + off; 422 tp->write32_mbox(tp, off, val);
372 writel(val, mbox); 423 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 424 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
374 readl(mbox); 425 tp->read32_mbox(tp, off);
375} 426}
376 427
377static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val) 428static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378{ 429{
379 void __iomem *mbox = tp->regs + off; 430 void __iomem *mbox = tp->regs + off;
380 writel(val, mbox); 431 writel(val, mbox);
@@ -384,46 +435,57 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
384 readl(mbox); 435 readl(mbox);
385} 436}
386 437
387#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg)) 438static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
388#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val) 439{
389#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val) 440 writel(val, tp->regs + off);
441}
442
443static u32 tg3_read32(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off));
446}
447
448#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
449#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
450#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
451#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
452#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
390 453
391#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val)) 454#define tw32(reg,val) tp->write32(tp, reg, val)
392#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val)) 455#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg)) 456#define tr32(reg) tp->read32(tp, reg)
394#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395#define tr32(reg) readl(tp->regs + (reg))
396#define tr16(reg) readw(tp->regs + (reg))
397#define tr8(reg) readb(tp->regs + (reg))
398 457
399static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) 458static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400{ 459{
401 spin_lock_bh(&tp->indirect_lock); 460 unsigned long flags;
461
462 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 463 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 464 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404 465
405 /* Always leave this as zero. */ 466 /* Always leave this as zero. */
406 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 467 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407 spin_unlock_bh(&tp->indirect_lock); 468 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408} 469}
409 470
410static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 471static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411{ 472{
412 spin_lock_bh(&tp->indirect_lock); 473 unsigned long flags;
474
475 spin_lock_irqsave(&tp->indirect_lock, flags);
413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 476 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 477 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415 478
416 /* Always leave this as zero. */ 479 /* Always leave this as zero. */
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 480 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418 spin_unlock_bh(&tp->indirect_lock); 481 spin_unlock_irqrestore(&tp->indirect_lock, flags);
419} 482}
420 483
421static void tg3_disable_ints(struct tg3 *tp) 484static void tg3_disable_ints(struct tg3 *tp)
422{ 485{
423 tw32(TG3PCI_MISC_HOST_CTRL, 486 tw32(TG3PCI_MISC_HOST_CTRL,
424 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); 487 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 488 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427} 489}
428 490
429static inline void tg3_cond_int(struct tg3 *tp) 491static inline void tg3_cond_int(struct tg3 *tp)
@@ -439,9 +501,8 @@ static void tg3_enable_ints(struct tg3 *tp)
439 501
440 tw32(TG3PCI_MISC_HOST_CTRL, 502 tw32(TG3PCI_MISC_HOST_CTRL,
441 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 503 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 504 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443 (tp->last_tag << 24)); 505 (tp->last_tag << 24));
444 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445 tg3_cond_int(tp); 506 tg3_cond_int(tp);
446} 507}
447 508
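Two things run through the hunk above. The indirect accessors now take indirect_lock with spin_lock_irqsave() instead of spin_lock_bh(), presumably because the new per-device function pointers let them be reached from hard interrupt context. And the *_flush variants keep the standard posted-write idiom: reading back the register that was just written forces the write out of any PCI posting buffers before the CPU moves on. The idiom in isolation, on a hypothetical device:

    #include <asm/io.h>

    /* Generic posted-write flush sketch: the readl() cannot complete
     * until the preceding writel() has actually reached the device.
     */
    static void example_write_flush(void __iomem *regs, u32 off, u32 val)
    {
            writel(val, regs + off);
            readl(regs + off);              /* flush posted write */
    }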
@@ -472,8 +533,6 @@ static inline unsigned int tg3_has_work(struct tg3 *tp)
472 */ 533 */
473static void tg3_restart_ints(struct tg3 *tp) 534static void tg3_restart_ints(struct tg3 *tp)
474{ 535{
475 tw32(TG3PCI_MISC_HOST_CTRL,
476 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 536 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478 tp->last_tag << 24); 537 tp->last_tag << 24);
479 mmiowb(); 538 mmiowb();
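tg3_restart_ints() keeps its mmiowb(); the barrier matters when an MMIO write is issued under a spinlock, because it stops a write from the next CPU to take the lock from overtaking this one on the way to the device. A minimal sketch of the pairing (the lock, register offset, and function name are stand-ins):

    #include <linux/spinlock.h>
    #include <asm/io.h>

    static void example_poke_mailbox(spinlock_t *lock,
                                     void __iomem *regs, u32 val)
    {
            spin_lock(lock);
            writel(val, regs + 0x200);      /* hypothetical mailbox */
            mmiowb();                       /* order write vs. unlock */
            spin_unlock(lock);
    }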
@@ -3278,9 +3337,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3278 /* No work, shared interrupt perhaps? re-enable 3337 /* No work, shared interrupt perhaps? re-enable
3279 * interrupts, and flush that PCI write 3338 * interrupts, and flush that PCI write
3280 */ 3339 */
3281 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3340 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3282 0x00000000); 3341 0x00000000);
3283 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3284 } 3342 }
3285 } else { /* shared interrupt */ 3343 } else { /* shared interrupt */
3286 handled = 0; 3344 handled = 0;
@@ -3323,9 +3381,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
3323 /* no work, shared interrupt perhaps? re-enable 3381 /* no work, shared interrupt perhaps? re-enable
3324 * interrupts, and flush that PCI write 3382 * interrupts, and flush that PCI write
3325 */ 3383 */
3326 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3384 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3327 tp->last_tag << 24); 3385 tp->last_tag << 24);
3328 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3329 } 3386 }
3330 } else { /* shared interrupt */ 3387 } else { /* shared interrupt */
3331 handled = 0; 3388 handled = 0;
@@ -4216,7 +4273,7 @@ static void tg3_stop_fw(struct tg3 *);
4216static int tg3_chip_reset(struct tg3 *tp) 4273static int tg3_chip_reset(struct tg3 *tp)
4217{ 4274{
4218 u32 val; 4275 u32 val;
4219 u32 flags_save; 4276 void (*write_op)(struct tg3 *, u32, u32);
4220 int i; 4277 int i;
4221 4278
4222 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4279 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
@@ -4228,8 +4285,9 @@ static int tg3_chip_reset(struct tg3 *tp)
4228 * fun things. So, temporarily disable the 5701 4285 * fun things. So, temporarily disable the 5701
4229 * hardware workaround, while we do the reset. 4286 * hardware workaround, while we do the reset.
4230 */ 4287 */
4231 flags_save = tp->tg3_flags; 4288 write_op = tp->write32;
4232 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG; 4289 if (write_op == tg3_write_flush_reg32)
4290 tp->write32 = tg3_write32;
4233 4291
4234 /* do the reset */ 4292 /* do the reset */
4235 val = GRC_MISC_CFG_CORECLK_RESET; 4293 val = GRC_MISC_CFG_CORECLK_RESET;
@@ -4248,8 +4306,8 @@ static int tg3_chip_reset(struct tg3 *tp)
4248 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 4306 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4249 tw32(GRC_MISC_CFG, val); 4307 tw32(GRC_MISC_CFG, val);
4250 4308
4251 /* restore 5701 hardware bug workaround flag */ 4309 /* restore 5701 hardware bug workaround write method */
4252 tp->tg3_flags = flags_save; 4310 tp->write32 = write_op;
4253 4311
4254 /* Unfortunately, we have to delay before the PCI read back. 4312 /* Unfortunately, we have to delay before the PCI read back.
4255 * Some 575X chips even will not respond to a PCI cfg access 4313 * Some 575X chips even will not respond to a PCI cfg access
@@ -4635,7 +4693,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4635 int cpu_scratch_size, struct fw_info *info) 4693 int cpu_scratch_size, struct fw_info *info)
4636{ 4694{
4637 int err, i; 4695 int err, i;
4638 u32 orig_tg3_flags = tp->tg3_flags;
4639 void (*write_op)(struct tg3 *, u32, u32); 4696 void (*write_op)(struct tg3 *, u32, u32);
4640 4697
4641 if (cpu_base == TX_CPU_BASE && 4698 if (cpu_base == TX_CPU_BASE &&
@@ -4651,11 +4708,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4651 else 4708 else
4652 write_op = tg3_write_indirect_reg32; 4709 write_op = tg3_write_indirect_reg32;
4653 4710
4654 /* Force use of PCI config space for indirect register
4655 * write calls.
4656 */
4657 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4658
4659 /* It is possible that bootcode is still loading at this point. 4711 /* It is possible that bootcode is still loading at this point.
4660 * Get the nvram lock first before halting the cpu. 4712 * Get the nvram lock first before halting the cpu.
4661 */ 4713 */
@@ -4691,7 +4743,6 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4691 err = 0; 4743 err = 0;
4692 4744
4693out: 4745out:
4694 tp->tg3_flags = orig_tg3_flags;
4695 return err; 4746 return err;
4696} 4747}
4697 4748
@@ -5808,8 +5859,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5808 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 5859 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5809 udelay(100); 5860 udelay(100);
5810 5861
5811 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 5862 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5812 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5813 tp->last_tag = 0; 5863 tp->last_tag = 0;
5814 5864
5815 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5865 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
@@ -6198,7 +6248,8 @@ static int tg3_test_interrupt(struct tg3 *tp)
6198 HOSTCC_MODE_NOW); 6248 HOSTCC_MODE_NOW);
6199 6249
6200 for (i = 0; i < 5; i++) { 6250 for (i = 0; i < 5; i++) {
6201 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 6251 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6252 TG3_64BIT_REG_LOW);
6202 if (int_mbox != 0) 6253 if (int_mbox != 0)
6203 break; 6254 break;
6204 msleep(10); 6255 msleep(10);
@@ -6598,10 +6649,10 @@ static int tg3_open(struct net_device *dev)
6598 6649
6599 /* Mailboxes */ 6650 /* Mailboxes */
6600 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", 6651 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6601 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), 6652 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6602 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), 6653 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6603 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), 6654 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6604 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); 6655 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6605 6656
6606 /* NIC side send descriptors. */ 6657 /* NIC side send descriptors. */
6607 for (i = 0; i < 6; i++) { 6658 for (i = 0; i < 6; i++) {
@@ -7865,8 +7916,6 @@ static int tg3_test_loopback(struct tg3 *tp)
7865 7916
7866 err = -EIO; 7917 err = -EIO;
7867 7918
7868 tg3_abort_hw(tp, 1);
7869
7870 tg3_reset_hw(tp); 7919 tg3_reset_hw(tp);
7871 7920
7872 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7921 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
@@ -7903,7 +7952,7 @@ static int tg3_test_loopback(struct tg3 *tp)
7903 num_pkts++; 7952 num_pkts++;
7904 7953
7905 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx); 7954 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7906 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW); 7955 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7907 7956
7908 udelay(10); 7957 udelay(10);
7909 7958
@@ -9155,14 +9204,6 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9155static int __devinit tg3_get_invariants(struct tg3 *tp) 9204static int __devinit tg3_get_invariants(struct tg3 *tp)
9156{ 9205{
9157 static struct pci_device_id write_reorder_chipsets[] = { 9206 static struct pci_device_id write_reorder_chipsets[] = {
9158 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159 PCI_DEVICE_ID_INTEL_82801AA_8) },
9160 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9161 PCI_DEVICE_ID_INTEL_82801AB_8) },
9162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163 PCI_DEVICE_ID_INTEL_82801BA_11) },
9164 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9165 PCI_DEVICE_ID_INTEL_82801BA_6) },
9166 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9207 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9167 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9208 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9168 { }, 9209 { },
@@ -9179,7 +9220,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9179 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9220 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9180#endif 9221#endif
9181 9222
9182 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write 9223 /* If we have an AMD 762 chipset, write
9183 * reordering to the mailbox registers done by the host 9224 * reordering to the mailbox registers done by the host
9184 * controller can cause major troubles. We read back from 9225 * controller can cause major troubles. We read back from
9185 * every mailbox register write to force the writes to be 9226 * every mailbox register write to force the writes to be
@@ -9217,6 +9258,69 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9217 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 9258 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9218 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 9259 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9219 9260
9261 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9262 * we need to disable memory and use config. cycles
9263 * only to access all registers. The 5702/03 chips
9264 * can mistakenly decode the special cycles from the
9265 * ICH chipsets as memory write cycles, causing corruption
9266 * of register and memory space. Only certain ICH bridges
9267 * will drive special cycles with non-zero data during the
9268 * address phase which can fall within the 5703's address
9269 * range. This is not an ICH bug as the PCI spec allows
9270 * non-zero address during special cycles. However, only
9271 * these ICH bridges are known to drive non-zero addresses
9272 * during special cycles.
9273 *
9274 * Since special cycles do not cross PCI bridges, we only
9275 * enable this workaround if the 5703 is on the secondary
9276 * bus of these ICH bridges.
9277 */
9278 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9279 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9280 static struct tg3_dev_id {
9281 u32 vendor;
9282 u32 device;
9283 u32 rev;
9284 } ich_chipsets[] = {
9285 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9286 PCI_ANY_ID },
9287 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9288 PCI_ANY_ID },
9289 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9290 0xa },
9291 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9292 PCI_ANY_ID },
9293 { },
9294 };
9295 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9296 struct pci_dev *bridge = NULL;
9297
9298 while (pci_id->vendor != 0) {
9299 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9300 bridge);
9301 if (!bridge) {
9302 pci_id++;
9303 continue;
9304 }
9305 if (pci_id->rev != PCI_ANY_ID) {
9306 u8 rev;
9307
9308 pci_read_config_byte(bridge, PCI_REVISION_ID,
9309 &rev);
9310 if (rev > pci_id->rev)
9311 continue;
9312 }
9313 if (bridge->subordinate &&
9314 (bridge->subordinate->number ==
9315 tp->pdev->bus->number)) {
9316
9317 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9318 pci_dev_put(bridge);
9319 break;
9320 }
9321 }
9322 }
9323
9220 /* Find msi capability. */ 9324 /* Find msi capability. */
9221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9222 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9326 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
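The ICH detection loop above relies on the cursor behaviour of pci_get_device(): passing the previous hit back in resumes the search after it, and the function drops the reference held on the device passed in while taking one on the device it returns. That is why only the final, matching bridge needs an explicit pci_dev_put(). The bare idiom, with a hypothetical predicate:

    #include <linux/pci.h>

    /* Visit every instance of one vendor/device pair; the caller of
     * this sketch must pci_dev_put() a non-NULL result when done.
     */
    static struct pci_dev *example_find_bridge(unsigned int vendor,
                                               unsigned int device)
    {
            struct pci_dev *pdev = NULL;

            while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
                    if (is_wanted(pdev))    /* hypothetical predicate */
                            return pdev;
            }
            return NULL;
    }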
@@ -9304,6 +9408,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9304 } 9408 }
9305 } 9409 }
9306 9410
9411 /* 5700 BX chips need to have their TX producer index mailboxes
9412 * written twice to workaround a bug.
9413 */
9414 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9415 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9416
9307 /* Back to back register writes can cause problems on this chip, 9417 /* Back to back register writes can cause problems on this chip,
9308 * the workaround is to read back all reg writes except those to 9418 * the workaround is to read back all reg writes except those to
9309 * mailbox regs. See tg3_write_indirect_reg32(). 9419 * mailbox regs. See tg3_write_indirect_reg32().
@@ -9327,6 +9437,43 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9327 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 9437 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9328 } 9438 }
9329 9439
9440 /* Default fast path register access methods */
9441 tp->read32 = tg3_read32;
9442 tp->write32 = tg3_write32;
9443 tp->read32_mbox = tg3_read32;
9444 tp->write32_mbox = tg3_write32;
9445 tp->write32_tx_mbox = tg3_write32;
9446 tp->write32_rx_mbox = tg3_write32;
9447
9448 /* Various workaround register access methods */
9449 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9450 tp->write32 = tg3_write_indirect_reg32;
9451 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9452 tp->write32 = tg3_write_flush_reg32;
9453
9454 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9455 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9456 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9457 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9458 tp->write32_rx_mbox = tg3_write_flush_reg32;
9459 }
9460
9461 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9462 tp->read32 = tg3_read_indirect_reg32;
9463 tp->write32 = tg3_write_indirect_reg32;
9464 tp->read32_mbox = tg3_read_indirect_mbox;
9465 tp->write32_mbox = tg3_write_indirect_mbox;
9466 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9467 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9468
9469 iounmap(tp->regs);
9470 tp->regs = 0;
9471
9472 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9473 pci_cmd &= ~PCI_COMMAND_MEMORY;
9474 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9475 }
9476
9330 /* Get eeprom hw config before calling tg3_set_power_state(). 9477 /* Get eeprom hw config before calling tg3_set_power_state().
9331 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be 9478 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9332 * determined before calling tg3_set_power_state() so that 9479 * determined before calling tg3_set_power_state() so that
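The selection block installs the fast defaults first and lets each workaround overwrite them, with the ICH case last so it always wins — once it fires, the MMIO mapping is gone and config cycles are the only safe path. Its shape, condensed (the three flag parameters stand in for the tg3_flags tests above):

    static void example_select_methods(struct tg3 *tp, int pcix_target_bug,
                                       int reg_write_bug, int ich_workaround)
    {
            tp->write32 = tg3_write32;                      /* fast default */
            if (pcix_target_bug)
                    tp->write32 = tg3_write_indirect_reg32; /* config cycles */
            else if (reg_write_bug)
                    tp->write32 = tg3_write_flush_reg32;    /* read-back flush */
            if (ich_workaround)
                    tp->write32 = tg3_write_indirect_reg32; /* must win */
    }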
@@ -9541,14 +9688,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9541 else 9688 else
9542 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 9689 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9543 9690
9544 /* 5700 BX chips need to have their TX producer index mailboxes
9545 * written twice to workaround a bug.
9546 */
9547 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9548 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9549 else
9550 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9551
9552 /* It seems all chips can get confused if TX buffers 9691 /* It seems all chips can get confused if TX buffers
9553 * straddle the 4GB address boundary in some cases. 9692 * straddle the 4GB address boundary in some cases.
9554 */ 9693 */
@@ -10471,7 +10610,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10471 return 0; 10610 return 0;
10472 10611
10473err_out_iounmap: 10612err_out_iounmap:
10474 iounmap(tp->regs); 10613 if (tp->regs) {
10614 iounmap(tp->regs);
10615 tp->regs = 0;
10616 }
10475 10617
10476err_out_free_dev: 10618err_out_free_dev:
10477 free_netdev(dev); 10619 free_netdev(dev);
@@ -10493,7 +10635,10 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10493 struct tg3 *tp = netdev_priv(dev); 10635 struct tg3 *tp = netdev_priv(dev);
10494 10636
10495 unregister_netdev(dev); 10637 unregister_netdev(dev);
10496 iounmap(tp->regs); 10638 if (tp->regs) {
10639 iounmap(tp->regs);
10640 tp->regs = 0;
10641 }
10497 free_netdev(dev); 10642 free_netdev(dev);
10498 pci_release_regions(pdev); 10643 pci_release_regions(pdev);
10499 pci_disable_device(pdev); 10644 pci_disable_device(pdev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5c4433c147fa..c184b773e585 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2049,6 +2049,11 @@ struct tg3 {
2049 spinlock_t lock; 2049 spinlock_t lock;
2050 spinlock_t indirect_lock; 2050 spinlock_t indirect_lock;
2051 2051
2052 u32 (*read32) (struct tg3 *, u32);
2053 void (*write32) (struct tg3 *, u32, u32);
2054 u32 (*read32_mbox) (struct tg3 *, u32);
2055 void (*write32_mbox) (struct tg3 *, u32,
2056 u32);
2052 void __iomem *regs; 2057 void __iomem *regs;
2053 struct net_device *dev; 2058 struct net_device *dev;
2054 struct pci_dev *pdev; 2059 struct pci_dev *pdev;
@@ -2060,6 +2065,8 @@ struct tg3 {
2060 u32 msg_enable; 2065 u32 msg_enable;
2061 2066
2062 /* begin "tx thread" cacheline section */ 2067 /* begin "tx thread" cacheline section */
2068 void (*write32_tx_mbox) (struct tg3 *, u32,
2069 u32);
2063 u32 tx_prod; 2070 u32 tx_prod;
2064 u32 tx_cons; 2071 u32 tx_cons;
2065 u32 tx_pending; 2072 u32 tx_pending;
@@ -2071,6 +2078,8 @@ struct tg3 {
2071 dma_addr_t tx_desc_mapping; 2078 dma_addr_t tx_desc_mapping;
2072 2079
2073 /* begin "rx thread" cacheline section */ 2080 /* begin "rx thread" cacheline section */
2081 void (*write32_rx_mbox) (struct tg3 *, u32,
2082 u32);
2074 u32 rx_rcb_ptr; 2083 u32 rx_rcb_ptr;
2075 u32 rx_std_ptr; 2084 u32 rx_std_ptr;
2076 u32 rx_jumbo_ptr; 2085 u32 rx_jumbo_ptr;
@@ -2165,6 +2174,7 @@ struct tg3 {
2165#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \ 2174#define TG3_FLG2_ANY_SERDES (TG3_FLG2_PHY_SERDES | \
2166 TG3_FLG2_MII_SERDES) 2175 TG3_FLG2_MII_SERDES)
2167#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2176#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2177#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2168 2178
2169 u32 split_mode_max_reqs; 2179 u32 split_mode_max_reqs;
2170#define SPLIT_MODE_5704_MAX_REQ 3 2180#define SPLIT_MODE_5704_MAX_REQ 3
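Note where the new pointers land in struct tg3: write32_tx_mbox joins the members the driver comments as the "tx thread" cacheline and write32_rx_mbox the "rx thread" one, so each hot path finds its accessor on a line it already touches. tg3 gets this purely by member ordering; the same idea with explicit alignment, on a made-up structure:

    #include <linux/cache.h>

    struct example_nic {
            /* tx path */
            void (*tx_kick)(struct example_nic *, u32);
            u32 tx_prod;
            u32 tx_cons;

            /* rx path, forced onto a fresh cacheline on SMP */
            void (*rx_kick)(struct example_nic *, u32)
                    ____cacheline_aligned_in_smp;
            u32 rx_ptr;
    };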
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 7e99e9f8045e..e4cfc80b283b 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -84,7 +84,7 @@ config 3C359
84 84
85config TMS380TR 85config TMS380TR
86 tristate "Generic TMS380 Token Ring ISA/PCI adapter support" 86 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
87 depends on TR && (PCI || ISA && ISA_DMA_API) 87 depends on TR && (PCI || ISA && ISA_DMA_API || MCA)
88 select FW_LOADER 88 select FW_LOADER
89 ---help--- 89 ---help---
90 This driver provides generic support for token ring adapters 90 This driver provides generic support for token ring adapters
@@ -158,7 +158,7 @@ config ABYSS
158 158
159config MADGEMC 159config MADGEMC
160 tristate "Madge Smart 16/4 Ringnode MicroChannel" 160 tristate "Madge Smart 16/4 Ringnode MicroChannel"
161 depends on TR && TMS380TR && MCA_LEGACY 161 depends on TR && TMS380TR && MCA
162 help 162 help
163 This tms380 module supports the Madge Smart 16/4 MC16 and MC32 163 This tms380 module supports the Madge Smart 16/4 MC16 and MC32
164 MicroChannel adapters. 164 MicroChannel adapters.
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 87103c400999..9345e68c451e 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -139,7 +139,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
139 */ 139 */
140 dev->base_addr += 0x10; 140 dev->base_addr += 0x10;
141 141
142 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 142 ret = tmsdev_init(dev, &pdev->dev);
143 if (ret) { 143 if (ret) {
144 printk("%s: unable to get memory for dev->priv.\n", 144 printk("%s: unable to get memory for dev->priv.\n",
145 dev->name); 145 dev->name);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 659cbdbef7f3..3a25d191ea4a 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -20,7 +20,7 @@
20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; 20static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/mca-legacy.h> 23#include <linux/mca.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
@@ -38,9 +38,7 @@ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
38#define MADGEMC_IO_EXTENT 32 38#define MADGEMC_IO_EXTENT 32
39#define MADGEMC_SIF_OFFSET 0x08 39#define MADGEMC_SIF_OFFSET 0x08
40 40
41struct madgemc_card { 41struct card_info {
42 struct net_device *dev;
43
44 /* 42 /*
45 * These are read from the BIA ROM. 43 * These are read from the BIA ROM.
46 */ 44 */
@@ -57,16 +55,12 @@ struct madgemc_card {
57 unsigned int arblevel:4; 55 unsigned int arblevel:4;
58 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */ 56 unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
59 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */ 57 unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
60
61 struct madgemc_card *next;
62}; 58};
63static struct madgemc_card *madgemc_card_list;
64
65 59
66static int madgemc_open(struct net_device *dev); 60static int madgemc_open(struct net_device *dev);
67static int madgemc_close(struct net_device *dev); 61static int madgemc_close(struct net_device *dev);
68static int madgemc_chipset_init(struct net_device *dev); 62static int madgemc_chipset_init(struct net_device *dev);
69static void madgemc_read_rom(struct madgemc_card *card); 63static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
70static unsigned short madgemc_setnselout_pins(struct net_device *dev); 64static unsigned short madgemc_setnselout_pins(struct net_device *dev);
71static void madgemc_setcabletype(struct net_device *dev, int type); 65static void madgemc_setcabletype(struct net_device *dev, int type);
72 66
@@ -151,261 +145,237 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
151 145
152 146
153 147
154static int __init madgemc_probe(void) 148static int __devinit madgemc_probe(struct device *device)
155{ 149{
156 static int versionprinted; 150 static int versionprinted;
157 struct net_device *dev; 151 struct net_device *dev;
158 struct net_local *tp; 152 struct net_local *tp;
159 struct madgemc_card *card; 153 struct card_info *card;
160 int i,slot = 0; 154 struct mca_device *mdev = to_mca_device(device);
161 __u8 posreg[4]; 155 int ret = 0, i = 0;
162 156
163 if (!MCA_bus) 157 if (versionprinted++ == 0)
164 return -1; 158 printk("%s", version);
165 159
166 while (slot != MCA_NOTFOUND) { 160 if(mca_device_claimed(mdev))
167 /* 161 return -EBUSY;
168 * Currently we only support the MC16/32 (MCA ID 002d) 162 mca_device_set_claim(mdev, 1);
169 */ 163
170 slot = mca_find_unused_adapter(0x002d, slot); 164 dev = alloc_trdev(sizeof(struct net_local));
171 if (slot == MCA_NOTFOUND) 165 if (!dev) {
172 break; 166 printk("madgemc: unable to allocate dev space\n");
173 167 mca_device_set_claim(mdev, 0);
174 /* 168 ret = -ENOMEM;
175 * If we get here, we have an adapter. 169 goto getout;
176 */ 170 }
177 if (versionprinted++ == 0)
178 printk("%s", version);
179
180 dev = alloc_trdev(sizeof(struct net_local));
181 if (dev == NULL) {
182 printk("madgemc: unable to allocate dev space\n");
183 if (madgemc_card_list)
184 return 0;
185 return -1;
186 }
187 171
188 SET_MODULE_OWNER(dev); 172 SET_MODULE_OWNER(dev);
189 dev->dma = 0; 173 dev->dma = 0;
190 174
191 /* 175 card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
192 * Fetch MCA config registers 176 if (card==NULL) {
193 */ 177 printk("madgemc: unable to allocate card struct\n");
194 for(i=0;i<4;i++) 178 ret = -ENOMEM;
195 posreg[i] = mca_read_stored_pos(slot, i+2); 179 goto getout1;
196 180 }
197 card = kmalloc(sizeof(struct madgemc_card), GFP_KERNEL); 181
198 if (card==NULL) { 182 /*
199 printk("madgemc: unable to allocate card struct\n"); 183 * Parse configuration information. This all comes
200 free_netdev(dev); 184 * directly from the publicly available @002d.ADF.
201 if (madgemc_card_list) 185 * Get it from Madge or your local ADF library.
202 return 0; 186 */
203 return -1; 187
204 } 188 /*
205 card->dev = dev; 189 * Base address
206 190 */
207 /* 191 dev->base_addr = 0x0a20 +
208 * Parse configuration information. This all comes 192 ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
209 * directly from the publicly available @002d.ADF. 193 ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
210 * Get it from Madge or your local ADF library. 194 ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
211 */ 195
212 196 /*
213 /* 197 * Interrupt line
214 * Base address 198 */
215 */ 199 switch(mdev->pos[0] >> 6) { /* upper two bits */
216 dev->base_addr = 0x0a20 +
217 ((posreg[2] & MC16_POS2_ADDR2)?0x0400:0) +
218 ((posreg[0] & MC16_POS0_ADDR1)?0x1000:0) +
219 ((posreg[3] & MC16_POS3_ADDR3)?0x2000:0);
220
221 /*
222 * Interrupt line
223 */
224 switch(posreg[0] >> 6) { /* upper two bits */
225 case 0x1: dev->irq = 3; break; 200 case 0x1: dev->irq = 3; break;
226 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */ 201 case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
227 case 0x3: dev->irq = 10; break; 202 case 0x3: dev->irq = 10; break;
228 default: dev->irq = 0; break; 203 default: dev->irq = 0; break;
229 } 204 }
230 205
231 if (dev->irq == 0) { 206 if (dev->irq == 0) {
232 printk("%s: invalid IRQ\n", dev->name); 207 printk("%s: invalid IRQ\n", dev->name);
233 goto getout1; 208 ret = -EBUSY;
234 } 209 goto getout2;
210 }
235 211
236 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, 212 if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
237 "madgemc")) { 213 "madgemc")) {
238 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", slot, dev->base_addr); 214 printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
239 dev->base_addr += MADGEMC_SIF_OFFSET;
240 goto getout1;
241 }
242 dev->base_addr += MADGEMC_SIF_OFFSET; 215 dev->base_addr += MADGEMC_SIF_OFFSET;
216 ret = -EBUSY;
217 goto getout2;
218 }
219 dev->base_addr += MADGEMC_SIF_OFFSET;
220
221 /*
222 * Arbitration Level
223 */
224 card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
225
226 /*
227 * Burst mode and Fairness
228 */
229 card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
230 card->fairness = ((mdev->pos[2] >> 4) & 0x1);
231
232 /*
233 * Ring Speed
234 */
235 if ((mdev->pos[1] >> 2)&0x1)
236 card->ringspeed = 2; /* not selected */
237 else if ((mdev->pos[2] >> 5) & 0x1)
238 card->ringspeed = 1; /* 16Mb */
239 else
240 card->ringspeed = 0; /* 4Mb */
241
242 /*
243 * Cable type
244 */
245 if ((mdev->pos[1] >> 6)&0x1)
246 card->cabletype = 1; /* STP/DB9 */
247 else
248 card->cabletype = 0; /* UTP/RJ-45 */
249
250
251 /*
252 * ROM Info. This requires us to actually twiddle
253 * bits on the card, so we must ensure above that
254 * the base address is free of conflict (request_region above).
255 */
256 madgemc_read_rom(dev, card);
243 257
244 /* 258 if (card->manid != 0x4d) { /* something went wrong */
245 * Arbitration Level 259 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
246 */ 260 goto getout3;
247 card->arblevel = ((posreg[0] >> 1) & 0x7) + 8; 261 }
248
249 /*
250 * Burst mode and Fairness
251 */
252 card->burstmode = ((posreg[2] >> 6) & 0x3);
253 card->fairness = ((posreg[2] >> 4) & 0x1);
254
255 /*
256 * Ring Speed
257 */
258 if ((posreg[1] >> 2)&0x1)
259 card->ringspeed = 2; /* not selected */
260 else if ((posreg[2] >> 5) & 0x1)
261 card->ringspeed = 1; /* 16Mb */
262 else
263 card->ringspeed = 0; /* 4Mb */
264
265 /*
266 * Cable type
267 */
268 if ((posreg[1] >> 6)&0x1)
269 card->cabletype = 1; /* STP/DB9 */
270 else
271 card->cabletype = 0; /* UTP/RJ-45 */
272
273
274 /*
275 * ROM Info. This requires us to actually twiddle
276 * bits on the card, so we must ensure above that
277 * the base address is free of conflict (request_region above).
278 */
279 madgemc_read_rom(card);
280
281 if (card->manid != 0x4d) { /* something went wrong */
282 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
283 goto getout;
284 }
285 262
286 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) { 263 if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
287 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype); 264 printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
288 goto getout; 265 ret = -EIO;
289 } 266 goto getout3;
267 }
290 268
291 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */ 269 /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
292 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01)) 270 if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
293 card->ramsize = 128; 271 card->ramsize = 128;
294 else 272 else
295 card->ramsize = 256; 273 card->ramsize = 256;
296 274
297 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", 275 printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
298 dev->name, 276 dev->name,
299 (card->cardtype == 0x08)?MADGEMC16_CARDNAME: 277 (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
300 MADGEMC32_CARDNAME, card->cardrev, 278 MADGEMC32_CARDNAME, card->cardrev,
301 dev->base_addr, dev->irq); 279 dev->base_addr, dev->irq);
302 280
303 if (card->cardtype == 0x0d) 281 if (card->cardtype == 0x0d)
304 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name); 282 printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
305 283
306 if (card->ringspeed==2) { /* Unknown */ 284 if (card->ringspeed==2) { /* Unknown */
307 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name); 285 printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
308 card->ringspeed = 1; /* default to 16mb */ 286 card->ringspeed = 1; /* default to 16mb */
309 } 287 }
310 288
311 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize); 289 printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
312 290
313 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name, 291 printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
314 (card->ringspeed)?16:4, 292 (card->ringspeed)?16:4,
315 card->cabletype?"STP/DB9":"UTP/RJ-45"); 293 card->cabletype?"STP/DB9":"UTP/RJ-45");
316 printk("%s: Arbitration Level: %d\n", dev->name, 294 printk("%s: Arbitration Level: %d\n", dev->name,
317 card->arblevel); 295 card->arblevel);
318 296
319 printk("%s: Burst Mode: ", dev->name); 297 printk("%s: Burst Mode: ", dev->name);
320 switch(card->burstmode) { 298 switch(card->burstmode) {
321 case 0: printk("Cycle steal"); break; 299 case 0: printk("Cycle steal"); break;
322 case 1: printk("Limited burst"); break; 300 case 1: printk("Limited burst"); break;
323 case 2: printk("Delayed release"); break; 301 case 2: printk("Delayed release"); break;
324 case 3: printk("Immediate release"); break; 302 case 3: printk("Immediate release"); break;
325 } 303 }
326 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair"); 304 printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
327
328
329 /*
330 * Enable SIF before we assign the interrupt handler,
331 * just in case we get spurious interrupts that need
332 * handling.
333 */
334 outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
335 madgemc_setsifsel(dev, 1);
336 if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
337 "madgemc", dev))
338 goto getout;
339
340 madgemc_chipset_init(dev); /* enables interrupts! */
341 madgemc_setcabletype(dev, card->cabletype);
342 305
343 /* Setup MCA structures */
344 mca_set_adapter_name(slot, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
345 mca_set_adapter_procfn(slot, madgemc_mcaproc, dev);
346 mca_mark_as_used(slot);
347 306
348 printk("%s: Ring Station Address: ", dev->name); 307 /*
349 printk("%2.2x", dev->dev_addr[0]); 308 * Enable SIF before we assign the interrupt handler,
350 for (i = 1; i < 6; i++) 309 * just in case we get spurious interrupts that need
351 printk(":%2.2x", dev->dev_addr[i]); 310 * handling.
352 printk("\n"); 311 */
353 312 outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
354 /* XXX is ISA_MAX_ADDRESS correct here? */ 313 madgemc_setsifsel(dev, 1);
355 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) { 314 if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
356 printk("%s: unable to get memory for dev->priv.\n", 315 "madgemc", dev)) {
357 dev->name); 316 ret = -EBUSY;
358 release_region(dev->base_addr-MADGEMC_SIF_OFFSET, 317 goto getout3;
359 MADGEMC_IO_EXTENT);
360
361 kfree(card);
362 tmsdev_term(dev);
363 free_netdev(dev);
364 if (madgemc_card_list)
365 return 0;
366 return -1;
367 }
368 tp = netdev_priv(dev);
369
370 /*
371 * The MC16 is physically a 32bit card. However, Madge
372 * insists on calling it 16bit, so I'll assume here that
373 * they know what they're talking about. Cut off DMA
374 * at 16mb.
375 */
376 tp->setnselout = madgemc_setnselout_pins;
377 tp->sifwriteb = madgemc_sifwriteb;
378 tp->sifreadb = madgemc_sifreadb;
379 tp->sifwritew = madgemc_sifwritew;
380 tp->sifreadw = madgemc_sifreadw;
381 tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
382
383 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
384
385 dev->open = madgemc_open;
386 dev->stop = madgemc_close;
387
388 if (register_netdev(dev) == 0) {
389 /* Enlist in the card list */
390 card->next = madgemc_card_list;
391 madgemc_card_list = card;
392 slot++;
393 continue; /* successful, try to find another */
394 }
395
396 free_irq(dev->irq, dev);
397 getout:
398 release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
399 MADGEMC_IO_EXTENT);
400 getout1:
401 kfree(card);
402 free_netdev(dev);
403 slot++;
404 } 318 }
405 319
406 if (madgemc_card_list) 320 madgemc_chipset_init(dev); /* enables interrupts! */
321 madgemc_setcabletype(dev, card->cabletype);
322
323 /* Setup MCA structures */
324 mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
325 mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
326
327 printk("%s: Ring Station Address: ", dev->name);
328 printk("%2.2x", dev->dev_addr[0]);
329 for (i = 1; i < 6; i++)
330 printk(":%2.2x", dev->dev_addr[i]);
331 printk("\n");
332
333 if (tmsdev_init(dev, device)) {
334 printk("%s: unable to get memory for dev->priv.\n",
335 dev->name);
336 ret = -ENOMEM;
337 goto getout4;
338 }
339 tp = netdev_priv(dev);
340
341 /*
342 * The MC16 is physically a 32bit card. However, Madge
343 * insists on calling it 16bit, so I'll assume here that
344 * they know what they're talking about. Cut off DMA
345 * at 16mb.
346 */
347 tp->setnselout = madgemc_setnselout_pins;
348 tp->sifwriteb = madgemc_sifwriteb;
349 tp->sifreadb = madgemc_sifreadb;
350 tp->sifwritew = madgemc_sifwritew;
351 tp->sifreadw = madgemc_sifreadw;
352 tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
353
354 memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
355
356 dev->open = madgemc_open;
357 dev->stop = madgemc_close;
358
359 tp->tmspriv = card;
360 dev_set_drvdata(device, dev);
361
362 if (register_netdev(dev) == 0)
407 return 0; 363 return 0;
408 return -1; 364
365 dev_set_drvdata(device, NULL);
366 ret = -ENOMEM;
367getout4:
368 free_irq(dev->irq, dev);
369getout3:
370 release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
371 MADGEMC_IO_EXTENT);
372getout2:
373 kfree(card);
374getout1:
375 free_netdev(dev);
376getout:
377 mca_device_set_claim(mdev, 0);
378 return ret;
409} 379}
410 380
411/* 381/*
@@ -664,12 +634,12 @@ static void madgemc_chipset_close(struct net_device *dev)
664 * is complete. 634 * is complete.
665 * 635 *
666 */ 636 */
667static void madgemc_read_rom(struct madgemc_card *card) 637static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
668{ 638{
669 unsigned long ioaddr; 639 unsigned long ioaddr;
670 unsigned char reg0, reg1, tmpreg0, i; 640 unsigned char reg0, reg1, tmpreg0, i;
671 641
672 ioaddr = card->dev->base_addr; 642 ioaddr = dev->base_addr;
673 643
674 reg0 = inb(ioaddr + MC_CONTROL_REG0); 644 reg0 = inb(ioaddr + MC_CONTROL_REG0);
675 reg1 = inb(ioaddr + MC_CONTROL_REG1); 645 reg1 = inb(ioaddr + MC_CONTROL_REG1);
@@ -686,9 +656,9 @@ static void madgemc_read_rom(struct madgemc_card *card)
686 outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0); 656 outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
687 657
688 /* Read BIA */ 658 /* Read BIA */
689 card->dev->addr_len = 6; 659 dev->addr_len = 6;
690 for (i = 0; i < 6; i++) 660 for (i = 0; i < 6; i++)
691 card->dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i); 661 dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
692 662
693 /* Restore original register values */ 663 /* Restore original register values */
694 outb(reg0, ioaddr + MC_CONTROL_REG0); 664 outb(reg0, ioaddr + MC_CONTROL_REG0);
@@ -721,14 +691,10 @@ static int madgemc_close(struct net_device *dev)
721static int madgemc_mcaproc(char *buf, int slot, void *d) 691static int madgemc_mcaproc(char *buf, int slot, void *d)
722{ 692{
723 struct net_device *dev = (struct net_device *)d; 693 struct net_device *dev = (struct net_device *)d;
724 struct madgemc_card *curcard = madgemc_card_list; 694 struct net_local *tp = dev->priv;
695 struct card_info *curcard = tp->tmspriv;
725 int len = 0; 696 int len = 0;
726 697
727 while (curcard) { /* search for card struct */
728 if (curcard->dev == dev)
729 break;
730 curcard = curcard->next;
731 }
732 len += sprintf(buf+len, "-------\n"); 698 len += sprintf(buf+len, "-------\n");
733 if (curcard) { 699 if (curcard) {
734 struct net_local *tp = netdev_priv(dev); 700 struct net_local *tp = netdev_priv(dev);
@@ -763,25 +729,56 @@ static int madgemc_mcaproc(char *buf, int slot, void *d)
763 return len; 729 return len;
764} 730}
765 731
766static void __exit madgemc_exit(void) 732static int __devexit madgemc_remove(struct device *device)
767{ 733{
768 struct net_device *dev; 734 struct net_device *dev = dev_get_drvdata(device);
769 struct madgemc_card *this_card; 735 struct net_local *tp;
770 736 struct card_info *card;
771 while (madgemc_card_list) { 737
772 dev = madgemc_card_list->dev; 738 if (!dev)
773 unregister_netdev(dev); 739 BUG();
774 release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT); 740
775 free_irq(dev->irq, dev); 741 tp = dev->priv;
776 tmsdev_term(dev); 742 card = tp->tmspriv;
777 free_netdev(dev); 743 kfree(card);
778 this_card = madgemc_card_list; 744 tp->tmspriv = NULL;
779 madgemc_card_list = this_card->next; 745
780 kfree(this_card); 746 unregister_netdev(dev);
781 } 747 release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
748 free_irq(dev->irq, dev);
749 tmsdev_term(dev);
750 free_netdev(dev);
751 dev_set_drvdata(device, NULL);
752
753 return 0;
754}
755
756static short madgemc_adapter_ids[] __initdata = {
757 0x002d,
758 0x0000
759};
760
761static struct mca_driver madgemc_driver = {
762 .id_table = madgemc_adapter_ids,
763 .driver = {
764 .name = "madgemc",
765 .bus = &mca_bus_type,
766 .probe = madgemc_probe,
767 .remove = __devexit_p(madgemc_remove),
768 },
769};
770
771static int __init madgemc_init (void)
772{
773 return mca_register_driver (&madgemc_driver);
774}
775
776static void __exit madgemc_exit (void)
777{
778 mca_unregister_driver (&madgemc_driver);
782} 779}
783 780
784module_init(madgemc_probe); 781module_init(madgemc_init);
785module_exit(madgemc_exit); 782module_exit(madgemc_exit);
786 783
787MODULE_LICENSE("GPL"); 784MODULE_LICENSE("GPL");
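The madgemc conversion replaces the private card list with the driver-model idiom: probe() parks the net_device on the generic struct device with dev_set_drvdata(), and remove() recovers it with dev_get_drvdata(), so per-card state needs no global bookkeeping. Reduced to a skeleton (names are illustrative):

    #include <linux/device.h>
    #include <linux/netdevice.h>

    static int example_probe(struct device *device)
    {
            struct net_device *dev = alloc_trdev(sizeof(struct net_local));

            if (!dev)
                    return -ENOMEM;
            /* ... discover and initialise the hardware ... */
            dev_set_drvdata(device, dev);
            return register_netdev(dev);
    }

    static int example_remove(struct device *device)
    {
            struct net_device *dev = dev_get_drvdata(device);

            unregister_netdev(dev);
            dev_set_drvdata(device, NULL);
            free_netdev(dev);
            return 0;
    }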
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 40ad0fde28af..eb1423ede75c 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -62,8 +62,7 @@ static int dmalist[] __initdata = {
62}; 62};
63 63
64static char cardname[] = "Proteon 1392\0"; 64static char cardname[] = "Proteon 1392\0";
65 65static u64 dma_mask = ISA_MAX_ADDRESS;
66struct net_device *proteon_probe(int unit);
67static int proteon_open(struct net_device *dev); 66static int proteon_open(struct net_device *dev);
68static void proteon_read_eeprom(struct net_device *dev); 67static void proteon_read_eeprom(struct net_device *dev);
69static unsigned short proteon_setnselout_pins(struct net_device *dev); 68static unsigned short proteon_setnselout_pins(struct net_device *dev);
@@ -116,7 +115,7 @@ nodev:
116 return -ENODEV; 115 return -ENODEV;
117} 116}
118 117
119static int __init setup_card(struct net_device *dev) 118static int __init setup_card(struct net_device *dev, struct device *pdev)
120{ 119{
121 struct net_local *tp; 120 struct net_local *tp;
122 static int versionprinted; 121 static int versionprinted;
@@ -137,7 +136,7 @@ static int __init setup_card(struct net_device *dev)
137 } 136 }
138 } 137 }
139 if (err) 138 if (err)
140 goto out4; 139 goto out5;
141 140
142 /* At this point we have found a valid card. */ 141 /* At this point we have found a valid card. */
143 142
@@ -145,14 +144,15 @@ static int __init setup_card(struct net_device *dev)
145 printk(KERN_DEBUG "%s", version); 144 printk(KERN_DEBUG "%s", version);
146 145
147 err = -EIO; 146 err = -EIO;
148 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 147 pdev->dma_mask = &dma_mask;
148 if (tmsdev_init(dev, pdev))
149 goto out4; 149 goto out4;
150 150
151 dev->base_addr &= ~3; 151 dev->base_addr &= ~3;
152 152
153 proteon_read_eeprom(dev); 153 proteon_read_eeprom(dev);
154 154
155 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 155 printk(KERN_DEBUG "proteon.c: Ring Station Address: ");
156 printk("%2.2x", dev->dev_addr[0]); 156 printk("%2.2x", dev->dev_addr[0]);
157 for (j = 1; j < 6; j++) 157 for (j = 1; j < 6; j++)
158 printk(":%2.2x", dev->dev_addr[j]); 158 printk(":%2.2x", dev->dev_addr[j]);
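Both ISA conversions in this patch hand tmsdev_init() a struct device and first point its dma_mask at ISA_MAX_ADDRESS, presumably so the reworked tms380tr core can use the generic DMA API while still being told the card only reaches the 24-bit (16MB) ISA window. The shape of that setup (names are stand-ins):

    #include <linux/device.h>

    /* A file-scope mask object the device can point at; 24 bits for ISA. */
    static u64 example_isa_dma_mask = 0x00ffffffULL;

    static void example_attach_mask(struct device *dev)
    {
            dev->dma_mask = &example_isa_dma_mask;
    }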
@@ -185,7 +185,7 @@ static int __init setup_card(struct net_device *dev)
185 185
186 if(irqlist[j] == 0) 186 if(irqlist[j] == 0)
187 { 187 {
188 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 188 printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
189 goto out3; 189 goto out3;
190 } 190 }
191 } 191 }
@@ -196,15 +196,15 @@ static int __init setup_card(struct net_device *dev)
196 break; 196 break;
197 if (irqlist[j] == 0) 197 if (irqlist[j] == 0)
198 { 198 {
199 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 199 printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
200 dev->name, dev->irq); 200 dev->irq);
201 goto out3; 201 goto out3;
202 } 202 }
203 if (request_irq(dev->irq, tms380tr_interrupt, 0, 203 if (request_irq(dev->irq, tms380tr_interrupt, 0,
204 cardname, dev)) 204 cardname, dev))
205 { 205 {
206 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 206 printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
207 dev->name, dev->irq); 207 dev->irq);
208 goto out3; 208 goto out3;
209 } 209 }
210 } 210 }
@@ -220,7 +220,7 @@ static int __init setup_card(struct net_device *dev)
220 220
221 if(dmalist[j] == 0) 221 if(dmalist[j] == 0)
222 { 222 {
223 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 223 printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
224 goto out2; 224 goto out2;
225 } 225 }
226 } 226 }
@@ -231,25 +231,25 @@ static int __init setup_card(struct net_device *dev)
231 break; 231 break;
232 if (dmalist[j] == 0) 232 if (dmalist[j] == 0)
233 { 233 {
234 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 234 printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
235 dev->name, dev->dma); 235 dev->dma);
236 goto out2; 236 goto out2;
237 } 237 }
238 if (request_dma(dev->dma, cardname)) 238 if (request_dma(dev->dma, cardname))
239 { 239 {
240 printk(KERN_INFO "%s: Selected DMA %d not available\n", 240 printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
241 dev->name, dev->dma); 241 dev->dma);
242 goto out2; 242 goto out2;
243 } 243 }
244 } 244 }
245 245
246 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
247 dev->name, dev->base_addr, dev->irq, dev->dma);
248
249 err = register_netdev(dev); 246 err = register_netdev(dev);
250 if (err) 247 if (err)
251 goto out; 248 goto out;
252 249
250 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
251 dev->name, dev->base_addr, dev->irq, dev->dma);
252
253 return 0; 253 return 0;
254out: 254out:
255 free_dma(dev->dma); 255 free_dma(dev->dma);
@@ -258,34 +258,11 @@ out2:
258out3: 258out3:
259 tmsdev_term(dev); 259 tmsdev_term(dev);
260out4: 260out4:
261 release_region(dev->base_addr, PROTEON_IO_EXTENT); 261 release_region(dev->base_addr, PROTEON_IO_EXTENT);
262out5:
262 return err; 263 return err;
263} 264}
264 265
265struct net_device * __init proteon_probe(int unit)
266{
267 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
268 int err = 0;
269
270 if (!dev)
271 return ERR_PTR(-ENOMEM);
272
273 if (unit >= 0) {
274 sprintf(dev->name, "tr%d", unit);
275 netdev_boot_setup_check(dev);
276 }
277
278 err = setup_card(dev);
279 if (err)
280 goto out;
281
282 return dev;
283
284out:
285 free_netdev(dev);
286 return ERR_PTR(err);
287}
288
289/* 266/*
290 * Reads MAC address from adapter RAM, which should've read it from 267 * Reads MAC address from adapter RAM, which should've read it from
291 * the onboard ROM. 268 * the onboard ROM.
@@ -352,8 +329,6 @@ static int proteon_open(struct net_device *dev)
352 return tms380tr_open(dev); 329 return tms380tr_open(dev);
353} 330}
354 331
355#ifdef MODULE
356
357#define ISATR_MAX_ADAPTERS 3 332#define ISATR_MAX_ADAPTERS 3
358 333
359static int io[ISATR_MAX_ADAPTERS]; 334static int io[ISATR_MAX_ADAPTERS];
@@ -366,13 +341,23 @@ module_param_array(io, int, NULL, 0);
366module_param_array(irq, int, NULL, 0); 341module_param_array(irq, int, NULL, 0);
367module_param_array(dma, int, NULL, 0); 342module_param_array(dma, int, NULL, 0);
368 343
369static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS]; 344static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
345
346static struct device_driver proteon_driver = {
347 .name = "proteon",
348 .bus = &platform_bus_type,
349};
370 350
371int init_module(void) 351static int __init proteon_init(void)
372{ 352{
373 struct net_device *dev; 353 struct net_device *dev;
354 struct platform_device *pdev;
374 int i, num = 0, err = 0; 355 int i, num = 0, err = 0;
375 356
357 err = driver_register(&proteon_driver);
358 if (err)
359 return err;
360
376 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 361 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
377 dev = alloc_trdev(sizeof(struct net_local)); 362 dev = alloc_trdev(sizeof(struct net_local));
378 if (!dev) 363 if (!dev)
@@ -381,11 +366,15 @@ int init_module(void)
381 dev->base_addr = io[i]; 366 dev->base_addr = io[i];
382 dev->irq = irq[i]; 367 dev->irq = irq[i];
383 dev->dma = dma[i]; 368 dev->dma = dma[i];
384 err = setup_card(dev); 369 pdev = platform_device_register_simple("proteon",
370 i, NULL, 0);
371 err = setup_card(dev, &pdev->dev);
385 if (!err) { 372 if (!err) {
386 proteon_dev[i] = dev; 373 proteon_dev[i] = pdev;
374 dev_set_drvdata(&pdev->dev, dev);
387 ++num; 375 ++num;
388 } else { 376 } else {
377 platform_device_unregister(pdev);
389 free_netdev(dev); 378 free_netdev(dev);
390 } 379 }
391 } 380 }
@@ -399,23 +388,28 @@ int init_module(void)
399 return (0); 388 return (0);
400} 389}
401 390
402void cleanup_module(void) 391static void __exit proteon_cleanup(void)
403{ 392{
393 struct net_device *dev;
404 int i; 394 int i;
405 395
406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 396 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
407 struct net_device *dev = proteon_dev[i]; 397 struct platform_device *pdev = proteon_dev[i];
408 398
409 if (!dev) 399 if (!pdev)
410 continue; 400 continue;
411 401 dev = dev_get_drvdata(&pdev->dev);
412 unregister_netdev(dev); 402 unregister_netdev(dev);
413 release_region(dev->base_addr, PROTEON_IO_EXTENT); 403 release_region(dev->base_addr, PROTEON_IO_EXTENT);
414 free_irq(dev->irq, dev); 404 free_irq(dev->irq, dev);
415 free_dma(dev->dma); 405 free_dma(dev->dma);
416 tmsdev_term(dev); 406 tmsdev_term(dev);
417 free_netdev(dev); 407 free_netdev(dev);
408 dev_set_drvdata(&pdev->dev, NULL);
409 platform_device_unregister(pdev);
418 } 410 }
411 driver_unregister(&proteon_driver);
419} 412}
420#endif /* MODULE */
421 413
414module_init(proteon_init);
415module_exit(proteon_cleanup);
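proteon.c takes the same driver-model route, but ISA hardware cannot be enumerated by any bus, so the init path manufactures one platform device per probed card with platform_device_register_simple() and hangs the net_device off it as drvdata. The bare pattern (names are illustrative; error handling trimmed to the essentials):

    #include <linux/device.h>
    #include <linux/err.h>

    static struct device_driver example_driver = {
            .name = "example",
            .bus  = &platform_bus_type,
    };

    static int __init example_init(void)
    {
            struct platform_device *pdev;
            int err;

            err = driver_register(&example_driver);
            if (err)
                    return err;

            pdev = platform_device_register_simple("example", 0, NULL, 0);
            if (IS_ERR(pdev)) {
                    driver_unregister(&example_driver);
                    return PTR_ERR(pdev);
            }
            /* probe the card here, then publish it:
             * dev_set_drvdata(&pdev->dev, netdev);
             */
            return 0;
    }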
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index f26796e2d0e5..3c7c66204f74 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -68,8 +68,7 @@ static int dmalist[] __initdata = {
68}; 68};
69 69
70static char isa_cardname[] = "SK NET TR 4/16 ISA\0"; 70static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
71 71static u64 dma_mask = ISA_MAX_ADDRESS;
72struct net_device *sk_isa_probe(int unit);
73static int sk_isa_open(struct net_device *dev); 72static int sk_isa_open(struct net_device *dev);
74static void sk_isa_read_eeprom(struct net_device *dev); 73static void sk_isa_read_eeprom(struct net_device *dev);
75static unsigned short sk_isa_setnselout_pins(struct net_device *dev); 74static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
@@ -133,7 +132,7 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
133 return 0; 132 return 0;
134} 133}
135 134
136static int __init setup_card(struct net_device *dev) 135static int __init setup_card(struct net_device *dev, struct device *pdev)
137{ 136{
138 struct net_local *tp; 137 struct net_local *tp;
139 static int versionprinted; 138 static int versionprinted;
@@ -154,7 +153,7 @@ static int __init setup_card(struct net_device *dev)
154 } 153 }
155 } 154 }
156 if (err) 155 if (err)
157 goto out4; 156 goto out5;
158 157
159 /* At this point we have found a valid card. */ 158 /* At this point we have found a valid card. */
160 159
@@ -162,14 +161,15 @@ static int __init setup_card(struct net_device *dev)
162 printk(KERN_DEBUG "%s", version); 161 printk(KERN_DEBUG "%s", version);
163 162
164 err = -EIO; 163 err = -EIO;
165 if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) 164 pdev->dma_mask = &dma_mask;
165 if (tmsdev_init(dev, pdev))
166 goto out4; 166 goto out4;
167 167
168 dev->base_addr &= ~3; 168 dev->base_addr &= ~3;
169 169
170 sk_isa_read_eeprom(dev); 170 sk_isa_read_eeprom(dev);
171 171
172 printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name); 172 printk(KERN_DEBUG "skisa.c: Ring Station Address: ");
173 printk("%2.2x", dev->dev_addr[0]); 173 printk("%2.2x", dev->dev_addr[0]);
174 for (j = 1; j < 6; j++) 174 for (j = 1; j < 6; j++)
175 printk(":%2.2x", dev->dev_addr[j]); 175 printk(":%2.2x", dev->dev_addr[j]);
@@ -202,7 +202,7 @@ static int __init setup_card(struct net_device *dev)
202 202
203 if(irqlist[j] == 0) 203 if(irqlist[j] == 0)
204 { 204 {
205 printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name); 205 printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
206 goto out3; 206 goto out3;
207 } 207 }
208 } 208 }
@@ -213,15 +213,15 @@ static int __init setup_card(struct net_device *dev)
213 break; 213 break;
214 if (irqlist[j] == 0) 214 if (irqlist[j] == 0)
215 { 215 {
216 printk(KERN_INFO "%s: Illegal IRQ %d specified\n", 216 printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
217 dev->name, dev->irq); 217 dev->irq);
218 goto out3; 218 goto out3;
219 } 219 }
220 if (request_irq(dev->irq, tms380tr_interrupt, 0, 220 if (request_irq(dev->irq, tms380tr_interrupt, 0,
221 isa_cardname, dev)) 221 isa_cardname, dev))
222 { 222 {
223 printk(KERN_INFO "%s: Selected IRQ %d not available\n", 223 printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
224 dev->name, dev->irq); 224 dev->irq);
225 goto out3; 225 goto out3;
226 } 226 }
227 } 227 }
@@ -237,7 +237,7 @@ static int __init setup_card(struct net_device *dev)
237 237
238 if(dmalist[j] == 0) 238 if(dmalist[j] == 0)
239 { 239 {
240 printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name); 240 printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
241 goto out2; 241 goto out2;
242 } 242 }
243 } 243 }
@@ -248,25 +248,25 @@ static int __init setup_card(struct net_device *dev)
248 break; 248 break;
249 if (dmalist[j] == 0) 249 if (dmalist[j] == 0)
250 { 250 {
251 printk(KERN_INFO "%s: Illegal DMA %d specified\n", 251 printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
252 dev->name, dev->dma); 252 dev->dma);
253 goto out2; 253 goto out2;
254 } 254 }
255 if (request_dma(dev->dma, isa_cardname)) 255 if (request_dma(dev->dma, isa_cardname))
256 { 256 {
257 printk(KERN_INFO "%s: Selected DMA %d not available\n", 257 printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
258 dev->name, dev->dma); 258 dev->dma);
259 goto out2; 259 goto out2;
260 } 260 }
261 } 261 }
262 262
263 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
264 dev->name, dev->base_addr, dev->irq, dev->dma);
265
266 err = register_netdev(dev); 263 err = register_netdev(dev);
267 if (err) 264 if (err)
268 goto out; 265 goto out;
269 266
267 printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
268 dev->name, dev->base_addr, dev->irq, dev->dma);
269
270 return 0; 270 return 0;
271out: 271out:
272 free_dma(dev->dma); 272 free_dma(dev->dma);
@@ -275,33 +275,11 @@ out2:
275out3: 275out3:
276 tmsdev_term(dev); 276 tmsdev_term(dev);
277out4: 277out4:
278 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 278 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
279out5:
279 return err; 280 return err;
280} 281}
281 282
282struct net_device * __init sk_isa_probe(int unit)
283{
284 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
285 int err = 0;
286
287 if (!dev)
288 return ERR_PTR(-ENOMEM);
289
290 if (unit >= 0) {
291 sprintf(dev->name, "tr%d", unit);
292 netdev_boot_setup_check(dev);
293 }
294
295 err = setup_card(dev);
296 if (err)
297 goto out;
298
299 return dev;
300out:
301 free_netdev(dev);
302 return ERR_PTR(err);
303}
304
305/* 283/*
306 * Reads MAC address from adapter RAM, which should've read it from 284 * Reads MAC address from adapter RAM, which should've read it from
307 * the onboard ROM. 285 * the onboard ROM.
@@ -361,8 +339,6 @@ static int sk_isa_open(struct net_device *dev)
361 return tms380tr_open(dev); 339 return tms380tr_open(dev);
362} 340}
363 341
364#ifdef MODULE
365
366#define ISATR_MAX_ADAPTERS 3 342#define ISATR_MAX_ADAPTERS 3
367 343
368static int io[ISATR_MAX_ADAPTERS]; 344static int io[ISATR_MAX_ADAPTERS];
@@ -375,13 +351,23 @@ module_param_array(io, int, NULL, 0);
375module_param_array(irq, int, NULL, 0); 351module_param_array(irq, int, NULL, 0);
376module_param_array(dma, int, NULL, 0); 352module_param_array(dma, int, NULL, 0);
377 353
378static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS]; 354static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
379 355
380int init_module(void) 356static struct device_driver sk_isa_driver = {
357 .name = "skisa",
358 .bus = &platform_bus_type,
359};
360
361static int __init sk_isa_init(void)
381{ 362{
382 struct net_device *dev; 363 struct net_device *dev;
364 struct platform_device *pdev;
383 int i, num = 0, err = 0; 365 int i, num = 0, err = 0;
384 366
367 err = driver_register(&sk_isa_driver);
368 if (err)
369 return err;
370
385 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 371 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
386 dev = alloc_trdev(sizeof(struct net_local)); 372 dev = alloc_trdev(sizeof(struct net_local));
387 if (!dev) 373 if (!dev)
@@ -390,12 +376,15 @@ int init_module(void)
390 dev->base_addr = io[i]; 376 dev->base_addr = io[i];
391 dev->irq = irq[i]; 377 dev->irq = irq[i];
392 dev->dma = dma[i]; 378 dev->dma = dma[i];
393 err = setup_card(dev); 379 pdev = platform_device_register_simple("skisa",
394 380 i, NULL, 0);
381 err = setup_card(dev, &pdev->dev);
395 if (!err) { 382 if (!err) {
396 sk_isa_dev[i] = dev; 383 sk_isa_dev[i] = pdev;
384 dev_set_drvdata(&sk_isa_dev[i]->dev, dev);
397 ++num; 385 ++num;
398 } else { 386 } else {
387 platform_device_unregister(pdev);
399 free_netdev(dev); 388 free_netdev(dev);
400 } 389 }
401 } 390 }
@@ -409,23 +398,28 @@ int init_module(void)
409 return (0); 398 return (0);
410} 399}
411 400
412void cleanup_module(void) 401static void __exit sk_isa_cleanup(void)
413{ 402{
403 struct net_device *dev;
414 int i; 404 int i;
415 405
416 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) { 406 for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
417 struct net_device *dev = sk_isa_dev[i]; 407 struct platform_device *pdev = sk_isa_dev[i];
418 408
419 if (!dev) 409 if (!pdev)
420 continue; 410 continue;
421 411 dev = dev_get_drvdata(&pdev->dev);
422 unregister_netdev(dev); 412 unregister_netdev(dev);
423 release_region(dev->base_addr, SK_ISA_IO_EXTENT); 413 release_region(dev->base_addr, SK_ISA_IO_EXTENT);
424 free_irq(dev->irq, dev); 414 free_irq(dev->irq, dev);
425 free_dma(dev->dma); 415 free_dma(dev->dma);
426 tmsdev_term(dev); 416 tmsdev_term(dev);
427 free_netdev(dev); 417 free_netdev(dev);
418 dev_set_drvdata(&pdev->dev, NULL);
419 platform_device_unregister(pdev);
428 } 420 }
421 driver_unregister(&sk_isa_driver);
429} 422}
430#endif /* MODULE */
431 423
424module_init(sk_isa_init);
425module_exit(sk_isa_cleanup);
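
Both ISA conversions above (proteon.c and this skisa.c hunk) follow one recipe: register a bare struct device_driver on the platform bus, then create one platform device per probed adapter, so the driver core supplies the struct device that the generic DMA-mapping and firmware APIs want. A minimal sketch of that recipe against the 2.6-era driver model, with hypothetical names (mydrv, MY_MAX_ADAPTERS), not part of the patch itself:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/module.h>

	#define MY_MAX_ADAPTERS 3

	static struct platform_device *my_pdev[MY_MAX_ADAPTERS];

	static struct device_driver my_driver = {
		.name	= "mydrv",
		.bus	= &platform_bus_type,
	};

	static int __init my_init(void)
	{
		int i, err;

		err = driver_register(&my_driver);
		if (err)
			return err;

		for (i = 0; i < MY_MAX_ADAPTERS; i++) {
			/* One platform device per adapter; its embedded
			 * struct device is what dma_map_single() and
			 * request_firmware() take. */
			my_pdev[i] = platform_device_register_simple("mydrv",
								i, NULL, 0);
			if (IS_ERR(my_pdev[i]))
				my_pdev[i] = NULL;	/* skip this slot */
		}
		return 0;
	}

	static void __exit my_exit(void)
	{
		int i;

		for (i = 0; i < MY_MAX_ADAPTERS; i++)
			if (my_pdev[i])
				platform_device_unregister(my_pdev[i]);
		driver_unregister(&my_driver);
	}

	module_init(my_init);
	module_exit(my_exit);

Note that the hunks above dereference pdev->dev without checking the platform_device_register_simple() return value for IS_ERR(); the sketch keeps that check.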
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5e0b0ce98ed7..2e39bf1f7462 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -62,6 +62,7 @@
62 * normal operation. 62 * normal operation.
63 * 30-Dec-02 JF Removed incorrect __init from 63 * 30-Dec-02 JF Removed incorrect __init from
64 * tms380tr_init_card. 64 * tms380tr_init_card.
65 * 22-Jul-05 JF Converted to dma-mapping.
65 * 66 *
66 * To do: 67 * To do:
67 * 1. Multi/Broadcast packet handling (this may have fixed itself) 68 * 1. Multi/Broadcast packet handling (this may have fixed itself)
@@ -89,7 +90,7 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
89#include <linux/time.h> 90#include <linux/time.h>
90#include <linux/errno.h> 91#include <linux/errno.h>
91#include <linux/init.h> 92#include <linux/init.h>
92#include <linux/pci.h> 93#include <linux/dma-mapping.h>
93#include <linux/delay.h> 94#include <linux/delay.h>
94#include <linux/netdevice.h> 95#include <linux/netdevice.h>
95#include <linux/etherdevice.h> 96#include <linux/etherdevice.h>
@@ -114,8 +115,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
114#endif 115#endif
115static unsigned int tms380tr_debug = TMS380TR_DEBUG; 116static unsigned int tms380tr_debug = TMS380TR_DEBUG;
116 117
117static struct device tms_device;
118
119/* Index to functions, as function prototypes. 118/* Index to functions, as function prototypes.
120 * Alphabetical by function name. 119 * Alphabetical by function name.
121 */ 120 */
@@ -434,7 +433,7 @@ static void tms380tr_init_net_local(struct net_device *dev)
434 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize); 433 skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
435 434
436 /* data unreachable for DMA ? then use local buffer */ 435 /* data unreachable for DMA ? then use local buffer */
437 dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 436 dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
438 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 437 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
439 { 438 {
440 tp->Rpl[i].SkbStat = SKB_DATA_COPY; 439 tp->Rpl[i].SkbStat = SKB_DATA_COPY;
@@ -638,10 +637,10 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
638 /* Is buffer reachable for Busmaster-DMA? */ 637 /* Is buffer reachable for Busmaster-DMA? */
639 638
640 length = skb->len; 639 length = skb->len;
641 dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE); 640 dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
642 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) { 641 if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
643 /* Copy frame to local buffer */ 642 /* Copy frame to local buffer */
644 pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE); 643 dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
645 dmabuf = 0; 644 dmabuf = 0;
646 i = tp->TplFree->TPLIndex; 645 i = tp->TplFree->TPLIndex;
647 buf = tp->LocalTxBuffers[i]; 646 buf = tp->LocalTxBuffers[i];
@@ -1284,9 +1283,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1284 unsigned short count, c, count2; 1283 unsigned short count, c, count2;
1285 const struct firmware *fw_entry = NULL; 1284 const struct firmware *fw_entry = NULL;
1286 1285
1287 strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE); 1286 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
1288
1289 if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
1290 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", 1287 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
1291 dev->name, "tms380tr.bin"); 1288 dev->name, "tms380tr.bin");
1292 return (-1); 1289 return (-1);
@@ -2021,7 +2018,7 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
2021 2018
2022 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl); 2019 printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
2023 if (tpl->DMABuff) 2020 if (tpl->DMABuff)
2024 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2021 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2025 dev_kfree_skb_any(tpl->Skb); 2022 dev_kfree_skb_any(tpl->Skb);
2026 } 2023 }
2027 2024
@@ -2090,7 +2087,7 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
2090 2087
2091 tp->MacStat.tx_packets++; 2088 tp->MacStat.tx_packets++;
2092 if (tpl->DMABuff) 2089 if (tpl->DMABuff)
2093 pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE); 2090 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2094 dev_kfree_skb_irq(tpl->Skb); 2091 dev_kfree_skb_irq(tpl->Skb);
2095 tpl->BusyFlag = 0; /* "free" TPL */ 2092 tpl->BusyFlag = 0; /* "free" TPL */
2096 } 2093 }
@@ -2209,7 +2206,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2209 tp->MacStat.rx_errors++; 2206 tp->MacStat.rx_errors++;
2210 } 2207 }
2211 if (rpl->DMABuff) 2208 if (rpl->DMABuff)
2212 pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_TODEVICE); 2209 dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
2213 rpl->DMABuff = 0; 2210 rpl->DMABuff = 0;
2214 2211
2215 /* Allocate new skb for rpl */ 2212 /* Allocate new skb for rpl */
@@ -2227,7 +2224,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2227 skb_put(rpl->Skb, tp->MaxPacketSize); 2224 skb_put(rpl->Skb, tp->MaxPacketSize);
2228 2225
2229 /* Data unreachable for DMA ? then use local buffer */ 2226 /* Data unreachable for DMA ? then use local buffer */
2230 dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE); 2227 dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
2231 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit)) 2228 if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
2232 { 2229 {
2233 rpl->SkbStat = SKB_DATA_COPY; 2230 rpl->SkbStat = SKB_DATA_COPY;
@@ -2332,23 +2329,26 @@ void tmsdev_term(struct net_device *dev)
2332 struct net_local *tp; 2329 struct net_local *tp;
2333 2330
2334 tp = netdev_priv(dev); 2331 tp = netdev_priv(dev);
2335 pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local), 2332 dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
2336 PCI_DMA_BIDIRECTIONAL); 2333 DMA_BIDIRECTIONAL);
2337} 2334}
2338 2335
2339int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 2336int tmsdev_init(struct net_device *dev, struct device *pdev)
2340 struct pci_dev *pdev)
2341{ 2337{
2342 struct net_local *tms_local; 2338 struct net_local *tms_local;
2343 2339
2344 memset(dev->priv, 0, sizeof(struct net_local)); 2340 memset(dev->priv, 0, sizeof(struct net_local));
2345 tms_local = netdev_priv(dev); 2341 tms_local = netdev_priv(dev);
2346 init_waitqueue_head(&tms_local->wait_for_tok_int); 2342 init_waitqueue_head(&tms_local->wait_for_tok_int);
2347 tms_local->dmalimit = dmalimit; 2343 if (pdev->dma_mask)
2344 tms_local->dmalimit = *pdev->dma_mask;
2345 else
2346 return -ENOMEM;
2348 tms_local->pdev = pdev; 2347 tms_local->pdev = pdev;
2349 tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local, 2348 tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
2350 sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL); 2349 sizeof(struct net_local), DMA_BIDIRECTIONAL);
2351 if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit) 2350 if (tms_local->dmabuffer + sizeof(struct net_local) >
2351 tms_local->dmalimit)
2352 { 2352 {
2353 printk(KERN_INFO "%s: Memory not accessible for DMA\n", 2353 printk(KERN_INFO "%s: Memory not accessible for DMA\n",
2354 dev->name); 2354 dev->name);
@@ -2370,8 +2370,6 @@ int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
2370 return 0; 2370 return 0;
2371} 2371}
2372 2372
2373#ifdef MODULE
2374
2375EXPORT_SYMBOL(tms380tr_open); 2373EXPORT_SYMBOL(tms380tr_open);
2376EXPORT_SYMBOL(tms380tr_close); 2374EXPORT_SYMBOL(tms380tr_close);
2377EXPORT_SYMBOL(tms380tr_interrupt); 2375EXPORT_SYMBOL(tms380tr_interrupt);
@@ -2379,6 +2377,8 @@ EXPORT_SYMBOL(tmsdev_init);
2379EXPORT_SYMBOL(tmsdev_term); 2377EXPORT_SYMBOL(tmsdev_term);
2380EXPORT_SYMBOL(tms380tr_wait); 2378EXPORT_SYMBOL(tms380tr_wait);
2381 2379
2380#ifdef MODULE
2381
2382static struct module *TMS380_module = NULL; 2382static struct module *TMS380_module = NULL;
2383 2383
2384int init_module(void) 2384int init_module(void)
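
The tms380tr.c hunks above are a mechanical move from the PCI-specific DMA calls to the bus-neutral generic API keyed on struct device: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single(), and the PCI_DMA_* direction flags become DMA_*. A hedged before/after sketch (my_map_tx and my_unmap_tx are hypothetical helpers, not driver code):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Before (PCI only):
	 *	dmabuf = pci_map_single(pci_dev, skb->data, len,
	 *				PCI_DMA_TODEVICE);
	 *	pci_unmap_single(pci_dev, dmabuf, len, PCI_DMA_TODEVICE);
	 * After (also works for the ISA platform devices above): */
	static dma_addr_t my_map_tx(struct device *dev, void *buf, size_t len)
	{
		return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}

	static void my_unmap_tx(struct device *dev, dma_addr_t handle,
				size_t len)
	{
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}

For a PCI board the caller passes &pci_dev->dev; the converted ISA drivers pass the platform device's embedded struct device, which is exactly what the new tmsdev_init() signature takes.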
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index f2c5ba0f37a5..30452c67bb68 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -17,8 +17,7 @@
17int tms380tr_open(struct net_device *dev); 17int tms380tr_open(struct net_device *dev);
18int tms380tr_close(struct net_device *dev); 18int tms380tr_close(struct net_device *dev);
19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs); 19irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
20int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 20int tmsdev_init(struct net_device *dev, struct device *pdev);
21 struct pci_dev *pdev);
22void tmsdev_term(struct net_device *dev); 21void tmsdev_term(struct net_device *dev);
23void tms380tr_wait(unsigned long time); 22void tms380tr_wait(unsigned long time);
24 23
@@ -719,7 +718,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */
719 struct sk_buff *Skb; 718 struct sk_buff *Skb;
720 unsigned char TPLIndex; 719 unsigned char TPLIndex;
721 volatile unsigned char BusyFlag;/* Flag: TPL busy? */ 720 volatile unsigned char BusyFlag;/* Flag: TPL busy? */
722 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 721 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
723}; 722};
724 723
725/* ---------------------Receive Functions-------------------------------* 724/* ---------------------Receive Functions-------------------------------*
@@ -1060,7 +1059,7 @@ struct s_RPL { /* Receive Parameter List */
1060 struct sk_buff *Skb; 1059 struct sk_buff *Skb;
1061 SKB_STAT SkbStat; 1060 SKB_STAT SkbStat;
1062 int RPLIndex; 1061 int RPLIndex;
1063 dma_addr_t DMABuff; /* DMA IO bus address from pci_map */ 1062 dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
1064}; 1063};
1065 1064
1066/* Information that need to be kept for each board. */ 1065/* Information that need to be kept for each board. */
@@ -1091,7 +1090,7 @@ typedef struct net_local {
1091 RPL *RplTail; 1090 RPL *RplTail;
1092 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE]; 1091 unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
1093 1092
1094 struct pci_dev *pdev; 1093 struct device *pdev;
1095 int DataRate; 1094 int DataRate;
1096 unsigned char ScbInUse; 1095 unsigned char ScbInUse;
1097 unsigned short CMDqueue; 1096 unsigned short CMDqueue;
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 2e18c0a46482..ab47c0547a3b 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
100 unsigned int pci_irq_line; 100 unsigned int pci_irq_line;
101 unsigned long pci_ioaddr; 101 unsigned long pci_ioaddr;
102 struct card_info *cardinfo = &card_info_table[ent->driver_data]; 102 struct card_info *cardinfo = &card_info_table[ent->driver_data];
103 103
104 if (versionprinted++ == 0) 104 if (versionprinted++ == 0)
105 printk("%s", version); 105 printk("%s", version);
106 106
@@ -143,7 +143,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
143 printk(":%2.2x", dev->dev_addr[i]); 143 printk(":%2.2x", dev->dev_addr[i]);
144 printk("\n"); 144 printk("\n");
145 145
146 ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev); 146 ret = tmsdev_init(dev, &pdev->dev);
147 if (ret) { 147 if (ret) {
148 printk("%s: unable to get memory for dev->priv.\n", dev->name); 148 printk("%s: unable to get memory for dev->priv.\n", dev->name);
149 goto err_out_irq; 149 goto err_out_irq;
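
With the tmsdev_init(dev, &pdev->dev) call above, the DMA limit is no longer an explicit parameter: tmsdev_init() reads it from the device's dma_mask, which the PCI core provides for PCI adapters and which the ISA front ends point at a static ISA_MAX_ADDRESS mask. A sketch of an ISA-style caller under those assumptions (my_dma_mask and my_setup are hypothetical names):

	static u64 my_dma_mask = ISA_MAX_ADDRESS;	/* ISA bus DMA limit */

	static int __init my_setup(struct net_device *dev,
				   struct platform_device *pdev)
	{
		/* tmsdev_init() uses *dma_mask as its dmalimit and
		 * fails with -ENOMEM when no mask is set. */
		pdev->dev.dma_mask = &my_dma_mask;
		return tmsdev_init(dev, &pdev->dev);
	}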
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index e2cdaf876201..8c9634a98c11 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -135,6 +135,18 @@ config DM9102
135 <file:Documentation/networking/net-modules.txt>. The module will 135 <file:Documentation/networking/net-modules.txt>. The module will
136 be called dmfe. 136 be called dmfe.
137 137
138config ULI526X
139 tristate "ULi M526x controller support"
140 depends on NET_TULIP && PCI
141 select CRC32
142 ---help---
143 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
144 (<http://www.uli.com.tw/>).
145
146 To compile this driver as a module, choose M here and read
147 <file:Documentation/networking/net-modules.txt>. The module will
148 be called uli526x.
149
138config PCMCIA_XIRCOM 150config PCMCIA_XIRCOM
139 tristate "Xircom CardBus support (new driver)" 151 tristate "Xircom CardBus support (new driver)"
140 depends on NET_TULIP && CARDBUS 152 depends on NET_TULIP && CARDBUS
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 8bb9b4683979..451090d6fcca 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840) += winbond-840.o
9obj-$(CONFIG_DE2104X) += de2104x.o 9obj-$(CONFIG_DE2104X) += de2104x.o
10obj-$(CONFIG_TULIP) += tulip.o 10obj-$(CONFIG_TULIP) += tulip.o
11obj-$(CONFIG_DE4X5) += de4x5.o 11obj-$(CONFIG_DE4X5) += de4x5.o
12obj-$(CONFIG_ULI526X) += uli526x.o
12 13
13# Declare multi-part drivers. 14# Declare multi-part drivers.
14 15
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e26c31f944bf..f53396fe79c9 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
81 return retval & 0xffff; 81 return retval & 0xffff;
82 } 82 }
83 83
84 if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
85 int value;
86 int i = 1000;
87
88 value = ioread32(ioaddr + CSR9);
89 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
90
91 value = (phy_id << 21) | (location << 16) | 0x08000000;
92 iowrite32(value, ioaddr + CSR10);
93
94 while(--i > 0) {
95 mdio_delay();
96 if(ioread32(ioaddr + CSR10) & 0x10000000)
97 break;
98 }
99 retval = ioread32(ioaddr + CSR10);
100 spin_unlock_irqrestore(&tp->mii_lock, flags);
101 return retval & 0xFFFF;
102 }
103 /* Establish sync by sending at least 32 logic ones. */ 84 /* Establish sync by sending at least 32 logic ones. */
104 for (i = 32; i >= 0; i--) { 85 for (i = 32; i >= 0; i--) {
105 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); 86 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
159 spin_unlock_irqrestore(&tp->mii_lock, flags); 140 spin_unlock_irqrestore(&tp->mii_lock, flags);
160 return; 141 return;
161 } 142 }
162 if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
163 int value;
164 int i = 1000;
165
166 value = ioread32(ioaddr + CSR9);
167 iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
168
169 value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
170 iowrite32(value, ioaddr + CSR10);
171
172 while(--i > 0) {
173 if (ioread32(ioaddr + CSR10) & 0x10000000)
174 break;
175 }
176 spin_unlock_irqrestore(&tp->mii_lock, flags);
177 return;
178 }
179 143
180 /* Establish sync by sending 32 logic ones. */ 144 /* Establish sync by sending 32 logic ones. */
181 for (i = 32; i >= 0; i--) { 145 for (i = 32; i >= 0; i--) {
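
The two media.c hunks above drop the ULi-specific MII path from the generic tulip driver: the M526x exposes a single-shot MDIO engine behind CSR10 rather than tulip's bit-banged CSR9 interface, and the new uli526x.c below carries its own copy of that access (phy_readby_cr10). For reference, a condensed sketch of the removed read path, assuming tulip.h's CSR9/CSR10 offsets and omitting the original's mdio_delay() pacing and mii_lock handling:

	static u16 cr10_mdio_read(void __iomem *ioaddr, int phy_id,
				  int location)
	{
		int i = 1000;

		/* Clear CSR9 bit 20 to hand the MII pins to the CSR10 engine */
		iowrite32(ioread32(ioaddr + CSR9) & 0xFFEFFFFF, ioaddr + CSR9);

		/* Read command: opcode 0x08000000 plus PHY and register address */
		iowrite32((phy_id << 21) | (location << 16) | 0x08000000,
			  ioaddr + CSR10);

		while (--i > 0)			/* poll the completion bit */
			if (ioread32(ioaddr + CSR10) & 0x10000000)
				break;

		return ioread32(ioaddr + CSR10) & 0xFFFF;
	}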
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 691568283553..e058a9fbfe88 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
39 case MX98713: 39 case MX98713:
40 case COMPEX9881: 40 case COMPEX9881:
41 case DM910X: 41 case DM910X:
42 case ULI526X:
43 default: { 42 default: {
44 struct medialeaf *mleaf; 43 struct medialeaf *mleaf;
45 unsigned char *p; 44 unsigned char *p;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 20346d847d9e..05d2d96f7be2 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -88,7 +88,6 @@ enum chips {
88 I21145, 88 I21145,
89 DM910X, 89 DM910X,
90 CONEXANT, 90 CONEXANT,
91 ULI526X
92}; 91};
93 92
94 93
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
482 481
483static inline void tulip_restart_rxtx(struct tulip_private *tp) 482static inline void tulip_restart_rxtx(struct tulip_private *tp)
484{ 483{
485 if(!(tp->chip_id == ULI526X && 484 tulip_stop_rxtx(tp);
486 (tp->revision == 0x40 || tp->revision == 0x50))) { 485 udelay(5);
487 tulip_stop_rxtx(tp);
488 udelay(5);
489 }
490 tulip_start_rxtx(tp); 486 tulip_start_rxtx(tp);
491} 487}
492 488
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d45d8f56e5b4..05da5bea564c 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
199 { "Conexant LANfinity", 256, 0x0001ebef, 199 { "Conexant LANfinity", 256, 0x0001ebef,
200 HAS_MII | HAS_ACPI, tulip_timer }, 200 HAS_MII | HAS_ACPI, tulip_timer },
201 201
202 /* ULi526X */
203 { "ULi M5261/M5263", 128, 0x0001ebef,
204 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
205}; 202};
206 203
207 204
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tbl[] = {
239 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 236 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 237 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 238 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
243 { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
244 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 239 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
245 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 240 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
246 { } /* terminate list */ 241 { } /* terminate list */
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_device *dev)
522 dev->name); 517 dev->name);
523 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 518 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
524 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 519 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
525 || tp->chip_id == DM910X || tp->chip_id == ULI526X) { 520 || tp->chip_id == DM910X) {
526 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " 521 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
527 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 522 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
528 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 523 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_device *dev)
1103 entry = tp->cur_tx++ % TX_RING_SIZE; 1098 entry = tp->cur_tx++ % TX_RING_SIZE;
1104 1099
1105 if (entry != 0) { 1100 if (entry != 0) {
1106 /* Avoid a chip errata by prefixing a dummy entry. Don't do 1101 /* Avoid a chip errata by prefixing a dummy entry. */
1107 this on the ULI526X as it triggers a different problem */ 1102 tp->tx_buffers[entry].skb = NULL;
1108 if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) { 1103 tp->tx_buffers[entry].mapping = 0;
1109 tp->tx_buffers[entry].skb = NULL; 1104 tp->tx_ring[entry].length =
1110 tp->tx_buffers[entry].mapping = 0; 1105 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1111 tp->tx_ring[entry].length = 1106 tp->tx_ring[entry].buffer1 = 0;
1112 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; 1107 /* Must set DescOwned later to avoid race with chip */
1113 tp->tx_ring[entry].buffer1 = 0; 1108 dummy = entry;
1114 /* Must set DescOwned later to avoid race with chip */ 1109 entry = tp->cur_tx++ % TX_RING_SIZE;
1115 dummy = entry; 1110
1116 entry = tp->cur_tx++ % TX_RING_SIZE;
1117 }
1118 } 1111 }
1119 1112
1120 tp->tx_buffers[entry].skb = NULL; 1113 tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1235{ 1228{
1236 if (pdev->vendor == 0x1282 && pdev->device == 0x9102) 1229 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1237 return 1; 1230 return 1;
1238 if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
1239 return 1;
1240 if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
1241 return 1;
1242 return 0; 1231 return 0;
1243} 1232}
1244 1233
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1680 switch (chip_idx) { 1669 switch (chip_idx) {
1681 case DC21140: 1670 case DC21140:
1682 case DM910X: 1671 case DM910X:
1683 case ULI526X:
1684 default: 1672 default:
1685 if (tp->mtable) 1673 if (tp->mtable)
1686 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); 1674 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644
index 000000000000..5ae22b7bc5ca
--- /dev/null
+++ b/drivers/net/tulip/uli526x.c
@@ -0,0 +1,1749 @@
1/*
2 This program is free software; you can redistribute it and/or
3 modify it under the terms of the GNU General Public License
4 as published by the Free Software Foundation; either version 2
5 of the License, or (at your option) any later version.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12
13*/
14
15#define DRV_NAME "uli526x"
16#define DRV_VERSION "0.9.3"
17#define DRV_RELDATE "2005-7-29"
18
19#include <linux/module.h>
20
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/timer.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/delay.h>
36#include <linux/spinlock.h>
37
38#include <asm/processor.h>
39#include <asm/bitops.h>
40#include <asm/io.h>
41#include <asm/dma.h>
42#include <asm/uaccess.h>
43
44
45/* Board/System/Debug information/definition ---------------- */
46#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
47#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/
48
49#define ULI526X_IO_SIZE 0x100
50#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */
51#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */
52#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
53#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
54#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
55#define TX_BUF_ALLOC 0x600
56#define RX_ALLOC_SIZE 0x620
57#define ULI526X_RESET 1
58#define CR0_DEFAULT 0
59#define CR6_DEFAULT 0x22200000
60#define CR7_DEFAULT 0x180c1
61#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
62#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
63#define MAX_PACKET_SIZE 1514
64#define ULI5261_MAX_MULTICAST 14
65#define RX_COPY_SIZE 100
66#define MAX_CHECK_PACKET 0x8000
67
68#define ULI526X_10MHF 0
69#define ULI526X_100MHF 1
70#define ULI526X_10MFD 4
71#define ULI526X_100MFD 5
72#define ULI526X_AUTO 8
73
74#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */
75#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */
76#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */
77#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */
78#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */
79#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */
80
81#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
82#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s */
83#define ULI526X_TX_KICK (4*HZ/2) /* tx packet kick-out time 2 s */
84
85#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
86
87#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMbps %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
88
89
90/* CR9 definition: SROM/MII */
91#define CR9_SROM_READ 0x4800
92#define CR9_SRCS 0x1
93#define CR9_SRCLK 0x2
94#define CR9_CRDOUT 0x8
95#define SROM_DATA_0 0x0
96#define SROM_DATA_1 0x4
97#define PHY_DATA_1 0x20000
98#define PHY_DATA_0 0x00000
99#define MDCLKH 0x10000
100
101#define PHY_POWER_DOWN 0x800
102
103#define SROM_V41_CODE 0x14
104
105#define SROM_CLK_WRITE(data, ioaddr) \
106 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
107 udelay(5); \
108 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
109 udelay(5); \
110 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
111 udelay(5);
112
113/* Structure/enum declaration ------------------------------- */
114struct tx_desc {
115 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
116 char *tx_buf_ptr; /* Data for us */
117 struct tx_desc *next_tx_desc;
118} __attribute__(( aligned(32) ));
119
120struct rx_desc {
121 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
122 struct sk_buff *rx_skb_ptr; /* Data for us */
123 struct rx_desc *next_rx_desc;
124} __attribute__(( aligned(32) ));
125
126struct uli526x_board_info {
127 u32 chip_id; /* Chip vendor/Device ID */
128 struct net_device *next_dev; /* next device */
129 struct pci_dev *pdev; /* PCI device */
130 spinlock_t lock;
131
132 long ioaddr; /* I/O base address */
133 u32 cr0_data;
134 u32 cr5_data;
135 u32 cr6_data;
136 u32 cr7_data;
137 u32 cr15_data;
138
139 /* pointer for memory physical address */
140 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
141 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
142 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
143 dma_addr_t first_tx_desc_dma;
144 dma_addr_t first_rx_desc_dma;
145
146 /* descriptor pointer */
147 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
148 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
149 unsigned char *desc_pool_ptr; /* descriptor pool memory */
150 struct tx_desc *first_tx_desc;
151 struct tx_desc *tx_insert_ptr;
152 struct tx_desc *tx_remove_ptr;
153 struct rx_desc *first_rx_desc;
154 struct rx_desc *rx_insert_ptr;
155 struct rx_desc *rx_ready_ptr; /* next incoming packet pointer */
156 unsigned long tx_packet_cnt; /* transmitted packet count */
157 unsigned long rx_avail_cnt; /* available rx descriptor count */
158 unsigned long interval_rx_cnt; /* rx packet count per timer interval */
159
160 u16 dbug_cnt;
161 u16 NIC_capability; /* NIC media capability */
162 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
163
164 u8 media_mode; /* user-specified media mode */
165 u8 op_mode; /* actual working media mode */
166 u8 phy_addr;
167 u8 link_failed; /* link failure flag */
168 u8 wait_reset; /* Hardware failed, need to reset */
169 struct timer_list timer;
170
171 /* System defined statistic counter */
172 struct net_device_stats stats;
173
174 /* Driver defined statistic counter */
175 unsigned long tx_fifo_underrun;
176 unsigned long tx_loss_carrier;
177 unsigned long tx_no_carrier;
178 unsigned long tx_late_collision;
179 unsigned long tx_excessive_collision;
180 unsigned long tx_jabber_timeout;
181 unsigned long reset_count;
182 unsigned long reset_cr8;
183 unsigned long reset_fatal;
184 unsigned long reset_TXtimeout;
185
186 /* NIC SROM data */
187 unsigned char srom[128];
188 u8 init;
189};
190
191enum uli526x_offsets {
192 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
193 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
194 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
195 DCR15 = 0x78
196};
197
198enum uli526x_CR6_bits {
199 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
200 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
201 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
202};
203
204/* Global variable declaration ----------------------------- */
205static int __devinitdata printed_version;
206static char version[] __devinitdata =
207 KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
208 DRV_VERSION " (" DRV_RELDATE ")\n";
209
210static int uli526x_debug;
211static unsigned char uli526x_media_mode = ULI526X_AUTO;
212static u32 uli526x_cr6_user_set;
213
214/* For module input parameter */
215static int debug;
216static u32 cr6set;
217static unsigned char mode = 8;
218
219/* function declaration ------------------------------------- */
220static int uli526x_open(struct net_device *);
221static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
222static int uli526x_stop(struct net_device *);
223static struct net_device_stats * uli526x_get_stats(struct net_device *);
224static void uli526x_set_filter_mode(struct net_device *);
225static struct ethtool_ops netdev_ethtool_ops;
226static u16 read_srom_word(long, int);
227static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *);
228static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
229static void allocate_rx_buffer(struct uli526x_board_info *);
230static void update_cr6(u32, unsigned long);
231static void send_filter_frame(struct net_device *, int);
232static u16 phy_read(unsigned long, u8, u8, u32);
233static u16 phy_readby_cr10(unsigned long, u8, u8);
234static void phy_write(unsigned long, u8, u8, u16, u32);
235static void phy_writeby_cr10(unsigned long, u8, u8, u16);
236static void phy_write_1bit(unsigned long, u32, u32);
237static u16 phy_read_1bit(unsigned long, u32);
238static u8 uli526x_sense_speed(struct uli526x_board_info *);
239static void uli526x_process_mode(struct uli526x_board_info *);
240static void uli526x_timer(unsigned long);
241static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
242static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
243static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
244static void uli526x_dynamic_reset(struct net_device *);
245static void uli526x_free_rxbuffer(struct uli526x_board_info *);
246static void uli526x_init(struct net_device *);
247static void uli526x_set_phyxcer(struct uli526x_board_info *);
248
249/* ULI526X network board routine ---------------------------- */
250
251/*
252 * Search ULI526X board, allocate space and register it
253 */
254
255static int __devinit uli526x_init_one (struct pci_dev *pdev,
256 const struct pci_device_id *ent)
257{
258 struct uli526x_board_info *db; /* board information structure */
259 struct net_device *dev;
260 int i, err;
261
262 ULI526X_DBUG(0, "uli526x_init_one()", 0);
263
264 if (!printed_version++)
265 printk(version);
266
267 /* Init network device */
268 dev = alloc_etherdev(sizeof(*db));
269 if (dev == NULL)
270 return -ENOMEM;
271 SET_MODULE_OWNER(dev);
272 SET_NETDEV_DEV(dev, &pdev->dev);
273
274 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
275 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
276 err = -ENODEV;
277 goto err_out_free;
278 }
279
280 /* Enable Master/IO access, Disable memory access */
281 err = pci_enable_device(pdev);
282 if (err)
283 goto err_out_free;
284
285 if (!pci_resource_start(pdev, 0)) {
286 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
287 err = -ENODEV;
288 goto err_out_disable;
289 }
290
291 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
292 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
293 err = -ENODEV;
294 goto err_out_disable;
295 }
296
297 if (pci_request_regions(pdev, DRV_NAME)) {
298 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
299 err = -ENODEV;
300 goto err_out_disable;
301 }
302
303 /* Init system & device */
304 db = netdev_priv(dev);
305
306 /* Allocate Tx/Rx descriptor memory */
307 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
308 if(db->desc_pool_ptr == NULL)
309 {
310 err = -ENOMEM;
311 goto err_out_nomem;
312 }
313 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
314 if(db->buf_pool_ptr == NULL)
315 {
316 err = -ENOMEM;
317 goto err_out_nomem;
318 }
319
320 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
321 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
322 db->buf_pool_start = db->buf_pool_ptr;
323 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
324
325 db->chip_id = ent->driver_data;
326 db->ioaddr = pci_resource_start(pdev, 0);
327
328 db->pdev = pdev;
329 db->init = 1;
330
331 dev->base_addr = db->ioaddr;
332 dev->irq = pdev->irq;
333 pci_set_drvdata(pdev, dev);
334
335 /* Register some necessary functions */
336 dev->open = &uli526x_open;
337 dev->hard_start_xmit = &uli526x_start_xmit;
338 dev->stop = &uli526x_stop;
339 dev->get_stats = &uli526x_get_stats;
340 dev->set_multicast_list = &uli526x_set_filter_mode;
341 dev->ethtool_ops = &netdev_ethtool_ops;
342 spin_lock_init(&db->lock);
343
344
345 /* read 64 word srom data */
346 for (i = 0; i < 64; i++)
347 ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
348
349 /* Set Node address */
350 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
351 {
352 outl(0x10000, db->ioaddr + DCR0); //Diagnostic mode
353 outl(0x1c0, db->ioaddr + DCR13); //Reset diagnostic pointer port
354 outl(0, db->ioaddr + DCR14); //Clear reset port
355 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer
356 outl(0, db->ioaddr + DCR14); //Clear reset port
357 outl(0, db->ioaddr + DCR13); //Clear CR13
358 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port
359 //Read MAC address from CR14
360 for (i = 0; i < 6; i++)
361 dev->dev_addr[i] = inl(db->ioaddr + DCR14);
362 //Read end
363 outl(0, db->ioaddr + DCR13); //Clear CR13
364 outl(0, db->ioaddr + DCR0); //Clear CR0
365 udelay(10);
366 }
367 else /* SROM exists */
368 {
369 for (i = 0; i < 6; i++)
370 dev->dev_addr[i] = db->srom[20 + i];
371 }
372 err = register_netdev (dev);
373 if (err)
374 goto err_out_res;
375
376 printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
377
378 for (i = 0; i < 6; i++)
379 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
380 printk(", irq %d.\n", dev->irq);
381
382 pci_set_master(pdev);
383
384 return 0;
385
386err_out_res:
387 pci_release_regions(pdev);
388err_out_nomem:
389 if(db->desc_pool_ptr)
390 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
391 db->desc_pool_ptr, db->desc_pool_dma_ptr);
392
393 if(db->buf_pool_ptr != NULL)
394 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
395 db->buf_pool_ptr, db->buf_pool_dma_ptr);
396err_out_disable:
397 pci_disable_device(pdev);
398err_out_free:
399 pci_set_drvdata(pdev, NULL);
400 free_netdev(dev);
401
402 return err;
403}
404
405
406static void __devexit uli526x_remove_one (struct pci_dev *pdev)
407{
408 struct net_device *dev = pci_get_drvdata(pdev);
409 struct uli526x_board_info *db = netdev_priv(dev);
410
411 ULI526X_DBUG(0, "uli526x_remove_one()", 0);
412
413 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
414 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
415 db->desc_pool_dma_ptr);
416 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
417 db->buf_pool_ptr, db->buf_pool_dma_ptr);
418 unregister_netdev(dev);
419 pci_release_regions(pdev);
420 free_netdev(dev); /* free board information */
421 pci_set_drvdata(pdev, NULL);
422 pci_disable_device(pdev);
423 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
424}
425
426
427/*
428 * Open the interface.
429 * The interface is opened whenever "ifconfig" activates it.
430 */
431
432static int uli526x_open(struct net_device *dev)
433{
434 int ret;
435 struct uli526x_board_info *db = netdev_priv(dev);
436
437 ULI526X_DBUG(0, "uli526x_open", 0);
438
439 ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
440 if (ret)
441 return ret;
442
443 /* system variable init */
444 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
445 db->tx_packet_cnt = 0;
446 db->rx_avail_cnt = 0;
447 db->link_failed = 1;
448 netif_carrier_off(dev);
449 db->wait_reset = 0;
450
451 db->NIC_capability = 0xf; /* All capability*/
452 db->PHY_reg4 = 0x1e0;
453
454 /* CR6 operation mode decision */
455 db->cr6_data |= ULI526X_TXTH_256;
456 db->cr0_data = CR0_DEFAULT;
457
458 /* Initialize ULI526X board */
459 uli526x_init(dev);
460
461 /* Activate System Interface */
462 netif_wake_queue(dev);
463
464 /* set and activate a timer process */
465 init_timer(&db->timer);
466 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
467 db->timer.data = (unsigned long)dev;
468 db->timer.function = &uli526x_timer;
469 add_timer(&db->timer);
470
471 return 0;
472}
473
474
475/* Initialize ULI526X board
476 * Reset ULI526X board
477 * Initialize TX/Rx descriptor chain structure
478 * Send the set-up frame
479 * Enable Tx/Rx machine
480 */
481
482static void uli526x_init(struct net_device *dev)
483{
484 struct uli526x_board_info *db = netdev_priv(dev);
485 unsigned long ioaddr = db->ioaddr;
486 u8 phy_tmp;
487 u16 phy_value;
488 u16 phy_reg_reset;
489
490 ULI526X_DBUG(0, "uli526x_init()", 0);
491
492 /* Reset M526x MAC controller */
493 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */
494 udelay(100);
495 outl(db->cr0_data, ioaddr + DCR0);
496 udelay(5);
497
498 /* Phy addr: on some boards the M5261/M5263 PHY address != 1 */
499 db->phy_addr = 1;
500 for(phy_tmp=0;phy_tmp<32;phy_tmp++)
501 {
502 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id); /* probe PHY ID register */
503 if(phy_value != 0xffff&&phy_value!=0)
504 {
505 db->phy_addr = phy_tmp;
506 break;
507 }
508 }
509 if(phy_tmp == 32)
510 printk(KERN_WARNING "Cannot find the PHY address!\n");
511 /* Parse SROM and media mode */
512 db->media_mode = uli526x_media_mode;
513
514 /* Phyxcer capability setting */
515 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
516 phy_reg_reset = (phy_reg_reset | 0x8000);
517 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
518 udelay(500);
519
520 /* Process Phyxcer Media Mode */
521 uli526x_set_phyxcer(db);
522
523 /* Media Mode Process */
524 if ( !(db->media_mode & ULI526X_AUTO) )
525 db->op_mode = db->media_mode; /* Force Mode */
526
527 /* Initialize Transmit/Receive descriptor and CR3/4 */
528 uli526x_descriptor_init(db, ioaddr);
529
530 /* Init CR6 to program M526X operation */
531 update_cr6(db->cr6_data, ioaddr);
532
533 /* Send setup frame */
534 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
535
536 /* Init CR7, interrupt active bit */
537 db->cr7_data = CR7_DEFAULT;
538 outl(db->cr7_data, ioaddr + DCR7);
539
540 /* Init CR15, Tx jabber and Rx watchdog timer */
541 outl(db->cr15_data, ioaddr + DCR15);
542
543 /* Enable ULI526X Tx/Rx function */
544 db->cr6_data |= CR6_RXSC | CR6_TXSC;
545 update_cr6(db->cr6_data, ioaddr);
546}
547
548
549/*
550 * Hardware start transmission.
551 * Send a packet to media from the upper layer.
552 */
553
554static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
555{
556 struct uli526x_board_info *db = netdev_priv(dev);
557 struct tx_desc *txptr;
558 unsigned long flags;
559
560 ULI526X_DBUG(0, "uli526x_start_xmit", 0);
561
562 /* Resource flag check */
563 netif_stop_queue(dev);
564
565 /* Too large packet check */
566 if (skb->len > MAX_PACKET_SIZE) {
567 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
568 dev_kfree_skb(skb);
569 return 0;
570 }
571
572 spin_lock_irqsave(&db->lock, flags);
573
574 /* No Tx resource check; this should never happen normally */
575 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
576 spin_unlock_irqrestore(&db->lock, flags);
577 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
578 return 1;
579 }
580
581 /* Disable NIC interrupt */
582 outl(0, dev->base_addr + DCR7);
583
584 /* transmit this packet */
585 txptr = db->tx_insert_ptr;
586 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
587 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
588
589 /* Point to next transmit free descriptor */
590 db->tx_insert_ptr = txptr->next_tx_desc;
591
592 /* Transmit Packet Process */
593 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
594 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
595 db->tx_packet_cnt++; /* Ready to send */
596 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
597 dev->trans_start = jiffies; /* saved time stamp */
598 }
599
600 /* Tx resource check */
601 if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
602 netif_wake_queue(dev);
603
604 /* Restore CR7 to enable interrupt */
605 spin_unlock_irqrestore(&db->lock, flags);
606 outl(db->cr7_data, dev->base_addr + DCR7);
607
608 /* free this SKB */
609 dev_kfree_skb(skb);
610
611 return 0;
612}
613
614
615/*
616 * Stop the interface.
617 * The interface is stopped when it is brought down.
618 */
619
620static int uli526x_stop(struct net_device *dev)
621{
622 struct uli526x_board_info *db = netdev_priv(dev);
623 unsigned long ioaddr = dev->base_addr;
624
625 ULI526X_DBUG(0, "uli526x_stop", 0);
626
627 /* disable system */
628 netif_stop_queue(dev);
629
630 /* delete the timer */
631 del_timer_sync(&db->timer);
632
633 /* Reset & stop ULI526X board */
634 outl(ULI526X_RESET, ioaddr + DCR0);
635 udelay(5);
636 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
637
638 /* free interrupt */
639 free_irq(dev->irq, dev);
640
641 /* free allocated rx buffer */
642 uli526x_free_rxbuffer(db);
643
644#if 0
645 /* show statistic counter */
646 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
647 db->tx_fifo_underrun, db->tx_excessive_collision,
648 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
649 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
650 db->reset_fatal, db->reset_TXtimeout);
651#endif
652
653 return 0;
654}
655
656
657/*
658 * M5261/M5263 interrupt handler:
659 * pass received packets to the upper layer, free transmitted packets
660 */
661
662static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
663{
664 struct net_device *dev = dev_id;
665 struct uli526x_board_info *db = netdev_priv(dev);
666 unsigned long ioaddr = dev->base_addr;
667 unsigned long flags;
668
669 if (!dev) {
670 ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
671 return IRQ_NONE;
672 }
673
674 spin_lock_irqsave(&db->lock, flags);
675 outl(0, ioaddr + DCR7);
676
677 /* Got ULI526X status */
678 db->cr5_data = inl(ioaddr + DCR5);
679 outl(db->cr5_data, ioaddr + DCR5);
680 if ( !(db->cr5_data & 0x180c1) ) {
681 spin_unlock_irqrestore(&db->lock, flags);
682 outl(db->cr7_data, ioaddr + DCR7);
683 return IRQ_HANDLED;
684 }
685
686 /* Check system status */
687 if (db->cr5_data & 0x2000) {
688 /* a system bus error happened */
689 ULI526X_DBUG(1, "System bus error happened. CR5=", db->cr5_data);
690 db->reset_fatal++;
691 db->wait_reset = 1; /* Need to RESET */
692 spin_unlock_irqrestore(&db->lock, flags);
693 return IRQ_HANDLED;
694 }
695
696 /* An incoming packet was received */
697 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
698 uli526x_rx_packet(dev, db);
699
700 /* reallocate rx descriptor buffer */
701 if (db->rx_avail_cnt<RX_DESC_CNT)
702 allocate_rx_buffer(db);
703
704 /* Free the transmitted descriptor */
705 if ( db->cr5_data & 0x01)
706 uli526x_free_tx_pkt(dev, db);
707
708 /* Restore CR7 to enable interrupt mask */
709 outl(db->cr7_data, ioaddr + DCR7);
710
711 spin_unlock_irqrestore(&db->lock, flags);
712 return IRQ_HANDLED;
713}
714
715
716/*
717 * Free TX resource after TX complete
718 */
719
720static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
721{
722 struct tx_desc *txptr;
723 u32 tdes0;
724
725 txptr = db->tx_remove_ptr;
726 while(db->tx_packet_cnt) {
727 tdes0 = le32_to_cpu(txptr->tdes0);
728 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
729 if (tdes0 & 0x80000000)
730 break;
731
732 /* A packet transmission completed */
733 db->tx_packet_cnt--;
734 db->stats.tx_packets++;
735
736 /* Transmit statistic counter */
737 if ( tdes0 != 0x7fffffff ) {
738 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
739 db->stats.collisions += (tdes0 >> 3) & 0xf;
740 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
741 if (tdes0 & TDES0_ERR_MASK) {
742 db->stats.tx_errors++;
743 if (tdes0 & 0x0002) { /* UnderRun */
744 db->tx_fifo_underrun++;
745 if ( !(db->cr6_data & CR6_SFT) ) {
746 db->cr6_data = db->cr6_data | CR6_SFT;
747 update_cr6(db->cr6_data, db->ioaddr);
748 }
749 }
750 if (tdes0 & 0x0100)
751 db->tx_excessive_collision++;
752 if (tdes0 & 0x0200)
753 db->tx_late_collision++;
754 if (tdes0 & 0x0400)
755 db->tx_no_carrier++;
756 if (tdes0 & 0x0800)
757 db->tx_loss_carrier++;
758 if (tdes0 & 0x4000)
759 db->tx_jabber_timeout++;
760 }
761 }
762
763 txptr = txptr->next_tx_desc;
764 }/* End of while */
765
766 /* Update TX remove pointer to next */
767 db->tx_remove_ptr = txptr;
768
769 /* Resource available check */
770 if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
771 netif_wake_queue(dev); /* Activate upper layer, send again */
772}
773
774
775/*
776 * Receive the incoming packet and pass it to the upper layer
777 */
778
779static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
780{
781 struct rx_desc *rxptr;
782 struct sk_buff *skb;
783 int rxlen;
784 u32 rdes0;
785
786 rxptr = db->rx_ready_ptr;
787
788 while(db->rx_avail_cnt) {
789 rdes0 = le32_to_cpu(rxptr->rdes0);
790 if (rdes0 & 0x80000000) /* packet owner check */
791 {
792 break;
793 }
794
795 db->rx_avail_cnt--;
796 db->interval_rx_cnt++;
797
798 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
799 if ( (rdes0 & 0x300) != 0x300) {
800 /* A packet without First/Last flag */
801 /* reuse this SKB */
802 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
803 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
804 } else {
805 /* A packet with First/Last flag */
806 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
807
808 /* error summary bit check */
809 if (rdes0 & 0x8000) {
810 /* This is an error packet */
811 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
812 db->stats.rx_errors++;
813 if (rdes0 & 1)
814 db->stats.rx_fifo_errors++;
815 if (rdes0 & 2)
816 db->stats.rx_crc_errors++;
817 if (rdes0 & 0x80)
818 db->stats.rx_length_errors++;
819 }
820
821 if ( !(rdes0 & 0x8000) ||
822 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
823 skb = rxptr->rx_skb_ptr;
824
825 /* Good packet, send to upper layer */
826 /* Short packet uses a new SKB */
827 if ( (rxlen < RX_COPY_SIZE) &&
828 ( (skb = dev_alloc_skb(rxlen + 2) )
829 != NULL) ) {
830 /* size less than COPY_SIZE, allocate a rxlen SKB */
831 skb->dev = dev;
832 skb_reserve(skb, 2); /* 16byte align */
833 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
834 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
835 } else {
836 skb->dev = dev;
837 skb_put(skb, rxlen);
838 }
839 skb->protocol = eth_type_trans(skb, dev);
840 netif_rx(skb);
841 dev->last_rx = jiffies;
842 db->stats.rx_packets++;
843 db->stats.rx_bytes += rxlen;
844
845 } else {
846 /* Reuse SKB buffer when the packet is error */
847 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
848 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
849 }
850 }
851
852 rxptr = rxptr->next_rx_desc;
853 }
854
855 db->rx_ready_ptr = rxptr;
856}
857
858
859/*
860 * Get statistics from driver.
861 */
862
863static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
864{
865 struct uli526x_board_info *db = netdev_priv(dev);
866
867 ULI526X_DBUG(0, "uli526x_get_stats", 0);
868 return &db->stats;
869}
870
871
872/*
873 * Set ULI526X multicast address
874 */
875
876static void uli526x_set_filter_mode(struct net_device * dev)
877{
878 struct uli526x_board_info *db = netdev_priv(dev);
879 unsigned long flags;
880
881 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
882 spin_lock_irqsave(&db->lock, flags);
883
884 if (dev->flags & IFF_PROMISC) {
885 ULI526X_DBUG(0, "Enable PROM Mode", 0);
886 db->cr6_data |= CR6_PM | CR6_PBF;
887 update_cr6(db->cr6_data, db->ioaddr);
888 spin_unlock_irqrestore(&db->lock, flags);
889 return;
890 }
891
892 if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
893 ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
894 db->cr6_data &= ~(CR6_PM | CR6_PBF);
895 db->cr6_data |= CR6_PAM;
896 spin_unlock_irqrestore(&db->lock, flags);
897 return;
898 }
899
900 ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
901 send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
902 spin_unlock_irqrestore(&db->lock, flags);
903}
904
905static void
906ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
907{
908 ecmd->supported = (SUPPORTED_10baseT_Half |
909 SUPPORTED_10baseT_Full |
910 SUPPORTED_100baseT_Half |
911 SUPPORTED_100baseT_Full |
912 SUPPORTED_Autoneg |
913 SUPPORTED_MII);
914
915 ecmd->advertising = (ADVERTISED_10baseT_Half |
916 ADVERTISED_10baseT_Full |
917 ADVERTISED_100baseT_Half |
918 ADVERTISED_100baseT_Full |
919 ADVERTISED_Autoneg |
920 ADVERTISED_MII);
921
922
923 ecmd->port = PORT_MII;
924 ecmd->phy_address = db->phy_addr;
925
926 ecmd->transceiver = XCVR_EXTERNAL;
927
928 ecmd->speed = 10;
929 ecmd->duplex = DUPLEX_HALF;
930
931 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
932 {
933 ecmd->speed = 100;
934 }
935 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
936 {
937 ecmd->duplex = DUPLEX_FULL;
938 }
939 if(db->link_failed)
940 {
941 ecmd->speed = -1;
942 ecmd->duplex = -1;
943 }
944
945 if (db->media_mode & ULI526X_AUTO)
946 {
947 ecmd->autoneg = AUTONEG_ENABLE;
948 }
949}
950
951static void netdev_get_drvinfo(struct net_device *dev,
952 struct ethtool_drvinfo *info)
953{
954 struct uli526x_board_info *np = netdev_priv(dev);
955
956 strcpy(info->driver, DRV_NAME);
957 strcpy(info->version, DRV_VERSION);
958 if (np->pdev)
959 strcpy(info->bus_info, pci_name(np->pdev));
960 else
961 sprintf(info->bus_info, "EISA 0x%lx %d",
962 dev->base_addr, dev->irq);
963}
964
965static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
966 struct uli526x_board_info *np = netdev_priv(dev);
967
968 ULi_ethtool_gset(np, cmd);
969
970 return 0;
971}
972
973static u32 netdev_get_link(struct net_device *dev) {
974 struct uli526x_board_info *np = netdev_priv(dev);
975
976 if(np->link_failed)
977 return 0;
978 else
979 return 1;
980}
981
982static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
983{
984 wol->supported = WAKE_PHY | WAKE_MAGIC;
985 wol->wolopts = 0;
986}
987
988static struct ethtool_ops netdev_ethtool_ops = {
989 .get_drvinfo = netdev_get_drvinfo,
990 .get_settings = netdev_get_settings,
991 .get_link = netdev_get_link,
992 .get_wol = uli526x_get_wol,
993};
994
995/*
996 * A periodic timer routine
997 * Dynamic media sense, allocate Rx buffer...
998 */
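/*
 * Watchdog duties (descriptive note): this timer checks CR8 for a chip
 * error state and requests a dynamic reset when nothing was received in
 * the last interval, re-kicks Tx polling via DCR1 if transmission stalls
 * past ULI526X_TX_KICK, escalates to a full reset after
 * ULI526X_TX_TIMEOUT, and tracks link state through MII register 5.
 */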
999
1000static void uli526x_timer(unsigned long data)
1001{
1002 u32 tmp_cr8;
1003 unsigned char tmp_cr12=0;
1004 struct net_device *dev = (struct net_device *) data;
1005 struct uli526x_board_info *db = netdev_priv(dev);
1006 unsigned long flags;
1007 u8 TmpSpeed=10;
1008
1009 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1010 spin_lock_irqsave(&db->lock, flags);
1011
1012
1013 /* Dynamic reset ULI526X : system error or transmit time-out */
1014 tmp_cr8 = inl(db->ioaddr + DCR8);
1015 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1016 db->reset_cr8++;
1017 db->wait_reset = 1;
1018 }
1019 db->interval_rx_cnt = 0;
1020
1021 /* TX polling kick monitor */
1022 if ( db->tx_packet_cnt &&
1023 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
1024 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1025
1026 // TX Timeout
1027 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
1028 db->reset_TXtimeout++;
1029 db->wait_reset = 1;
1030 printk( "%s: Tx timeout - resetting\n",
1031 dev->name);
1032 }
1033 }
1034
1035 if (db->wait_reset) {
1036 ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1037 db->reset_count++;
1038 uli526x_dynamic_reset(dev);
1039 db->timer.expires = ULI526X_TIMER_WUT;
1040 add_timer(&db->timer);
1041 spin_unlock_irqrestore(&db->lock, flags);
1042 return;
1043 }
1044
1045 /* Link status check, Dynamic media type change */
1046 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
1047 tmp_cr12 = 3;
1048
1049 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1050 /* Link Failed */
1051 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1052 netif_carrier_off(dev);
1053 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1054 db->link_failed = 1;
1055
 1056			/* For forced 10/100M Half/Full mode: re-enable Auto-Negotiation */
 1057			/* (not needed in AUTO mode) */
1058 if ( !(db->media_mode & 0x8) )
1059 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1060
1061 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1062 if (db->media_mode & ULI526X_AUTO) {
1063 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1064 update_cr6(db->cr6_data, db->ioaddr);
1065 }
1066 } else
1067 if ((tmp_cr12 & 0x3) && db->link_failed) {
 1068		ULI526X_DBUG(0, "Link OK", tmp_cr12);
1069 db->link_failed = 0;
1070
1071 /* Auto Sense Speed */
1072 if ( (db->media_mode & ULI526X_AUTO) &&
1073 uli526x_sense_speed(db) )
1074 db->link_failed = 1;
1075 uli526x_process_mode(db);
1076
1077 if(db->link_failed==0)
1078 {
1079 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
1080 {
1081 TmpSpeed = 100;
1082 }
1083 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
1084 {
1085 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
1086 }
1087 else
1088 {
1089 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1090 }
1091 netif_carrier_on(dev);
1092 }
1093 /* SHOW_MEDIA_TYPE(db->op_mode); */
1094 }
1095 else if(!(tmp_cr12 & 0x3) && db->link_failed)
1096 {
1097 if(db->init==1)
1098 {
1099 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
1100 netif_carrier_off(dev);
1101 }
1102 }
1103 db->init=0;
1104
1105 /* Timer active again */
1106 db->timer.expires = ULI526X_TIMER_WUT;
1107 add_timer(&db->timer);
1108 spin_unlock_irqrestore(&db->lock, flags);
1109}
1110
1111
1112/*
 1113 * Dynamically reset the ULI526X board:
1114 * Stop ULI526X board
1115 * Free Tx/Rx allocated memory
1116 * Reset ULI526X board
1117 * Re-initialize ULI526X board
1118 */
1119
1120static void uli526x_dynamic_reset(struct net_device *dev)
1121{
1122 struct uli526x_board_info *db = netdev_priv(dev);
1123
1124 ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
1125
 1126	/* Stop MAC controller */
1127 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1128 update_cr6(db->cr6_data, dev->base_addr);
1129 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1130 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1131
1132 /* Disable upper layer interface */
1133 netif_stop_queue(dev);
1134
1135 /* Free Rx Allocate buffer */
1136 uli526x_free_rxbuffer(db);
1137
1138 /* system variable init */
1139 db->tx_packet_cnt = 0;
1140 db->rx_avail_cnt = 0;
1141 db->link_failed = 1;
1142 db->init=1;
1143 db->wait_reset = 0;
1144
1145 /* Re-initialize ULI526X board */
1146 uli526x_init(dev);
1147
1148 /* Restart upper layer interface */
1149 netif_wake_queue(dev);
1150}
1151
1152
1153/*
 1154 * Free all allocated Rx buffers
1155 */
1156
1157static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
1158{
1159 ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
1160
1161 /* free allocated rx buffer */
1162 while (db->rx_avail_cnt) {
1163 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1164 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1165 db->rx_avail_cnt--;
1166 }
1167}
1168
1169
1170/*
1171 * Reuse the SK buffer
1172 */
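/*
 * Ownership handoff (descriptive note): the buffer is DMA-mapped and
 * rdes2 is filled in first; the wmb() then guarantees those writes are
 * visible before rdes0's own bit (bit 31) is set, so the NIC can never
 * see a descriptor marked ready that still carries a stale buffer address.
 */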
1173
1174static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
1175{
1176 struct rx_desc *rxptr = db->rx_insert_ptr;
1177
1178 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1179 rxptr->rx_skb_ptr = skb;
1180 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1181 wmb();
1182 rxptr->rdes0 = cpu_to_le32(0x80000000);
1183 db->rx_avail_cnt++;
1184 db->rx_insert_ptr = rxptr->next_rx_desc;
1185 } else
1186 ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1187}
1188
1189
1190/*
 1191 * Initialize the Tx/Rx descriptor rings
 1192 * using a chained structure, and allocate the Tx/Rx buffers
1193 */
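/*
 * Ring layout (descriptive note): Tx and Rx descriptors live in one DMA
 * block, the TX_DESC_CNT Tx entries followed immediately by the Rx
 * entries. Each descriptor's tdes3/rdes3 holds the bus address of the
 * next descriptor, and the last entry points back to the first, forming
 * the chained rings whose bases are handed to the chip via DCR4/DCR3.
 */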
1194
1195static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
1196{
1197 struct tx_desc *tmp_tx;
1198 struct rx_desc *tmp_rx;
1199 unsigned char *tmp_buf;
1200 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1201 dma_addr_t tmp_buf_dma;
1202 int i;
1203
1204 ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
1205
1206 /* tx descriptor start pointer */
1207 db->tx_insert_ptr = db->first_tx_desc;
1208 db->tx_remove_ptr = db->first_tx_desc;
1209 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1210
1211 /* rx descriptor start pointer */
1212 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1213 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1214 db->rx_insert_ptr = db->first_rx_desc;
1215 db->rx_ready_ptr = db->first_rx_desc;
1216 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1217
1218 /* Init Transmit chain */
1219 tmp_buf = db->buf_pool_start;
1220 tmp_buf_dma = db->buf_pool_dma_start;
1221 tmp_tx_dma = db->first_tx_desc_dma;
1222 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1223 tmp_tx->tx_buf_ptr = tmp_buf;
1224 tmp_tx->tdes0 = cpu_to_le32(0);
1225 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1226 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1227 tmp_tx_dma += sizeof(struct tx_desc);
1228 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1229 tmp_tx->next_tx_desc = tmp_tx + 1;
1230 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1231 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1232 }
1233 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1234 tmp_tx->next_tx_desc = db->first_tx_desc;
1235
1236 /* Init Receive descriptor chain */
1237 tmp_rx_dma=db->first_rx_desc_dma;
1238 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1239 tmp_rx->rdes0 = cpu_to_le32(0);
1240 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1241 tmp_rx_dma += sizeof(struct rx_desc);
1242 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1243 tmp_rx->next_rx_desc = tmp_rx + 1;
1244 }
1245 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1246 tmp_rx->next_rx_desc = db->first_rx_desc;
1247
1248 /* pre-allocate Rx buffer */
1249 allocate_rx_buffer(db);
1250}
1251
1252
1253/*
1254 * Update CR6 value
 1255 * First stop the ULI526X, then write the new value and restart
1256 */
1257
1258static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1259{
1260
1261 outl(cr6_data, ioaddr + DCR6);
1262 udelay(5);
1263}
1264
1265
1266/*
1267 * Send a setup frame for M5261/M5263
 1268 * This setup frame initializes the ULI526X address filter mode
1269 */
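/*
 * Setup-frame layout (descriptive note): the frame body is 16 address
 * slots, each three 16-bit values stored in 32-bit words (192 bytes
 * total) - the node address, the broadcast address, up to mc_cnt
 * multicast addresses, and broadcast padding for the unused slots.
 * tdes1 = 0x890000c0 appears to mark the descriptor as a setup frame
 * with a 192-byte (0xc0) buffer.
 */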
1270
1271static void send_filter_frame(struct net_device *dev, int mc_cnt)
1272{
1273 struct uli526x_board_info *db = netdev_priv(dev);
1274 struct dev_mc_list *mcptr;
1275 struct tx_desc *txptr;
1276 u16 * addrptr;
1277 u32 * suptr;
1278 int i;
1279
1280 ULI526X_DBUG(0, "send_filter_frame()", 0);
1281
1282 txptr = db->tx_insert_ptr;
1283 suptr = (u32 *) txptr->tx_buf_ptr;
1284
1285 /* Node address */
1286 addrptr = (u16 *) dev->dev_addr;
1287 *suptr++ = addrptr[0];
1288 *suptr++ = addrptr[1];
1289 *suptr++ = addrptr[2];
1290
1291 /* broadcast address */
1292 *suptr++ = 0xffff;
1293 *suptr++ = 0xffff;
1294 *suptr++ = 0xffff;
1295
1296 /* fit the multicast address */
1297 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1298 addrptr = (u16 *) mcptr->dmi_addr;
1299 *suptr++ = addrptr[0];
1300 *suptr++ = addrptr[1];
1301 *suptr++ = addrptr[2];
1302 }
1303
1304 for (; i<14; i++) {
1305 *suptr++ = 0xffff;
1306 *suptr++ = 0xffff;
1307 *suptr++ = 0xffff;
1308 }
1309
1310 /* prepare the setup frame */
1311 db->tx_insert_ptr = txptr->next_tx_desc;
1312 txptr->tdes1 = cpu_to_le32(0x890000c0);
1313
1314 /* Resource Check and Send the setup packet */
1315 if (db->tx_packet_cnt < TX_DESC_CNT) {
 1316		/* Resource available */
1317 db->tx_packet_cnt++;
1318 txptr->tdes0 = cpu_to_le32(0x80000000);
1319 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1320 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1321 update_cr6(db->cr6_data, dev->base_addr);
1322 dev->trans_start = jiffies;
1323 } else
1324 printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
1325}
1326
1327
1328/*
 1329 * Allocate Rx buffers,
 1330 * filling as many Rx descriptors as possible
1331 */
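/*
 * Best-effort refill (descriptive note): the ring is topped up until
 * RX_DESC_CNT buffers are outstanding or dev_alloc_skb() fails; a later
 * call (e.g. from the periodic timer path) makes up any shortfall.
 */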
1332
1333static void allocate_rx_buffer(struct uli526x_board_info *db)
1334{
1335 struct rx_desc *rxptr;
1336 struct sk_buff *skb;
1337
1338 rxptr = db->rx_insert_ptr;
1339
1340 while(db->rx_avail_cnt < RX_DESC_CNT) {
1341 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1342 break;
1343 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1344 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1345 wmb();
1346 rxptr->rdes0 = cpu_to_le32(0x80000000);
1347 rxptr = rxptr->next_rx_desc;
1348 db->rx_avail_cnt++;
1349 }
1350
1351 db->rx_insert_ptr = rxptr;
1352}
1353
1354
1355/*
 1356 * Read one data word from the serial ROM
1357 */
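/*
 * Bit-bang sequence (descriptive note): this appears to follow the usual
 * 93C46-style microwire protocol - assert chip select, clock out the
 * read opcode (110b) and a 6-bit word offset MSB first, then clock in
 * 16 data bits, sampling CR9_CRDOUT while CR9_SRCLK is high.
 */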
1358
1359static u16 read_srom_word(long ioaddr, int offset)
1360{
1361 int i;
1362 u16 srom_data = 0;
1363 long cr9_ioaddr = ioaddr + DCR9;
1364
1365 outl(CR9_SROM_READ, cr9_ioaddr);
1366 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1367
1368 /* Send the Read Command 110b */
1369 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1370 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1371 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1372
1373 /* Send the offset */
1374 for (i = 5; i >= 0; i--) {
1375 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1376 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1377 }
1378
1379 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1380
1381 for (i = 16; i > 0; i--) {
1382 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1383 udelay(5);
1384 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1385 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1386 udelay(5);
1387 }
1388
1389 outl(CR9_SROM_READ, cr9_ioaddr);
1390 return srom_data;
1391}
1392
1393
1394/*
1395 * Auto sense the media mode
1396 */
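/*
 * Detection logic (descriptive note): MII register 1 (BMSR) is read twice
 * because the link-status bit is latched; once link and auto-negotiation
 * completion are both seen (mask 0x24), the partner-ability bits
 * (register 5, mask 0x01e0) select the best common mode, preferring 100M
 * full duplex down to 10M half duplex. Returns 1 on failure so the caller
 * can flag the link as down.
 */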
1397
1398static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1399{
1400 u8 ErrFlag = 0;
1401 u16 phy_mode;
1402
1403 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1404 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1405
1406 if ( (phy_mode & 0x24) == 0x24 ) {
1407
1408 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
1409 if(phy_mode&0x8000)
1410 phy_mode = 0x8000;
1411 else if(phy_mode&0x4000)
1412 phy_mode = 0x4000;
1413 else if(phy_mode&0x2000)
1414 phy_mode = 0x2000;
1415 else
1416 phy_mode = 0x1000;
1417
1418 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1419 switch (phy_mode) {
1420 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1421 case 0x2000: db->op_mode = ULI526X_10MFD; break;
1422 case 0x4000: db->op_mode = ULI526X_100MHF; break;
1423 case 0x8000: db->op_mode = ULI526X_100MFD; break;
1424 default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
1425 }
1426 } else {
1427 db->op_mode = ULI526X_10MHF;
1428 ULI526X_DBUG(0, "Link Failed :", phy_mode);
1429 ErrFlag = 1;
1430 }
1431
1432 return ErrFlag;
1433}
1434
1435
1436/*
1437 * Set 10/100 phyxcer capability
1438 * AUTO mode : phyxcer register4 is NIC capability
1439 * Force mode: phyxcer register4 is the force media
1440 */
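/*
 * Register-4 bits (descriptive note): 0x20/0x40/0x80/0x100 correspond to
 * the 10M-half/10M-full/100M-half/100M-full advertisement bits of the
 * MII auto-negotiation advertisement register; writing BMCR = 0x1200
 * afterwards restarts auto-negotiation with the new capability set.
 */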
1441
1442static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1443{
1444 u16 phy_reg;
1445
1446 /* Phyxcer capability setting */
1447 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1448
1449 if (db->media_mode & ULI526X_AUTO) {
1450 /* AUTO Mode */
1451 phy_reg |= db->PHY_reg4;
1452 } else {
1453 /* Force Mode */
1454 switch(db->media_mode) {
1455 case ULI526X_10MHF: phy_reg |= 0x20; break;
1456 case ULI526X_10MFD: phy_reg |= 0x40; break;
1457 case ULI526X_100MHF: phy_reg |= 0x80; break;
1458 case ULI526X_100MFD: phy_reg |= 0x100; break;
1459 }
1460
1461 }
1462
1463 /* Write new capability to Phyxcer Reg4 */
1464 if ( !(phy_reg & 0x01e0)) {
1465 phy_reg|=db->PHY_reg4;
1466 db->media_mode|=ULI526X_AUTO;
1467 }
1468 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1469
1470 /* Restart Auto-Negotiation */
1471 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1472 udelay(50);
1473}
1474
1475
1476/*
1477 * Process op-mode
 1478 * AUTO mode : PHY controller in Auto-negotiation Mode
1479 * Force mode: PHY controller in force mode with HUB
1480 * N-way force capability with SWITCH
1481 */
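/*
 * Forced-mode BMCR values (descriptive note): when the link partner lacks
 * N-Way capability the PHY control register is written directly - bit 13
 * (0x2000) selects 100M and bit 8 (0x100) full duplex, giving the
 * 0x0/0x100/0x2000/0x2100 values in the switch below.
 */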
1482
1483static void uli526x_process_mode(struct uli526x_board_info *db)
1484{
1485 u16 phy_reg;
1486
1487 /* Full Duplex Mode Check */
1488 if (db->op_mode & 0x4)
1489 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1490 else
1491 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1492
1493 update_cr6(db->cr6_data, db->ioaddr);
1494
 1495	/* Needed only in 10/100M phyxcer force mode */
1496 if ( !(db->media_mode & 0x8)) {
 1497		/* Force Mode */
1498 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1499 if ( !(phy_reg & 0x1) ) {
 1500			/* partner without N-Way capability */
1501 phy_reg = 0x0;
1502 switch(db->op_mode) {
1503 case ULI526X_10MHF: phy_reg = 0x0; break;
1504 case ULI526X_10MFD: phy_reg = 0x100; break;
1505 case ULI526X_100MHF: phy_reg = 0x2000; break;
1506 case ULI526X_100MFD: phy_reg = 0x2100; break;
1507 }
1508 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1509 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1510 }
1511 }
1512}
1513
1514
1515/*
1516 * Write a word to Phy register
1517 */
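/*
 * MII management framing (descriptive note): the DCR9 bit-bang below
 * emits the standard MDIO write frame - a preamble of 1s, start (01),
 * write opcode (01), 5-bit PHY address, 5-bit register address, a "10"
 * turnaround, then the 16 data bits, MSB first.
 */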
1518
1519static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1520{
1521 u16 i;
1522 unsigned long ioaddr;
1523
1524 if(chip_id == PCI_ULI5263_ID)
1525 {
1526 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1527 return;
1528 }
1529 /* M5261/M5263 Chip */
1530 ioaddr = iobase + DCR9;
1531
 1532	/* Send 35 synchronization clock pulses (preamble) to the PHY controller */
1533 for (i = 0; i < 35; i++)
1534 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1535
1536 /* Send start command(01) to Phy */
1537 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1538 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1539
1540 /* Send write command(01) to Phy */
1541 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1542 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1543
1544 /* Send Phy address */
1545 for (i = 0x10; i > 0; i = i >> 1)
1546 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1547
1548 /* Send register address */
1549 for (i = 0x10; i > 0; i = i >> 1)
1550 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1551
 1552	/* write turnaround transition (10) */
1553 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1554 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1555
1556 /* Write a word data to PHY controller */
1557 for ( i = 0x8000; i > 0; i >>= 1)
1558 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1559
1560}
1561
1562
1563/*
 1564 * Read one data word from a PHY register
1565 */
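/*
 * Read framing (descriptive note): same as the write path but with the
 * "10" read opcode; at the turnaround the bus is handed to the PHY, so
 * one dummy bit is clocked (phy_read_1bit) before the 16 data bits
 * arrive.
 */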
1566
1567static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1568{
1569 int i;
1570 u16 phy_data;
1571 unsigned long ioaddr;
1572
1573 if(chip_id == PCI_ULI5263_ID)
1574 return phy_readby_cr10(iobase, phy_addr, offset);
1575 /* M5261/M5263 Chip */
1576 ioaddr = iobase + DCR9;
1577
 1578	/* Send 35 synchronization clock pulses (preamble) to the PHY controller */
1579 for (i = 0; i < 35; i++)
1580 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1581
1582 /* Send start command(01) to Phy */
1583 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1584 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1585
1586 /* Send read command(10) to Phy */
1587 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1588 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1589
1590 /* Send Phy address */
1591 for (i = 0x10; i > 0; i = i >> 1)
1592 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1593
1594 /* Send register address */
1595 for (i = 0x10; i > 0; i = i >> 1)
1596 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1597
1598 /* Skip transition state */
1599 phy_read_1bit(ioaddr, chip_id);
1600
1601 /* read 16bit data */
1602 for (phy_data = 0, i = 0; i < 16; i++) {
1603 phy_data <<= 1;
1604 phy_data |= phy_read_1bit(ioaddr, chip_id);
1605 }
1606
1607 return phy_data;
1608}
1609
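/*
 * CR10 direct access (descriptive note, field layout inferred from the
 * shifts below): on the M5263 the MAC exposes the MII bus through DCR10
 * instead of bit-banging - the PHY address sits at bits 25:21, the
 * register offset at bits 20:16, and the data in the low 16 bits, with
 * bit 27 (0x08000000) starting a read, bit 26 (0x04000000) a write, and
 * bit 28 (0x10000000) polled as the completion flag.
 */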
1610static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1611{
1612 unsigned long ioaddr,cr10_value;
1613
1614 ioaddr = iobase + DCR10;
1615 cr10_value = phy_addr;
1616 cr10_value = (cr10_value<<5) + offset;
1617 cr10_value = (cr10_value<<16) + 0x08000000;
1618 outl(cr10_value,ioaddr);
1619 udelay(1);
1620 while(1)
1621 {
1622 cr10_value = inl(ioaddr);
1623 if(cr10_value&0x10000000)
1624 break;
1625 }
1626 return (cr10_value&0x0ffff);
1627}
1628
1629static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
1630{
1631 unsigned long ioaddr,cr10_value;
1632
1633 ioaddr = iobase + DCR10;
1634 cr10_value = phy_addr;
1635 cr10_value = (cr10_value<<5) + offset;
1636 cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
1637 outl(cr10_value,ioaddr);
1638 udelay(1);
1639}
1640/*
1641 * Write one bit data to Phy Controller
1642 */
1643
1644static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1645{
1646 outl(phy_data , ioaddr); /* MII Clock Low */
1647 udelay(1);
1648 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1649 udelay(1);
1650 outl(phy_data , ioaddr); /* MII Clock Low */
1651 udelay(1);
1652}
1653
1654
1655/*
1656 * Read one bit phy data from PHY controller
1657 */
1658
1659static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1660{
1661 u16 phy_data;
1662
1663 outl(0x50000 , ioaddr);
1664 udelay(1);
1665 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1666 outl(0x40000 , ioaddr);
1667 udelay(1);
1668
1669 return phy_data;
1670}
1671
1672
1673static struct pci_device_id uli526x_pci_tbl[] = {
1674 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1675 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1676 { 0, }
1677};
1678MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
1679
1680
1681static struct pci_driver uli526x_driver = {
1682 .name = "uli526x",
1683 .id_table = uli526x_pci_tbl,
1684 .probe = uli526x_init_one,
1685 .remove = __devexit_p(uli526x_remove_one),
1686};
1687
1688MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1689MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1690MODULE_LICENSE("GPL");
1691
1692MODULE_PARM(debug, "i");
1693MODULE_PARM(mode, "i");
1694MODULE_PARM(cr6set, "i");
1695MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1696MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
1697
1698/* Description:
 1699 * When the user loads the module with insmod, the system invokes
 1700 * init_module() to register the driver's services.
1701 */
1702
1703static int __init uli526x_init_module(void)
1704{
1705 int rc;
1706
1707 printk(version);
1708 printed_version = 1;
1709
1710 ULI526X_DBUG(0, "init_module() ", debug);
1711
1712 if (debug)
1713 uli526x_debug = debug; /* set debug flag */
1714 if (cr6set)
1715 uli526x_cr6_user_set = cr6set;
1716
1717 switch(mode) {
1718 case ULI526X_10MHF:
1719 case ULI526X_100MHF:
1720 case ULI526X_10MFD:
1721 case ULI526X_100MFD:
1722 uli526x_media_mode = mode;
1723 break;
1724 default:uli526x_media_mode = ULI526X_AUTO;
1725 break;
1726 }
1727
1728 rc = pci_module_init(&uli526x_driver);
1729 if (rc < 0)
1730 return rc;
1731
1732 return 0;
1733}
1734
1735
1736/*
1737 * Description:
 1738 * When the user removes the module with rmmod, the system invokes
 1739 * cleanup_module() to unregister all registered services.
1740 */
1741
1742static void __exit uli526x_cleanup_module(void)
1743{
1744 ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
1745 pci_unregister_driver(&uli526x_driver);
1746}
1747
1748module_init(uli526x_init_module);
1749module_exit(uli526x_cleanup_module);
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 6e74af62ca08..9e56fc346ba4 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -56,7 +56,7 @@
 #include <linux/sched.h>	/* for jiffies, HZ, etc. */
 #include <linux/cycx_drv.h>	/* API definitions */
 #include <linux/cycx_cfm.h>	/* CYCX firmware module definitions */
-#include <linux/delay.h>	/* udelay */
+#include <linux/delay.h>	/* udelay, msleep_interruptible */
 #include <asm/io.h>		/* read[wl], write[wl], ioremap, iounmap */
 
 #define MOD_VERSION	0
@@ -74,7 +74,6 @@ static int reset_cyc2x(void __iomem *addr);
 static int detect_cyc2x(void __iomem *addr);
 
 /* Miscellaneous functions */
-static void delay_cycx(int sec);
 static int get_option_index(long *optlist, long optval);
 static u16 checksum(u8 *buf, u32 len);
 
@@ -259,7 +258,7 @@ static int memory_exists(void __iomem *addr)
 		if (readw(addr + 0x10) == TEST_PATTERN)
 			return 1;
 
-		delay_cycx(1);
+		msleep_interruptible(1 * 1000);
 	}
 
 	return 0;
@@ -316,7 +315,7 @@ static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
 
 	/* 80186 was in hold, go */
 	writeb(0, addr + START_CPU);
-	delay_cycx(1);
+	msleep_interruptible(1 * 1000);
 }
 
 /* Load data.bin file through boot (reset) interface. */
@@ -462,13 +461,13 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
 	cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
 	/* reset is waiting for boot */
 	writew(GEN_POWER_ON, pt_cycld);
-	delay_cycx(1);
+	msleep_interruptible(1 * 1000);
 
 	for (j = 0 ; j < 3 ; j++)
 		if (!readw(pt_cycld))
 			goto reset_loaded;
 		else
-			delay_cycx(1);
+			msleep_interruptible(1 * 1000);
 	}
 
 	printk(KERN_ERR "%s: reset not started.\n", modname);
@@ -495,7 +494,7 @@ reset_loaded:
 
 	/* Arthur Ganzert's tip: wait a while after the firmware loading...
 	   seg abr 26 17:17:12 EST 1999 - acme */
-	delay_cycx(7);
+	msleep_interruptible(7 * 1000);
 	printk(KERN_INFO "%s: firmware loaded!\n", modname);
 
 	/* enable interrupts */
@@ -547,20 +546,13 @@ static int get_option_index(long *optlist, long optval)
 static int reset_cyc2x(void __iomem *addr)
 {
 	writeb(0, addr + RST_ENABLE);
-	delay_cycx(2);
+	msleep_interruptible(2 * 1000);
 	writeb(0, addr + RST_DISABLE);
-	delay_cycx(2);
+	msleep_interruptible(2 * 1000);
 
 	return memory_exists(addr);
 }
 
-/* Delay */
-static void delay_cycx(int sec)
-{
-	set_current_state(TASK_INTERRUPTIBLE);
-	schedule_timeout(sec * HZ);
-}
-
 /* Calculate 16-bit CRC using CCITT polynomial. */
 static u16 checksum(u8 *buf, u32 len)
 {
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
index a63f6a2cc4f7..cdd4c09c2d90 100644
--- a/drivers/net/wan/hdlc_generic.c
+++ b/drivers/net/wan/hdlc_generic.c
@@ -61,7 +61,7 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
 
 
 static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
-		    struct packet_type *p)
+		    struct packet_type *p, struct net_device *orig_dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	if (hdlc->proto.netif_rx)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 7f2e3653c5e5..6c302e9dbca2 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -86,7 +86,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
 /*
  *	Receive a LAPB frame via an ethernet interface.
  */
-static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
+static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
 {
 	int len, err;
 	struct lapbethdev *lapbeth;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b..0497dbdb8631 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
 void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
 
 unsigned short calc_checksum (char *, int);
-static int setup_fr_header(struct sk_buff** skb,
+static int setup_fr_header(struct sk_buff *skb,
 			   struct net_device* dev, char op_mode);
 
 
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
 	/* Move the if_header() code to here. By inserting frame
 	 * relay header in if_header() we would break the
 	 * tcpdump and other packet sniffers */
-	chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
+	chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
 	if (chan->fr_header_len < 0 ){
 		++chan->ifstats.tx_dropped;
 		++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
 		return 1;
 	}
 
-	skb_unlink(skb);
-
 	chan->transmit_length = len;
 	chan->delay_skb = skb;
 
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
 	}
 }
 
-static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
+static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
 			   char op_mode)
 {
-	struct sk_buff *skb = *skb_orig;
 	fr_channel_t *chan=dev->priv;
 
-	if (op_mode == WANPIPE){
-
+	if (op_mode == WANPIPE) {
 		chan->fr_header[0]=Q922_UI;
 
 		switch (htons(skb->protocol)){
-
 			case ETH_P_IP:
 				chan->fr_header[1]=NLPID_IP;
 				break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 	}
 
 	/* If we are in bridging mode, we must apply
-	 * an Ethernet header */
-	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
-
-
+	 * an Ethernet header
+	 */
+	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {
 		/* Encapsulate the packet as a bridged Ethernet frame. */
 #ifdef DEBUG
 		printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
 			dev->name);
 #endif
-
 		chan->fr_header[0] = 0x03;
 		chan->fr_header[1] = 0x00;
 		chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 		/* Yuck. */
 		skb->protocol = ETH_P_802_3;
 		return 8;
-
 	}
 
 	return 0;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 84b65c60c799..f58c794a963a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -1447,7 +1447,7 @@ static void sppp_print_bytes (u_char *p, u16 len)
  * after interrupt servicing to process frames queued via netif_rx.
  */
 
-static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p)
+static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
 {
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		return NET_RX_DROP;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 4d0b5a336bd7..d7947358e49d 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -4323,36 +4323,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
  */
 
 static const iw_handler	orinoco_handler[] = {
-	[SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) orinoco_ioctl_commit,
-	[SIOCGIWNAME  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getname,
-	[SIOCSIWFREQ  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfreq,
-	[SIOCGIWFREQ  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfreq,
-	[SIOCSIWMODE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setmode,
-	[SIOCGIWMODE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getmode,
-	[SIOCSIWSENS  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setsens,
-	[SIOCGIWSENS  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getsens,
-	[SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwrange,
-	[SIOCSIWSPY   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setspy,
-	[SIOCGIWSPY   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getspy,
-	[SIOCSIWAP    -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setwap,
-	[SIOCGIWAP    -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getwap,
-	[SIOCSIWSCAN  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setscan,
-	[SIOCGIWSCAN  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getscan,
-	[SIOCSIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setessid,
-	[SIOCGIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getessid,
-	[SIOCSIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setnick,
-	[SIOCGIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getnick,
-	[SIOCSIWRATE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrate,
-	[SIOCGIWRATE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrate,
-	[SIOCSIWRTS   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrts,
-	[SIOCGIWRTS   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrts,
-	[SIOCSIWFRAG  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfrag,
-	[SIOCGIWFRAG  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfrag,
-	[SIOCGIWRETRY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getretry,
-	[SIOCSIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_setiwencode,
-	[SIOCGIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwencode,
-	[SIOCSIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setpower,
-	[SIOCGIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getpower,
+	[SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
+	[SIOCGIWNAME  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
+	[SIOCSIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
+	[SIOCGIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
+	[SIOCSIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
+	[SIOCGIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
+	[SIOCSIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
+	[SIOCGIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
+	[SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
+	[SIOCSIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
+	[SIOCGIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
+	[SIOCSIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
+	[SIOCGIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
+	[SIOCSIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
+	[SIOCGIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
+	[SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
+	[SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
+	[SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
+	[SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
+	[SIOCSIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
+	[SIOCGIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
+	[SIOCSIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
+	[SIOCGIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
+	[SIOCSIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
+	[SIOCGIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
+	[SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
+	[SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
+	[SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
+	[SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
+	[SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
 };
 
 
@@ -4360,15 +4360,15 @@ static const iw_handler orinoco_handler[] = {
    Added typecasting since we no longer use iwreq_data -- Moustafa
  */
 static const iw_handler	orinoco_private_handler[] = {
-	[0] (iw_handler) orinoco_ioctl_reset,
-	[1] (iw_handler) orinoco_ioctl_reset,
-	[2] (iw_handler) orinoco_ioctl_setport3,
-	[3] (iw_handler) orinoco_ioctl_getport3,
-	[4] (iw_handler) orinoco_ioctl_setpreamble,
-	[5] (iw_handler) orinoco_ioctl_getpreamble,
-	[6] (iw_handler) orinoco_ioctl_setibssport,
-	[7] (iw_handler) orinoco_ioctl_getibssport,
-	[9] (iw_handler) orinoco_ioctl_getrid,
+	[0] = (iw_handler) orinoco_ioctl_reset,
+	[1] = (iw_handler) orinoco_ioctl_reset,
+	[2] = (iw_handler) orinoco_ioctl_setport3,
+	[3] = (iw_handler) orinoco_ioctl_getport3,
+	[4] = (iw_handler) orinoco_ioctl_setpreamble,
+	[5] = (iw_handler) orinoco_ioctl_getpreamble,
+	[6] = (iw_handler) orinoco_ioctl_setibssport,
+	[7] = (iw_handler) orinoco_ioctl_getibssport,
+	[9] = (iw_handler) orinoco_ioctl_getrid,
 };
 
 static const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 00498e2f1205..d3dad0aac7cb 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -23,13 +23,8 @@
 #include <linux/pci.h>
 #include <linux/parport.h>
 #include <linux/parport_pc.h>
-#include <linux/serial.h>
-#include <linux/serialP.h>
-#include <linux/list.h>
 #include <linux/8250_pci.h>
 
-#include <asm/serial.h>
-
 enum parport_pc_pci_cards {
 	titan_110l = 0,
 	titan_210l,
@@ -168,182 +163,147 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
 
-struct pci_board_no_ids {
-	int flags;
-	int num_ports;
-	int base_baud;
-	int uart_offset;
-	int reg_shift;
-	int (*init_fn)(struct pci_dev *dev, struct pci_board_no_ids *board,
-		int enable);
-	int first_uart_offset;
-};
-
-static int __devinit siig10x_init_fn(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
-	return pci_siig10x_fn(dev, enable);
-}
-
-static int __devinit siig20x_init_fn(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
-	return pci_siig20x_fn(dev, enable);
-}
-
-static int __devinit netmos_serial_init(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
-	board->num_ports = dev->subsystem_device & 0xf;
-	return 0;
-}
-
-static struct pci_board_no_ids pci_boards[] __devinitdata = {
-	/*
-	 * PCI Flags, Number of Ports, Base (Maximum) Baud Rate,
-	 * Offset to get to next UART's registers,
-	 * Register shift to use for memory-mapped I/O,
-	 * Initialization function, first UART offset
-	 */
-
-// Cards not tested are marked n/t
-// If you have one of these cards and it works for you, please tell me..
-
-/* titan_110l */	{ SPCI_FL_BASE1 | SPCI_FL_BASE_TABLE, 1, 921600 },
-/* titan_210l */	{ SPCI_FL_BASE1 | SPCI_FL_BASE_TABLE, 2, 921600 },
-/* netmos_9xx5_combo */	{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200, 0, 0, netmos_serial_init },
-/* netmos_9855 */	{ SPCI_FL_BASE2 | SPCI_FL_BASE_TABLE, 1, 115200, 0, 0, netmos_serial_init },
-/* avlab_1s1p (n/t) */	{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s1p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s1p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p (n/t) */	{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_2s1p (n/t) */	{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* avlab_2s1p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* avlab_2s1p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* siig_1s1p_10x */	{ SPCI_FL_BASE2, 1, 460800, 0, 0, siig10x_init_fn },
-/* siig_2s1p_10x */	{ SPCI_FL_BASE2, 1, 921600, 0, 0, siig10x_init_fn },
-/* siig_2p1s_20x */	{ SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
-/* siig_1s1p_20x */	{ SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
-/* siig_2s1p_20x */	{ SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
+/*
+ * This table describes the serial "geometry" of these boards.  Any
+ * quirks for these can be found in drivers/serial/8250_pci.c
+ *
+ * Cards not tested are marked n/t
+ * If you have one of these cards and it works for you, please tell me..
+ */
+static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
+	[titan_110l] = {
+		.flags		= FL_BASE1 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[titan_210l] = {
+		.flags		= FL_BASE1 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[netmos_9xx5_combo] = {
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_9855] = {
+		.flags		= FL_BASE2 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s1p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s1p_650] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s1p_850] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s2p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s2p_650] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s2p_850] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_2s1p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_2s1p_650] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_2s1p_850] = { /* nt */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[siig_1s1p_10x] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 1,
+		.base_baud	= 460800,
+		.uart_offset	= 8,
+	},
+	[siig_2s1p_10x] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_2p1s_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_1s1p_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_2s1p_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
 };
 
 struct parport_serial_private {
-	int num_ser;
-	int line[20];
-	struct pci_board_no_ids ser;
+	struct serial_private *serial;
 	int num_par;
 	struct parport *port[PARPORT_MAX];
 	struct parport_pc_pci par;
 };
 
-static int __devinit get_pci_port (struct pci_dev *dev,
-				   struct pci_board_no_ids *board,
-				   struct serial_struct *req,
-				   int idx)
-{
-	unsigned long port;
-	int base_idx;
-	int max_port;
-	int offset;
-
-	base_idx = SPCI_FL_GET_BASE(board->flags);
-	if (board->flags & SPCI_FL_BASE_TABLE)
-		base_idx += idx;
-
-	if (board->flags & SPCI_FL_REGION_SZ_CAP) {
-		max_port = pci_resource_len(dev, base_idx) / 8;
-		if (idx >= max_port)
-			return 1;
-	}
-
-	offset = board->first_uart_offset;
-
-	/* Timedia/SUNIX uses a mixture of BARs and offsets */
-	/* Ugh, this is ugly as all hell --- TYT */
-	if(dev->vendor == PCI_VENDOR_ID_TIMEDIA )	/* 0x1409 */
-		switch(idx) {
-			case 0: base_idx=0;
-				break;
-			case 1: base_idx=0; offset=8;
-				break;
-			case 2: base_idx=1;
-				break;
-			case 3: base_idx=1; offset=8;
-				break;
-			case 4: /* BAR 2*/
-			case 5: /* BAR 3 */
-			case 6: /* BAR 4*/
-			case 7: base_idx=idx-2; /* BAR 5*/
-		}
-
-	port = pci_resource_start(dev, base_idx) + offset;
-
-	if ((board->flags & SPCI_FL_BASE_TABLE) == 0)
-		port += idx * (board->uart_offset ? board->uart_offset : 8);
-
-	if (pci_resource_flags (dev, base_idx) & IORESOURCE_IO) {
-		int high_bits_offset = ((sizeof(long)-sizeof(int))*8);
-		req->port = port;
-		if (high_bits_offset)
-			req->port_high = port >> high_bits_offset;
-		else
-			req->port_high = 0;
-		return 0;
-	}
-	req->io_type = SERIAL_IO_MEM;
-	req->iomem_base = ioremap(port, board->uart_offset);
-	req->iomem_reg_shift = board->reg_shift;
-	req->port = 0;
-	return req->iomem_base ? 0 : 1;
-}
-
 /* Register the serial port(s) of a PCI card. */
 static int __devinit serial_register (struct pci_dev *dev,
 				      const struct pci_device_id *id)
 {
-	struct pci_board_no_ids *board;
 	struct parport_serial_private *priv = pci_get_drvdata (dev);
-	struct serial_struct serial_req;
-	int base_baud;
-	int k;
-	int success = 0;
-
-	priv->ser = pci_boards[id->driver_data];
-	board = &priv->ser;
-	if (board->init_fn && ((board->init_fn) (dev, board, 1) != 0))
-		return 1;
-
-	base_baud = board->base_baud;
-	if (!base_baud)
-		base_baud = BASE_BAUD;
-	memset (&serial_req, 0, sizeof (serial_req));
-
-	for (k = 0; k < board->num_ports; k++) {
-		int line;
+	struct pciserial_board *board;
+	struct serial_private *serial;
 
-		if (priv->num_ser == ARRAY_SIZE (priv->line)) {
-			printk (KERN_WARNING
-				"parport_serial: %s: only %u serial lines "
-				"supported (%d reported)\n", pci_name (dev),
-				ARRAY_SIZE (priv->line), board->num_ports);
-			break;
-		}
+	board = &pci_parport_serial_boards[id->driver_data];
+	serial = pciserial_init_ports(dev, board);
 
-		serial_req.irq = dev->irq;
-		if (get_pci_port (dev, board, &serial_req, k))
-			break;
-		serial_req.flags = ASYNC_SKIP_TEST | ASYNC_AUTOPROBE;
-		serial_req.baud_base = base_baud;
-		line = register_serial (&serial_req);
-		if (line < 0) {
-			printk (KERN_DEBUG
-				"parport_serial: register_serial failed\n");
-			continue;
-		}
-		priv->line[priv->num_ser++] = line;
-		success = 1;
-	}
+	if (IS_ERR(serial))
+		return PTR_ERR(serial);
 
-	return success ? 0 : 1;
+	priv->serial = serial;
+	return 0;
 }
 
 /* Register the parallel port(s) of a PCI card. */
@@ -411,7 +371,7 @@ static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
 	priv = kmalloc (sizeof *priv, GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
-	priv->num_ser = priv->num_par = 0;
+	memset(priv, 0, sizeof(struct parport_serial_private));
 	pci_set_drvdata (dev, priv);
 
 	err = pci_enable_device (dev);
@@ -444,15 +404,12 @@ static void __devexit parport_serial_pci_remove (struct pci_dev *dev)
 	struct parport_serial_private *priv = pci_get_drvdata (dev);
 	int i;
 
+	pci_set_drvdata(dev, NULL);
+
 	// Serial ports
-	for (i = 0; i < priv->num_ser; i++) {
-		unregister_serial (priv->line[i]);
+	if (priv->serial)
+		pciserial_remove_ports(priv->serial);
 
-		if (priv->ser.init_fn)
-			(priv->ser.init_fn) (dev, &priv->ser, 0);
-	}
-	pci_set_drvdata (dev, NULL);
-
 	// Parallel ports
 	for (i = 0; i < priv->num_par; i++)
 		parport_pc_unregister_port (priv->port[i]);
@@ -461,11 +418,47 @@ static void __devexit parport_serial_pci_remove (struct pci_dev *dev)
 	return;
 }
 
+static int parport_serial_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	struct parport_serial_private *priv = pci_get_drvdata(dev);
+
+	if (priv->serial)
+		pciserial_suspend_ports(priv->serial);
+
+	/* FIXME: What about parport? */
+
+	pci_save_state(dev);
+	pci_set_power_state(dev, pci_choose_state(dev, state));
+	return 0;
+}
+
+static int parport_serial_pci_resume(struct pci_dev *dev)
+{
+	struct parport_serial_private *priv = pci_get_drvdata(dev);
+
+	pci_set_power_state(dev, PCI_D0);
+	pci_restore_state(dev);
+
+	/*
+	 * The device may have been disabled.  Re-enable it.
+	 */
+	pci_enable_device(dev);
+
+	if (priv->serial)
+		pciserial_resume_ports(priv->serial);
+
+	/* FIXME: What about parport? */
+
+	return 0;
+}
+
 static struct pci_driver parport_serial_pci_driver = {
 	.name		= "parport_serial",
 	.id_table	= parport_serial_pci_tbl,
 	.probe		= parport_serial_pci_probe,
 	.remove		= __devexit_p(parport_serial_pci_remove),
+	.suspend	= parport_serial_pci_suspend,
+	.resume		= parport_serial_pci_resume,
 };
 
 
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a2eebc6eaacc..6b0e6464eb39 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -40,7 +40,7 @@
  * FIXME: IO should be max 256 bytes.  However, since we may
  * have a P2P bridge below a cardbus bridge, we need 4K.
  */
-#define CARDBUS_IO_SIZE		(4096)
+#define CARDBUS_IO_SIZE		(4*1024)
 #define CARDBUS_MEM_SIZE	(32*1024*1024)
 
 static void __devinit
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 84eedc965688..5598b4714f77 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -53,7 +53,9 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
 	if (resno < 6) {
 		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
 	} else if (resno == PCI_ROM_RESOURCE) {
-		new |= res->flags & IORESOURCE_ROM_ENABLE;
+		if (!(res->flags & IORESOURCE_ROM_ENABLE))
+			return;
+		new |= PCI_ROM_ADDRESS_ENABLE;
 		reg = dev->rom_base_reg;
 	} else {
 		/* Hmm, non-standard resource. */
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 533f90c05cdf..381f339e3200 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -112,7 +112,7 @@ qdio_min(int a,int b)
 
 /***************** SCRUBBER HELPER ROUTINES **********************/
 
-static inline volatile __u64
+static inline __u64
 qdio_get_micros(void)
 {
         return (get_clock() >> 10); /* time>>12 is microseconds */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e17b4d58a9f6..bfe3ba73bc0f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1299,13 +1299,10 @@ struct zfcp_port *
 zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
 		  u32 d_id)
 {
-	struct zfcp_port *port, *tmp_port;
+	struct zfcp_port *port;
 	int check_wwpn;
-	scsi_id_t scsi_id;
-	int found;
 
 	check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
-
 	/*
 	 * check that there is no port with this WWPN already in list
 	 */
@@ -1368,7 +1365,7 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
 	} else {
 		snprintf(port->sysfs_device.bus_id,
 			 BUS_ID_SIZE, "0x%016llx", wwpn);
-	        port->sysfs_device.parent = &adapter->ccw_device->dev;
+		port->sysfs_device.parent = &adapter->ccw_device->dev;
 	}
 	port->sysfs_device.release = zfcp_sysfs_port_release;
 	dev_set_drvdata(&port->sysfs_device, port);
@@ -1388,24 +1385,8 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
1388 1385
1389 zfcp_port_get(port); 1386 zfcp_port_get(port);
1390 1387
1391 scsi_id = 1;
1392 found = 0;
1393 write_lock_irq(&zfcp_data.config_lock); 1388 write_lock_irq(&zfcp_data.config_lock);
1394 list_for_each_entry(tmp_port, &adapter->port_list_head, list) { 1389 list_add_tail(&port->list, &adapter->port_list_head);
1395 if (atomic_test_mask(ZFCP_STATUS_PORT_NO_SCSI_ID,
1396 &tmp_port->status))
1397 continue;
1398 if (tmp_port->scsi_id != scsi_id) {
1399 found = 1;
1400 break;
1401 }
1402 scsi_id++;
1403 }
1404 port->scsi_id = scsi_id;
1405 if (found)
1406 list_add_tail(&port->list, &tmp_port->list);
1407 else
1408 list_add_tail(&port->list, &adapter->port_list_head);
1409 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 1390 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
1410 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); 1391 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
1411 if (d_id == ZFCP_DID_DIRECTORY_SERVICE) 1392 if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
@@ -1427,6 +1408,9 @@ zfcp_port_dequeue(struct zfcp_port *port)
1427 list_del(&port->list); 1408 list_del(&port->list);
1428 port->adapter->ports--; 1409 port->adapter->ports--;
1429 write_unlock_irq(&zfcp_data.config_lock); 1410 write_unlock_irq(&zfcp_data.config_lock);
1411 if (port->rport)
1412 fc_remote_port_delete(port->rport);
1413 port->rport = NULL;
1430 zfcp_adapter_put(port->adapter); 1414 zfcp_adapter_put(port->adapter);
1431 zfcp_sysfs_port_remove_files(&port->sysfs_device, 1415 zfcp_sysfs_port_remove_files(&port->sysfs_device,
1432 atomic_read(&port->status)); 1416 atomic_read(&port->status));
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0fc46381fc22..b30abab77da3 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,9 +202,19 @@ static int
202zfcp_ccw_set_offline(struct ccw_device *ccw_device) 202zfcp_ccw_set_offline(struct ccw_device *ccw_device)
203{ 203{
204 struct zfcp_adapter *adapter; 204 struct zfcp_adapter *adapter;
205 struct zfcp_port *port;
206 struct fc_rport *rport;
205 207
206 down(&zfcp_data.config_sema); 208 down(&zfcp_data.config_sema);
207 adapter = dev_get_drvdata(&ccw_device->dev); 209 adapter = dev_get_drvdata(&ccw_device->dev);
210 /* might be racy, but we cannot take config_lock due to the fact that
211 fc_remote_port_delete might sleep */
212 list_for_each_entry(port, &adapter->port_list_head, list)
213 if (port->rport) {
214 rport = port->rport;
215 port->rport = NULL;
216 fc_remote_port_delete(rport);
217 }
208 zfcp_erp_adapter_shutdown(adapter, 0); 218 zfcp_erp_adapter_shutdown(adapter, 0);
209 zfcp_erp_wait(adapter); 219 zfcp_erp_wait(adapter);
210 zfcp_adapter_scsi_unregister(adapter); 220 zfcp_adapter_scsi_unregister(adapter);
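As the in-line comment notes, fc_remote_port_delete() may sleep, which rules out holding the config_lock spinlock while walking the port list here. The pattern is to unpublish the rport pointer first and only then call into the transport. A hedged sketch of that detach loop (illustrative only, not the committed code):

/* Sketch: detach FC transport rports before adapter shutdown.
 * Walks the list lock-free, accepting the documented race,
 * because fc_remote_port_delete() may sleep. */
static void demo_detach_rports(struct zfcp_adapter *adapter)
{
	struct zfcp_port *port;
	struct fc_rport *rport;

	list_for_each_entry(port, &adapter->port_list_head, list) {
		rport = port->rport;
		if (!rport)
			continue;
		port->rport = NULL;		/* unpublish first */
		fc_remote_port_delete(rport);	/* then sleep safely */
	}
}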
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4103b5be7683..455e902533a9 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -906,6 +906,7 @@ struct zfcp_adapter {
906 */ 906 */
907struct zfcp_port { 907struct zfcp_port {
908 struct device sysfs_device; /* sysfs device */ 908 struct device sysfs_device; /* sysfs device */
909 struct fc_rport *rport; /* rport of fc transport class */
909 struct list_head list; /* list of remote ports */ 910 struct list_head list; /* list of remote ports */
910 atomic_t refcount; /* reference count */ 911 atomic_t refcount; /* reference count */
911 wait_queue_head_t remove_wq; /* can be used to wait for 912 wait_queue_head_t remove_wq; /* can be used to wait for
@@ -916,7 +917,6 @@ struct zfcp_port {
916 list */ 917 list */
917 u32 units; /* # of logical units in list */ 918 u32 units; /* # of logical units in list */
918 atomic_t status; /* status of this remote port */ 919 atomic_t status; /* status of this remote port */
919 scsi_id_t scsi_id; /* own SCSI ID */
920 wwn_t wwnn; /* WWNN if known */ 920 wwn_t wwnn; /* WWNN if known */
921 wwn_t wwpn; /* WWPN */ 921 wwn_t wwpn; /* WWPN */
922 fc_id_t d_id; /* D_ID */ 922 fc_id_t d_id; /* D_ID */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0cf31f7d1c0f..cb4f612550ba 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3360,13 +3360,32 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3360 if ((result == ZFCP_ERP_SUCCEEDED) 3360 if ((result == ZFCP_ERP_SUCCEEDED)
3361 && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, 3361 && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
3362 &unit->status)) 3362 &unit->status))
3363 && (!unit->device)) 3363 && !unit->device
3364 scsi_add_device(unit->port->adapter->scsi_host, 0, 3364 && port->rport)
3365 unit->port->scsi_id, unit->scsi_lun); 3365 scsi_add_device(port->adapter->scsi_host, 0,
3366 port->rport->scsi_target_id,
3367 unit->scsi_lun);
3366 zfcp_unit_put(unit); 3368 zfcp_unit_put(unit);
3367 break; 3369 break;
3368 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 3370 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3369 case ZFCP_ERP_ACTION_REOPEN_PORT: 3371 case ZFCP_ERP_ACTION_REOPEN_PORT:
3372 if ((result == ZFCP_ERP_SUCCEEDED)
3373 && !atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN,
3374 &port->status)
3375 && !port->rport) {
3376 struct fc_rport_identifiers ids;
3377 ids.node_name = port->wwnn;
3378 ids.port_name = port->wwpn;
3379 ids.port_id = port->d_id;
3380 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
3381 port->rport =
3382 fc_remote_port_add(adapter->scsi_host, 0, &ids);
3383 if (!port->rport)
 3384 ZFCP_LOG_NORMAL("failed registration of rport "
3385 "(adapter %s, wwpn=0x%016Lx)\n",
3386 zfcp_get_busid_by_port(port),
3387 port->wwpn);
3388 }
3370 zfcp_port_put(port); 3389 zfcp_port_put(port);
3371 break; 3390 break;
3372 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3391 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
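The registration added above follows the stock FC transport-class recipe: fill a struct fc_rport_identifiers and let fc_remote_port_add() allocate the rport and assign its scsi_target_id. A trimmed, hedged sketch of the same recipe (the demo_ name is invented):

#include <scsi/scsi_transport_fc.h>

/* Illustrative: register one FCP target with the transport class. */
static struct fc_rport *demo_register_rport(struct Scsi_Host *shost,
					    u64 wwnn, u64 wwpn, u32 d_id)
{
	struct fc_rport_identifiers ids = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_id   = d_id,
		.roles     = FC_RPORT_ROLE_FCP_TARGET,
	};

	/* channel 0; the transport picks the target id */
	return fc_remote_port_add(shost, 0, &ids);
}

The scsi_target_id the transport hands back is what replaces the hand-rolled port->scsi_id bookkeeping deleted from zfcp_port_enqueue() earlier in this patch.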
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 42df7e57eeae..cd98a2de9f8f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -143,6 +143,8 @@ extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
143 struct scsi_cmnd *, struct timer_list *); 143 struct scsi_cmnd *, struct timer_list *);
144extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, 144extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *,
145 struct timer_list *); 145 struct timer_list *);
146extern void zfcp_set_fc_host_attrs(struct zfcp_adapter *);
147extern void zfcp_set_fc_rport_attrs(struct zfcp_port *);
146extern struct scsi_transport_template *zfcp_transport_template; 148extern struct scsi_transport_template *zfcp_transport_template;
147extern struct fc_function_template zfcp_transport_functions; 149extern struct fc_function_template zfcp_transport_functions;
148 150
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0d9f20edc490..c007b6424e74 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2062,6 +2062,7 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2062 zfcp_erp_adapter_shutdown(adapter, 0); 2062 zfcp_erp_adapter_shutdown(adapter, 0);
2063 return -EIO; 2063 return -EIO;
2064 } 2064 }
2065 zfcp_set_fc_host_attrs(adapter);
2065 return 0; 2066 return 0;
2066} 2067}
2067 2068
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b61d309352c3..31a76065cf28 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -389,7 +389,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id,
389 struct zfcp_unit *unit, *retval = NULL; 389 struct zfcp_unit *unit, *retval = NULL;
390 390
391 list_for_each_entry(port, &adapter->port_list_head, list) { 391 list_for_each_entry(port, &adapter->port_list_head, list) {
392 if (id != port->scsi_id) 392 if (!port->rport || (id != port->rport->scsi_target_id))
393 continue; 393 continue;
394 list_for_each_entry(unit, &port->unit_list_head, list) { 394 list_for_each_entry(unit, &port->unit_list_head, list) {
395 if (lun == unit->scsi_lun) { 395 if (lun == unit->scsi_lun) {
@@ -408,7 +408,7 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
408 struct zfcp_port *port; 408 struct zfcp_port *port;
409 409
410 list_for_each_entry(port, &adapter->port_list_head, list) { 410 list_for_each_entry(port, &adapter->port_list_head, list) {
411 if (id == port->scsi_id) 411 if (port->rport && (id == port->rport->scsi_target_id))
412 return port; 412 return port;
413 } 413 }
414 return (struct zfcp_port *) NULL; 414 return (struct zfcp_port *) NULL;
@@ -634,7 +634,6 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
634{ 634{
635 int retval; 635 int retval;
636 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata; 636 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
637 struct Scsi_Host *scsi_host = scpnt->device->host;
638 637
639 if (!unit) { 638 if (!unit) {
640 ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n"); 639 ZFCP_LOG_NORMAL("bug: Tried reset for nonexistent unit\n");
@@ -729,7 +728,6 @@ zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
729{ 728{
730 int retval = 0; 729 int retval = 0;
731 struct zfcp_unit *unit; 730 struct zfcp_unit *unit;
732 struct Scsi_Host *scsi_host = scpnt->device->host;
733 731
734 unit = (struct zfcp_unit *) scpnt->device->hostdata; 732 unit = (struct zfcp_unit *) scpnt->device->hostdata;
735 ZFCP_LOG_NORMAL("bus reset because of problems with " 733 ZFCP_LOG_NORMAL("bus reset because of problems with "
@@ -753,7 +751,6 @@ zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
753{ 751{
754 int retval = 0; 752 int retval = 0;
755 struct zfcp_unit *unit; 753 struct zfcp_unit *unit;
756 struct Scsi_Host *scsi_host = scpnt->device->host;
757 754
758 unit = (struct zfcp_unit *) scpnt->device->hostdata; 755 unit = (struct zfcp_unit *) scpnt->device->hostdata;
759 ZFCP_LOG_NORMAL("host reset because of problems with " 756 ZFCP_LOG_NORMAL("host reset because of problems with "
@@ -833,6 +830,7 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
833 shost = adapter->scsi_host; 830 shost = adapter->scsi_host;
834 if (!shost) 831 if (!shost)
835 return; 832 return;
833 fc_remove_host(shost);
836 scsi_remove_host(shost); 834 scsi_remove_host(shost);
837 scsi_host_put(shost); 835 scsi_host_put(shost);
838 adapter->scsi_host = NULL; 836 adapter->scsi_host = NULL;
@@ -906,6 +904,18 @@ zfcp_get_node_name(struct scsi_target *starget)
906 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 904 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
907} 905}
908 906
907void
908zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
909{
910 struct Scsi_Host *shost = adapter->scsi_host;
911
912 fc_host_node_name(shost) = adapter->wwnn;
913 fc_host_port_name(shost) = adapter->wwpn;
914 strncpy(fc_host_serial_number(shost), adapter->serial_number,
915 min(FC_SERIAL_NUMBER_SIZE, 32));
916 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
917}
918
909struct fc_function_template zfcp_transport_functions = { 919struct fc_function_template zfcp_transport_functions = {
910 .get_starget_port_id = zfcp_get_port_id, 920 .get_starget_port_id = zfcp_get_port_id,
911 .get_starget_port_name = zfcp_get_port_name, 921 .get_starget_port_name = zfcp_get_port_name,
@@ -913,6 +923,11 @@ struct fc_function_template zfcp_transport_functions = {
913 .show_starget_port_id = 1, 923 .show_starget_port_id = 1,
914 .show_starget_port_name = 1, 924 .show_starget_port_name = 1,
915 .show_starget_node_name = 1, 925 .show_starget_node_name = 1,
926 .show_rport_supported_classes = 1,
927 .show_host_node_name = 1,
928 .show_host_port_name = 1,
929 .show_host_supported_classes = 1,
930 .show_host_serial_number = 1,
916}; 931};
917 932
918/** 933/**
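fc_host_node_name() and the other fc_host_*() accessors used in zfcp_set_fc_host_attrs() are lvalue macros over transport-class data attached to the Scsi_Host, so the driver can assign to them once the exchange-config data is in hand; the show_host_* flags added to the template then make those values visible through sysfs. A minimal sketch, assuming fixed (assign-once) attributes:

#include <scsi/scsi_transport_fc.h>

/* Sketch: expose host attributes the LLD sets once via
 * fc_host_node_name(shost) = ...; and friends. */
static struct fc_function_template demo_transport_template = {
	.show_host_node_name		= 1,
	.show_host_port_name		= 1,
	.show_host_serial_number	= 1,
	.show_host_supported_classes	= 1,
};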
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
index 7a84c7d474d9..c55e82d91deb 100644
--- a/drivers/s390/scsi/zfcp_sysfs_port.c
+++ b/drivers/s390/scsi/zfcp_sysfs_port.c
@@ -67,7 +67,6 @@ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);
67ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status)); 67ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
68ZFCP_DEFINE_PORT_ATTR(wwnn, "0x%016llx\n", port->wwnn); 68ZFCP_DEFINE_PORT_ATTR(wwnn, "0x%016llx\n", port->wwnn);
69ZFCP_DEFINE_PORT_ATTR(d_id, "0x%06x\n", port->d_id); 69ZFCP_DEFINE_PORT_ATTR(d_id, "0x%06x\n", port->d_id);
70ZFCP_DEFINE_PORT_ATTR(scsi_id, "0x%x\n", port->scsi_id);
71ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask 70ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
72 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)); 71 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
73ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask 72ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
@@ -263,7 +262,6 @@ static struct attribute_group zfcp_port_common_attr_group = {
263static struct attribute *zfcp_port_no_ns_attrs[] = { 262static struct attribute *zfcp_port_no_ns_attrs[] = {
264 &dev_attr_unit_add.attr, 263 &dev_attr_unit_add.attr,
265 &dev_attr_unit_remove.attr, 264 &dev_attr_unit_remove.attr,
266 &dev_attr_scsi_id.attr,
267 NULL 265 NULL
268}; 266};
269 267
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index e3b9692b9688..179c95c878ac 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -1,26 +1,34 @@
1/* 1/*
2 * ahci.c - AHCI SATA support 2 * ahci.c - AHCI SATA support
3 * 3 *
4 * Copyright 2004 Red Hat, Inc. 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
5 * 7 *
6 * The contents of this file are subject to the Open 8 * Copyright 2004-2005 Red Hat, Inc.
7 * Software License version 1.1 that can be found at
8 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
9 * by reference.
10 * 9 *
11 * Alternatively, the contents of this file may be used under the terms
12 * of the GNU General Public License version 2 (the "GPL") as distributed
13 * in the kernel source COPYING file, in which case the provisions of
14 * the GPL are applicable instead of the above. If you wish to allow
15 * the use of your version of this file only under the terms of the
16 * GPL and not to allow others to use your version of this file under
17 * the OSL, indicate your decision by deleting the provisions above and
18 * replace them with the notice and other provisions required by the GPL.
19 * If you do not delete the provisions above, a recipient may use your
20 * version of this file under either the OSL or the GPL.
21 * 10 *
22 * Version 1.0 of the AHCI specification: 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
23 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf 30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
24 * 32 *
25 */ 33 */
26 34
@@ -269,6 +277,8 @@ static struct pci_device_id ahci_pci_tbl[] = {
269 board_ahci }, /* ESB2 */ 277 board_ahci }, /* ESB2 */
270 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 278 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
271 board_ahci }, /* ESB2 */ 279 board_ahci }, /* ESB2 */
280 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
281 board_ahci }, /* ICH7-M DH */
272 { } /* terminate list */ 282 { } /* terminate list */
273}; 283};
274 284
@@ -584,12 +594,16 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
584 594
585static void ahci_eng_timeout(struct ata_port *ap) 595static void ahci_eng_timeout(struct ata_port *ap)
586{ 596{
587 void *mmio = ap->host_set->mmio_base; 597 struct ata_host_set *host_set = ap->host_set;
598 void *mmio = host_set->mmio_base;
588 void *port_mmio = ahci_port_base(mmio, ap->port_no); 599 void *port_mmio = ahci_port_base(mmio, ap->port_no);
589 struct ata_queued_cmd *qc; 600 struct ata_queued_cmd *qc;
601 unsigned long flags;
590 602
591 DPRINTK("ENTER\n"); 603 DPRINTK("ENTER\n");
592 604
605 spin_lock_irqsave(&host_set->lock, flags);
606
593 ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT)); 607 ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));
594 608
595 qc = ata_qc_from_tag(ap, ap->active_tag); 609 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -607,6 +621,7 @@ static void ahci_eng_timeout(struct ata_port *ap)
607 ata_qc_complete(qc, ATA_ERR); 621 ata_qc_complete(qc, ATA_ERR);
608 } 622 }
609 623
624 spin_unlock_irqrestore(&host_set->lock, flags);
610} 625}
611 626
612static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 627static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -696,9 +711,6 @@ static int ahci_qc_issue(struct ata_queued_cmd *qc)
696 struct ata_port *ap = qc->ap; 711 struct ata_port *ap = qc->ap;
697 void *port_mmio = (void *) ap->ioaddr.cmd_addr; 712 void *port_mmio = (void *) ap->ioaddr.cmd_addr;
698 713
699 writel(1, port_mmio + PORT_SCR_ACT);
700 readl(port_mmio + PORT_SCR_ACT); /* flush */
701
702 writel(1, port_mmio + PORT_CMD_ISSUE); 714 writel(1, port_mmio + PORT_CMD_ISSUE);
703 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 715 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
704 716
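Unlike the interrupt handler, the ahci_eng_timeout() path above runs from the SCSI error-handler thread with no lock held, so the hunk now brackets the qc lookup and completion with host_set->lock to keep the irq path out. The shape of the fix, sketched under that assumption (demo_ name invented):

#include <linux/libata.h>
#include <linux/spinlock.h>

/* Sketch: serialize an EH-thread timeout against the irq handler. */
static void demo_eng_timeout(struct ata_port *ap)
{
	struct ata_host_set *host_set = ap->host_set;
	unsigned long flags;

	spin_lock_irqsave(&host_set->lock, flags);
	/* read irq status, find the active qc, complete it with ATA_ERR */
	spin_unlock_irqrestore(&host_set->lock, flags);
}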
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index d96ebf9d2228..fb28c1261848 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -1,24 +1,42 @@
1/* 1/*
2 2 * ata_piix.c - Intel PATA/SATA controllers
3 ata_piix.c - Intel PATA/SATA controllers 3 *
4 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails.
7 on emails. 7 *
8 8 *
9 9 * Copyright 2003-2005 Red Hat Inc
10 Copyright 2003-2004 Red Hat Inc 10 * Copyright 2003-2005 Jeff Garzik
11 Copyright 2003-2004 Jeff Garzik 11 *
12 12 *
13 13 * Copyright header from piix.c:
14 Copyright header from piix.c: 14 *
15 15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 17 * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
18 Copyright (C) 2003 Red Hat Inc <alan@redhat.com> 18 *
19 19 *
20 May be copied or modified under the terms of the GNU General Public License 20 * This program is free software; you can redistribute it and/or modify
21 21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 *
34 *
35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.*
37 *
38 * Hardware documentation available at http://developer.intel.com/
39 *
22 */ 40 */
23 41
24#include <linux/kernel.h> 42#include <linux/kernel.h>
@@ -629,13 +647,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
629 port_info[1] = NULL; 647 port_info[1] = NULL;
630 648
631 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 649 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
632 u8 tmp; 650 u8 tmp;
633 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 651 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
634 if (tmp == PIIX_AHCI_DEVICE) { 652 if (tmp == PIIX_AHCI_DEVICE) {
635 int rc = piix_disable_ahci(pdev); 653 int rc = piix_disable_ahci(pdev);
636 if (rc) 654 if (rc)
637 return rc; 655 return rc;
638 } 656 }
639 } 657 }
640 658
641 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { 659 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) {
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fe09d145542a..2cb3c8340ca8 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1442,7 +1442,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1442 */ 1442 */
1443static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { 1443static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
1444 {"vscsi", "IBM,v-scsi"}, 1444 {"vscsi", "IBM,v-scsi"},
1445 {0,} 1445 { "", "" }
1446}; 1446};
1447 1447
1448MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 1448MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 035f615817d7..8bf5652f1060 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <asm/vio.h> 30#include <asm/vio.h>
31#include <asm/prom.h>
31#include <asm/iommu.h> 32#include <asm/iommu.h>
32#include <asm/hvcall.h> 33#include <asm/hvcall.h>
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index f4e7dcb6492b..dee4b12b0342 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1,25 +1,35 @@
1/* 1/*
2 libata-core.c - helper library for ATA 2 * libata-core.c - helper library for ATA
3 3 *
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2003-2004 Jeff Garzik 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 6 * on emails.
7 The contents of this file are subject to the Open 7 *
8 Software License version 1.1 that can be found at 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 9 * Copyright 2003-2004 Jeff Garzik
10 by reference. 10 *
11 11 *
12 Alternatively, the contents of this file may be used under the terms 12 * This program is free software; you can redistribute it and/or modify
13 of the GNU General Public License version 2 (the "GPL") as distributed 13 * it under the terms of the GNU General Public License as published by
14 in the kernel source COPYING file, in which case the provisions of 14 * the Free Software Foundation; either version 2, or (at your option)
15 the GPL are applicable instead of the above. If you wish to allow 15 * any later version.
16 the use of your version of this file only under the terms of the 16 *
17 GPL and not to allow others to use your version of this file under 17 * This program is distributed in the hope that it will be useful,
18 the OSL, indicate your decision by deleting the provisions above and 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 replace them with the notice and other provisions required by the GPL. 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 If you do not delete the provisions above, a recipient may use your 20 * GNU General Public License for more details.
21 version of this file under either the OSL or the GPL. 21 *
22 22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
23 */ 33 */
24 34
25#include <linux/config.h> 35#include <linux/config.h>
@@ -1304,12 +1314,12 @@ static inline u8 ata_dev_knobble(struct ata_port *ap)
1304/** 1314/**
1305 * ata_dev_config - Run device specific handlers and check for 1315 * ata_dev_config - Run device specific handlers and check for
1306 * SATA->PATA bridges 1316 * SATA->PATA bridges
1307 * @ap: Bus 1317 * @ap: Bus
1308 * @i: Device 1318 * @i: Device
1309 * 1319 *
1310 * LOCKING: 1320 * LOCKING:
1311 */ 1321 */
1312 1322
1313void ata_dev_config(struct ata_port *ap, unsigned int i) 1323void ata_dev_config(struct ata_port *ap, unsigned int i)
1314{ 1324{
1315 /* limit bridge transfers to udma5, 200 sectors */ 1325 /* limit bridge transfers to udma5, 200 sectors */
@@ -2377,6 +2387,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2377} 2387}
2378 2388
2379/** 2389/**
2390 * ata_poll_qc_complete - turn irq back on and finish qc
2391 * @qc: Command to complete
2392 * @drv_stat: ATA status register content
2393 *
2394 * LOCKING:
2395 * None. (grabs host lock)
2396 */
2397
2398void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2399{
2400 struct ata_port *ap = qc->ap;
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&ap->host_set->lock, flags);
2404 ap->flags &= ~ATA_FLAG_NOINTR;
2405 ata_irq_on(ap);
2406 ata_qc_complete(qc, drv_stat);
2407 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2408}
2409
2410/**
2380 * ata_pio_poll - 2411 * ata_pio_poll -
2381 * @ap: 2412 * @ap:
2382 * 2413 *
@@ -2438,11 +2469,10 @@ static void ata_pio_complete (struct ata_port *ap)
2438 u8 drv_stat; 2469 u8 drv_stat;
2439 2470
2440 /* 2471 /*
2441 * This is purely hueristic. This is a fast path. 2472 * This is purely heuristic. This is a fast path. Sometimes when
2442 * Sometimes when we enter, BSY will be cleared in 2473 * we enter, BSY will be cleared in a chk-status or two. If not,
2443 * a chk-status or two. If not, the drive is probably seeking 2474 * the drive is probably seeking or something. Snooze for a couple
2444 * or something. Snooze for a couple msecs, then 2475 * msecs, then chk-status again. If still busy, fall back to
2445 * chk-status again. If still busy, fall back to
2446 * PIO_ST_POLL state. 2476 * PIO_ST_POLL state.
2447 */ 2477 */
2448 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2478 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
@@ -2467,9 +2497,7 @@ static void ata_pio_complete (struct ata_port *ap)
2467 2497
2468 ap->pio_task_state = PIO_ST_IDLE; 2498 ap->pio_task_state = PIO_ST_IDLE;
2469 2499
2470 ata_irq_on(ap); 2500 ata_poll_qc_complete(qc, drv_stat);
2471
2472 ata_qc_complete(qc, drv_stat);
2473} 2501}
2474 2502
2475 2503
@@ -2494,6 +2522,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
2494#endif /* __BIG_ENDIAN */ 2522#endif /* __BIG_ENDIAN */
2495} 2523}
2496 2524
2525/**
2526 * ata_mmio_data_xfer - Transfer data by MMIO
2527 * @ap: port to read/write
2528 * @buf: data buffer
2529 * @buflen: buffer length
2530 * @do_write: read/write
2531 *
2532 * Transfer data from/to the device data register by MMIO.
2533 *
2534 * LOCKING:
2535 * Inherited from caller.
2536 *
2537 */
2538
2497static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 2539static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2498 unsigned int buflen, int write_data) 2540 unsigned int buflen, int write_data)
2499{ 2541{
@@ -2502,6 +2544,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2502 u16 *buf16 = (u16 *) buf; 2544 u16 *buf16 = (u16 *) buf;
2503 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; 2545 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2504 2546
2547 /* Transfer multiple of 2 bytes */
2505 if (write_data) { 2548 if (write_data) {
2506 for (i = 0; i < words; i++) 2549 for (i = 0; i < words; i++)
2507 writew(le16_to_cpu(buf16[i]), mmio); 2550 writew(le16_to_cpu(buf16[i]), mmio);
@@ -2509,19 +2552,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2509 for (i = 0; i < words; i++) 2552 for (i = 0; i < words; i++)
2510 buf16[i] = cpu_to_le16(readw(mmio)); 2553 buf16[i] = cpu_to_le16(readw(mmio));
2511 } 2554 }
2555
2556 /* Transfer trailing 1 byte, if any. */
2557 if (unlikely(buflen & 0x01)) {
2558 u16 align_buf[1] = { 0 };
2559 unsigned char *trailing_buf = buf + buflen - 1;
2560
2561 if (write_data) {
2562 memcpy(align_buf, trailing_buf, 1);
2563 writew(le16_to_cpu(align_buf[0]), mmio);
2564 } else {
2565 align_buf[0] = cpu_to_le16(readw(mmio));
2566 memcpy(trailing_buf, align_buf, 1);
2567 }
2568 }
2512} 2569}
2513 2570
2571/**
2572 * ata_pio_data_xfer - Transfer data by PIO
2573 * @ap: port to read/write
2574 * @buf: data buffer
2575 * @buflen: buffer length
2576 * @do_write: read/write
2577 *
2578 * Transfer data from/to the device data register by PIO.
2579 *
2580 * LOCKING:
2581 * Inherited from caller.
2582 *
2583 */
2584
2514static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 2585static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2515 unsigned int buflen, int write_data) 2586 unsigned int buflen, int write_data)
2516{ 2587{
2517 unsigned int dwords = buflen >> 1; 2588 unsigned int words = buflen >> 1;
2518 2589
2590 /* Transfer multiple of 2 bytes */
2519 if (write_data) 2591 if (write_data)
2520 outsw(ap->ioaddr.data_addr, buf, dwords); 2592 outsw(ap->ioaddr.data_addr, buf, words);
2521 else 2593 else
2522 insw(ap->ioaddr.data_addr, buf, dwords); 2594 insw(ap->ioaddr.data_addr, buf, words);
2595
2596 /* Transfer trailing 1 byte, if any. */
2597 if (unlikely(buflen & 0x01)) {
2598 u16 align_buf[1] = { 0 };
2599 unsigned char *trailing_buf = buf + buflen - 1;
2600
2601 if (write_data) {
2602 memcpy(align_buf, trailing_buf, 1);
2603 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
2604 } else {
2605 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
2606 memcpy(trailing_buf, align_buf, 1);
2607 }
2608 }
2523} 2609}
2524 2610
2611/**
2612 * ata_data_xfer - Transfer data from/to the data register.
2613 * @ap: port to read/write
2614 * @buf: data buffer
2615 * @buflen: buffer length
2616 * @do_write: read/write
2617 *
2618 * Transfer data from/to the device data register.
2619 *
2620 * LOCKING:
2621 * Inherited from caller.
2622 *
2623 */
2624
2525static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 2625static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2526 unsigned int buflen, int do_write) 2626 unsigned int buflen, int do_write)
2527{ 2627{
@@ -2531,6 +2631,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2531 ata_pio_data_xfer(ap, buf, buflen, do_write); 2631 ata_pio_data_xfer(ap, buf, buflen, do_write);
2532} 2632}
2533 2633
2634/**
2635 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
2636 * @qc: Command on going
2637 *
2638 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
2639 *
2640 * LOCKING:
2641 * Inherited from caller.
2642 */
2643
2534static void ata_pio_sector(struct ata_queued_cmd *qc) 2644static void ata_pio_sector(struct ata_queued_cmd *qc)
2535{ 2645{
2536 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 2646 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
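Both transfer helpers above now move the bulk of the buffer as 16-bit words and stage a trailing odd byte through a zero-padded u16: on write the high byte goes out as zero, on read a full word is latched and only one byte is kept. A runnable user-space model of that arithmetic (buffer contents invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t buf[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	size_t buflen = sizeof(buf);
	size_t words = buflen >> 1;		/* full 16-bit transfers */

	printf("%zu word transfers\n", words);

	if (buflen & 0x01) {			/* trailing byte, if any */
		uint16_t align_buf = 0;
		memcpy(&align_buf, buf + buflen - 1, 1);	/* zero-padded word */
		printf("padded final word: 0x%04x\n", align_buf);
	}
	return 0;
}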
@@ -2569,6 +2679,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2569 kunmap(page); 2679 kunmap(page);
2570} 2680}
2571 2681
2682/**
2683 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
2684 * @qc: Command on going
2685 * @bytes: number of bytes
2686 *
 2687 * Transfer data from/to the ATAPI device.
2688 *
2689 * LOCKING:
2690 * Inherited from caller.
2691 *
2692 */
2693
2572static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 2694static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2573{ 2695{
2574 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 2696 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2578,10 +2700,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2578 unsigned char *buf; 2700 unsigned char *buf;
2579 unsigned int offset, count; 2701 unsigned int offset, count;
2580 2702
2581 if (qc->curbytes == qc->nbytes - bytes) 2703 if (qc->curbytes + bytes >= qc->nbytes)
2582 ap->pio_task_state = PIO_ST_LAST; 2704 ap->pio_task_state = PIO_ST_LAST;
2583 2705
2584next_sg: 2706next_sg:
2707 if (unlikely(qc->cursg >= qc->n_elem)) {
2708 /*
2709 * The end of qc->sg is reached and the device expects
2710 * more data to transfer. In order not to overrun qc->sg
2711 * and fulfill length specified in the byte count register,
2712 * - for read case, discard trailing data from the device
2713 * - for write case, padding zero data to the device
2714 */
2715 u16 pad_buf[1] = { 0 };
2716 unsigned int words = bytes >> 1;
2717 unsigned int i;
2718
2719 if (words) /* warning if bytes > 1 */
2720 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
2721 ap->id, bytes);
2722
2723 for (i = 0; i < words; i++)
2724 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
2725
2726 ap->pio_task_state = PIO_ST_LAST;
2727 return;
2728 }
2729
2585 sg = &qc->sg[qc->cursg]; 2730 sg = &qc->sg[qc->cursg];
2586 2731
2587 page = sg->page; 2732 page = sg->page;
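Two defensive changes land in this hunk: the last-chunk test becomes >= so a device asking for more than remains still moves the state machine to PIO_ST_LAST, and the new guard copes with the scatterlist running out before the announced byte count is satisfied: reads are drained, writes are padded, one zeroed word at a time. A hedged sketch of that drain/pad loop (the demo_ name is invented; ata_data_xfer is the file-local helper):

/* Sketch: satisfy a device that wants `bytes` more than the
 * scatterlist holds: feed zeros out, or throw input away. */
static void demo_drain_or_pad(struct ata_port *ap, unsigned int bytes,
			      int do_write)
{
	u16 pad_buf = 0;
	unsigned int i, words = bytes >> 1;

	for (i = 0; i < words; i++)
		ata_data_xfer(ap, (unsigned char *)&pad_buf, 2, do_write);
}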
@@ -2615,11 +2760,21 @@ next_sg:
2615 2760
2616 kunmap(page); 2761 kunmap(page);
2617 2762
2618 if (bytes) { 2763 if (bytes)
2619 goto next_sg; 2764 goto next_sg;
2620 }
2621} 2765}
2622 2766
2767/**
2768 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
2769 * @qc: Command on going
2770 *
 2771 * Transfer data from/to the ATAPI device.
2772 *
2773 * LOCKING:
2774 * Inherited from caller.
2775 *
2776 */
2777
2623static void atapi_pio_bytes(struct ata_queued_cmd *qc) 2778static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2624{ 2779{
2625 struct ata_port *ap = qc->ap; 2780 struct ata_port *ap = qc->ap;
@@ -2692,9 +2847,7 @@ static void ata_pio_block(struct ata_port *ap)
2692 if ((status & ATA_DRQ) == 0) { 2847 if ((status & ATA_DRQ) == 0) {
2693 ap->pio_task_state = PIO_ST_IDLE; 2848 ap->pio_task_state = PIO_ST_IDLE;
2694 2849
2695 ata_irq_on(ap); 2850 ata_poll_qc_complete(qc, status);
2696
2697 ata_qc_complete(qc, status);
2698 return; 2851 return;
2699 } 2852 }
2700 2853
@@ -2724,9 +2877,7 @@ static void ata_pio_error(struct ata_port *ap)
2724 2877
2725 ap->pio_task_state = PIO_ST_IDLE; 2878 ap->pio_task_state = PIO_ST_IDLE;
2726 2879
2727 ata_irq_on(ap); 2880 ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
2728
2729 ata_qc_complete(qc, drv_stat | ATA_ERR);
2730} 2881}
2731 2882
2732static void ata_pio_task(void *_data) 2883static void ata_pio_task(void *_data)
@@ -2832,8 +2983,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2832static void ata_qc_timeout(struct ata_queued_cmd *qc) 2983static void ata_qc_timeout(struct ata_queued_cmd *qc)
2833{ 2984{
2834 struct ata_port *ap = qc->ap; 2985 struct ata_port *ap = qc->ap;
2986 struct ata_host_set *host_set = ap->host_set;
2835 struct ata_device *dev = qc->dev; 2987 struct ata_device *dev = qc->dev;
2836 u8 host_stat = 0, drv_stat; 2988 u8 host_stat = 0, drv_stat;
2989 unsigned long flags;
2837 2990
2838 DPRINTK("ENTER\n"); 2991 DPRINTK("ENTER\n");
2839 2992
@@ -2844,7 +2997,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
2844 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) { 2997 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
2845 2998
2846 /* finish completing original command */ 2999 /* finish completing original command */
3000 spin_lock_irqsave(&host_set->lock, flags);
2847 __ata_qc_complete(qc); 3001 __ata_qc_complete(qc);
3002 spin_unlock_irqrestore(&host_set->lock, flags);
2848 3003
2849 atapi_request_sense(ap, dev, cmd); 3004 atapi_request_sense(ap, dev, cmd);
2850 3005
@@ -2855,6 +3010,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
2855 } 3010 }
2856 } 3011 }
2857 3012
3013 spin_lock_irqsave(&host_set->lock, flags);
3014
2858 /* hack alert! We cannot use the supplied completion 3015 /* hack alert! We cannot use the supplied completion
2859 * function from inside the ->eh_strategy_handler() thread. 3016 * function from inside the ->eh_strategy_handler() thread.
2860 * libata is the only user of ->eh_strategy_handler() in 3017 * libata is the only user of ->eh_strategy_handler() in
@@ -2870,7 +3027,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
2870 host_stat = ap->ops->bmdma_status(ap); 3027 host_stat = ap->ops->bmdma_status(ap);
2871 3028
2872 /* before we do anything else, clear DMA-Start bit */ 3029 /* before we do anything else, clear DMA-Start bit */
2873 ap->ops->bmdma_stop(ap); 3030 ap->ops->bmdma_stop(qc);
2874 3031
2875 /* fall through */ 3032 /* fall through */
2876 3033
@@ -2888,6 +3045,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
2888 ata_qc_complete(qc, drv_stat); 3045 ata_qc_complete(qc, drv_stat);
2889 break; 3046 break;
2890 } 3047 }
3048
3049 spin_unlock_irqrestore(&host_set->lock, flags);
3050
2891out: 3051out:
2892 DPRINTK("EXIT\n"); 3052 DPRINTK("EXIT\n");
2893} 3053}
@@ -3061,9 +3221,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3061 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3221 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3062 ata_sg_clean(qc); 3222 ata_sg_clean(qc);
3063 3223
3224 /* atapi: mark qc as inactive to prevent the interrupt handler
3225 * from completing the command twice later, before the error handler
3226 * is called. (when rc != 0 and atapi request sense is needed)
3227 */
3228 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3229
3064 /* call completion callback */ 3230 /* call completion callback */
3065 rc = qc->complete_fn(qc, drv_stat); 3231 rc = qc->complete_fn(qc, drv_stat);
3066 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3067 3232
3068 /* if callback indicates not to complete command (non-zero), 3233 /* if callback indicates not to complete command (non-zero),
3069 * return immediately 3234 * return immediately
@@ -3193,11 +3358,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3193 break; 3358 break;
3194 3359
3195 case ATA_PROT_ATAPI_NODATA: 3360 case ATA_PROT_ATAPI_NODATA:
3361 ap->flags |= ATA_FLAG_NOINTR;
3196 ata_tf_to_host_nolock(ap, &qc->tf); 3362 ata_tf_to_host_nolock(ap, &qc->tf);
3197 queue_work(ata_wq, &ap->packet_task); 3363 queue_work(ata_wq, &ap->packet_task);
3198 break; 3364 break;
3199 3365
3200 case ATA_PROT_ATAPI_DMA: 3366 case ATA_PROT_ATAPI_DMA:
3367 ap->flags |= ATA_FLAG_NOINTR;
3201 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3368 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3202 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3369 ap->ops->bmdma_setup(qc); /* set up bmdma */
3203 queue_work(ata_wq, &ap->packet_task); 3370 queue_work(ata_wq, &ap->packet_task);
@@ -3242,7 +3409,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3242} 3409}
3243 3410
3244/** 3411/**
3245 * ata_bmdma_start - Start a PCI IDE BMDMA transaction 3412 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
3246 * @qc: Info associated with this ATA transaction. 3413 * @qc: Info associated with this ATA transaction.
3247 * 3414 *
3248 * LOCKING: 3415 * LOCKING:
@@ -3413,7 +3580,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
3413 3580
3414/** 3581/**
3415 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 3582 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3416 * @ap: Port associated with this ATA transaction. 3583 * @qc: Command we are ending DMA for
3417 * 3584 *
3418 * Clears the ATA_DMA_START flag in the dma control register 3585 * Clears the ATA_DMA_START flag in the dma control register
3419 * 3586 *
@@ -3423,8 +3590,9 @@ u8 ata_bmdma_status(struct ata_port *ap)
3423 * spin_lock_irqsave(host_set lock) 3590 * spin_lock_irqsave(host_set lock)
3424 */ 3591 */
3425 3592
3426void ata_bmdma_stop(struct ata_port *ap) 3593void ata_bmdma_stop(struct ata_queued_cmd *qc)
3427{ 3594{
3595 struct ata_port *ap = qc->ap;
3428 if (ap->flags & ATA_FLAG_MMIO) { 3596 if (ap->flags & ATA_FLAG_MMIO) {
3429 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3597 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3430 3598
@@ -3476,7 +3644,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
3476 goto idle_irq; 3644 goto idle_irq;
3477 3645
3478 /* before we do anything else, clear DMA-Start bit */ 3646 /* before we do anything else, clear DMA-Start bit */
3479 ap->ops->bmdma_stop(ap); 3647 ap->ops->bmdma_stop(qc);
3480 3648
3481 /* fall through */ 3649 /* fall through */
3482 3650
@@ -3551,7 +3719,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3551 struct ata_port *ap; 3719 struct ata_port *ap;
3552 3720
3553 ap = host_set->ports[i]; 3721 ap = host_set->ports[i];
3554 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 3722 if (ap &&
3723 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
3555 struct ata_queued_cmd *qc; 3724 struct ata_queued_cmd *qc;
3556 3725
3557 qc = ata_qc_from_tag(ap, ap->active_tag); 3726 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -3603,19 +3772,27 @@ static void atapi_packet_task(void *_data)
3603 /* send SCSI cdb */ 3772 /* send SCSI cdb */
3604 DPRINTK("send cdb\n"); 3773 DPRINTK("send cdb\n");
3605 assert(ap->cdb_len >= 12); 3774 assert(ap->cdb_len >= 12);
3606 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3607 3775
3608 /* if we are DMA'ing, irq handler takes over from here */ 3776 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3609 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 3777 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3610 ap->ops->bmdma_start(qc); /* initiate bmdma */ 3778 unsigned long flags;
3611 3779
3612 /* non-data commands are also handled via irq */ 3780 /* Once we're done issuing command and kicking bmdma,
3613 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 3781 * irq handler takes over. To not lose irq, we need
3614 /* do nothing */ 3782 * to clear NOINTR flag before sending cdb, but
3615 } 3783 * interrupt handler shouldn't be invoked before we're
3784 * finished. Hence, the following locking.
3785 */
3786 spin_lock_irqsave(&ap->host_set->lock, flags);
3787 ap->flags &= ~ATA_FLAG_NOINTR;
3788 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3789 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3790 ap->ops->bmdma_start(qc); /* initiate bmdma */
3791 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3792 } else {
3793 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3616 3794
3617 /* PIO commands are handled by polling */ 3795 /* PIO commands are handled by polling */
3618 else {
3619 ap->pio_task_state = PIO_ST; 3796 ap->pio_task_state = PIO_ST;
3620 queue_work(ata_wq, &ap->pio_task); 3797 queue_work(ata_wq, &ap->pio_task);
3621 } 3798 }
@@ -3623,7 +3800,7 @@ static void atapi_packet_task(void *_data)
3623 return; 3800 return;
3624 3801
3625err_out: 3802err_out:
3626 ata_qc_complete(qc, ATA_ERR); 3803 ata_poll_qc_complete(qc, ATA_ERR);
3627} 3804}
3628 3805
3629 3806
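The ATA_FLAG_NOINTR scheme threaded through this file works in three steps: ata_qc_issue_prot() sets the flag before queueing ATAPI DMA/NODATA commands to the packet task, the interrupt handlers skip ports that have it set, and atapi_packet_task() clears it under host_set->lock immediately before sending the cdb, so the irq handler can neither fire early nor miss the completion. Purely polled paths finish through the new ata_poll_qc_complete(), which re-enables the port interrupt under the same lock. A condensed, hedged sketch of the handoff (demo_ name invented):

/* Sketch: hand an ATAPI command from polled context to irq context
 * without racing the interrupt handler. */
static void demo_packet_handoff(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_NOINTR;		/* irq handler may run now */
	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
		ap->ops->bmdma_start(qc);	/* irq handler takes over */
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}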
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 6a75ec2187fd..346eb36b1e31 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1,25 +1,36 @@
1/* 1/*
2 libata-scsi.c - helper library for ATA 2 * libata-scsi.c - helper library for ATA
3 3 *
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2003-2004 Jeff Garzik 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 6 * on emails.
7 The contents of this file are subject to the Open 7 *
8 Software License version 1.1 that can be found at 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 9 * Copyright 2003-2004 Jeff Garzik
10 by reference. 10 *
11 11 *
12 Alternatively, the contents of this file may be used under the terms 12 * This program is free software; you can redistribute it and/or modify
13 of the GNU General Public License version 2 (the "GPL") as distributed 13 * it under the terms of the GNU General Public License as published by
14 in the kernel source COPYING file, in which case the provisions of 14 * the Free Software Foundation; either version 2, or (at your option)
15 the GPL are applicable instead of the above. If you wish to allow 15 * any later version.
16 the use of your version of this file only under the terms of the 16 *
17 GPL and not to allow others to use your version of this file under 17 * This program is distributed in the hope that it will be useful,
18 the OSL, indicate your decision by deleting the provisions above and 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 replace them with the notice and other provisions required by the GPL. 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 If you do not delete the provisions above, a recipient may use your 20 * GNU General Public License for more details.
21 version of this file under either the OSL or the GPL. 21 *
22 22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from
31 * - http://www.t10.org/
32 * - http://www.t13.org/
33 *
23 */ 34 */
24 35
25#include <linux/kernel.h> 36#include <linux/kernel.h>
@@ -392,6 +403,60 @@ int ata_scsi_error(struct Scsi_Host *host)
392} 403}
393 404
394/** 405/**
406 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
407 * @qc: Storage for translated ATA taskfile
408 * @scsicmd: SCSI command to translate
409 *
410 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
411 * (to start). Perhaps these commands should be preceded by
412 * CHECK POWER MODE to see what power mode the device is already in.
413 * [See SAT revision 5 at www.t10.org]
414 *
415 * LOCKING:
416 * spin_lock_irqsave(host_set lock)
417 *
418 * RETURNS:
419 * Zero on success, non-zero on error.
420 */
421
422static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
423 u8 *scsicmd)
424{
425 struct ata_taskfile *tf = &qc->tf;
426
427 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
428 tf->protocol = ATA_PROT_NODATA;
429 if (scsicmd[1] & 0x1) {
430 ; /* ignore IMMED bit, violates sat-r05 */
431 }
432 if (scsicmd[4] & 0x2)
433 return 1; /* LOEJ bit set not supported */
434 if (((scsicmd[4] >> 4) & 0xf) != 0)
435 return 1; /* power conditions not supported */
436 if (scsicmd[4] & 0x1) {
437 tf->nsect = 1; /* 1 sector, lba=0 */
438 tf->lbah = 0x0;
439 tf->lbam = 0x0;
440 tf->lbal = 0x0;
441 tf->device |= ATA_LBA;
442 tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
443 } else {
444 tf->nsect = 0; /* time period value (0 implies now) */
445 tf->command = ATA_CMD_STANDBY;
446 /* Consider: ATA STANDBY IMMEDIATE command */
447 }
448 /*
449 * Standby and Idle condition timers could be implemented but that
450 * would require libata to implement the Power condition mode page
451 * and allow the user to change it. Changing mode pages requires
452 * MODE SELECT to be implemented.
453 */
454
455 return 0;
456}
457
458
459/**
395 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command 460 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
396 * @qc: Storage for translated ATA taskfile 461 * @qc: Storage for translated ATA taskfile
397 * @scsicmd: SCSI command to translate (ignored) 462 * @scsicmd: SCSI command to translate (ignored)
@@ -576,11 +641,19 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
576 tf->lbah = scsicmd[3]; 641 tf->lbah = scsicmd[3];
577 642
578 VPRINTK("ten-byte command\n"); 643 VPRINTK("ten-byte command\n");
644 if (qc->nsect == 0) /* we don't support length==0 cmds */
645 return 1;
579 return 0; 646 return 0;
580 } 647 }
581 648
582 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { 649 if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
583 qc->nsect = tf->nsect = scsicmd[4]; 650 qc->nsect = tf->nsect = scsicmd[4];
651 if (!qc->nsect) {
652 qc->nsect = 256;
653 if (lba48)
654 tf->hob_nsect = 1;
655 }
656
584 tf->lbal = scsicmd[3]; 657 tf->lbal = scsicmd[3];
585 tf->lbam = scsicmd[2]; 658 tf->lbam = scsicmd[2];
586 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ 659 tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
@@ -620,6 +693,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
620 tf->lbah = scsicmd[7]; 693 tf->lbah = scsicmd[7];
621 694
622 VPRINTK("sixteen-byte command\n"); 695 VPRINTK("sixteen-byte command\n");
696 if (qc->nsect == 0) /* we don't support length==0 cmds */
697 return 1;
623 return 0; 698 return 0;
624 } 699 }
625 700
@@ -1435,6 +1510,8 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
1435 case VERIFY: 1510 case VERIFY:
1436 case VERIFY_16: 1511 case VERIFY_16:
1437 return ata_scsi_verify_xlat; 1512 return ata_scsi_verify_xlat;
1513 case START_STOP:
1514 return ata_scsi_start_stop_xlat;
1438 } 1515 }
1439 1516
1440 return NULL; 1517 return NULL;
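For reference, the new START STOP UNIT translator keys entirely off byte 4 of the six-byte CDB: bit 0 (START) selects READ VERIFY versus STANDBY, bit 1 (LOEJ) and bits 7:4 (power conditions) are rejected, and the IMMED bit in byte 1 is deliberately ignored per the sat-r05 note. A runnable decode of a sample CDB (command bytes invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* START STOP UNIT (0x1b) with START=1, LOEJ=0, no power condition */
	uint8_t scsicmd[6] = { 0x1b, 0x00, 0x00, 0x00, 0x01, 0x00 };

	int immed = scsicmd[1] & 0x1;		/* ignored by the translator */
	int start = scsicmd[4] & 0x1;
	int loej  = (scsicmd[4] >> 1) & 0x1;
	int power = (scsicmd[4] >> 4) & 0xf;

	if (loej || power)
		printf("rejected\n");
	else
		printf("immed=%d -> %s\n", immed,
		       start ? "ATA READ VERIFY" : "ATA STANDBY");
	return 0;
}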
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 3e7f4843020f..809c634afbcd 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -1,25 +1,28 @@
1/* 1/*
2 libata.h - helper library for ATA 2 * libata.h - helper library for ATA
3 3 *
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 4 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik 5 * Copyright 2003-2004 Jeff Garzik
6 6 *
7 The contents of this file are subject to the Open 7 *
8 Software License version 1.1 that can be found at 8 * This program is free software; you can redistribute it and/or modify
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 9 * it under the terms of the GNU General Public License as published by
10 by reference. 10 * the Free Software Foundation; either version 2, or (at your option)
11 11 * any later version.
12 Alternatively, the contents of this file may be used under the terms 12 *
13 of the GNU General Public License version 2 (the "GPL") as distributed 13 * This program is distributed in the hope that it will be useful,
14 in the kernel source COPYING file, in which case the provisions of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 the GPL are applicable instead of the above. If you wish to allow 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 the use of your version of this file only under the terms of the 16 * GNU General Public License for more details.
17 GPL and not to allow others to use your version of this file under 17 *
18 the OSL, indicate your decision by deleting the provisions above and 18 * You should have received a copy of the GNU General Public License
19 replace them with the notice and other provisions required by the GPL. 19 * along with this program; see the file COPYING. If not, write to
20 If you do not delete the provisions above, a recipient may use your 20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 version of this file under either the OSL or the GPL. 21 *
22 22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
23 */ 26 */
24 27
25#ifndef __LIBATA_H__ 28#ifndef __LIBATA_H__
@@ -72,7 +75,7 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
72extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, 75extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
73 void (*done)(struct scsi_cmnd *), 76 void (*done)(struct scsi_cmnd *),
74 u8 asc, u8 ascq); 77 u8 asc, u8 ascq);
75extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 78extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
76 unsigned int (*actor) (struct ata_scsi_args *args, 79 unsigned int (*actor) (struct ata_scsi_args *args,
77 u8 *rbuf, unsigned int buflen)); 80 u8 *rbuf, unsigned int buflen));
78 81
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index b0403ccd8a25..03d9bc6e69df 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -4,21 +4,37 @@
4 * Copyright 2004 NVIDIA Corp. All rights reserved. 4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew 5 * Copyright 2004 Andrew Chew
6 * 6 *
7 * The contents of this file are subject to the Open
8 * Software License version 1.1 that can be found at
9 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
10 * by reference.
11 * 7 *
12 * Alternatively, the contents of this file may be used under the terms 8 * This program is free software; you can redistribute it and/or modify
13 * of the GNU General Public License version 2 (the "GPL") as distributed 9 * it under the terms of the GNU General Public License as published by
14 * in the kernel source COPYING file, in which case the provisions of 10 * the Free Software Foundation; either version 2, or (at your option)
15 * the GPL are applicable instead of the above. If you wish to allow 11 * any later version.
16 * the use of your version of this file only under the terms of the 12 *
17 * GPL and not to allow others to use your version of this file under 13 * This program is distributed in the hope that it will be useful,
18 * the OSL, indicate your decision by deleting the provisions above and 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * replace them with the notice and other provisions required by the GPL. 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * If you do not delete the provisions above, a recipient may use your 16 * GNU General Public License for more details.
21 * version of this file under either the OSL or the GPL. 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 *
33 * 0.08
34 * - Added support for MCP51 and MCP55.
35 *
36 * 0.07
37 * - Added support for RAID class code.
22 * 38 *
23 * 0.06 39 * 0.06
24 * - Added generic SATA support by using a pci_device_id that filters on 40 * - Added generic SATA support by using a pci_device_id that filters on
@@ -48,7 +64,7 @@
48#include <linux/libata.h> 64#include <linux/libata.h>
49 65
50#define DRV_NAME "sata_nv" 66#define DRV_NAME "sata_nv"
51#define DRV_VERSION "0.6" 67#define DRV_VERSION "0.8"
52 68
53#define NV_PORTS 2 69#define NV_PORTS 2
54#define NV_PIO_MASK 0x1f 70#define NV_PIO_MASK 0x1f
@@ -116,7 +132,9 @@ enum nv_host_type
116 GENERIC, 132 GENERIC,
117 NFORCE2, 133 NFORCE2,
118 NFORCE3, 134 NFORCE3,
119 CK804 135 CK804,
136 MCP51,
137 MCP55
120}; 138};
121 139
122static struct pci_device_id nv_pci_tbl[] = { 140static struct pci_device_id nv_pci_tbl[] = {
@@ -134,9 +152,18 @@ static struct pci_device_id nv_pci_tbl[] = {
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, 153 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
155 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
157 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
137 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 161 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
138 PCI_ANY_ID, PCI_ANY_ID, 162 PCI_ANY_ID, PCI_ANY_ID,
139 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 163 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
164 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
165 PCI_ANY_ID, PCI_ANY_ID,
166 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
140 { 0, } /* terminate list */ 167 { 0, } /* terminate list */
141}; 168};
142 169
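Editor's note: the two catch-all rows above match by PCI class rather than device ID. Both PCI_CLASS_STORAGE_IDE<<8 and the newly added PCI_CLASS_STORAGE_RAID<<8 are compared against the device's class under the mask 0xffff00, so any NVIDIA part that reports an IDE- or RAID-class storage function binds as GENERIC even if its device ID is unknown. A minimal userspace sketch of that matching rule, assuming illustrative struct and function names (this is not the kernel API):

/* Minimal model of PCI ID-table matching with a class mask. */
#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffffffffu

struct pci_id {
    uint32_t vendor, device;
    uint32_t class, class_mask;   /* class compared as (dev_class & mask) */
};

static int id_match(const struct pci_id *id,
                    uint32_t vendor, uint32_t device, uint32_t dev_class)
{
    if (id->vendor != ANY_ID && id->vendor != vendor)
        return 0;
    if (id->device != ANY_ID && id->device != device)
        return 0;
    return (dev_class & id->class_mask) == id->class;
}

int main(void)
{
    /* Mirrors the catch-all rows: any NVIDIA device whose class is
     * IDE (0x0101xx) or RAID (0x0104xx) matches. */
    const struct pci_id tbl[] = {
        { 0x10de, ANY_ID, 0x0101 << 8, 0xffff00 },
        { 0x10de, ANY_ID, 0x0104 << 8, 0xffff00 },
        { 0 }
    };
    uint32_t class_raid = 0x010400;   /* class 0x01, subclass 0x04 */

    for (const struct pci_id *id = tbl; id->vendor; id++)
        if (id_match(id, 0x10de, 0x037f, class_raid))
            printf("matched entry with class %06x\n", (unsigned)id->class);
    return 0;
}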
@@ -274,7 +301,8 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
274 struct ata_port *ap; 301 struct ata_port *ap;
275 302
276 ap = host_set->ports[i]; 303 ap = host_set->ports[i];
277 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 304 if (ap &&
305 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
278 struct ata_queued_cmd *qc; 306 struct ata_queued_cmd *qc;
279 307
280 qc = ata_qc_from_tag(ap, ap->active_tag); 308 qc = ata_qc_from_tag(ap, ap->active_tag);
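Editor's note: the interrupt-handler change above is the recurring pattern in this series. Besides ATA_FLAG_PORT_DISABLED, the handler now also skips ports flagged ATA_FLAG_NOINTR, whose interrupts are deliberately ignored, for instance while a command is being driven by polling. A toy sketch of the guard, with illustrative flag values:

/* Per-port guard an interrupt handler applies before completing a
 * command; flag bits are illustrative. */
#include <stdio.h>

#define FLAG_PORT_DISABLED (1u << 0)
#define FLAG_NOINTR        (1u << 1)   /* port is in polled mode */

struct port { unsigned int flags; int active_tag; };

static void handle_port_irq(struct port *ap)
{
    /* Skip ports that are disabled or whose interrupts are masked;
     * touching them here could complete a command the polling path
     * still owns. */
    if (!ap || (ap->flags & (FLAG_PORT_DISABLED | FLAG_NOINTR)))
        return;
    printf("completing command tag %d\n", ap->active_tag);
}

int main(void)
{
    struct port polled = { FLAG_NOINTR, 0 };
    struct port normal = { 0, 3 };
    handle_port_irq(&polled);   /* ignored */
    handle_port_irq(&normal);   /* completes tag 3 */
    return 0;
}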
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 919fb314ad10..7c4f6ecc1cc9 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -7,21 +7,26 @@
7 * 7 *
8 * Copyright 2003-2004 Red Hat, Inc. 8 * Copyright 2003-2004 Red Hat, Inc.
9 * 9 *
10 * The contents of this file are subject to the Open
11 * Software License version 1.1 that can be found at
12 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
13 * by reference.
14 * 10 *
15 * Alternatively, the contents of this file may be used under the terms 11 * This program is free software; you can redistribute it and/or modify
16 * of the GNU General Public License version 2 (the "GPL") as distributed 12 * it under the terms of the GNU General Public License as published by
17 * in the kernel source COPYING file, in which case the provisions of 13 * the Free Software Foundation; either version 2, or (at your option)
18 * the GPL are applicable instead of the above. If you wish to allow 14 * any later version.
19 * the use of your version of this file only under the terms of the 15 *
20 * GPL and not to allow others to use your version of this file under 16 * This program is distributed in the hope that it will be useful,
21 * the OSL, indicate your decision by deleting the provisions above and 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * replace them with the notice and other provisions required by the GPL. 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * If you do not delete the provisions above, a recipient may use your 19 * GNU General Public License for more details.
24 * version of this file under either the OSL or the GPL. 20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware information only available under NDA.
25 * 30 *
26 */ 31 */
27 32
@@ -79,7 +84,8 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
79static void pdc_eng_timeout(struct ata_port *ap); 84static void pdc_eng_timeout(struct ata_port *ap);
80static int pdc_port_start(struct ata_port *ap); 85static int pdc_port_start(struct ata_port *ap);
81static void pdc_port_stop(struct ata_port *ap); 86static void pdc_port_stop(struct ata_port *ap);
82static void pdc_phy_reset(struct ata_port *ap); 87static void pdc_pata_phy_reset(struct ata_port *ap);
88static void pdc_sata_phy_reset(struct ata_port *ap);
83static void pdc_qc_prep(struct ata_queued_cmd *qc); 89static void pdc_qc_prep(struct ata_queued_cmd *qc);
84static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); 90static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
85static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); 91static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
@@ -106,19 +112,22 @@ static Scsi_Host_Template pdc_ata_sht = {
106 .ordered_flush = 1, 112 .ordered_flush = 1,
107}; 113};
108 114
109static struct ata_port_operations pdc_ata_ops = { 115static struct ata_port_operations pdc_sata_ops = {
110 .port_disable = ata_port_disable, 116 .port_disable = ata_port_disable,
111 .tf_load = pdc_tf_load_mmio, 117 .tf_load = pdc_tf_load_mmio,
112 .tf_read = ata_tf_read, 118 .tf_read = ata_tf_read,
113 .check_status = ata_check_status, 119 .check_status = ata_check_status,
114 .exec_command = pdc_exec_command_mmio, 120 .exec_command = pdc_exec_command_mmio,
115 .dev_select = ata_std_dev_select, 121 .dev_select = ata_std_dev_select,
116 .phy_reset = pdc_phy_reset, 122
123 .phy_reset = pdc_sata_phy_reset,
124
117 .qc_prep = pdc_qc_prep, 125 .qc_prep = pdc_qc_prep,
118 .qc_issue = pdc_qc_issue_prot, 126 .qc_issue = pdc_qc_issue_prot,
119 .eng_timeout = pdc_eng_timeout, 127 .eng_timeout = pdc_eng_timeout,
120 .irq_handler = pdc_interrupt, 128 .irq_handler = pdc_interrupt,
121 .irq_clear = pdc_irq_clear, 129 .irq_clear = pdc_irq_clear,
130
122 .scr_read = pdc_sata_scr_read, 131 .scr_read = pdc_sata_scr_read,
123 .scr_write = pdc_sata_scr_write, 132 .scr_write = pdc_sata_scr_write,
124 .port_start = pdc_port_start, 133 .port_start = pdc_port_start,
@@ -126,6 +135,27 @@ static struct ata_port_operations pdc_ata_ops = {
126 .host_stop = ata_host_stop, 135 .host_stop = ata_host_stop,
127}; 136};
128 137
138static struct ata_port_operations pdc_pata_ops = {
139 .port_disable = ata_port_disable,
140 .tf_load = pdc_tf_load_mmio,
141 .tf_read = ata_tf_read,
142 .check_status = ata_check_status,
143 .exec_command = pdc_exec_command_mmio,
144 .dev_select = ata_std_dev_select,
145
146 .phy_reset = pdc_pata_phy_reset,
147
148 .qc_prep = pdc_qc_prep,
149 .qc_issue = pdc_qc_issue_prot,
150 .eng_timeout = pdc_eng_timeout,
151 .irq_handler = pdc_interrupt,
152 .irq_clear = pdc_irq_clear,
153
154 .port_start = pdc_port_start,
155 .port_stop = pdc_port_stop,
156 .host_stop = ata_host_stop,
157};
158
129static struct ata_port_info pdc_port_info[] = { 159static struct ata_port_info pdc_port_info[] = {
130 /* board_2037x */ 160 /* board_2037x */
131 { 161 {
@@ -135,7 +165,7 @@ static struct ata_port_info pdc_port_info[] = {
135 .pio_mask = 0x1f, /* pio0-4 */ 165 .pio_mask = 0x1f, /* pio0-4 */
136 .mwdma_mask = 0x07, /* mwdma0-2 */ 166 .mwdma_mask = 0x07, /* mwdma0-2 */
137 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 167 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
138 .port_ops = &pdc_ata_ops, 168 .port_ops = &pdc_sata_ops,
139 }, 169 },
140 170
141 /* board_20319 */ 171 /* board_20319 */
@@ -146,7 +176,7 @@ static struct ata_port_info pdc_port_info[] = {
146 .pio_mask = 0x1f, /* pio0-4 */ 176 .pio_mask = 0x1f, /* pio0-4 */
147 .mwdma_mask = 0x07, /* mwdma0-2 */ 177 .mwdma_mask = 0x07, /* mwdma0-2 */
148 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 178 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
149 .port_ops = &pdc_ata_ops, 179 .port_ops = &pdc_sata_ops,
150 }, 180 },
151 181
152 /* board_20619 */ 182 /* board_20619 */
@@ -157,7 +187,7 @@ static struct ata_port_info pdc_port_info[] = {
157 .pio_mask = 0x1f, /* pio0-4 */ 187 .pio_mask = 0x1f, /* pio0-4 */
158 .mwdma_mask = 0x07, /* mwdma0-2 */ 188 .mwdma_mask = 0x07, /* mwdma0-2 */
159 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 189 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
160 .port_ops = &pdc_ata_ops, 190 .port_ops = &pdc_pata_ops,
161 }, 191 },
162}; 192};
163 193
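Editor's note: splitting pdc_ata_ops into pdc_sata_ops and pdc_pata_ops lets each board entry pick the phy_reset path it needs; board_20619, the PATA part, is the only one rerouted. The underlying pattern is an ops table selected per board ID, sketched below with stand-in types rather than the driver's real structures:

/* Userspace sketch of per-board ops-table dispatch. */
#include <stdio.h>

struct port_ops { void (*phy_reset)(void); };

static void sata_phy_reset_stub(void) { puts("SATA-style reset"); }
static void pata_phy_reset_stub(void) { puts("PATA-style reset"); }

static const struct port_ops sata_ops = { sata_phy_reset_stub };
static const struct port_ops pata_ops = { pata_phy_reset_stub };

enum board { board_2037x, board_20319, board_20619 };

static const struct port_ops *board_ops[] = {
    [board_2037x] = &sata_ops,
    [board_20319] = &sata_ops,
    [board_20619] = &pata_ops,   /* PATA part gets its own reset path */
};

int main(void)
{
    board_ops[board_20619]->phy_reset();
    return 0;
}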
@@ -181,6 +211,10 @@ static struct pci_device_id pdc_ata_pci_tbl[] = {
181 board_20319 }, 211 board_20319 },
182 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 212 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
183 board_20319 }, 213 board_20319 },
214 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
215 board_20319 },
216 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
217 board_20319 },
184 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 218 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
185 board_20319 }, 219 board_20319 },
186 220
@@ -268,12 +302,23 @@ static void pdc_reset_port(struct ata_port *ap)
268 readl(mmio); /* flush */ 302 readl(mmio); /* flush */
269} 303}
270 304
271static void pdc_phy_reset(struct ata_port *ap) 305static void pdc_sata_phy_reset(struct ata_port *ap)
272{ 306{
273 pdc_reset_port(ap); 307 pdc_reset_port(ap);
274 sata_phy_reset(ap); 308 sata_phy_reset(ap);
275} 309}
276 310
311static void pdc_pata_phy_reset(struct ata_port *ap)
312{
313 /* FIXME: add cable detect. Don't assume 40-pin cable */
314 ap->cbl = ATA_CBL_PATA40;
315 ap->udma_mask &= ATA_UDMA_MASK_40C;
316
317 pdc_reset_port(ap);
318 ata_port_probe(ap);
319 ata_bus_reset(ap);
320}
321
277static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 322static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
278{ 323{
279 if (sc_reg > SCR_CONTROL) 324 if (sc_reg > SCR_CONTROL)
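Editor's note: pdc_pata_phy_reset() hard-codes a 40-wire cable (the FIXME marks the missing cable detect) and masks the UDMA modes accordingly; ATA_UDMA_MASK_40C keeps only udma0-2, i.e. nothing above UDMA/33. The clamp in isolation, with mask values following the usual one-bit-per-mode convention:

/* 40-wire clamp: with no cable detect, assume the restrictive cable
 * and drop UDMA modes above mode 2 (UDMA/33). */
#include <stdio.h>

#define UDMA_MASK_40C 0x07u   /* udma0-2 only on a 40-conductor cable */

int main(void)
{
    unsigned int udma_mask = 0x7f;   /* controller offers udma0-6 */
    udma_mask &= UDMA_MASK_40C;      /* cable limits it to udma0-2 */
    printf("effective udma mask: 0x%02x\n", udma_mask);
    return 0;
}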
@@ -321,11 +366,15 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
321 366
322static void pdc_eng_timeout(struct ata_port *ap) 367static void pdc_eng_timeout(struct ata_port *ap)
323{ 368{
369 struct ata_host_set *host_set = ap->host_set;
324 u8 drv_stat; 370 u8 drv_stat;
325 struct ata_queued_cmd *qc; 371 struct ata_queued_cmd *qc;
372 unsigned long flags;
326 373
327 DPRINTK("ENTER\n"); 374 DPRINTK("ENTER\n");
328 375
376 spin_lock_irqsave(&host_set->lock, flags);
377
329 qc = ata_qc_from_tag(ap, ap->active_tag); 378 qc = ata_qc_from_tag(ap, ap->active_tag);
330 if (!qc) { 379 if (!qc) {
331 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 380 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
@@ -359,6 +408,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
359 } 408 }
360 409
361out: 410out:
411 spin_unlock_irqrestore(&host_set->lock, flags);
362 DPRINTK("EXIT\n"); 412 DPRINTK("EXIT\n");
363} 413}
364 414
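Editor's note: the pdc_eng_timeout() change wraps the whole timeout path in host_set->lock with interrupts saved, so the timeout handler and the interrupt handler cannot both complete the same queued command. A userspace model of that rule, with a pthread mutex standing in for spin_lock_irqsave():

/* Timeout path and IRQ path must complete a command under one lock;
 * only one of them may win. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_tag = 3;          /* -1 means nothing outstanding */

static void complete_cmd(const char *who)
{
    pthread_mutex_lock(&host_lock);
    if (active_tag >= 0) {          /* only one path may complete it */
        printf("%s completes tag %d\n", who, active_tag);
        active_tag = -1;
    }
    pthread_mutex_unlock(&host_lock);
}

int main(void)
{
    complete_cmd("timeout handler");
    complete_cmd("irq handler");    /* finds nothing left to do */
    return 0;
}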
@@ -441,7 +491,8 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
441 VPRINTK("port %u\n", i); 491 VPRINTK("port %u\n", i);
442 ap = host_set->ports[i]; 492 ap = host_set->ports[i];
443 tmp = mask & (1 << (i + 1)); 493 tmp = mask & (1 << (i + 1));
444 if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 494 if (tmp && ap &&
495 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
445 struct ata_queued_cmd *qc; 496 struct ata_queued_cmd *qc;
446 497
447 qc = ata_qc_from_tag(ap, ap->active_tag); 498 qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h
index 6e7e96b9ee13..6ee5e190262d 100644
--- a/drivers/scsi/sata_promise.h
+++ b/drivers/scsi/sata_promise.h
@@ -3,21 +3,24 @@
3 * 3 *
4 * Copyright 2003-2004 Red Hat, Inc. 4 * Copyright 2003-2004 Red Hat, Inc.
5 * 5 *
6 * The contents of this file are subject to the Open
7 * Software License version 1.1 that can be found at
8 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
9 * by reference.
10 * 6 *
11 * Alternatively, the contents of this file may be used under the terms 7 * This program is free software; you can redistribute it and/or modify
12 * of the GNU General Public License version 2 (the "GPL") as distributed 8 * it under the terms of the GNU General Public License as published by
13 * in the kernel source COPYING file, in which case the provisions of 9 * the Free Software Foundation; either version 2, or (at your option)
14 * the GPL are applicable instead of the above. If you wish to allow 10 * any later version.
15 * the use of your version of this file only under the terms of the 11 *
16 * GPL and not to allow others to use your version of this file under 12 * This program is distributed in the hope that it will be useful,
17 * the OSL, indicate your decision by deleting the provisions above and 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * replace them with the notice and other provisions required by the GPL. 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * If you do not delete the provisions above, a recipient may use your 15 * GNU General Public License for more details.
20 * version of this file under either the OSL or the GPL. 16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
21 * 24 *
22 */ 25 */
23 26
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 1383e8a28d72..9c99ab433bd3 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -6,21 +6,24 @@
6 * Copyright 2005 Pacific Digital Corporation. 6 * Copyright 2005 Pacific Digital Corporation.
7 * (OSL/GPL code release authorized by Jalil Fadavi). 7 * (OSL/GPL code release authorized by Jalil Fadavi).
8 * 8 *
9 * The contents of this file are subject to the Open
10 * Software License version 1.1 that can be found at
11 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 * by reference.
13 * 9 *
14 * Alternatively, the contents of this file may be used under the terms 10 * This program is free software; you can redistribute it and/or modify
15 * of the GNU General Public License version 2 (the "GPL") as distributed 11 * it under the terms of the GNU General Public License as published by
16 * in the kernel source COPYING file, in which case the provisions of 12 * the Free Software Foundation; either version 2, or (at your option)
17 * the GPL are applicable instead of the above. If you wish to allow 13 * any later version.
18 * the use of your version of this file only under the terms of the 14 *
19 * GPL and not to allow others to use your version of this file under 15 * This program is distributed in the hope that it will be useful,
20 * the OSL, indicate your decision by deleting the provisions above and 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * replace them with the notice and other provisions required by the GPL. 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * If you do not delete the provisions above, a recipient may use your 18 * GNU General Public License for more details.
23 * version of this file under either the OSL or the GPL. 19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 *
25 * libata documentation is available via 'make {ps|pdf}docs',
26 * as Documentation/DocBook/libata.*
24 * 27 *
25 */ 28 */
26 29
@@ -117,7 +120,7 @@ static void qs_phy_reset(struct ata_port *ap);
117static void qs_qc_prep(struct ata_queued_cmd *qc); 120static void qs_qc_prep(struct ata_queued_cmd *qc);
118static int qs_qc_issue(struct ata_queued_cmd *qc); 121static int qs_qc_issue(struct ata_queued_cmd *qc);
119static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 122static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
120static void qs_bmdma_stop(struct ata_port *ap); 123static void qs_bmdma_stop(struct ata_queued_cmd *qc);
121static u8 qs_bmdma_status(struct ata_port *ap); 124static u8 qs_bmdma_status(struct ata_port *ap);
122static void qs_irq_clear(struct ata_port *ap); 125static void qs_irq_clear(struct ata_port *ap);
123static void qs_eng_timeout(struct ata_port *ap); 126static void qs_eng_timeout(struct ata_port *ap);
@@ -198,7 +201,7 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
198 return 1; /* ATAPI DMA not supported */ 201 return 1; /* ATAPI DMA not supported */
199} 202}
200 203
201static void qs_bmdma_stop(struct ata_port *ap) 204static void qs_bmdma_stop(struct ata_queued_cmd *qc)
202{ 205{
203 /* nothing */ 206 /* nothing */
204} 207}
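Editor's note: qs_bmdma_stop() changes signature from taking an ata_port to taking an ata_queued_cmd, tracking a libata-wide callback change; nothing is lost, since the port is still reachable through the command, and per-command state becomes available to the callback. Sketched with stand-in types:

/* Signature migration: pass the command, derive the port from it. */
#include <stdio.h>

struct port { int id; };
struct queued_cmd { struct port *ap; int tag; };

/* old: void bmdma_stop(struct port *ap); */
static void bmdma_stop(struct queued_cmd *qc)
{
    printf("stop DMA on port %d for tag %d\n", qc->ap->id, qc->tag);
}

int main(void)
{
    struct port p = { 0 };
    struct queued_cmd qc = { &p, 5 };
    bmdma_stop(&qc);
    return 0;
}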
@@ -386,7 +389,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
386 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 389 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
387 sff1, sff0, port_no, sHST, sDST); 390 sff1, sff0, port_no, sHST, sDST);
388 handled = 1; 391 handled = 1;
389 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 392 if (ap && !(ap->flags &
393 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
390 struct ata_queued_cmd *qc; 394 struct ata_queued_cmd *qc;
391 struct qs_port_priv *pp = ap->private_data; 395 struct qs_port_priv *pp = ap->private_data;
392 if (!pp || pp->state != qs_state_pkt) 396 if (!pp || pp->state != qs_state_pkt)
@@ -417,7 +421,8 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
417 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 421 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
418 struct ata_port *ap; 422 struct ata_port *ap;
419 ap = host_set->ports[port_no]; 423 ap = host_set->ports[port_no];
420 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 424 if (ap &&
425 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
421 struct ata_queued_cmd *qc; 426 struct ata_queued_cmd *qc;
422 struct qs_port_priv *pp = ap->private_data; 427 struct qs_port_priv *pp = ap->private_data;
423 if (!pp || pp->state != qs_state_mmio) 428 if (!pp || pp->state != qs_state_mmio)
@@ -431,7 +436,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
431 continue; 436 continue;
432 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 437 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
433 ap->id, qc->tf.protocol, status); 438 ap->id, qc->tf.protocol, status);
434 439
435 /* complete taskfile transaction */ 440 /* complete taskfile transaction */
436 pp->state = qs_state_idle; 441 pp->state = qs_state_idle;
437 ata_qc_complete(qc, status); 442 ata_qc_complete(qc, status);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 49ed557a4b66..71d49548f0a3 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -5,24 +5,32 @@
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2003 Red Hat, Inc. 8 * Copyright 2003-2005 Red Hat, Inc.
9 * Copyright 2003 Benjamin Herrenschmidt 9 * Copyright 2003 Benjamin Herrenschmidt
10 * 10 *
11 * The contents of this file are subject to the Open
12 * Software License version 1.1 that can be found at
13 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
14 * by reference.
15 * 11 *
16 * Alternatively, the contents of this file may be used under the terms 12 * This program is free software; you can redistribute it and/or modify
17 * of the GNU General Public License version 2 (the "GPL") as distributed 13 * it under the terms of the GNU General Public License as published by
18 * in the kernel source COPYING file, in which case the provisions of 14 * the Free Software Foundation; either version 2, or (at your option)
19 * the GPL are applicable instead of the above. If you wish to allow 15 * any later version.
20 * the use of your version of this file only under the terms of the 16 *
21 * GPL and not to allow others to use your version of this file under 17 * This program is distributed in the hope that it will be useful,
22 * the OSL, indicate your decision by deleting the provisions above and 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * replace them with the notice and other provisions required by the GPL. 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * If you do not delete the provisions above, a recipient may use your 20 * GNU General Public License for more details.
25 * version of this file under either the OSL or the GPL. 21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Documentation for SiI 3112:
31 * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 * Other errata and documentation available under NDA.
26 * 34 *
27 */ 35 */
28 36
@@ -41,8 +49,11 @@
41#define DRV_VERSION "0.9" 49#define DRV_VERSION "0.9"
42 50
43enum { 51enum {
52 SIL_FLAG_MOD15WRITE = (1 << 30),
53
44 sil_3112 = 0, 54 sil_3112 = 0,
45 sil_3114 = 1, 55 sil_3112_m15w = 1,
56 sil_3114 = 2,
46 57
47 SIL_FIFO_R0 = 0x40, 58 SIL_FIFO_R0 = 0x40,
48 SIL_FIFO_W0 = 0x41, 59 SIL_FIFO_W0 = 0x41,
@@ -76,13 +87,13 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
76static void sil_post_set_mode (struct ata_port *ap); 87static void sil_post_set_mode (struct ata_port *ap);
77 88
78static struct pci_device_id sil_pci_tbl[] = { 89static struct pci_device_id sil_pci_tbl[] = {
79 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 90 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
80 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 91 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
81 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 92 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
82 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 93 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
83 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 94 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
84 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 95 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
85 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 96 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
86 { } /* terminate list */ 97 { } /* terminate list */
87}; 98};
88 99
@@ -174,6 +185,16 @@ static struct ata_port_info sil_port_info[] = {
174 .mwdma_mask = 0x07, /* mwdma0-2 */ 185 .mwdma_mask = 0x07, /* mwdma0-2 */
175 .udma_mask = 0x3f, /* udma0-5 */ 186 .udma_mask = 0x3f, /* udma0-5 */
176 .port_ops = &sil_ops, 187 .port_ops = &sil_ops,
 188 }, /* sil_3112_m15w - keep it sync'd w/ sil_3112 */
189 {
190 .sht = &sil_sht,
191 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
192 ATA_FLAG_SRST | ATA_FLAG_MMIO |
193 SIL_FLAG_MOD15WRITE,
194 .pio_mask = 0x1f, /* pio0-4 */
195 .mwdma_mask = 0x07, /* mwdma0-2 */
196 .udma_mask = 0x3f, /* udma0-5 */
197 .port_ops = &sil_ops,
177 }, /* sil_3114 */ 198 }, /* sil_3114 */
178 { 199 {
179 .sht = &sil_sht, 200 .sht = &sil_sht,
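Editor's note: SIL_FLAG_MOD15WRITE is a driver-private bit parked at bit 30, high enough to coexist with the generic ATA_FLAG_* bits in the same host_flags word; the duplicated sil_3112_m15w port_info entry is identical except for carrying it. The packing idea in isolation, with illustrative bit positions:

/* Carving a driver-private bit out of a shared flags word: the
 * generic layer owns the low bits, the driver claims a high one. */
#include <stdio.h>

#define ATA_FLAG_SATA       (1u << 0)   /* generic flags: low bits */
#define ATA_FLAG_MMIO       (1u << 1)
#define SIL_FLAG_MOD15WRITE (1u << 30)  /* driver-private: high bit */

int main(void)
{
    unsigned int host_flags = ATA_FLAG_SATA | ATA_FLAG_MMIO |
                              SIL_FLAG_MOD15WRITE;
    printf("mod15write chip? %s\n",
           (host_flags & SIL_FLAG_MOD15WRITE) ? "yes" : "no");
    return 0;
}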
@@ -323,15 +344,15 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
323 while ((len > 0) && (s[len - 1] == ' ')) 344 while ((len > 0) && (s[len - 1] == ' '))
324 len--; 345 len--;
325 346
326 for (n = 0; sil_blacklist[n].product; n++) 347 for (n = 0; sil_blacklist[n].product; n++)
327 if (!memcmp(sil_blacklist[n].product, s, 348 if (!memcmp(sil_blacklist[n].product, s,
328 strlen(sil_blacklist[n].product))) { 349 strlen(sil_blacklist[n].product))) {
329 quirks = sil_blacklist[n].quirk; 350 quirks = sil_blacklist[n].quirk;
330 break; 351 break;
331 } 352 }
332 353
333 /* limit requests to 15 sectors */ 354 /* limit requests to 15 sectors */
334 if (quirks & SIL_QUIRK_MOD15WRITE) { 355 if ((ap->flags & SIL_FLAG_MOD15WRITE) && (quirks & SIL_QUIRK_MOD15WRITE)) {
335 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix\n", 356 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix\n",
336 ap->id, dev->devno); 357 ap->id, dev->devno);
337 ap->host->max_sectors = 15; 358 ap->host->max_sectors = 15;
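Editor's note: with the flag in place, sil_dev_config() applies the Seagate 15-sector limit only when two keys turn: the chip revision needs the workaround (SIL_FLAG_MOD15WRITE in the port flags) and the attached drive model is blacklisted (SIL_QUIRK_MOD15WRITE). A self-contained sketch of that gate; the blacklist entry shown is illustrative:

/* Two-key quirk gate: chip flag AND device quirk must both be set. */
#include <stdio.h>
#include <string.h>

#define SIL_FLAG_MOD15WRITE  (1u << 30)
#define SIL_QUIRK_MOD15WRITE (1u << 0)

static const struct { const char *product; unsigned int quirk; } blacklist[] = {
    { "ST3120023AS", SIL_QUIRK_MOD15WRITE },   /* illustrative entry */
    { NULL, 0 }
};

int main(void)
{
    unsigned int port_flags = SIL_FLAG_MOD15WRITE;  /* e.g. a 3112 */
    const char *model = "ST3120023AS";
    unsigned int quirks = 0, max_sectors = 256;

    for (int n = 0; blacklist[n].product; n++)
        if (!strncmp(blacklist[n].product, model,
                     strlen(blacklist[n].product)))
            quirks = blacklist[n].quirk;

    if ((port_flags & SIL_FLAG_MOD15WRITE) && (quirks & SIL_QUIRK_MOD15WRITE))
        max_sectors = 15;                      /* errata workaround */
    printf("max_sectors = %u\n", max_sectors);
    return 0;
}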
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index e418b89c6b9d..43af445b3ad2 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -7,21 +7,26 @@
7 * 7 *
8 * Copyright 2004 Uwe Koziolek 8 * Copyright 2004 Uwe Koziolek
9 * 9 *
10 * The contents of this file are subject to the Open
11 * Software License version 1.1 that can be found at
12 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
13 * by reference.
14 * 10 *
15 * Alternatively, the contents of this file may be used under the terms 11 * This program is free software; you can redistribute it and/or modify
16 * of the GNU General Public License version 2 (the "GPL") as distributed 12 * it under the terms of the GNU General Public License as published by
17 * in the kernel source COPYING file, in which case the provisions of 13 * the Free Software Foundation; either version 2, or (at your option)
18 * the GPL are applicable instead of the above. If you wish to allow 14 * any later version.
19 * the use of your version of this file only under the terms of the 15 *
20 * GPL and not to allow others to use your version of this file under 16 * This program is distributed in the hope that it will be useful,
21 * the OSL, indicate your decision by deleting the provisions above and 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * replace them with the notice and other provisions required by the GPL. 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * If you do not delete the provisions above, a recipient may use your 19 * GNU General Public License for more details.
24 * version of this file under either the OSL or the GPL. 20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
25 * 30 *
26 */ 31 */
27 32
@@ -234,7 +239,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
234 pci_read_config_dword(pdev, SIS_GENCTL, &genctl); 239 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
235 if ((genctl & GENCTL_IOMAPPED_SCR) == 0) 240 if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
236 probe_ent->host_flags |= SIS_FLAG_CFGSCR; 241 probe_ent->host_flags |= SIS_FLAG_CFGSCR;
237 242
238 /* if hardware thinks SCRs are in IO space, but there are 243 /* if hardware thinks SCRs are in IO space, but there are
239 * no IO resources assigned, change to PCI cfg space. 244 * no IO resources assigned, change to PCI cfg space.
240 */ 245 */
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 858e07185dbd..19d3bb3b0fb6 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -13,21 +13,26 @@
13 * This driver probably works with non-Apple versions of the 13 * This driver probably works with non-Apple versions of the
14 * Broadcom chipset... 14 * Broadcom chipset...
15 * 15 *
16 * The contents of this file are subject to the Open
17 * Software License version 1.1 that can be found at
18 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
19 * by reference.
20 * 16 *
21 * Alternatively, the contents of this file may be used under the terms 17 * This program is free software; you can redistribute it and/or modify
22 * of the GNU General Public License version 2 (the "GPL") as distributed 18 * it under the terms of the GNU General Public License as published by
23 * in the kernel source COPYING file, in which case the provisions of 19 * the Free Software Foundation; either version 2, or (at your option)
24 * the GPL are applicable instead of the above. If you wish to allow 20 * any later version.
25 * the use of your version of this file only under the terms of the 21 *
26 * GPL and not to allow others to use your version of this file under 22 * This program is distributed in the hope that it will be useful,
27 * the OSL, indicate your decision by deleting the provisions above and 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * replace them with the notice and other provisions required by the GPL. 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * If you do not delete the provisions above, a recipient may use your 25 * GNU General Public License for more details.
30 * version of this file under either the OSL or the GPL. 26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; see the file COPYING. If not, write to
29 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
30 *
31 *
32 * libata documentation is available via 'make {ps|pdf}docs',
33 * as Documentation/DocBook/libata.*
34 *
35 * Hardware documentation available under NDA.
31 * 36 *
32 */ 37 */
33 38
@@ -195,18 +200,18 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
195 /* start host DMA transaction */ 200 /* start host DMA transaction */
196 dmactl = readb(mmio + ATA_DMA_CMD); 201 dmactl = readb(mmio + ATA_DMA_CMD);
197 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); 202 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
198 /* There is a race condition in certain SATA controllers that can 203 /* There is a race condition in certain SATA controllers that can
199 be seen when the r/w command is given to the controller before the 204 be seen when the r/w command is given to the controller before the
200 host DMA is started. On a Read command, the controller would initiate 205 host DMA is started. On a Read command, the controller would initiate
201 the command to the drive even before it sees the DMA start. When there 206 the command to the drive even before it sees the DMA start. When there
202 are very fast drives connected to the controller, or when the data request 207 are very fast drives connected to the controller, or when the data request
203 hits in the drive cache, there is the possibility that the drive returns a part 208 hits in the drive cache, there is the possibility that the drive returns a part
204 or all of the requested data to the controller before the DMA start is issued. 209 or all of the requested data to the controller before the DMA start is issued.
205 In this case, the controller would become confused as to what to do with the data. 210 In this case, the controller would become confused as to what to do with the data.
206 In the worst case when all the data is returned back to the controller, the 211 In the worst case when all the data is returned back to the controller, the
 207 controller could hang. In other cases it could return partial data, resulting 212 controller could hang. In other cases it could return partial data, resulting
 208 in data corruption. This problem has been seen in PPC systems and can also appear 213 in data corruption. This problem has been seen in PPC systems and can also appear
 209 on a system with very fast disks, where the SATA controller is sitting behind a 214 on a system with very fast disks, where the SATA controller is sitting behind a
210 number of bridges, and hence there is significant latency between the r/w command 215 number of bridges, and hence there is significant latency between the r/w command
211 and the start command. */ 216 and the start command. */
212 /* issue r/w command if the access is to ATA*/ 217 /* issue r/w command if the access is to ATA*/
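Editor's note: the long comment above argues for a strict ordering: arm the host DMA engine first, and only then issue the read/write command, so a fast drive or a cache hit can never hand data to a controller that is not yet ready to move it. Reduced to its skeleton:

/* Ordering the comment argues for; reversing the two calls
 * reintroduces the race described above. */
#include <stdio.h>

static void dma_start(void)     { puts("1: host DMA engine armed"); }
static void issue_command(void) { puts("2: r/w command sent to drive"); }

int main(void)
{
    dma_start();
    issue_command();
    return 0;
}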
@@ -214,7 +219,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
214 ap->ops->exec_command(ap, &qc->tf); 219 ap->ops->exec_command(ap, &qc->tf);
215} 220}
216 221
217 222
218static u8 k2_stat_check_status(struct ata_port *ap) 223static u8 k2_stat_check_status(struct ata_port *ap)
219{ 224{
220 return readl((void *) ap->ioaddr.status_addr); 225 return readl((void *) ap->ioaddr.status_addr);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index efd7d7a61135..c72fcc46f0fa 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -7,21 +7,26 @@
7 * 7 *
8 * Copyright 2003-2004 Red Hat, Inc. 8 * Copyright 2003-2004 Red Hat, Inc.
9 * 9 *
10 * The contents of this file are subject to the Open
11 * Software License version 1.1 that can be found at
12 * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
13 * by reference.
14 * 10 *
15 * Alternatively, the contents of this file may be used under the terms 11 * This program is free software; you can redistribute it and/or modify
16 * of the GNU General Public License version 2 (the "GPL") as distributed 12 * it under the terms of the GNU General Public License as published by
17 * in the kernel source COPYING file, in which case the provisions of 13 * the Free Software Foundation; either version 2, or (at your option)
18 * the GPL are applicable instead of the above. If you wish to allow 14 * any later version.
19 * the use of your version of this file only under the terms of the 15 *
20 * GPL and not to allow others to use your version of this file under 16 * This program is distributed in the hope that it will be useful,
21 * the OSL, indicate your decision by deleting the provisions above and 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * replace them with the notice and other provisions required by the GPL. 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * If you do not delete the provisions above, a recipient may use your 19 * GNU General Public License for more details.
24 * version of this file under either the OSL or the GPL. 20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
25 * 30 *
26 */ 31 */
27 32
@@ -94,7 +99,7 @@ enum {
94 PDC_DIMM1_CONTROL_OFFSET = 0x84, 99 PDC_DIMM1_CONTROL_OFFSET = 0x84,
95 PDC_SDRAM_CONTROL_OFFSET = 0x88, 100 PDC_SDRAM_CONTROL_OFFSET = 0x88,
96 PDC_I2C_WRITE = 0x00000000, 101 PDC_I2C_WRITE = 0x00000000,
97 PDC_I2C_READ = 0x00000040, 102 PDC_I2C_READ = 0x00000040,
98 PDC_I2C_START = 0x00000080, 103 PDC_I2C_START = 0x00000080,
99 PDC_I2C_MASK_INT = 0x00000020, 104 PDC_I2C_MASK_INT = 0x00000020,
100 PDC_I2C_COMPLETE = 0x00010000, 105 PDC_I2C_COMPLETE = 0x00010000,
@@ -105,16 +110,16 @@ enum {
105 PDC_DIMM_SPD_COLUMN_NUM = 4, 110 PDC_DIMM_SPD_COLUMN_NUM = 4,
106 PDC_DIMM_SPD_MODULE_ROW = 5, 111 PDC_DIMM_SPD_MODULE_ROW = 5,
107 PDC_DIMM_SPD_TYPE = 11, 112 PDC_DIMM_SPD_TYPE = 11,
108 PDC_DIMM_SPD_FRESH_RATE = 12, 113 PDC_DIMM_SPD_FRESH_RATE = 12,
109 PDC_DIMM_SPD_BANK_NUM = 17, 114 PDC_DIMM_SPD_BANK_NUM = 17,
110 PDC_DIMM_SPD_CAS_LATENCY = 18, 115 PDC_DIMM_SPD_CAS_LATENCY = 18,
111 PDC_DIMM_SPD_ATTRIBUTE = 21, 116 PDC_DIMM_SPD_ATTRIBUTE = 21,
112 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, 117 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
113 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, 118 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
114 PDC_DIMM_SPD_RAS_CAS_DELAY = 29, 119 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
115 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, 120 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
116 PDC_DIMM_SPD_SYSTEM_FREQ = 126, 121 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
117 PDC_CTL_STATUS = 0x08, 122 PDC_CTL_STATUS = 0x08,
118 PDC_DIMM_WINDOW_CTLR = 0x0C, 123 PDC_DIMM_WINDOW_CTLR = 0x0C,
119 PDC_TIME_CONTROL = 0x3C, 124 PDC_TIME_CONTROL = 0x3C,
120 PDC_TIME_PERIOD = 0x40, 125 PDC_TIME_PERIOD = 0x40,
@@ -157,15 +162,15 @@ static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
157static void pdc20621_host_stop(struct ata_host_set *host_set); 162static void pdc20621_host_stop(struct ata_host_set *host_set);
158static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); 163static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
159static int pdc20621_detect_dimm(struct ata_probe_ent *pe); 164static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
160static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, 165static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
161 u32 device, u32 subaddr, u32 *pdata); 166 u32 device, u32 subaddr, u32 *pdata);
162static int pdc20621_prog_dimm0(struct ata_probe_ent *pe); 167static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
163static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe); 168static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
164#ifdef ATA_VERBOSE_DEBUG 169#ifdef ATA_VERBOSE_DEBUG
165static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, 170static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
166 void *psource, u32 offset, u32 size); 171 void *psource, u32 offset, u32 size);
167#endif 172#endif
168static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 173static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
169 void *psource, u32 offset, u32 size); 174 void *psource, u32 offset, u32 size);
170static void pdc20621_irq_clear(struct ata_port *ap); 175static void pdc20621_irq_clear(struct ata_port *ap);
171static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 176static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -825,7 +830,8 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
825 ap = host_set->ports[port_no]; 830 ap = host_set->ports[port_no];
826 tmp = mask & (1 << i); 831 tmp = mask & (1 << i);
827 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 832 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
828 if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { 833 if (tmp && ap &&
834 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
829 struct ata_queued_cmd *qc; 835 struct ata_queued_cmd *qc;
830 836
831 qc = ata_qc_from_tag(ap, ap->active_tag); 837 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -847,10 +853,14 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
847static void pdc_eng_timeout(struct ata_port *ap) 853static void pdc_eng_timeout(struct ata_port *ap)
848{ 854{
849 u8 drv_stat; 855 u8 drv_stat;
856 struct ata_host_set *host_set = ap->host_set;
850 struct ata_queued_cmd *qc; 857 struct ata_queued_cmd *qc;
858 unsigned long flags;
851 859
852 DPRINTK("ENTER\n"); 860 DPRINTK("ENTER\n");
853 861
862 spin_lock_irqsave(&host_set->lock, flags);
863
854 qc = ata_qc_from_tag(ap, ap->active_tag); 864 qc = ata_qc_from_tag(ap, ap->active_tag);
855 if (!qc) { 865 if (!qc) {
856 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 866 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
@@ -884,6 +894,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
884 } 894 }
885 895
886out: 896out:
897 spin_unlock_irqrestore(&host_set->lock, flags);
887 DPRINTK("EXIT\n"); 898 DPRINTK("EXIT\n");
888} 899}
889 900
@@ -922,7 +933,7 @@ static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
922 933
923 934
924#ifdef ATA_VERBOSE_DEBUG 935#ifdef ATA_VERBOSE_DEBUG
925static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, 936static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
926 u32 offset, u32 size) 937 u32 offset, u32 size)
927{ 938{
928 u32 window_size; 939 u32 window_size;
@@ -936,9 +947,9 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
936 /* hard-code chip #0 */ 947 /* hard-code chip #0 */
937 mmio += PDC_CHIP0_OFS; 948 mmio += PDC_CHIP0_OFS;
938 949
939 page_mask = 0x00; 950 page_mask = 0x00;
940 window_size = 0x2000 * 4; /* 32K byte uchar size */ 951 window_size = 0x2000 * 4; /* 32K byte uchar size */
941 idx = (u16) (offset / window_size); 952 idx = (u16) (offset / window_size);
942 953
943 writel(0x01, mmio + PDC_GENERAL_CTLR); 954 writel(0x01, mmio + PDC_GENERAL_CTLR);
944 readl(mmio + PDC_GENERAL_CTLR); 955 readl(mmio + PDC_GENERAL_CTLR);
@@ -947,19 +958,19 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
947 958
948 offset -= (idx * window_size); 959 offset -= (idx * window_size);
949 idx++; 960 idx++;
950 dist = ((long) (window_size - (offset + size))) >= 0 ? size : 961 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
951 (long) (window_size - offset); 962 (long) (window_size - offset);
952 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), 963 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
953 dist); 964 dist);
954 965
955 psource += dist; 966 psource += dist;
956 size -= dist; 967 size -= dist;
957 for (; (long) size >= (long) window_size ;) { 968 for (; (long) size >= (long) window_size ;) {
958 writel(0x01, mmio + PDC_GENERAL_CTLR); 969 writel(0x01, mmio + PDC_GENERAL_CTLR);
959 readl(mmio + PDC_GENERAL_CTLR); 970 readl(mmio + PDC_GENERAL_CTLR);
960 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 971 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
961 readl(mmio + PDC_DIMM_WINDOW_CTLR); 972 readl(mmio + PDC_DIMM_WINDOW_CTLR);
962 memcpy_fromio((char *) psource, (char *) (dimm_mmio), 973 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
963 window_size / 4); 974 window_size / 4);
964 psource += window_size; 975 psource += window_size;
965 size -= window_size; 976 size -= window_size;
@@ -971,14 +982,14 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
971 readl(mmio + PDC_GENERAL_CTLR); 982 readl(mmio + PDC_GENERAL_CTLR);
972 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 983 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
973 readl(mmio + PDC_DIMM_WINDOW_CTLR); 984 readl(mmio + PDC_DIMM_WINDOW_CTLR);
974 memcpy_fromio((char *) psource, (char *) (dimm_mmio), 985 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
975 size / 4); 986 size / 4);
976 } 987 }
977} 988}
978#endif 989#endif
979 990
980 991
981static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, 992static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
982 u32 offset, u32 size) 993 u32 offset, u32 size)
983{ 994{
984 u32 window_size; 995 u32 window_size;
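Editor's note: pdc20621_get_from_dimm() and pdc20621_put_to_dimm() reach the SX4's DIMM memory through a fixed 32 KB MMIO window selected via PDC_DIMM_WINDOW_CTLR, so every copy is split into a head chunk, whole-window chunks, and a tail, reprogramming the window index between them. A userspace model of the loop, with an illustrative window size:

/* Windowed copy: a large buffer is reachable only through a WINDOW-
 * byte aperture selected by an index "register". */
#include <stdio.h>
#include <string.h>

#define WINDOW 0x2000u

static unsigned char dimm[4 * WINDOW];      /* the "DIMM" */
static unsigned int win_idx;                /* window select register */

static void copy_from_dimm(unsigned char *dst, unsigned int off,
                           unsigned int len)
{
    while (len) {
        win_idx = off / WINDOW;             /* program the window */
        unsigned int in_win = off % WINDOW;
        unsigned int chunk = WINDOW - in_win;
        if (chunk > len)
            chunk = len;
        memcpy(dst, dimm + win_idx * WINDOW + in_win, chunk);
        dst += chunk; off += chunk; len -= chunk;
    }
}

int main(void)
{
    unsigned char buf[3 * WINDOW];
    memset(dimm, 0xAB, sizeof(dimm));
    copy_from_dimm(buf, WINDOW - 16, sizeof(buf));  /* spans 4 windows */
    printf("copied, first byte 0x%02X\n", buf[0]);
    return 0;
}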
@@ -989,16 +1000,16 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
989 struct pdc_host_priv *hpriv = pe->private_data; 1000 struct pdc_host_priv *hpriv = pe->private_data;
990 void *dimm_mmio = hpriv->dimm_mmio; 1001 void *dimm_mmio = hpriv->dimm_mmio;
991 1002
992 /* hard-code chip #0 */ 1003 /* hard-code chip #0 */
993 mmio += PDC_CHIP0_OFS; 1004 mmio += PDC_CHIP0_OFS;
994 1005
995 page_mask = 0x00; 1006 page_mask = 0x00;
996 window_size = 0x2000 * 4; /* 32K byte uchar size */ 1007 window_size = 0x2000 * 4; /* 32K byte uchar size */
997 idx = (u16) (offset / window_size); 1008 idx = (u16) (offset / window_size);
998 1009
999 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1010 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1000 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1011 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1001 offset -= (idx * window_size); 1012 offset -= (idx * window_size);
1002 idx++; 1013 idx++;
1003 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size : 1014 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1004 (long) (window_size - offset); 1015 (long) (window_size - offset);
@@ -1006,12 +1017,12 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1006 writel(0x01, mmio + PDC_GENERAL_CTLR); 1017 writel(0x01, mmio + PDC_GENERAL_CTLR);
1007 readl(mmio + PDC_GENERAL_CTLR); 1018 readl(mmio + PDC_GENERAL_CTLR);
1008 1019
1009 psource += dist; 1020 psource += dist;
1010 size -= dist; 1021 size -= dist;
1011 for (; (long) size >= (long) window_size ;) { 1022 for (; (long) size >= (long) window_size ;) {
1012 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1023 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1013 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1024 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1014 memcpy_toio((char *) (dimm_mmio), (char *) psource, 1025 memcpy_toio((char *) (dimm_mmio), (char *) psource,
1015 window_size / 4); 1026 window_size / 4);
1016 writel(0x01, mmio + PDC_GENERAL_CTLR); 1027 writel(0x01, mmio + PDC_GENERAL_CTLR);
1017 readl(mmio + PDC_GENERAL_CTLR); 1028 readl(mmio + PDC_GENERAL_CTLR);
@@ -1019,7 +1030,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1019 size -= window_size; 1030 size -= window_size;
1020 idx ++; 1031 idx ++;
1021 } 1032 }
1022 1033
1023 if (size) { 1034 if (size) {
1024 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1035 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1025 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1036 readl(mmio + PDC_DIMM_WINDOW_CTLR);
@@ -1030,12 +1041,12 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1030} 1041}
1031 1042
1032 1043
1033static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, 1044static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1034 u32 subaddr, u32 *pdata) 1045 u32 subaddr, u32 *pdata)
1035{ 1046{
1036 void *mmio = pe->mmio_base; 1047 void *mmio = pe->mmio_base;
1037 u32 i2creg = 0; 1048 u32 i2creg = 0;
1038 u32 status; 1049 u32 status;
1039 u32 count =0; 1050 u32 count =0;
1040 1051
1041 /* hard-code chip #0 */ 1052 /* hard-code chip #0 */
@@ -1049,7 +1060,7 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1049 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET); 1060 readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
1050 1061
1051 /* Write Control to perform read operation, mask int */ 1062 /* Write Control to perform read operation, mask int */
1052 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, 1063 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1053 mmio + PDC_I2C_CONTROL_OFFSET); 1064 mmio + PDC_I2C_CONTROL_OFFSET);
1054 1065
1055 for (count = 0; count <= 1000; count ++) { 1066 for (count = 0; count <= 1000; count ++) {
@@ -1062,26 +1073,26 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1062 } 1073 }
1063 1074
1064 *pdata = (status >> 8) & 0x000000ff; 1075 *pdata = (status >> 8) & 0x000000ff;
1065 return 1; 1076 return 1;
1066} 1077}
1067 1078
1068 1079
1069static int pdc20621_detect_dimm(struct ata_probe_ent *pe) 1080static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
1070{ 1081{
1071 u32 data=0 ; 1082 u32 data=0 ;
1072 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 1083 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1073 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { 1084 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1074 if (data == 100) 1085 if (data == 100)
1075 return 100; 1086 return 100;
1076 } else 1087 } else
1077 return 0; 1088 return 0;
1078 1089
1079 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { 1090 if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1080 if(data <= 0x75) 1091 if(data <= 0x75)
1081 return 133; 1092 return 133;
1082 } else 1093 } else
1083 return 0; 1094 return 0;
1084 1095
1085 return 0; 1096 return 0;
1086} 1097}
1087 1098
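Editor's note: pdc20621_i2c_read() kicks off the transaction by writing the control bits, then polls the status register up to 1000 times for PDC_I2C_COMPLETE; the data byte sits in bits 15:8 of the status word, as the `(status >> 8) & 0xff` extraction shows. A simulated version of that polled read:

/* Polled I2C read: start, spin on a completion bit with a bounded
 * retry count, extract the byte from the status word. */
#include <stdint.h>
#include <stdio.h>

#define I2C_COMPLETE 0x00010000u

static int polls_left = 3;           /* simulated hardware latency */
static uint32_t read_status(void)
{
    return (--polls_left <= 0) ? (I2C_COMPLETE | (0x42u << 8)) : 0;
}

static int i2c_read_byte(uint8_t *out)
{
    for (int count = 0; count <= 1000; count++) {
        uint32_t status = read_status();
        if (status & I2C_COMPLETE) {
            *out = (status >> 8) & 0xff;   /* data byte in bits 15:8 */
            return 1;
        }
    }
    return 0;                               /* timed out */
}

int main(void)
{
    uint8_t b;
    if (i2c_read_byte(&b))
        printf("read 0x%02X\n", b);
    return 0;
}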
@@ -1091,15 +1102,15 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1091 u32 spd0[50]; 1102 u32 spd0[50];
1092 u32 data = 0; 1103 u32 data = 0;
1093 int size, i; 1104 int size, i;
1094 u8 bdimmsize; 1105 u8 bdimmsize;
1095 void *mmio = pe->mmio_base; 1106 void *mmio = pe->mmio_base;
1096 static const struct { 1107 static const struct {
1097 unsigned int reg; 1108 unsigned int reg;
1098 unsigned int ofs; 1109 unsigned int ofs;
1099 } pdc_i2c_read_data [] = { 1110 } pdc_i2c_read_data [] = {
1100 { PDC_DIMM_SPD_TYPE, 11 }, 1111 { PDC_DIMM_SPD_TYPE, 11 },
1101 { PDC_DIMM_SPD_FRESH_RATE, 12 }, 1112 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1102 { PDC_DIMM_SPD_COLUMN_NUM, 4 }, 1113 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1103 { PDC_DIMM_SPD_ATTRIBUTE, 21 }, 1114 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1104 { PDC_DIMM_SPD_ROW_NUM, 3 }, 1115 { PDC_DIMM_SPD_ROW_NUM, 3 },
1105 { PDC_DIMM_SPD_BANK_NUM, 17 }, 1116 { PDC_DIMM_SPD_BANK_NUM, 17 },
@@ -1108,7 +1119,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1108 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 }, 1119 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1109 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 }, 1120 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1110 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 }, 1121 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1111 { PDC_DIMM_SPD_CAS_LATENCY, 18 }, 1122 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1112 }; 1123 };
1113 1124
1114 /* hard-code chip #0 */ 1125 /* hard-code chip #0 */
@@ -1116,17 +1127,17 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1116 1127
1117 for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++) 1128 for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
1118 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 1129 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1119 pdc_i2c_read_data[i].reg, 1130 pdc_i2c_read_data[i].reg,
1120 &spd0[pdc_i2c_read_data[i].ofs]); 1131 &spd0[pdc_i2c_read_data[i].ofs]);
1121 1132
1122 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); 1133 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1123 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | 1134 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1124 ((((spd0[27] + 9) / 10) - 1) << 8) ; 1135 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1125 data |= (((((spd0[29] > spd0[28]) 1136 data |= (((((spd0[29] > spd0[28])
1126 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; 1137 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1127 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; 1138 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1128 1139
1129 if (spd0[18] & 0x08) 1140 if (spd0[18] & 0x08)
1130 data |= ((0x03) << 14); 1141 data |= ((0x03) << 14);
1131 else if (spd0[18] & 0x04) 1142 else if (spd0[18] & 0x04)
1132 data |= ((0x02) << 14); 1143 data |= ((0x02) << 14);
@@ -1135,7 +1146,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1135 else 1146 else
1136 data |= (0 << 14); 1147 data |= (0 << 14);
1137 1148
1138 /* 1149 /*
1139 Calculate the size of bDIMMSize (power of 2) and 1150 Calculate the size of bDIMMSize (power of 2) and
1140 merge the DIMM size by program start/end address. 1151 merge the DIMM size by program start/end address.
1141 */ 1152 */
@@ -1145,9 +1156,9 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1145 data |= (((size / 16) - 1) << 16); 1156 data |= (((size / 16) - 1) << 16);
1146 data |= (0 << 23); 1157 data |= (0 << 23);
1147 data |= 8; 1158 data |= 8;
1148 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET); 1159 writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
1149 readl(mmio + PDC_DIMM0_CONTROL_OFFSET); 1160 readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
1150 return size; 1161 return size;
1151} 1162}
1152 1163
1153 1164
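Editor's note: pdc20621_prog_dimm0() reads a handful of SPD bytes over I2C and packs derived values (column/row address bits, bank count, CAS latency, precharge timings) into one 32-bit DIMM control word. A much-simplified sketch of the packing style; the field layout below is illustrative, not the real register map:

/* Packing SPD-derived timing fields into a single control word. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t col_num = 9, row_num = 13, bank_num = 4;  /* sample SPD */
    uint32_t data = 0;

    data |= (col_num - 8);              /* low bits: column address bits */
    data |= (row_num - 11) << 4;        /* bits 5:4: row address bits    */
    data |= (bank_num / 4) << 6;        /* bit 6:    bank count          */
    printf("dimm0 control word: 0x%08X\n", data);
    return 0;
}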
@@ -1167,12 +1178,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1167 Refresh Enable (bit 17) 1178 Refresh Enable (bit 17)
1168 */ 1179 */
1169 1180
1170 data = 0x022259F1; 1181 data = 0x022259F1;
1171 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); 1182 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1172 readl(mmio + PDC_SDRAM_CONTROL_OFFSET); 1183 readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1173 1184
1174 /* Turn on for ECC */ 1185 /* Turn on for ECC */
1175 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 1186 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1176 PDC_DIMM_SPD_TYPE, &spd0); 1187 PDC_DIMM_SPD_TYPE, &spd0);
1177 if (spd0 == 0x02) { 1188 if (spd0 == 0x02) {
1178 data |= (0x01 << 16); 1189 data |= (0x01 << 16);
@@ -1186,22 +1197,22 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1186 data |= (1<<19); 1197 data |= (1<<19);
1187 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); 1198 writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
1188 1199
1189 error = 1; 1200 error = 1;
1190 for (i = 1; i <= 10; i++) { /* polling ~5 secs */ 1201 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1191 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET); 1202 data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
1192 if (!(data & (1<<19))) { 1203 if (!(data & (1<<19))) {
1193 error = 0; 1204 error = 0;
1194 break; 1205 break;
1195 } 1206 }
1196 msleep(i*100); 1207 msleep(i*100);
1197 } 1208 }
1198 return error; 1209 return error;
1199} 1210}
1200 1211
1201 1212
1202static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) 1213static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1203{ 1214{
1204 int speed, size, length; 1215 int speed, size, length;
1205 u32 addr,spd0,pci_status; 1216 u32 addr,spd0,pci_status;
1206 u32 tmp=0; 1217 u32 tmp=0;
1207 u32 time_period=0; 1218 u32 time_period=0;
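Editor's note: after setting the refresh-enable bit (bit 19), pdc20621_prog_dimm_global() polls it clear with growing sleeps: up to 10 attempts, sleeping i*100 ms, roughly 5 seconds in total. The same bounded-poll-with-backoff shape, with the hardware bit simulated:

/* Bounded poll with growing sleeps; the bit "clears" on the 3rd read. */
#include <stdio.h>
#include <unistd.h>

static int reads;
static unsigned int read_ctrl(void)
{
    return (++reads >= 3) ? 0 : (1u << 19);
}

int main(void)
{
    int error = 1;
    for (int i = 1; i <= 10; i++) {           /* polling ~5 secs total */
        if (!(read_ctrl() & (1u << 19))) {
            error = 0;
            break;
        }
        usleep(i * 100 * 1000);               /* msleep(i * 100) analog */
    }
    printf(error ? "timed out\n" : "refresh enabled\n");
    return 0;
}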
@@ -1228,7 +1239,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1228 /* Wait 3 seconds */ 1239 /* Wait 3 seconds */
1229 msleep(3000); 1240 msleep(3000);
1230 1241
1231 /* 1242 /*
1232 When timer is enabled, counter is decreased every internal 1243 When timer is enabled, counter is decreased every internal
1233 clock cycle. 1244 clock cycle.
1234 */ 1245 */
@@ -1236,24 +1247,24 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1236 tcount = readl(mmio + PDC_TIME_COUNTER); 1247 tcount = readl(mmio + PDC_TIME_COUNTER);
1237 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); 1248 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1238 1249
1239 /* 1250 /*
1240 If SX4 is on PCI-X bus, after 3 seconds, the timer counter 1251 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1241 register should be >= (0xffffffff - 3x10^8). 1252 register should be >= (0xffffffff - 3x10^8).
1242 */ 1253 */
1243 if(tcount >= PCI_X_TCOUNT) { 1254 if(tcount >= PCI_X_TCOUNT) {
1244 ticks = (time_period - tcount); 1255 ticks = (time_period - tcount);
1245 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); 1256 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1246 1257
1247 clock = (ticks / 300000); 1258 clock = (ticks / 300000);
1248 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); 1259 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1249 1260
1250 clock = (clock * 33); 1261 clock = (clock * 33);
1251 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); 1262 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1252 1263
1253 /* PLL F Param (bit 22:16) */ 1264 /* PLL F Param (bit 22:16) */
1254 fparam = (1400000 / clock) - 2; 1265 fparam = (1400000 / clock) - 2;
1255 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); 1266 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1256 1267
1257 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ 1268 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1258 pci_status = (0x8a001824 | (fparam << 16)); 1269 pci_status = (0x8a001824 | (fparam << 16));
1259 } else 1270 } else
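Editor's note: the clock probe above counts how far the free-running timer decrements in the 3-second wait, converts the tick count into a bus-clock figure, and derives the PLL F parameter from it. The arithmetic, using the constants visible in the diff and a sample counter reading:

/* Worked version of the PLL arithmetic in pdc20621_dimm_init(). */
#include <stdio.h>

int main(void)
{
    unsigned int time_period = 0xffffffffu;
    unsigned int tcount = 0xffffffffu - 400000000u;  /* sample read */
    unsigned int ticks  = time_period - tcount;      /* ticks in ~3 s */
    unsigned int clock  = ticks / 300000;            /* "10 * internal clk" */
    clock *= 33;
    unsigned int fparam = 1400000 / clock - 2;       /* PLL F param */

    printf("ticks=%u clock=%u fparam=%u\n", ticks, clock, fparam);
    return 0;
}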
@@ -1264,21 +1275,21 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1264 writel(pci_status, mmio + PDC_CTL_STATUS); 1275 writel(pci_status, mmio + PDC_CTL_STATUS);
1265 readl(mmio + PDC_CTL_STATUS); 1276 readl(mmio + PDC_CTL_STATUS);
1266 1277
1267 /* 1278 /*
1268 Read SPD of DIMM by I2C interface, 1279 Read SPD of DIMM by I2C interface,
1269 and program the DIMM Module Controller. 1280 and program the DIMM Module Controller.
1270 */ 1281 */
1271 if (!(speed = pdc20621_detect_dimm(pe))) { 1282 if (!(speed = pdc20621_detect_dimm(pe))) {
1272 printk(KERN_ERR "Detect Local DIMM Fail\n"); 1283 printk(KERN_ERR "Detect Local DIMM Fail\n");
1273 return 1; /* DIMM error */ 1284 return 1; /* DIMM error */
1274 } 1285 }
1275 VPRINTK("Local DIMM Speed = %d\n", speed); 1286 VPRINTK("Local DIMM Speed = %d\n", speed);
1276 1287
1277 /* Programming DIMM0 Module Control Register (index_CID0:80h) */ 1288 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1278 size = pdc20621_prog_dimm0(pe); 1289 size = pdc20621_prog_dimm0(pe);
1279 VPRINTK("Local DIMM Size = %dMB\n",size); 1290 VPRINTK("Local DIMM Size = %dMB\n",size);
1280 1291
1281 /* Programming DIMM Module Global Control Register (index_CID0:88h) */ 1292 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1282 if (pdc20621_prog_dimm_global(pe)) { 1293 if (pdc20621_prog_dimm_global(pe)) {
1283 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); 1294 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1284 return 1; 1295 return 1;
@@ -1297,30 +1308,30 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1297 1308
1298 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40); 1309 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40);
1299 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); 1310 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
1300 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1311 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1301 test_parttern2[1], &(test_parttern2[2])); 1312 test_parttern2[1], &(test_parttern2[2]));
1302 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040, 1313 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040,
1303 40); 1314 40);
1304 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1315 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1305 test_parttern2[1], &(test_parttern2[2])); 1316 test_parttern2[1], &(test_parttern2[2]));
1306 1317
1307 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40); 1318 pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40);
1308 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); 1319 pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
1309 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1320 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1310 test_parttern2[1], &(test_parttern2[2])); 1321 test_parttern2[1], &(test_parttern2[2]));
1311 } 1322 }
1312#endif 1323#endif
1313 1324
1314 /* ECC initiliazation. */ 1325 /* ECC initiliazation. */
1315 1326
1316 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 1327 pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
1317 PDC_DIMM_SPD_TYPE, &spd0); 1328 PDC_DIMM_SPD_TYPE, &spd0);
1318 if (spd0 == 0x02) { 1329 if (spd0 == 0x02) {
1319 VPRINTK("Start ECC initialization\n"); 1330 VPRINTK("Start ECC initialization\n");
1320 addr = 0; 1331 addr = 0;
1321 length = size * 1024 * 1024; 1332 length = size * 1024 * 1024;
1322 while (addr < length) { 1333 while (addr < length) {
1323 pdc20621_put_to_dimm(pe, (void *) &tmp, addr, 1334 pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
1324 sizeof(u32)); 1335 sizeof(u32));
1325 addr += sizeof(u32); 1336 addr += sizeof(u32);
1326 } 1337 }
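
The clock bring-up in the context above is pure arithmetic, so it can be read as one small function. A sketch only, with the function name invented for illustration and the constants taken from the code shown:

static u32 pdc_pll_control_word(u32 time_period, u32 tcount)
{
	u32 ticks = time_period - tcount;	/* counts in the sample window */
	u32 clock = (ticks / 300000) * 33;	/* 10 * internal clock, scaled */
	u32 fparam = (1400000 / clock) - 2;	/* PLL F param, bits 22:16 */

	/* OD = 0x2 (bits 31:30), R = 0x5 (bits 29:25), per the comment */
	return 0x8a001824 | (fparam << 16);
}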
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index a71fb54eebd3..1566886815fb 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -1,21 +1,26 @@
 /*
  * sata_uli.c - ULi Electronics SATA
  *
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
  *
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above.  If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
  *
  */

@@ -214,7 +219,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		rc = -ENOMEM;
 		goto err_out_regions;
 	}

 	switch (board_idx) {
 	case uli_5287:
 		probe_ent->port[0].scr_addr = ULI5287_BASE;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index f43183c19a12..128b996b07b7 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -1,34 +1,38 @@
 /*
-  sata_via.c - VIA Serial ATA controllers
-
-  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
- 	 Please ALWAYS copy linux-ide@vger.kernel.org
+ * sata_via.c - VIA Serial ATA controllers
+ *
+ * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ * 		   Please ALWAYS copy linux-ide@vger.kernel.org
 		   on emails.
-
-  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
-  Copyright 2003-2004 Jeff Garzik
-
-  The contents of this file are subject to the Open
-  Software License version 1.1 that can be found at
-  http://www.opensource.org/licenses/osl-1.1.txt and is included herein
-  by reference.
-
-  Alternatively, the contents of this file may be used under the terms
-  of the GNU General Public License version 2 (the "GPL") as distributed
-  in the kernel source COPYING file, in which case the provisions of
-  the GPL are applicable instead of the above.  If you wish to allow
-  the use of your version of this file only under the terms of the
-  GPL and not to allow others to use your version of this file under
-  the OSL, indicate your decision by deleting the provisions above and
-  replace them with the notice and other provisions required by the GPL.
-  If you do not delete the provisions above, a recipient may use your
-  version of this file under either the OSL or the GPL.
-
-  ----------------------------------------------------------------------
-
-  To-do list:
-  * VT6421 PATA support
-
+ *
+ * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ *
+ * To-do list:
+ * - VT6421 PATA support
+ *
  */

 #include <linux/kernel.h>
@@ -347,7 +351,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		probe_ent = vt6420_init_probe_ent(pdev);
 	else
 		probe_ent = vt6421_init_probe_ent(pdev);

 	if (!probe_ent) {
 		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
 		       pci_name(pdev));
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index c5e09dc6f3de..3985f344da4d 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -9,9 +9,29 @@
  *
  * Bits from Jeff Garzik, Copyright RedHat, Inc.
  *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Vitesse hardware documentation presumably available under NDA.
+ * Intel 31244 (same hardware interface) documentation presumably
+ * available from http://developer.intel.com/
+ *
  */

 #include <linux/kernel.h>
@@ -173,7 +193,8 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 		struct ata_port *ap;

 		ap = host_set->ports[i];
-		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+		if (ap && !(ap->flags &
+			    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
 			struct ata_queued_cmd *qc;

 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -342,7 +363,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d

 	pci_set_master(pdev);

 	/*
 	 * Config offset 0x98 is "Extended Control and Status Register 0"
 	 * Default value is (1 << 28).  All bits except bit 28 are reserved in
 	 * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
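
The interrupt hunk above folds two per-port conditions into a single mask test: a port must be neither disabled nor flagged as polling-only (ATA_FLAG_NOINTR) before its queued command is completed from the irq handler. A sketch of the predicate, with an illustrative helper name; both flags come from the patch:

static inline int vsc_port_takes_interrupts(const struct ata_port *ap)
{
	/* either flag vetoes irq-driven completion for this port */
	return ap && !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR));
}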
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 51292f269ce5..e822ca0e97cf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2971,23 +2971,22 @@ static void * dev_seq_start(struct seq_file *s, loff_t *pos)
 {
 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

+	s->private = it;
 	if (! it)
 		return NULL;
+
 	if (NULL == sg_dev_arr)
-		goto err1;
+		return NULL;
 	it->index = *pos;
 	it->max = sg_last_dev();
 	if (it->index >= it->max)
-		goto err1;
+		return NULL;
 	return it;
-err1:
-	kfree(it);
-	return NULL;
 }

 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+	struct sg_proc_deviter * it = s->private;

 	*pos = ++it->index;
 	return (it->index < it->max) ? it : NULL;
@@ -2995,7 +2994,7 @@ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)

 static void dev_seq_stop(struct seq_file *s, void *v)
 {
-	kfree (v);
+	kfree(s->private);
 }

 static int sg_proc_open_dev(struct inode *inode, struct file *file)
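
The sg.c rework parks the iterator in seq_file->private the moment it is allocated, so dev_seq_stop() can free it on every path and the goto-based cleanup disappears. Because ->stop() runs whether or not ->start() succeeded, and kfree(NULL) is a no-op, there is exactly one owner to free. A minimal sketch of the pattern under illustrative names:

#include <linux/seq_file.h>
#include <linux/slab.h>

struct my_iter {
	loff_t index;
	loff_t max;
};

static void *my_seq_start(struct seq_file *s, loff_t *pos)
{
	struct my_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;		/* stash before any early return */
	if (!it)
		return NULL;

	it->index = *pos;
	it->max = 16;			/* illustrative bound */
	return it->index < it->max ? it : NULL;
}

static void my_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);		/* safe even if start returned NULL */
}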
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 07f05e9d0955..0e21f583690e 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -34,36 +34,6 @@
 #undef SERIAL_DEBUG_PCI

 /*
- * Definitions for PCI support.
- */
-#define FL_BASE_MASK		0x0007
-#define FL_BASE0		0x0000
-#define FL_BASE1		0x0001
-#define FL_BASE2		0x0002
-#define FL_BASE3		0x0003
-#define FL_BASE4		0x0004
-#define FL_GET_BASE(x)		(x & FL_BASE_MASK)
-
-/* Use successive BARs (PCI base address registers),
-   else use offset into some specified BAR */
-#define FL_BASE_BARS		0x0008
-
-/* do not assign an irq */
-#define FL_NOIRQ		0x0080
-
-/* Use the Base address register size to cap number of ports */
-#define FL_REGION_SZ_CAP	0x0100
-
-struct pci_board {
-	unsigned int flags;
-	unsigned int num_ports;
-	unsigned int base_baud;
-	unsigned int uart_offset;
-	unsigned int reg_shift;
-	unsigned int first_offset;
-};
-
-/*
  * init function returns:
  * > 0 - number of ports
  * = 0 - use board->num_ports
@@ -75,14 +45,15 @@ struct pci_serial_quirk {
 	u32	subvendor;
 	u32	subdevice;
 	int	(*init)(struct pci_dev *dev);
-	int	(*setup)(struct pci_dev *dev, struct pci_board *board,
-			 struct uart_port *port, int idx);
+	int	(*setup)(struct serial_private *, struct pciserial_board *,
+			 struct uart_port *, int);
 	void	(*exit)(struct pci_dev *dev);
 };

 #define PCI_NUM_BAR_RESOURCES	6

 struct serial_private {
+	struct pci_dev		*dev;
 	unsigned int		nr;
 	void __iomem		*remapped_bar[PCI_NUM_BAR_RESOURCES];
 	struct pci_serial_quirk	*quirk;
@@ -101,17 +72,18 @@ static void moan_device(const char *str, struct pci_dev *dev)
 }

 static int
-setup_port(struct pci_dev *dev, struct uart_port *port,
+setup_port(struct serial_private *priv, struct uart_port *port,
 	   int bar, int offset, int regshift)
 {
-	struct serial_private *priv = pci_get_drvdata(dev);
+	struct pci_dev *dev = priv->dev;
 	unsigned long base, len;

 	if (bar >= PCI_NUM_BAR_RESOURCES)
 		return -EINVAL;

+	base = pci_resource_start(dev, bar);
+
 	if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
-		base = pci_resource_start(dev, bar);
 		len = pci_resource_len(dev, bar);

 		if (!priv->remapped_bar[bar])
@@ -120,13 +92,16 @@ setup_port(struct pci_dev *dev, struct uart_port *port,
 			return -ENOMEM;

 		port->iotype = UPIO_MEM;
+		port->iobase = 0;
 		port->mapbase = base + offset;
 		port->membase = priv->remapped_bar[bar] + offset;
 		port->regshift = regshift;
 	} else {
-		base = pci_resource_start(dev, bar) + offset;
 		port->iotype = UPIO_PORT;
-		port->iobase = base;
+		port->iobase = base + offset;
+		port->mapbase = 0;
+		port->membase = NULL;
+		port->regshift = 0;
 	}
 	return 0;
 }
@@ -136,7 +111,7 @@ setup_port(struct pci_dev *dev, struct uart_port *port,
  * Not that ugly ;) -- HW
  */
 static int
-afavlab_setup(struct pci_dev *dev, struct pci_board *board,
+afavlab_setup(struct serial_private *priv, struct pciserial_board *board,
 	      struct uart_port *port, int idx)
 {
 	unsigned int bar, offset = board->first_offset;
@@ -149,7 +124,7 @@ afavlab_setup(struct pci_dev *dev, struct pci_board *board,
 		offset += (idx - 4) * board->uart_offset;
 	}

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 /*
@@ -189,13 +164,13 @@ static int __devinit pci_hp_diva_init(struct pci_dev *dev)
  * some serial ports are supposed to be hidden on certain models.
  */
 static int
-pci_hp_diva_setup(struct pci_dev *dev, struct pci_board *board,
+pci_hp_diva_setup(struct serial_private *priv, struct pciserial_board *board,
 		  struct uart_port *port, int idx)
 {
 	unsigned int offset = board->first_offset;
 	unsigned int bar = FL_GET_BASE(board->flags);

-	switch (dev->subsystem_device) {
+	switch (priv->dev->subsystem_device) {
 	case PCI_DEVICE_ID_HP_DIVA_MAESTRO:
 		if (idx == 3)
 			idx++;
@@ -212,7 +187,7 @@ pci_hp_diva_setup(struct pci_dev *dev, struct pci_board *board,

 	offset += idx * board->uart_offset;

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 /*
@@ -307,7 +282,7 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev)

 /* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */
 static int
-sbs_setup(struct pci_dev *dev, struct pci_board *board,
+sbs_setup(struct serial_private *priv, struct pciserial_board *board,
 		struct uart_port *port, int idx)
 {
 	unsigned int bar, offset = board->first_offset;
@@ -323,7 +298,7 @@ sbs_setup(struct pci_dev *dev, struct pci_board *board,
 	} else /* we have only 8 ports on PMC-OCTALPRO */
 		return 1;

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 /*
@@ -389,6 +364,9 @@ static void __devexit sbs_exit(struct pci_dev *dev)
  * - 10x cards have control registers in IO and/or memory space;
  * - 20x cards have control registers in standard PCI configuration space.
  *
+ * Note: all 10x cards have PCI device ids 0x10..
+ *       all 20x cards have PCI device ids 0x20..
+ *
  * There are also Quartet Serial cards which use Oxford Semiconductor
  * 16954 quad UART PCI chip clocked by 18.432 MHz quartz.
  *
@@ -445,24 +423,18 @@ static int pci_siig20x_init(struct pci_dev *dev)
 	return 0;
 }

-int pci_siig10x_fn(struct pci_dev *dev, int enable)
+static int pci_siig_init(struct pci_dev *dev)
 {
-	int ret = 0;
-	if (enable)
-		ret = pci_siig10x_init(dev);
-	return ret;
-}
+	unsigned int type = dev->device & 0xff00;

-int pci_siig20x_fn(struct pci_dev *dev, int enable)
-{
-	int ret = 0;
-	if (enable)
-		ret = pci_siig20x_init(dev);
-	return ret;
-}
+	if (type == 0x1000)
+		return pci_siig10x_init(dev);
+	else if (type == 0x2000)
+		return pci_siig20x_init(dev);

-EXPORT_SYMBOL(pci_siig10x_fn);
-EXPORT_SYMBOL(pci_siig20x_fn);
+	moan_device("Unknown SIIG card", dev);
+	return -ENODEV;
+}

 /*
  * Timedia has an explosion of boards, and to avoid the PCI table from
@@ -523,7 +495,7 @@ static int __devinit pci_timedia_init(struct pci_dev *dev)
  * Ugh, this is ugly as all hell --- TYT
  */
 static int
-pci_timedia_setup(struct pci_dev *dev, struct pci_board *board,
+pci_timedia_setup(struct serial_private *priv, struct pciserial_board *board,
 		  struct uart_port *port, int idx)
 {
 	unsigned int bar = 0, offset = board->first_offset;
@@ -549,14 +521,15 @@ pci_timedia_setup(struct pci_dev *dev, struct pci_board *board,
 		bar = idx - 2;
 	}

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 /*
  * Some Titan cards are also a little weird
  */
 static int
-titan_400l_800l_setup(struct pci_dev *dev, struct pci_board *board,
+titan_400l_800l_setup(struct serial_private *priv,
+		      struct pciserial_board *board,
 		      struct uart_port *port, int idx)
 {
 	unsigned int bar, offset = board->first_offset;
@@ -573,7 +546,7 @@ titan_400l_800l_setup(struct pci_dev *dev, struct pci_board *board,
 		offset = (idx - 2) * board->uart_offset;
 	}

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 static int __devinit pci_xircom_init(struct pci_dev *dev)
@@ -593,7 +566,7 @@ static int __devinit pci_netmos_init(struct pci_dev *dev)
 }

 static int
-pci_default_setup(struct pci_dev *dev, struct pci_board *board,
+pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
 		  struct uart_port *port, int idx)
 {
 	unsigned int bar, offset = board->first_offset, maxnr;
@@ -604,13 +577,13 @@ pci_default_setup(struct pci_dev *dev, struct pci_board *board,
 	else
 		offset += idx * board->uart_offset;

-	maxnr = (pci_resource_len(dev, bar) - board->first_offset) /
+	maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) /
 		(8 << board->reg_shift);

 	if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
 		return 1;

-	return setup_port(dev, port, bar, offset, board->reg_shift);
+	return setup_port(priv, port, bar, offset, board->reg_shift);
 }

 /* This should be in linux/pci_ids.h */
@@ -754,152 +727,15 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
 		.setup		= sbs_setup,
 		.exit		= __devexit_p(sbs_exit),
 	},
-
 	/*
 	 * SIIG cards.
-	 * It is not clear whether these could be collapsed.
 	 */
 	{
 		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_10x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_10x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_10x_850,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_10x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_10x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_10x_850,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_10x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_10x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_10x_850,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig10x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_20x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_20x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_1S_20x_850,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_20x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{	.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_20x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_2S_20x_850,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_20x_550,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_20x_650,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_SIIG,
-		.device		= PCI_DEVICE_ID_SIIG_4S_20x_850,
+		.device		= PCI_ANY_ID,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.init		= pci_siig20x_init,
+		.init		= pci_siig_init,
 		.setup		= pci_default_setup,
 	},
 	/*
@@ -990,7 +826,7 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
 }

 static _INLINE_ int
-get_pci_irq(struct pci_dev *dev, struct pci_board *board, int idx)
+get_pci_irq(struct pci_dev *dev, struct pciserial_board *board)
 {
 	if (board->flags & FL_NOIRQ)
 		return 0;
@@ -1115,7 +951,7 @@ enum pci_board_num_t {
  * see first lines of serial_in() and serial_out() in 8250.c
 */

-static struct pci_board pci_boards[] __devinitdata = {
+static struct pciserial_board pci_boards[] __devinitdata = {
 	[pbn_default] = {
 		.flags		= FL_BASE0,
 		.num_ports	= 1,
@@ -1575,7 +1411,7 @@ static struct pci_board pci_boards[] __devinitdata = {
 * serial specs.  Returns 0 on success, 1 on failure.
 */
 static int __devinit
-serial_pci_guess_board(struct pci_dev *dev, struct pci_board *board)
+serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
 {
 	int num_iomem, num_port, first_port = -1, i;

@@ -1640,7 +1476,8 @@ serial_pci_guess_board(struct pci_dev *dev, struct pci_board *board)
 }

 static inline int
-serial_pci_matches(struct pci_board *board, struct pci_board *guessed)
+serial_pci_matches(struct pciserial_board *board,
+		   struct pciserial_board *guessed)
 {
 	return
 		board->num_ports == guessed->num_ports &&
@@ -1650,58 +1487,14 @@ serial_pci_matches(struct pci_board *board, struct pci_board *guessed)
 		board->first_offset == guessed->first_offset;
 }

-/*
- * Probe one serial board.  Unfortunately, there is no rhyme nor reason
- * to the arrangement of serial ports on a PCI card.
- */
-static int __devinit
-pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+struct serial_private *
+pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
 {
+	struct uart_port serial_port;
 	struct serial_private *priv;
-	struct pci_board *board, tmp;
 	struct pci_serial_quirk *quirk;
 	int rc, nr_ports, i;

-	if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
-		printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
-			ent->driver_data);
-		return -EINVAL;
-	}
-
-	board = &pci_boards[ent->driver_data];
-
-	rc = pci_enable_device(dev);
-	if (rc)
-		return rc;
-
-	if (ent->driver_data == pbn_default) {
-		/*
-		 * Use a copy of the pci_board entry for this;
-		 * avoid changing entries in the table.
-		 */
-		memcpy(&tmp, board, sizeof(struct pci_board));
-		board = &tmp;
-
-		/*
-		 * We matched one of our class entries.  Try to
-		 * determine the parameters of this board.
-		 */
-		rc = serial_pci_guess_board(dev, board);
-		if (rc)
-			goto disable;
-	} else {
-		/*
-		 * We matched an explicit entry.  If we are able to
-		 * detect this boards settings with our heuristic,
-		 * then we no longer need this entry.
-		 */
-		memcpy(&tmp, &pci_boards[pbn_default], sizeof(struct pci_board));
-		rc = serial_pci_guess_board(dev, &tmp);
-		if (rc == 0 && serial_pci_matches(board, &tmp))
-			moan_device("Redundant entry in serial pci_table.",
-				    dev);
-	}
-
 	nr_ports = board->num_ports;

 	/*
@@ -1718,8 +1511,10 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
 	 */
 	if (quirk->init) {
 		rc = quirk->init(dev);
-		if (rc < 0)
-			goto disable;
+		if (rc < 0) {
+			priv = ERR_PTR(rc);
+			goto err_out;
+		}
 		if (rc)
 			nr_ports = rc;
 	}
@@ -1728,27 +1523,26 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
 		       sizeof(unsigned int) * nr_ports,
 		       GFP_KERNEL);
 	if (!priv) {
-		rc = -ENOMEM;
-		goto deinit;
+		priv = ERR_PTR(-ENOMEM);
+		goto err_deinit;
 	}

 	memset(priv, 0, sizeof(struct serial_private) +
 	       sizeof(unsigned int) * nr_ports);

+	priv->dev = dev;
 	priv->quirk = quirk;
-	pci_set_drvdata(dev, priv);
+
+	memset(&serial_port, 0, sizeof(struct uart_port));
+	serial_port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+	serial_port.uartclk = board->base_baud * 16;
+	serial_port.irq = get_pci_irq(dev, board);
+	serial_port.dev = &dev->dev;

 	for (i = 0; i < nr_ports; i++) {
-		struct uart_port serial_port;
-		memset(&serial_port, 0, sizeof(struct uart_port));
-
-		serial_port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF |
-				    UPF_SHARE_IRQ;
-		serial_port.uartclk = board->base_baud * 16;
-		serial_port.irq = get_pci_irq(dev, board, i);
-		serial_port.dev = &dev->dev;
-		if (quirk->setup(dev, board, &serial_port, i))
+		if (quirk->setup(priv, board, &serial_port, i))
 			break;
+
 #ifdef SERIAL_DEBUG_PCI
 		printk("Setup PCI port: port %x, irq %d, type %d\n",
 		       serial_port.iobase, serial_port.irq, serial_port.iotype);
@@ -1763,24 +1557,21 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)

 	priv->nr = i;

-	return 0;
+	return priv;

- deinit:
+ err_deinit:
 	if (quirk->exit)
 		quirk->exit(dev);
- disable:
-	pci_disable_device(dev);
-	return rc;
+ err_out:
+	return priv;
 }
+EXPORT_SYMBOL_GPL(pciserial_init_ports);

-static void __devexit pciserial_remove_one(struct pci_dev *dev)
+void pciserial_remove_ports(struct serial_private *priv)
 {
-	struct serial_private *priv = pci_get_drvdata(dev);
 	struct pci_serial_quirk *quirk;
 	int i;

-	pci_set_drvdata(dev, NULL);
-
 	for (i = 0; i < priv->nr; i++)
 		serial8250_unregister_port(priv->line[i]);

@@ -1793,25 +1584,123 @@ static void __devexit pciserial_remove_one(struct pci_dev *dev)
 	/*
 	 * Find the exit quirks.
 	 */
-	quirk = find_quirk(dev);
+	quirk = find_quirk(priv->dev);
 	if (quirk->exit)
-		quirk->exit(dev);
+		quirk->exit(priv->dev);
+
+	kfree(priv);
+}
+EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+
+void pciserial_suspend_ports(struct serial_private *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->nr; i++)
+		if (priv->line[i] >= 0)
+			serial8250_suspend_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_suspend_ports);
+
+void pciserial_resume_ports(struct serial_private *priv)
+{
+	int i;
+
+	/*
+	 * Ensure that the board is correctly configured.
+	 */
+	if (priv->quirk->init)
+		priv->quirk->init(priv->dev);
+
+	for (i = 0; i < priv->nr; i++)
+		if (priv->line[i] >= 0)
+			serial8250_resume_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_resume_ports);
+
+/*
+ * Probe one serial board.  Unfortunately, there is no rhyme nor reason
+ * to the arrangement of serial ports on a PCI card.
+ */
+static int __devinit
+pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	struct serial_private *priv;
+	struct pciserial_board *board, tmp;
+	int rc;
+
+	if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
+		printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
+			ent->driver_data);
+		return -EINVAL;
+	}
+
+	board = &pci_boards[ent->driver_data];
+
+	rc = pci_enable_device(dev);
+	if (rc)
+		return rc;
+
+	if (ent->driver_data == pbn_default) {
+		/*
+		 * Use a copy of the pci_board entry for this;
+		 * avoid changing entries in the table.
+		 */
+		memcpy(&tmp, board, sizeof(struct pciserial_board));
+		board = &tmp;
+
+		/*
+		 * We matched one of our class entries.  Try to
+		 * determine the parameters of this board.
+		 */
+		rc = serial_pci_guess_board(dev, board);
+		if (rc)
+			goto disable;
+	} else {
+		/*
+		 * We matched an explicit entry.  If we are able to
+		 * detect this boards settings with our heuristic,
+		 * then we no longer need this entry.
+		 */
+		memcpy(&tmp, &pci_boards[pbn_default],
+		       sizeof(struct pciserial_board));
+		rc = serial_pci_guess_board(dev, &tmp);
+		if (rc == 0 && serial_pci_matches(board, &tmp))
+			moan_device("Redundant entry in serial pci_table.",
+				    dev);
+	}

+	priv = pciserial_init_ports(dev, board);
+	if (!IS_ERR(priv)) {
+		pci_set_drvdata(dev, priv);
+		return 0;
+	}
+
+	rc = PTR_ERR(priv);
+
+ disable:
 	pci_disable_device(dev);
+	return rc;
+}

-	kfree(priv);
+static void __devexit pciserial_remove_one(struct pci_dev *dev)
+{
+	struct serial_private *priv = pci_get_drvdata(dev);
+
+	pci_set_drvdata(dev, NULL);
+
+	pciserial_remove_ports(priv);
+
+	pci_disable_device(dev);
 }

 static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);

-	if (priv) {
-		int i;
+	if (priv)
+		pciserial_suspend_ports(priv);

-		for (i = 0; i < priv->nr; i++)
-			serial8250_suspend_port(priv->line[i]);
-	}
 	pci_save_state(dev);
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 	return 0;
@@ -1825,21 +1714,12 @@ static int pciserial_resume_one(struct pci_dev *dev)
 	pci_restore_state(dev);

 	if (priv) {
-		int i;
-
 		/*
 		 * The device may have been disabled.  Re-enable it.
 		 */
 		pci_enable_device(dev);

-		/*
-		 * Ensure that the board is correctly configured.
-		 */
-		if (priv->quirk->init)
-			priv->quirk->init(dev);
-
-		for (i = 0; i < priv->nr; i++)
-			serial8250_resume_port(priv->line[i]);
+		pciserial_resume_ports(priv);
 	}
 	return 0;
 }
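
The probe split above hinges on the kernel's ERR_PTR convention: pciserial_init_ports() returns either a live serial_private or a negative errno folded into the pointer, so one return value carries both outcomes. A sketch of the caller-side idiom (the function name example_attach is illustrative):

#include <linux/err.h>

static int example_attach(struct pci_dev *dev, struct pciserial_board *board)
{
	struct serial_private *priv;

	priv = pciserial_init_ports(dev, board);
	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* unfold the errno */

	pci_set_drvdata(dev, priv);
	return 0;
}

Separating the PCI glue (enable, drvdata) from the port bookkeeping is also what lets suspend/resume reuse pciserial_suspend_ports()/pciserial_resume_ports() on the same private structure.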
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b0..a2f67245f6da 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
  * completion callbacks.  2.5 should have fixed those bugs...
  */

-static void defer_bh (struct usbnet *dev, struct sk_buff *skb)
+static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head	*list = skb->list;
 	unsigned long		flags;

-	spin_lock_irqsave (&list->lock, flags);
-	__skb_unlink (skb, list);
-	spin_unlock (&list->lock);
-	spin_lock (&dev->done.lock);
-	__skb_queue_tail (&dev->done, skb);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock(&list->lock);
+	spin_lock(&dev->done.lock);
+	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule (&dev->bh);
-	spin_unlock_irqrestore (&dev->done.lock, flags);
+		tasklet_schedule(&dev->bh);
+	spin_unlock_irqrestore(&dev->done.lock, flags);
 }

 /* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ block:
 			break;
 	}

-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->rxq);

 	if (urb) {
 		if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)

 	urb->dev = NULL;
 	entry->state = tx_done;
-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->txq);
 }

 /*-------------------------------------------------------------------------*/
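
With skb->list gone from struct sk_buff, defer_bh() can no longer ask the skb which queue it sits on; each caller now names the source queue explicitly (&dev->rxq from the rx path, &dev->txq from tx completion). The lock ordering is unchanged, as this sketch of the pattern shows (the helper name move_to_done is illustrative):

static void move_to_done(struct usbnet *dev, struct sk_buff *skb,
			 struct sk_buff_head *src)
{
	unsigned long flags;

	spin_lock_irqsave(&src->lock, flags);	/* queue named by caller */
	__skb_unlink(skb, src);
	spin_unlock(&src->lock);

	spin_lock(&dev->done.lock);
	__skb_queue_tail(&dev->done, skb);
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);	/* first entry arms the bh */
	spin_unlock_irqrestore(&dev->done.lock, flags);
}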
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b5a5e04b6d37..498ad505fa5f 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -86,9 +86,9 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
 
 	dev->driver = driver;

-	dev->groups = 23;
+	dev->groups = 1;
 	dev->seq = 1;
-	dev->nls = netlink_kernel_create(NETLINK_W1, NULL);
+	dev->nls = netlink_kernel_create(NETLINK_W1, 1, NULL, THIS_MODULE);
 	if (!dev->nls) {
 		printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n",
 			NETLINK_NFLOG, dev->dev.bus_id);
@@ -225,3 +225,5 @@ void w1_remove_master_device(struct w1_bus_master *bm)

 EXPORT_SYMBOL(w1_add_master_device);
 EXPORT_SYMBOL(w1_remove_master_device);
+
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_W1);
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 2a82fb055c70..e7b774423dd6 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -51,7 +51,7 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
 
 	memcpy(data, msg, sizeof(struct w1_netlink_msg));

-	NETLINK_CB(skb).dst_groups = dev->groups;
+	NETLINK_CB(skb).dst_group = dev->groups;
 	netlink_broadcast(dev->nls, skb, 0, dev->groups, GFP_ATOMIC);

 nlmsg_failure:
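
Both w1 hunks track the same netlink rework: multicast destinations are now plain group numbers rather than a 32-bit bitmask (hence dst_groups becoming dst_group), and a kernel netlink socket declares its group count and owning module at creation time. A condensed sketch of the updated send path, with the values taken from the patch and the creation and broadcast steps compressed into one illustrative function:

static int w1_send_sketch(struct sk_buff *skb)
{
	/* one multicast group, no input callback, owned by this module */
	struct sock *nls = netlink_kernel_create(NETLINK_W1, 1, NULL,
						 THIS_MODULE);
	if (!nls)
		return -ENOMEM;

	NETLINK_CB(skb).dst_group = 1;			/* group number */
	return netlink_broadcast(nls, skb, 0, 1, GFP_ATOMIC);
}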
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 63f5df9afb71..fd528433de43 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -97,7 +97,7 @@ extern int adfs_dir_update(struct super_block *sb, struct object_info *obj);
 extern struct inode_operations adfs_file_inode_operations;
 extern struct file_operations adfs_file_operations;

-extern inline __u32 signed_asl(__u32 val, signed int shift)
+static inline __u32 signed_asl(__u32 val, signed int shift)
 {
 	if (shift >= 0)
 		val <<= shift;
@@ -112,7 +112,7 @@ extern inline __u32 signed_asl(__u32 val, signed int shift)
  *
  * The root directory ID should always be looked up in the map [3.4]
  */
-extern inline int
+static inline int
 __adfs_block_map(struct super_block *sb, unsigned int object_id,
 		 unsigned int block)
 {
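
"extern inline" in a header is a trap: if the compiler declines to inline (for instance at -O0), every includer references a symbol that is defined nowhere, and gcc's GNU89 versus C99 inline semantics flip the meaning again. "static inline" gives each translation unit its own always-valid copy, which is why the kernel standardized on it. A minimal illustration with a hypothetical helper:

/* somehdr.h -- sketch only */
static inline int clamp_nonneg(int v)
{
	return v < 0 ? 0 : v;	/* emitted locally if not inlined */
}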
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 30ab70ce5547..3497125189df 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -643,7 +643,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 					netfid, length,
 					pfLock->fl_start, numUnlock, numLock, lockType,
 					wait_flag);
-	if (rc == 0 && (pfLock->fl_flags & FL_POSIX))
+	if (pfLock->fl_flags & FL_POSIX)
 		posix_lock_file_wait(file, pfLock);
 	FreeXid(xid);
 	return rc;
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index ff150fedb981..52930915bad8 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -38,7 +38,7 @@ struct hppfs_inode_info {
 
 static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode)
 {
-	return(list_entry(inode, struct hppfs_inode_info, vfs_inode));
+	return container_of(inode, struct hppfs_inode_info, vfs_inode);
 }

 #define HPPFS_SUPER_MAGIC 0xb00000ee
@@ -662,42 +662,36 @@ static int hppfs_readlink(struct dentry *dentry, char *buffer, int buflen)
 {
 	struct file *proc_file;
 	struct dentry *proc_dentry;
-	int (*readlink)(struct dentry *, char *, int);
-	int err, n;
+	int ret;

 	proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
 	proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
-	err = PTR_ERR(proc_dentry);
-	if(IS_ERR(proc_dentry))
-		return(err);
+	if (IS_ERR(proc_file))
+		return PTR_ERR(proc_file);

-	readlink = proc_dentry->d_inode->i_op->readlink;
-	n = (*readlink)(proc_dentry, buffer, buflen);
+	ret = proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen);

 	fput(proc_file);

-	return(n);
+	return ret;
 }

-static int hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void* hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct file *proc_file;
 	struct dentry *proc_dentry;
-	int (*follow_link)(struct dentry *, struct nameidata *);
-	int err, n;
+	void *ret;

 	proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
 	proc_file = dentry_open(dget(proc_dentry), NULL, O_RDONLY);
-	err = PTR_ERR(proc_dentry);
-	if(IS_ERR(proc_dentry))
-		return(err);
+	if (IS_ERR(proc_file))
+		return proc_file;

-	follow_link = proc_dentry->d_inode->i_op->follow_link;
-	n = (*follow_link)(proc_dentry, nd);
+	ret = proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd);

 	fput(proc_file);

-	return(n);
+	return ret;
 }

 static struct inode_operations hppfs_dir_iops = {
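
The follow_link conversion tracks the VFS change in this cycle: the method now returns a void * cookie (later handed to ->put_link) instead of an int, with errors carried via ERR_PTR — which is why the error path above can simply return proc_file. A hedged sketch of the contract, not hppfs code:

static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	if (!dentry->d_inode)
		return ERR_PTR(-ENOENT);	/* error rides the pointer */

	nd_set_link(nd, "target");	/* hand the path to the VFS */
	return NULL;			/* no cookie for ->put_link */
}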
diff --git a/fs/inotify.c b/fs/inotify.c
index 868901b1e779..2e4e2a57708c 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -353,7 +353,7 @@ static int inotify_dev_get_wd(struct inotify_device *dev,
 	do {
 		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
 			return -ENOSPC;
-		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd, &watch->wd);
+		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
 	} while (ret == -EAGAIN);

 	return ret;
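
idr_get_new_above() allocates at or above its starting_id argument, so passing dev->last_wd could hand back the most recently issued watch descriptor again; dev->last_wd+1 keeps descriptors strictly increasing. The surrounding do/while is the standard shape of the old idr API, sketched here as a generic wrapper (alloc_next_id is illustrative, not an API from the patch):

static int alloc_next_id(struct idr *idr, void *ptr, int last_id, int *id)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOSPC;			/* preload failed */
		ret = idr_get_new_above(idr, ptr, last_id + 1, id);
	} while (ret == -EAGAIN);			/* raced; preload again */

	return ret;
}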
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 1cae14e741eb..49ccde3937f9 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1390,6 +1390,8 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
 
 	jfs_info("jfs_lookup: name = %s", name);

+	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
+		dentry->d_op = &jfs_ci_dentry_operations;

 	if ((name[0] == '.') && (len == 1))
 		inum = dip->i_ino;
@@ -1417,9 +1419,6 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
 		return ERR_PTR(-EACCES);
 	}

-	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
-		dentry->d_op = &jfs_ci_dentry_operations;
-
 	dentry = d_splice_alias(ip, dentry);

 	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
diff --git a/fs/smbfs/sock.c b/fs/smbfs/sock.c
index 93f3cd22a2e9..6815b1b12b68 100644
--- a/fs/smbfs/sock.c
+++ b/fs/smbfs/sock.c
@@ -15,12 +15,12 @@
 #include <linux/file.h>
 #include <linux/in.h>
 #include <linux/net.h>
-#include <linux/tcp.h>
 #include <linux/mm.h>
 #include <linux/netdevice.h>
 #include <linux/smp_lock.h>
 #include <linux/workqueue.h>
 #include <net/scm.h>
+#include <net/tcp_states.h>
 #include <net/ip.h>

 #include <linux/smb_fs.h>
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index d727dc960634..970a33f03299 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -228,6 +228,10 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
 	struct sysfs_dirent * sd;
 	struct sysfs_dirent * parent_sd = dir->d_fsdata;

+	if (dir->d_inode == NULL)
+		/* no inode means this hasn't been made visible yet */
+		return;
+
 	down(&dir->d_inode->i_sem);
 	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
 		if (!sd->s_element)
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
index d00259d3dc78..b5193229132a 100644
--- a/include/asm-alpha/socket.h
+++ b/include/asm-alpha/socket.h
@@ -25,6 +25,8 @@
 #define SO_ERROR	0x1007
 #define SO_SNDBUF	0x1001
 #define SO_RCVBUF	0x1002
+#define SO_SNDBUFFORCE	0x100a
+#define SO_RCVBUFFORCE	0x100b
 #define SO_RCVLOWAT	0x1010
 #define SO_SNDLOWAT	0x1011
 #define SO_RCVTIMEO	0x1012
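
SO_SNDBUFFORCE/SO_RCVBUFFORCE are privileged variants of SO_SNDBUF/SO_RCVBUF: with CAP_NET_ADMIN they may set a socket's buffers above the wmem_max/rmem_max sysctl ceilings. The same pair is added to every architecture's socket.h below, each in that ABI's numbering. A userspace usage sketch (the 4 MB figure is illustrative):

#include <sys/socket.h>

static int force_sndbuf(int fd)
{
	int val = 4 * 1024 * 1024;	/* may exceed wmem_max if privileged */

	return setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &val, sizeof(val));
}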
diff --git a/include/asm-arm/arch-sa1100/mcp.h b/include/asm-arm/arch-sa1100/mcp.h
new file mode 100644
index 000000000000..f58a22755c61
--- /dev/null
+++ b/include/asm-arm/arch-sa1100/mcp.h
@@ -0,0 +1,21 @@
+/*
+ * linux/include/asm-arm/arch-sa1100/mcp.h
+ *
+ * Copyright (C) 2005 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ARCH_MCP_H
+#define __ASM_ARM_ARCH_MCP_H
+
+#include <linux/types.h>
+
+struct mcp_plat_data {
+	u32 mccr0;
+	u32 mccr1;
+	unsigned int sclk_rate;
+};
+
+#endif
diff --git a/include/asm-arm/hardware/gic.h b/include/asm-arm/hardware/gic.h
new file mode 100644
index 000000000000..3fa5eb70f64e
--- /dev/null
+++ b/include/asm-arm/hardware/gic.h
@@ -0,0 +1,41 @@
+/*
+ * linux/include/asm-arm/hardware/gic.h
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_HARDWARE_GIC_H
+#define __ASM_ARM_HARDWARE_GIC_H
+
+#include <linux/compiler.h>
+
+#define GIC_CPU_CTRL			0x00
+#define GIC_CPU_PRIMASK			0x04
+#define GIC_CPU_BINPOINT		0x08
+#define GIC_CPU_INTACK			0x0c
+#define GIC_CPU_EOI			0x10
+#define GIC_CPU_RUNNINGPRI		0x14
+#define GIC_CPU_HIGHPRI			0x18
+
+#define GIC_DIST_CTRL			0x000
+#define GIC_DIST_CTR			0x004
+#define GIC_DIST_ENABLE_SET		0x100
+#define GIC_DIST_ENABLE_CLEAR		0x180
+#define GIC_DIST_PENDING_SET		0x200
+#define GIC_DIST_PENDING_CLEAR		0x280
+#define GIC_DIST_ACTIVE_BIT		0x300
+#define GIC_DIST_PRI			0x400
+#define GIC_DIST_TARGET			0x800
+#define GIC_DIST_CONFIG			0xc00
+#define GIC_DIST_SOFTINT		0xf00
+
+#ifndef __ASSEMBLY__
+void gic_dist_init(void __iomem *base);
+void gic_cpu_init(void __iomem *base);
+void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+#endif
+
+#endif
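
A machine port would typically call the two init hooks from its interrupt bring-up, distributor before CPU interface, while gic_raise_softirq() serves as the IPI path on SMP. A sketch under assumed, pre-ioremapped base addresses (GIC_DIST_VIRT and GIC_CPU_VIRT are placeholders, not real platform constants):

static void __init board_init_irq(void)
{
	gic_dist_init(GIC_DIST_VIRT);	/* distributor: global state first */
	gic_cpu_init(GIC_CPU_VIRT);	/* then this CPU's own interface */
}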
diff --git a/include/asm-arm/socket.h b/include/asm-arm/socket.h
index 46d20585d951..3c51da6438c9 100644
--- a/include/asm-arm/socket.h
+++ b/include/asm-arm/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-arm26/socket.h b/include/asm-arm26/socket.h
index 46d20585d951..3c51da6438c9 100644
--- a/include/asm-arm26/socket.h
+++ b/include/asm-arm26/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-cris/socket.h b/include/asm-cris/socket.h
index f159b4f165f7..8b1da3e58c55 100644
--- a/include/asm-cris/socket.h
+++ b/include/asm-cris/socket.h
@@ -16,6 +16,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-frv/socket.h b/include/asm-frv/socket.h
index c3be17c7de4b..7177f8b9817c 100644
--- a/include/asm-frv/socket.h
+++ b/include/asm-frv/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-h8300/socket.h b/include/asm-h8300/socket.h
index af33b8525dcf..d98cf85bafc1 100644
--- a/include/asm-h8300/socket.h
+++ b/include/asm-h8300/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h
index f949e44c2a35..67d3630c4e89 100644
--- a/include/asm-i386/checksum.h
+++ b/include/asm-i386/checksum.h
@@ -83,7 +83,7 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	    "adcl $0, %0	;\n"
 	    "notl %0	;\n"
 "2:	;\n"
-	/* Since the input registers which are loaded with iph and ipl
+	/* Since the input registers which are loaded with iph and ihl
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
index 07f6b38ad140..802ae76195b7 100644
--- a/include/asm-i386/socket.h
+++ b/include/asm-i386/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
index 21a9f10d6baa..a255006fb7b5 100644
--- a/include/asm-ia64/socket.h
+++ b/include/asm-ia64/socket.h
@@ -23,6 +23,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-m32r/checksum.h b/include/asm-m32r/checksum.h
index 99f37dbf2558..877ebf46e9ff 100644
--- a/include/asm-m32r/checksum.h
+++ b/include/asm-m32r/checksum.h
@@ -105,7 +105,7 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	"	addx	%0, %3 \n"
 	"	.fillinsn\n"
 	"2: \n"
-	/* Since the input registers which are loaded with iph and ipl
+	/* Since the input registers which are loaded with iph and ihl
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=&r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmpreg0), "=&r" (tmpreg1)
diff --git a/include/asm-m32r/socket.h b/include/asm-m32r/socket.h
index 159519d99042..8b6680f223c0 100644
--- a/include/asm-m32r/socket.h
+++ b/include/asm-m32r/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index 99a516709210..206313e2a817 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -138,13 +138,13 @@ extern unsigned long m68k_memoffset;
 #define __pa(vaddr)	((unsigned long)(vaddr)+m68k_memoffset)
 #define __va(paddr)	((void *)((unsigned long)(paddr)-m68k_memoffset))
 #else
-#define __pa(vaddr)	virt_to_phys((void *)vaddr)
-#define __va(paddr)	phys_to_virt((unsigned long)paddr)
+#define __pa(vaddr)	virt_to_phys((void *)(vaddr))
+#define __va(paddr)	phys_to_virt((unsigned long)(paddr))
 #endif
 
 #else	/* !CONFIG_SUN3 */
 /* This #define is a horrible hack to suppress lots of warnings. --m */
-#define __pa(x) ___pa((unsigned long)x)
+#define __pa(x) ___pa((unsigned long)(x))
 static inline unsigned long ___pa(unsigned long x)
 {
 	if(x == 0)
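The m68k page.h change is pure macro hygiene: without parentheses around the macro argument, the cast in the expansion binds to only part of a compound argument. A toy illustration of the failure mode being fixed (stand-in macros, not the kernel's):

    #include <stdio.h>

    #define OFFSET 0x1000ul

    /* Unparenthesized argument: the cast binds tighter than '?:'. */
    #define BAD_PA(vaddr)  ((unsigned long)vaddr + OFFSET)
    /* Fixed form, as in the patch. */
    #define GOOD_PA(vaddr) ((unsigned long)(vaddr) + OFFSET)

    int main(void)
    {
        int use_a = 1;
        char *a = (char *)0x2000, *b = (char *)0x4000;

        /* Expands to (unsigned long)use_a ? a : b + OFFSET, so no offset is added. */
        printf("bad:  %#lx\n", (unsigned long)BAD_PA(use_a ? a : b));
        /* Expands to (unsigned long)(use_a ? a : b) + OFFSET, as intended. */
        printf("good: %#lx\n", GOOD_PA(use_a ? a : b));
        return 0;
    }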
diff --git a/include/asm-m68k/socket.h b/include/asm-m68k/socket.h
index 8d0b9fc2d07e..f578ca4b776a 100644
--- a/include/asm-m68k/socket.h
+++ b/include/asm-m68k/socket.h
@@ -14,6 +14,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-mips/socket.h b/include/asm-mips/socket.h
index 020b4db70ee5..d478a86294ee 100644
--- a/include/asm-mips/socket.h
+++ b/include/asm-mips/socket.h
@@ -37,6 +37,8 @@ To add: #define SO_REUSEPORT 0x0200	/* Allow local address and port reuse.  */
 #define SO_ERROR	0x1007	/* get error status and clear */
 #define SO_SNDBUF	0x1001	/* Send buffer size. */
 #define SO_RCVBUF	0x1002	/* Receive buffer. */
+#define SO_SNDBUFFORCE	0x100a
+#define SO_RCVBUFFORCE	0x100b
 #define SO_SNDLOWAT	0x1003	/* send low-water mark */
 #define SO_RCVLOWAT	0x1004	/* receive low-water mark */
 #define SO_SNDTIMEO	0x1005	/* send timeout */
diff --git a/include/asm-parisc/socket.h b/include/asm-parisc/socket.h
index 4a77996c1862..1bf54dc53c10 100644
--- a/include/asm-parisc/socket.h
+++ b/include/asm-parisc/socket.h
@@ -16,6 +16,8 @@
 /* To add :#define SO_REUSEPORT 0x0200 */
 #define SO_SNDBUF	0x1001
 #define SO_RCVBUF	0x1002
+#define SO_SNDBUFFORCE	0x100a
+#define SO_RCVBUFFORCE	0x100b
 #define SO_SNDLOWAT	0x1003
 #define SO_RCVLOWAT	0x1004
 #define SO_SNDTIMEO	0x1005
diff --git a/include/asm-ppc/8253pit.h b/include/asm-powerpc/8253pit.h
index 285f78488ccb..862708a749b0 100644
--- a/include/asm-ppc/8253pit.h
+++ b/include/asm-powerpc/8253pit.h
@@ -5,6 +5,6 @@
 #ifndef _8253PIT_H
 #define _8253PIT_H
 
 #define PIT_TICK_RATE	1193182UL
 
 #endif
diff --git a/include/asm-ppc/agp.h b/include/asm-powerpc/agp.h
index ca9e423307f4..ca9e423307f4 100644
--- a/include/asm-ppc/agp.h
+++ b/include/asm-powerpc/agp.h
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
new file mode 100644
index 000000000000..6d68ad7e0ea3
--- /dev/null
+++ b/include/asm-powerpc/cputime.h
@@ -0,0 +1 @@
+#include <asm-generic/cputime.h>
diff --git a/include/asm-ppc/div64.h b/include/asm-powerpc/div64.h
index 6cd978cefb28..6cd978cefb28 100644
--- a/include/asm-ppc/div64.h
+++ b/include/asm-powerpc/div64.h
diff --git a/include/asm-powerpc/emergency-restart.h b/include/asm-powerpc/emergency-restart.h
new file mode 100644
index 000000000000..3711bd9d50bd
--- /dev/null
+++ b/include/asm-powerpc/emergency-restart.h
@@ -0,0 +1 @@
+#include <asm-generic/emergency-restart.h>
diff --git a/include/asm-ppc/errno.h b/include/asm-powerpc/errno.h
index 19f20bd41ae6..19f20bd41ae6 100644
--- a/include/asm-ppc/errno.h
+++ b/include/asm-powerpc/errno.h
diff --git a/include/asm-ppc/ioctl.h b/include/asm-powerpc/ioctl.h
index 93c6acfdd0fd..93c6acfdd0fd 100644
--- a/include/asm-ppc/ioctl.h
+++ b/include/asm-powerpc/ioctl.h
diff --git a/include/asm-ppc/ioctls.h b/include/asm-powerpc/ioctls.h
index f5b7f2b055e7..f5b7f2b055e7 100644
--- a/include/asm-ppc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
diff --git a/include/asm-ppc/ipc.h b/include/asm-powerpc/ipc.h
index a46e3d9c2a3f..a46e3d9c2a3f 100644
--- a/include/asm-ppc/ipc.h
+++ b/include/asm-powerpc/ipc.h
diff --git a/include/asm-ppc/linkage.h b/include/asm-powerpc/linkage.h
index 291c2d01c44f..291c2d01c44f 100644
--- a/include/asm-ppc/linkage.h
+++ b/include/asm-powerpc/linkage.h
diff --git a/include/asm-ppc64/local.h b/include/asm-powerpc/local.h
index c11c530f74d0..c11c530f74d0 100644
--- a/include/asm-ppc64/local.h
+++ b/include/asm-powerpc/local.h
diff --git a/include/asm-ppc/namei.h b/include/asm-powerpc/namei.h
index 29c9ec832133..29c9ec832133 100644
--- a/include/asm-ppc/namei.h
+++ b/include/asm-powerpc/namei.h
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
new file mode 100644
index 000000000000..06a959d67234
--- /dev/null
+++ b/include/asm-powerpc/percpu.h
@@ -0,0 +1 @@
+#include <asm-generic/percpu.h>
diff --git a/include/asm-ppc/poll.h b/include/asm-powerpc/poll.h
index be5024913c62..be5024913c62 100644
--- a/include/asm-ppc/poll.h
+++ b/include/asm-powerpc/poll.h
diff --git a/include/asm-powerpc/resource.h b/include/asm-powerpc/resource.h
new file mode 100644
index 000000000000..04bc4db8921b
--- /dev/null
+++ b/include/asm-powerpc/resource.h
@@ -0,0 +1 @@
+#include <asm-generic/resource.h>
diff --git a/include/asm-ppc/shmparam.h b/include/asm-powerpc/shmparam.h
index d6250602ae64..d6250602ae64 100644
--- a/include/asm-ppc/shmparam.h
+++ b/include/asm-powerpc/shmparam.h
diff --git a/include/asm-ppc/string.h b/include/asm-powerpc/string.h
index 225575997392..225575997392 100644
--- a/include/asm-ppc/string.h
+++ b/include/asm-powerpc/string.h
diff --git a/include/asm-ppc/unaligned.h b/include/asm-powerpc/unaligned.h
index 45520d9b85d1..45520d9b85d1 100644
--- a/include/asm-ppc/unaligned.h
+++ b/include/asm-powerpc/unaligned.h
diff --git a/include/asm-ppc/xor.h b/include/asm-powerpc/xor.h
index c82eb12a5b18..c82eb12a5b18 100644
--- a/include/asm-ppc/xor.h
+++ b/include/asm-powerpc/xor.h
diff --git a/include/asm-ppc/cputime.h b/include/asm-ppc/cputime.h
deleted file mode 100644
index 8e9faf5ce720..000000000000
--- a/include/asm-ppc/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __PPC_CPUTIME_H
-#define __PPC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __PPC_CPUTIME_H */
diff --git a/include/asm-ppc/emergency-restart.h b/include/asm-ppc/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/include/asm-ppc/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-ppc/hdreg.h b/include/asm-ppc/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-ppc/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hdreg.h>
diff --git a/include/asm-ppc/local.h b/include/asm-ppc/local.h
deleted file mode 100644
index b08e3eced10e..000000000000
--- a/include/asm-ppc/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __PPC_LOCAL_H
-#define __PPC_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __PPC_LOCAL_H */
diff --git a/include/asm-ppc/percpu.h b/include/asm-ppc/percpu.h
deleted file mode 100644
index d66667cd5878..000000000000
--- a/include/asm-ppc/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_PPC_PERCPU__
-#define __ARCH_PPC_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_PPC_PERCPU__ */
diff --git a/include/asm-ppc/resource.h b/include/asm-ppc/resource.h
deleted file mode 100644
index 86a1ea23a6ed..000000000000
--- a/include/asm-ppc/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PPC_RESOURCE_H
-#define _PPC_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/include/asm-ppc/socket.h b/include/asm-ppc/socket.h
index 4134376b0f66..296e1a3469d0 100644
--- a/include/asm-ppc/socket.h
+++ b/include/asm-ppc/socket.h
@@ -20,6 +20,8 @@
 #define SO_BROADCAST	6
 #define SO_SNDBUF	7
 #define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
 #define SO_KEEPALIVE	9
 #define SO_OOBINLINE	10
 #define SO_NO_CHECK	11
diff --git a/include/asm-ppc64/8253pit.h b/include/asm-ppc64/8253pit.h
deleted file mode 100644
index 285f78488ccb..000000000000
--- a/include/asm-ppc64/8253pit.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE	1193182UL
-
-#endif
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-ppc64/abs_addr.h
index 6d4e8e787058..84c24d4cdb71 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-ppc64/abs_addr.h
@@ -16,93 +16,51 @@
 #include <asm/page.h>
 #include <asm/prom.h>
 #include <asm/lmb.h>
+#include <asm/firmware.h>
 
-typedef u32 msChunks_entry;
-struct msChunks {
+struct mschunks_map {
 	unsigned long num_chunks;
 	unsigned long chunk_size;
 	unsigned long chunk_shift;
 	unsigned long chunk_mask;
-	msChunks_entry *abs;
+	u32 *mapping;
 };
 
-extern struct msChunks msChunks;
+extern struct mschunks_map mschunks_map;
 
-extern unsigned long msChunks_alloc(unsigned long, unsigned long, unsigned long);
-extern unsigned long reloc_offset(void);
+/* Chunks are 256 KB */
+#define MSCHUNKS_CHUNK_SHIFT	(18)
+#define MSCHUNKS_CHUNK_SIZE	(1UL << MSCHUNKS_CHUNK_SHIFT)
+#define MSCHUNKS_OFFSET_MASK	(MSCHUNKS_CHUNK_SIZE - 1)
 
-#ifdef CONFIG_MSCHUNKS
-
-static inline unsigned long
-chunk_to_addr(unsigned long chunk)
+static inline unsigned long chunk_to_addr(unsigned long chunk)
 {
-	unsigned long offset = reloc_offset();
-	struct msChunks *_msChunks = PTRRELOC(&msChunks);
-
-	return chunk << _msChunks->chunk_shift;
+	return chunk << MSCHUNKS_CHUNK_SHIFT;
 }
 
-static inline unsigned long
-addr_to_chunk(unsigned long addr)
+static inline unsigned long addr_to_chunk(unsigned long addr)
 {
-	unsigned long offset = reloc_offset();
-	struct msChunks *_msChunks = PTRRELOC(&msChunks);
-
-	return addr >> _msChunks->chunk_shift;
+	return addr >> MSCHUNKS_CHUNK_SHIFT;
 }
 
-static inline unsigned long
-chunk_offset(unsigned long addr)
+static inline unsigned long phys_to_abs(unsigned long pa)
 {
-	unsigned long offset = reloc_offset();
-	struct msChunks *_msChunks = PTRRELOC(&msChunks);
+	unsigned long chunk;
 
-	return addr & _msChunks->chunk_mask;
-}
+	/* This is a no-op on non-iSeries */
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return pa;
 
-static inline unsigned long
-abs_chunk(unsigned long pchunk)
-{
-	unsigned long offset = reloc_offset();
-	struct msChunks *_msChunks = PTRRELOC(&msChunks);
-	if ( pchunk >= _msChunks->num_chunks ) {
-		return pchunk;
-	}
-	return PTRRELOC(_msChunks->abs)[pchunk];
-}
+	chunk = addr_to_chunk(pa);
 
-/* A macro so it can take pointers or unsigned long. */
-#define phys_to_abs(pa)						     \
-	({ unsigned long _pa = (unsigned long)(pa);		     \
-	   chunk_to_addr(abs_chunk(addr_to_chunk(_pa))) + chunk_offset(_pa); \
-	})
+	if (chunk < mschunks_map.num_chunks)
+		chunk = mschunks_map.mapping[chunk];
 
-static inline unsigned long
-physRpn_to_absRpn(unsigned long rpn)
-{
-	unsigned long pa = rpn << PAGE_SHIFT;
-	unsigned long aa = phys_to_abs(pa);
-	return (aa >> PAGE_SHIFT);
+	return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
 }
 
-/* A macro so it can take pointers or unsigned long. */
-#define abs_to_phys(aa) lmb_abs_to_phys((unsigned long)(aa))
-
-#else  /* !CONFIG_MSCHUNKS */
-
-#define chunk_to_addr(chunk) ((unsigned long)(chunk))
-#define addr_to_chunk(addr) (addr)
-#define chunk_offset(addr) (0)
-#define abs_chunk(pchunk) (pchunk)
-
-#define phys_to_abs(pa) (pa)
-#define physRpn_to_absRpn(rpn) (rpn)
-#define abs_to_phys(aa) (aa)
-
-#endif /* !CONFIG_MSCHUNKS */
-
 /* Convenience macros */
 #define virt_to_abs(va) phys_to_abs(__pa(va))
-#define abs_to_virt(aa) __va(abs_to_phys(aa))
+#define abs_to_virt(aa) __va(aa)
 
 #endif /* _ABS_ADDR_H */
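The rewritten abs_addr.h drops the CONFIG_MSCHUNKS compile-time split: phys_to_abs() now decides at runtime, and only iSeries ever remaps. The arithmetic is plain fixed-size chunking over 256 KB chunks. A standalone sketch of the same translation with a toy mapping table (the table contents are illustrative, not real firmware data):

    #include <stdio.h>

    #define CHUNK_SHIFT 18                        /* 256 KB chunks, as above */
    #define OFFSET_MASK ((1UL << CHUNK_SHIFT) - 1)

    /* Toy map: physical chunk 0 lives at absolute chunk 4, chunk 1 at 5. */
    static unsigned int mapping[] = { 4, 5 };
    static unsigned long num_chunks = 2;

    static unsigned long phys_to_abs(unsigned long pa)
    {
        unsigned long chunk = pa >> CHUNK_SHIFT;

        if (chunk < num_chunks)
            chunk = mapping[chunk];
        return (chunk << CHUNK_SHIFT) + (pa & OFFSET_MASK);
    }

    int main(void)
    {
        /* 0x50000 sits 0x10000 into chunk 1, so it lands in chunk 5. */
        printf("%#lx -> %#lx\n", 0x50000UL, phys_to_abs(0x50000UL));
        return 0;
    }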
diff --git a/include/asm-ppc64/agp.h b/include/asm-ppc64/agp.h
deleted file mode 100644
index ca9e423307f4..000000000000
--- a/include/asm-ppc64/agp.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/io.h>
-
-/* nothing much needed here */
-
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
-#define flush_agp_mappings()
-#define flush_agp_cache() mb()
-
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order)		\
-	((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order)	\
-	free_pages((unsigned long)(table), (order))
-
-#endif
diff --git a/include/asm-ppc64/bug.h b/include/asm-ppc64/bug.h
index 169868fa307d..160178278861 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-ppc64/bug.h
@@ -43,8 +43,8 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 		".section __bug_table,\"a\"\n\t"	\
 		"	.llong 1b,%1,%2,%3\n"		\
 		".previous"				\
-		: : "r" (x), "i" (__LINE__), "i" (__FILE__), \
-		    "i" (__FUNCTION__));		\
+		: : "r" ((long long)(x)), "i" (__LINE__), \
+		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define WARN_ON(x) do {					\
@@ -53,7 +53,8 @@ struct bug_entry *find_bug(unsigned long bugaddr);
 		".section __bug_table,\"a\"\n\t"	\
 		"	.llong 1b,%1,%2,%3\n"		\
 		".previous"				\
-		: : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
+		: : "r" ((long long)(x)),		\
+		    "i" (__LINE__ + BUG_WARNING_TRAP),	\
 		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
index d67fa9e26079..ae6cf3830108 100644
--- a/include/asm-ppc64/cputable.h
+++ b/include/asm-ppc64/cputable.h
@@ -56,11 +56,6 @@ struct cpu_spec {
  * BHT, SPD, etc... from head.S before branching to identify_machine
  */
 	cpu_setup_t	cpu_setup;
-
-	/* This is used to identify firmware features which are available
-	 * to the kernel.
-	 */
-	unsigned long	firmware_features;
 };
 
 extern struct cpu_spec		cpu_specs[];
@@ -71,39 +66,6 @@ static inline unsigned long cpu_has_feature(unsigned long feature)
 	return cur_cpu_spec->cpu_features & feature;
 }
 
-
-/* firmware feature bitmask values */
-#define FIRMWARE_MAX_FEATURES 63
-
-#define FW_FEATURE_PFT		(1UL<<0)
-#define FW_FEATURE_TCE		(1UL<<1)
-#define FW_FEATURE_SPRG0	(1UL<<2)
-#define FW_FEATURE_DABR		(1UL<<3)
-#define FW_FEATURE_COPY		(1UL<<4)
-#define FW_FEATURE_ASR		(1UL<<5)
-#define FW_FEATURE_DEBUG	(1UL<<6)
-#define FW_FEATURE_TERM		(1UL<<7)
-#define FW_FEATURE_PERF		(1UL<<8)
-#define FW_FEATURE_DUMP		(1UL<<9)
-#define FW_FEATURE_INTERRUPT	(1UL<<10)
-#define FW_FEATURE_MIGRATE	(1UL<<11)
-#define FW_FEATURE_PERFMON	(1UL<<12)
-#define FW_FEATURE_CRQ		(1UL<<13)
-#define FW_FEATURE_VIO		(1UL<<14)
-#define FW_FEATURE_RDMA		(1UL<<15)
-#define FW_FEATURE_LLAN		(1UL<<16)
-#define FW_FEATURE_BULK		(1UL<<17)
-#define FW_FEATURE_XDABR	(1UL<<18)
-#define FW_FEATURE_MULTITCE	(1UL<<19)
-#define FW_FEATURE_SPLPAR	(1UL<<20)
-
-typedef struct {
-	unsigned long	val;
-	char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-
 #endif /* __ASSEMBLY__ */
 
 /* CPU kernel features */
@@ -140,10 +102,8 @@ extern firmware_feature_t firmware_features_table[];
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
 #define CPU_FTR_CTRL			ASM_CONST(0x0000100000000000)
 
-/* Platform firmware features */
-#define FW_FTR_				ASM_CONST(0x0000000000000001)
-
 #ifndef __ASSEMBLY__
+
 #define COMMON_USER_PPC64	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
 				 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
 
@@ -156,10 +116,9 @@ extern firmware_feature_t firmware_features_table[];
 #define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE)
 #else
 #define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif
+#endif /* CONFIG_PPC_ISERIES */
 
-#define COMMON_PPC64_FW	(0)
-#endif
+#endif /* __ASSEMBLY */
 
 #ifdef __ASSEMBLY__
 
diff --git a/include/asm-ppc64/cputime.h b/include/asm-ppc64/cputime.h
deleted file mode 100644
index 8e9faf5ce720..000000000000
--- a/include/asm-ppc64/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __PPC_CPUTIME_H
-#define __PPC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __PPC_CPUTIME_H */
diff --git a/include/asm-ppc64/div64.h b/include/asm-ppc64/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/include/asm-ppc64/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/include/asm-ppc64/emergency-restart.h b/include/asm-ppc64/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/include/asm-ppc64/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-ppc64/errno.h b/include/asm-ppc64/errno.h
deleted file mode 100644
index 69bc3b0c6cbe..000000000000
--- a/include/asm-ppc64/errno.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _PPC64_ERRNO_H
-#define _PPC64_ERRNO_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm-generic/errno.h>
-
-#undef	EDEADLOCK
-#define	EDEADLOCK	58	/* File locking deadlock error */
-
-#define _LAST_ERRNO	516
-
-#endif
diff --git a/include/asm-ppc64/firmware.h b/include/asm-ppc64/firmware.h
new file mode 100644
index 000000000000..22bb85cf60af
--- /dev/null
+++ b/include/asm-ppc64/firmware.h
@@ -0,0 +1,101 @@
+/*
+ * include/asm-ppc64/firmware.h
+ *
+ * Extracted from include/asm-ppc64/cputable.h
+ *
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ *	Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_PPC_FIRMWARE_H
+#define __ASM_PPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
+
+#define FW_FEATURE_PFT		(1UL<<0)
+#define FW_FEATURE_TCE		(1UL<<1)
+#define FW_FEATURE_SPRG0	(1UL<<2)
+#define FW_FEATURE_DABR		(1UL<<3)
+#define FW_FEATURE_COPY		(1UL<<4)
+#define FW_FEATURE_ASR		(1UL<<5)
+#define FW_FEATURE_DEBUG	(1UL<<6)
+#define FW_FEATURE_TERM		(1UL<<7)
+#define FW_FEATURE_PERF		(1UL<<8)
+#define FW_FEATURE_DUMP		(1UL<<9)
+#define FW_FEATURE_INTERRUPT	(1UL<<10)
+#define FW_FEATURE_MIGRATE	(1UL<<11)
+#define FW_FEATURE_PERFMON	(1UL<<12)
+#define FW_FEATURE_CRQ		(1UL<<13)
+#define FW_FEATURE_VIO		(1UL<<14)
+#define FW_FEATURE_RDMA		(1UL<<15)
+#define FW_FEATURE_LLAN		(1UL<<16)
+#define FW_FEATURE_BULK		(1UL<<17)
+#define FW_FEATURE_XDABR	(1UL<<18)
+#define FW_FEATURE_MULTITCE	(1UL<<19)
+#define FW_FEATURE_SPLPAR	(1UL<<20)
+#define FW_FEATURE_ISERIES	(1UL<<21)
+
+enum {
+	FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+		FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+		FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+		FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+		FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+		FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
+		FW_FEATURE_SPLPAR,
+	FW_FEATURE_PSERIES_ALWAYS = 0,
+	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
+	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+	FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+		FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_ISERIES
+		FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+		0,
+	FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+		FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_ISERIES
+		FW_FEATURE_ISERIES_ALWAYS &
+#endif
+		FW_FEATURE_POSSIBLE,
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long	ppc64_firmware_features;
+
+static inline unsigned long firmware_has_feature(unsigned long feature)
+{
+	return (FW_FEATURE_ALWAYS & feature) ||
+		(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
+}
+
+#ifdef CONFIG_PPC_PSERIES
+typedef struct {
+	unsigned long val;
+	char * name;
+} firmware_feature_t;
+
+extern firmware_feature_t firmware_features_table[];
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_PPC_FIRMWARE_H */
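Because FW_FEATURE_POSSIBLE and FW_FEATURE_ALWAYS are compile-time constants, firmware_has_feature() costs a single mask-and-test at runtime and folds to a constant whenever the configured platforms make the answer fixed. A hedged sketch of the call pattern (standalone; ppc64_firmware_features would really be filled in during early boot, and the values here are illustrative):

    #include <stdio.h>

    #define FW_FEATURE_ISERIES   (1UL << 21)
    #define FW_FEATURE_POSSIBLE  FW_FEATURE_ISERIES  /* iSeries-capable build */
    #define FW_FEATURE_ALWAYS    0UL                 /* nothing guaranteed */

    /* Set once from the platform probe during early boot. */
    static unsigned long ppc64_firmware_features;

    static unsigned long firmware_has_feature(unsigned long feature)
    {
        return (FW_FEATURE_ALWAYS & feature) ||
               (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
    }

    int main(void)
    {
        ppc64_firmware_features = FW_FEATURE_ISERIES;  /* pretend iSeries */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
            printf("iSeries firmware: phys_to_abs() will remap chunks\n");
        return 0;
    }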
diff --git a/include/asm-ppc64/hdreg.h b/include/asm-ppc64/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-ppc64/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hdreg.h>
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
index e46ff68a6e41..42adf7033a81 100644
--- a/include/asm-ppc64/imalloc.h
+++ b/include/asm-ppc64/imalloc.h
@@ -6,7 +6,7 @@
  */
 #define PHBS_IO_BASE	VMALLOC_END
 #define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_END	(VMALLOC_START + EADDR_MASK)
+#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)
 
 
 /* imalloc region types */
diff --git a/include/asm-ppc64/ioctl.h b/include/asm-ppc64/ioctl.h
deleted file mode 100644
index 42b8c5da7fbc..000000000000
--- a/include/asm-ppc64/ioctl.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef _PPC64_IOCTL_H
-#define _PPC64_IOCTL_H
-
-
-/*
- * This was copied from the alpha as it's a bit cleaner there.
- *                         -- Cort
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define _IOC_NRBITS	8
-#define _IOC_TYPEBITS	8
-#define _IOC_SIZEBITS	13
-#define _IOC_DIRBITS	3
-
-#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT	0
-#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
- * And this turns out useful to catch old ioctl numbers in header
- * files for us.
- */
-#define _IOC_NONE	1U
-#define _IOC_READ	2U
-#define _IOC_WRITE	4U
-
-#define _IOC(dir,type,nr,size) \
-	(((dir)  << _IOC_DIRSHIFT) | \
-	 ((type) << _IOC_TYPESHIFT) | \
-	 ((nr)   << _IOC_NRSHIFT) | \
-	 ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned int __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
-	((sizeof(t) == sizeof(t[1]) && \
-	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
-	  sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode them.. */
-#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* various drivers, such as the pcmcia stuff, need these... */
-#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
-
-#endif /* _PPC64_IOCTL_H */
diff --git a/include/asm-ppc64/ioctls.h b/include/asm-ppc64/ioctls.h
deleted file mode 100644
index 48796bf3e4fc..000000000000
--- a/include/asm-ppc64/ioctls.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _ASM_PPC64_IOCTLS_H
-#define _ASM_PPC64_IOCTLS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/ioctl.h>
-
-#define FIOCLEX		_IO('f', 1)
-#define FIONCLEX	_IO('f', 2)
-#define FIOASYNC	_IOW('f', 125, int)
-#define FIONBIO		_IOW('f', 126, int)
-#define FIONREAD	_IOR('f', 127, int)
-#define TIOCINQ		FIONREAD
-#define FIOQSIZE	_IOR('f', 128, loff_t)
-
-#define TIOCGETP	_IOR('t', 8, struct sgttyb)
-#define TIOCSETP	_IOW('t', 9, struct sgttyb)
-#define TIOCSETN	_IOW('t', 10, struct sgttyb)	/* TIOCSETP wo flush */
-
-#define TIOCSETC	_IOW('t', 17, struct tchars)
-#define TIOCGETC	_IOR('t', 18, struct tchars)
-#define TCGETS		_IOR('t', 19, struct termios)
-#define TCSETS		_IOW('t', 20, struct termios)
-#define TCSETSW		_IOW('t', 21, struct termios)
-#define TCSETSF		_IOW('t', 22, struct termios)
-
-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
-
-#define TCSBRK		_IO('t', 29)
-#define TCXONC		_IO('t', 30)
-#define TCFLSH		_IO('t', 31)
-
-#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
-#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
-#define TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
-#define TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
-#define TIOCOUTQ	_IOR('t', 115, int)	/* output queue size */
-
-#define TIOCGLTC	_IOR('t', 116, struct ltchars)
-#define TIOCSLTC	_IOW('t', 117, struct ltchars)
-#define TIOCSPGRP	_IOW('t', 118, int)
-#define TIOCGPGRP	_IOR('t', 119, int)
-
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-
-#define TIOCSTI		0x5412
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-# define TIOCM_LE	0x001
-# define TIOCM_DTR	0x002
-# define TIOCM_RTS	0x004
-# define TIOCM_ST	0x008
-# define TIOCM_SR	0x010
-# define TIOCM_CTS	0x020
-# define TIOCM_CAR	0x040
-# define TIOCM_RNG	0x080
-# define TIOCM_DSR	0x100
-# define TIOCM_CD	TIOCM_CAR
-# define TIOCM_RI	TIOCM_RNG
-
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-# define TIOCPKT_DATA		 0
-# define TIOCPKT_FLUSHREAD	 1
-# define TIOCPKT_FLUSHWRITE	 2
-# define TIOCPKT_STOP		 4
-# define TIOCPKT_START		 8
-# define TIOCPKT_NOSTOP		16
-# define TIOCPKT_DOSTOP		32
-
-
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK	0x5427  /* BSD compatibility */
-#define TIOCCBRK	0x5428  /* BSD compatibility */
-#define TIOCGSID	0x5429  /* Return the session ID of FD */
-#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
-
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
-#define TIOCSERGETLSR   0x5459 /* Get line status register */
-  /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-# define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-
-#endif /* _ASM_PPC64_IOCTLS_H */
diff --git a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h
index 729de5cc21d9..72dcf8116b04 100644
--- a/include/asm-ppc64/iommu.h
+++ b/include/asm-ppc64/iommu.h
@@ -104,9 +104,6 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn);
 
 #ifdef CONFIG_PPC_ISERIES
 
-/* Initializes tables for bio buses */
-extern void __init iommu_vio_init(void);
-
 struct iSeries_Device_Node;
 /* Creates table for an individual device node */
 extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
diff --git a/include/asm-ppc64/ipc.h b/include/asm-ppc64/ipc.h
deleted file mode 100644
index a46e3d9c2a3f..000000000000
--- a/include/asm-ppc64/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipc.h>
diff --git a/include/asm-ppc64/linkage.h b/include/asm-ppc64/linkage.h
deleted file mode 100644
index 291c2d01c44f..000000000000
--- a/include/asm-ppc64/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
diff --git a/include/asm-ppc64/lmb.h b/include/asm-ppc64/lmb.h
index a6cbca21ac1d..cb368bf0f264 100644
--- a/include/asm-ppc64/lmb.h
+++ b/include/asm-ppc64/lmb.h
@@ -22,7 +22,6 @@
 
 struct lmb_property {
 	unsigned long base;
-	unsigned long physbase;
 	unsigned long size;
 };
 
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index f0ef06375947..ff2c9287d3b6 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -140,6 +140,9 @@ struct machdep_calls {
 
 	/* Idle loop for this platform, leave empty for default idle loop */
 	int		(*idle_loop)(void);
+
+	/* Function to enable pmcs for this platform, called once per cpu. */
+	void		(*enable_pmcs)(void);
 };
 
 extern int default_idle(void);
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 70348a851313..ad36bb28de29 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -28,9 +28,12 @@
 #define STE_VSID_SHIFT	12
 
 /* Location of cpu0's segment table */
-#define STAB0_PAGE	0x9
+#define STAB0_PAGE	0x6
 #define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
-#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* ! __ASSEMBLY */
 
 /*
  * SLB
@@ -259,8 +262,10 @@ extern void stabs_alloc(void);
 #define VSID_BITS	36
 #define VSID_MODULUS	((1UL<<VSID_BITS)-1)
 
-#define CONTEXT_BITS	20
-#define USER_ESID_BITS	15
+#define CONTEXT_BITS	19
+#define USER_ESID_BITS	16
+
+#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -302,8 +307,7 @@ typedef unsigned long mm_context_id_t;
 typedef struct {
 	mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
-	pgd_t *huge_pgdir;
-	u16 htlb_segs; /* bitmask */
+	u16 low_htlb_areas, high_htlb_areas;
 #endif
 } mm_context_t;
 
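The new USER_VSID_RANGE spells out how much user effective address space the segment layer covers. With the 256 MB segments ppc64 uses (SID_SHIFT of 28, an assumption stated here since that constant is defined elsewhere in mmu.h), 16 USER_ESID_BITS give 2^44 bytes. A quick check of the arithmetic:

    #include <stdio.h>

    #define SID_SHIFT      28  /* 256 MB segments; assumed, defined elsewhere */
    #define USER_ESID_BITS 16  /* per the hunk above */

    int main(void)
    {
        unsigned long range = 1UL << (USER_ESID_BITS + SID_SHIFT);

        /* 2^44 bytes, i.e. 16 TB of user effective address space. */
        printf("USER_VSID_RANGE = 2^%d = %lu TB\n",
               USER_ESID_BITS + SID_SHIFT, range >> 40);
        return 0;
    }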
diff --git a/include/asm-ppc64/naca.h b/include/asm-ppc64/naca.h
index bfb7caa32eaf..d2afe6447597 100644
--- a/include/asm-ppc64/naca.h
+++ b/include/asm-ppc64/naca.h
@@ -12,8 +12,6 @@
 
 #include <asm/types.h>
 
-#ifndef __ASSEMBLY__
-
 struct naca_struct {
 	/* Kernel only data - undefined for user space */
 	void *xItVpdAreas;	/* VPD Data			0x00 */
@@ -23,9 +21,4 @@ struct naca_struct {
 
 extern struct naca_struct naca;
 
-#endif /* __ASSEMBLY__ */
-
-#define NACA_PAGE      0x4
-#define NACA_PHYS_ADDR (NACA_PAGE<<PAGE_SHIFT)
-
 #endif /* _NACA_H */
diff --git a/include/asm-ppc64/namei.h b/include/asm-ppc64/namei.h
deleted file mode 100644
index a1412a2d102a..000000000000
--- a/include/asm-ppc64/namei.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * linux/include/asm-ppc/namei.h
3 * Adapted from linux/include/asm-alpha/namei.h
4 *
5 * Included from linux/fs/namei.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef __PPC64_NAMEI_H
14#define __PPC64_NAMEI_H
15
16/* This dummy routine maybe changed to something useful
17 * for /usr/gnemul/ emulation stuff.
18 * Look at asm-sparc/namei.h for details.
19 */
20
21#define __emul_prefix() NULL
22
23#endif /* __PPC64_NAMEI_H */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a5893a305a09..a79a08df62bd 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -37,39 +37,45 @@
 
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
-/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
-#define TASK_HPAGE_END	ASM_CONST(0x0000018000000000)
+#define HTLB_AREA_SHIFT		40
+#define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
+#define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)
 
 #define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
 					- (1U << GET_ESID(addr))) & 0xffff)
+#define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
+					- (1U << GET_HTLB_AREA(addr))) & 0xffff)
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
 
 #define touches_hugepage_low_range(mm, addr, len) \
-	(LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
-#define touches_hugepage_high_range(addr, len) \
-	(((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+	(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+#define touches_hugepage_high_range(mm, addr, len) \
+	(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
 
 #define __within_hugepage_low_range(addr, len, segmask) \
 	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
 #define within_hugepage_low_range(addr, len) \
 	__within_hugepage_low_range((addr), (len), \
-				    current->mm->context.htlb_segs)
-#define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \
-	  && ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr)))
+				    current->mm->context.low_htlb_areas)
+#define __within_hugepage_high_range(addr, len, zonemask) \
+	((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+#define within_hugepage_high_range(addr, len) \
+	__within_hugepage_high_range((addr), (len), \
+				    current->mm->context.high_htlb_areas)
 
 #define is_hugepage_only_range(mm, addr, len) \
-	(touches_hugepage_high_range((addr), (len)) || \
+	(touches_hugepage_high_range((mm), (addr), (len)) || \
 	 touches_hugepage_low_range((mm), (addr), (len)))
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #define in_hugepage_area(context, addr) \
 	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
-	 ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
+	 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
 	   ( ((addr) < 0x100000000L) && \
-	     ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )
+	     ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -125,36 +131,42 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
  * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
+#define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
-#define __pte(x)	((pte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { (x) } )
-#define __pgd(x)	((pgd_t) { (x) } )
-#define __pgprot(x)	((pgprot_t) { (x) } )
+#define __pte(x)	((pte_t) { (x) })
+#define __pmd(x)	((pmd_t) { (x) })
+#define __pud(x)	((pud_t) { (x) })
+#define __pgd(x)	((pgd_t) { (x) })
+#define __pgprot(x)	((pgprot_t) { (x) })
 
 #else
 /*
  * .. while these make it easier on the compiler
 */
 typedef unsigned long pte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)	(x)
 #define pmd_val(x)	(x)
+#define pud_val(x)	(x)
 #define pgd_val(x)	(x)
 #define pgprot_val(x)	(x)
 
 #define __pte(x)	(x)
 #define __pmd(x)	(x)
+#define __pud(x)	(x)
 #define __pgd(x)	(x)
 #define __pgprot(x)	(x)
 
@@ -208,9 +220,6 @@ extern u64 ppc64_pft_size;	/* Log 2 of page table size */
 #define USER_REGION_ID		(0UL)
 #define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
-#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
-
 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
 
 #ifdef CONFIG_DISCONTIGMEM
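The hugepage rework above replaces the fixed 1T-1.5T window with two bitmaps: one bit per 256 MB segment below 4 GB (low) and one bit per 1 TB area (high). Recomputing the masks outside the kernel shows the shape of the test; GET_ESID as a plain shift by 28 is an assumption matching the 256 MB segment size:

    #include <stdio.h>

    #define GET_ESID(a)       ((a) >> 28)  /* 256 MB segments (assumed) */
    #define HTLB_AREA_SHIFT   40           /* 1 TB areas, per the hunk */
    #define GET_HTLB_AREA(a)  ((a) >> HTLB_AREA_SHIFT)

    #define LOW_ESID_MASK(addr, len) \
        (((1U << (GET_ESID((addr) + (len) - 1) + 1)) - \
          (1U << GET_ESID(addr))) & 0xffff)
    #define HTLB_AREA_MASK(addr, len) \
        (((1U << (GET_HTLB_AREA((addr) + (len) - 1) + 1)) - \
          (1U << GET_HTLB_AREA(addr))) & 0xffff)

    int main(void)
    {
        /* 512 MB starting at 256 MB covers low segments 1 and 2: bits 0x6. */
        printf("low mask:  %#x\n", LOW_ESID_MASK(0x10000000UL, 0x20000000UL));
        /* 1 GB inside the second terabyte covers high area 1 only: 0x2. */
        printf("high mask: %#x\n", HTLB_AREA_MASK(1UL << 40, 1UL << 30));
        return 0;
    }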
diff --git a/include/asm-ppc64/param.h b/include/asm-ppc64/param.h
index 1fad38dcf707..76c212d475b3 100644
--- a/include/asm-ppc64/param.h
+++ b/include/asm-ppc64/param.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_PPC64_PARAM_H
 #define _ASM_PPC64_PARAM_H
 
+#include <linux/config.h>
+
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -9,7 +11,7 @@
  */
 
 #ifdef __KERNEL__
-# define HZ		1000		/* Internal kernel timer frequency */
+# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
 # define USER_HZ	100		/* .. some user interfaces are in "ticks" */
 # define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
 #endif
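With HZ now following CONFIG_HZ, tick conversions can no longer hard-code 1000 ticks per second. Any millisecond-to-tick helper has to divide by the configured rate, along these lines (a sketch, not the kernel's msecs_to_jiffies):

    #include <stdio.h>

    #define HZ 250  /* e.g. CONFIG_HZ=250; older ppc64 kernels assumed 1000 */

    /* Round up so short delays never collapse to zero ticks. */
    static unsigned long msecs_to_ticks(unsigned int ms)
    {
        return ((unsigned long)ms * HZ + 999) / 1000;
    }

    int main(void)
    {
        printf("10 ms -> %lu ticks\n", msecs_to_ticks(10)); /* 3 at 250 Hz */
        printf(" 4 ms -> %lu ticks\n", msecs_to_ticks(4));  /* 1 at 250 Hz */
        return 0;
    }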
diff --git a/include/asm-ppc64/percpu.h b/include/asm-ppc64/percpu.h
deleted file mode 100644
index 60a659a4ce1f..000000000000
--- a/include/asm-ppc64/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_PPC64_PERCPU__
-#define __ARCH_PPC64_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_PPC64_PERCPU__ */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 4fc4b739b380..26bc49c1108d 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -6,7 +6,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *zero_cache;
+extern kmem_cache_t *pgtable_cache[];
+
+#define PTE_CACHE_NUM	0
+#define PMD_CACHE_NUM	1
+#define PUD_CACHE_NUM	1
+#define PGD_CACHE_NUM	0
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -15,30 +20,40 @@ extern kmem_cache_t *zero_cache;
  * 2 of the License, or (at your option) any later version.
  */
 
-static inline pgd_t *
-pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
+	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
 }
 
-static inline void
-pgd_free(pgd_t *pgd)
+static inline void pgd_free(pgd_t *pgd)
 {
-	kmem_cache_free(zero_cache, pgd);
+	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void
-pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-	kmem_cache_free(zero_cache, pmd);
+	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte)
@@ -47,44 +62,58 @@ pmd_free(pmd_t *pmd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
+	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(zero_cache, pte);
+	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	kmem_cache_free(zero_cache, page_address(ptepage));
+	pte_free_kernel(page_address(ptepage));
 }
 
-struct pte_freelist_batch
+#define PGF_CACHENUM_MASK	0xf
+
+typedef struct pgtable_free {
+	unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+						unsigned long mask)
 {
-	struct rcu_head rcu;
-	unsigned int index;
-	struct page * pages[0];
-};
+	BUG_ON(cachenum > PGF_CACHENUM_MASK);
 
-#define PTE_FREELIST_SIZE ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
-		sizeof(struct page *))
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
 
-extern void pte_free_now(struct page *ptepage);
-extern void pte_free_submit(struct pte_freelist_batch *batch);
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+	kmem_cache_free(pgtable_cache[cachenum], p);
+}
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
-#define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pmd)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 
 #define check_pgt_cache()	do { } while (0)
 
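pgtable_free_cache() leans on a classic trick: page-table pages come from size-aligned slab caches, so the low bits of their addresses are guaranteed zero and can carry a small tag, here the index of the cache to free into. A standalone round-trip of the encode/decode (aligned_alloc stands in for the slab allocator):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CACHENUM_MASK 0xfUL

    typedef struct { unsigned long val; } tagged_ptr_t;

    /* Tag an aligned pointer; 'mask' is the table alignment minus one. */
    static tagged_ptr_t tag(void *p, int cachenum, unsigned long mask)
    {
        assert(cachenum <= (int)CACHENUM_MASK);
        assert(((unsigned long)p & mask) == 0);  /* must be aligned */
        return (tagged_ptr_t){ .val = ((unsigned long)p & ~mask) | cachenum };
    }

    int main(void)
    {
        void *table = aligned_alloc(4096, 4096); /* stand-in for a PTE page */
        tagged_ptr_t t = tag(table, 1, 4096 - 1);

        void *p = (void *)(t.val & ~CACHENUM_MASK);
        int cachenum = (int)(t.val & CACHENUM_MASK);

        printf("ptr restored: %s, cache: %d\n",
               p == table ? "yes" : "no", cachenum);
        free(p);
        return 0;
    }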
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 46cf61c2ff69..c83679c9d2b0 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -15,19 +15,24 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
  * each entry by assuming that each entry is page aligned.
  */
 #define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  10
-#define PGD_INDEX_SIZE  10
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  7
+#define PGD_INDEX_SIZE  9
+
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD	(1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
 /* PMD_SHIFT determines what a second-level page table entry can map */
@@ -35,8 +40,13 @@
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
@@ -45,15 +55,23 @@
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_SIZE  (0x80000000000UL)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
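With the new index sizes the mapped range is 9 + 7 + 7 + 9 index bits plus the page offset (12 bits for the usual 4 KB pages, an assumption here): 44 bits, or 16 TB per region, which is exactly why VMALLOC_SIZE above and IMALLOC_END earlier grow. A quick arithmetic check:

    #include <stdio.h>

    int main(void)
    {
        int pte = 9, pmd = 7, pud = 7, pgd = 9, page_shift = 12;
        int eaddr_bits = pte + pmd + pud + pgd + page_shift;

        /* 2^44 bytes, i.e. 16 TB addressable through the 4-level tree. */
        printf("PGTABLE_EADDR_SIZE = %d bits -> %lu TB\n",
               eaddr_bits, (1UL << eaddr_bits) >> 40);
        return 0;
    }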
@@ -154,8 +172,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		   unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -163,7 +179,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)	-1
-#define hugetlb_mm_free_pgd(mm)		do {} while (0)
 
 #endif
 
@@ -197,39 +212,45 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
197#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT))) 212#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
198#define pte_page(x) pfn_to_page(pte_pfn(x)) 213#define pte_page(x) pfn_to_page(pte_pfn(x))
199 214
200#define pmd_set(pmdp, ptep) \ 215#define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
201 (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
202#define pmd_none(pmd) (!pmd_val(pmd)) 216#define pmd_none(pmd) (!pmd_val(pmd))
203#define pmd_bad(pmd) (pmd_val(pmd) == 0) 217#define pmd_bad(pmd) (pmd_val(pmd) == 0)
204#define pmd_present(pmd) (pmd_val(pmd) != 0) 218#define pmd_present(pmd) (pmd_val(pmd) != 0)
205#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 219#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
206#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd))) 220#define pmd_page_kernel(pmd) (pmd_val(pmd))
207#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) 221#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
208 222
209#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp))) 223#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (unsigned long)(pmdp))
210#define pud_none(pud) (!pud_val(pud)) 224#define pud_none(pud) (!pud_val(pud))
211#define pud_bad(pud) ((pud_val(pud)) == 0UL) 225#define pud_bad(pud) ((pud_val(pud)) == 0)
212#define pud_present(pud) (pud_val(pud) != 0UL) 226#define pud_present(pud) (pud_val(pud) != 0)
213#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) 227#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
214#define pud_page(pud) (__bpn_to_ba(pud_val(pud))) 228#define pud_page(pud) (pud_val(pud))
229
230#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
231#define pgd_none(pgd) (!pgd_val(pgd))
232#define pgd_bad(pgd) (pgd_val(pgd) == 0)
233#define pgd_present(pgd) (pgd_val(pgd) != 0)
234#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
235#define pgd_page(pgd) (pgd_val(pgd))
215 236
216/* 237/*
217 * Find an entry in a page-table-directory. We combine the address region 238 * Find an entry in a page-table-directory. We combine the address region
218 * (the high order N bits) and the pgd portion of the address. 239 * (the high order N bits) and the pgd portion of the address.
219 */ 240 */
220/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ 241/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
221#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff) 242#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
222 243
223#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 244#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
224 245
225/* Find an entry in the second-level page table.. */ 246#define pud_offset(pgdp, addr) \
247 (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
248
226#define pmd_offset(pudp,addr) \ 249#define pmd_offset(pudp,addr) \
227 ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) 250 (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
228 251
229/* Find an entry in the third-level page table.. */
230#define pte_offset_kernel(dir,addr) \ 252#define pte_offset_kernel(dir,addr) \
231 ((pte_t *) pmd_page_kernel(*(dir)) \ 253 (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
232 + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
233 254
234#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 255#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
235#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) 256#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
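
Read together, pgd_offset/pud_offset/pmd_offset/pte_offset_kernel compose into the canonical four-level walk. A hedged sketch of how a kernel-side lookup would chain them (the helper name is hypothetical and not part of this patch; error handling is minimal):

	/* Hypothetical helper: resolve an address to its PTE pointer
	 * using the new four-level macros. */
	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}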
@@ -458,23 +479,20 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
458#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 479#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
459 480
460#define pmd_ERROR(e) \ 481#define pmd_ERROR(e) \
461 printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e)) 482 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
483#define pud_ERROR(e) \
 484	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
462#define pgd_ERROR(e) \ 485#define pgd_ERROR(e) \
463 printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e)) 486 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
464 487
465extern pgd_t swapper_pg_dir[]; 488extern pgd_t swapper_pg_dir[];
466 489
467extern void paging_init(void); 490extern void paging_init(void);
468 491
469/* 492#ifdef CONFIG_HUGETLB_PAGE
470 * Because the huge pgtables are only 2 level, they can take
471 * at most around 4M, much less than one hugepage which the
472 * process is presumably entitled to use. So we don't bother
473 * freeing up the pagetables on unmap, and wait until
474 * destroy_context() to clean up the lot.
475 */
476#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 493#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
477 do { } while (0) 494 free_pgd_range(tlb, addr, end, floor, ceiling)
495#endif
478 496
479/* 497/*
480 * This gets called at the end of handling a page fault, when 498 * This gets called at the end of handling a page fault, when
diff --git a/include/asm-ppc64/pmc.h b/include/asm-ppc64/pmc.h
index c924748c0bea..d1d297dbccfe 100644
--- a/include/asm-ppc64/pmc.h
+++ b/include/asm-ppc64/pmc.h
@@ -26,4 +26,6 @@ typedef void (*perf_irq_t)(struct pt_regs *);
26int reserve_pmc_hardware(perf_irq_t new_perf_irq); 26int reserve_pmc_hardware(perf_irq_t new_perf_irq);
27void release_pmc_hardware(void); 27void release_pmc_hardware(void);
28 28
29void power4_enable_pmcs(void);
30
29#endif /* _PPC64_PMC_H */ 31#endif /* _PPC64_PMC_H */
diff --git a/include/asm-ppc64/poll.h b/include/asm-ppc64/poll.h
deleted file mode 100644
index 370fa3ba0db4..000000000000
--- a/include/asm-ppc64/poll.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef __PPC64_POLL_H
2#define __PPC64_POLL_H
3
4/*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#define POLLIN 0x0001
14#define POLLPRI 0x0002
15#define POLLOUT 0x0004
16#define POLLERR 0x0008
17#define POLLHUP 0x0010
18#define POLLNVAL 0x0020
19#define POLLRDNORM 0x0040
20#define POLLRDBAND 0x0080
21#define POLLWRNORM 0x0100
22#define POLLWRBAND 0x0200
23#define POLLMSG 0x0400
24#define POLLREMOVE 0x1000
25
26struct pollfd {
27 int fd;
28 short events;
29 short revents;
30};
31
32#endif /* __PPC64_POLL_H */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 352306cfb579..7bd4796f1236 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -268,6 +268,7 @@
268#define PV_970FX 0x003C 268#define PV_970FX 0x003C
269#define PV_630 0x0040 269#define PV_630 0x0040
270#define PV_630p 0x0041 270#define PV_630p 0x0041
271#define PV_970MP 0x0044
271#define PV_BE 0x0070 272#define PV_BE 0x0070
272 273
273/* Platforms supported by PPC64 */ 274/* Platforms supported by PPC64 */
@@ -382,8 +383,8 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
382extern struct task_struct *last_task_used_math; 383extern struct task_struct *last_task_used_math;
383extern struct task_struct *last_task_used_altivec; 384extern struct task_struct *last_task_used_altivec;
384 385
385/* 64-bit user address space is 41-bits (2TBs user VM) */ 386/* 64-bit user address space is 44-bits (16TB user VM) */
386#define TASK_SIZE_USER64 (0x0000020000000000UL) 387#define TASK_SIZE_USER64 (0x0000100000000000UL)
387 388
388/* 389/*
389 * 32-bit user address space is 4GB - 1 page 390 * 32-bit user address space is 4GB - 1 page
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index 04b1a84f7ca3..dc5330b39509 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -22,13 +22,15 @@
22#define RELOC(x) (*PTRRELOC(&(x))) 22#define RELOC(x) (*PTRRELOC(&(x)))
23 23
24/* Definitions used by the flattened device tree */ 24/* Definitions used by the flattened device tree */
25#define OF_DT_HEADER 0xd00dfeed /* 4: version, 4: total size */ 25#define OF_DT_HEADER 0xd00dfeed /* marker */
26#define OF_DT_BEGIN_NODE 0x1 /* Start node: full name */ 26#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
27#define OF_DT_END_NODE 0x2 /* End node */ 27#define OF_DT_END_NODE 0x2 /* End node */
28#define OF_DT_PROP 0x3 /* Property: name off, size, content */ 28#define OF_DT_PROP 0x3 /* Property: name off, size,
29 * content */
30#define OF_DT_NOP 0x4 /* nop */
29#define OF_DT_END 0x9 31#define OF_DT_END 0x9
30 32
31#define OF_DT_VERSION 1 33#define OF_DT_VERSION 0x10
32 34
33/* 35/*
34 * This is what gets passed to the kernel by prom_init or kexec 36 * This is what gets passed to the kernel by prom_init or kexec
@@ -54,7 +56,9 @@ struct boot_param_header
54 u32 version; /* format version */ 56 u32 version; /* format version */
55 u32 last_comp_version; /* last compatible version */ 57 u32 last_comp_version; /* last compatible version */
56 /* version 2 fields below */ 58 /* version 2 fields below */
57 u32 boot_cpuid_phys; /* Which physical CPU id we're booting on */ 59 u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
60 /* version 3 fields below */
61 u32 dt_strings_size; /* size of the DT strings block */
58}; 62};
59 63
60 64
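
With OF_DT_NOP now a legal structure token, a flattened-tree parser has to tolerate NOP padding between nodes and properties. A hedged fragment of what that looks like (the function name is hypothetical; tokens are assumed to be native-endian 32-bit words on ppc64):

	/* Hypothetical scanner fragment: advance past any OF_DT_NOP
	 * tokens before interpreting the next structure token. */
	static u32 *dt_skip_nops(u32 *p)
	{
		while (*p == OF_DT_NOP)
			p++;
		return p;
	}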
diff --git a/include/asm-ppc64/resource.h b/include/asm-ppc64/resource.h
deleted file mode 100644
index add031b9dfd4..000000000000
--- a/include/asm-ppc64/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _PPC64_RESOURCE_H
2#define _PPC64_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif /* _PPC64_RESOURCE_H */
diff --git a/include/asm-ppc64/shmparam.h b/include/asm-ppc64/shmparam.h
deleted file mode 100644
index b2825ceff05e..000000000000
--- a/include/asm-ppc64/shmparam.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _PPC64_SHMPARAM_H
2#define _PPC64_SHMPARAM_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
12
13#endif /* _PPC64_SHMPARAM_H */
diff --git a/include/asm-ppc64/socket.h b/include/asm-ppc64/socket.h
index 59e00dfc8b8e..9e1af8eb2d96 100644
--- a/include/asm-ppc64/socket.h
+++ b/include/asm-ppc64/socket.h
@@ -21,6 +21,8 @@
21#define SO_BROADCAST 6 21#define SO_BROADCAST 6
22#define SO_SNDBUF 7 22#define SO_SNDBUF 7
23#define SO_RCVBUF 8 23#define SO_RCVBUF 8
24#define SO_SNDBUFFORCE 32
25#define SO_RCVBUFFORCE 33
24#define SO_KEEPALIVE 9 26#define SO_KEEPALIVE 9
25#define SO_OOBINLINE 10 27#define SO_OOBINLINE 10
26#define SO_NO_CHECK 11 28#define SO_NO_CHECK 11
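
The two *FORCE options appear with matching semantics across every architecture touched in this patch; they are the privileged variants of SO_SNDBUF/SO_RCVBUF. A hedged user-space sketch (the 1MB figure is illustrative, and the CAP_NET_ADMIN requirement is an assumption drawn from the option's intent):

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int bytes = 1 << 20;	/* 1MB, illustrative */

		/* Unlike SO_SNDBUF, this presumably is not clamped to
		 * wmem_max, hence the privilege requirement. */
		if (setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE,
			       &bytes, sizeof(bytes)) < 0)
			perror("SO_SNDBUFFORCE");
		return 0;
	}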
diff --git a/include/asm-ppc64/string.h b/include/asm-ppc64/string.h
deleted file mode 100644
index eeca68ef1e91..000000000000
--- a/include/asm-ppc64/string.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _PPC64_STRING_H_
2#define _PPC64_STRING_H_
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#define __HAVE_ARCH_STRCPY
12#define __HAVE_ARCH_STRNCPY
13#define __HAVE_ARCH_STRLEN
14#define __HAVE_ARCH_STRCMP
15#define __HAVE_ARCH_STRCAT
16#define __HAVE_ARCH_MEMSET
17#define __HAVE_ARCH_MEMCPY
18#define __HAVE_ARCH_MEMMOVE
19#define __HAVE_ARCH_MEMCMP
20#define __HAVE_ARCH_MEMCHR
21
22extern int strcasecmp(const char *, const char *);
23extern int strncasecmp(const char *, const char *, int);
24extern char * strcpy(char *,const char *);
25extern char * strncpy(char *,const char *, __kernel_size_t);
26extern __kernel_size_t strlen(const char *);
27extern int strcmp(const char *,const char *);
28extern char * strcat(char *, const char *);
29extern void * memset(void *,int,__kernel_size_t);
30extern void * memcpy(void *,const void *,__kernel_size_t);
31extern void * memmove(void *,const void *,__kernel_size_t);
32extern int memcmp(const void *,const void *,__kernel_size_t);
33extern void * memchr(const void *,int,__kernel_size_t);
34
35#endif /* _PPC64_STRING_H_ */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 98d120ca8a91..b9e1835351e9 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -88,7 +88,7 @@ DEBUGGER_BOILERPLATE(debugger_dabr_match)
88DEBUGGER_BOILERPLATE(debugger_fault_handler) 88DEBUGGER_BOILERPLATE(debugger_fault_handler)
89 89
90#ifdef CONFIG_XMON 90#ifdef CONFIG_XMON
91extern void xmon_init(void); 91extern void xmon_init(int enable);
92#endif 92#endif
93 93
94#else 94#else
@@ -302,5 +302,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
302 302
303#define arch_align_stack(x) (x) 303#define arch_align_stack(x) (x)
304 304
305extern unsigned long reloc_offset(void);
306
305#endif /* __KERNEL__ */ 307#endif /* __KERNEL__ */
306#endif 308#endif
diff --git a/include/asm-ppc64/unaligned.h b/include/asm-ppc64/unaligned.h
deleted file mode 100644
index 636e93c4f379..000000000000
--- a/include/asm-ppc64/unaligned.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __PPC64_UNALIGNED_H
2#define __PPC64_UNALIGNED_H
3
4/*
5 * The PowerPC can do unaligned accesses itself in big endian mode.
6 *
7 * The strange macros are there to make sure these can't
8 * be misused in a way that makes them not work on other
9 * architectures where unaligned accesses aren't as simple.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#define get_unaligned(ptr) (*(ptr))
18
19#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
20
21#endif /* __PPC64_UNALIGNED_H */
diff --git a/include/asm-ppc64/vio.h b/include/asm-ppc64/vio.h
index 20cd98ee6337..03f1b95f433b 100644
--- a/include/asm-ppc64/vio.h
+++ b/include/asm-ppc64/vio.h
@@ -19,13 +19,15 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/mod_devicetable.h>
23
22#include <asm/hvcall.h> 24#include <asm/hvcall.h>
23#include <asm/prom.h>
24#include <asm/scatterlist.h> 25#include <asm/scatterlist.h>
25/* 26
27/*
26 * Architecture-specific constants for drivers to 28 * Architecture-specific constants for drivers to
27 * extract attributes of the device using vio_get_attribute() 29 * extract attributes of the device using vio_get_attribute()
28*/ 30 */
29#define VETH_MAC_ADDR "local-mac-address" 31#define VETH_MAC_ADDR "local-mac-address"
30#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters" 32#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters"
31 33
@@ -37,64 +39,65 @@
37#define VIO_IRQ_DISABLE 0UL 39#define VIO_IRQ_DISABLE 0UL
38#define VIO_IRQ_ENABLE 1UL 40#define VIO_IRQ_ENABLE 1UL
39 41
40struct vio_dev;
41struct vio_driver;
42struct vio_device_id;
43struct iommu_table; 42struct iommu_table;
44 43
45int vio_register_driver(struct vio_driver *drv); 44/*
46void vio_unregister_driver(struct vio_driver *drv); 45 * The vio_dev structure is used to describe virtual I/O devices.
47 46 */
48#ifdef CONFIG_PPC_PSERIES 47struct vio_dev {
49struct vio_dev * __devinit vio_register_device_node( 48 struct iommu_table *iommu_table; /* vio_map_* uses this */
50 struct device_node *node_vdev); 49 char *name;
51#endif
52void __devinit vio_unregister_device(struct vio_dev *dev);
53struct vio_dev *vio_find_node(struct device_node *vnode);
54
55const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length);
56int vio_get_irq(struct vio_dev *dev);
57int vio_enable_interrupts(struct vio_dev *dev);
58int vio_disable_interrupts(struct vio_dev *dev);
59
60extern struct dma_mapping_ops vio_dma_ops;
61
62extern struct bus_type vio_bus_type;
63
64struct vio_device_id {
65 char *type; 50 char *type;
66 char *compat; 51 uint32_t unit_address;
52 unsigned int irq;
53 struct device dev;
67}; 54};
68 55
69struct vio_driver { 56struct vio_driver {
70 struct list_head node; 57 struct list_head node;
71 char *name; 58 char *name;
72 const struct vio_device_id *id_table; /* NULL if wants all devices */ 59 const struct vio_device_id *id_table;
73 int (*probe) (struct vio_dev *dev, const struct vio_device_id *id); /* New device inserted */ 60 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
74 int (*remove) (struct vio_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ 61 int (*remove)(struct vio_dev *dev);
75 unsigned long driver_data; 62 unsigned long driver_data;
76
77 struct device_driver driver; 63 struct device_driver driver;
78}; 64};
79 65
66struct vio_bus_ops {
67 int (*match)(const struct vio_device_id *id, const struct vio_dev *dev);
68 void (*unregister_device)(struct vio_dev *);
69 void (*release_device)(struct device *);
70};
71
72extern struct dma_mapping_ops vio_dma_ops;
73extern struct bus_type vio_bus_type;
74extern struct vio_dev vio_bus_device;
75
76extern int vio_register_driver(struct vio_driver *drv);
77extern void vio_unregister_driver(struct vio_driver *drv);
78
79extern struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev);
80extern void __devinit vio_unregister_device(struct vio_dev *dev);
81
82extern int vio_bus_init(struct vio_bus_ops *);
83
84#ifdef CONFIG_PPC_PSERIES
85struct device_node;
86
87extern struct vio_dev * __devinit vio_register_device_node(
88 struct device_node *node_vdev);
89extern struct vio_dev *vio_find_node(struct device_node *vnode);
90extern const void *vio_get_attribute(struct vio_dev *vdev, void *which,
91 int *length);
92extern int vio_enable_interrupts(struct vio_dev *dev);
93extern int vio_disable_interrupts(struct vio_dev *dev);
94#endif
95
80static inline struct vio_driver *to_vio_driver(struct device_driver *drv) 96static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
81{ 97{
82 return container_of(drv, struct vio_driver, driver); 98 return container_of(drv, struct vio_driver, driver);
83} 99}
84 100
85/*
86 * The vio_dev structure is used to describe virtual I/O devices.
87 */
88struct vio_dev {
89 struct iommu_table *iommu_table; /* vio_map_* uses this */
90 char *name;
91 char *type;
92 uint32_t unit_address;
93 unsigned int irq;
94
95 struct device dev;
96};
97
98static inline struct vio_dev *to_vio_dev(struct device *dev) 101static inline struct vio_dev *to_vio_dev(struct device *dev)
99{ 102{
100 return container_of(dev, struct vio_dev, dev); 103 return container_of(dev, struct vio_dev, dev);
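
With vio_dev and vio_driver now fully defined in the header, a client driver needs only probe/remove callbacks and a registration call. A hedged skeleton against the reworked API (every "example" name is hypothetical):

	static int example_probe(struct vio_dev *vdev,
				 const struct vio_device_id *id)
	{
		printk(KERN_INFO "example: probed %s at unit 0x%x\n",
		       vdev->name, vdev->unit_address);
		return 0;
	}

	static int example_remove(struct vio_dev *vdev)
	{
		return 0;
	}

	static struct vio_driver example_driver = {
		.name	= "example",
		.probe	= example_probe,
		.remove	= example_remove,
	};

	static int __init example_init(void)
	{
		return vio_register_driver(&example_driver);
	}

	static void __exit example_exit(void)
	{
		vio_unregister_driver(&example_driver);
	}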
diff --git a/include/asm-ppc64/xor.h b/include/asm-ppc64/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/include/asm-ppc64/xor.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/xor.h>
diff --git a/include/asm-s390/socket.h b/include/asm-s390/socket.h
index 0e96eeca4e6b..15a5298c8744 100644
--- a/include/asm-s390/socket.h
+++ b/include/asm-s390/socket.h
@@ -22,6 +22,8 @@
22#define SO_BROADCAST 6 22#define SO_BROADCAST 6
23#define SO_SNDBUF 7 23#define SO_SNDBUF 7
24#define SO_RCVBUF 8 24#define SO_RCVBUF 8
25#define SO_SNDBUFFORCE 32
26#define SO_RCVBUFFORCE 33
25#define SO_KEEPALIVE 9 27#define SO_KEEPALIVE 9
26#define SO_OOBINLINE 10 28#define SO_OOBINLINE 10
27#define SO_NO_CHECK 11 29#define SO_NO_CHECK 11
diff --git a/include/asm-sh/socket.h b/include/asm-sh/socket.h
index dde696c3b4c7..553904ff9336 100644
--- a/include/asm-sh/socket.h
+++ b/include/asm-sh/socket.h
@@ -14,6 +14,8 @@
14#define SO_BROADCAST 6 14#define SO_BROADCAST 6
15#define SO_SNDBUF 7 15#define SO_SNDBUF 7
16#define SO_RCVBUF 8 16#define SO_RCVBUF 8
17#define SO_RCVBUFFORCE 32
18#define SO_SNDBUFFORCE 33
17#define SO_KEEPALIVE 9 19#define SO_KEEPALIVE 9
18#define SO_OOBINLINE 10 20#define SO_OOBINLINE 10
19#define SO_NO_CHECK 11 21#define SO_NO_CHECK 11
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 32c9699367cf..5a7a1a8d29ac 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -19,7 +19,6 @@
19#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20#include <asm/head.h> 20#include <asm/head.h>
21#include <asm/signal.h> 21#include <asm/signal.h>
22#include <asm/segment.h>
23#include <asm/btfixup.h> 22#include <asm/btfixup.h>
24#include <asm/page.h> 23#include <asm/page.h>
25 24
diff --git a/include/asm-sparc/segment.h b/include/asm-sparc/segment.h
deleted file mode 100644
index a1b7ffc9eec9..000000000000
--- a/include/asm-sparc/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC_SEGMENT_H
2#define __SPARC_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif
diff --git a/include/asm-sparc/socket.h b/include/asm-sparc/socket.h
index c1154e3ecfdf..09575b608adb 100644
--- a/include/asm-sparc/socket.h
+++ b/include/asm-sparc/socket.h
@@ -29,6 +29,8 @@
29 29
30#define SO_SNDBUF 0x1001 30#define SO_SNDBUF 0x1001
31#define SO_RCVBUF 0x1002 31#define SO_RCVBUF 0x1002
32#define SO_SNDBUFFORCE 0x100a
33#define SO_RCVBUFFORCE 0x100b
32#define SO_ERROR 0x1007 34#define SO_ERROR 0x1007
33#define SO_TYPE 0x1008 35#define SO_TYPE 0x1008
34 36
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 898562ebe94c..3557781a4bfd 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -9,7 +9,6 @@
9#include <linux/threads.h> /* NR_CPUS */ 9#include <linux/threads.h> /* NR_CPUS */
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11 11
12#include <asm/segment.h>
13#include <asm/page.h> 12#include <asm/page.h>
14#include <asm/psr.h> 13#include <asm/psr.h>
15#include <asm/ptrace.h> 14#include <asm/ptrace.h>
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index d80f3379669b..e175afcf2cde 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,10 +72,10 @@ extern int atomic64_sub_ret(int, atomic64_t *);
72 72
73/* Atomic operations are already serializing */ 73/* Atomic operations are already serializing */
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75#define smp_mb__before_atomic_dec() membar("#StoreLoad | #LoadLoad") 75#define smp_mb__before_atomic_dec() membar_storeload_loadload();
76#define smp_mb__after_atomic_dec() membar("#StoreLoad | #StoreStore") 76#define smp_mb__after_atomic_dec() membar_storeload_storestore();
77#define smp_mb__before_atomic_inc() membar("#StoreLoad | #LoadLoad") 77#define smp_mb__before_atomic_inc() membar_storeload_loadload();
78#define smp_mb__after_atomic_inc() membar("#StoreLoad | #StoreStore") 78#define smp_mb__after_atomic_inc() membar_storeload_storestore();
79#else 79#else
80#define smp_mb__before_atomic_dec() barrier() 80#define smp_mb__before_atomic_dec() barrier()
81#define smp_mb__after_atomic_dec() barrier() 81#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9c5e71970287..6388b8376c50 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -72,8 +72,8 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
72} 72}
73 73
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75#define smp_mb__before_clear_bit() membar("#StoreLoad | #LoadLoad") 75#define smp_mb__before_clear_bit() membar_storeload_loadload()
76#define smp_mb__after_clear_bit() membar("#StoreLoad | #StoreStore") 76#define smp_mb__after_clear_bit() membar_storeload_storestore()
77#else 77#else
78#define smp_mb__before_clear_bit() barrier() 78#define smp_mb__before_clear_bit() barrier()
79#define smp_mb__after_clear_bit() barrier() 79#define smp_mb__after_clear_bit() barrier()
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index d0bee2413560..3169f3e2237e 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -18,7 +18,6 @@
18#include <asm/a.out.h> 18#include <asm/a.out.h>
19#include <asm/pstate.h> 19#include <asm/pstate.h>
20#include <asm/ptrace.h> 20#include <asm/ptrace.h>
21#include <asm/segment.h>
22#include <asm/page.h> 21#include <asm/page.h>
23 22
24/* The sparc has no problems with write protection */ 23/* The sparc has no problems with write protection */
diff --git a/include/asm-sparc64/segment.h b/include/asm-sparc64/segment.h
deleted file mode 100644
index b03e709fc945..000000000000
--- a/include/asm-sparc64/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC64_SEGMENT_H
2#define __SPARC64_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif
diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h
new file mode 100644
index 000000000000..2f792c20b53c
--- /dev/null
+++ b/include/asm-sparc64/sfafsr.h
@@ -0,0 +1,82 @@
1#ifndef _SPARC64_SFAFSR_H
2#define _SPARC64_SFAFSR_H
3
4#include <asm/const.h>
5
6/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
7
8#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT)
9#define SFAFSR_ME_SHIFT 32
10#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT)
11#define SFAFSR_PRIV_SHIFT 31
12#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT)
13#define SFAFSR_ISAP_SHIFT 30
14#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT)
15#define SFAFSR_ETP_SHIFT 29
16#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT)
17#define SFAFSR_IVUE_SHIFT 28
18#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT)
19#define SFAFSR_TO_SHIFT 27
20#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT)
21#define SFAFSR_BERR_SHIFT 26
22#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT)
23#define SFAFSR_LDP_SHIFT 25
24#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT)
25#define SFAFSR_CP_SHIFT 24
26#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT)
27#define SFAFSR_WP_SHIFT 23
28#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT)
29#define SFAFSR_EDP_SHIFT 22
30#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT)
31#define SFAFSR_UE_SHIFT 21
32#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT)
33#define SFAFSR_CE_SHIFT 20
34#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
35#define SFAFSR_ETS_SHIFT 16
36#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
37#define SFAFSR_PSYND_SHIFT 0
38
39/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
40 * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
41 */
42
43#define UDBE_UE (_AC(1,UL) << 9)
44#define UDBE_CE (_AC(1,UL) << 8)
45#define UDBE_E_SYNDR (_AC(0xff,UL) << 0)
46
47/* The trap handlers for asynchronous errors encode the AFSR and
48 * other pieces of information into a 64-bit argument for C code
49 * encoded as follows:
50 *
51 * -----------------------------------------------
52 * | UDB_H | UDB_L | TL>1 | TT | AFSR |
53 * -----------------------------------------------
54 * 63 54 53 44 42 41 33 32 0
55 *
56 * The AFAR is passed in unchanged.
57 */
58#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
59#define SFSTAT_UDBH_SHIFT 54
 60#define SFSTAT_UDBL_MASK	(_AC(0x3ff,UL) << SFSTAT_UDBL_SHIFT)
61#define SFSTAT_UDBL_SHIFT 44
62#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
63#define SFSTAT_TL_GT_ONE_SHIFT 42
64#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
65#define SFSTAT_TRAP_TYPE_SHIFT 33
66#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
67#define SFSTAT_AFSR_SHIFT 0
68
69/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
70#define ESTATE_ERR_CE 0x1 /* Correctable errors */
71#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
72#define ESTATE_ERR_ISAP 0x4 /* System address parity error */
73#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \
74 ESTATE_ERR_NCE | \
75 ESTATE_ERR_ISAP)
76
77/* The various trap types that report using the above state. */
78#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */
79#define TRAP_TYPE_DAE 0x32 /* Data Access Error */
80#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */
81
82#endif /* _SPARC64_SFAFSR_H */
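
The mask/shift pairs above are all that is needed to pull the fields back out of the packed status word described in the comment. A hedged decode sketch (the helper names are hypothetical, not part of this header):

	/* Hypothetical decode helpers for the packed 64-bit status word. */
	static inline unsigned long sfstat_udbh(unsigned long sfsr)
	{
		return (sfsr & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
	}

	static inline unsigned long sfstat_trap_type(unsigned long sfsr)
	{
		return (sfsr & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	}

	static inline unsigned long sfstat_afsr(unsigned long sfsr)
	{
		return (sfsr & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	}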
diff --git a/include/asm-sparc64/socket.h b/include/asm-sparc64/socket.h
index 865547a23908..59987dad3359 100644
--- a/include/asm-sparc64/socket.h
+++ b/include/asm-sparc64/socket.h
@@ -29,6 +29,8 @@
29 29
30#define SO_SNDBUF 0x1001 30#define SO_SNDBUF 0x1001
31#define SO_RCVBUF 0x1002 31#define SO_RCVBUF 0x1002
32#define SO_SNDBUFFORCE 0x100a
33#define SO_RCVBUFFORCE 0x100b
32#define SO_ERROR 0x1007 34#define SO_ERROR 0x1007
33#define SO_TYPE 0x1008 35#define SO_TYPE 0x1008
34 36
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 9cb93a5c2b4f..a02c4370eb42 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -43,7 +43,7 @@ typedef struct {
43#define spin_is_locked(lp) ((lp)->lock != 0) 43#define spin_is_locked(lp) ((lp)->lock != 0)
44 44
45#define spin_unlock_wait(lp) \ 45#define spin_unlock_wait(lp) \
46do { membar("#LoadLoad"); \ 46do { rmb(); \
47} while((lp)->lock) 47} while((lp)->lock)
48 48
49static inline void _raw_spin_lock(spinlock_t *lock) 49static inline void _raw_spin_lock(spinlock_t *lock)
@@ -129,15 +129,18 @@ typedef struct {
129#define spin_is_locked(__lock) ((__lock)->lock != 0) 129#define spin_is_locked(__lock) ((__lock)->lock != 0)
130#define spin_unlock_wait(__lock) \ 130#define spin_unlock_wait(__lock) \
131do { \ 131do { \
132 membar("#LoadLoad"); \ 132 rmb(); \
133} while((__lock)->lock) 133} while((__lock)->lock)
134 134
135extern void _do_spin_lock (spinlock_t *lock, char *str); 135extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
136extern void _do_spin_unlock (spinlock_t *lock); 136extern void _do_spin_unlock(spinlock_t *lock);
137extern int _do_spin_trylock (spinlock_t *lock); 137extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
138 138
139#define _raw_spin_trylock(lp) _do_spin_trylock(lp) 139#define _raw_spin_trylock(lp) \
140#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") 140 _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
141#define _raw_spin_lock(lock) \
142 _do_spin_lock(lock, "spin_lock", \
143 (unsigned long) __builtin_return_address(0))
141#define _raw_spin_unlock(lock) _do_spin_unlock(lock) 144#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
142#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 145#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
143 146
@@ -279,37 +282,41 @@ typedef struct {
279#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } 282#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } }
280#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) 283#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
281 284
282extern void _do_read_lock(rwlock_t *rw, char *str); 285extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
283extern void _do_read_unlock(rwlock_t *rw, char *str); 286extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
284extern void _do_write_lock(rwlock_t *rw, char *str); 287extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
285extern void _do_write_unlock(rwlock_t *rw); 288extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
286extern int _do_write_trylock(rwlock_t *rw, char *str); 289extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
287 290
288#define _raw_read_lock(lock) \ 291#define _raw_read_lock(lock) \
289do { unsigned long flags; \ 292do { unsigned long flags; \
290 local_irq_save(flags); \ 293 local_irq_save(flags); \
291 _do_read_lock(lock, "read_lock"); \ 294 _do_read_lock(lock, "read_lock", \
295 (unsigned long) __builtin_return_address(0)); \
292 local_irq_restore(flags); \ 296 local_irq_restore(flags); \
293} while(0) 297} while(0)
294 298
295#define _raw_read_unlock(lock) \ 299#define _raw_read_unlock(lock) \
296do { unsigned long flags; \ 300do { unsigned long flags; \
297 local_irq_save(flags); \ 301 local_irq_save(flags); \
298 _do_read_unlock(lock, "read_unlock"); \ 302 _do_read_unlock(lock, "read_unlock", \
303 (unsigned long) __builtin_return_address(0)); \
299 local_irq_restore(flags); \ 304 local_irq_restore(flags); \
300} while(0) 305} while(0)
301 306
302#define _raw_write_lock(lock) \ 307#define _raw_write_lock(lock) \
303do { unsigned long flags; \ 308do { unsigned long flags; \
304 local_irq_save(flags); \ 309 local_irq_save(flags); \
305 _do_write_lock(lock, "write_lock"); \ 310 _do_write_lock(lock, "write_lock", \
311 (unsigned long) __builtin_return_address(0)); \
306 local_irq_restore(flags); \ 312 local_irq_restore(flags); \
307} while(0) 313} while(0)
308 314
309#define _raw_write_unlock(lock) \ 315#define _raw_write_unlock(lock) \
310do { unsigned long flags; \ 316do { unsigned long flags; \
311 local_irq_save(flags); \ 317 local_irq_save(flags); \
312 _do_write_unlock(lock); \ 318 _do_write_unlock(lock, \
319 (unsigned long) __builtin_return_address(0)); \
313 local_irq_restore(flags); \ 320 local_irq_restore(flags); \
314} while(0) 321} while(0)
315 322
@@ -317,7 +324,8 @@ do { unsigned long flags; \
317({ unsigned long flags; \ 324({ unsigned long flags; \
318 int val; \ 325 int val; \
319 local_irq_save(flags); \ 326 local_irq_save(flags); \
320 val = _do_write_trylock(lock, "write_trylock"); \ 327 val = _do_write_trylock(lock, "write_trylock", \
328 (unsigned long) __builtin_return_address(0)); \
321 local_irq_restore(flags); \ 329 local_irq_restore(flags); \
322 val; \ 330 val; \
323}) 331})
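
Every debug-lock entry point now receives its call site explicitly, captured with __builtin_return_address(0) at the macro layer so the out-of-line _do_* functions can report who took the lock. A minimal stand-alone illustration of the GCC builtin (not kernel code):

	#include <stdio.h>

	static void __attribute__((noinline)) who_called_me(void)
	{
		/* Address of the instruction after the call to this function. */
		printf("called from %p\n", __builtin_return_address(0));
	}

	int main(void)
	{
		who_called_me();
		return 0;
	}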
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index ee4bdfc6b88f..5e94c05dc2fc 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -28,6 +28,14 @@ enum sparc_cpu {
28#define ARCH_SUN4C_SUN4 0 28#define ARCH_SUN4C_SUN4 0
29#define ARCH_SUN4 0 29#define ARCH_SUN4 0
30 30
31extern void mb(void);
32extern void rmb(void);
33extern void wmb(void);
34extern void membar_storeload(void);
35extern void membar_storeload_storestore(void);
36extern void membar_storeload_loadload(void);
37extern void membar_storestore_loadstore(void);
38
31#endif 39#endif
32 40
33#define setipl(__new_ipl) \ 41#define setipl(__new_ipl) \
@@ -78,16 +86,11 @@ enum sparc_cpu {
78 86
79#define nop() __asm__ __volatile__ ("nop") 87#define nop() __asm__ __volatile__ ("nop")
80 88
81#define membar(type) __asm__ __volatile__ ("membar " type : : : "memory")
82#define mb() \
83 membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
84#define rmb() membar("#LoadLoad")
85#define wmb() membar("#StoreStore")
86#define read_barrier_depends() do { } while(0) 89#define read_barrier_depends() do { } while(0)
87#define set_mb(__var, __value) \ 90#define set_mb(__var, __value) \
88 do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0) 91 do { __var = __value; membar_storeload_storestore(); } while(0)
89#define set_wmb(__var, __value) \ 92#define set_wmb(__var, __value) \
90 do { __var = __value; membar("#StoreStore"); } while(0) 93 do { __var = __value; wmb(); } while(0)
91 94
92#ifdef CONFIG_SMP 95#ifdef CONFIG_SMP
93#define smp_mb() mb() 96#define smp_mb() mb()
diff --git a/include/asm-v850/socket.h b/include/asm-v850/socket.h
index 213b852af53e..0240d366a0a4 100644
--- a/include/asm-v850/socket.h
+++ b/include/asm-v850/socket.h
@@ -14,6 +14,8 @@
14#define SO_BROADCAST 6 14#define SO_BROADCAST 6
15#define SO_SNDBUF 7 15#define SO_SNDBUF 7
16#define SO_RCVBUF 8 16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
17#define SO_KEEPALIVE 9 19#define SO_KEEPALIVE 9
18#define SO_OOBINLINE 10 20#define SO_OOBINLINE 10
19#define SO_NO_CHECK 11 21#define SO_NO_CHECK 11
diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h
index d01356f01448..989469e8e0b7 100644
--- a/include/asm-x86_64/checksum.h
+++ b/include/asm-x86_64/checksum.h
@@ -64,7 +64,7 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
64 " adcl $0, %0\n" 64 " adcl $0, %0\n"
65 " notl %0\n" 65 " notl %0\n"
66 "2:" 66 "2:"
67 /* Since the input registers which are loaded with iph and ipl 67 /* Since the input registers which are loaded with iph and ihl
68 are modified, we must also specify them as outputs, or gcc 68 are modified, we must also specify them as outputs, or gcc
69 will assume they contain their original values. */ 69 will assume they contain their original values. */
70 : "=r" (sum), "=r" (iph), "=r" (ihl) 70 : "=r" (sum), "=r" (iph), "=r" (ihl)
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 8e94edf0b984..e682edc24a68 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -51,6 +51,8 @@ extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
51 51
52extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end); 52extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
53extern void e820_setup_gap(void); 53extern void e820_setup_gap(void);
54extern unsigned long e820_hole_size(unsigned long start_pfn,
55 unsigned long end_pfn);
54 56
55extern void __init parse_memopt(char *p, char **end); 57extern void __init parse_memopt(char *p, char **end);
56 58
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
index d9a252ea8210..f2cdbeae5d5b 100644
--- a/include/asm-x86_64/socket.h
+++ b/include/asm-x86_64/socket.h
@@ -14,6 +14,8 @@
14#define SO_BROADCAST 6 14#define SO_BROADCAST 6
15#define SO_SNDBUF 7 15#define SO_SNDBUF 7
16#define SO_RCVBUF 8 16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
17#define SO_KEEPALIVE 9 19#define SO_KEEPALIVE 9
18#define SO_OOBINLINE 10 20#define SO_OOBINLINE 10
19#define SO_NO_CHECK 11 21#define SO_NO_CHECK 11
diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h
index daccd05a14cd..00f83f3a6d72 100644
--- a/include/asm-xtensa/socket.h
+++ b/include/asm-xtensa/socket.h
@@ -24,6 +24,8 @@
24#define SO_BROADCAST 6 24#define SO_BROADCAST 6
25#define SO_SNDBUF 7 25#define SO_SNDBUF 7
26#define SO_RCVBUF 8 26#define SO_RCVBUF 8
27#define SO_SNDBUFFORCE 32
28#define SO_RCVBUFFORCE 33
27#define SO_KEEPALIVE 9 29#define SO_KEEPALIVE 9
28#define SO_OOBINLINE 10 30#define SO_OOBINLINE 10
29#define SO_NO_CHECK 11 31#define SO_NO_CHECK 11
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
index 5f3ab21b339b..3209dd46ea7d 100644
--- a/include/linux/8250_pci.h
+++ b/include/linux/8250_pci.h
@@ -1,2 +1,37 @@
1int pci_siig10x_fn(struct pci_dev *dev, int enable); 1/*
2int pci_siig20x_fn(struct pci_dev *dev, int enable); 2 * Definitions for PCI support.
3 */
4#define FL_BASE_MASK 0x0007
5#define FL_BASE0 0x0000
6#define FL_BASE1 0x0001
7#define FL_BASE2 0x0002
8#define FL_BASE3 0x0003
9#define FL_BASE4 0x0004
10#define FL_GET_BASE(x) (x & FL_BASE_MASK)
11
12/* Use successive BARs (PCI base address registers),
13 else use offset into some specified BAR */
14#define FL_BASE_BARS 0x0008
15
16/* do not assign an irq */
17#define FL_NOIRQ 0x0080
18
19/* Use the Base address register size to cap number of ports */
20#define FL_REGION_SZ_CAP 0x0100
21
22struct pciserial_board {
23 unsigned int flags;
24 unsigned int num_ports;
25 unsigned int base_baud;
26 unsigned int uart_offset;
27 unsigned int reg_shift;
28 unsigned int first_offset;
29};
30
31struct serial_private;
32
33struct serial_private *
34pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board);
35void pciserial_remove_ports(struct serial_private *priv);
36void pciserial_suspend_ports(struct serial_private *priv);
37void pciserial_resume_ports(struct serial_private *priv);
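
The new pciserial_* entry points let other PCI serial drivers reuse the 8250 core's port setup. A hedged sketch of a consumer (the board values are illustrative, and the ERR_PTR return convention is an assumption about pciserial_init_ports):

	static struct pciserial_board example_board = {
		.flags		= FL_BASE0,
		.num_ports	= 4,
		.base_baud	= 115200,
		.uart_offset	= 8,
	};

	static int example_probe(struct pci_dev *dev,
				 const struct pci_device_id *id)
	{
		struct serial_private *priv;

		priv = pciserial_init_ports(dev, &example_board);
		if (IS_ERR(priv))	/* assumed error convention */
			return PTR_ERR(priv);
		pci_set_drvdata(dev, priv);
		return 0;
	}

	static void example_remove(struct pci_dev *dev)
	{
		pciserial_remove_ports(pci_get_drvdata(dev));
	}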
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ca5fcadf9981..a5b74efab067 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1,24 +1,29 @@
1 1
2/* 2/*
3 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 3 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
4 Copyright 2003-2004 Jeff Garzik 4 * Copyright 2003-2004 Jeff Garzik
5 5 *
6 The contents of this file are subject to the Open 6 *
7 Software License version 1.1 that can be found at 7 * This program is free software; you can redistribute it and/or modify
8 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 8 * it under the terms of the GNU General Public License as published by
9 by reference. 9 * the Free Software Foundation; either version 2, or (at your option)
10 10 * any later version.
11 Alternatively, the contents of this file may be used under the terms 11 *
12 of the GNU General Public License version 2 (the "GPL") as distributed 12 * This program is distributed in the hope that it will be useful,
13 in the kernel source COPYING file, in which case the provisions of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 the GPL are applicable instead of the above. If you wish to allow 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 the use of your version of this file only under the terms of the 15 * GNU General Public License for more details.
16 GPL and not to allow others to use your version of this file under 16 *
17 the OSL, indicate your decision by deleting the provisions above and 17 * You should have received a copy of the GNU General Public License
18 replace them with the notice and other provisions required by the GPL. 18 * along with this program; see the file COPYING. If not, write to
19 If you do not delete the provisions above, a recipient may use your 19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 version of this file under either the OSL or the GPL. 20 *
21 21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 * Hardware documentation available from http://www.t13.org/
26 *
22 */ 27 */
23 28
24#ifndef __LINUX_ATA_H__ 29#ifndef __LINUX_ATA_H__
@@ -108,6 +113,8 @@ enum {
108 113
109 /* ATA device commands */ 114 /* ATA device commands */
110 ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ 115 ATA_CMD_CHK_POWER = 0xE5, /* check power mode */
116 ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
117 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
111 ATA_CMD_EDD = 0x90, /* execute device diagnostic */ 118 ATA_CMD_EDD = 0x90, /* execute device diagnostic */
112 ATA_CMD_FLUSH = 0xE7, 119 ATA_CMD_FLUSH = 0xE7,
113 ATA_CMD_FLUSH_EXT = 0xEA, 120 ATA_CMD_FLUSH_EXT = 0xEA,
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
new file mode 100644
index 000000000000..007c290f74d4
--- /dev/null
+++ b/include/linux/dccp.h
@@ -0,0 +1,456 @@
1#ifndef _LINUX_DCCP_H
2#define _LINUX_DCCP_H
3
4#include <linux/types.h>
5#include <asm/byteorder.h>
6
7/* Structure describing an Internet (DCCP) socket address. */
8struct sockaddr_dccp {
9 __u16 sdccp_family; /* Address family */
10 __u16 sdccp_port; /* Port number */
11 __u32 sdccp_addr; /* Internet address */
12 __u32 sdccp_service; /* Service */
 13	/* Pad to size of `struct sockaddr': 16 bytes. */
14 __u32 sdccp_pad;
15};
16
17/**
18 * struct dccp_hdr - generic part of DCCP packet header
19 *
20 * @dccph_sport - Relevant port on the endpoint that sent this packet
21 * @dccph_dport - Relevant port on the other endpoint
22 * @dccph_doff - Data Offset from the start of the DCCP header, in 32-bit words
23 * @dccph_ccval - Used by the HC-Sender CCID
24 * @dccph_cscov - Parts of the packet that are covered by the Checksum field
25 * @dccph_checksum - Internet checksum, depends on dccph_cscov
26 * @dccph_x - 0 = 24 bit sequence number, 1 = 48
27 * @dccph_type - packet type, see DCCP_PKT_ prefixed macros
28 * @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x
29 */
30struct dccp_hdr {
31 __u16 dccph_sport,
32 dccph_dport;
33 __u8 dccph_doff;
34#if defined(__LITTLE_ENDIAN_BITFIELD)
35 __u8 dccph_cscov:4,
36 dccph_ccval:4;
37#elif defined(__BIG_ENDIAN_BITFIELD)
38 __u8 dccph_ccval:4,
39 dccph_cscov:4;
40#else
41#error "Adjust your <asm/byteorder.h> defines"
42#endif
43 __u16 dccph_checksum;
44#if defined(__LITTLE_ENDIAN_BITFIELD)
45 __u32 dccph_x:1,
46 dccph_type:4,
47 dccph_reserved:3,
48 dccph_seq:24;
49#elif defined(__BIG_ENDIAN_BITFIELD)
50 __u32 dccph_reserved:3,
51 dccph_type:4,
52 dccph_x:1,
53 dccph_seq:24;
54#else
55#error "Adjust your <asm/byteorder.h> defines"
56#endif
57};
58
59/**
60 * struct dccp_hdr_ext - the low bits of a 48 bit seq packet
61 *
62 * @dccph_seq_low - low 24 bits of a 48 bit seq packet
63 */
64struct dccp_hdr_ext {
65 __u32 dccph_seq_low;
66};
67
68/**
 69 * struct dccp_hdr_request - Connection initiation request header
70 *
71 * @dccph_req_service - Service to which the client app wants to connect
 72 * @dccph_req_options - list of options (must be a multiple of 32 bits)
73 */
74struct dccp_hdr_request {
75 __u32 dccph_req_service;
76};
77/**
78 * struct dccp_hdr_ack_bits - acknowledgment bits common to most packets
79 *
80 * @dccph_resp_ack_nr_high - 48 bit ack number high order bits, contains GSR
81 * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR
82 */
83struct dccp_hdr_ack_bits {
84 __u32 dccph_reserved1:8,
85 dccph_ack_nr_high:24;
86 __u32 dccph_ack_nr_low;
87};
88/**
 89 * struct dccp_hdr_response - Connection initiation response header
90 *
91 * @dccph_resp_ack_nr_high - 48 bit ack number high order bits, contains GSR
92 * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR
93 * @dccph_resp_service - Echoes the Service Code on a received DCCP-Request
 94 * @dccph_resp_options - list of options (must be a multiple of 32 bits)
95 */
96struct dccp_hdr_response {
97 struct dccp_hdr_ack_bits dccph_resp_ack;
98 __u32 dccph_resp_service;
99};
100
101/**
102 * struct dccp_hdr_reset - Unconditionally shut down a connection
103 *
104 * @dccph_reset_service - Echoes the Service Code on a received DCCP-Request
 105 * @dccph_reset_options - list of options (must be a multiple of 32 bits)
106 */
107struct dccp_hdr_reset {
108 struct dccp_hdr_ack_bits dccph_reset_ack;
109 __u8 dccph_reset_code,
110 dccph_reset_data[3];
111};
112
113enum dccp_pkt_type {
114 DCCP_PKT_REQUEST = 0,
115 DCCP_PKT_RESPONSE,
116 DCCP_PKT_DATA,
117 DCCP_PKT_ACK,
118 DCCP_PKT_DATAACK,
119 DCCP_PKT_CLOSEREQ,
120 DCCP_PKT_CLOSE,
121 DCCP_PKT_RESET,
122 DCCP_PKT_SYNC,
123 DCCP_PKT_SYNCACK,
124 DCCP_PKT_INVALID,
125};
126
127#define DCCP_NR_PKT_TYPES DCCP_PKT_INVALID
128
129static inline unsigned int dccp_packet_hdr_len(const __u8 type)
130{
131 if (type == DCCP_PKT_DATA)
132 return 0;
133 if (type == DCCP_PKT_DATAACK ||
134 type == DCCP_PKT_ACK ||
135 type == DCCP_PKT_SYNC ||
136 type == DCCP_PKT_SYNCACK ||
137 type == DCCP_PKT_CLOSE ||
138 type == DCCP_PKT_CLOSEREQ)
139 return sizeof(struct dccp_hdr_ack_bits);
140 if (type == DCCP_PKT_REQUEST)
141 return sizeof(struct dccp_hdr_request);
142 if (type == DCCP_PKT_RESPONSE)
143 return sizeof(struct dccp_hdr_response);
144 return sizeof(struct dccp_hdr_reset);
145}
146enum dccp_reset_codes {
147 DCCP_RESET_CODE_UNSPECIFIED = 0,
148 DCCP_RESET_CODE_CLOSED,
149 DCCP_RESET_CODE_ABORTED,
150 DCCP_RESET_CODE_NO_CONNECTION,
151 DCCP_RESET_CODE_PACKET_ERROR,
152 DCCP_RESET_CODE_OPTION_ERROR,
153 DCCP_RESET_CODE_MANDATORY_ERROR,
154 DCCP_RESET_CODE_CONNECTION_REFUSED,
155 DCCP_RESET_CODE_BAD_SERVICE_CODE,
156 DCCP_RESET_CODE_TOO_BUSY,
157 DCCP_RESET_CODE_BAD_INIT_COOKIE,
158 DCCP_RESET_CODE_AGGRESSION_PENALTY,
159};
160
161/* DCCP options */
162enum {
163 DCCPO_PADDING = 0,
164 DCCPO_MANDATORY = 1,
165 DCCPO_MIN_RESERVED = 3,
166 DCCPO_MAX_RESERVED = 31,
167 DCCPO_NDP_COUNT = 37,
168 DCCPO_ACK_VECTOR_0 = 38,
169 DCCPO_ACK_VECTOR_1 = 39,
170 DCCPO_TIMESTAMP = 41,
171 DCCPO_TIMESTAMP_ECHO = 42,
172 DCCPO_ELAPSED_TIME = 43,
173 DCCPO_MAX = 45,
174 DCCPO_MIN_CCID_SPECIFIC = 128,
175 DCCPO_MAX_CCID_SPECIFIC = 255,
176};
177
178/* DCCP features */
179enum {
180 DCCPF_RESERVED = 0,
181 DCCPF_SEQUENCE_WINDOW = 3,
182 DCCPF_SEND_ACK_VECTOR = 6,
183 DCCPF_SEND_NDP_COUNT = 7,
184 /* 10-127 reserved */
185 DCCPF_MIN_CCID_SPECIFIC = 128,
186 DCCPF_MAX_CCID_SPECIFIC = 255,
187};
188
189/* DCCP socket options */
190#define DCCP_SOCKOPT_PACKET_SIZE 1
191
192#ifdef __KERNEL__
193
194#include <linux/in.h>
195#include <linux/list.h>
196#include <linux/uio.h>
197#include <linux/workqueue.h>
198
199#include <net/inet_connection_sock.h>
200#include <net/inet_timewait_sock.h>
201#include <net/sock.h>
202#include <net/tcp_states.h>
203#include <net/tcp.h>
204
205enum dccp_state {
206 DCCP_OPEN = TCP_ESTABLISHED,
207 DCCP_REQUESTING = TCP_SYN_SENT,
 208	DCCP_PARTOPEN	 = TCP_FIN_WAIT1, /* FIXME:
 209					     This mapping is awkward: TCP has
 210					     no state matching DCCP_PARTOPEN,
 211					     and TCP_SYN_RECV is already taken
 212					     by DCCP_RESPOND. Why keep mapping
 213					     DCCP states onto TCP ones at all?
 214					     Now that sk_stream_sendmsg is no
 215					     longer used, there seems to be no
 216					     remaining reason to do so */
217 DCCP_LISTEN = TCP_LISTEN,
218 DCCP_RESPOND = TCP_SYN_RECV,
219 DCCP_CLOSING = TCP_CLOSING,
220 DCCP_TIME_WAIT = TCP_TIME_WAIT,
221 DCCP_CLOSED = TCP_CLOSE,
222 DCCP_MAX_STATES = TCP_MAX_STATES,
223};
224
225#define DCCP_STATE_MASK 0xf
226#define DCCP_ACTION_FIN (1<<7)
227
228enum {
229 DCCPF_OPEN = TCPF_ESTABLISHED,
230 DCCPF_REQUESTING = TCPF_SYN_SENT,
231 DCCPF_PARTOPEN = TCPF_FIN_WAIT1,
232 DCCPF_LISTEN = TCPF_LISTEN,
233 DCCPF_RESPOND = TCPF_SYN_RECV,
234 DCCPF_CLOSING = TCPF_CLOSING,
235 DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
236 DCCPF_CLOSED = TCPF_CLOSE,
237};
238
239static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
240{
241 return (struct dccp_hdr *)skb->h.raw;
242}
243
244static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
245{
246 return (struct dccp_hdr_ext *)(skb->h.raw + sizeof(struct dccp_hdr));
247}
248
249static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
250{
251 return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
252}
253
254static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
255{
256 const struct dccp_hdr *dh = dccp_hdr(skb);
257 return __dccp_basic_hdr_len(dh);
258}
259
260static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
261{
262 const struct dccp_hdr *dh = dccp_hdr(skb);
263#if defined(__LITTLE_ENDIAN_BITFIELD)
264 __u64 seq_nr = ntohl(dh->dccph_seq << 8);
265#elif defined(__BIG_ENDIAN_BITFIELD)
266 __u64 seq_nr = ntohl(dh->dccph_seq);
267#else
268#error "Adjust your <asm/byteorder.h> defines"
269#endif
270
271 if (dh->dccph_x != 0)
272 seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low);
273
274 return seq_nr;
275}
276
277static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
278{
279 return (struct dccp_hdr_request *)(skb->h.raw + dccp_basic_hdr_len(skb));
280}
281
282static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
283{
284 return (struct dccp_hdr_ack_bits *)(skb->h.raw + dccp_basic_hdr_len(skb));
285}
286
287static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
288{
289 const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
290#if defined(__LITTLE_ENDIAN_BITFIELD)
291 return (((u64)ntohl(dhack->dccph_ack_nr_high << 8)) << 32) + ntohl(dhack->dccph_ack_nr_low);
292#elif defined(__BIG_ENDIAN_BITFIELD)
293 return (((u64)ntohl(dhack->dccph_ack_nr_high)) << 32) + ntohl(dhack->dccph_ack_nr_low);
294#else
295#error "Adjust your <asm/byteorder.h> defines"
296#endif
297}
298
299static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
300{
301 return (struct dccp_hdr_response *)(skb->h.raw + dccp_basic_hdr_len(skb));
302}
303
304static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
305{
306 return (struct dccp_hdr_reset *)(skb->h.raw + dccp_basic_hdr_len(skb));
307}
308
309static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
310{
311 return __dccp_basic_hdr_len(dh) +
312 dccp_packet_hdr_len(dh->dccph_type);
313}
314
315static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
316{
317 return __dccp_hdr_len(dccp_hdr(skb));
318}
319
320
321/* initial values for each feature */
322#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
323/* FIXME: for now we're using CCID 3 (TFRC) */
324#define DCCPF_INITIAL_CCID 3
325#define DCCPF_INITIAL_SEND_ACK_VECTOR 0
 326/* FIXME: for now we default to 1 but it should really be 0 */
327#define DCCPF_INITIAL_SEND_NDP_COUNT 1
328
329#define DCCP_NDP_LIMIT 0xFFFFFF
330
331/**
332 * struct dccp_options - option values for a DCCP connection
333 * @dccpo_sequence_window - Sequence Window Feature (section 7.5.2)
334 * @dccpo_ccid - Congestion Control Id (CCID) (section 10)
335 * @dccpo_send_ack_vector - Send Ack Vector Feature (section 11.5)
+ * @dccpo_send_ndp_count - Send NDP Count Feature (7.7.2)
+ */
+struct dccp_options {
+	__u64	dccpo_sequence_window;
+	__u8	dccpo_ccid;
+	__u8	dccpo_send_ack_vector;
+	__u8	dccpo_send_ndp_count;
+};
+
+extern void __dccp_options_init(struct dccp_options *dccpo);
+extern void dccp_options_init(struct dccp_options *dccpo);
+extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb);
+
+struct dccp_request_sock {
+	struct inet_request_sock dreq_inet_rsk;
+	__u64			 dreq_iss;
+	__u64			 dreq_isr;
+	__u32			 dreq_service;
+};
+
+static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
+{
+	return (struct dccp_request_sock *)req;
+}
+
+extern struct inet_timewait_death_row dccp_death_row;
+
+/* Read about the ECN nonce to see why it is 253 */
+#define DCCP_MAX_ACK_VECTOR_LEN 253
+
+struct dccp_options_received {
+	u32	dccpor_ndp:24,
+		dccpor_ack_vector_len:8;
+	u32	dccpor_ack_vector_idx:10;
+	/* 22 bits hole, try to pack */
+	u32	dccpor_timestamp;
+	u32	dccpor_timestamp_echo;
+	u32	dccpor_elapsed_time;
+};
+
+struct ccid;
+
+enum dccp_role {
+	DCCP_ROLE_UNDEFINED,
+	DCCP_ROLE_LISTEN,
+	DCCP_ROLE_CLIENT,
+	DCCP_ROLE_SERVER,
+};
+
+/**
+ * struct dccp_sock - DCCP socket state
+ *
+ * @dccps_swl - sequence number window low
+ * @dccps_swh - sequence number window high
+ * @dccps_awl - acknowledgement number window low
+ * @dccps_awh - acknowledgement number window high
+ * @dccps_iss - initial sequence number sent
+ * @dccps_isr - initial sequence number received
+ * @dccps_osr - first OPEN sequence number received
+ * @dccps_gss - greatest sequence number sent
+ * @dccps_gsr - greatest valid sequence number received
+ * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
+ * @dccps_timestamp_time - time of latest TIMESTAMP option
+ * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
+ * @dccps_ext_header_len - network protocol overhead (IP/IPv6 options)
+ * @dccps_pmtu_cookie - Last pmtu seen by socket
+ * @dccps_packet_size - Set thru setsockopt
+ * @dccps_role - Role of this sock, one of %dccp_role
+ * @dccps_ndp_count - number of Non Data Packets since last data packet
+ * @dccps_hc_rx_ackpkts - receiver half connection acked packets
+ */
+struct dccp_sock {
+	/* inet_connection_sock has to be the first member of dccp_sock */
+	struct inet_connection_sock	dccps_inet_connection;
+	__u64				dccps_swl;
+	__u64				dccps_swh;
+	__u64				dccps_awl;
+	__u64				dccps_awh;
+	__u64				dccps_iss;
+	__u64				dccps_isr;
+	__u64				dccps_osr;
+	__u64				dccps_gss;
+	__u64				dccps_gsr;
+	__u64				dccps_gar;
+	unsigned long			dccps_service;
+	struct timeval			dccps_timestamp_time;
+	__u32				dccps_timestamp_echo;
+	__u32				dccps_packet_size;
+	unsigned long			dccps_ndp_count;
+	__u16				dccps_ext_header_len;
+	__u32				dccps_pmtu_cookie;
+	__u32				dccps_mss_cache;
+	struct dccp_options		dccps_options;
+	struct dccp_ackpkts		*dccps_hc_rx_ackpkts;
+	void				*dccps_hc_rx_ccid_private;
+	void				*dccps_hc_tx_ccid_private;
+	struct ccid			*dccps_hc_rx_ccid;
+	struct ccid			*dccps_hc_tx_ccid;
+	struct dccp_options_received	dccps_options_received;
+	enum dccp_role			dccps_role:2;
+};
+
+static inline struct dccp_sock *dccp_sk(const struct sock *sk)
+{
+	return (struct dccp_sock *)sk;
+}
+
+static inline const char *dccp_role(const struct sock *sk)
+{
+	switch (dccp_sk(sk)->dccps_role) {
+	case DCCP_ROLE_UNDEFINED: return "undefined";
+	case DCCP_ROLE_LISTEN:	  return "listen";
+	case DCCP_ROLE_SERVER:	  return "server";
+	case DCCP_ROLE_CLIENT:	  return "client";
+	}
+	return NULL;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DCCP_H */
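The dccp_sk() and dccp_rsk() casts above are safe only because the generic socket is the first member of the DCCP struct, as the in-line comment notes. A minimal, self-contained C sketch of that first-member embedding idiom (all names hypothetical, not from this patch):

    #include <stdio.h>

    struct base { int state; };

    struct derived {
            struct base b;          /* must stay the first member */
            int extra;
    };

    static struct derived *to_derived(struct base *bp)
    {
            /* valid: &d->b has the same address as &d */
            return (struct derived *)bp;
    }

    int main(void)
    {
            struct derived d = { .b = { .state = 1 }, .extra = 42 };
            struct base *bp = &d.b;

            printf("extra = %d\n", to_derived(bp)->extra);
            return 0;
    }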
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a0ab26aab450..ed1440ea4c91 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -250,6 +250,12 @@ struct ethtool_stats {
 	u64	data[0];
 };
 
+struct ethtool_perm_addr {
+	u32	cmd;	/* ETHTOOL_GPERMADDR */
+	u32	size;
+	u8	data[0];
+};
+
 struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
@@ -261,6 +267,8 @@ u32 ethtool_op_get_sg(struct net_device *dev);
 int ethtool_op_set_sg(struct net_device *dev, u32 data);
 u32 ethtool_op_get_tso(struct net_device *dev);
 int ethtool_op_set_tso(struct net_device *dev, u32 data);
+int ethtool_op_get_perm_addr(struct net_device *dev,
+			     struct ethtool_perm_addr *addr, u8 *data);
 
 /**
  * &ethtool_ops - Alter and report network device settings
@@ -294,7 +302,8 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data);
  * get_strings: Return a set of strings that describe the requested objects
  * phys_id: Identify the device
  * get_stats: Return statistics about the device
- *
+ * get_perm_addr: Gets the permanent hardware address
+ *
  * Description:
 
  * get_settings:
@@ -352,6 +361,7 @@ struct ethtool_ops {
 	int	(*phys_id)(struct net_device *, u32);
 	int	(*get_stats_count)(struct net_device *);
 	void	(*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);
+	int	(*get_perm_addr)(struct net_device *, struct ethtool_perm_addr *, u8 *);
 	int	(*begin)(struct net_device *);
 	void	(*complete)(struct net_device *);
 };
@@ -389,6 +399,7 @@ struct ethtool_ops {
 #define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
 #define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
 #define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#define ETHTOOL_GPERMADDR	0x00000020 /* Get permanent hardware address */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
@@ -408,6 +419,8 @@ struct ethtool_ops {
 #define SUPPORTED_FIBRE			(1 << 10)
 #define SUPPORTED_BNC			(1 << 11)
 #define SUPPORTED_10000baseT_Full	(1 << 12)
+#define SUPPORTED_Pause			(1 << 13)
+#define SUPPORTED_Asym_Pause		(1 << 14)
 
 /* Indicates what features are advertised by the interface. */
 #define ADVERTISED_10baseT_Half		(1 << 0)
@@ -423,6 +436,8 @@ struct ethtool_ops {
 #define ADVERTISED_FIBRE		(1 << 10)
 #define ADVERTISED_BNC			(1 << 11)
 #define ADVERTISED_10000baseT_Full	(1 << 12)
+#define ADVERTISED_Pause		(1 << 13)
+#define ADVERTISED_Asym_Pause		(1 << 14)
 
 /* The following are all involved in forcing a particular link
  * mode for the device for setting things.  When getting the
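A sketch of how a driver might wire up the new ETHTOOL_GPERMADDR support. The "mydrv" names are hypothetical; the generic helper and the perm_addr field are the ones introduced here, and the helper is assumed to copy dev->perm_addr, so the driver must fill that in at probe time:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    static struct ethtool_ops mydrv_ethtool_ops = {
            /* ... other handlers ... */
            .get_perm_addr = ethtool_op_get_perm_addr, /* generic helper */
    };

    static void mydrv_setup_ethtool(struct net_device *dev)
    {
            /* record the factory address before userspace can override
             * dev_addr; the helper reports this copy back later */
            memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
            dev->ethtool_ops = &mydrv_ethtool_ops;
    }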
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 9debe6bbe5f0..bab303dafd6e 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -26,8 +26,12 @@
 #include <linux/if_hippi.h>
 
 #ifdef __KERNEL__
-extern unsigned short hippi_type_trans(struct sk_buff *skb,
-				       struct net_device *dev);
+
+struct hippi_cb {
+	__u32	ifield;
+};
+
+extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
 
 extern struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index b5b58e9c054c..fc2d4c8225aa 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -110,6 +110,8 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
 {
 	return (struct ethhdr *)skb->mac.raw;
 }
+
+extern struct ctl_table ether_table[];
 #endif
 
 #endif	/* _LINUX_IF_ETHER_H */
diff --git a/include/linux/if_fc.h b/include/linux/if_fc.h
index 33330b458b95..376a34ea4723 100644
--- a/include/linux/if_fc.h
+++ b/include/linux/if_fc.h
@@ -44,7 +44,7 @@ struct fcllc {
 	__u8  ssap;			/* source SAP */
 	__u8  llc;			/* LLC control field */
 	__u8  protid[3];		/* protocol id */
-	__u16 ethertype;		/* ether type field */
+	__be16 ethertype;		/* ether type field */
 };
 
 #endif	/* _LINUX_IF_FC_H */
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index a912818e6361..1288a161bc0b 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -85,7 +85,7 @@ struct fddi_snap_hdr
 	__u8	ssap;			/* always 0xAA */
 	__u8	ctrl;			/* always 0x03 */
 	__u8	oui[FDDI_K_OUI_LEN];	/* organizational universal id */
-	__u16	ethertype;		/* packet type ID field */
+	__be16	ethertype;		/* packet type ID field */
 	} __attribute__ ((packed));
 
 /* Define FDDI LLC frame header */
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 3c94b1736570..511999c7eeda 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -191,10 +191,12 @@ struct frad_local
 	int buffer;		/* current buffer for S508 firmware */
 };
 
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
 #endif /* __KERNEL__ */
 
 #endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
 
+#ifdef __KERNEL__
+extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
+#endif
+
 #endif
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h
index c8ca72c46f76..94d31ca7d71a 100644
--- a/include/linux/if_hippi.h
+++ b/include/linux/if_hippi.h
@@ -102,9 +102,9 @@ struct hippi_fp_hdr
 #error	"Please fix <asm/byteorder.h>"
 #endif
 #else
-	__u32		fixed;
+	__be32		fixed;
 #endif
-	__u32		d2_size;
+	__be32		d2_size;
 } __attribute__ ((packed));
 
 struct hippi_le_hdr
@@ -144,7 +144,7 @@ struct hippi_snap_hdr
 	__u8	ssap;			/* always 0xAA */
 	__u8	ctrl;			/* always 0x03 */
 	__u8	oui[HIPPI_OUI_LEN];	/* organizational universal id (zero)*/
-	__u16	ethertype;		/* packet type ID field */
+	__be16	ethertype;		/* packet type ID field */
 } __attribute__ ((packed));
 
 struct hippi_hdr
diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h
index 3fba9e2f5427..5502f597cf0e 100644
--- a/include/linux/if_tr.h
+++ b/include/linux/if_tr.h
@@ -43,12 +43,16 @@ struct trh_hdr {
 };
 
 #ifdef __KERNEL__
+#include <linux/config.h>
 #include <linux/skbuff.h>
 
 static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb)
 {
 	return (struct trh_hdr *)skb->mac.raw;
 }
+#ifdef CONFIG_SYSCTL
+extern struct ctl_table tr_table[];
+#endif
 #endif
 
 /* This is a Token-Ring LLC structure */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 62a9d89dfbe2..17d0c0d40b0e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -155,7 +155,6 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
 {
 	struct net_device_stats *stats;
 
-	skb->real_dev = skb->dev;
 	skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
 	if (skb->dev == NULL) {
 		dev_kfree_skb_any(skb);
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 0c31ef0b5bad..28f4f3b36950 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -129,6 +129,9 @@ struct igmpv3_query {
 #include <linux/skbuff.h>
 #include <linux/in.h>
 
+extern int sysctl_igmp_max_memberships;
+extern int sysctl_igmp_max_msf;
+
 struct ip_sf_socklist
 {
 	unsigned int		sl_max;
diff --git a/include/linux/in.h b/include/linux/in.h
index fb88c66d748d..ba355384016a 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -32,6 +32,7 @@ enum {
   IPPROTO_PUP = 12,		/* PUP protocol				*/
   IPPROTO_UDP = 17,		/* User Datagram Protocol		*/
   IPPROTO_IDP = 22,		/* XNS IDP protocol			*/
+  IPPROTO_DCCP = 33,		/* Datagram Congestion Control Protocol	*/
   IPPROTO_RSVP = 46,		/* RSVP protocol			*/
   IPPROTO_GRE = 47,		/* Cisco GRE tunnels (rfc 1701,1702)	*/
 
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
new file mode 100644
index 000000000000..a4606e5810e5
--- /dev/null
+++ b/include/linux/inet_diag.h
@@ -0,0 +1,138 @@
+#ifndef _INET_DIAG_H_
+#define _INET_DIAG_H_ 1
+
+/* Just some random number */
+#define TCPDIAG_GETSOCK 18
+#define DCCPDIAG_GETSOCK 19
+
+#define INET_DIAG_GETSOCK_MAX 24
+
+/* Socket identity */
+struct inet_diag_sockid {
+	__u16	idiag_sport;
+	__u16	idiag_dport;
+	__u32	idiag_src[4];
+	__u32	idiag_dst[4];
+	__u32	idiag_if;
+	__u32	idiag_cookie[2];
+#define INET_DIAG_NOCOOKIE (~0U)
+};
+
+/* Request structure */
+
+struct inet_diag_req {
+	__u8	idiag_family;		/* Family of addresses. */
+	__u8	idiag_src_len;
+	__u8	idiag_dst_len;
+	__u8	idiag_ext;		/* Query extended information */
+
+	struct inet_diag_sockid id;
+
+	__u32	idiag_states;		/* States to dump */
+	__u32	idiag_dbs;		/* Tables to dump (NI) */
+};
+
+enum {
+	INET_DIAG_REQ_NONE,
+	INET_DIAG_REQ_BYTECODE,
+};
+
+#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE
+
+/* Bytecode is sequence of 4 byte commands followed by variable arguments.
+ * All the commands identified by "code" are conditional jumps forward:
+ * to offset cc+"yes" or to offset cc+"no". "yes" is supposed to be
+ * length of the command and its arguments.
+ */
+
+struct inet_diag_bc_op {
+	unsigned char	code;
+	unsigned char	yes;
+	unsigned short	no;
+};
+
+enum {
+	INET_DIAG_BC_NOP,
+	INET_DIAG_BC_JMP,
+	INET_DIAG_BC_S_GE,
+	INET_DIAG_BC_S_LE,
+	INET_DIAG_BC_D_GE,
+	INET_DIAG_BC_D_LE,
+	INET_DIAG_BC_AUTO,
+	INET_DIAG_BC_S_COND,
+	INET_DIAG_BC_D_COND,
+};
+
+struct inet_diag_hostcond {
+	__u8	family;
+	__u8	prefix_len;
+	int	port;
+	__u32	addr[0];
+};
+
+/* Base info structure. It contains socket identity (addrs/ports/cookie)
+ * and, alas, the information shown by netstat. */
+struct inet_diag_msg {
+	__u8	idiag_family;
+	__u8	idiag_state;
+	__u8	idiag_timer;
+	__u8	idiag_retrans;
+
+	struct inet_diag_sockid id;
+
+	__u32	idiag_expires;
+	__u32	idiag_rqueue;
+	__u32	idiag_wqueue;
+	__u32	idiag_uid;
+	__u32	idiag_inode;
+};
+
+/* Extensions */
+
+enum {
+	INET_DIAG_NONE,
+	INET_DIAG_MEMINFO,
+	INET_DIAG_INFO,
+	INET_DIAG_VEGASINFO,
+	INET_DIAG_CONG,
+};
+
+#define INET_DIAG_MAX INET_DIAG_CONG
+
+
+/* INET_DIAG_MEM */
+
+struct inet_diag_meminfo {
+	__u32	idiag_rmem;
+	__u32	idiag_wmem;
+	__u32	idiag_fmem;
+	__u32	idiag_tmem;
+};
+
+/* INET_DIAG_VEGASINFO */
+
+struct tcpvegas_info {
+	__u32	tcpv_enabled;
+	__u32	tcpv_rttcnt;
+	__u32	tcpv_rtt;
+	__u32	tcpv_minrtt;
+};
+
+#ifdef __KERNEL__
+struct sock;
+struct inet_hashinfo;
+
+struct inet_diag_handler {
+	struct inet_hashinfo	*idiag_hashinfo;
+	void			(*idiag_get_info)(struct sock *sk,
+						  struct inet_diag_msg *r,
+						  void *info);
+	__u16			idiag_info_size;
+	__u16			idiag_type;
+};
+
+extern int  inet_diag_register(const struct inet_diag_handler *handler);
+extern void inet_diag_unregister(const struct inet_diag_handler *handler);
+#endif /* __KERNEL__ */
+
+#endif /* _INET_DIAG_H_ */
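The bytecode comment above describes conditional forward jumps, where "yes" covers the command plus its arguments and running exactly to the end of the program means accept. A userspace sketch of a two-op program accepting sockets whose source port is >= 1024; the convention that the port rides in the second op's "no" field follows the kernel interpreter and iproute2 and is an assumption here, not something spelled out in this header:

    #include <string.h>
    #include <linux/inet_diag.h>

    static size_t build_sport_ge(unsigned short port, void *buf)
    {
            struct inet_diag_bc_op ops[2] = {
                    { .code = INET_DIAG_BC_S_GE,
                      /* skip this op and its argument op: land exactly at
                       * end of program, i.e. accept */
                      .yes  = 2 * sizeof(struct inet_diag_bc_op),
                      /* jump 4 bytes past the end: reject */
                      .no   = 2 * sizeof(struct inet_diag_bc_op) + 4 },
                    { .no = port },  /* argument op carrying the port */
            };

            memcpy(buf, ops, sizeof(ops));
            return sizeof(ops);      /* attach as INET_DIAG_REQ_BYTECODE */
    }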
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 31e7cedd9f84..33e8a19a1a0f 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -196,6 +196,8 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 #endif
 #endif
 
+extern int inet_sk_rebuild_header(struct sock *sk);
+
 struct iphdr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	__u8	ihl:4,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 6fcd6a0ade24..3c7dbc6a0a70 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -193,6 +193,11 @@ struct inet6_skb_parm {
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
 
+static inline int inet6_iif(const struct sk_buff *skb)
+{
+	return IP6CB(skb)->iif;
+}
+
 struct tcp6_request_sock {
 	struct tcp_request_sock	req;
 	struct in6_addr		loc_addr;
@@ -308,6 +313,36 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 #define __ipv6_only_sock(sk)	(inet6_sk(sk)->ipv6only)
 #define ipv6_only_sock(sk)	((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
+
+#include <linux/tcp.h>
+
+struct tcp6_timewait_sock {
+	struct tcp_timewait_sock tw_v6_sk;
+	struct in6_addr		 tw_v6_daddr;
+	struct in6_addr		 tw_v6_rcv_saddr;
+};
+
+static inline struct tcp6_timewait_sock *tcp6_twsk(const struct sock *sk)
+{
+	return (struct tcp6_timewait_sock *)sk;
+}
+
+static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
+{
+	return likely(sk->sk_state != TCP_TIME_WAIT) ?
+		&inet6_sk(sk)->rcv_saddr : &tcp6_twsk(sk)->tw_v6_rcv_saddr;
+}
+
+static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
+{
+	return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
+}
+
+static inline int inet_v6_ipv6only(const struct sock *sk)
+{
+	return likely(sk->sk_state != TCP_TIME_WAIT) ?
+		ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only;
+}
 #else
 #define __ipv6_only_sock(sk)	0
 #define ipv6_only_sock(sk)	0
@@ -322,8 +357,19 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 	return NULL;
 }
 
-#endif
+#define __tcp_v6_rcv_saddr(__sk)	NULL
+#define tcp_v6_rcv_saddr(__sk)		NULL
+#define tcp_twsk_ipv6only(__sk)		0
+#define inet_v6_ipv6only(__sk)		0
+#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
 
-#endif
+#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif)		   \
+	(((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))		&& \
+	 ((__sk)->sk_family == AF_INET6)				&& \
+	 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))		&& \
+	 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr))		&& \
+	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 
-#endif
+#endif /* __KERNEL__ */
+
+#endif /* _IPV6_H */
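A small sketch of the new inet6_iif() helper in use; it replaces open-coded IP6CB() lookups when an IPv6 receive path needs the inbound interface index (function name hypothetical):

    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    static int my_rcv_from_expected_if(struct sk_buff *skb, int expected_if)
    {
            /* inbound interface was recorded in the skb control block */
            return inet6_iif(skb) == expected_if;
    }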
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cd9ba63563b..fc05a9899288 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1,23 +1,26 @@
 /*
-   Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
-   Copyright 2003-2004 Jeff Garzik
-
-   The contents of this file are subject to the Open
-   Software License version 1.1 that can be found at
-   http://www.opensource.org/licenses/osl-1.1.txt and is included herein
-   by reference.
-
-   Alternatively, the contents of this file may be used under the terms
-   of the GNU General Public License version 2 (the "GPL") as distributed
-   in the kernel source COPYING file, in which case the provisions of
-   the GPL are applicable instead of the above.  If you wish to allow
-   the use of your version of this file only under the terms of the
-   GPL and not to allow others to use your version of this file under
-   the OSL, indicate your decision by deleting the provisions above and
-   replace them with the notice and other provisions required by the GPL.
-   If you do not delete the provisions above, a recipient may use your
-   version of this file under either the OSL or the GPL.
-
+ *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
  */
 
 #ifndef __LINUX_LIBATA_H__
@@ -113,6 +116,8 @@ enum {
 	ATA_FLAG_MMIO		= (1 << 6), /* use MMIO, not PIO */
 	ATA_FLAG_SATA_RESET	= (1 << 7), /* use COMRESET */
 	ATA_FLAG_PIO_DMA	= (1 << 8), /* PIO cmds via DMA */
+	ATA_FLAG_NOINTR		= (1 << 9), /* FIXME: Remove this once
+					     * proper HSM is in place. */
 
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi layer */
 	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
@@ -363,7 +368,7 @@ struct ata_port_operations {
 
 	void (*host_stop) (struct ata_host_set *host_set);
 
-	void (*bmdma_stop) (struct ata_port *ap);
+	void (*bmdma_stop) (struct ata_queued_cmd *qc);
 	u8   (*bmdma_status) (struct ata_port *ap);
 };
 
@@ -424,7 +429,7 @@ extern void ata_dev_id_string(u16 *id, unsigned char *s,
 extern void ata_dev_config(struct ata_port *ap, unsigned int i);
 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
-extern void ata_bmdma_stop(struct ata_port *ap);
+extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
 extern u8   ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
 extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat);
@@ -644,7 +649,7 @@ static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
 	ap->ops->scr_write(ap, reg, val);
 }
 
-static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
-				   u32 val)
+static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
+				   u32 val)
 {
 	ap->ops->scr_write(ap, reg, val);
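With bmdma_stop now taking the queued command instead of the port, a low-level driver's handler gains access to the command context. A hypothetical sketch against the new prototype ("mydrv" is illustrative):

    #include <linux/libata.h>

    static void mydrv_bmdma_stop(struct ata_queued_cmd *qc)
    {
            struct ata_port *ap = qc->ap;   /* port still reachable via qc */

            ata_bmdma_stop(qc);             /* generic PCI BMDMA stop */
            /* ... any controller-specific cleanup using ap or qc->tf ... */
            (void)ap;
    }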
diff --git a/include/linux/list.h b/include/linux/list.h
index aab2db21b013..e6ec59682274 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -419,6 +419,20 @@ static inline void list_splice_init(struct list_head *list,
 	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
 
 /**
+ * list_for_each_entry_safe_continue - iterate over a list of the given type,
+ *	continuing after an existing point, safe against removal of list entries
+ * @pos:	the type * to use as a loop counter.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member)		\
+	for (pos = list_entry(pos->member.next, typeof(*pos), member),		\
+		n = list_entry(pos->member.next, typeof(*pos), member);		\
+	     &pos->member != (head);						\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
  * list_for_each_rcu	-	iterate over an rcu-protected list
  * @pos:	the &struct list_head to use as a loop counter.
  * @head:	the head for your list.
@@ -620,6 +634,57 @@ static inline void hlist_add_after(struct hlist_node *n,
 		next->next->pprev  = &next->next;
 }
 
+/**
+ * hlist_add_before_rcu - adds the specified element to the specified hlist
+ * before the specified node while permitting racing traversals.
+ * @n: the new element to add to the hash list.
+ * @next: the existing element to add the new element before.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_before_rcu(struct hlist_node *n,
+					struct hlist_node *next)
+{
+	n->pprev = next->pprev;
+	n->next = next;
+	smp_wmb();
+	next->pprev = &n->next;
+	*(n->pprev) = n;
+}
+
+/**
+ * hlist_add_after_rcu - adds the specified element to the specified hlist
+ * after the specified node while permitting racing traversals.
+ * @prev: the existing element to add the new element after.
+ * @n: the new element to add to the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_after_rcu(struct hlist_node *prev,
+				       struct hlist_node *n)
+{
+	n->next = prev->next;
+	n->pprev = &prev->next;
+	smp_wmb();
+	prev->next = n;
+	if (n->next)
+		n->next->pprev = &n->next;
+}
+
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \
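A usage sketch of the new RCU-aware hlist insertion: a writer serializes with other writers under its own lock, while readers traverse lock-free under rcu_read_lock(). The smp_wmb() in the helpers orders node initialization before publication. All "my_" names are hypothetical:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct my_item {
            int key;
            struct hlist_node node;
    };

    static HLIST_HEAD(my_list);
    static DEFINE_SPINLOCK(my_lock);

    static void my_insert_pair(struct my_item *a, struct my_item *b)
    {
            spin_lock(&my_lock);            /* writers serialize here */
            hlist_add_head_rcu(&a->node, &my_list);
            hlist_add_after_rcu(&a->node, &b->node);
            spin_unlock(&my_lock);
    }

    static struct my_item *my_find(int key)
    {
            struct my_item *it;
            struct hlist_node *pos;

            rcu_read_lock();                /* readers take no lock */
            hlist_for_each_entry_rcu(it, pos, &my_list, node)
                    if (it->key == key) {
                            rcu_read_unlock();
                            return it;      /* caller must manage lifetime */
                    }
            rcu_read_unlock();
            return NULL;
    }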
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 374b615ea9ea..9b8d0476988a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
 #define MII_EXPANSION       0x06        /* Expansion register        */
 #define MII_CTRL1000        0x09        /* 1000BASE-T control        */
 #define MII_STAT1000        0x0a        /* 1000BASE-T status         */
+#define MII_ESTATUS	    0x0f	/* Extended Status           */
 #define MII_DCOUNTER        0x12        /* Disconnect counter        */
 #define MII_FCSCOUNTER      0x13        /* False carrier counter     */
 #define MII_NWAYTEST        0x14        /* N-way auto-neg test reg   */
@@ -54,7 +55,10 @@
 #define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
 #define BMSR_RFAULT             0x0010  /* Remote fault detected       */
 #define BMSR_ANEGCOMPLETE       0x0020  /* Auto-negotiation complete   */
-#define BMSR_RESV               0x07c0  /* Unused...                   */
+#define BMSR_RESV               0x00c0  /* Unused...                   */
+#define BMSR_ESTATEN		0x0100	/* Extended Status in R15      */
+#define BMSR_100HALF2		0x0200	/* Can do 100BASE-T2 HDX       */
+#define BMSR_100FULL2		0x0400	/* Can do 100BASE-T2 FDX       */
 #define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
 #define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
 #define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
 #define EXPANSION_MFAULTS       0x0010  /* Multiple faults detected    */
 #define EXPANSION_RESV          0xffe0  /* Unused...                   */
 
+#define ESTATUS_1000_TFULL	0x2000	/* Can do 1000BT Full          */
+#define ESTATUS_1000_THALF	0x1000	/* Can do 1000BT Half          */
+
 /* N-way test register. */
 #define NWAYTEST_RESV1          0x00ff  /* Unused...                   */
 #define NWAYTEST_LOOPBACK       0x0100  /* Enable loopback for N-way   */
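A sketch of probing gigabit ability with the new bits: BMSR_ESTATEN gates whether the extended status register (MII_ESTATUS) is valid at all. The mdio_read callback stands in for whatever MDIO accessor a driver already has:

    #include <linux/mii.h>

    struct net_device;

    static int phy_supports_1000bt(struct net_device *dev,
                                   int (*mdio_read)(struct net_device *, int, int),
                                   int phy_id)
    {
            int bmsr = mdio_read(dev, phy_id, MII_BMSR);

            if (!(bmsr & BMSR_ESTATEN))
                    return 0;       /* no extended status register */

            return !!(mdio_read(dev, phy_id, MII_ESTATUS) &
                      (ESTATUS_1000_TFULL | ESTATUS_1000_THALF));
    }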
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f90f674eb3b0..9a0893f3249e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -63,11 +63,12 @@ struct device;
 
 struct mmc_host {
 	struct device		*dev;
+	struct class_device	class_dev;
+	int			index;
 	struct mmc_host_ops	*ops;
 	unsigned int		f_min;
 	unsigned int		f_max;
 	u32			ocr_avail;
-	char			host_name[8];
 
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
@@ -97,6 +98,7 @@ extern void mmc_free_host(struct mmc_host *);
 
 #define mmc_priv(x)	((void *)((x) + 1))
 #define mmc_dev(x)	((x)->dev)
+#define mmc_hostname(x)	((x)->class_dev.class_id)
 
 extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
 extern int mmc_resume_host(struct mmc_host *);
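With host_name gone from struct mmc_host, logging now goes through the embedded class device's name via the new mmc_hostname() macro; a minimal hypothetical sketch:

    #include <linux/kernel.h>
    #include <linux/mmc/host.h>

    static void my_report_insert(struct mmc_host *host)
    {
            /* class_dev.class_id carries the "mmcN" style name */
            printk(KERN_INFO "%s: card inserted\n", mmc_hostname(host));
    }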
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index dce53ac1625d..47da39ba3f03 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -1,6 +1,6 @@
 /*
  * Device tables which are exported to userspace via
- * scripts/table2alias.c.  You must keep that file in sync with this
+ * scripts/mod/file2alias.c.  You must keep that file in sync with this
  * header.
  */
 
@@ -33,7 +33,8 @@ struct ieee1394_device_id {
 	__u32 model_id;
 	__u32 specifier_id;
 	__u32 version;
-	kernel_ulong_t driver_data;
+	kernel_ulong_t driver_data
+		__attribute__((aligned(sizeof(kernel_ulong_t))));
 };
 
 
@@ -182,9 +183,18 @@ struct of_device_id
 	char	name[32];
 	char	type[32];
 	char	compatible[128];
+#ifdef __KERNEL__
 	void	*data;
+#else
+	kernel_ulong_t data;
+#endif
 };
 
+/* VIO */
+struct vio_device_id {
+	char type[32];
+	char compat[32];
+};
 
 /* PCMCIA */
 
@@ -208,7 +218,8 @@ struct pcmcia_device_id {
 #ifdef __KERNEL__
 	const char *	prod_id[4];
 #else
-	kernel_ulong_t	prod_id[4];
+	kernel_ulong_t	prod_id[4]
+		__attribute__((aligned(sizeof(kernel_ulong_t))));
 #endif
 
 	/* not matched against */
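A sketch of how a PPC64 VIO driver would use the new vio_device_id: a null-terminated match table exported for module aliasing. The "IBM,l-lan" strings follow the virtual-ethernet convention and are illustrative, not taken from this patch:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static struct vio_device_id my_vio_ids[] = {
            { .type = "network", .compat = "IBM,l-lan" },
            { "", "" },                     /* terminator */
    };
    MODULE_DEVICE_TABLE(vio, my_vio_ids);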
diff --git a/include/linux/net.h b/include/linux/net.h
index 20cb226b2268..4e981585a89a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -84,6 +84,7 @@ enum sock_type {
84 SOCK_RAW = 3, 84 SOCK_RAW = 3,
85 SOCK_RDM = 4, 85 SOCK_RDM = 4,
86 SOCK_SEQPACKET = 5, 86 SOCK_SEQPACKET = 5,
87 SOCK_DCCP = 6,
87 SOCK_PACKET = 10, 88 SOCK_PACKET = 10,
88}; 89};
89 90
@@ -282,5 +283,15 @@ static struct proto_ops name##_ops = { \
282#define MODULE_ALIAS_NETPROTO(proto) \ 283#define MODULE_ALIAS_NETPROTO(proto) \
283 MODULE_ALIAS("net-pf-" __stringify(proto)) 284 MODULE_ALIAS("net-pf-" __stringify(proto))
284 285
286#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
287 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
288
289#ifdef CONFIG_SYSCTL
290#include <linux/sysctl.h>
291extern ctl_table net_table[];
292extern int net_msg_cost;
293extern int net_msg_burst;
294#endif
295
285#endif /* __KERNEL__ */ 296#endif /* __KERNEL__ */
286#endif /* _LINUX_NET_H */ 297#endif /* _LINUX_NET_H */
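Together with IPPROTO_DCCP above, the new SOCK_DCCP type lets userspace open a DCCP socket. A self-contained sketch, assuming a kernel with the DCCP stack enabled; the fallback defines cover headers that predate this merge:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>

    #ifndef SOCK_DCCP
    #define SOCK_DCCP 6
    #endif
    #ifndef IPPROTO_DCCP
    #define IPPROTO_DCCP 33
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

            if (fd < 0) {
                    perror("socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP)");
                    return 1;
            }
            puts("DCCP socket created");
            close(fd);
            return 0;
    }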
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3a0ed7f9e801..7c717907896d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -244,6 +244,7 @@ struct netdev_boot_setup {
244}; 244};
245#define NETDEV_BOOT_SETUP_MAX 8 245#define NETDEV_BOOT_SETUP_MAX 8
246 246
247extern int __init netdev_boot_setup(char *str);
247 248
248/* 249/*
249 * The DEVICE structure. 250 * The DEVICE structure.
@@ -336,6 +337,7 @@ struct net_device
336 /* Interface address info. */ 337 /* Interface address info. */
337 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 338 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
338 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ 339 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
340 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
339 unsigned char addr_len; /* hardware address length */ 341 unsigned char addr_len; /* hardware address length */
340 unsigned short dev_id; /* for shared network cards */ 342 unsigned short dev_id; /* for shared network cards */
341 343
@@ -497,10 +499,12 @@ static inline void *netdev_priv(struct net_device *dev)
497#define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev)) 499#define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev))
498 500
499struct packet_type { 501struct packet_type {
500 __be16 type; /* This is really htons(ether_type). */ 502 __be16 type; /* This is really htons(ether_type). */
501 struct net_device *dev; /* NULL is wildcarded here */ 503 struct net_device *dev; /* NULL is wildcarded here */
502 int (*func) (struct sk_buff *, struct net_device *, 504 int (*func) (struct sk_buff *,
503 struct packet_type *); 505 struct net_device *,
506 struct packet_type *,
507 struct net_device *);
504 void *af_packet_priv; 508 void *af_packet_priv;
505 struct list_head list; 509 struct list_head list;
506}; 510};
@@ -671,6 +675,7 @@ extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
671extern void dev_init(void); 675extern void dev_init(void);
672 676
673extern int netdev_nit; 677extern int netdev_nit;
678extern int netdev_budget;
674 679
675/* Called by rtnetlink.c:rtnl_unlock() */ 680/* Called by rtnetlink.c:rtnl_unlock() */
676extern void netdev_run_todo(void); 681extern void netdev_run_todo(void);
@@ -697,19 +702,9 @@ static inline int netif_carrier_ok(const struct net_device *dev)
697 702
698extern void __netdev_watchdog_up(struct net_device *dev); 703extern void __netdev_watchdog_up(struct net_device *dev);
699 704
700static inline void netif_carrier_on(struct net_device *dev) 705extern void netif_carrier_on(struct net_device *dev);
701{
702 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
703 linkwatch_fire_event(dev);
704 if (netif_running(dev))
705 __netdev_watchdog_up(dev);
706}
707 706
708static inline void netif_carrier_off(struct net_device *dev) 707extern void netif_carrier_off(struct net_device *dev);
709{
710 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
711 linkwatch_fire_event(dev);
712}
713 708
714/* Hot-plugging. */ 709/* Hot-plugging. */
715static inline int netif_device_present(struct net_device *dev) 710static inline int netif_device_present(struct net_device *dev)
@@ -916,6 +911,14 @@ extern int skb_checksum_help(struct sk_buff *skb, int inward);
916extern void net_enable_timestamp(void); 911extern void net_enable_timestamp(void);
917extern void net_disable_timestamp(void); 912extern void net_disable_timestamp(void);
918 913
914#ifdef CONFIG_PROC_FS
915extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
916extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
917extern void dev_seq_stop(struct seq_file *seq, void *v);
918#endif
919
920extern void linkwatch_run_queue(void);
921
919#endif /* __KERNEL__ */ 922#endif /* __KERNEL__ */
920 923
921#endif /* _LINUX_DEV_H */ 924#endif /* _LINUX_DEV_H */
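A sketch of a protocol tap updated for the new four-argument receive hook. The added struct net_device * is the original ingress device, which appears to replace the skb->real_dev field dropped from the VLAN path above; the ethertype and names are hypothetical:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
    {
            /* dev may be a VLAN device; orig_dev is the physical ingress */
            kfree_skb(skb);
            return 0;
    }

    static struct packet_type my_pt = {
            .type = __constant_htons(0x88b5), /* local experimental ethertype */
            .func = my_rcv,
    };

    static int __init my_init(void)
    {
            dev_add_pack(&my_pt);
            return 0;
    }

    static void __exit my_exit(void)
    {
            dev_remove_pack(&my_pt);
    }

    module_init(my_init);
    module_exit(my_exit);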
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2e2045482cb1..be365e70ee99 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -21,10 +21,23 @@
 #define NF_STOP 5
 #define NF_MAX_VERDICT NF_STOP
 
+/* we overload the higher bits for encoding auxiliary data such as the queue
+ * number. Not nice, but better than additional function arguments. */
+#define NF_VERDICT_MASK 0x0000ffff
+#define NF_VERDICT_BITS 16
+
+#define NF_VERDICT_QMASK 0xffff0000
+#define NF_VERDICT_QBITS 16
+
+#define NF_QUEUE_NR(x) (((x << NF_VERDICT_QBITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+
+/* only for userspace compatibility */
+#ifndef __KERNEL__
 /* Generic cache responses from hook functions.
    <= 0x2000 is used for protocol-flags. */
 #define NFC_UNKNOWN 0x4000
 #define NFC_ALTERED 0x8000
+#endif
 
 #ifdef __KERNEL__
 #include <linux/config.h>
@@ -101,15 +114,51 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
 extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
 
-typedef void nf_logfn(unsigned int hooknum,
+/* those NF_LOG_* defines and struct nf_loginfo are legacy definitions that will
+ * disappear once iptables is replaced with pkttables.  Please DO NOT use them
+ * for any new code! */
+#define NF_LOG_TCPSEQ		0x01	/* Log TCP sequence numbers */
+#define NF_LOG_TCPOPT		0x02	/* Log TCP options */
+#define NF_LOG_IPOPT		0x04	/* Log IP options */
+#define NF_LOG_UID		0x08	/* Log UID owning local socket */
+#define NF_LOG_MASK		0x0f
+
+#define NF_LOG_TYPE_LOG		0x01
+#define NF_LOG_TYPE_ULOG	0x02
+
+struct nf_loginfo {
+	u_int8_t type;
+	union {
+		struct {
+			u_int32_t copy_len;
+			u_int16_t group;
+			u_int16_t qthreshold;
+		} ulog;
+		struct {
+			u_int8_t level;
+			u_int8_t logflags;
+		} log;
+	} u;
+};
+
+typedef void nf_logfn(unsigned int pf,
+		      unsigned int hooknum,
 		      const struct sk_buff *skb,
 		      const struct net_device *in,
 		      const struct net_device *out,
+		      const struct nf_loginfo *li,
 		      const char *prefix);
 
+struct nf_logger {
+	struct module	*me;
+	nf_logfn	*logfn;
+	char		*name;
+};
+
 /* Function to register/unregister log function. */
-int nf_log_register(int pf, nf_logfn *logfn);
-void nf_log_unregister(int pf, nf_logfn *logfn);
+int nf_log_register(int pf, struct nf_logger *logger);
+int nf_log_unregister_pf(int pf);
+void nf_log_unregister_logger(struct nf_logger *logger);
 
 /* Calls the registered backend logging function */
 void nf_log_packet(int pf,
@@ -117,6 +166,7 @@ void nf_log_packet(int pf,
 		   const struct sk_buff *skb,
 		   const struct net_device *in,
 		   const struct net_device *out,
+		   struct nf_loginfo *li,
 		   const char *fmt, ...);
 
 /* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -175,11 +225,16 @@ int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt,
 		  int *len);
 
 /* Packet queuing */
-typedef int (*nf_queue_outfn_t)(struct sk_buff *skb,
-				struct nf_info *info, void *data);
+struct nf_queue_handler {
+	int (*outfn)(struct sk_buff *skb, struct nf_info *info,
+		     unsigned int queuenum, void *data);
+	void *data;
+	char *name;
+};
 extern int nf_register_queue_handler(int pf,
-				     nf_queue_outfn_t outfn, void *data);
+				     struct nf_queue_handler *qh);
 extern int nf_unregister_queue_handler(int pf);
+extern void nf_unregister_queue_handlers(struct nf_queue_handler *qh);
 extern void nf_reinject(struct sk_buff *skb,
 			struct nf_info *info,
 			unsigned int verdict);
@@ -190,6 +245,27 @@ extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
 /* FIXME: Before cache is ever used, this must be implemented for real. */
 extern void nf_invalidate_cache(int pf);
 
+/* Call this before modifying an existing packet: ensures it is
+   modifiable and linear to the point you care about (writable_len).
+   Returns true or false. */
+extern int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len);
+
+struct nf_queue_rerouter {
+	void (*save)(const struct sk_buff *skb, struct nf_info *info);
+	int (*reroute)(struct sk_buff **skb, const struct nf_info *info);
+	int rer_size;
+};
+
+#define nf_info_reroute(x) ((void *)x + sizeof(struct nf_info))
+
+extern int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer);
+extern int nf_unregister_queue_rerouter(int pf);
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+extern struct proc_dir_entry *proc_net_netfilter;
+#endif
+
 #else /* !CONFIG_NETFILTER */
 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
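A sketch of registering a backend with the reworked logging API: the nf_logger struct replaces the bare nf_logfn, and a logger can now be unregistered across all families in one call. Module and function names are hypothetical:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/socket.h>
    #include <linux/netfilter.h>

    static void my_logfn(unsigned int pf, unsigned int hooknum,
                         const struct sk_buff *skb,
                         const struct net_device *in,
                         const struct net_device *out,
                         const struct nf_loginfo *li,
                         const char *prefix)
    {
            /* ... emit the packet record somewhere ... */
    }

    static struct nf_logger my_logger = {
            .me    = THIS_MODULE,
            .logfn = my_logfn,
            .name  = "my-log",
    };

    static int __init my_log_init(void)
    {
            return nf_log_register(PF_INET, &my_logger);
    }

    static void __exit my_log_exit(void)
    {
            nf_log_unregister_logger(&my_logger);
    }

    module_init(my_log_init);
    module_exit(my_log_exit);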
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
new file mode 100644
index 000000000000..1d5b10ae2399
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink.h
@@ -0,0 +1,169 @@
+#ifndef _NFNETLINK_H
+#define _NFNETLINK_H
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+/* nfnetlink groups: Up to 32 maximum - backwards compatibility for userspace */
+#define NF_NETLINK_CONNTRACK_NEW		0x00000001
+#define NF_NETLINK_CONNTRACK_UPDATE		0x00000002
+#define NF_NETLINK_CONNTRACK_DESTROY		0x00000004
+#define NF_NETLINK_CONNTRACK_EXP_NEW		0x00000008
+#define NF_NETLINK_CONNTRACK_EXP_UPDATE		0x00000010
+#define NF_NETLINK_CONNTRACK_EXP_DESTROY	0x00000020
+#endif
+
+enum nfnetlink_groups {
+	NFNLGRP_NONE,
+#define NFNLGRP_NONE			NFNLGRP_NONE
+	NFNLGRP_CONNTRACK_NEW,
+#define NFNLGRP_CONNTRACK_NEW		NFNLGRP_CONNTRACK_NEW
+	NFNLGRP_CONNTRACK_UPDATE,
+#define NFNLGRP_CONNTRACK_UPDATE	NFNLGRP_CONNTRACK_UPDATE
+	NFNLGRP_CONNTRACK_DESTROY,
+#define NFNLGRP_CONNTRACK_DESTROY	NFNLGRP_CONNTRACK_DESTROY
+	NFNLGRP_CONNTRACK_EXP_NEW,
+#define	NFNLGRP_CONNTRACK_EXP_NEW	NFNLGRP_CONNTRACK_EXP_NEW
+	NFNLGRP_CONNTRACK_EXP_UPDATE,
+#define NFNLGRP_CONNTRACK_EXP_UPDATE	NFNLGRP_CONNTRACK_EXP_UPDATE
+	NFNLGRP_CONNTRACK_EXP_DESTROY,
+#define NFNLGRP_CONNTRACK_EXP_DESTROY	NFNLGRP_CONNTRACK_EXP_DESTROY
+	__NFNLGRP_MAX,
+};
+#define NFNLGRP_MAX	(__NFNLGRP_MAX - 1)
+
+/* Generic structure for encapsulation of optional netfilter information.
+ * It is reminiscent of sockaddr, but with sa_family replaced
+ * with attribute type.
+ * ! This should someday be put somewhere generic as now rtnetlink and
+ * ! nfnetlink use the same attributes methods. - J. Schulist.
+ */
+
+struct nfattr
+{
+	u_int16_t nfa_len;
+	u_int16_t nfa_type;
+} __attribute__ ((packed));
+
+/* FIXME: Shamelessly copy and pasted from rtnetlink.h, it's time
+ * to put this in a generic file */
+
+#define NFA_ALIGNTO	4
+#define NFA_ALIGN(len)	(((len) + NFA_ALIGNTO - 1) & ~(NFA_ALIGNTO - 1))
+#define NFA_OK(nfa,len)	((len) > 0 && (nfa)->nfa_len >= sizeof(struct nfattr) \
+	&& (nfa)->nfa_len <= (len))
+#define NFA_NEXT(nfa,attrlen)	((attrlen) -= NFA_ALIGN((nfa)->nfa_len), \
+	(struct nfattr *)(((char *)(nfa)) + NFA_ALIGN((nfa)->nfa_len)))
+#define NFA_LENGTH(len)	(NFA_ALIGN(sizeof(struct nfattr)) + (len))
+#define NFA_SPACE(len)	NFA_ALIGN(NFA_LENGTH(len))
+#define NFA_DATA(nfa)	((void *)(((char *)(nfa)) + NFA_LENGTH(0)))
+#define NFA_PAYLOAD(nfa)	((int)((nfa)->nfa_len) - NFA_LENGTH(0))
+#define NFA_NEST(skb, type) \
+({	struct nfattr *__start = (struct nfattr *)(skb)->tail; \
+	NFA_PUT(skb, type, 0, NULL); \
+	__start; })
+#define NFA_NEST_END(skb, start) \
+({	(start)->nfa_len = ((skb)->tail - (unsigned char *)(start)); \
+	(skb)->len; })
+#define NFA_NEST_CANCEL(skb, start) \
+({	if (start) \
+		skb_trim(skb, (unsigned char *)(start) - (skb)->data); \
+	-1; })
+
+/* General form of address family dependent message.
+ */
+struct nfgenmsg {
+	u_int8_t nfgen_family;	/* AF_xxx */
+	u_int8_t version;	/* nfnetlink version */
+	u_int16_t res_id;	/* resource id */
+} __attribute__ ((packed));
+
+#define NFNETLINK_V0	0
+
+#define NFM_NFA(n)	((struct nfattr *)(((char *)(n)) \
+	+ NLMSG_ALIGN(sizeof(struct nfgenmsg))))
+#define NFM_PAYLOAD(n)	NLMSG_PAYLOAD(n, sizeof(struct nfgenmsg))
+
+/* netfilter netlink message types are split in two pieces:
+ * 8 bit subsystem, 8bit operation.
+ */
+
+#define NFNL_SUBSYS_ID(x)	((x & 0xff00) >> 8)
+#define NFNL_MSG_TYPE(x)	(x & 0x00ff)
+
+/* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS()
+ * won't work anymore */
+#define NFNL_SUBSYS_NONE		0
+#define NFNL_SUBSYS_CTNETLINK		1
+#define NFNL_SUBSYS_CTNETLINK_EXP	2
+#define NFNL_SUBSYS_QUEUE		3
+#define NFNL_SUBSYS_ULOG		4
+#define NFNL_SUBSYS_COUNT		5
+
+#ifdef __KERNEL__
+
+#include <linux/netlink.h>
+#include <linux/capability.h>
+
+struct nfnl_callback
+{
+	int (*call)(struct sock *nl, struct sk_buff *skb,
+		    struct nlmsghdr *nlh, struct nfattr *cda[], int *errp);
+	kernel_cap_t cap_required;	/* capabilities required for this msg */
+	u_int16_t attr_count;		/* number of nfattr's */
+};
+
+struct nfnetlink_subsystem
+{
+	const char *name;
+	__u8 subsys_id;			/* nfnetlink subsystem ID */
+	__u8 cb_count;			/* number of callbacks */
+	struct nfnl_callback *cb;	/* callback for individual types */
+};
+
+extern void __nfa_fill(struct sk_buff *skb, int attrtype,
+		       int attrlen, const void *data);
+#define NFA_PUT(skb, attrtype, attrlen, data) \
+({	if (skb_tailroom(skb) < (int)NFA_SPACE(attrlen)) goto nfattr_failure; \
+	__nfa_fill(skb, attrtype, attrlen, data); })
+
+extern struct semaphore nfnl_sem;
+
+#define nfnl_shlock()		down(&nfnl_sem)
+#define nfnl_shlock_nowait()	down_trylock(&nfnl_sem)
+
+#define nfnl_shunlock()		do { up(&nfnl_sem); \
+				     if (nfnl && nfnl->sk_receive_queue.qlen) \
+					     nfnl->sk_data_ready(nfnl, 0); \
+				} while (0)
+
+extern void nfnl_lock(void);
+extern void nfnl_unlock(void);
+
+extern int nfnetlink_subsys_register(struct nfnetlink_subsystem *n);
+extern int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n);
+
+extern int nfattr_parse(struct nfattr *tb[], int maxattr,
+			struct nfattr *nfa, int len);
+
+#define nfattr_parse_nested(tb, max, nfa) \
+	nfattr_parse((tb), (max), NFA_DATA((nfa)), NFA_PAYLOAD((nfa)))
+
+#define nfattr_bad_size(tb, max, cta_min)				\
+({	int __i, __res = 0;						\
+	for (__i = 0; __i < max; __i++)					\
+		if (tb[__i] && NFA_PAYLOAD(tb[__i]) < cta_min[__i]) {	\
+			__res = 1;					\
+			break;						\
+		}							\
+	__res;								\
+})
+
+extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
+			  int echo);
+extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
+
+#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
+	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
+
+#endif	/* __KERNEL__ */
+#endif	/* _NFNETLINK_H */
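A sketch of the attribute-fill pattern the new NFA_* macros expect: NFA_PUT() jumps to a local nfattr_failure label when the skb runs out of tailroom, mirroring rtnetlink's RTA_PUT(). The attribute type used here is hypothetical:

    #include <linux/skbuff.h>
    #include <linux/netfilter/nfnetlink.h>

    static int my_fill_info(struct sk_buff *skb, u_int32_t mark)
    {
            /* attr type 1 is a placeholder for a real subsystem attribute */
            NFA_PUT(skb, 1, sizeof(mark), &mark);
            return 0;

    nfattr_failure:
            return -1;              /* caller trims the partial message */
    }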
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
new file mode 100644
index 000000000000..5c55751c78e4
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -0,0 +1,124 @@
+#ifndef _IPCONNTRACK_NETLINK_H
+#define _IPCONNTRACK_NETLINK_H
+#include <linux/netfilter/nfnetlink.h>
+
+enum cntl_msg_types {
+	IPCTNL_MSG_CT_NEW,
+	IPCTNL_MSG_CT_GET,
+	IPCTNL_MSG_CT_DELETE,
+	IPCTNL_MSG_CT_GET_CTRZERO,
+
+	IPCTNL_MSG_MAX
+};
+
+enum ctnl_exp_msg_types {
+	IPCTNL_MSG_EXP_NEW,
+	IPCTNL_MSG_EXP_GET,
+	IPCTNL_MSG_EXP_DELETE,
+
+	IPCTNL_MSG_EXP_MAX
+};
+
+
+enum ctattr_type {
+	CTA_UNSPEC,
+	CTA_TUPLE_ORIG,
+	CTA_TUPLE_REPLY,
+	CTA_STATUS,
+	CTA_PROTOINFO,
+	CTA_HELP,
+	CTA_NAT,
+	CTA_TIMEOUT,
+	CTA_MARK,
+	CTA_COUNTERS_ORIG,
+	CTA_COUNTERS_REPLY,
+	CTA_USE,
+	CTA_ID,
+	__CTA_MAX
+};
+#define CTA_MAX (__CTA_MAX - 1)
+
+enum ctattr_tuple {
+	CTA_TUPLE_UNSPEC,
+	CTA_TUPLE_IP,
+	CTA_TUPLE_PROTO,
+	__CTA_TUPLE_MAX
+};
+#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
+
+enum ctattr_ip {
+	CTA_IP_UNSPEC,
+	CTA_IP_V4_SRC,
+	CTA_IP_V4_DST,
+	CTA_IP_V6_SRC,
+	CTA_IP_V6_DST,
+	__CTA_IP_MAX
+};
+#define CTA_IP_MAX (__CTA_IP_MAX - 1)
+
+enum ctattr_l4proto {
+	CTA_PROTO_UNSPEC,
+	CTA_PROTO_NUM,
+	CTA_PROTO_SRC_PORT,
+	CTA_PROTO_DST_PORT,
+	CTA_PROTO_ICMP_ID,
+	CTA_PROTO_ICMP_TYPE,
+	CTA_PROTO_ICMP_CODE,
+	__CTA_PROTO_MAX
+};
+#define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1)
+
+enum ctattr_protoinfo {
+	CTA_PROTOINFO_UNSPEC,
+	CTA_PROTOINFO_TCP_STATE,
+	__CTA_PROTOINFO_MAX
+};
+#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
+
+enum ctattr_counters {
+	CTA_COUNTERS_UNSPEC,
+	CTA_COUNTERS_PACKETS,
+	CTA_COUNTERS_BYTES,
+	__CTA_COUNTERS_MAX
+};
+#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
+
+enum ctattr_nat {
+	CTA_NAT_UNSPEC,
+	CTA_NAT_MINIP,
+	CTA_NAT_MAXIP,
+	CTA_NAT_PROTO,
+	__CTA_NAT_MAX
+};
+#define CTA_NAT_MAX (__CTA_NAT_MAX - 1)
+
+enum ctattr_protonat {
+	CTA_PROTONAT_UNSPEC,
+	CTA_PROTONAT_PORT_MIN,
+	CTA_PROTONAT_PORT_MAX,
+	__CTA_PROTONAT_MAX
+};
+#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
+
+enum ctattr_expect {
+	CTA_EXPECT_UNSPEC,
+	CTA_EXPECT_MASTER,
+	CTA_EXPECT_TUPLE,
+	CTA_EXPECT_MASK,
+	CTA_EXPECT_TIMEOUT,
+	CTA_EXPECT_ID,
+	CTA_EXPECT_HELP_NAME,
+	__CTA_EXPECT_MAX
+};
+#define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1)
+
+enum ctattr_help {
+	CTA_HELP_UNSPEC,
+	CTA_HELP_NAME,
+	__CTA_HELP_MAX
+};
+#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
+
+#define CTA_HELP_MAXNAMESIZE	32
+
+#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
new file mode 100644
index 000000000000..b04b03880595
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -0,0 +1,88 @@
+#ifndef _NFNETLINK_LOG_H
+#define _NFNETLINK_LOG_H
+
+/* This file describes the netlink messages (i.e. 'protocol packets'),
+ * and not any kind of function definitions.  It is shared between kernel and
+ * userspace.  Don't put kernel specific stuff in here */
+
+#include <linux/types.h>
+#include <linux/netfilter/nfnetlink.h>
+
+enum nfulnl_msg_types {
+	NFULNL_MSG_PACKET,	/* packet from kernel to userspace */
+	NFULNL_MSG_CONFIG,	/* connect to a particular queue */
+
+	NFULNL_MSG_MAX
+};
+
+struct nfulnl_msg_packet_hdr {
+	u_int16_t	hw_protocol;	/* hw protocol (network order) */
+	u_int8_t	hook;		/* netfilter hook */
+	u_int8_t	_pad;
+} __attribute__ ((packed));
+
+struct nfulnl_msg_packet_hw {
+	u_int16_t	hw_addrlen;
+	u_int16_t	_pad;
+	u_int8_t	hw_addr[8];
+} __attribute__ ((packed));
+
+struct nfulnl_msg_packet_timestamp {
+	aligned_u64	sec;
+	aligned_u64	usec;
+} __attribute__ ((packed));
+
+#define NFULNL_PREFIXLEN	30	/* just like old log target */
+
+enum nfulnl_attr_type {
+	NFULA_UNSPEC,
+	NFULA_PACKET_HDR,
+	NFULA_MARK,			/* u_int32_t nfmark */
+	NFULA_TIMESTAMP,		/* nfulnl_msg_packet_timestamp */
+	NFULA_IFINDEX_INDEV,		/* u_int32_t ifindex */
+	NFULA_IFINDEX_OUTDEV,		/* u_int32_t ifindex */
+	NFULA_IFINDEX_PHYSINDEV,	/* u_int32_t ifindex */
+	NFULA_IFINDEX_PHYSOUTDEV,	/* u_int32_t ifindex */
+	NFULA_HWADDR,			/* nfulnl_msg_packet_hw */
+	NFULA_PAYLOAD,			/* opaque data payload */
+	NFULA_PREFIX,			/* string prefix */
+	NFULA_UID,			/* user id of socket */
+
+	__NFULA_MAX
+};
+#define NFULA_MAX (__NFULA_MAX - 1)
+
+enum nfulnl_msg_config_cmds {
+	NFULNL_CFG_CMD_NONE,
+	NFULNL_CFG_CMD_BIND,
+	NFULNL_CFG_CMD_UNBIND,
+	NFULNL_CFG_CMD_PF_BIND,
+	NFULNL_CFG_CMD_PF_UNBIND,
+};
+
+struct nfulnl_msg_config_cmd {
+	u_int8_t	command;	/* nfulnl_msg_config_cmds */
+} __attribute__ ((packed));
+
+struct nfulnl_msg_config_mode {
+	u_int32_t	copy_range;
+	u_int8_t	copy_mode;
+	u_int8_t	_pad;
+} __attribute__ ((packed));
+
+enum nfulnl_attr_config {
+	NFULA_CFG_UNSPEC,
+	NFULA_CFG_CMD,		/* nfulnl_msg_config_cmd */
+	NFULA_CFG_MODE,		/* nfulnl_msg_config_mode */
+	NFULA_CFG_NLBUFSIZ,	/* u_int32_t buffer size */
+	NFULA_CFG_TIMEOUT,	/* u_int32_t in 1/100 s */
+	NFULA_CFG_QTHRESH,	/* u_int32_t */
+	__NFULA_CFG_MAX
+};
+#define NFULA_CFG_MAX (__NFULA_CFG_MAX - 1)
+
+#define NFULNL_COPY_NONE	0x00
+#define NFULNL_COPY_META	0x01
+#define NFULNL_COPY_PACKET	0x02
+
+#endif /* _NFNETLINK_LOG_H */
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
new file mode 100644
index 000000000000..9e774373244c
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -0,0 +1,89 @@
+#ifndef _NFNETLINK_QUEUE_H
+#define _NFNETLINK_QUEUE_H
+
+#include <linux/types.h>
+#include <linux/netfilter/nfnetlink.h>
+
+enum nfqnl_msg_types {
+	NFQNL_MSG_PACKET,	/* packet from kernel to userspace */
+	NFQNL_MSG_VERDICT,	/* verdict from userspace to kernel */
+	NFQNL_MSG_CONFIG,	/* connect to a particular queue */
+
+	NFQNL_MSG_MAX
+};
+
+struct nfqnl_msg_packet_hdr {
+	u_int32_t	packet_id;	/* unique ID of packet in queue */
+	u_int16_t	hw_protocol;	/* hw protocol (network order) */
+	u_int8_t	hook;		/* netfilter hook */
+} __attribute__ ((packed));
+
+struct nfqnl_msg_packet_hw {
+	u_int16_t	hw_addrlen;
+	u_int16_t	_pad;
+	u_int8_t	hw_addr[8];
+} __attribute__ ((packed));
+
+struct nfqnl_msg_packet_timestamp {
+	aligned_u64	sec;
+	aligned_u64	usec;
+} __attribute__ ((packed));
+
+enum nfqnl_attr_type {
+	NFQA_UNSPEC,
+	NFQA_PACKET_HDR,
+	NFQA_VERDICT_HDR,		/* nfqnl_msg_verdict_hdr */
+	NFQA_MARK,			/* u_int32_t nfmark */
+	NFQA_TIMESTAMP,			/* nfqnl_msg_packet_timestamp */
+	NFQA_IFINDEX_INDEV,		/* u_int32_t ifindex */
+	NFQA_IFINDEX_OUTDEV,		/* u_int32_t ifindex */
+	NFQA_IFINDEX_PHYSINDEV,		/* u_int32_t ifindex */
+	NFQA_IFINDEX_PHYSOUTDEV,	/* u_int32_t ifindex */
+	NFQA_HWADDR,			/* nfqnl_msg_packet_hw */
+	NFQA_PAYLOAD,			/* opaque data payload */
+
+	__NFQA_MAX
+};
+#define NFQA_MAX (__NFQA_MAX - 1)
+
+struct nfqnl_msg_verdict_hdr {
+	u_int32_t verdict;
+	u_int32_t id;
+} __attribute__ ((packed));
+
+
+enum nfqnl_msg_config_cmds {
+	NFQNL_CFG_CMD_NONE,
+	NFQNL_CFG_CMD_BIND,
+	NFQNL_CFG_CMD_UNBIND,
+	NFQNL_CFG_CMD_PF_BIND,
+	NFQNL_CFG_CMD_PF_UNBIND,
+};
+
+struct nfqnl_msg_config_cmd {
+	u_int8_t	command;	/* nfqnl_msg_config_cmds */
+	u_int8_t	_pad;
+	u_int16_t	pf;		/* AF_xxx for PF_[UN]BIND */
+} __attribute__ ((packed));
+
+enum nfqnl_config_mode {
+	NFQNL_COPY_NONE,
+	NFQNL_COPY_META,
+	NFQNL_COPY_PACKET,
+};
+
+struct nfqnl_msg_config_params {
+	u_int32_t	copy_range;
+	u_int8_t	copy_mode;	/* enum nfqnl_config_mode */
+} __attribute__ ((packed));
+
+
+enum nfqnl_attr_config {
+	NFQA_CFG_UNSPEC,
+	NFQA_CFG_CMD,		/* nfqnl_msg_config_cmd */
+	NFQA_CFG_PARAMS,	/* nfqnl_msg_config_params */
+	__NFQA_CFG_MAX
+};
+#define NFQA_CFG_MAX (__NFQA_CFG_MAX - 1)
+
+#endif /* _NFNETLINK_QUEUE_H */
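A userspace sketch of the verdict side of this protocol: filling the new nfqnl_msg_verdict_hdr for a packet previously delivered as an NFQNL_MSG_PACKET. Netlink framing is omitted, and the network-byte-order convention for both fields is stated as an assumption:

    #include <linux/types.h>
    #include <linux/netfilter.h>                  /* NF_ACCEPT */
    #include <linux/netfilter/nfnetlink_queue.h>
    #include <arpa/inet.h>                        /* htonl */

    static struct nfqnl_msg_verdict_hdr make_accept_verdict(u_int32_t id)
    {
            struct nfqnl_msg_verdict_hdr vh = {
                    .verdict = htonl(NF_ACCEPT),  /* assumed network order */
                    .id      = htonl(id),         /* from the packet header */
            };
            return vh;                            /* attach as NFQA_VERDICT_HDR */
    }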
diff --git a/include/linux/netfilter_decnet.h b/include/linux/netfilter_decnet.h
index 3064eec9cb8e..6f425369ee29 100644
--- a/include/linux/netfilter_decnet.h
+++ b/include/linux/netfilter_decnet.h
@@ -9,6 +9,8 @@
 
 #include <linux/netfilter.h>
 
+/* only for userspace compatibility */
+#ifndef __KERNEL__
 /* IP Cache bits. */
 /* Src IP address. */
 #define NFC_DN_SRC		0x0001
@@ -18,6 +20,7 @@
 #define NFC_DN_IF_IN		0x0004
 /* Output device. */
 #define NFC_DN_IF_OUT		0x0008
+#endif /* ! __KERNEL__ */
 
 /* DECnet Hooks */
 /* After promisc drops, checksum checks. */
@@ -53,7 +56,21 @@ struct nf_dn_rtmsg {
 
 #define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)))
 
+#ifndef __KERNEL__
+/* backwards compatibility for userspace */
 #define DNRMG_L1_GROUP 0x01
 #define DNRMG_L2_GROUP 0x02
+#endif
+
+enum {
+	DNRNG_NLGRP_NONE,
+#define DNRNG_NLGRP_NONE	DNRNG_NLGRP_NONE
+	DNRNG_NLGRP_L1,
+#define DNRNG_NLGRP_L1		DNRNG_NLGRP_L1
+	DNRNG_NLGRP_L2,
+#define DNRNG_NLGRP_L2		DNRNG_NLGRP_L2
+	__DNRNG_NLGRP_MAX
+};
+#define DNRNG_NLGRP_MAX	(__DNRNG_NLGRP_MAX - 1)
 
 #endif /*__LINUX_DECNET_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 3ebc36afae1a..fdc4a9527343 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -8,6 +8,8 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/netfilter.h> 9#include <linux/netfilter.h>
10 10
11/* only for userspace compatibility */
12#ifndef __KERNEL__
11/* IP Cache bits. */ 13/* IP Cache bits. */
12/* Src IP address. */ 14/* Src IP address. */
13#define NFC_IP_SRC 0x0001 15#define NFC_IP_SRC 0x0001
@@ -35,6 +37,7 @@
35#define NFC_IP_DST_PT 0x0400 37#define NFC_IP_DST_PT 0x0400
36/* Something else about the proto */ 38/* Something else about the proto */
37#define NFC_IP_PROTO_UNKNOWN 0x2000 39#define NFC_IP_PROTO_UNKNOWN 0x2000
40#endif /* ! __KERNEL__ */
38 41
39/* IP Hooks */ 42/* IP Hooks */
40/* After promisc drops, checksum checks. */ 43/* After promisc drops, checksum checks. */
@@ -77,11 +80,6 @@ enum nf_ip_hook_priorities {
77#ifdef __KERNEL__ 80#ifdef __KERNEL__
78extern int ip_route_me_harder(struct sk_buff **pskb); 81extern int ip_route_me_harder(struct sk_buff **pskb);
79 82
80/* Call this before modifying an existing IP packet: ensures it is
81 modifiable and linear to the point you care about (writable_len).
82 Returns true or false. */
83extern int skb_ip_make_writable(struct sk_buff **pskb,
84 unsigned int writable_len);
85#endif /*__KERNEL__*/ 83#endif /*__KERNEL__*/
86 84
87#endif /*__LINUX_IP_NETFILTER_H*/ 85#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 08fe5f7d14a0..088742befe49 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -65,6 +65,63 @@ enum ip_conntrack_status {
65 65
66 /* Both together */ 66 /* Both together */
67 IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE), 67 IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
68
69 /* Connection is dying (removed from lists), can not be unset. */
70 IPS_DYING_BIT = 9,
71 IPS_DYING = (1 << IPS_DYING_BIT),
72};
73
74/* Connection tracking event bits */
75enum ip_conntrack_events
76{
77 /* New conntrack */
78 IPCT_NEW_BIT = 0,
79 IPCT_NEW = (1 << IPCT_NEW_BIT),
80
81 /* Expected connection */
82 IPCT_RELATED_BIT = 1,
83 IPCT_RELATED = (1 << IPCT_RELATED_BIT),
84
85 /* Destroyed conntrack */
86 IPCT_DESTROY_BIT = 2,
87 IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
88
89 /* Timer has been refreshed */
90 IPCT_REFRESH_BIT = 3,
91 IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
92
93 /* Status has changed */
94 IPCT_STATUS_BIT = 4,
95 IPCT_STATUS = (1 << IPCT_STATUS_BIT),
96
97 /* Update of protocol info */
98 IPCT_PROTOINFO_BIT = 5,
99 IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
100
101 /* Volatile protocol info */
102 IPCT_PROTOINFO_VOLATILE_BIT = 6,
103 IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
104
105 /* New helper for conntrack */
106 IPCT_HELPER_BIT = 7,
107 IPCT_HELPER = (1 << IPCT_HELPER_BIT),
108
109 /* Update of helper info */
110 IPCT_HELPINFO_BIT = 8,
111 IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
112
113 /* Volatile helper info */
114 IPCT_HELPINFO_VOLATILE_BIT = 9,
115 IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
116
117 /* NAT info */
118 IPCT_NATINFO_BIT = 10,
119 IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
120};
121
122enum ip_conntrack_expect_events {
123 IPEXP_NEW_BIT = 0,
124 IPEXP_NEW = (1 << IPEXP_NEW_BIT),
68}; 125};
69 126
70#ifdef __KERNEL__ 127#ifdef __KERNEL__
@@ -152,6 +209,9 @@ struct ip_conntrack
152 /* Current number of expected connections */ 209 /* Current number of expected connections */
153 unsigned int expecting; 210 unsigned int expecting;
154 211
212 /* Unique ID that identifies this conntrack */
213 unsigned int id;
214
155 /* Helper, if any. */ 215 /* Helper, if any. */
156 struct ip_conntrack_helper *helper; 216 struct ip_conntrack_helper *helper;
157 217
@@ -171,7 +231,7 @@ struct ip_conntrack
171#endif /* CONFIG_IP_NF_NAT_NEEDED */ 231#endif /* CONFIG_IP_NF_NAT_NEEDED */
172 232
173#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 233#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
174 unsigned long mark; 234 u_int32_t mark;
175#endif 235#endif
176 236
177 /* Traversed often, so hopefully in different cacheline to top */ 237 /* Traversed often, so hopefully in different cacheline to top */
@@ -200,6 +260,9 @@ struct ip_conntrack_expect
200 /* Usage count. */ 260 /* Usage count. */
201 atomic_t use; 261 atomic_t use;
202 262
263 /* Unique ID */
264 unsigned int id;
265
203#ifdef CONFIG_IP_NF_NAT_NEEDED 266#ifdef CONFIG_IP_NF_NAT_NEEDED
204 /* This is the original per-proto part, used to map the 267 /* This is the original per-proto part, used to map the
205 * expected connection the way the recipient expects. */ 268 * expected connection the way the recipient expects. */
@@ -239,7 +302,12 @@ ip_conntrack_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
239} 302}
240 303
241/* decrement reference count on a conntrack */ 304/* decrement reference count on a conntrack */
242extern void ip_conntrack_put(struct ip_conntrack *ct); 305static inline void
306ip_conntrack_put(struct ip_conntrack *ct)
307{
308 IP_NF_ASSERT(ct);
309 nf_conntrack_put(&ct->ct_general);
310}
243 311
244/* call to create an explicit dependency on ip_conntrack. */ 312/* call to create an explicit dependency on ip_conntrack. */
245extern void need_ip_conntrack(void); 313extern void need_ip_conntrack(void);
@@ -274,12 +342,50 @@ extern void
274ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *data), 342ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *data),
275 void *data); 343 void *data);
276 344
345extern struct ip_conntrack_helper *
346__ip_conntrack_helper_find_byname(const char *);
347extern struct ip_conntrack_helper *
348ip_conntrack_helper_find_get(const struct ip_conntrack_tuple *tuple);
349extern void ip_conntrack_helper_put(struct ip_conntrack_helper *helper);
350
351extern struct ip_conntrack_protocol *
352__ip_conntrack_proto_find(u_int8_t protocol);
353extern struct ip_conntrack_protocol *
354ip_conntrack_proto_find_get(u_int8_t protocol);
355extern void ip_conntrack_proto_put(struct ip_conntrack_protocol *proto);
356
357extern void ip_ct_remove_expectations(struct ip_conntrack *ct);
358
359extern struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *,
360 struct ip_conntrack_tuple *);
361
362extern void ip_conntrack_free(struct ip_conntrack *ct);
363
364extern void ip_conntrack_hash_insert(struct ip_conntrack *ct);
365
366extern struct ip_conntrack_expect *
367__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
368
369extern struct ip_conntrack_expect *
370ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple);
371
372extern struct ip_conntrack_tuple_hash *
373__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
374 const struct ip_conntrack *ignored_conntrack);
375
376extern void ip_conntrack_flush(void);
377
277/* It's confirmed if it is, or has been in the hash table. */ 378/* It's confirmed if it is, or has been in the hash table. */
278static inline int is_confirmed(struct ip_conntrack *ct) 379static inline int is_confirmed(struct ip_conntrack *ct)
279{ 380{
280 return test_bit(IPS_CONFIRMED_BIT, &ct->status); 381 return test_bit(IPS_CONFIRMED_BIT, &ct->status);
281} 382}
282 383
384static inline int is_dying(struct ip_conntrack *ct)
385{
386 return test_bit(IPS_DYING_BIT, &ct->status);
387}
388
283extern unsigned int ip_conntrack_htable_size; 389extern unsigned int ip_conntrack_htable_size;
284 390
285struct ip_conntrack_stat 391struct ip_conntrack_stat
@@ -303,6 +409,85 @@ struct ip_conntrack_stat
303 409
304#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++) 410#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
305 411
412#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
413#include <linux/notifier.h>
414#include <linux/interrupt.h>
415
416struct ip_conntrack_ecache {
417 struct ip_conntrack *ct;
418 unsigned int events;
419};
420DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
421
422#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x)
423
424extern struct notifier_block *ip_conntrack_chain;
425extern struct notifier_block *ip_conntrack_expect_chain;
426
427static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
428{
429 return notifier_chain_register(&ip_conntrack_chain, nb);
430}
431
432static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
433{
434 return notifier_chain_unregister(&ip_conntrack_chain, nb);
435}
436
437static inline int
438ip_conntrack_expect_register_notifier(struct notifier_block *nb)
439{
440 return notifier_chain_register(&ip_conntrack_expect_chain, nb);
441}
442
443static inline int
444ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
445{
446 return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
447}
448
449extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
450extern void __ip_ct_event_cache_init(struct ip_conntrack *ct);
451
452static inline void
453ip_conntrack_event_cache(enum ip_conntrack_events event,
454 const struct sk_buff *skb)
455{
456 struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct;
457 struct ip_conntrack_ecache *ecache;
458
459 local_bh_disable();
460 ecache = &__get_cpu_var(ip_conntrack_ecache);
461 if (ct != ecache->ct)
462 __ip_ct_event_cache_init(ct);
463 ecache->events |= event;
464 local_bh_enable();
465}
466
467static inline void ip_conntrack_event(enum ip_conntrack_events event,
468 struct ip_conntrack *ct)
469{
470 if (is_confirmed(ct) && !is_dying(ct))
471 notifier_call_chain(&ip_conntrack_chain, event, ct);
472}
473
474static inline void
475ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
476 struct ip_conntrack_expect *exp)
477{
478 notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
479}
480#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
481static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
482 const struct sk_buff *skb) {}
483static inline void ip_conntrack_event(enum ip_conntrack_events event,
484 struct ip_conntrack *ct) {}
485static inline void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) {}
486static inline void
487ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
488 struct ip_conntrack_expect *exp) {}
489#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
490
306#ifdef CONFIG_IP_NF_NAT_NEEDED 491#ifdef CONFIG_IP_NF_NAT_NEEDED
307static inline int ip_nat_initialized(struct ip_conntrack *conntrack, 492static inline int ip_nat_initialized(struct ip_conntrack *conntrack,
308 enum ip_nat_manip_type manip) 493 enum ip_nat_manip_type manip)
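
As a usage sketch of the event cache introduced above: code that mutates conntrack state calls ip_conntrack_event_cache() on the hot path, and the per-CPU batch is flushed to ip_conntrack_chain by ip_ct_deliver_cached_events() once the packet is confirmed. A hypothetical call site:

/* Hypothetical helper-side call site, assuming
 * <linux/netfilter_ipv4/ip_conntrack.h> is included: note that the
 * protocol info of the conntrack attached to this skb changed.  The
 * event is only cached per CPU here; delivery happens later, once the
 * connection is confirmed. */
static inline void example_note_protoinfo_change(const struct sk_buff *skb)
{
	ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
}
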
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h
index 694aec9b4784..dc4d2a0575de 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_core.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_core.h
@@ -2,6 +2,9 @@
2#define _IP_CONNTRACK_CORE_H 2#define _IP_CONNTRACK_CORE_H
3#include <linux/netfilter.h> 3#include <linux/netfilter.h>
4 4
5#define MAX_IP_CT_PROTO 256
6extern struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO];
7
5/* This header is used to share core functionality between the 8/* This header is used to share core functionality between the
6 standalone connection tracking module, and the compatibility layer's use 9 standalone connection tracking module, and the compatibility layer's use
7 of connection tracking. */ 10 of connection tracking. */
@@ -38,12 +41,19 @@ extern int __ip_conntrack_confirm(struct sk_buff **pskb);
38/* Confirm a connection: returns NF_DROP if packet must be dropped. */ 41/* Confirm a connection: returns NF_DROP if packet must be dropped. */
39static inline int ip_conntrack_confirm(struct sk_buff **pskb) 42static inline int ip_conntrack_confirm(struct sk_buff **pskb)
40{ 43{
41 if ((*pskb)->nfct 44 struct ip_conntrack *ct = (struct ip_conntrack *)(*pskb)->nfct;
42 && !is_confirmed((struct ip_conntrack *)(*pskb)->nfct)) 45 int ret = NF_ACCEPT;
43 return __ip_conntrack_confirm(pskb); 46
44 return NF_ACCEPT; 47 if (ct) {
48 if (!is_confirmed(ct))
49 ret = __ip_conntrack_confirm(pskb);
50 ip_ct_deliver_cached_events(ct);
51 }
52 return ret;
45} 53}
46 54
55extern void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp);
56
47extern struct list_head *ip_conntrack_hash; 57extern struct list_head *ip_conntrack_hash;
48extern struct list_head ip_conntrack_expect_list; 58extern struct list_head ip_conntrack_expect_list;
49extern rwlock_t ip_conntrack_lock; 59extern rwlock_t ip_conntrack_lock;
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper.h b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
index 3692daa93dec..8d69279ccfe4 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
@@ -24,6 +24,8 @@ struct ip_conntrack_helper
24 int (*help)(struct sk_buff **pskb, 24 int (*help)(struct sk_buff **pskb,
25 struct ip_conntrack *ct, 25 struct ip_conntrack *ct,
26 enum ip_conntrack_info conntrackinfo); 26 enum ip_conntrack_info conntrackinfo);
27
28 int (*to_nfattr)(struct sk_buff *skb, const struct ip_conntrack *ct);
27}; 29};
28 30
29extern int ip_conntrack_helper_register(struct ip_conntrack_helper *); 31extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h
index e20b57c5e1b7..b6b99be8632a 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h
@@ -2,6 +2,7 @@
2#ifndef _IP_CONNTRACK_PROTOCOL_H 2#ifndef _IP_CONNTRACK_PROTOCOL_H
3#define _IP_CONNTRACK_PROTOCOL_H 3#define _IP_CONNTRACK_PROTOCOL_H
4#include <linux/netfilter_ipv4/ip_conntrack.h> 4#include <linux/netfilter_ipv4/ip_conntrack.h>
5#include <linux/netfilter/nfnetlink_conntrack.h>
5 6
6struct seq_file; 7struct seq_file;
7 8
@@ -47,22 +48,22 @@ struct ip_conntrack_protocol
47 int (*error)(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, 48 int (*error)(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
48 unsigned int hooknum); 49 unsigned int hooknum);
49 50
51 /* convert protoinfo to nfnetlink attributes */
52 int (*to_nfattr)(struct sk_buff *skb, struct nfattr *nfa,
53 const struct ip_conntrack *ct);
54
55 int (*tuple_to_nfattr)(struct sk_buff *skb,
56 const struct ip_conntrack_tuple *t);
57 int (*nfattr_to_tuple)(struct nfattr *tb[],
58 struct ip_conntrack_tuple *t);
59
50 /* Module (if any) which this is connected to. */ 60 /* Module (if any) which this is connected to. */
51 struct module *me; 61 struct module *me;
52}; 62};
53 63
54#define MAX_IP_CT_PROTO 256
55extern struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO];
56
57/* Protocol registration. */ 64/* Protocol registration. */
58extern int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto); 65extern int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto);
59extern void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto); 66extern void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto);
60
61static inline struct ip_conntrack_protocol *ip_ct_find_proto(u_int8_t protocol)
62{
63 return ip_ct_protos[protocol];
64}
65
66/* Existing built-in protocols */ 67/* Existing built-in protocols */
67extern struct ip_conntrack_protocol ip_conntrack_protocol_tcp; 68extern struct ip_conntrack_protocol ip_conntrack_protocol_tcp;
68extern struct ip_conntrack_protocol ip_conntrack_protocol_udp; 69extern struct ip_conntrack_protocol ip_conntrack_protocol_udp;
@@ -73,6 +74,11 @@ extern int ip_conntrack_protocol_tcp_init(void);
73/* Log invalid packets */ 74/* Log invalid packets */
74extern unsigned int ip_ct_log_invalid; 75extern unsigned int ip_ct_log_invalid;
75 76
77extern int ip_ct_port_tuple_to_nfattr(struct sk_buff *,
78 const struct ip_conntrack_tuple *);
79extern int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
80 struct ip_conntrack_tuple *);
81
76#ifdef CONFIG_SYSCTL 82#ifdef CONFIG_SYSCTL
77#ifdef DEBUG_INVALID_PACKETS 83#ifdef DEBUG_INVALID_PACKETS
78#define LOG_INVALID(proto) \ 84#define LOG_INVALID(proto) \
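
A sketch of what a port-based tuple_to_nfattr hook might look like, mirroring the ip_ct_port_tuple_to_nfattr helper declared above; CTA_PROTO_SRC_PORT/CTA_PROTO_DST_PORT are assumed from <linux/netfilter/nfnetlink_conntrack.h>, and NFA_PUT is the nfnetlink attribute macro that jumps to nfattr_failure when the skb runs out of room:

static int example_port_tuple_to_nfattr(struct sk_buff *skb,
					const struct ip_conntrack_tuple *t)
{
	NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&t->src.u.tcp.port);
	NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&t->dst.u.tcp.port);
	return 0;

nfattr_failure:
	return -1;
}
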
diff --git a/include/linux/netfilter_ipv4/ip_logging.h b/include/linux/netfilter_ipv4/ip_logging.h
deleted file mode 100644
index 0c5c52cb6589..000000000000
--- a/include/linux/netfilter_ipv4/ip_logging.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/* IPv4 macros for the internal logging interface. */
2#ifndef __IP_LOGGING_H
3#define __IP_LOGGING_H
4
5#ifdef __KERNEL__
6#include <linux/socket.h>
7#include <linux/netfilter_logging.h>
8
9#define nf_log_ip_packet(pskb,hooknum,in,out,fmt,args...) \
10 nf_log_packet(AF_INET,pskb,hooknum,in,out,fmt,##args)
11
12#define nf_log_ip(pfh,len,fmt,args...) \
13 nf_log(AF_INET,pfh,len,fmt,##args)
14
15#define nf_ip_log_register(logging) nf_log_register(AF_INET,logging)
16#define nf_ip_log_unregister(logging) nf_log_unregister(AF_INET,logging)
17
18#endif /*__KERNEL__*/
19
20#endif /*__IP_LOGGING_H*/
diff --git a/include/linux/netfilter_ipv4/ip_nat_protocol.h b/include/linux/netfilter_ipv4/ip_nat_protocol.h
index 129708c22386..ef63aa991a06 100644
--- a/include/linux/netfilter_ipv4/ip_nat_protocol.h
+++ b/include/linux/netfilter_ipv4/ip_nat_protocol.h
@@ -4,6 +4,9 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/list.h> 5#include <linux/list.h>
6 6
7#include <linux/netfilter_ipv4/ip_nat.h>
8#include <linux/netfilter/nfnetlink_conntrack.h>
9
7struct iphdr; 10struct iphdr;
8struct ip_nat_range; 11struct ip_nat_range;
9 12
@@ -15,6 +18,8 @@ struct ip_nat_protocol
15 /* Protocol number. */ 18 /* Protocol number. */
16 unsigned int protonum; 19 unsigned int protonum;
17 20
21 struct module *me;
22
18 /* Translate a packet to the target according to manip type. 23 /* Translate a packet to the target according to manip type.
19 Return true if succeeded. */ 24 Return true if succeeded. */
20 int (*manip_pkt)(struct sk_buff **pskb, 25 int (*manip_pkt)(struct sk_buff **pskb,
@@ -43,19 +48,20 @@ struct ip_nat_protocol
43 48
44 unsigned int (*print_range)(char *buffer, 49 unsigned int (*print_range)(char *buffer,
45 const struct ip_nat_range *range); 50 const struct ip_nat_range *range);
46};
47 51
48#define MAX_IP_NAT_PROTO 256 52 int (*range_to_nfattr)(struct sk_buff *skb,
49extern struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO]; 53 const struct ip_nat_range *range);
54
55 int (*nfattr_to_range)(struct nfattr *tb[],
56 struct ip_nat_range *range);
57};
50 58
51/* Protocol registration. */ 59/* Protocol registration. */
52extern int ip_nat_protocol_register(struct ip_nat_protocol *proto); 60extern int ip_nat_protocol_register(struct ip_nat_protocol *proto);
53extern void ip_nat_protocol_unregister(struct ip_nat_protocol *proto); 61extern void ip_nat_protocol_unregister(struct ip_nat_protocol *proto);
54 62
55static inline struct ip_nat_protocol *ip_nat_find_proto(u_int8_t protocol) 63extern struct ip_nat_protocol *ip_nat_proto_find_get(u_int8_t protocol);
56{ 64extern void ip_nat_proto_put(struct ip_nat_protocol *proto);
57 return ip_nat_protos[protocol];
58}
59 65
60/* Built-in protocols. */ 66/* Built-in protocols. */
61extern struct ip_nat_protocol ip_nat_protocol_tcp; 67extern struct ip_nat_protocol ip_nat_protocol_tcp;
@@ -67,4 +73,9 @@ extern int init_protocols(void) __init;
67extern void cleanup_protocols(void); 73extern void cleanup_protocols(void);
68extern struct ip_nat_protocol *find_nat_proto(u_int16_t protonum); 74extern struct ip_nat_protocol *find_nat_proto(u_int16_t protonum);
69 75
76extern int ip_nat_port_range_to_nfattr(struct sk_buff *skb,
77 const struct ip_nat_range *range);
78extern int ip_nat_port_nfattr_to_range(struct nfattr *tb[],
79 struct ip_nat_range *range);
80
70#endif /*_IP_NAT_PROTO_H*/ 81#endif /*_IP_NAT_PROTO_H*/
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 12ce47808e7d..d19d65cf4530 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -109,7 +109,8 @@ struct ipt_counters
109 109
110/* Values for "flag" field in struct ipt_ip (general ip structure). */ 110/* Values for "flag" field in struct ipt_ip (general ip structure). */
111#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ 111#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
112#define IPT_F_MASK 0x01 /* All possible flag bits mask. */ 112#define IPT_F_GOTO 0x02 /* Set if jump is a goto */
113#define IPT_F_MASK 0x03 /* All possible flag bits mask. */
113 114
114/* Values for "inv" field in struct ipt_ip. */ 115/* Values for "inv" field in struct ipt_ip. */
115#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 116#define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h
index d25f782e57d1..22d16177319b 100644
--- a/include/linux/netfilter_ipv4/ipt_LOG.h
+++ b/include/linux/netfilter_ipv4/ipt_LOG.h
@@ -1,6 +1,7 @@
1#ifndef _IPT_LOG_H 1#ifndef _IPT_LOG_H
2#define _IPT_LOG_H 2#define _IPT_LOG_H
3 3
4/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
4#define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ 5#define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
5#define IPT_LOG_TCPOPT 0x02 /* Log TCP options */ 6#define IPT_LOG_TCPOPT 0x02 /* Log TCP options */
6#define IPT_LOG_IPOPT 0x04 /* Log IP options */ 7#define IPT_LOG_IPOPT 0x04 /* Log IP options */
diff --git a/include/linux/netfilter_ipv4/ipt_NFQUEUE.h b/include/linux/netfilter_ipv4/ipt_NFQUEUE.h
new file mode 100644
index 000000000000..b5b2943b0c66
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_NFQUEUE.h
@@ -0,0 +1,16 @@
1/* iptables module for using NFQUEUE mechanism
2 *
3 * (C) 2005 Harald Welte <laforge@netfilter.org>
4 *
5 * This software is distributed under GNU GPL v2, 1991
6 *
7*/
8#ifndef _IPT_NFQ_TARGET_H
9#define _IPT_NFQ_TARGET_H
10
11/* target info */
12struct ipt_NFQ_info {
13 u_int16_t queuenum;
14};
15
16#endif /* _IPT_NFQ_TARGET_H */
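
The target carries a single parameter: the nfnetlink queue (see nfnetlink_queue.h above) that matching packets are diverted to. A trivial initializer for reference:

/* Queue matching packets to userspace queue 0. */
struct ipt_NFQ_info nfq_info = {
	.queuenum = 0,
};
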
diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h
new file mode 100644
index 000000000000..ee6611edc112
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_TTL.h
@@ -0,0 +1,21 @@
1/* TTL modification module for IP tables
2 * (C) 2000 by Harald Welte <laforge@netfilter.org> */
3
4#ifndef _IPT_TTL_H
5#define _IPT_TTL_H
6
7enum {
8 IPT_TTL_SET = 0,
9 IPT_TTL_INC,
10 IPT_TTL_DEC
11};
12
13#define IPT_TTL_MAXMODE IPT_TTL_DEC
14
15struct ipt_TTL_info {
16 u_int8_t mode;
17 u_int8_t ttl;
18};
19
20
21#endif
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
new file mode 100644
index 000000000000..9e5532f8d8ac
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -0,0 +1,25 @@
1#ifndef _IPT_CONNBYTES_H
2#define _IPT_CONNBYTES_H
3
4enum ipt_connbytes_what {
5 IPT_CONNBYTES_PKTS,
6 IPT_CONNBYTES_BYTES,
7 IPT_CONNBYTES_AVGPKT,
8};
9
10enum ipt_connbytes_direction {
11 IPT_CONNBYTES_DIR_ORIGINAL,
12 IPT_CONNBYTES_DIR_REPLY,
13 IPT_CONNBYTES_DIR_BOTH,
14};
15
16struct ipt_connbytes_info
17{
18 struct {
19 aligned_u64 from; /* count to be matched */
20 aligned_u64 to; /* count to be matched */
21 } count;
22 u_int8_t what; /* ipt_connbytes_what */
23 u_int8_t direction; /* ipt_connbytes_direction */
24};
25#endif
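
As a sketch, a match configuration selecting connections that have carried between 1 MB and 16 MB, counting both directions (interpretation of the bounds is up to the match module, not this header):

struct ipt_connbytes_info cb_info = {
	.count     = { .from = 1 << 20, .to = 16 << 20 },
	.what      = IPT_CONNBYTES_BYTES,
	.direction = IPT_CONNBYTES_DIR_BOTH,
};
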
diff --git a/include/linux/netfilter_ipv4/ipt_dccp.h b/include/linux/netfilter_ipv4/ipt_dccp.h
new file mode 100644
index 000000000000..3cb3a522e62b
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_dccp.h
@@ -0,0 +1,23 @@
1#ifndef _IPT_DCCP_H_
2#define _IPT_DCCP_H_
3
4#define IPT_DCCP_SRC_PORTS 0x01
5#define IPT_DCCP_DEST_PORTS 0x02
6#define IPT_DCCP_TYPE 0x04
7#define IPT_DCCP_OPTION 0x08
8
9#define IPT_DCCP_VALID_FLAGS 0x0f
10
11struct ipt_dccp_info {
12 u_int16_t dpts[2]; /* Min, Max */
13 u_int16_t spts[2]; /* Min, Max */
14
15 u_int16_t flags;
16 u_int16_t invflags;
17
18 u_int16_t typemask;
19 u_int8_t option;
20};
21
22#endif /* _IPT_DCCP_H_ */
23
diff --git a/include/linux/netfilter_ipv4/ipt_string.h b/include/linux/netfilter_ipv4/ipt_string.h
new file mode 100644
index 000000000000..a265f6e44eab
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_string.h
@@ -0,0 +1,18 @@
1#ifndef _IPT_STRING_H
2#define _IPT_STRING_H
3
4#define IPT_STRING_MAX_PATTERN_SIZE 128
5#define IPT_STRING_MAX_ALGO_NAME_SIZE 16
6
7struct ipt_string_info
8{
9 u_int16_t from_offset;
10 u_int16_t to_offset;
11 char algo[IPT_STRING_MAX_ALGO_NAME_SIZE];
12 char pattern[IPT_STRING_MAX_PATTERN_SIZE];
13 u_int8_t patlen;
14 u_int8_t invert;
15 struct ts_config __attribute__((aligned(8))) *config;
16};
17
18#endif /*_IPT_STRING_H*/
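
The struct embeds a compiled textsearch handle; a sketch of how a match module might prepare it with the lib/textsearch API (textsearch_prepare and TS_AUTOLOAD are the existing kernel interfaces; error handling abbreviated):

#include <linux/err.h>
#include <linux/textsearch.h>

/* Sketch: compile the user-supplied pattern once, e.g. at rule
 * checkentry time, and stash the handle in the match info. */
static int example_compile_pattern(struct ipt_string_info *info)
{
	struct ts_config *conf;

	conf = textsearch_prepare(info->algo, info->pattern, info->patlen,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	info->config = conf;
	return 0;
}
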
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index bee7a5ec7c66..edcc2c6eb5c7 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -10,6 +10,8 @@
10 10
11#include <linux/netfilter.h> 11#include <linux/netfilter.h>
12 12
13/* only for userspace compatibility */
14#ifndef __KERNEL__
13/* IP Cache bits. */ 15/* IP Cache bits. */
14/* Src IP address. */ 16/* Src IP address. */
15#define NFC_IP6_SRC 0x0001 17#define NFC_IP6_SRC 0x0001
@@ -38,6 +40,7 @@
38#define NFC_IP6_DST_PT 0x0400 40#define NFC_IP6_DST_PT 0x0400
39/* Something else about the proto */ 41/* Something else about the proto */
40#define NFC_IP6_PROTO_UNKNOWN 0x2000 42#define NFC_IP6_PROTO_UNKNOWN 0x2000
43#endif /* ! __KERNEL__ */
41 44
42 45
43/* IP6 Hooks */ 46/* IP6 Hooks */
@@ -68,4 +71,7 @@ enum nf_ip6_hook_priorities {
68 NF_IP6_PRI_LAST = INT_MAX, 71 NF_IP6_PRI_LAST = INT_MAX,
69}; 72};
70 73
74extern int ipv6_netfilter_init(void);
75extern void ipv6_netfilter_fini(void);
76
71#endif /*__LINUX_IP6_NETFILTER_H*/ 77#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_logging.h b/include/linux/netfilter_ipv6/ip6_logging.h
deleted file mode 100644
index a0b2ee3043aa..000000000000
--- a/include/linux/netfilter_ipv6/ip6_logging.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/* IPv6 macros for the internal logging interface. */
2#ifndef __IP6_LOGGING_H
3#define __IP6_LOGGING_H
4
5#ifdef __KERNEL__
6#include <linux/socket.h>
7#include <linux/netfilter_logging.h>
8
9#define nf_log_ip6_packet(pskb,hooknum,in,out,fmt,args...) \
10 nf_log_packet(AF_INET6,pskb,hooknum,in,out,fmt,##args)
11
12#define nf_log_ip6(pfh,len,fmt,args...) \
13 nf_log(AF_INET6,pfh,len,fmt,##args)
14
15#define nf_ip6_log_register(logging) nf_log_register(AF_INET6,logging)
16#define nf_ip6_log_unregister(logging) nf_log_unregister(AF_INET6,logging)
17
18#endif /*__KERNEL__*/
19
20#endif /*__IP6_LOGGING_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index f1ce3b009853..58c72a52dc65 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -111,7 +111,8 @@ struct ip6t_counters
111#define IP6T_F_PROTO 0x01 /* Set if rule cares about upper 111#define IP6T_F_PROTO 0x01 /* Set if rule cares about upper
112 protocols */ 112 protocols */
113#define IP6T_F_TOS 0x02 /* Match the TOS. */ 113#define IP6T_F_TOS 0x02 /* Match the TOS. */
114#define IP6T_F_MASK 0x03 /* All possible flag bits mask. */ 114#define IP6T_F_GOTO 0x04 /* Set if jump is a goto */
115#define IP6T_F_MASK 0x07 /* All possible flag bits mask. */
115 116
116/* Values for "inv" field in struct ip6t_ip6. */ 117/* Values for "inv" field in struct ip6t_ip6. */
117#define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 118#define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h
new file mode 100644
index 000000000000..afb7813d45ab
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_HL.h
@@ -0,0 +1,22 @@
1/* Hop Limit modification module for ip6tables
2 * Maciej Soltysiak <solt@dns.toxicfilms.tv>
3 * Based on HW's TTL module */
4
5#ifndef _IP6T_HL_H
6#define _IP6T_HL_H
7
8enum {
9 IP6T_HL_SET = 0,
10 IP6T_HL_INC,
11 IP6T_HL_DEC
12};
13
14#define IP6T_HL_MAXMODE IP6T_HL_DEC
15
16struct ip6t_HL_info {
17 u_int8_t mode;
18 u_int8_t hop_limit;
19};
20
21
22#endif
diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h
index 42996a43bb39..9008ff5c40ae 100644
--- a/include/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/linux/netfilter_ipv6/ip6t_LOG.h
@@ -1,6 +1,7 @@
1#ifndef _IP6T_LOG_H 1#ifndef _IP6T_LOG_H
2#define _IP6T_LOG_H 2#define _IP6T_LOG_H
3 3
4/* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */
4#define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ 5#define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
5#define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */ 6#define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */
6#define IP6T_LOG_IPOPT 0x04 /* Log IP options */ 7#define IP6T_LOG_IPOPT 0x04 /* Log IP options */
diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h
new file mode 100644
index 000000000000..6be6504162bb
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -0,0 +1,18 @@
1#ifndef _IP6T_REJECT_H
2#define _IP6T_REJECT_H
3
4enum ip6t_reject_with {
5 IP6T_ICMP6_NO_ROUTE,
6 IP6T_ICMP6_ADM_PROHIBITED,
7 IP6T_ICMP6_NOT_NEIGHBOUR,
8 IP6T_ICMP6_ADDR_UNREACH,
9 IP6T_ICMP6_PORT_UNREACH,
10 IP6T_ICMP6_ECHOREPLY,
11 IP6T_TCP_RESET
12};
13
14struct ip6t_reject_info {
15 u_int32_t with; /* reject type */
16};
17
18#endif /*_IP6T_REJECT_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6552b71bfa73..167518668936 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -8,7 +8,7 @@
8#define NETLINK_W1 1 /* 1-wire subsystem */ 8#define NETLINK_W1 1 /* 1-wire subsystem */
9#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ 9#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
10#define NETLINK_FIREWALL 3 /* Firewalling hook */ 10#define NETLINK_FIREWALL 3 /* Firewalling hook */
11#define NETLINK_TCPDIAG 4 /* TCP socket monitoring */ 11#define NETLINK_INET_DIAG 4 /* INET socket monitoring */
12#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ 12#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
13#define NETLINK_XFRM 6 /* ipsec */ 13#define NETLINK_XFRM 6 /* ipsec */
14#define NETLINK_SELINUX 7 /* SELinux event notifications */ 14#define NETLINK_SELINUX 7 /* SELinux event notifications */
@@ -90,6 +90,15 @@ struct nlmsgerr
90 struct nlmsghdr msg; 90 struct nlmsghdr msg;
91}; 91};
92 92
93#define NETLINK_ADD_MEMBERSHIP 1
94#define NETLINK_DROP_MEMBERSHIP 2
95#define NETLINK_PKTINFO 3
96
97struct nl_pktinfo
98{
99 __u32 group;
100};
101
93#define NET_MAJOR 36 /* Major 36 is reserved for networking */ 102#define NET_MAJOR 36 /* Major 36 is reserved for networking */
94 103
95enum { 104enum {
@@ -106,9 +115,8 @@ struct netlink_skb_parms
106{ 115{
107 struct ucred creds; /* Skb credentials */ 116 struct ucred creds; /* Skb credentials */
108 __u32 pid; 117 __u32 pid;
109 __u32 groups;
110 __u32 dst_pid; 118 __u32 dst_pid;
111 __u32 dst_groups; 119 __u32 dst_group;
112 kernel_cap_t eff_cap; 120 kernel_cap_t eff_cap;
113 __u32 loginuid; /* Login (audit) uid */ 121 __u32 loginuid; /* Login (audit) uid */
114}; 122};
@@ -117,11 +125,11 @@ struct netlink_skb_parms
117#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) 125#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
118 126
119 127
120extern struct sock *netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len)); 128extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module);
121extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 129extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
122extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 130extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
123extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 131extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
124 __u32 group, int allocation); 132 __u32 group, unsigned int __nocast allocation);
125extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); 133extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
126extern int netlink_register_notifier(struct notifier_block *nb); 134extern int netlink_register_notifier(struct notifier_block *nb);
127extern int netlink_unregister_notifier(struct notifier_block *nb); 135extern int netlink_unregister_notifier(struct notifier_block *nb);
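
Given the new NETLINK_ADD_MEMBERSHIP option above, a userspace sketch of joining a multicast group by number instead of through the old 32-bit bind-time bitmask (SOL_NETLINK is assumed to be the netlink socket level, value 270; error checking omitted):

#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270	/* assumed; provided by newer headers */
#endif

/* fd is a bound AF_NETLINK socket; join one additional group. */
static int example_join_group(int fd, unsigned int group)
{
	return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			  &group, sizeof(group));
}
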
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 927ed487630d..d513c1634006 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1249,6 +1249,7 @@
1249#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 1249#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
1250#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 1250#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
1251#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E 1251#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
1252#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F
1252#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 1253#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
1253#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 1254#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
1254#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B 1255#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B
@@ -2144,6 +2145,7 @@
2144#define PCI_DEVICE_ID_ENE_1225 0x1225 2145#define PCI_DEVICE_ID_ENE_1225 0x1225
2145#define PCI_DEVICE_ID_ENE_1410 0x1410 2146#define PCI_DEVICE_ID_ENE_1410 0x1410
2146#define PCI_DEVICE_ID_ENE_1420 0x1420 2147#define PCI_DEVICE_ID_ENE_1420 0x1420
2148#define PCI_VENDOR_ID_CHELSIO 0x1425
2147 2149
2148#define PCI_VENDOR_ID_SYBA 0x1592 2150#define PCI_VENDOR_ID_SYBA 0x1592
2149#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 2151#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 000000000000..72cb67b66e0c
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,377 @@
1/*
2 * include/linux/phy.h
3 *
4 * Framework and drivers for configuring and reading different PHYs
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#ifndef __PHY_H
19#define __PHY_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
24#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
25 SUPPORTED_10baseT_Full | \
26 SUPPORTED_100baseT_Half | \
27 SUPPORTED_100baseT_Full | \
28 SUPPORTED_Autoneg | \
29 SUPPORTED_TP | \
30 SUPPORTED_MII)
31
32#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
33 SUPPORTED_1000baseT_Half | \
34 SUPPORTED_1000baseT_Full)
35
36/* Set phydev->irq to PHY_POLL if interrupts are not supported,
37 * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
38 * the attached driver handles the interrupt
39 */
40#define PHY_POLL -1
41#define PHY_IGNORE_INTERRUPT -2
42
43#define PHY_HAS_INTERRUPT 0x00000001
44#define PHY_HAS_MAGICANEG 0x00000002
45
46#define MII_BUS_MAX 4
47
48
49#define PHY_INIT_TIMEOUT 100000
50#define PHY_STATE_TIME 1
51#define PHY_FORCE_TIMEOUT 10
52#define PHY_AN_TIMEOUT 10
53
54#define PHY_MAX_ADDR 32
55
56/* The Bus class for PHYs. Devices which provide access to
57 * PHYs should register using this structure */
58struct mii_bus {
59 const char *name;
60 int id;
61 void *priv;
62 int (*read)(struct mii_bus *bus, int phy_id, int regnum);
63 int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
64 int (*reset)(struct mii_bus *bus);
65
66 /* A lock to ensure that only one thing can read/write
67 * the MDIO bus at a time */
68 spinlock_t mdio_lock;
69
70 struct device *dev;
71
72 /* list of all PHYs on bus */
73 struct phy_device *phy_map[PHY_MAX_ADDR];
74
75 /* Pointer to an array of interrupts, each PHY's
76 * interrupt at the index matching its address */
77 int *irq;
78};
79
80#define PHY_INTERRUPT_DISABLED 0x0
81#define PHY_INTERRUPT_ENABLED 0x80000000
82
83/* PHY state machine states:
84 *
85 * DOWN: PHY device and driver are not ready for anything. probe
86 * should be called if and only if the PHY is in this state,
87 * given that the PHY device exists.
88 * - PHY driver probe function will, depending on the PHY, set
89 * the state to STARTING or READY
90 *
91 * STARTING: PHY device is coming up, and the ethernet driver is
92 * not ready. PHY drivers may set this in the probe function.
93 * If they do, they are responsible for making sure the state is
94 * eventually set to indicate whether the PHY is UP or READY,
95 * depending on the state when the PHY is done starting up.
96 * - PHY driver will set the state to READY
97 * - start will set the state to PENDING
98 *
99 * READY: PHY is ready to send and receive packets, but the
100 * controller is not. By default, PHYs which do not implement
101 * probe will be set to this state by phy_probe(). If the PHY
102 * driver knows the PHY is ready, and the PHY state is STARTING,
103 * then it sets this state.
104 * - start will set the state to UP
105 *
106 * PENDING: PHY device is coming up, but the ethernet driver is
107 * ready. phy_start will set this state if the PHY state is
108 * STARTING.
109 * - PHY driver will set the state to UP when the PHY is ready
110 *
111 * UP: The PHY and attached device are ready to do work.
112 * Interrupts should be started here.
113 * - timer moves to AN
114 *
115 * AN: The PHY is currently negotiating the link state. Link is
116 * therefore down for now. phy_timer will set this state when it
117 * detects the state is UP. config_aneg will set this state
118 * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
119 * - If autonegotiation finishes, but there's no link, it sets
120 * the state to NOLINK.
121 * - If aneg finishes with link, it sets the state to RUNNING,
122 * and calls adjust_link
123 * - If autonegotiation did not finish after an arbitrary amount
124 * of time, autonegotiation should be tried again if the PHY
125 * supports "magic" autonegotiation (back to AN)
126 * - If it didn't finish, and no magic_aneg, move to FORCING.
127 *
128 * NOLINK: PHY is up, but not currently plugged in.
129 * - If the timer notes that the link comes back, we move to RUNNING
130 * - config_aneg moves to AN
131 * - phy_stop moves to HALTED
132 *
133 * FORCING: PHY is being configured with forced settings
134 * - if link is up, move to RUNNING
135 * - If link is down, we drop to the next highest setting, and
136 * retry (FORCING) after a timeout
137 * - phy_stop moves to HALTED
138 *
139 * RUNNING: PHY is currently up, running, and possibly sending
140 * and/or receiving packets
141 * - timer will set CHANGELINK if we're polling (this ensures the
142 * link state is polled every other cycle of this state machine,
143 * which makes it every other second)
144 * - irq will set CHANGELINK
145 * - config_aneg will set AN
146 * - phy_stop moves to HALTED
147 *
148 * CHANGELINK: PHY experienced a change in link state
149 * - timer moves to RUNNING if link
150 * - timer moves to NOLINK if the link is down
151 * - phy_stop moves to HALTED
152 *
153 * HALTED: PHY is up, but no polling or interrupts are done. Or
154 * PHY is in an error state.
155 *
156 * - phy_start moves to RESUMING
157 *
158 * RESUMING: PHY was halted, but now wants to run again.
159 * - If we are forcing, or aneg is done, timer moves to RUNNING
160 * - If aneg is not done, timer moves to AN
161 * - phy_stop moves to HALTED
162 */
163enum phy_state {
164 PHY_DOWN=0,
165 PHY_STARTING,
166 PHY_READY,
167 PHY_PENDING,
168 PHY_UP,
169 PHY_AN,
170 PHY_RUNNING,
171 PHY_NOLINK,
172 PHY_FORCING,
173 PHY_CHANGELINK,
174 PHY_HALTED,
175 PHY_RESUMING
176};
177
178/* phy_device: An instance of a PHY
179 *
180 * drv: Pointer to the driver for this PHY instance
181 * bus: Pointer to the bus this PHY is on
182 * dev: driver model device structure for this PHY
183 * phy_id: UID for this device found during discovery
184 * state: state of the PHY for management purposes
185 * dev_flags: Device-specific flags used by the PHY driver.
186 * addr: Bus address of PHY
187 * link_timeout: The number of timer firings to wait before
188 * giving up on the current attempt at acquiring a link
189 * irq: IRQ number of the PHY's interrupt (-1 if none)
190 * phy_timer: The timer for handling the state machine
191 * phy_queue: A work_queue for the interrupt
192 * attached_dev: The attached enet driver's device instance ptr
193 * adjust_link: Callback for the enet controller to respond to
194 * changes in the link state.
195 * adjust_state: Callback for the enet driver to respond to
196 * changes in the state machine.
197 *
198 * speed, duplex, pause, supported, advertising, and
199 * autoneg are used like in mii_if_info
200 *
201 * interrupts currently only supports enabled or disabled,
202 * but could be changed in the future to support enabling
203 * and disabling specific interrupts
204 *
205 * Contains some infrastructure for polling and interrupt
206 * handling, as well as handling shifts in PHY hardware state
207 */
208struct phy_device {
209 /* Information about the PHY type */
210 /* And management functions */
211 struct phy_driver *drv;
212
213 struct mii_bus *bus;
214
215 struct device dev;
216
217 u32 phy_id;
218
219 enum phy_state state;
220
221 u32 dev_flags;
222
223 /* Bus address of the PHY (0-31) */
224 int addr;
225
226 /* forced speed & duplex (no autoneg)
227 * partner speed & duplex & pause (autoneg)
228 */
229 int speed;
230 int duplex;
231 int pause;
232 int asym_pause;
233
234 /* The most recently read link state */
235 int link;
236
237 /* Enabled Interrupts */
238 u32 interrupts;
239
240 /* Union of PHY and Attached devices' supported modes */
241 /* See mii.h for more info */
242 u32 supported;
243 u32 advertising;
244
245 int autoneg;
246
247 int link_timeout;
248
249 /* Interrupt number for this PHY
250 * -1 means no interrupt */
251 int irq;
252
253 /* private data pointer */
254 /* For use by PHYs to maintain extra state */
255 void *priv;
256
257 /* Interrupt and Polling infrastructure */
258 struct work_struct phy_queue;
259 struct timer_list phy_timer;
260
261 spinlock_t lock;
262
263 struct net_device *attached_dev;
264
265 void (*adjust_link)(struct net_device *dev);
266
267 void (*adjust_state)(struct net_device *dev);
268};
269#define to_phy_device(d) container_of(d, struct phy_device, dev)
270
271/* struct phy_driver: Driver structure for a particular PHY type
272 *
273 * phy_id: The result of reading the UID registers of this PHY
274 * type, and ANDing them with the phy_id_mask. This driver
275 * only works for PHYs with IDs which match this field
276 * name: The friendly name of this PHY type
277 * phy_id_mask: Defines the important bits of the phy_id
278 * features: A list of features (speed, duplex, etc) supported
279 * by this PHY
280 * flags: A bitfield defining certain other features this PHY
281 * supports (like interrupts)
282 *
283 * The drivers must implement config_aneg and read_status. All
284 * other functions are optional. Note that none of these
285 * functions should be called from interrupt time. The goal is
286 * for the bus read/write functions to be able to block when the
287 * bus transaction is happening, and be freed up by an interrupt
288 * (The MPC85xx has this ability, though it is not currently
289 * supported in the driver).
290 */
291struct phy_driver {
292 u32 phy_id;
293 char *name;
294 unsigned int phy_id_mask;
295 u32 features;
296 u32 flags;
297
298 /* Called to initialize the PHY,
299 * including after a reset */
300 int (*config_init)(struct phy_device *phydev);
301
302 /* Called during discovery. Used to set
303 * up device-specific structures, if any */
304 int (*probe)(struct phy_device *phydev);
305
306 /* PHY Power Management */
307 int (*suspend)(struct phy_device *phydev);
308 int (*resume)(struct phy_device *phydev);
309
310 /* Configures the advertisement and resets
311 * autonegotiation if phydev->autoneg is on,
312 * forces the speed to the current settings in phydev
313 * if phydev->autoneg is off */
314 int (*config_aneg)(struct phy_device *phydev);
315
316 /* Determines the negotiated speed and duplex */
317 int (*read_status)(struct phy_device *phydev);
318
319 /* Clears any pending interrupts */
320 int (*ack_interrupt)(struct phy_device *phydev);
321
322 /* Enables or disables interrupts */
323 int (*config_intr)(struct phy_device *phydev);
324
325 /* Clears up any memory if needed */
326 void (*remove)(struct phy_device *phydev);
327
328 struct device_driver driver;
329};
330#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
331
332int phy_read(struct phy_device *phydev, u16 regnum);
333int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
334struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
335int phy_clear_interrupt(struct phy_device *phydev);
336int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
337struct phy_device * phy_attach(struct net_device *dev,
338 const char *phy_id, u32 flags);
339struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
340 void (*handler)(struct net_device *), u32 flags);
341void phy_disconnect(struct phy_device *phydev);
342void phy_detach(struct phy_device *phydev);
343void phy_start(struct phy_device *phydev);
344void phy_stop(struct phy_device *phydev);
345int phy_start_aneg(struct phy_device *phydev);
346
347int mdiobus_register(struct mii_bus *bus);
348void mdiobus_unregister(struct mii_bus *bus);
349void phy_sanitize_settings(struct phy_device *phydev);
350int phy_stop_interrupts(struct phy_device *phydev);
351
352static inline int phy_read_status(struct phy_device *phydev) {
353 return phydev->drv->read_status(phydev);
354}
355
356int genphy_config_advert(struct phy_device *phydev);
357int genphy_setup_forced(struct phy_device *phydev);
358int genphy_restart_aneg(struct phy_device *phydev);
359int genphy_config_aneg(struct phy_device *phydev);
360int genphy_update_link(struct phy_device *phydev);
361int genphy_read_status(struct phy_device *phydev);
362void phy_driver_unregister(struct phy_driver *drv);
363int phy_driver_register(struct phy_driver *new_driver);
364void phy_prepare_link(struct phy_device *phydev,
365 void (*adjust_link)(struct net_device *));
366void phy_start_machine(struct phy_device *phydev,
367 void (*handler)(struct net_device *));
368void phy_stop_machine(struct phy_device *phydev);
369int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
370int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
371int phy_mii_ioctl(struct phy_device *phydev,
372 struct mii_ioctl_data *mii_data, int cmd);
373int phy_start_interrupts(struct phy_device *phydev);
374void phy_print_status(struct phy_device *phydev);
375
376extern struct bus_type mdio_bus_type;
377#endif /* __PHY_H */
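
Tying the phy_driver interface together, a minimal hedged sketch of a driver that leans on the genphy_* defaults declared above; the id and mask are placeholders, not a real PHY:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/phy.h>

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x00112230,		/* placeholder OUI/model */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	/* config_aneg and read_status are the two mandatory hooks; the
	 * generic implementations suffice for IEEE-conformant PHYs. */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE },
};

static int __init example_phy_init(void)
{
	return phy_driver_register(&example_phy_driver);
}
module_init(example_phy_init);
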
diff --git a/include/linux/random.h b/include/linux/random.h
index cc6703449916..7b2adb3322d5 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -59,6 +59,8 @@ extern __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
59 __u16 sport, __u16 dport); 59 __u16 sport, __u16 dport);
60extern __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr, 60extern __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
61 __u16 sport, __u16 dport); 61 __u16 sport, __u16 dport);
62extern u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
63 __u16 sport, __u16 dport);
62 64
63#ifndef MODULE 65#ifndef MODULE
64extern struct file_operations random_fops, urandom_fops; 66extern struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 657c05ab8f9e..c231e9a08f0b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -826,9 +826,8 @@ enum
826#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg)))) 826#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg))))
827#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg)) 827#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg))
828 828
829 829#ifndef __KERNEL__
830/* RTnetlink multicast groups */ 830/* RTnetlink multicast groups - backwards compatibility for userspace */
831
832#define RTMGRP_LINK 1 831#define RTMGRP_LINK 1
833#define RTMGRP_NOTIFY 2 832#define RTMGRP_NOTIFY 2
834#define RTMGRP_NEIGH 4 833#define RTMGRP_NEIGH 4
@@ -847,6 +846,43 @@ enum
847#define RTMGRP_DECnet_ROUTE 0x4000 846#define RTMGRP_DECnet_ROUTE 0x4000
848 847
849#define RTMGRP_IPV6_PREFIX 0x20000 848#define RTMGRP_IPV6_PREFIX 0x20000
849#endif
850
851/* RTnetlink multicast groups */
852enum rtnetlink_groups {
853 RTNLGRP_NONE,
854#define RTNLGRP_NONE RTNLGRP_NONE
855 RTNLGRP_LINK,
856#define RTNLGRP_LINK RTNLGRP_LINK
857 RTNLGRP_NOTIFY,
858#define RTNLGRP_NOTIFY RTNLGRP_NOTIFY
859 RTNLGRP_NEIGH,
860#define RTNLGRP_NEIGH RTNLGRP_NEIGH
861 RTNLGRP_TC,
862#define RTNLGRP_TC RTNLGRP_TC
863 RTNLGRP_IPV4_IFADDR,
864#define RTNLGRP_IPV4_IFADDR RTNLGRP_IPV4_IFADDR
865 RTNLGRP_IPV4_MROUTE,
866#define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE
867 RTNLGRP_IPV4_ROUTE,
868#define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE
869 RTNLGRP_IPV6_IFADDR,
870#define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR
871 RTNLGRP_IPV6_MROUTE,
872#define RTNLGRP_IPV6_MROUTE RTNLGRP_IPV6_MROUTE
873 RTNLGRP_IPV6_ROUTE,
874#define RTNLGRP_IPV6_ROUTE RTNLGRP_IPV6_ROUTE
875 RTNLGRP_IPV6_IFINFO,
876#define RTNLGRP_IPV6_IFINFO RTNLGRP_IPV6_IFINFO
877 RTNLGRP_DECnet_IFADDR,
878#define RTNLGRP_DECnet_IFADDR RTNLGRP_DECnet_IFADDR
879 RTNLGRP_DECnet_ROUTE,
880#define RTNLGRP_DECnet_ROUTE RTNLGRP_DECnet_ROUTE
881 RTNLGRP_IPV6_PREFIX,
882#define RTNLGRP_IPV6_PREFIX RTNLGRP_IPV6_PREFIX
883 __RTNLGRP_MAX
884};
885#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
850 886
851/* TC action piece */ 887/* TC action piece */
852struct tcamsg 888struct tcamsg
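
For orientation, a userspace sketch contrasting the legacy bitmask subscription (bit n of nl_groups selects group n+1, capping rtnetlink at 32 groups) with the enumerated ids above, which are joined via NETLINK_ADD_MEMBERSHIP as sketched earlier:

#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Legacy style: subscribe at bind time through the 32-bit mask. */
struct sockaddr_nl addr = {
	.nl_family = AF_NETLINK,
	.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR,
};
/* bind(fd, (struct sockaddr *)&addr, sizeof(addr)); */

/* New style: pass an RTNLGRP_* id to NETLINK_ADD_MEMBERSHIP, with no
 * 32-group ceiling. */
unsigned int grp = RTNLGRP_IPV4_ROUTE;
/* setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp)); */
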
diff --git a/include/linux/security.h b/include/linux/security.h
index b42095a68b1c..7aab6ab7c57f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2727,7 +2727,8 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o
2727 return security_ops->socket_getpeersec(sock, optval, optlen, len); 2727 return security_ops->socket_getpeersec(sock, optval, optlen, len);
2728} 2728}
2729 2729
2730static inline int security_sk_alloc(struct sock *sk, int family, int priority) 2730static inline int security_sk_alloc(struct sock *sk, int family,
2731 unsigned int __nocast priority)
2731{ 2732{
2732 return security_ops->sk_alloc_security(sk, family, priority); 2733 return security_ops->sk_alloc_security(sk, family, priority);
2733} 2734}
@@ -2844,7 +2845,8 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o
2844 return -ENOPROTOOPT; 2845 return -ENOPROTOOPT;
2845} 2846}
2846 2847
2847static inline int security_sk_alloc(struct sock *sk, int family, int priority) 2848static inline int security_sk_alloc(struct sock *sk, int family,
2849 unsigned int __nocast priority)
2848{ 2850{
2849 return 0; 2851 return 0;
2850} 2852}
diff --git a/include/linux/selinux_netlink.h b/include/linux/selinux_netlink.h
index 957e6ebca4e6..bbf489decd84 100644
--- a/include/linux/selinux_netlink.h
+++ b/include/linux/selinux_netlink.h
@@ -20,10 +20,21 @@ enum {
20 SELNL_MSG_MAX 20 SELNL_MSG_MAX
21}; 21};
22 22
23/* Multicast groups */ 23#ifndef __KERNEL__
24/* Multicast groups - backwards compatibility for userspace */
24#define SELNL_GRP_NONE 0x00000000 25#define SELNL_GRP_NONE 0x00000000
25#define SELNL_GRP_AVC 0x00000001 /* AVC notifications */ 26#define SELNL_GRP_AVC 0x00000001 /* AVC notifications */
26#define SELNL_GRP_ALL 0xffffffff 27#define SELNL_GRP_ALL 0xffffffff
28#endif
29
30enum selinux_nlgroups {
31 SELNLGRP_NONE,
32#define SELNLGRP_NONE SELNLGRP_NONE
33 SELNLGRP_AVC,
34#define SELNLGRP_AVC SELNLGRP_AVC
35 __SELNLGRP_MAX
36};
37#define SELNLGRP_MAX (__SELNLGRP_MAX - 1)
27 38
28/* Message structures */ 39/* Message structures */
29struct selnl_msg_setenforce { 40struct selnl_msg_setenforce {
diff --git a/include/linux/serialP.h b/include/linux/serialP.h
index 2b2f35a64d75..2b9e6b9554d5 100644
--- a/include/linux/serialP.h
+++ b/include/linux/serialP.h
@@ -140,44 +140,4 @@ struct rs_multiport_struct {
140#define ALPHA_KLUDGE_MCR 0 140#define ALPHA_KLUDGE_MCR 0
141#endif 141#endif
142 142
143/*
144 * Definitions for PCI support.
145 */
146#define SPCI_FL_BASE_MASK 0x0007
147#define SPCI_FL_BASE0 0x0000
148#define SPCI_FL_BASE1 0x0001
149#define SPCI_FL_BASE2 0x0002
150#define SPCI_FL_BASE3 0x0003
151#define SPCI_FL_BASE4 0x0004
152#define SPCI_FL_GET_BASE(x) (x & SPCI_FL_BASE_MASK)
153
154#define SPCI_FL_IRQ_MASK (0x0007 << 4)
155#define SPCI_FL_IRQBASE0 (0x0000 << 4)
156#define SPCI_FL_IRQBASE1 (0x0001 << 4)
157#define SPCI_FL_IRQBASE2 (0x0002 << 4)
158#define SPCI_FL_IRQBASE3 (0x0003 << 4)
159#define SPCI_FL_IRQBASE4 (0x0004 << 4)
160#define SPCI_FL_GET_IRQBASE(x) ((x & SPCI_FL_IRQ_MASK) >> 4)
161
162/* Use successive BARs (PCI base address registers),
163 else use offset into some specified BAR */
164#define SPCI_FL_BASE_TABLE 0x0100
165
166/* Use successive entries in the irq resource table */
167#define SPCI_FL_IRQ_TABLE 0x0200
168
169/* Use the irq resource table instead of dev->irq */
170#define SPCI_FL_IRQRESOURCE 0x0400
171
172/* Use the Base address register size to cap number of ports */
173#define SPCI_FL_REGION_SZ_CAP 0x0800
174
175/* Do not use irq sharing for this device */
176#define SPCI_FL_NO_SHIRQ 0x1000
177
178/* This is a PNP device */
179#define SPCI_FL_ISPNP 0x2000
180
181#define SPCI_FL_PNPDEFAULT (SPCI_FL_IRQRESOURCE|SPCI_FL_ISPNP)
182
183#endif /* _LINUX_SERIAL_H */ 143#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 948527e42a60..42edce6abe23 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -155,16 +155,29 @@ struct skb_shared_info {
 #define SKB_DATAREF_SHIFT 16
 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
 
+extern struct timeval skb_tv_base;
+
+struct skb_timeval {
+	u32	off_sec;
+	u32	off_usec;
+};
+
+
+enum {
+	SKB_FCLONE_UNAVAILABLE,
+	SKB_FCLONE_ORIG,
+	SKB_FCLONE_CLONE,
+};
+
 /**
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
  *	@prev: Previous buffer in list
  *	@list: List we are on
  *	@sk: Socket we are owned by
- *	@stamp: Time we arrived
+ *	@tstamp: Time we arrived stored as offset to skb_tv_base
  *	@dev: Device we arrived on/are leaving by
  *	@input_dev: Device we arrived on
- *	@real_dev: The real device we are using
  *	@h: Transport layer header
  *	@nh: Network layer header
  *	@mac: Link layer header
@@ -190,14 +203,11 @@ struct skb_shared_info {
  *	@end: End pointer
  *	@destructor: Destruct function
  *	@nfmark: Can be used for communication between hooks
- *	@nfcache: Cache info
  *	@nfct: Associated connection, if any
  *	@nfctinfo: Relationship of this skb to the connection
  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
- *	@private: Data which is private to the HIPPI implementation
  *	@tc_index: Traffic control index
  *	@tc_verd: traffic control verdict
- *	@tc_classid: traffic control classid
  */
 
 struct sk_buff {
@@ -205,12 +215,10 @@ struct sk_buff {
 	struct sk_buff		*next;
 	struct sk_buff		*prev;
 
-	struct sk_buff_head	*list;
 	struct sock		*sk;
-	struct timeval		stamp;
+	struct skb_timeval	tstamp;
 	struct net_device	*dev;
 	struct net_device	*input_dev;
-	struct net_device	*real_dev;
 
 	union {
 		struct tcphdr	*th;
@@ -252,33 +260,28 @@ struct sk_buff {
 	__u8			local_df:1,
 				cloned:1,
 				ip_summed:2,
-				nohdr:1;
-	/* 3 bits spare */
-	__u8			pkt_type;
+				nohdr:1,
+				nfctinfo:3;
+	__u8			pkt_type:3,
+				fclone:2;
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
-	unsigned long		nfmark;
-	__u32			nfcache;
-	__u32			nfctinfo;
+	__u32			nfmark;
 	struct nf_conntrack	*nfct;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+	__u8			ipvs_property:1;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
 #endif /* CONFIG_NETFILTER */
-#if defined(CONFIG_HIPPI)
-	union {
-		__u32		ifield;
-	} private;
-#endif
 #ifdef CONFIG_NET_SCHED
-	__u32			tc_index;	/* traffic control index */
+	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
-	__u32			tc_verd;	/* traffic control verdict */
-	__u32			tc_classid;	/* traffic control classid */
+	__u16			tc_verd;	/* traffic control verdict */
 #endif
-
 #endif
 
 
@@ -300,8 +303,20 @@ struct sk_buff {
 #include <asm/system.h>
 
 extern void	       __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *alloc_skb(unsigned int size,
-				 unsigned int __nocast priority);
+extern struct sk_buff *__alloc_skb(unsigned int size,
+				   unsigned int __nocast priority, int fclone);
+static inline struct sk_buff *alloc_skb(unsigned int size,
+					unsigned int __nocast priority)
+{
+	return __alloc_skb(size, priority, 0);
+}
+
+static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
+					       unsigned int __nocast priority)
+{
+	return __alloc_skb(size, priority, 1);
+}
+
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 					    unsigned int size,
 					    unsigned int __nocast priority);
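
The alloc_skb()/alloc_skb_fclone() pair added above is a thin-wrapper pattern: one backend takes an extra flag, and two inline wrappers pin it to 0 or 1 so existing call sites compile unchanged. A self-contained userspace sketch of the same shape (the struct and allocator are stand-ins, not the kernel implementation):

```c
#include <stdio.h>
#include <stdlib.h>

struct sk_buff { int fclone; };

/* One backend carries the extra 'fclone' parameter... */
static struct sk_buff *__alloc_skb(unsigned int size, int prio, int fclone)
{
	struct sk_buff *skb = malloc(sizeof(*skb));

	if (skb)
		skb->fclone = fclone;
	(void)size; (void)prio;	/* unused in this sketch */
	return skb;
}

/* ...and trivial wrappers keep the old calling conventions. */
static inline struct sk_buff *alloc_skb(unsigned int size, int prio)
{
	return __alloc_skb(size, prio, 0);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size, int prio)
{
	return __alloc_skb(size, prio, 1);
}

int main(void)
{
	struct sk_buff *a = alloc_skb(128, 0), *b = alloc_skb_fclone(128, 0);

	if (!a || !b)
		return 1;
	printf("a->fclone=%d b->fclone=%d\n", a->fclone, b->fclone);
	free(a); free(b);
	return 0;
}
```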
@@ -597,7 +612,6 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;
 
-	newsk->list = list;
 	list->qlen++;
 	prev = (struct sk_buff *)list;
 	next = prev->next;
@@ -622,7 +636,6 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;
 
-	newsk->list = list;
 	list->qlen++;
 	next = (struct sk_buff *)list;
 	prev = next->prev;
@@ -655,7 +668,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 		next->prev = prev;
 		prev->next = next;
 		result->next = result->prev = NULL;
-		result->list = NULL;
 	}
 	return result;
 }
@@ -664,7 +676,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 /*
  *	Insert a packet on a list.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -672,24 +684,23 @@ static inline void __skb_insert(struct sk_buff *newsk,
 	newsk->next = next;
 	newsk->prev = prev;
 	next->prev  = prev->next = newsk;
-	newsk->list = list;
 	list->qlen++;
 }
 
 /*
  *	Place a packet after a given packet in a list.
  */
-extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
-	__skb_insert(newsk, old, old->next, old->list);
+	__skb_insert(newsk, old, old->next, list);
 }
 
 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void	   skb_unlink(struct sk_buff *skb);
+extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
@@ -698,7 +709,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next	   = skb->next;
 	prev	   = skb->prev;
 	skb->next  = skb->prev = NULL;
-	skb->list  = NULL;
 	next->prev = prev;
 	prev->next = next;
 }
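
All the hunks above delete the skb->list back-pointer: helpers such as skb_unlink() and skb_append() now receive the owning sk_buff_head explicitly, saving one pointer per skb and one store per queue operation. A compact sketch of unlinking with an explicit head, assuming the head doubles as a sentinel node the way sk_buff_head does:

```c
#include <stdio.h>

struct node { struct node *next, *prev; };
struct head { struct node n; unsigned int qlen; };	/* n is the sentinel */

static void enqueue_tail(struct head *h, struct node *newn)
{
	struct node *prev = h->n.prev;

	newn->next = &h->n;
	newn->prev = prev;
	prev->next = newn;
	h->n.prev = newn;
	h->qlen++;
}

/* The list is a parameter, not a field of the node. */
static void unlink(struct node *x, struct head *h)
{
	h->qlen--;
	x->prev->next = x->next;
	x->next->prev = x->prev;
	x->next = x->prev = NULL;
}

int main(void)
{
	struct head h = { { &h.n, &h.n }, 0 };
	struct node a, b;

	enqueue_tail(&h, &a);
	enqueue_tail(&h, &b);
	unlink(&a, &h);
	printf("qlen=%u\n", h.qlen);	/* prints 1 */
	return 0;
}
```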
@@ -1213,6 +1223,8 @@ extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void	       skb_split(struct sk_buff *skb,
 				 struct sk_buff *skb1, const u32 len);
 
+extern void	       skb_release_data(struct sk_buff *skb);
+
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
 {
@@ -1230,6 +1242,42 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 extern void skb_init(void);
 extern void skb_add_mtu(int mtu);
 
+/**
+ *	skb_get_timestamp - get timestamp from a skb
+ *	@skb: skb to get stamp from
+ *	@stamp: pointer to struct timeval to store stamp in
+ *
+ *	Timestamps are stored in the skb as offsets to a base timestamp.
+ *	This function converts the offset back to a struct timeval and stores
+ *	it in stamp.
+ */
+static inline void skb_get_timestamp(struct sk_buff *skb, struct timeval *stamp)
+{
+	stamp->tv_sec  = skb->tstamp.off_sec;
+	stamp->tv_usec = skb->tstamp.off_usec;
+	if (skb->tstamp.off_sec) {
+		stamp->tv_sec  += skb_tv_base.tv_sec;
+		stamp->tv_usec += skb_tv_base.tv_usec;
+	}
+}
+
+/**
+ *	skb_set_timestamp - set timestamp of a skb
+ *	@skb: skb to set stamp of
+ *	@stamp: pointer to struct timeval to get stamp from
+ *
+ *	Timestamps are stored in the skb as offsets to a base timestamp.
+ *	This function converts a struct timeval to an offset and stores
+ *	it in the skb.
+ */
+static inline void skb_set_timestamp(struct sk_buff *skb, struct timeval *stamp)
+{
+	skb->tstamp.off_sec  = stamp->tv_sec  - skb_tv_base.tv_sec;
+	skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec;
+}
+
+extern void __net_timestamp(struct sk_buff *skb);
+
 #ifdef CONFIG_NETFILTER
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
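
The new skb_get_timestamp()/skb_set_timestamp() helpers store stamps as 32-bit offsets from a global base rather than as a full struct timeval (note the kernel variant treats off_sec == 0 as "no stamp" and skips adding the base). A userspace sketch of the round trip, with skb_tv_base standing in for the kernel global and microsecond normalization omitted:

```c
#include <stdio.h>
#include <sys/time.h>

struct skb_timeval { unsigned int off_sec, off_usec; };

static struct timeval skb_tv_base;	/* assumed to be set once early on */

static void set_stamp(struct skb_timeval *ts, const struct timeval *tv)
{
	ts->off_sec  = tv->tv_sec  - skb_tv_base.tv_sec;
	ts->off_usec = tv->tv_usec - skb_tv_base.tv_usec;
}

static void get_stamp(const struct skb_timeval *ts, struct timeval *tv)
{
	tv->tv_sec  = skb_tv_base.tv_sec  + ts->off_sec;
	tv->tv_usec = skb_tv_base.tv_usec + ts->off_usec;
}

int main(void)
{
	struct timeval now, back;
	struct skb_timeval ts;

	gettimeofday(&skb_tv_base, NULL);
	gettimeofday(&now, NULL);
	set_stamp(&ts, &now);
	get_stamp(&ts, &back);
	printf("seconds round-trip ok: %d\n", back.tv_sec == now.tv_sec);
	return 0;
}
```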
diff --git a/include/linux/socket.h b/include/linux/socket.h
index a5c7d96e4d2e..1739c2d5b95b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -26,6 +26,13 @@ struct __kernel_sockaddr_storage {
 #include <linux/types.h>		/* pid_t			*/
 #include <linux/compiler.h>		/* __user			*/
 
+extern int sysctl_somaxconn;
+extern void sock_init(void);
+#ifdef CONFIG_PROC_FS
+struct seq_file;
+extern void socket_seq_show(struct seq_file *seq);
+#endif
+
 typedef unsigned short	sa_family_t;
 
 /*
@@ -271,6 +278,8 @@ struct ucred {
 #define SOL_IRDA	266
 #define SOL_NETBEUI	267
 #define SOL_LLC		268
+#define SOL_DCCP	269
+#define SOL_NETLINK	270
 
 /* IPX options */
 #define IPX_TYPE	1
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 428f59794f48..72b9af4c3fd4 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -29,7 +29,9 @@
  *	Sound core interface functions
  */
 
+struct device;
 extern int register_sound_special(struct file_operations *fops, int unit);
+extern int register_sound_special_device(struct file_operations *fops, int unit, struct device *dev);
 extern int register_sound_mixer(struct file_operations *fops, int dev);
 extern int register_sound_midi(struct file_operations *fops, int dev);
 extern int register_sound_dsp(struct file_operations *fops, int dev);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e4fd82e42104..ac4ca44c75ca 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -55,24 +55,6 @@ struct tcphdr {
 	__u16	urg_ptr;
 };
 
-
-enum {
-  TCP_ESTABLISHED = 1,
-  TCP_SYN_SENT,
-  TCP_SYN_RECV,
-  TCP_FIN_WAIT1,
-  TCP_FIN_WAIT2,
-  TCP_TIME_WAIT,
-  TCP_CLOSE,
-  TCP_CLOSE_WAIT,
-  TCP_LAST_ACK,
-  TCP_LISTEN,
-  TCP_CLOSING,	/* now a valid state */
-
-  TCP_MAX_STATES	/* Leave at the end! */
-};
-
-#define TCP_STATE_MASK	0xF
 #define TCP_ACTION_FIN	(1 << 7)
 
 enum {
@@ -195,8 +177,9 @@ struct tcp_info
 
 #include <linux/config.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
 #include <net/sock.h>
+#include <net/inet_connection_sock.h>
+#include <net/inet_timewait_sock.h>
 
 /* This defines a selective acknowledgement block. */
 struct tcp_sack_block {
@@ -236,8 +219,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 }
 
 struct tcp_sock {
-	/* inet_sock has to be the first member of tcp_sock */
-	struct inet_sock	inet;
+	/* inet_connection_sock has to be the first member of tcp_sock */
+	struct inet_connection_sock	inet_conn;
 	int	tcp_header_len;	/* Bytes of tcp header to send		*/
 
 /*
@@ -258,19 +241,6 @@ struct tcp_sock {
 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
-	struct tcp_bind_bucket *bind_hash;
-	/* Delayed ACK control data */
-	struct {
-		__u8	pending;	/* ACK is pending */
-		__u8	quick;		/* Scheduled number of quick acks	*/
-		__u8	pingpong;	/* The session is interactive		*/
-		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
-		__u32	ato;		/* Predicted tick of soft clock		*/
-		unsigned long timeout;	/* Currently scheduled timeout		*/
-		__u32	lrcvtime;	/* timestamp of last received data packet */
-		__u16	last_seg_size;	/* Size of last incoming segment	*/
-		__u16	rcv_mss;	/* MSS used for delayed ACK decisions	*/
-	} ack;
 
 	/* Data for direct copy to user */
 	struct {
@@ -288,19 +258,15 @@ struct tcp_sock {
 	__u32	mss_cache;	/* Cached effective mss, not including SACKS */
 	__u16	xmit_size_goal;	/* Goal for segmenting output packets	*/
 	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
-	__u8	ca_state;	/* State of fast-retransmit machine	*/
-	__u8	retransmits;	/* Number of unrecovered RTO timeouts.	*/
 
-	__u16	advmss;		/* Advertised MSS			*/
 	__u32	window_clamp;	/* Maximal window to advertise		*/
 	__u32	rcv_ssthresh;	/* Current window clamp			*/
 
 	__u32	frto_highmark;	/* snd_nxt when RTO occurred */
 	__u8	reordering;	/* Packet reordering metric.		*/
 	__u8	frto_counter;	/* Number of new acks after RTO */
-
-	__u8	unused;
-	__u8	defer_accept;	/* User waits for some data after accept() */
+	__u8	nonagle;	/* Disable Nagle algorithm?		*/
+	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
 
 /* RTT measurement */
 	__u32	srtt;		/* smoothed round trip time << 3	*/
@@ -308,19 +274,13 @@ struct tcp_sock {
 	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
 	__u32	rttvar;		/* smoothed mdev_max			*/
 	__u32	rtt_seq;	/* sequence number to update rttvar	*/
-	__u32	rto;		/* retransmit timeout			*/
 
 	__u32	packets_out;	/* Packets which are "in flight"	*/
 	__u32	left_out;	/* Packets which leaved network		*/
 	__u32	retrans_out;	/* Retransmitted packets out		*/
-	__u8	backoff;	/* backoff				*/
 /*
  *	Options received (usually on last packet, some only on SYN packets).
  */
-	__u8	nonagle;	/* Disable Nagle algorithm?		*/
-	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
-
-	__u8	probes_out;	/* unanswered 0 window probes		*/
 	struct tcp_options_received rx_opt;
 
 /*
@@ -333,11 +293,6 @@ struct tcp_sock {
 	__u32	snd_cwnd_used;
 	__u32	snd_cwnd_stamp;
 
-	/* Two commonly used timers in both sender and receiver paths. */
-	unsigned long		timeout;
-	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
-	struct timer_list	delack_timer;		/* Ack delay		*/
-
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 
 	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific	*/
@@ -352,8 +307,7 @@ struct tcp_sock {
 	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
 	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-	__u8	syn_retries;	/* num of allowed syn retries */
-	__u8	ecn_flags;	/* ECN status bits.			*/
+	__u16	advmss;		/* Advertised MSS			*/
 	__u16	prior_ssthresh; /* ssthresh saved at recovery start	*/
 	__u32	lost_out;	/* Lost packets				*/
 	__u32	sacked_out;	/* SACK'd packets			*/
@@ -367,14 +321,12 @@ struct tcp_sock {
 	int	undo_retrans;	/* number of undoable retransmissions. */
 	__u32	urg_seq;	/* Seq of received urgent pointer */
 	__u16	urg_data;	/* Saved octet of OOB data and control flags */
-	__u8	pending;	/* Scheduled timer event	*/
 	__u8	urg_mode;	/* In urgent mode		*/
+	__u8	ecn_flags;	/* ECN status bits.		*/
 	__u32	snd_up;		/* Urgent pointer		*/
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	struct request_sock_queue accept_queue; /* FIFO of established children */
-
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
@@ -394,11 +346,6 @@ struct tcp_sock {
 		__u32	seq;
 		__u32	time;
 	} rcvq_space;
-
-	/* Pluggable TCP congestion control hook */
-	struct tcp_congestion_ops *ca_ops;
-	u32	ca_priv[16];
-#define TCP_CA_PRIV_SIZE	(16*sizeof(u32))
 };
 
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
@@ -406,9 +353,18 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
 	return (struct tcp_sock *)sk;
 }
 
-static inline void *tcp_ca(const struct tcp_sock *tp)
+struct tcp_timewait_sock {
+	struct inet_timewait_sock tw_sk;
+	__u32			  tw_rcv_nxt;
+	__u32			  tw_snd_nxt;
+	__u32			  tw_rcv_wnd;
+	__u32			  tw_ts_recent;
+	long			  tw_ts_recent_stamp;
+};
+
+static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
 {
-	return (void *) tp->ca_priv;
+	return (struct tcp_timewait_sock *)sk;
 }
 
 #endif
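
tcp_sk() and the new tcp_twsk() rely on first-member embedding: because inet_conn (and, inside it, the inet and generic sock layers) sits at offset zero, a struct sock pointer can be cast outward to the most derived type. A minimal sketch of the layering, using simplified stand-in structs rather than the real kernel definitions:

```c
#include <stdio.h>

struct sock                 { int family; };
struct inet_sock            { struct sock sk; int dport; };
struct inet_connection_sock { struct inet_sock icsk_inet; int rto; };
struct tcp_sock             { struct inet_connection_sock inet_conn; int srtt; };

static struct tcp_sock *tcp_sk(struct sock *sk)
{
	return (struct tcp_sock *)sk;	/* valid: sk lives at offset 0 */
}

int main(void)
{
	struct tcp_sock tp = { .inet_conn.icsk_inet.sk.family = 2, .srtt = 7 };
	struct sock *sk = &tp.inet_conn.icsk_inet.sk;

	printf("family=%d srtt=%d\n", sk->family, tcp_sk(sk)->srtt);
	return 0;
}
```

The same invariant is what the "inet_connection_sock has to be the first member" comment in the hunk above is guarding.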
diff --git a/include/linux/tcp_diag.h b/include/linux/tcp_diag.h
deleted file mode 100644
index 7a5996743946..000000000000
--- a/include/linux/tcp_diag.h
+++ /dev/null
@@ -1,127 +0,0 @@
1#ifndef _TCP_DIAG_H_
2#define _TCP_DIAG_H_ 1
3
4/* Just some random number */
5#define TCPDIAG_GETSOCK 18
6
7/* Socket identity */
8struct tcpdiag_sockid
9{
10 __u16 tcpdiag_sport;
11 __u16 tcpdiag_dport;
12 __u32 tcpdiag_src[4];
13 __u32 tcpdiag_dst[4];
14 __u32 tcpdiag_if;
15 __u32 tcpdiag_cookie[2];
16#define TCPDIAG_NOCOOKIE (~0U)
17};
18
19/* Request structure */
20
21struct tcpdiagreq
22{
23 __u8 tcpdiag_family; /* Family of addresses. */
24 __u8 tcpdiag_src_len;
25 __u8 tcpdiag_dst_len;
26 __u8 tcpdiag_ext; /* Query extended information */
27
28 struct tcpdiag_sockid id;
29
30 __u32 tcpdiag_states; /* States to dump */
31 __u32 tcpdiag_dbs; /* Tables to dump (NI) */
32};
33
34enum
35{
36 TCPDIAG_REQ_NONE,
37 TCPDIAG_REQ_BYTECODE,
38};
39
40#define TCPDIAG_REQ_MAX TCPDIAG_REQ_BYTECODE
41
42/* Bytecode is sequence of 4 byte commands followed by variable arguments.
43 * All the commands identified by "code" are conditional jumps forward:
44 * to offset cc+"yes" or to offset cc+"no". "yes" is supposed to be
45 * length of the command and its arguments.
46 */
47
48struct tcpdiag_bc_op
49{
50 unsigned char code;
51 unsigned char yes;
52 unsigned short no;
53};
54
55enum
56{
57 TCPDIAG_BC_NOP,
58 TCPDIAG_BC_JMP,
59 TCPDIAG_BC_S_GE,
60 TCPDIAG_BC_S_LE,
61 TCPDIAG_BC_D_GE,
62 TCPDIAG_BC_D_LE,
63 TCPDIAG_BC_AUTO,
64 TCPDIAG_BC_S_COND,
65 TCPDIAG_BC_D_COND,
66};
67
68struct tcpdiag_hostcond
69{
70 __u8 family;
71 __u8 prefix_len;
72 int port;
73 __u32 addr[0];
74};
75
76/* Base info structure. It contains socket identity (addrs/ports/cookie)
77 * and, alas, the information shown by netstat. */
78struct tcpdiagmsg
79{
80 __u8 tcpdiag_family;
81 __u8 tcpdiag_state;
82 __u8 tcpdiag_timer;
83 __u8 tcpdiag_retrans;
84
85 struct tcpdiag_sockid id;
86
87 __u32 tcpdiag_expires;
88 __u32 tcpdiag_rqueue;
89 __u32 tcpdiag_wqueue;
90 __u32 tcpdiag_uid;
91 __u32 tcpdiag_inode;
92};
93
94/* Extensions */
95
96enum
97{
98 TCPDIAG_NONE,
99 TCPDIAG_MEMINFO,
100 TCPDIAG_INFO,
101 TCPDIAG_VEGASINFO,
102 TCPDIAG_CONG,
103};
104
105#define TCPDIAG_MAX TCPDIAG_CONG
106
107
108/* TCPDIAG_MEM */
109
110struct tcpdiag_meminfo
111{
112 __u32 tcpdiag_rmem;
113 __u32 tcpdiag_wmem;
114 __u32 tcpdiag_fmem;
115 __u32 tcpdiag_tmem;
116};
117
118/* TCPDIAG_VEGASINFO */
119
120struct tcpvegas_info {
121 __u32 tcpv_enabled;
122 __u32 tcpv_rttcnt;
123 __u32 tcpv_rtt;
124 __u32 tcpv_minrtt;
125};
126
127#endif /* _TCP_DIAG_H_ */
diff --git a/include/linux/types.h b/include/linux/types.h
index dcb13f865df9..2b678c22ca4a 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -123,6 +123,9 @@ typedef __u64 u_int64_t;
 typedef		__s64		int64_t;
 #endif
 
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 unsigned long long __attribute__((aligned(8)))
+
 /*
  * The type used for indexing onto a disc or disc partition.
  * If required, asm/types.h can override it and define
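
aligned_u64 exists because a bare unsigned long long is only 4-byte aligned in the i386 ABI, so a structure shared between 32-bit userspace and a 64-bit kernel can lay out differently on each side; the aligned(8) attribute pins the layout. A quick demonstration (offsets print 4 vs. 8 on 32-bit x86, 8 vs. 8 on x86-64):

```c
#include <stdio.h>
#include <stddef.h>

#define aligned_u64 unsigned long long __attribute__((aligned(8)))

struct abi_plain   { unsigned int flags; unsigned long long val; };
struct abi_aligned { unsigned int flags; aligned_u64 val; };

int main(void)
{
	printf("plain offset=%zu aligned offset=%zu\n",
	       offsetof(struct abi_plain, val),
	       offsetof(struct abi_aligned, val));	/* aligned is always 8 */
	return 0;
}
```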
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index f0d423300d84..0fb077d68441 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -258,9 +258,27 @@ struct xfrm_usersa_flush {
 	__u8				proto;
 };
 
+#ifndef __KERNEL__
+/* backwards compatibility for userspace */
 #define XFRMGRP_ACQUIRE		1
 #define XFRMGRP_EXPIRE		2
 #define XFRMGRP_SA		4
 #define XFRMGRP_POLICY		8
+#endif
+
+enum xfrm_nlgroups {
+	XFRMNLGRP_NONE,
+#define XFRMNLGRP_NONE		XFRMNLGRP_NONE
+	XFRMNLGRP_ACQUIRE,
+#define XFRMNLGRP_ACQUIRE	XFRMNLGRP_ACQUIRE
+	XFRMNLGRP_EXPIRE,
+#define XFRMNLGRP_EXPIRE	XFRMNLGRP_EXPIRE
+	XFRMNLGRP_SA,
+#define XFRMNLGRP_SA		XFRMNLGRP_SA
+	XFRMNLGRP_POLICY,
+#define XFRMNLGRP_POLICY	XFRMNLGRP_POLICY
+	__XFRMNLGRP_MAX
+};
+#define XFRMNLGRP_MAX	(__XFRMNLGRP_MAX - 1)
 
 #endif /* _LINUX_XFRM_H */
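
With groups numbered by enum rather than limited to 32 bitmask slots, a listener subscribes per group id instead of OR-ing a mask into nl_groups at bind time. A hedged userspace sketch using NETLINK_ADD_MEMBERSHIP (assuming a kernel from this series or later that understands the option; error handling trimmed):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	int grp = 1;	/* e.g. XFRMNLGRP_ACQUIRE */
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

	if (fd < 0)
		return 1;
	/* Join one multicast group by its enum id. */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		perror("setsockopt");
	close(fd);
	return 0;
}
```

SOL_NETLINK (270) is the level added in the socket.h hunk earlier in this diff.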
diff --git a/include/net/act_api.h b/include/net/act_api.h
index ed00a995f576..b55eb7c7f033 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -63,7 +63,7 @@ struct tc_action_ops
 	__u32	type; /* TBD to match kind */
 	__u32	capab;  /* capabilities includes 4 bit version */
 	struct module		*owner;
-	int     (*act)(struct sk_buff **, struct tc_action *);
+	int     (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *);
 	int     (*get_stats)(struct sk_buff *, struct tc_action *);
 	int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
 	int     (*cleanup)(struct tc_action *, int bind);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a0ed93672176..750e2508dd90 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -45,6 +45,7 @@ struct prefix_info {
 
 #ifdef __KERNEL__
 
+#include <linux/config.h>
 #include <linux/netdevice.h>
 #include <net/if_inet6.h>
 #include <net/ipv6.h>
@@ -238,5 +239,10 @@ static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
 		addr->s6_addr32[3] == htonl(0x00000002));
 }
 
+#ifdef CONFIG_PROC_FS
+extern int if6_proc_init(void);
+extern void if6_proc_exit(void);
+#endif
+
 #endif
 #endif
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b60b3846b9d1..b5d785ab4a0e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -1,5 +1,11 @@
 #ifndef __LINUX_NET_AFUNIX_H
 #define __LINUX_NET_AFUNIX_H
+
+#include <linux/config.h>
+#include <linux/socket.h>
+#include <linux/un.h>
+#include <net/sock.h>
+
 extern void unix_inflight(struct file *fp);
 extern void unix_notinflight(struct file *fp);
 extern void unix_gc(void);
@@ -74,5 +80,14 @@ struct unix_sock {
 	wait_queue_head_t	peer_wait;
 };
 #define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_unix_max_dgram_qlen;
+extern void unix_sysctl_register(void);
+extern void unix_sysctl_unregister(void);
+#else
+static inline void unix_sysctl_register(void) {}
+static inline void unix_sysctl_unregister(void) {}
+#endif
 #endif
 #endif
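
The CONFIG_SYSCTL block above uses the usual compile-out idiom: when the option is off, the declarations collapse to empty static inline stubs, so callers need no #ifdefs of their own. A toy version showing the shape (defining CONFIG_SYSCTL here would also require supplying real definitions, or the link fails):

```c
#include <stdio.h>

/* #define CONFIG_SYSCTL 1 */	/* toggle to see the other branch */

#ifdef CONFIG_SYSCTL
void unix_sysctl_register(void);	/* real implementations elsewhere */
void unix_sysctl_unregister(void);
#else
static inline void unix_sysctl_register(void) {}	/* no-op stubs */
static inline void unix_sysctl_unregister(void) {}
#endif

int main(void)
{
	unix_sysctl_register();		/* compiles either way */
	unix_sysctl_unregister();
	puts("ok");
	return 0;
}
```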
diff --git a/include/net/arp.h b/include/net/arp.h
index a1f09fad6a52..a13e30c35f42 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -11,7 +11,7 @@ extern struct neigh_table arp_tbl;
 
 extern void	arp_init(void);
 extern int	arp_rcv(struct sk_buff *skb, struct net_device *dev,
-			struct packet_type *pt);
+			struct packet_type *pt, struct net_device *orig_dev);
 extern int	arp_find(unsigned char *haddr, struct sk_buff *skb);
 extern int	arp_ioctl(unsigned int cmd, void __user *arg);
 extern void	arp_send(int type, int ptype, u32 dest_ip,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3696f988a9f1..926eed543023 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -316,7 +316,7 @@ extern int ax25_protocol_is_registered(unsigned int);
 
 /* ax25_in.c */
 extern int  ax25_rx_iframe(ax25_cb *, struct sk_buff *);
-extern int  ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *);
+extern int  ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 
 /* ax25_ip.c */
 extern int  ax25_encapsulate(struct sk_buff *, struct net_device *, unsigned short, void *, void *, unsigned int);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 06b24f637026..6dfa4a61ffd0 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -131,11 +131,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
 
 /* Skb helpers */
 struct bt_skb_cb {
-	int    incoming;
+	__u8 pkt_type;
+	__u8 incoming;
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb))
 
-static inline struct sk_buff *bt_skb_alloc(unsigned int len, int how)
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how)
 {
 	struct sk_buff *skb;
 
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 6f0706f4af68..371e7d3f2e6f 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -453,6 +453,15 @@ struct inquiry_info_with_rssi {
 	__u16		clock_offset;
 	__s8		rssi;
 } __attribute__ ((packed));
+struct inquiry_info_with_rssi_and_pscan_mode {
+	bdaddr_t	bdaddr;
+	__u8		pscan_rep_mode;
+	__u8		pscan_period_mode;
+	__u8		pscan_mode;
+	__u8		dev_class[3];
+	__u16		clock_offset;
+	__s8		rssi;
+} __attribute__ ((packed));
 
 #define HCI_EV_CONN_COMPLETE	0x03
 struct hci_ev_conn_complete {
@@ -584,6 +593,12 @@ struct hci_ev_clock_offset {
 	__u16		clock_offset;
 } __attribute__ ((packed));
 
+#define HCI_EV_PSCAN_REP_MODE	0x20
+struct hci_ev_pscan_rep_mode {
+	bdaddr_t	bdaddr;
+	__u8		pscan_rep_mode;
+} __attribute__ ((packed));
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL	0xFD
 struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6d63a47c731b..7f933f302078 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -404,7 +404,7 @@ static inline int hci_recv_frame(struct sk_buff *skb)
 	bt_cb(skb)->incoming = 1;
 
 	/* Time stamp */
-	do_gettimeofday(&skb->stamp);
+	__net_timestamp(skb);
 
 	/* Queue frame for rx task */
 	skb_queue_tail(&hdev->rx_q, skb);
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 13669bad00b3..ffea9d54071f 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -80,9 +80,9 @@
 #define RFCOMM_RPN_STOP_15	1
 
 #define RFCOMM_RPN_PARITY_NONE	0x0
-#define RFCOMM_RPN_PARITY_ODD	0x4
-#define RFCOMM_RPN_PARITY_EVEN	0x5
-#define RFCOMM_RPN_PARITY_MARK	0x6
+#define RFCOMM_RPN_PARITY_ODD	0x1
+#define RFCOMM_RPN_PARITY_EVEN	0x3
+#define RFCOMM_RPN_PARITY_MARK	0x5
 #define RFCOMM_RPN_PARITY_SPACE	0x7
 
 #define RFCOMM_RPN_FLOW_NONE	0x00
@@ -223,8 +223,14 @@ struct rfcomm_dlc {
 #define RFCOMM_CFC_DISABLED 0
 #define RFCOMM_CFC_ENABLED  RFCOMM_MAX_CREDITS
 
+/* ---- RFCOMM SEND RPN ---- */
+int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
+			u8 bit_rate, u8 data_bits, u8 stop_bits,
+			u8 parity, u8 flow_ctrl_settings,
+			u8 xon_char, u8 xoff_char, u16 param_mask);
+
 /* ---- RFCOMM DLCs (channels) ---- */
-struct rfcomm_dlc *rfcomm_dlc_alloc(int prio);
+struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio);
 void rfcomm_dlc_free(struct rfcomm_dlc *d);
 int  rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
 int  rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
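
The corrected parity constants above are consistent with an encoding where bit 0 switches parity on and bits 1-2 pick the type (odd/even/mark/space); that bit-layout reading is this editor's assumption, not something stated in the patch. A small table dump makes the old/new difference visible:

```c
#include <stdio.h>

#define RFCOMM_RPN_PARITY_NONE	0x0
#define RFCOMM_RPN_PARITY_ODD	0x1
#define RFCOMM_RPN_PARITY_EVEN	0x3
#define RFCOMM_RPN_PARITY_MARK	0x5
#define RFCOMM_RPN_PARITY_SPACE	0x7

int main(void)
{
	int codes[] = { RFCOMM_RPN_PARITY_NONE, RFCOMM_RPN_PARITY_ODD,
			RFCOMM_RPN_PARITY_EVEN, RFCOMM_RPN_PARITY_MARK,
			RFCOMM_RPN_PARITY_SPACE };
	int i;

	for (i = 0; i < 5; i++)
		printf("code 0x%x: parity %s, type %d\n", codes[i],
		       (codes[i] & 1) ? "on" : "off", codes[i] >> 1);
	return 0;
}
```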
diff --git a/include/net/datalink.h b/include/net/datalink.h
index 5797ba3d2eb5..deb7ca75db48 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -9,7 +9,7 @@ struct datalink_proto {
 	unsigned short	header_length;
 
 	int	(*rcvfunc)(struct sk_buff *, struct net_device *,
-				struct packet_type *);
+				struct packet_type *, struct net_device *);
 	int	(*request)(struct datalink_proto *, struct sk_buff *,
 					unsigned char *);
 	struct list_head node;
diff --git a/include/net/dn.h b/include/net/dn.h
index 5551c46db397..c1dbbd222793 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -3,6 +3,7 @@
 
 #include <linux/dn.h>
 #include <net/sock.h>
+#include <net/tcp.h>
 #include <asm/byteorder.h>
 
 typedef unsigned short dn_address;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index e5ef0d15fb45..6cdebeee5f96 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -57,4 +57,11 @@ static inline struct raw_sock *raw_sk(const struct sock *sk)
 	return (struct raw_sock *)sk;
 }
 
+extern int sysctl_icmp_echo_ignore_all;
+extern int sysctl_icmp_echo_ignore_broadcasts;
+extern int sysctl_icmp_ignore_bogus_error_responses;
+extern int sysctl_icmp_errors_use_inbound_ifaddr;
+extern int sysctl_icmp_ratelimit;
+extern int sysctl_icmp_ratemask;
+
 #endif	/* _ICMP_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
new file mode 100644
index 000000000000..03df3b157960
--- /dev/null
+++ b/include/net/inet6_hashtables.h
@@ -0,0 +1,130 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Authors: Lotsa people, from code originally in tcp
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _INET6_HASHTABLES_H
15#define _INET6_HASHTABLES_H
16
17#include <linux/config.h>
18
19#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
20#include <linux/in6.h>
21#include <linux/ipv6.h>
22#include <linux/types.h>
23
24#include <net/ipv6.h>
25
26struct inet_hashinfo;
27
28/* I have no idea if this is a good hash for v6 or not. -DaveM */
29static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
30 const struct in6_addr *faddr, const u16 fport,
31 const int ehash_size)
32{
33 int hashent = (lport ^ fport);
34
35 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
36 hashent ^= hashent >> 16;
37 hashent ^= hashent >> 8;
38 return (hashent & (ehash_size - 1));
39}
40
41static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
42{
43 const struct inet_sock *inet = inet_sk(sk);
44 const struct ipv6_pinfo *np = inet6_sk(sk);
45 const struct in6_addr *laddr = &np->rcv_saddr;
46 const struct in6_addr *faddr = &np->daddr;
47 const __u16 lport = inet->num;
48 const __u16 fport = inet->dport;
49 return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size);
50}
51
52/*
53 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
54 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
55 *
56 * The sockhash lock must be held as a reader here.
57 */
58static inline struct sock *
59 __inet6_lookup_established(struct inet_hashinfo *hashinfo,
60 const struct in6_addr *saddr,
61 const u16 sport,
62 const struct in6_addr *daddr,
63 const u16 hnum,
64 const int dif)
65{
66 struct sock *sk;
67 const struct hlist_node *node;
68 const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
69 /* Optimize here for direct hit, only listening connections can
70 * have wildcards anyways.
71 */
72 const int hash = inet6_ehashfn(daddr, hnum, saddr, sport,
73 hashinfo->ehash_size);
74 struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
75
76 read_lock(&head->lock);
77 sk_for_each(sk, node, &head->chain) {
78 /* For IPV6 do the cheaper port and family tests first. */
79 if (INET6_MATCH(sk, saddr, daddr, ports, dif))
80 goto hit; /* You sunk my battleship! */
81 }
82 /* Must check for a TIME_WAIT'er before going to listener hash. */
83 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
84 const struct inet_timewait_sock *tw = inet_twsk(sk);
85
86 if(*((__u32 *)&(tw->tw_dport)) == ports &&
87 sk->sk_family == PF_INET6) {
88 const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
89
90 if (ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
91 ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
92 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
93 goto hit;
94 }
95 }
96 read_unlock(&head->lock);
97 return NULL;
98
99hit:
100 sock_hold(sk);
101 read_unlock(&head->lock);
102 return sk;
103}
104
105extern struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
106 const struct in6_addr *daddr,
107 const unsigned short hnum,
108 const int dif);
109
110static inline struct sock *__inet6_lookup(struct inet_hashinfo *hashinfo,
111 const struct in6_addr *saddr,
112 const u16 sport,
113 const struct in6_addr *daddr,
114 const u16 hnum,
115 const int dif)
116{
117 struct sock *sk = __inet6_lookup_established(hashinfo, saddr, sport,
118 daddr, hnum, dif);
119 if (sk)
120 return sk;
121
122 return inet6_lookup_listener(hashinfo, daddr, hnum, dif);
123}
124
125extern struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
126 const struct in6_addr *saddr, const u16 sport,
127 const struct in6_addr *daddr, const u16 dport,
128 const int dif);
129#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
130#endif /* _INET6_HASHTABLES_H */
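
inet6_ehashfn() above hashes only the low 32 bits of each address, XORs in both ports, then folds the word down before masking to a power-of-two table size. A standalone sketch of the same fold (sample values are arbitrary, chosen only to exercise the function):

```c
#include <stdio.h>
#include <stdint.h>

/* Same XOR-and-fold as inet6_ehashfn(), using plain stdint types. */
static int ehashfn6(uint32_t laddr_lo, uint16_t lport,
		    uint32_t faddr_lo, uint16_t fport, int ehash_size)
{
	uint32_t h = (uint32_t)(lport ^ fport);

	h ^= laddr_lo ^ faddr_lo;
	h ^= h >> 16;		/* fold high bits down... */
	h ^= h >> 8;		/* ...twice, to mix the low byte */
	return (int)(h & (ehash_size - 1));	/* ehash_size: power of 2 */
}

int main(void)
{
	printf("bucket=%d\n",
	       ehashfn6(0x01020304, 80, 0x0a0b0c0d, 51000, 1 << 9));
	return 0;
}
```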
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index fbc1f4d140d8..f943306ce5ff 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -8,6 +8,11 @@ extern struct proto_ops inet_dgram_ops;
  *	INET4 prototypes used by INET6
  */
 
+struct msghdr;
+struct sock;
+struct sockaddr;
+struct socket;
+
 extern void	inet_remove_sock(struct sock *sk1);
 extern void	inet_put_sock(unsigned short num,
 			      struct sock *sk);
@@ -29,7 +34,6 @@ extern unsigned int inet_poll(struct file * file, struct socket *sock, struct p
 extern int	inet_listen(struct socket *sock, int backlog);
 
 extern void	inet_sock_destruct(struct sock *sk);
-extern atomic_t	inet_sock_nr;
 
 extern int	inet_bind(struct socket *sock,
 			  struct sockaddr *uaddr, int addr_len);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
new file mode 100644
index 000000000000..651f824c1008
--- /dev/null
+++ b/include/net/inet_connection_sock.h
@@ -0,0 +1,276 @@
1/*
2 * NET Generic infrastructure for INET connection oriented protocols.
3 *
4 * Definitions for inet_connection_sock
5 *
6 * Authors: Many people, see the TCP sources
7 *
8 * From code originally in TCP
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#ifndef _INET_CONNECTION_SOCK_H
16#define _INET_CONNECTION_SOCK_H
17
18#include <linux/ip.h>
19#include <linux/string.h>
20#include <linux/timer.h>
21#include <net/request_sock.h>
22
23#define INET_CSK_DEBUG 1
24
25/* Cancel timers, when they are not required. */
26#undef INET_CSK_CLEAR_TIMERS
27
28struct inet_bind_bucket;
29struct inet_hashinfo;
30struct tcp_congestion_ops;
31
32/** inet_connection_sock - INET connection oriented sock
33 *
34 * @icsk_accept_queue: FIFO of established children
35 * @icsk_bind_hash: Bind node
36 * @icsk_timeout: Timeout
37 * @icsk_retransmit_timer: Resend (no ack)
38 * @icsk_rto: Retransmit timeout
39 * @icsk_ca_ops Pluggable congestion control hook
40 * @icsk_ca_state: Congestion control state
41 * @icsk_retransmits: Number of unrecovered [RTO] timeouts
42 * @icsk_pending: Scheduled timer event
43 * @icsk_backoff: Backoff
44 * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries
45 * @icsk_probes_out: unanswered 0 window probes
46 * @icsk_ack: Delayed ACK control data
47 */
48struct inet_connection_sock {
49 /* inet_sock has to be the first member! */
50 struct inet_sock icsk_inet;
51 struct request_sock_queue icsk_accept_queue;
52 struct inet_bind_bucket *icsk_bind_hash;
53 unsigned long icsk_timeout;
54 struct timer_list icsk_retransmit_timer;
55 struct timer_list icsk_delack_timer;
56 __u32 icsk_rto;
57 struct tcp_congestion_ops *icsk_ca_ops;
58 __u8 icsk_ca_state;
59 __u8 icsk_retransmits;
60 __u8 icsk_pending;
61 __u8 icsk_backoff;
62 __u8 icsk_syn_retries;
63 __u8 icsk_probes_out;
64 /* 2 BYTES HOLE, TRY TO PACK! */
65 struct {
66 __u8 pending; /* ACK is pending */
67 __u8 quick; /* Scheduled number of quick acks */
68 __u8 pingpong; /* The session is interactive */
69 __u8 blocked; /* Delayed ACK was blocked by socket lock */
70 __u32 ato; /* Predicted tick of soft clock */
71 unsigned long timeout; /* Currently scheduled timeout */
72 __u32 lrcvtime; /* timestamp of last received data packet */
73 __u16 last_seg_size; /* Size of last incoming segment */
74 __u16 rcv_mss; /* MSS used for delayed ACK decisions */
75 } icsk_ack;
76 u32 icsk_ca_priv[16];
77#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
78};
79
80#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
81#define ICSK_TIME_DACK 2 /* Delayed ack timer */
82#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
83#define ICSK_TIME_KEEPOPEN 4 /* Keepalive timer */
84
85static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
86{
87 return (struct inet_connection_sock *)sk;
88}
89
90static inline void *inet_csk_ca(const struct sock *sk)
91{
92 return (void *)inet_csk(sk)->icsk_ca_priv;
93}
94
95extern struct sock *inet_csk_clone(struct sock *sk,
96 const struct request_sock *req,
97 const unsigned int __nocast priority);
98
99enum inet_csk_ack_state_t {
100 ICSK_ACK_SCHED = 1,
101 ICSK_ACK_TIMER = 2,
102 ICSK_ACK_PUSHED = 4
103};
104
105extern void inet_csk_init_xmit_timers(struct sock *sk,
106 void (*retransmit_handler)(unsigned long),
107 void (*delack_handler)(unsigned long),
108 void (*keepalive_handler)(unsigned long));
109extern void inet_csk_clear_xmit_timers(struct sock *sk);
110
111static inline void inet_csk_schedule_ack(struct sock *sk)
112{
113 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
114}
115
116static inline int inet_csk_ack_scheduled(const struct sock *sk)
117{
118 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
119}
120
121static inline void inet_csk_delack_init(struct sock *sk)
122{
123 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
124}
125
126extern void inet_csk_delete_keepalive_timer(struct sock *sk);
127extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
128
129#ifdef INET_CSK_DEBUG
130extern const char inet_csk_timer_bug_msg[];
131#endif
132
133static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
134{
135 struct inet_connection_sock *icsk = inet_csk(sk);
136
137 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
138 icsk->icsk_pending = 0;
139#ifdef INET_CSK_CLEAR_TIMERS
140 sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
141#endif
142 } else if (what == ICSK_TIME_DACK) {
143 icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
144#ifdef INET_CSK_CLEAR_TIMERS
145 sk_stop_timer(sk, &icsk->icsk_delack_timer);
146#endif
147 }
148#ifdef INET_CSK_DEBUG
149 else {
150 pr_debug("%s", inet_csk_timer_bug_msg);
151 }
152#endif
153}
154
155/*
156 * Reset the retransmission timer
157 */
158static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
159 unsigned long when,
160 const unsigned long max_when)
161{
162 struct inet_connection_sock *icsk = inet_csk(sk);
163
164 if (when > max_when) {
165#ifdef INET_CSK_DEBUG
166 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
167 sk, what, when, current_text_addr());
168#endif
169 when = max_when;
170 }
171
172 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
173 icsk->icsk_pending = what;
174 icsk->icsk_timeout = jiffies + when;
175 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
176 } else if (what == ICSK_TIME_DACK) {
177 icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
178 icsk->icsk_ack.timeout = jiffies + when;
179 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
180 }
181#ifdef INET_CSK_DEBUG
182 else {
183 pr_debug("%s", inet_csk_timer_bug_msg);
184 }
185#endif
186}
187
188extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
189
190extern struct request_sock *inet_csk_search_req(const struct sock *sk,
191 struct request_sock ***prevp,
192 const __u16 rport,
193 const __u32 raddr,
194 const __u32 laddr);
195extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
196 struct sock *sk, unsigned short snum);
197
198extern struct dst_entry* inet_csk_route_req(struct sock *sk,
199 const struct request_sock *req);
200
201static inline void inet_csk_reqsk_queue_add(struct sock *sk,
202 struct request_sock *req,
203 struct sock *child)
204{
205 reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
206}
207
208extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
209 struct request_sock *req,
210 const unsigned timeout);
211
212static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
213 struct request_sock *req)
214{
215 if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
216 inet_csk_delete_keepalive_timer(sk);
217}
218
219static inline void inet_csk_reqsk_queue_added(struct sock *sk,
220 const unsigned long timeout)
221{
222 if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
223 inet_csk_reset_keepalive_timer(sk, timeout);
224}
225
226static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
227{
228 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
229}
230
231static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
232{
233 return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
234}
235
236static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
237{
238 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
239}
240
241static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
242 struct request_sock *req,
243 struct request_sock **prev)
244{
245 reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
246}
247
248static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
249 struct request_sock *req,
250 struct request_sock **prev)
251{
252 inet_csk_reqsk_queue_unlink(sk, req, prev);
253 inet_csk_reqsk_queue_removed(sk, req);
254 reqsk_free(req);
255}
256
257extern void inet_csk_reqsk_queue_prune(struct sock *parent,
258 const unsigned long interval,
259 const unsigned long timeout,
260 const unsigned long max_rto);
261
262extern void inet_csk_destroy_sock(struct sock *sk);
263
264/*
265 * LISTEN is a special case for poll..
266 */
267static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
268{
269 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
270 (POLLIN | POLLRDNORM) : 0;
271}
272
273extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
274extern void inet_csk_listen_stop(struct sock *sk);
275
276#endif /* _INET_CONNECTION_SOCK_H */
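
inet_csk_reset_xmit_timer() above multiplexes one retransmit timer slot with an event tag (icsk_pending) and clamps the requested delay to max_when before arming. A stripped-down userspace sketch of that core, with jiffies faked and the real sk_reset_timer() call replaced by a plain field assignment:

```c
#include <stdio.h>

#define ICSK_TIME_RETRANS 1	/* retransmit and zero-window probe share */
#define ICSK_TIME_PROBE0  3	/* the same timer slot, tagged by 'what'  */

struct csk { int pending; unsigned long timeout; };

static unsigned long jiffies = 1000;	/* stand-in for the kernel tick */

static void reset_xmit_timer(struct csk *c, int what,
			     unsigned long when, unsigned long max_when)
{
	if (when > max_when)
		when = max_when;	/* never arm past the cap */
	c->pending = what;		/* remember which event is scheduled */
	c->timeout = jiffies + when;	/* the real code calls sk_reset_timer() */
}

int main(void)
{
	struct csk c = { 0, 0 };

	reset_xmit_timer(&c, ICSK_TIME_RETRANS, 5000, 1200);
	printf("pending=%d timeout=%lu\n", c.pending, c.timeout); /* 1 2200 */
	return 0;
}
```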
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
new file mode 100644
index 000000000000..646b6ea7fe26
--- /dev/null
+++ b/include/net/inet_hashtables.h
@@ -0,0 +1,427 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Authors: Lotsa people, from code originally in tcp
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _INET_HASHTABLES_H
15#define _INET_HASHTABLES_H
16
17#include <linux/config.h>
18
19#include <linux/interrupt.h>
20#include <linux/ipv6.h>
21#include <linux/list.h>
22#include <linux/slab.h>
23#include <linux/socket.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/wait.h>
27
28#include <net/inet_connection_sock.h>
29#include <net/route.h>
30#include <net/sock.h>
31#include <net/tcp_states.h>
32
33#include <asm/atomic.h>
34#include <asm/byteorder.h>
35
36/* This is for all connections with a full identity, no wildcards.
37 * New scheme, half the table is for TIME_WAIT, the other half is
38 * for the rest. I'll experiment with dynamic table growth later.
39 */
40struct inet_ehash_bucket {
41 rwlock_t lock;
42 struct hlist_head chain;
43} __attribute__((__aligned__(8)));
44
45/* There are a few simple rules, which allow for local port reuse by
46 * an application. In essence:
47 *
48 * 1) Sockets bound to different interfaces may share a local port.
49 * Failing that, goto test 2.
50 * 2) If all sockets have sk->sk_reuse set, and none of them are in
51 * TCP_LISTEN state, the port may be shared.
52 * Failing that, goto test 3.
53 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
54 * address, and none of them are the same, the port may be
55 * shared.
56 * Failing this, the port cannot be shared.
57 *
58 * The interesting point, is test #2. This is what an FTP server does
59 * all day. To optimize this case we use a specific flag bit defined
60 * below. As we add sockets to a bind bucket list, we perform a
61 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
62 * As long as all sockets added to a bind bucket pass this test,
63 * the flag bit will be set.
64 * The resulting situation is that tcp_v[46]_verify_bind() can just check
65 * for this flag bit, if it is set and the socket trying to bind has
66 * sk->sk_reuse set, we don't even have to walk the owners list at all,
67 * we return that it is ok to bind this socket to the requested local port.
68 *
69 * Sounds like a lot of work, but it is worth it. In a more naive
70 * implementation (ie. current FreeBSD etc.) the entire list of ports
71 * must be walked for each data port opened by an ftp server. Needless
72 * to say, this does not scale at all. With a couple thousand FTP
73 * users logged onto your box, isn't it nice to know that new data
74 * ports are created in O(1) time? I thought so. ;-) -DaveM
75 */
76struct inet_bind_bucket {
77 unsigned short port;
78 signed short fastreuse;
79 struct hlist_node node;
80 struct hlist_head owners;
81};
82
83#define inet_bind_bucket_for_each(tb, node, head) \
84 hlist_for_each_entry(tb, node, head, node)
85
86struct inet_bind_hashbucket {
87 spinlock_t lock;
88 struct hlist_head chain;
89};
90
91/* This is for listening sockets, thus all sockets which possess wildcards. */
92#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
93
94struct inet_hashinfo {
95 /* This is for sockets with full identity only. Sockets here will
96 * always be without wildcards and will have the following invariant:
97 *
98 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
99 *
100 * First half of the table is for sockets not in TIME_WAIT, second half
101 * is for TIME_WAIT sockets only.
102 */
103 struct inet_ehash_bucket *ehash;
104
105 /* Ok, let's try this, I give up, we do need a local binding
106 * TCP hash as well as the others for fast bind/connect.
107 */
108 struct inet_bind_hashbucket *bhash;
109
110 int bhash_size;
111 int ehash_size;
112
113 /* All sockets in TCP_LISTEN state will be in here. This is the only
114 * table where wildcard'd TCP sockets can exist. Hash function here
115 * is just local port number.
116 */
117 struct hlist_head listening_hash[INET_LHTABLE_SIZE];
118
119 /* All the above members are written once at bootup and
120 * never written again _or_ are predominantly read-access.
121 *
122 * Now align to a new cache line as all the following members
123 * are often dirty.
124 */
125 rwlock_t lhash_lock ____cacheline_aligned;
126 atomic_t lhash_users;
127 wait_queue_head_t lhash_wait;
128 spinlock_t portalloc_lock;
129 kmem_cache_t *bind_bucket_cachep;
130 int port_rover;
131};
132
133static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
134 const __u32 faddr, const __u16 fport,
135 const int ehash_size)
136{
137 int h = (laddr ^ lport) ^ (faddr ^ fport);
138 h ^= h >> 16;
139 h ^= h >> 8;
140 return h & (ehash_size - 1);
141}
142
143static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
144{
145 const struct inet_sock *inet = inet_sk(sk);
146 const __u32 laddr = inet->rcv_saddr;
147 const __u16 lport = inet->num;
148 const __u32 faddr = inet->daddr;
149 const __u16 fport = inet->dport;
150
151 return inet_ehashfn(laddr, lport, faddr, fport, ehash_size);
152}
153
154extern struct inet_bind_bucket *
155 inet_bind_bucket_create(kmem_cache_t *cachep,
156 struct inet_bind_hashbucket *head,
157 const unsigned short snum);
158extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
159 struct inet_bind_bucket *tb);
160
161static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
162{
163 return lport & (bhash_size - 1);
164}
165
166extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
167 const unsigned short snum);
168
169/* These can have wildcards, don't try too hard. */
170static inline int inet_lhashfn(const unsigned short num)
171{
172 return num & (INET_LHTABLE_SIZE - 1);
173}
174
175static inline int inet_sk_listen_hashfn(const struct sock *sk)
176{
177 return inet_lhashfn(inet_sk(sk)->num);
178}
179
180/* Caller must disable local BH processing. */
181static inline void __inet_inherit_port(struct inet_hashinfo *table,
182 struct sock *sk, struct sock *child)
183{
184 const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
185 struct inet_bind_hashbucket *head = &table->bhash[bhash];
186 struct inet_bind_bucket *tb;
187
188 spin_lock(&head->lock);
189 tb = inet_csk(sk)->icsk_bind_hash;
190 sk_add_bind_node(child, &tb->owners);
191 inet_csk(child)->icsk_bind_hash = tb;
192 spin_unlock(&head->lock);
193}
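/*
 * Note (illustrative): the child is assumed to share the listener's
 * local port here, so the bucket computed from the child's port number
 * is the very bucket the parent's icsk_bind_hash lives in -- which is
 * why taking that bucket's lock and reusing the parent's tb is safe.
 */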
194
195static inline void inet_inherit_port(struct inet_hashinfo *table,
196 struct sock *sk, struct sock *child)
197{
198 local_bh_disable();
199 __inet_inherit_port(table, sk, child);
200 local_bh_enable();
201}
202
203extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
204
205extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
206
207/*
208 * - We may sleep inside this lock.
209 * - If sleeping is not required (or called from BH),
210 * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
211 */
212static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
213{
214	/* Taking the read lock briefly synchronizes us with any candidate writer */
215 read_lock(&hashinfo->lhash_lock);
216 atomic_inc(&hashinfo->lhash_users);
217 read_unlock(&hashinfo->lhash_lock);
218}
219
220static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
221{
222 if (atomic_dec_and_test(&hashinfo->lhash_users))
223 wake_up(&hashinfo->lhash_wait);
224}
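/*
 * Sketch of the writer side (an assumption, not the exported
 * implementation): inet_listen_wlock() is expected to take lhash_lock
 * for writing and then let lhash_users drain to zero, with
 * inet_listen_unlock() providing the wakeup via lhash_wait.  Roughly:
 */
static inline void example_listen_wlock(struct inet_hashinfo *hashinfo)
{
	write_lock(&hashinfo->lhash_lock);
	/* A real implementation would sleep on lhash_wait rather than
	 * poll; the wait-queue handshake is elided here. */
	while (atomic_read(&hashinfo->lhash_users) != 0)
		cpu_relax();
}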
225
226static inline void __inet_hash(struct inet_hashinfo *hashinfo,
227 struct sock *sk, const int listen_possible)
228{
229 struct hlist_head *list;
230 rwlock_t *lock;
231
232 BUG_TRAP(sk_unhashed(sk));
233 if (listen_possible && sk->sk_state == TCP_LISTEN) {
234 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
235 lock = &hashinfo->lhash_lock;
236 inet_listen_wlock(hashinfo);
237 } else {
238 sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size);
239 list = &hashinfo->ehash[sk->sk_hashent].chain;
240 lock = &hashinfo->ehash[sk->sk_hashent].lock;
241 write_lock(lock);
242 }
243 __sk_add_node(sk, list);
244 sock_prot_inc_use(sk->sk_prot);
245 write_unlock(lock);
246 if (listen_possible && sk->sk_state == TCP_LISTEN)
247 wake_up(&hashinfo->lhash_wait);
248}
249
250static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
251{
252 if (sk->sk_state != TCP_CLOSE) {
253 local_bh_disable();
254 __inet_hash(hashinfo, sk, 1);
255 local_bh_enable();
256 }
257}
258
259static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
260{
261 rwlock_t *lock;
262
263 if (sk_unhashed(sk))
264 goto out;
265
266 if (sk->sk_state == TCP_LISTEN) {
267 local_bh_disable();
268 inet_listen_wlock(hashinfo);
269 lock = &hashinfo->lhash_lock;
270 } else {
271 struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent];
272 lock = &head->lock;
273 write_lock_bh(&head->lock);
274 }
275
276 if (__sk_del_node_init(sk))
277 sock_prot_dec_use(sk->sk_prot);
278 write_unlock_bh(lock);
279out:
280 if (sk->sk_state == TCP_LISTEN)
281 wake_up(&hashinfo->lhash_wait);
282}
283
284static inline int inet_iif(const struct sk_buff *skb)
285{
286 return ((struct rtable *)skb->dst)->rt_iif;
287}
288
289extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
290 const u32 daddr,
291 const unsigned short hnum,
292 const int dif);
293
294/* Optimize the common listener case. */
295static inline struct sock *
296 inet_lookup_listener(struct inet_hashinfo *hashinfo,
297 const u32 daddr,
298 const unsigned short hnum, const int dif)
299{
300 struct sock *sk = NULL;
301 const struct hlist_head *head;
302
303 read_lock(&hashinfo->lhash_lock);
304 head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
305 if (!hlist_empty(head)) {
306 const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
307
308 if (inet->num == hnum && !sk->sk_node.next &&
309 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
310 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
311 !sk->sk_bound_dev_if)
312 goto sherry_cache;
313 sk = __inet_lookup_listener(head, daddr, hnum, dif);
314 }
315 if (sk) {
316sherry_cache:
317 sock_hold(sk);
318 }
319 read_unlock(&hashinfo->lhash_lock);
320 return sk;
321}
322
323/* Socket demux engine toys. */
324#ifdef __BIG_ENDIAN
325#define INET_COMBINED_PORTS(__sport, __dport) \
326 (((__u32)(__sport) << 16) | (__u32)(__dport))
327#else /* __LITTLE_ENDIAN */
328#define INET_COMBINED_PORTS(__sport, __dport) \
329 (((__u32)(__dport) << 16) | (__u32)(__sport))
330#endif
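/*
 * Worked example (illustrative): on little-endian, with a remote source
 * port of 0xd431 and a local port of 80 (0x0050),
 *
 *	INET_COMBINED_PORTS(0xd431, 0x0050) == 0x0050d431
 *
 * which is exactly what an aligned 32-bit load of an adjacent
 * { dport == 0xd431, num == 0x0050 } pair of __u16 fields yields, so the
 * match macros below can compare both ports with a single compare.  That
 * those two fields are adjacent in inet_sock is relied upon, not shown,
 * here.
 */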
331
332#if (BITS_PER_LONG == 64)
333#ifdef __BIG_ENDIAN
334#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
335 const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
336#else /* __LITTLE_ENDIAN */
337#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
338 const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
339#endif /* __BIG_ENDIAN */
340#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
341 (((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
342 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
343 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
344#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
345 (((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
346 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
347 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
348#else /* 32-bit arch */
349#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
350#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
351 ((inet_sk(__sk)->daddr == (__saddr)) && \
352 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
353 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
354 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
355#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
356 ((inet_twsk(__sk)->tw_daddr == (__saddr)) && \
357 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
358 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
359 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
360#endif /* 64-bit arch */
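/*
 * Note (illustrative): INET_ADDR_COOKIE() plays the same trick with the
 * addresses on 64-bit -- the two 32-bit addresses are packed into one
 * __u64 in the byte order that a single 64-bit load of the adjacent
 * daddr/rcv_saddr fields produces, so INET_MATCH reduces to two integer
 * compares plus the bound-device check.  On 32-bit the cookie expands to
 * nothing and the addresses are compared one by one.  The adjacency and
 * alignment of those fields is an assumption the macros rely on.
 */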
361
362/*
363 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
364 * not check it for lookups anymore, thanks Alexey. -DaveM
365 *
366 * Local BH must be disabled here.
367 */
368static inline struct sock *
369 __inet_lookup_established(struct inet_hashinfo *hashinfo,
370 const u32 saddr, const u16 sport,
371 const u32 daddr, const u16 hnum,
372 const int dif)
373{
374 INET_ADDR_COOKIE(acookie, saddr, daddr)
375 const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
376 struct sock *sk;
377 const struct hlist_node *node;
378	/* Optimize here for the direct hit: only listening sockets can
379	 * have wildcards anyway.
380 */
381 const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size);
382 struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
383
384 read_lock(&head->lock);
385 sk_for_each(sk, node, &head->chain) {
386 if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
387 goto hit; /* You sunk my battleship! */
388 }
389
390 /* Must check for a TIME_WAIT'er before going to listener hash. */
391 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
392 if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
393 goto hit;
394 }
395 sk = NULL;
396out:
397 read_unlock(&head->lock);
398 return sk;
399hit:
400 sock_hold(sk);
401 goto out;
402}
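/*
 * Note (illustrative): as the lookup above shows, for bucket index h the
 * established chain is ehash[h].chain while the TIME_WAIT chain sits at
 * the fixed offset ehash[h + ehash_size].chain, and both are walked
 * under ehash[h].lock.  A hypothetical helper for the second chain:
 */
static inline struct hlist_head *
inet_ehash_twchain(struct inet_hashinfo *hashinfo, const int hash)
{
	return &hashinfo->ehash[hash + hashinfo->ehash_size].chain;
}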
403
404static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
405 const u32 saddr, const u16 sport,
406 const u32 daddr, const u16 hnum,
407 const int dif)
408{
409 struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
410 hnum, dif);
411 return sk ? : inet_lookup_listener(hashinfo, daddr, hnum, dif);
412}
413
414static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
415 const u32 saddr, const u16 sport,
416 const u32 daddr, const u16 dport,
417 const int dif)
418{
419 struct sock *sk;
420
421 local_bh_disable();
422 sk = __inet_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
423 local_bh_enable();
424
425 return sk;
426}
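/*
 * Usage sketch (hypothetical caller): demultiplexing an incoming IPv4
 * segment.  Note that inet_lookup() wants the destination port in
 * network byte order -- it applies ntohs() itself -- while the source
 * port is passed through untouched for the combined-port compare.
 */
static inline struct sock *example_v4_demux(struct inet_hashinfo *hashinfo,
					    const struct iphdr *iph,
					    const struct tcphdr *th,
					    const int dif)
{
	return inet_lookup(hashinfo, iph->saddr, th->source,
			   iph->daddr, th->dest, dif);
}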
427#endif /* _INET_HASHTABLES_H */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
new file mode 100644
index 000000000000..3b070352e869
--- /dev/null
+++ b/include/net/inet_timewait_sock.h
@@ -0,0 +1,219 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for a generic INET TIMEWAIT sock
7 *
8 * From code originally in net/tcp.h
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#ifndef _INET_TIMEWAIT_SOCK_
16#define _INET_TIMEWAIT_SOCK_
17
18#include <linux/config.h>
19
20#include <linux/ip.h>
21#include <linux/list.h>
22#include <linux/timer.h>
23#include <linux/types.h>
24#include <linux/workqueue.h>
25
26#include <net/sock.h>
27#include <net/tcp_states.h>
28
29#include <asm/atomic.h>
30
31struct inet_hashinfo;
32
33#define INET_TWDR_RECYCLE_SLOTS_LOG 5
34#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG)
35
36/*
37 * If the timeout is longer than 4 seconds it is the "slow" path and no
38 * recycling is required, so the tick is chosen to give a span of about 4 seconds.
39 */
40#if HZ <= 16 || HZ > 4096
41# error Unsupported: HZ <= 16 or HZ > 4096
42#elif HZ <= 32
43# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
44#elif HZ <= 64
45# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
46#elif HZ <= 128
47# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
48#elif HZ <= 256
49# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
50#elif HZ <= 512
51# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
52#elif HZ <= 1024
53# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
54#elif HZ <= 2048
55# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
56#else
57# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
58#endif
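/*
 * Worked example (illustrative): with HZ == 1000 the HZ <= 1024 branch
 * applies, so INET_TWDR_RECYCLE_TICK = 10 + 2 - 5 = 7.  Each calendar
 * slot then covers 1 << 7 = 128 jiffies (128ms here), and the 32 slots
 * together span 32 * 128 = 4096 jiffies -- the "about 4 seconds" aimed
 * for above.  With HZ == 100 the HZ <= 128 branch gives 7 + 2 - 5 = 4,
 * i.e. 16-jiffy (160ms) slots spanning 512 jiffies, roughly 5 seconds.
 */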
59
60/* TIME_WAIT reaping mechanism. */
61#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
62
63#define INET_TWDR_TWKILL_QUOTA 100
64
65struct inet_timewait_death_row {
66 /* Short-time timewait calendar */
67 int twcal_hand;
68 int twcal_jiffie;
69 struct timer_list twcal_timer;
70 struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
71
72 spinlock_t death_lock;
73 int tw_count;
74 int period;
75 u32 thread_slots;
76 struct work_struct twkill_work;
77 struct timer_list tw_timer;
78 int slot;
79 struct hlist_head cells[INET_TWDR_TWKILL_SLOTS];
80 struct inet_hashinfo *hashinfo;
81 int sysctl_tw_recycle;
82 int sysctl_max_tw_buckets;
83};
84
85extern void inet_twdr_hangman(unsigned long data);
86extern void inet_twdr_twkill_work(void *data);
87extern void inet_twdr_twcal_tick(unsigned long data);
88
89#if (BITS_PER_LONG == 64)
90#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
91#else
92#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
93#endif
94
95struct inet_bind_bucket;
96
97/*
98 * This is a TIME_WAIT sock. It works around the memory consumption
99 * problems of sockets in such a state on heavily loaded servers, but
100 * without violating the protocol specification.
101 */
102struct inet_timewait_sock {
103 /*
104 * Now struct sock also uses sock_common, so please just
105 * don't add anything before this first member (__tw_common) --acme
106 */
107 struct sock_common __tw_common;
108#define tw_family __tw_common.skc_family
109#define tw_state __tw_common.skc_state
110#define tw_reuse __tw_common.skc_reuse
111#define tw_bound_dev_if __tw_common.skc_bound_dev_if
112#define tw_node __tw_common.skc_node
113#define tw_bind_node __tw_common.skc_bind_node
114#define tw_refcnt __tw_common.skc_refcnt
115#define tw_prot __tw_common.skc_prot
116 volatile unsigned char tw_substate;
117 /* 3 bits hole, try to pack */
118 unsigned char tw_rcv_wscale;
119 /* Socket demultiplex comparisons on incoming packets. */
120 /* these five are in inet_sock */
121 __u16 tw_sport;
122 __u32 tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
123 __u32 tw_rcv_saddr;
124 __u16 tw_dport;
125 __u16 tw_num;
126 /* And these are ours. */
127 __u8 tw_ipv6only:1;
128 /* 31 bits hole, try to pack */
129 int tw_hashent;
130 int tw_timeout;
131 unsigned long tw_ttd;
132 struct inet_bind_bucket *tw_tb;
133 struct hlist_node tw_death_node;
134};
135
136static inline void inet_twsk_add_node(struct inet_timewait_sock *tw,
137 struct hlist_head *list)
138{
139 hlist_add_head(&tw->tw_node, list);
140}
141
142static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
143 struct hlist_head *list)
144{
145 hlist_add_head(&tw->tw_bind_node, list);
146}
147
148static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
149{
150 return tw->tw_death_node.pprev != NULL;
151}
152
153static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
154{
155 tw->tw_death_node.pprev = NULL;
156}
157
158static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
159{
160 __hlist_del(&tw->tw_death_node);
161 inet_twsk_dead_node_init(tw);
162}
163
164static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
165{
166 if (inet_twsk_dead_hashed(tw)) {
167 __inet_twsk_del_dead_node(tw);
168 return 1;
169 }
170 return 0;
171}
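/*
 * Note (illustrative): tw_death_node.pprev doubles as the "on a death
 * row" flag -- hlist_add_head() sets it to a real back pointer and
 * inet_twsk_dead_node_init() resets it to NULL, so
 * inet_twsk_dead_hashed() needs no extra state to answer the question.
 */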
172
173#define inet_twsk_for_each(tw, node, head) \
174 hlist_for_each_entry(tw, node, head, tw_node)
175
176#define inet_twsk_for_each_inmate(tw, node, jail) \
177 hlist_for_each_entry(tw, node, jail, tw_death_node)
178
179#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
180 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
181
182static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
183{
184 return (struct inet_timewait_sock *)sk;
185}
186
187static inline u32 inet_rcv_saddr(const struct sock *sk)
188{
189 return likely(sk->sk_state != TCP_TIME_WAIT) ?
190 inet_sk(sk)->rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
191}
192
193static inline void inet_twsk_put(struct inet_timewait_sock *tw)
194{
195 if (atomic_dec_and_test(&tw->tw_refcnt)) {
196#ifdef SOCK_REFCNT_DEBUG
197 printk(KERN_DEBUG "%s timewait_sock %p released\n",
198 tw->tw_prot->name, tw);
199#endif
200 kmem_cache_free(tw->tw_prot->twsk_slab, tw);
201 }
202}
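/*
 * Usage sketch (hypothetical, locking elided): cancelling a scheduled
 * timewait sock with the death-row helpers above.  That the death row
 * holds exactly one reference per scheduled sock is an assumption here.
 */
static inline void example_twsk_cancel(struct inet_timewait_sock *tw)
{
	if (inet_twsk_del_dead_node(tw))
		inet_twsk_put(tw);	/* drop the presumed death-row ref */
}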
203
204extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
205 const int state);
206
207extern void __inet_twsk_kill(struct inet_timewait_sock *tw,
208 struct inet_hashinfo *hashinfo);
209
210extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
211 struct sock *sk,
212 struct inet_hashinfo *hashinfo);
213
214extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
215 struct inet_timewait_death_row *twdr,
216 const int timeo, const int timewait_len);
217extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
218 struct inet_timewait_death_row *twdr);
219#endif /* _INET_TIMEWAIT_SOCK_ */
diff --git a/include/net/ip.h b/include/net/ip.h
index 32360bbe143f..e4563bbee6ea 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -86,7 +86,7 @@ extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
86 u32 saddr, u32 daddr, 86 u32 saddr, u32 daddr,
87 struct ip_options *opt); 87 struct ip_options *opt);
88extern int ip_rcv(struct sk_buff *skb, struct net_device *dev, 88extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
89 struct packet_type *pt); 89 struct packet_type *pt, struct net_device *orig_dev);
90extern int ip_local_deliver(struct sk_buff *skb); 90extern int ip_local_deliver(struct sk_buff *skb);
91extern int ip_mr_input(struct sk_buff *skb); 91extern int ip_mr_input(struct sk_buff *skb);
92extern int ip_output(struct sk_buff *skb); 92extern int ip_output(struct sk_buff *skb);
@@ -140,8 +140,6 @@ struct ip_reply_arg {
140void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg, 140void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
141 unsigned int len); 141 unsigned int len);
142 142
143extern int ip_finish_output(struct sk_buff *skb);
144
145struct ipv4_config 143struct ipv4_config
146{ 144{
147 int log_martians; 145 int log_martians;
@@ -165,6 +163,24 @@ extern int sysctl_local_port_range[2];
165extern int sysctl_ip_default_ttl; 163extern int sysctl_ip_default_ttl;
166extern int sysctl_ip_nonlocal_bind; 164extern int sysctl_ip_nonlocal_bind;
167 165
166/* From ip_fragment.c */
167extern int sysctl_ipfrag_high_thresh;
168extern int sysctl_ipfrag_low_thresh;
169extern int sysctl_ipfrag_time;
170extern int sysctl_ipfrag_secret_interval;
171
172/* From inetpeer.c */
173extern int inet_peer_threshold;
174extern int inet_peer_minttl;
175extern int inet_peer_maxttl;
176extern int inet_peer_gc_mintime;
177extern int inet_peer_gc_maxtime;
178
179/* From ip_output.c */
180extern int sysctl_ip_dynaddr;
181
182extern void ipfrag_init(void);
183
168#ifdef CONFIG_INET 184#ifdef CONFIG_INET
169/* The function in 2.2 was invalid, producing wrong result for 185/* The function in 2.2 was invalid, producing wrong result for
170 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */ 186 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
@@ -319,7 +335,10 @@ extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, u32 da
319extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); 335extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
320extern void ip_options_fragment(struct sk_buff *skb); 336extern void ip_options_fragment(struct sk_buff *skb);
321extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb); 337extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb);
322extern int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user); 338extern int ip_options_get(struct ip_options **optp,
339 unsigned char *data, int optlen);
340extern int ip_options_get_from_user(struct ip_options **optp,
341 unsigned char __user *data, int optlen);
323extern void ip_options_undo(struct ip_options * opt); 342extern void ip_options_undo(struct ip_options * opt);
324extern void ip_forward_options(struct sk_buff *skb); 343extern void ip_forward_options(struct sk_buff *skb);
325extern int ip_options_rcv_srr(struct sk_buff *skb); 344extern int ip_options_rcv_srr(struct sk_buff *skb);
@@ -350,5 +369,10 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
350 void __user *oldval, size_t __user *oldlenp, 369 void __user *oldval, size_t __user *oldlenp,
351 void __user *newval, size_t newlen, 370 void __user *newval, size_t newlen,
352 void **context); 371 void **context);
372#ifdef CONFIG_PROC_FS
373extern int ip_misc_proc_init(void);
374#endif
375
376extern struct ctl_table ipv4_table[];
353 377
354#endif /* _IP_H */ 378#endif /* _IP_H */
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f920706d526b..1f2e428ca364 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -12,7 +12,6 @@
12#include <net/flow.h> 12#include <net/flow.h>
13#include <net/ip6_fib.h> 13#include <net/ip6_fib.h>
14#include <net/sock.h> 14#include <net/sock.h>
15#include <linux/tcp.h>
16#include <linux/ip.h> 15#include <linux/ip.h>
17#include <linux/ipv6.h> 16#include <linux/ipv6.h>
18 17
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a4208a336ac0..14de4ebd1211 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -295,4 +295,9 @@ static inline void fib_res_put(struct fib_result *res)
295#endif 295#endif
296} 296}
297 297
298#ifdef CONFIG_PROC_FS
299extern int fib_proc_init(void);
300extern void fib_proc_exit(void);
301#endif
302
298#endif /* _NET_FIB_H */ 303#endif /* _NET_FIB_H */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 52da5d26617a..7a3c43711a17 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -255,7 +255,6 @@ struct ip_vs_daemon_user {
255#include <asm/atomic.h> /* for struct atomic_t */ 255#include <asm/atomic.h> /* for struct atomic_t */
256#include <linux/netdevice.h> /* for struct neighbour */ 256#include <linux/netdevice.h> /* for struct neighbour */
257#include <net/dst.h> /* for struct dst_entry */ 257#include <net/dst.h> /* for struct dst_entry */
258#include <net/tcp.h>
259#include <net/udp.h> 258#include <net/udp.h>
260#include <linux/compiler.h> 259#include <linux/compiler.h>
261 260
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 69324465e8b3..3203eaff4bd4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -104,6 +104,7 @@ struct frag_hdr {
104 104
105#ifdef __KERNEL__ 105#ifdef __KERNEL__
106 106
107#include <linux/config.h>
107#include <net/sock.h> 108#include <net/sock.h>
108 109
109/* sysctls */ 110/* sysctls */
@@ -145,7 +146,6 @@ DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6);
145#define UDP6_INC_STATS(field) SNMP_INC_STATS(udp_stats_in6, field) 146#define UDP6_INC_STATS(field) SNMP_INC_STATS(udp_stats_in6, field)
146#define UDP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_stats_in6, field) 147#define UDP6_INC_STATS_BH(field) SNMP_INC_STATS_BH(udp_stats_in6, field)
147#define UDP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_stats_in6, field) 148#define UDP6_INC_STATS_USER(field) SNMP_INC_STATS_USER(udp_stats_in6, field)
148extern atomic_t inet6_sock_nr;
149 149
150int snmp6_register_dev(struct inet6_dev *idev); 150int snmp6_register_dev(struct inet6_dev *idev);
151int snmp6_unregister_dev(struct inet6_dev *idev); 151int snmp6_unregister_dev(struct inet6_dev *idev);
@@ -346,7 +346,8 @@ static inline int ipv6_addr_any(const struct in6_addr *a)
346 346
347extern int ipv6_rcv(struct sk_buff *skb, 347extern int ipv6_rcv(struct sk_buff *skb,
348 struct net_device *dev, 348 struct net_device *dev,
349 struct packet_type *pt); 349 struct packet_type *pt,
350 struct net_device *orig_dev);
350 351
351/* 352/*
352 * upper-layer output functions 353 * upper-layer output functions
@@ -464,8 +465,38 @@ extern int sysctl_ip6frag_low_thresh;
464extern int sysctl_ip6frag_time; 465extern int sysctl_ip6frag_time;
465extern int sysctl_ip6frag_secret_interval; 466extern int sysctl_ip6frag_secret_interval;
466 467
467#endif /* __KERNEL__ */ 468extern struct proto_ops inet6_stream_ops;
468#endif /* _NET_IPV6_H */ 469extern struct proto_ops inet6_dgram_ops;
470
471extern int ip6_mc_source(int add, int omode, struct sock *sk,
472 struct group_source_req *pgsr);
473extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
474extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
475 struct group_filter __user *optval,
476 int __user *optlen);
477
478#ifdef CONFIG_PROC_FS
479extern int ac6_proc_init(void);
480extern void ac6_proc_exit(void);
481extern int raw6_proc_init(void);
482extern void raw6_proc_exit(void);
483extern int tcp6_proc_init(void);
484extern void tcp6_proc_exit(void);
485extern int udp6_proc_init(void);
486extern void udp6_proc_exit(void);
487extern int ipv6_misc_proc_init(void);
488extern void ipv6_misc_proc_exit(void);
489
490extern struct rt6_statistics rt6_stats;
491#endif
469 492
493#ifdef CONFIG_SYSCTL
494extern ctl_table ipv6_route_table[];
495extern ctl_table ipv6_icmp_table[];
470 496
497extern void ipv6_sysctl_register(void);
498extern void ipv6_sysctl_unregister(void);
499#endif
471 500
501#endif /* __KERNEL__ */
502#endif /* _NET_IPV6_H */
diff --git a/include/net/llc.h b/include/net/llc.h
index c9aed2a8b4e2..71769a5aeef3 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -46,7 +46,8 @@ struct llc_sap {
46 unsigned char f_bit; 46 unsigned char f_bit;
47 int (*rcv_func)(struct sk_buff *skb, 47 int (*rcv_func)(struct sk_buff *skb,
48 struct net_device *dev, 48 struct net_device *dev,
49 struct packet_type *pt); 49 struct packet_type *pt,
50 struct net_device *orig_dev);
50 struct llc_addr laddr; 51 struct llc_addr laddr;
51 struct list_head node; 52 struct list_head node;
52 struct { 53 struct {
@@ -64,7 +65,7 @@ extern rwlock_t llc_sap_list_lock;
64extern unsigned char llc_station_mac_sa[ETH_ALEN]; 65extern unsigned char llc_station_mac_sa[ETH_ALEN];
65 66
66extern int llc_rcv(struct sk_buff *skb, struct net_device *dev, 67extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
67 struct packet_type *pt); 68 struct packet_type *pt, struct net_device *orig_dev);
68 69
69extern int llc_mac_hdr_init(struct sk_buff *skb, 70extern int llc_mac_hdr_init(struct sk_buff *skb,
70 unsigned char *sa, unsigned char *da); 71 unsigned char *sa, unsigned char *da);
@@ -78,7 +79,8 @@ extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
78extern struct llc_sap *llc_sap_open(unsigned char lsap, 79extern struct llc_sap *llc_sap_open(unsigned char lsap,
79 int (*rcv)(struct sk_buff *skb, 80 int (*rcv)(struct sk_buff *skb,
80 struct net_device *dev, 81 struct net_device *dev,
81 struct packet_type *pt)); 82 struct packet_type *pt,
83 struct net_device *orig_dev));
82extern void llc_sap_close(struct llc_sap *sap); 84extern void llc_sap_close(struct llc_sap *sap);
83 85
84extern struct llc_sap *llc_sap_find(unsigned char sap_value); 86extern struct llc_sap *llc_sap_find(unsigned char sap_value);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 89809891e5ab..34c07731933d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -363,7 +363,14 @@ __neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
363 return neigh_create(tbl, pkey, dev); 363 return neigh_create(tbl, pkey, dev);
364} 364}
365 365
366#define LOCALLY_ENQUEUED -2 366struct neighbour_cb {
367 unsigned long sched_next;
368 unsigned int flags;
369};
370
371#define LOCALLY_ENQUEUED 0x1
372
373#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
367 374
368#endif 375#endif
369#endif 376#endif
diff --git a/include/net/p8022.h b/include/net/p8022.h
index 3c99a86c3581..42e9fac51b31 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -4,7 +4,10 @@ extern struct datalink_proto *
4 register_8022_client(unsigned char type, 4 register_8022_client(unsigned char type,
5 int (*func)(struct sk_buff *skb, 5 int (*func)(struct sk_buff *skb,
6 struct net_device *dev, 6 struct net_device *dev,
7 struct packet_type *pt)); 7 struct packet_type *pt,
8 struct net_device *orig_dev));
8extern void unregister_8022_client(struct datalink_proto *proto); 9extern void unregister_8022_client(struct datalink_proto *proto);
9 10
11extern struct datalink_proto *make_8023_client(void);
12extern void destroy_8023_client(struct datalink_proto *dl);
10#endif 13#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 4abda6aec05a..b902d24a3256 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -352,10 +352,10 @@ tcf_change_indev(struct tcf_proto *tp, char *indev, struct rtattr *indev_tlv)
352static inline int 352static inline int
353tcf_match_indev(struct sk_buff *skb, char *indev) 353tcf_match_indev(struct sk_buff *skb, char *indev)
354{ 354{
355 if (0 != indev[0]) { 355 if (indev[0]) {
356 if (NULL == skb->input_dev) 356 if (!skb->input_dev)
357 return 0; 357 return 0;
358 else if (0 != strcmp(indev, skb->input_dev->name)) 358 if (strcmp(indev, skb->input_dev->name))
359 return 0; 359 return 0;
360 } 360 }
361 361
diff --git a/include/net/psnap.h b/include/net/psnap.h
index 9c94e8f98b36..b2e01cc3fc8a 100644
--- a/include/net/psnap.h
+++ b/include/net/psnap.h
@@ -1,7 +1,7 @@
1#ifndef _NET_PSNAP_H 1#ifndef _NET_PSNAP_H
2#define _NET_PSNAP_H 2#define _NET_PSNAP_H
3 3
4extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *)); 4extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *orig_dev));
5extern void unregister_snap_client(struct datalink_proto *proto); 5extern void unregister_snap_client(struct datalink_proto *proto);
6 6
7#endif 7#endif
diff --git a/include/net/raw.h b/include/net/raw.h
index 1c411c45587a..f47917469b12 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -17,10 +17,10 @@
17#ifndef _RAW_H 17#ifndef _RAW_H
18#define _RAW_H 18#define _RAW_H
19 19
20#include <linux/config.h>
20 21
21extern struct proto raw_prot; 22extern struct proto raw_prot;
22 23
23
24extern void raw_err(struct sock *, struct sk_buff *, u32 info); 24extern void raw_err(struct sock *, struct sk_buff *, u32 info);
25extern int raw_rcv(struct sock *, struct sk_buff *); 25extern int raw_rcv(struct sock *, struct sk_buff *);
26 26
@@ -37,6 +37,11 @@ extern struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
37 unsigned long raddr, unsigned long laddr, 37 unsigned long raddr, unsigned long laddr,
38 int dif); 38 int dif);
39 39
40extern void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash); 40extern int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash);
41
42#ifdef CONFIG_PROC_FS
43extern int raw_proc_init(void);
44extern void raw_proc_exit(void);
45#endif
41 46
42#endif /* _RAW_H */ 47#endif /* _RAW_H */
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 23fd9a6a221a..14476a71725e 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -7,10 +7,11 @@
7extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE]; 7extern struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
8extern rwlock_t raw_v6_lock; 8extern rwlock_t raw_v6_lock;
9 9
10extern void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr); 10extern int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr);
11 11
12extern struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num, 12extern struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
13 struct in6_addr *loc_addr, struct in6_addr *rmt_addr); 13 struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
14 int dif);
14 15
15extern int rawv6_rcv(struct sock *sk, 16extern int rawv6_rcv(struct sock *sk,
16 struct sk_buff *skb); 17 struct sk_buff *skb);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 72fd6f5e86b1..b52cc52ffe39 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -89,6 +89,7 @@ struct listen_sock {
89 int qlen_young; 89 int qlen_young;
90 int clock_hand; 90 int clock_hand;
91 u32 hash_rnd; 91 u32 hash_rnd;
92 u32 nr_table_entries;
92 struct request_sock *syn_table[0]; 93 struct request_sock *syn_table[0];
93}; 94};
94 95
@@ -96,6 +97,7 @@ struct listen_sock {
96 * 97 *
97 * @rskq_accept_head - FIFO head of established children 98 * @rskq_accept_head - FIFO head of established children
98 * @rskq_accept_tail - FIFO tail of established children 99 * @rskq_accept_tail - FIFO tail of established children
100 * @rskq_defer_accept - User waits for some data after accept()
99 * @syn_wait_lock - serializer 101 * @syn_wait_lock - serializer
100 * 102 *
101 * %syn_wait_lock is necessary only to avoid proc interface having to grab the main 103 * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
@@ -111,6 +113,8 @@ struct request_sock_queue {
111 struct request_sock *rskq_accept_head; 113 struct request_sock *rskq_accept_head;
112 struct request_sock *rskq_accept_tail; 114 struct request_sock *rskq_accept_tail;
113 rwlock_t syn_wait_lock; 115 rwlock_t syn_wait_lock;
116 u8 rskq_defer_accept;
117 /* 3 bytes hole, try to pack */
114 struct listen_sock *listen_opt; 118 struct listen_sock *listen_opt;
115}; 119};
116 120
@@ -129,11 +133,13 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock
129 return lopt; 133 return lopt;
130} 134}
131 135
132static inline void reqsk_queue_destroy(struct request_sock_queue *queue) 136static inline void __reqsk_queue_destroy(struct request_sock_queue *queue)
133{ 137{
134 kfree(reqsk_queue_yank_listen_sk(queue)); 138 kfree(reqsk_queue_yank_listen_sk(queue));
135} 139}
136 140
141extern void reqsk_queue_destroy(struct request_sock_queue *queue);
142
137static inline struct request_sock * 143static inline struct request_sock *
138 reqsk_queue_yank_acceptq(struct request_sock_queue *queue) 144 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
139{ 145{
@@ -221,17 +227,17 @@ static inline int reqsk_queue_added(struct request_sock_queue *queue)
221 return prev_qlen; 227 return prev_qlen;
222} 228}
223 229
224static inline int reqsk_queue_len(struct request_sock_queue *queue) 230static inline int reqsk_queue_len(const struct request_sock_queue *queue)
225{ 231{
226 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0; 232 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
227} 233}
228 234
229static inline int reqsk_queue_len_young(struct request_sock_queue *queue) 235static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
230{ 236{
231 return queue->listen_opt->qlen_young; 237 return queue->listen_opt->qlen_young;
232} 238}
233 239
234static inline int reqsk_queue_is_full(struct request_sock_queue *queue) 240static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
235{ 241{
236 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; 242 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
237} 243}
diff --git a/include/net/route.h b/include/net/route.h
index c3cd069a9aca..dbe79ca67d31 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -105,10 +105,6 @@ struct rt_cache_stat
105 unsigned int out_hlist_search; 105 unsigned int out_hlist_search;
106}; 106};
107 107
108extern struct rt_cache_stat *rt_cache_stat;
109#define RT_CACHE_STAT_INC(field) \
110 (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
111
112extern struct ip_rt_acct *ip_rt_acct; 108extern struct ip_rt_acct *ip_rt_acct;
113 109
114struct in_device; 110struct in_device;
@@ -199,4 +195,6 @@ static inline struct inet_peer *rt_get_peer(struct rtable *rt)
199 return rt->peer; 195 return rt->peer;
200} 196}
201 197
198extern ctl_table ipv4_route_table[];
199
202#endif /* _ROUTE_H */ 200#endif /* _ROUTE_H */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 5999e5684bbf..c51541ee0247 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -47,10 +47,10 @@
47#ifndef __sctp_constants_h__ 47#ifndef __sctp_constants_h__
48#define __sctp_constants_h__ 48#define __sctp_constants_h__
49 49
50#include <linux/tcp.h> /* For TCP states used in sctp_sock_state_t */
51#include <linux/sctp.h> 50#include <linux/sctp.h>
52#include <linux/ipv6.h> /* For ipv6hdr. */ 51#include <linux/ipv6.h> /* For ipv6hdr. */
53#include <net/sctp/user.h> 52#include <net/sctp/user.h>
53#include <net/tcp_states.h> /* For TCP states used in sctp_sock_state_t */
54 54
55/* Value used for stream negotiation. */ 55/* Value used for stream negotiation. */
56enum { SCTP_MAX_STREAM = 0xffff }; 56enum { SCTP_MAX_STREAM = 0xffff };
diff --git a/include/net/sock.h b/include/net/sock.h
index e9b1dbab90d0..312cb25cbd18 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -88,6 +88,7 @@ do { spin_lock_init(&((__sk)->sk_lock.slock)); \
88} while(0) 88} while(0)
89 89
90struct sock; 90struct sock;
91struct proto;
91 92
92/** 93/**
93 * struct sock_common - minimal network layer representation of sockets 94 * struct sock_common - minimal network layer representation of sockets
@@ -98,10 +99,11 @@ struct sock;
98 * @skc_node: main hash linkage for various protocol lookup tables 99 * @skc_node: main hash linkage for various protocol lookup tables
99 * @skc_bind_node: bind hash linkage for various protocol lookup tables 100 * @skc_bind_node: bind hash linkage for various protocol lookup tables
100 * @skc_refcnt: reference count 101 * @skc_refcnt: reference count
102 * @skc_prot: protocol handlers inside a network family
101 * 103 *
102 * This is the minimal network layer representation of sockets, the header 104 * This is the minimal network layer representation of sockets, the header
103 * for struct sock and struct tcp_tw_bucket. 105 * for struct sock and struct inet_timewait_sock.
104 */ 106 */
105struct sock_common { 107struct sock_common {
106 unsigned short skc_family; 108 unsigned short skc_family;
107 volatile unsigned char skc_state; 109 volatile unsigned char skc_state;
@@ -110,11 +112,12 @@ struct sock_common {
110 struct hlist_node skc_node; 112 struct hlist_node skc_node;
111 struct hlist_node skc_bind_node; 113 struct hlist_node skc_bind_node;
112 atomic_t skc_refcnt; 114 atomic_t skc_refcnt;
115 struct proto *skc_prot;
113}; 116};
114 117
115/** 118/**
116 * struct sock - network layer representation of sockets 119 * struct sock - network layer representation of sockets
117 * @__sk_common: shared layout with tcp_tw_bucket 120 * @__sk_common: shared layout with inet_timewait_sock
118 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN 121 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
119 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings 122 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
120 * @sk_lock: synchronizer 123 * @sk_lock: synchronizer
@@ -136,11 +139,10 @@ struct sock_common {
136 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets 139 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
137 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 140 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
138 * @sk_lingertime: %SO_LINGER l_linger setting 141 * @sk_lingertime: %SO_LINGER l_linger setting
139 * @sk_hashent: hash entry in several tables (e.g. tcp_ehash) 142 * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
140 * @sk_backlog: always used with the per-socket spinlock held 143 * @sk_backlog: always used with the per-socket spinlock held
141 * @sk_callback_lock: used with the callbacks in the end of this struct 144 * @sk_callback_lock: used with the callbacks in the end of this struct
142 * @sk_error_queue: rarely used 145 * @sk_error_queue: rarely used
143 * @sk_prot: protocol handlers inside a network family
144 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance) 146 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
145 * @sk_err: last error 147 * @sk_err: last error
146 * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out' 148 * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
@@ -173,7 +175,7 @@ struct sock_common {
173 */ 175 */
174struct sock { 176struct sock {
175 /* 177 /*
176 * Now struct tcp_tw_bucket also uses sock_common, so please just 178 * Now struct inet_timewait_sock also uses sock_common, so please just
177 * don't add anything before this first member (__sk_common) --acme 179 * don't add anything before this first member (__sk_common) --acme
178 */ 180 */
179 struct sock_common __sk_common; 181 struct sock_common __sk_common;
@@ -184,6 +186,7 @@ struct sock {
184#define sk_node __sk_common.skc_node 186#define sk_node __sk_common.skc_node
185#define sk_bind_node __sk_common.skc_bind_node 187#define sk_bind_node __sk_common.skc_bind_node
186#define sk_refcnt __sk_common.skc_refcnt 188#define sk_refcnt __sk_common.skc_refcnt
189#define sk_prot __sk_common.skc_prot
187 unsigned char sk_shutdown : 2, 190 unsigned char sk_shutdown : 2,
188 sk_no_check : 2, 191 sk_no_check : 2,
189 sk_userlocks : 4; 192 sk_userlocks : 4;
@@ -218,7 +221,6 @@ struct sock {
218 struct sk_buff *tail; 221 struct sk_buff *tail;
219 } sk_backlog; 222 } sk_backlog;
220 struct sk_buff_head sk_error_queue; 223 struct sk_buff_head sk_error_queue;
221 struct proto *sk_prot;
222 struct proto *sk_prot_creator; 224 struct proto *sk_prot_creator;
223 rwlock_t sk_callback_lock; 225 rwlock_t sk_callback_lock;
224 int sk_err, 226 int sk_err,
@@ -253,28 +255,28 @@ struct sock {
253/* 255/*
254 * Hashed lists helper routines 256 * Hashed lists helper routines
255 */ 257 */
256static inline struct sock *__sk_head(struct hlist_head *head) 258static inline struct sock *__sk_head(const struct hlist_head *head)
257{ 259{
258 return hlist_entry(head->first, struct sock, sk_node); 260 return hlist_entry(head->first, struct sock, sk_node);
259} 261}
260 262
261static inline struct sock *sk_head(struct hlist_head *head) 263static inline struct sock *sk_head(const struct hlist_head *head)
262{ 264{
263 return hlist_empty(head) ? NULL : __sk_head(head); 265 return hlist_empty(head) ? NULL : __sk_head(head);
264} 266}
265 267
266static inline struct sock *sk_next(struct sock *sk) 268static inline struct sock *sk_next(const struct sock *sk)
267{ 269{
268 return sk->sk_node.next ? 270 return sk->sk_node.next ?
269 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; 271 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
270} 272}
271 273
272static inline int sk_unhashed(struct sock *sk) 274static inline int sk_unhashed(const struct sock *sk)
273{ 275{
274 return hlist_unhashed(&sk->sk_node); 276 return hlist_unhashed(&sk->sk_node);
275} 277}
276 278
277static inline int sk_hashed(struct sock *sk) 279static inline int sk_hashed(const struct sock *sk)
278{ 280{
279 return sk->sk_node.pprev != NULL; 281 return sk->sk_node.pprev != NULL;
280} 282}
@@ -554,6 +556,10 @@ struct proto {
554 kmem_cache_t *slab; 556 kmem_cache_t *slab;
555 unsigned int obj_size; 557 unsigned int obj_size;
556 558
559 kmem_cache_t *twsk_slab;
560 unsigned int twsk_obj_size;
561 atomic_t *orphan_count;
562
557 struct request_sock_ops *rsk_prot; 563 struct request_sock_ops *rsk_prot;
558 564
559 struct module *owner; 565 struct module *owner;
@@ -561,7 +567,9 @@ struct proto {
561 char name[32]; 567 char name[32];
562 568
563 struct list_head node; 569 struct list_head node;
564 570#ifdef SOCK_REFCNT_DEBUG
571 atomic_t socks;
572#endif
565 struct { 573 struct {
566 int inuse; 574 int inuse;
567 u8 __pad[SMP_CACHE_BYTES - sizeof(int)]; 575 u8 __pad[SMP_CACHE_BYTES - sizeof(int)];
@@ -571,6 +579,31 @@ struct proto {
571extern int proto_register(struct proto *prot, int alloc_slab); 579extern int proto_register(struct proto *prot, int alloc_slab);
572extern void proto_unregister(struct proto *prot); 580extern void proto_unregister(struct proto *prot);
573 581
582#ifdef SOCK_REFCNT_DEBUG
583static inline void sk_refcnt_debug_inc(struct sock *sk)
584{
585 atomic_inc(&sk->sk_prot->socks);
586}
587
588static inline void sk_refcnt_debug_dec(struct sock *sk)
589{
590 atomic_dec(&sk->sk_prot->socks);
591 printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
592 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
593}
594
595static inline void sk_refcnt_debug_release(const struct sock *sk)
596{
597 if (atomic_read(&sk->sk_refcnt) != 1)
598 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
599 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
600}
601#else /* SOCK_REFCNT_DEBUG */
602#define sk_refcnt_debug_inc(sk) do { } while (0)
603#define sk_refcnt_debug_dec(sk) do { } while (0)
604#define sk_refcnt_debug_release(sk) do { } while (0)
605#endif /* SOCK_REFCNT_DEBUG */
606
574/* Called with local bh disabled */ 607/* Called with local bh disabled */
575static __inline__ void sock_prot_inc_use(struct proto *prot) 608static __inline__ void sock_prot_inc_use(struct proto *prot)
576{ 609{
@@ -582,6 +615,15 @@ static __inline__ void sock_prot_dec_use(struct proto *prot)
582 prot->stats[smp_processor_id()].inuse--; 615 prot->stats[smp_processor_id()].inuse--;
583} 616}
584 617
618/* With per-bucket locks this operation is not-atomic, so that
619 * this version is not worse.
620 */
621static inline void __sk_prot_rehash(struct sock *sk)
622{
623 sk->sk_prot->unhash(sk);
624 sk->sk_prot->hash(sk);
625}
626
585/* About 10 seconds */ 627/* About 10 seconds */
586#define SOCK_DESTROY_TIME (10*HZ) 628#define SOCK_DESTROY_TIME (10*HZ)
587 629
@@ -693,6 +735,8 @@ extern struct sock *sk_alloc(int family,
693 unsigned int __nocast priority, 735 unsigned int __nocast priority,
694 struct proto *prot, int zero_it); 736 struct proto *prot, int zero_it);
695extern void sk_free(struct sock *sk); 737extern void sk_free(struct sock *sk);
738extern struct sock *sk_clone(const struct sock *sk,
739 const unsigned int __nocast priority);
696 740
697extern struct sk_buff *sock_wmalloc(struct sock *sk, 741extern struct sk_buff *sock_wmalloc(struct sock *sk,
698 unsigned long size, int force, 742 unsigned long size, int force,
@@ -986,6 +1030,16 @@ sk_dst_check(struct sock *sk, u32 cookie)
986 return dst; 1030 return dst;
987} 1031}
988 1032
1033static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1034{
1035 __sk_dst_set(sk, dst);
1036 sk->sk_route_caps = dst->dev->features;
1037 if (sk->sk_route_caps & NETIF_F_TSO) {
1038 if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
1039 sk->sk_route_caps &= ~NETIF_F_TSO;
1040 }
1041}
1042
989static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb) 1043static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
990{ 1044{
991 sk->sk_wmem_queued += skb->truesize; 1045 sk->sk_wmem_queued += skb->truesize;
@@ -1146,7 +1200,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
1146 int hdr_len; 1200 int hdr_len;
1147 1201
1148 hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header); 1202 hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
1149 skb = alloc_skb(size + hdr_len, gfp); 1203 skb = alloc_skb_fclone(size + hdr_len, gfp);
1150 if (skb) { 1204 if (skb) {
1151 skb->truesize += mem; 1205 skb->truesize += mem;
1152 if (sk->sk_forward_alloc >= (int)skb->truesize || 1206 if (sk->sk_forward_alloc >= (int)skb->truesize ||
@@ -1228,16 +1282,19 @@ static inline int sock_intr_errno(long timeo)
1228static __inline__ void 1282static __inline__ void
1229sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 1283sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1230{ 1284{
1231 struct timeval *stamp = &skb->stamp; 1285 struct timeval stamp;
1286
1287 skb_get_timestamp(skb, &stamp);
1232 if (sock_flag(sk, SOCK_RCVTSTAMP)) { 1288 if (sock_flag(sk, SOCK_RCVTSTAMP)) {
1233 /* Race occurred between timestamp enabling and packet 1289 /* Race occurred between timestamp enabling and packet
1234 receiving. Fill in the current time for now. */ 1290 receiving. Fill in the current time for now. */
1235 if (stamp->tv_sec == 0) 1291 if (stamp.tv_sec == 0)
1236 do_gettimeofday(stamp); 1292 do_gettimeofday(&stamp);
1293 skb_set_timestamp(skb, &stamp);
1237 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval), 1294 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
1238 stamp); 1295 &stamp);
1239 } else 1296 } else
1240 sk->sk_stamp = *stamp; 1297 sk->sk_stamp = stamp;
1241} 1298}
1242 1299
1243/** 1300/**
@@ -1262,11 +1319,11 @@ extern int sock_get_timestamp(struct sock *, struct timeval __user *);
1262 */ 1319 */
1263 1320
1264#if 0 1321#if 0
1265#define NETDEBUG(x) do { } while (0) 1322#define NETDEBUG(fmt, args...) do { } while (0)
1266#define LIMIT_NETDEBUG(x) do {} while(0) 1323#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
1267#else 1324#else
1268#define NETDEBUG(x) do { x; } while (0) 1325#define NETDEBUG(fmt, args...) printk(fmt,##args)
1269#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0) 1326#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
1270#endif 1327#endif
1271 1328
1272/* 1329/*
@@ -1313,4 +1370,14 @@ static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsign
1313} 1370}
1314#endif 1371#endif
1315 1372
1373extern void sk_init(void);
1374
1375#ifdef CONFIG_SYSCTL
1376extern struct ctl_table core_table[];
1377extern int sysctl_optmem_max;
1378#endif
1379
1380extern __u32 sysctl_wmem_default;
1381extern __u32 sysctl_rmem_default;
1382
1316#endif /* _SOCK_H */ 1383#endif /* _SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5010f0c5a56e..d6bcf1317a6a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -21,360 +21,29 @@
21#define TCP_DEBUG 1 21#define TCP_DEBUG 1
22#define FASTRETRANS_DEBUG 1 22#define FASTRETRANS_DEBUG 1
23 23
24/* Cancel timers, when they are not required. */
25#undef TCP_CLEAR_TIMERS
26
27#include <linux/config.h> 24#include <linux/config.h>
28#include <linux/list.h> 25#include <linux/list.h>
29#include <linux/tcp.h> 26#include <linux/tcp.h>
30#include <linux/slab.h> 27#include <linux/slab.h>
31#include <linux/cache.h> 28#include <linux/cache.h>
32#include <linux/percpu.h> 29#include <linux/percpu.h>
30
31#include <net/inet_connection_sock.h>
32#include <net/inet_timewait_sock.h>
33#include <net/inet_hashtables.h>
33#include <net/checksum.h> 34#include <net/checksum.h>
34#include <net/request_sock.h> 35#include <net/request_sock.h>
35#include <net/sock.h> 36#include <net/sock.h>
36#include <net/snmp.h> 37#include <net/snmp.h>
37#include <net/ip.h> 38#include <net/ip.h>
38#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 39#include <net/tcp_states.h>
39#include <linux/ipv6.h>
40#endif
41#include <linux/seq_file.h>
42
43/* This is for all connections with a full identity, no wildcards.
44 * New scheme, half the table is for TIME_WAIT, the other half is
45 * for the rest. I'll experiment with dynamic table growth later.
46 */
47struct tcp_ehash_bucket {
48 rwlock_t lock;
49 struct hlist_head chain;
50} __attribute__((__aligned__(8)));
51
52/* This is for listening sockets, thus all sockets which possess wildcards. */
53#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
54
55/* There are a few simple rules, which allow for local port reuse by
56 * an application. In essence:
57 *
58 * 1) Sockets bound to different interfaces may share a local port.
59 * Failing that, goto test 2.
60 * 2) If all sockets have sk->sk_reuse set, and none of them are in
61 * TCP_LISTEN state, the port may be shared.
62 * Failing that, goto test 3.
63 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
64 * address, and none of them are the same, the port may be
65 * shared.
66 * Failing this, the port cannot be shared.
67 *
68 * The interesting point, is test #2. This is what an FTP server does
69 * all day. To optimize this case we use a specific flag bit defined
70 * below. As we add sockets to a bind bucket list, we perform a
71 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
72 * As long as all sockets added to a bind bucket pass this test,
73 * the flag bit will be set.
74 * The resulting situation is that tcp_v[46]_verify_bind() can just check
75 * for this flag bit, if it is set and the socket trying to bind has
76 * sk->sk_reuse set, we don't even have to walk the owners list at all,
77 * we return that it is ok to bind this socket to the requested local port.
78 *
79 * Sounds like a lot of work, but it is worth it. In a more naive
80 * implementation (ie. current FreeBSD etc.) the entire list of ports
81 * must be walked for each data port opened by an ftp server. Needless
82 * to say, this does not scale at all. With a couple thousand FTP
83 * users logged onto your box, isn't it nice to know that new data
84 * ports are created in O(1) time? I thought so. ;-) -DaveM
85 */
86struct tcp_bind_bucket {
87 unsigned short port;
88 signed short fastreuse;
89 struct hlist_node node;
90 struct hlist_head owners;
91};
92
93#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
94
95struct tcp_bind_hashbucket {
96 spinlock_t lock;
97 struct hlist_head chain;
98};
99
100static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
101{
102 return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
103}
104
105static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
106{
107 return hlist_empty(&head->chain) ? NULL : __tb_head(head);
108}
109
110extern struct tcp_hashinfo {
111 /* This is for sockets with full identity only. Sockets here will
112 * always be without wildcards and will have the following invariant:
113 *
114 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
115 *
116 * First half of the table is for sockets not in TIME_WAIT, second half
117 * is for TIME_WAIT sockets only.
118 */
119 struct tcp_ehash_bucket *__tcp_ehash;
120
121 /* Ok, let's try this, I give up, we do need a local binding
122 * TCP hash as well as the others for fast bind/connect.
123 */
124 struct tcp_bind_hashbucket *__tcp_bhash;
125 40
126 int __tcp_bhash_size; 41#include <linux/seq_file.h>
127 int __tcp_ehash_size;
128
129 /* All sockets in TCP_LISTEN state will be in here. This is the only
130 * table where wildcard'd TCP sockets can exist. Hash function here
131 * is just local port number.
132 */
133 struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
134
135 /* All the above members are written once at bootup and
136 * never written again _or_ are predominantly read-access.
137 *
138 * Now align to a new cache line as all the following members
139 * are often dirty.
140 */
141 rwlock_t __tcp_lhash_lock ____cacheline_aligned;
142 atomic_t __tcp_lhash_users;
143 wait_queue_head_t __tcp_lhash_wait;
144 spinlock_t __tcp_portalloc_lock;
145} tcp_hashinfo;
146
147#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
148#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
149#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
150#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
151#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
152#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
153#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
154#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
155#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
156
157extern kmem_cache_t *tcp_bucket_cachep;
158extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
159 unsigned short snum);
160extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
161extern void tcp_bucket_unlock(struct sock *sk);
162extern int tcp_port_rover;
163
164/* These are AF independent. */
165static __inline__ int tcp_bhashfn(__u16 lport)
166{
167 return (lport & (tcp_bhash_size - 1));
168}
169
170extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
171 unsigned short snum);
172
173#if (BITS_PER_LONG == 64)
174#define TCP_ADDRCMP_ALIGN_BYTES 8
175#else
176#define TCP_ADDRCMP_ALIGN_BYTES 4
177#endif
178
179/* This is a TIME_WAIT bucket. It works around the memory consumption
180 * problems of sockets in such a state on heavily loaded servers, but
181 * without violating the protocol specification.
182 */
183struct tcp_tw_bucket {
184 /*
185 * Now struct sock also uses sock_common, so please just
186 * don't add nothing before this first member (__tw_common) --acme
187 */
188 struct sock_common __tw_common;
189#define tw_family __tw_common.skc_family
190#define tw_state __tw_common.skc_state
191#define tw_reuse __tw_common.skc_reuse
192#define tw_bound_dev_if __tw_common.skc_bound_dev_if
193#define tw_node __tw_common.skc_node
194#define tw_bind_node __tw_common.skc_bind_node
195#define tw_refcnt __tw_common.skc_refcnt
196 volatile unsigned char tw_substate;
197 unsigned char tw_rcv_wscale;
198 __u16 tw_sport;
199 /* Socket demultiplex comparisons on incoming packets. */
200 /* these five are in inet_sock */
201 __u32 tw_daddr
202 __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
203 __u32 tw_rcv_saddr;
204 __u16 tw_dport;
205 __u16 tw_num;
206 /* And these are ours. */
207 int tw_hashent;
208 int tw_timeout;
209 __u32 tw_rcv_nxt;
210 __u32 tw_snd_nxt;
211 __u32 tw_rcv_wnd;
212 __u32 tw_ts_recent;
213 long tw_ts_recent_stamp;
214 unsigned long tw_ttd;
215 struct tcp_bind_bucket *tw_tb;
216 struct hlist_node tw_death_node;
217#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
218 struct in6_addr tw_v6_daddr;
219 struct in6_addr tw_v6_rcv_saddr;
220 int tw_v6_ipv6only;
221#endif
222};
223
224static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
225 struct hlist_head *list)
226{
227 hlist_add_head(&tw->tw_node, list);
228}
229
230static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
231 struct hlist_head *list)
232{
233 hlist_add_head(&tw->tw_bind_node, list);
234}
235
236static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
237{
238 return tw->tw_death_node.pprev != NULL;
239}
240
241static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
242{
243 tw->tw_death_node.pprev = NULL;
244}
245
246static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
247{
248 __hlist_del(&tw->tw_death_node);
249 tw_dead_node_init(tw);
250}
251
252static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
253{
254 if (tw_dead_hashed(tw)) {
255 __tw_del_dead_node(tw);
256 return 1;
257 }
258 return 0;
259}
260
261#define tw_for_each(tw, node, head) \
262 hlist_for_each_entry(tw, node, head, tw_node)
263
264#define tw_for_each_inmate(tw, node, jail) \
265 hlist_for_each_entry(tw, node, jail, tw_death_node)
266
267#define tw_for_each_inmate_safe(tw, node, safe, jail) \
268 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
269
270#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
271
272static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
273{
274 return likely(sk->sk_state != TCP_TIME_WAIT) ?
275 inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
276}
277
278#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
279static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
280{
281 return likely(sk->sk_state != TCP_TIME_WAIT) ?
282 &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
283}
284
285static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
286{
287 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
288}
289
290#define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
291
292static inline int tcp_v6_ipv6only(const struct sock *sk)
293{
294 return likely(sk->sk_state != TCP_TIME_WAIT) ?
295 ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
296}
297#else
298# define __tcp_v6_rcv_saddr(__sk) NULL
299# define tcp_v6_rcv_saddr(__sk) NULL
300# define tcptw_sk_ipv6only(__sk) 0
301# define tcp_v6_ipv6only(__sk) 0
302#endif
303 42
304extern kmem_cache_t *tcp_timewait_cachep; 43extern struct inet_hashinfo tcp_hashinfo;
305
306static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
307{
308 if (atomic_dec_and_test(&tw->tw_refcnt)) {
309#ifdef INET_REFCNT_DEBUG
310 printk(KERN_DEBUG "tw_bucket %p released\n", tw);
311#endif
312 kmem_cache_free(tcp_timewait_cachep, tw);
313 }
314}
315 44
316extern atomic_t tcp_orphan_count; 45extern atomic_t tcp_orphan_count;
317extern int tcp_tw_count;
318extern void tcp_time_wait(struct sock *sk, int state, int timeo); 46extern void tcp_time_wait(struct sock *sk, int state, int timeo);
319extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
320
321
322/* Socket demux engine toys. */
323#ifdef __BIG_ENDIAN
324#define TCP_COMBINED_PORTS(__sport, __dport) \
325 (((__u32)(__sport)<<16) | (__u32)(__dport))
326#else /* __LITTLE_ENDIAN */
327#define TCP_COMBINED_PORTS(__sport, __dport) \
328 (((__u32)(__dport)<<16) | (__u32)(__sport))
329#endif
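Note: TCP_COMBINED_PORTS() packs the packet's two port fields into one 32-bit word laid out to match how the socket's dport and local port sit adjacently in memory, so the demux path can check both with a single compare. A userspace sketch, assuming GCC/Clang's __BYTE_ORDER__ predefines:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the packet's ports so the result matches a socket's adjacent
 * { dport, local port } pair read back as one 32-bit word. */
static uint32_t combined_ports(uint16_t sport, uint16_t dport)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return ((uint32_t)sport << 16) | dport;
#else
        return ((uint32_t)dport << 16) | sport;
#endif
}

int main(void)
{
        /* socket connected to peer port 80 from local port 33000 */
        struct { uint16_t dport, num; } sock_ports = { 80, 33000 };
        uint32_t word;

        memcpy(&word, &sock_ports, sizeof(word));
        /* incoming packet: sport = 80 (peer), dport = 33000 (local) */
        printf("%s\n", word == combined_ports(80, 33000) ?
               "match" : "no match");
        return 0;
}

The word built from the packet's (sport, dport) equals the in-memory pair exactly when the packet's source port matches the socket's dport and its destination port matches the local port.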
330
331#if (BITS_PER_LONG == 64)
332#ifdef __BIG_ENDIAN
333#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
334 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
335#else /* __LITTLE_ENDIAN */
336#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
337 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
338#endif /* __BIG_ENDIAN */
339#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
340 (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
341 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
342 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
343#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
344 (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
345 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
346 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
347#else /* 32-bit arch */
348#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
349#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
350 ((inet_sk(__sk)->daddr == (__saddr)) && \
351 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
352 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
353 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
354#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
355 ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
356 (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
357 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
358 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
359#endif /* 64-bit arch */
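Note: the 64-bit variant relies on the same idea one level up. TCP_ADDRCMP_ALIGN_BYTES forces daddr and rcv_saddr to sit adjacent and 8-byte aligned, so TCP_V4_ADDR_COOKIE can compare both addresses with one 64-bit load. A userspace sketch with example addresses (10.0.0.1 as the peer, 192.168.0.1 as the local address):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The two 32-bit addresses are kept adjacent and 8-byte aligned so a
 * single 64-bit compare covers both (the TCP_V4_ADDR_COOKIE trick). */
struct addr_pair {
        uint32_t daddr __attribute__((aligned(8)));
        uint32_t rcv_saddr;
};

static uint64_t addr_cookie(uint32_t saddr, uint32_t daddr)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return ((uint64_t)saddr << 32) | daddr;
#else
        return ((uint64_t)daddr << 32) | saddr;
#endif
}

int main(void)
{
        struct addr_pair p = { 0x0a000001, 0xc0a80001 }; /* daddr, rcv_saddr */
        uint64_t word;

        memcpy(&word, &p, sizeof(word));
        /* packet: saddr = peer = 10.0.0.1, daddr = local = 192.168.0.1 */
        printf("%s\n", word == addr_cookie(0x0a000001, 0xc0a80001) ?
               "match" : "no match");
        return 0;
}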
360
361#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
362 (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
363 ((__sk)->sk_family == AF_INET6) && \
364 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
365 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
366 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
367
368/* These can have wildcards, don't try too hard. */
369static __inline__ int tcp_lhashfn(unsigned short num)
370{
371 return num & (TCP_LHTABLE_SIZE - 1);
372}
373
374static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
375{
376 return tcp_lhashfn(inet_sk(sk)->num);
377}
378 47
379#define MAX_TCP_HEADER (128 + MAX_HEADER) 48#define MAX_TCP_HEADER (128 + MAX_HEADER)
380 49
@@ -478,33 +147,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
478 * timestamps. It must be less than 147 * timestamps. It must be less than
479 * minimal timewait lifetime. 148 * minimal timewait lifetime.
480 */ 149 */
481
482#define TCP_TW_RECYCLE_SLOTS_LOG 5
483#define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
484
485/* If time > 4sec, it is the "slow" path and no recycling is required,
486   so we select the tick to cover a range of about 4 seconds.
487 */
488
489#if HZ <= 16 || HZ > 4096
490# error Unsupported: HZ <= 16 or HZ > 4096
491#elif HZ <= 32
492# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
493#elif HZ <= 64
494# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
495#elif HZ <= 128
496# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
497#elif HZ <= 256
498# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
499#elif HZ <= 512
500# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
501#elif HZ <= 1024
502# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
503#elif HZ <= 2048
504# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
505#else
506# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
507#endif
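Note: the ladder above picks TCP_TW_RECYCLE_TICK so that the 32 recycle slots together span roughly four seconds regardless of HZ. A minimal userspace sketch of the arithmetic, assuming HZ = 1000 (so the HZ <= 1024 branch applies):

#include <stdio.h>

#define TCP_TW_RECYCLE_SLOTS_LOG 5
#define TCP_TW_RECYCLE_SLOTS     (1 << TCP_TW_RECYCLE_SLOTS_LOG)

int main(void)
{
        /* For HZ = 1000 the ladder selects TICK = 10 + 2 - 5 = 7, i.e.
         * a slot width of 2^7 = 128 jiffies (128 ms), so the whole
         * wheel spans 32 * 128 ms, about 4.1 s. */
        int hz = 1000, tick = 10 + 2 - TCP_TW_RECYCLE_SLOTS_LOG;

        printf("slot width: %d jiffies (%d ms)\n", 1 << tick,
               (1 << tick) * 1000 / hz);
        printf("wheel span: %d ms\n",
               TCP_TW_RECYCLE_SLOTS * (1 << tick) * 1000 / hz);
        return 0;
}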
508/* 150/*
509 * TCP option 151 * TCP option
510 */ 152 */
@@ -534,22 +176,18 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
534#define TCPOLEN_SACK_BASE_ALIGNED 4 176#define TCPOLEN_SACK_BASE_ALIGNED 4
535#define TCPOLEN_SACK_PERBLOCK 8 177#define TCPOLEN_SACK_PERBLOCK 8
536 178
537#define TCP_TIME_RETRANS 1 /* Retransmit timer */
538#define TCP_TIME_DACK 2 /* Delayed ack timer */
539#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
540#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
541
542/* Flags in tp->nonagle */ 179/* Flags in tp->nonagle */
543#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ 180#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
544#define TCP_NAGLE_CORK 2 /* Socket is corked */ 181#define TCP_NAGLE_CORK 2 /* Socket is corked */
545#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */ 182#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
546 183
184extern struct inet_timewait_death_row tcp_death_row;
185
547/* sysctl variables for tcp */ 186/* sysctl variables for tcp */
548extern int sysctl_tcp_timestamps; 187extern int sysctl_tcp_timestamps;
549extern int sysctl_tcp_window_scaling; 188extern int sysctl_tcp_window_scaling;
550extern int sysctl_tcp_sack; 189extern int sysctl_tcp_sack;
551extern int sysctl_tcp_fin_timeout; 190extern int sysctl_tcp_fin_timeout;
552extern int sysctl_tcp_tw_recycle;
553extern int sysctl_tcp_keepalive_time; 191extern int sysctl_tcp_keepalive_time;
554extern int sysctl_tcp_keepalive_probes; 192extern int sysctl_tcp_keepalive_probes;
555extern int sysctl_tcp_keepalive_intvl; 193extern int sysctl_tcp_keepalive_intvl;
@@ -564,7 +202,6 @@ extern int sysctl_tcp_stdurg;
564extern int sysctl_tcp_rfc1337; 202extern int sysctl_tcp_rfc1337;
565extern int sysctl_tcp_abort_on_overflow; 203extern int sysctl_tcp_abort_on_overflow;
566extern int sysctl_tcp_max_orphans; 204extern int sysctl_tcp_max_orphans;
567extern int sysctl_tcp_max_tw_buckets;
568extern int sysctl_tcp_fack; 205extern int sysctl_tcp_fack;
569extern int sysctl_tcp_reordering; 206extern int sysctl_tcp_reordering;
570extern int sysctl_tcp_ecn; 207extern int sysctl_tcp_ecn;
@@ -585,12 +222,6 @@ extern atomic_t tcp_memory_allocated;
585extern atomic_t tcp_sockets_allocated; 222extern atomic_t tcp_sockets_allocated;
586extern int tcp_memory_pressure; 223extern int tcp_memory_pressure;
587 224
588#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
589#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
590#else
591#define TCP_INET_FAMILY(fam) 1
592#endif
593
594/* 225/*
595 * Pointers to address related TCP functions 226 * Pointers to address related TCP functions
596 * (i.e. things that depend on the address family) 227 * (i.e. things that depend on the address family)
@@ -671,9 +302,6 @@ DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
671#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val) 302#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
672#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val) 303#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
673 304
674extern void tcp_put_port(struct sock *sk);
675extern void tcp_inherit_port(struct sock *sk, struct sock *child);
676
677extern void tcp_v4_err(struct sk_buff *skb, u32); 305extern void tcp_v4_err(struct sk_buff *skb, u32);
678 306
679extern void tcp_shutdown (struct sock *sk, int how); 307extern void tcp_shutdown (struct sock *sk, int how);
@@ -682,7 +310,7 @@ extern int tcp_v4_rcv(struct sk_buff *skb);
682 310
683extern int tcp_v4_remember_stamp(struct sock *sk); 311extern int tcp_v4_remember_stamp(struct sock *sk);
684 312
685extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw); 313extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
686 314
687extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, 315extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
688 struct msghdr *msg, size_t size); 316 struct msghdr *msg, size_t size);
@@ -704,42 +332,22 @@ extern int tcp_rcv_established(struct sock *sk,
704 332
705extern void tcp_rcv_space_adjust(struct sock *sk); 333extern void tcp_rcv_space_adjust(struct sock *sk);
706 334
707enum tcp_ack_state_t 335static inline void tcp_dec_quickack_mode(struct sock *sk,
708{ 336 const unsigned int pkts)
709 TCP_ACK_SCHED = 1,
710 TCP_ACK_TIMER = 2,
711 TCP_ACK_PUSHED= 4
712};
713
714static inline void tcp_schedule_ack(struct tcp_sock *tp)
715{ 337{
716 tp->ack.pending |= TCP_ACK_SCHED; 338 struct inet_connection_sock *icsk = inet_csk(sk);
717}
718
719static inline int tcp_ack_scheduled(struct tcp_sock *tp)
720{
721 return tp->ack.pending&TCP_ACK_SCHED;
722}
723
724static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
725{
726 if (tp->ack.quick) {
727 if (pkts >= tp->ack.quick) {
728 tp->ack.quick = 0;
729 339
340 if (icsk->icsk_ack.quick) {
341 if (pkts >= icsk->icsk_ack.quick) {
342 icsk->icsk_ack.quick = 0;
730 /* Leaving quickack mode we deflate ATO. */ 343 /* Leaving quickack mode we deflate ATO. */
731 tp->ack.ato = TCP_ATO_MIN; 344 icsk->icsk_ack.ato = TCP_ATO_MIN;
732 } else 345 } else
733 tp->ack.quick -= pkts; 346 icsk->icsk_ack.quick -= pkts;
734 } 347 }
735} 348}
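Note: the rewritten helper only changes where the quick-ack state lives (inet_connection_sock instead of tcp_sock); the accounting is unchanged: each call consumes part of the quick-ack quota, and when the quota runs out the ACK timeout deflates to its minimum. A userspace mirror of that bookkeeping; the ATO_MIN value here is made up for illustration:

#include <stdio.h>

#define ATO_MIN 40 /* assumed value, ms, for illustration only */

struct ack_state { unsigned int quick; unsigned int ato; };

/* Mirrors the icsk_ack.quick bookkeeping: consume the quick-ack quota
 * and deflate the ACK timeout once it is exhausted. */
static void dec_quickack_mode(struct ack_state *a, unsigned int pkts)
{
        if (!a->quick)
                return;
        if (pkts >= a->quick) {
                a->quick = 0;
                a->ato = ATO_MIN;   /* leaving quickack mode */
        } else {
                a->quick -= pkts;
        }
}

int main(void)
{
        struct ack_state a = { .quick = 8, .ato = 200 };

        dec_quickack_mode(&a, 3);  /* quota 8 -> 5 */
        dec_quickack_mode(&a, 5);  /* quota exhausted, ato deflates */
        printf("quick=%u ato=%u\n", a.quick, a.ato);
        return 0;
}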
736 349
737extern void tcp_enter_quickack_mode(struct tcp_sock *tp); 350extern void tcp_enter_quickack_mode(struct sock *sk);
738
739static __inline__ void tcp_delack_init(struct tcp_sock *tp)
740{
741 memset(&tp->ack, 0, sizeof(tp->ack));
742}
743 351
744static inline void tcp_clear_options(struct tcp_options_received *rx_opt) 352static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
745{ 353{
@@ -755,10 +363,9 @@ enum tcp_tw_status
755}; 363};
756 364
757 365
758extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, 366extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
759 struct sk_buff *skb, 367 struct sk_buff *skb,
760 struct tcphdr *th, 368 const struct tcphdr *th);
761 unsigned len);
762 369
763extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, 370extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
764 struct request_sock *req, 371 struct request_sock *req,
@@ -773,7 +380,6 @@ extern void tcp_update_metrics(struct sock *sk);
773 380
774extern void tcp_close(struct sock *sk, 381extern void tcp_close(struct sock *sk,
775 long timeout); 382 long timeout);
776extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
777extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); 383extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
778 384
779extern int tcp_getsockopt(struct sock *sk, int level, 385extern int tcp_getsockopt(struct sock *sk, int level,
@@ -789,8 +395,6 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
789 size_t len, int nonblock, 395 size_t len, int nonblock,
790 int flags, int *addr_len); 396 int flags, int *addr_len);
791 397
792extern int tcp_listen_start(struct sock *sk);
793
794extern void tcp_parse_options(struct sk_buff *skb, 398extern void tcp_parse_options(struct sk_buff *skb,
795 struct tcp_options_received *opt_rx, 399 struct tcp_options_received *opt_rx,
796 int estab); 400 int estab);
@@ -799,11 +403,6 @@ extern void tcp_parse_options(struct sk_buff *skb,
799 * TCP v4 functions exported for the inet6 API 403 * TCP v4 functions exported for the inet6 API
800 */ 404 */
801 405
802extern int tcp_v4_rebuild_header(struct sock *sk);
803
804extern int tcp_v4_build_header(struct sock *sk,
805 struct sk_buff *skb);
806
807extern void tcp_v4_send_check(struct sock *sk, 406extern void tcp_v4_send_check(struct sock *sk,
808 struct tcphdr *th, int len, 407 struct tcphdr *th, int len,
809 struct sk_buff *skb); 408 struct sk_buff *skb);
@@ -872,18 +471,15 @@ extern void tcp_cwnd_application_limited(struct sock *sk);
872 471
873/* tcp_timer.c */ 472/* tcp_timer.c */
874extern void tcp_init_xmit_timers(struct sock *); 473extern void tcp_init_xmit_timers(struct sock *);
875extern void tcp_clear_xmit_timers(struct sock *); 474static inline void tcp_clear_xmit_timers(struct sock *sk)
475{
476 inet_csk_clear_xmit_timers(sk);
477}
876 478
877extern void tcp_delete_keepalive_timer(struct sock *);
878extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
879extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); 479extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
880extern unsigned int tcp_current_mss(struct sock *sk, int large); 480extern unsigned int tcp_current_mss(struct sock *sk, int large);
881 481
882#ifdef TCP_DEBUG 482/* tcp.c */
883extern const char tcp_timer_bug_msg[];
884#endif
885
886/* tcp_diag.c */
887extern void tcp_get_info(struct sock *, struct tcp_info *); 483extern void tcp_get_info(struct sock *, struct tcp_info *);
888 484
889/* Read 'sendfile()'-style from a TCP socket */ 485/* Read 'sendfile()'-style from a TCP socket */
@@ -892,72 +488,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
892extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 488extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
893 sk_read_actor_t recv_actor); 489 sk_read_actor_t recv_actor);
894 490
895static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
896{
897 struct tcp_sock *tp = tcp_sk(sk);
898
899 switch (what) {
900 case TCP_TIME_RETRANS:
901 case TCP_TIME_PROBE0:
902 tp->pending = 0;
903
904#ifdef TCP_CLEAR_TIMERS
905 sk_stop_timer(sk, &tp->retransmit_timer);
906#endif
907 break;
908 case TCP_TIME_DACK:
909 tp->ack.blocked = 0;
910 tp->ack.pending = 0;
911
912#ifdef TCP_CLEAR_TIMERS
913 sk_stop_timer(sk, &tp->delack_timer);
914#endif
915 break;
916 default:
917#ifdef TCP_DEBUG
918 printk(tcp_timer_bug_msg);
919#endif
920 return;
921 };
922
923}
924
925/*
926 * Reset the retransmission timer
927 */
928static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
929{
930 struct tcp_sock *tp = tcp_sk(sk);
931
932 if (when > TCP_RTO_MAX) {
933#ifdef TCP_DEBUG
934 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
935#endif
936 when = TCP_RTO_MAX;
937 }
938
939 switch (what) {
940 case TCP_TIME_RETRANS:
941 case TCP_TIME_PROBE0:
942 tp->pending = what;
943 tp->timeout = jiffies+when;
944 sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
945 break;
946
947 case TCP_TIME_DACK:
948 tp->ack.pending |= TCP_ACK_TIMER;
949 tp->ack.timeout = jiffies+when;
950 sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
951 break;
952
953 default:
954#ifdef TCP_DEBUG
955 printk(tcp_timer_bug_msg);
956#endif
957 return;
958 };
959}
960
961/* Initialize RCV_MSS value. 491/* Initialize RCV_MSS value.
962 * RCV_MSS is our guess about the MSS used by the peer. 492
963 * We don't have any direct information about the MSS. 493
@@ -975,7 +505,7 @@ static inline void tcp_initialize_rcv_mss(struct sock *sk)
975 hint = min(hint, TCP_MIN_RCVMSS); 505 hint = min(hint, TCP_MIN_RCVMSS);
976 hint = max(hint, TCP_MIN_MSS); 506 hint = max(hint, TCP_MIN_MSS);
977 507
978 tp->ack.rcv_mss = hint; 508 inet_csk(sk)->icsk_ack.rcv_mss = hint;
979} 509}
980 510
981static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 511static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -1110,7 +640,8 @@ static inline void tcp_packets_out_inc(struct sock *sk,
1110 640
1111 tp->packets_out += tcp_skb_pcount(skb); 641 tp->packets_out += tcp_skb_pcount(skb);
1112 if (!orig) 642 if (!orig)
1113 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 643 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
644 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
1114} 645}
1115 646
1116static inline void tcp_packets_out_dec(struct tcp_sock *tp, 647static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -1138,29 +669,29 @@ struct tcp_congestion_ops {
1138 struct list_head list; 669 struct list_head list;
1139 670
1140 /* initialize private data (optional) */ 671 /* initialize private data (optional) */
1141 void (*init)(struct tcp_sock *tp); 672 void (*init)(struct sock *sk);
1142 /* cleanup private data (optional) */ 673 /* cleanup private data (optional) */
1143 void (*release)(struct tcp_sock *tp); 674 void (*release)(struct sock *sk);
1144 675
1145 /* return slow start threshold (required) */ 676 /* return slow start threshold (required) */
1146 u32 (*ssthresh)(struct tcp_sock *tp); 677 u32 (*ssthresh)(struct sock *sk);
1147 /* lower bound for congestion window (optional) */ 678 /* lower bound for congestion window (optional) */
1148 u32 (*min_cwnd)(struct tcp_sock *tp); 679 u32 (*min_cwnd)(struct sock *sk);
1149 /* do new cwnd calculation (required) */ 680 /* do new cwnd calculation (required) */
1150 void (*cong_avoid)(struct tcp_sock *tp, u32 ack, 681 void (*cong_avoid)(struct sock *sk, u32 ack,
1151 u32 rtt, u32 in_flight, int good_ack); 682 u32 rtt, u32 in_flight, int good_ack);
1152 /* round trip time sample per acked packet (optional) */ 683 /* round trip time sample per acked packet (optional) */
1153 void (*rtt_sample)(struct tcp_sock *tp, u32 usrtt); 684 void (*rtt_sample)(struct sock *sk, u32 usrtt);
1154 /* call before changing ca_state (optional) */ 685 /* call before changing ca_state (optional) */
1155 void (*set_state)(struct tcp_sock *tp, u8 new_state); 686 void (*set_state)(struct sock *sk, u8 new_state);
1156 /* call when cwnd event occurs (optional) */ 687 /* call when cwnd event occurs (optional) */
1157 void (*cwnd_event)(struct tcp_sock *tp, enum tcp_ca_event ev); 688 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1158 /* new value of cwnd after loss (optional) */ 689 /* new value of cwnd after loss (optional) */
1159 u32 (*undo_cwnd)(struct tcp_sock *tp); 690 u32 (*undo_cwnd)(struct sock *sk);
1160 /* hook for packet ack accounting (optional) */ 691 /* hook for packet ack accounting (optional) */
1161 void (*pkts_acked)(struct tcp_sock *tp, u32 num_acked); 692 void (*pkts_acked)(struct sock *sk, u32 num_acked);
1162 /* get info for tcp_diag (optional) */ 693 /* get info for inet_diag (optional) */
1163 void (*get_info)(struct tcp_sock *tp, u32 ext, struct sk_buff *skb); 694 void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
1164 695
1165 char name[TCP_CA_NAME_MAX]; 696 char name[TCP_CA_NAME_MAX];
1166 struct module *owner; 697 struct module *owner;
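Note: every hook in this ops table now receives the struct sock rather than the tcp_sock, so algorithms reach private and TCP state through inet_csk()/tcp_sk() themselves. A sketch of a toy Reno-like algorithm against the new signatures; it assumes kernel context (tcp_sk(), THIS_MODULE, the max() macro) and is illustrative, not an in-tree module:

/* Sketch only: assumes kernel context (tcp_sk(), module macros). */
static u32 toy_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* Reno-style halving, never below two segments */
        return max(tp->snd_cwnd >> 1U, 2U);
}

static void toy_cong_avoid(struct sock *sk, u32 ack,
                           u32 rtt, u32 in_flight, int good_ack)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->snd_cwnd < tp->snd_ssthresh)
                tp->snd_cwnd++;                     /* slow start */
        else if (++tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                tp->snd_cwnd_cnt = 0;               /* one cwnd per RTT */
                tp->snd_cwnd++;
        }
}

static struct tcp_congestion_ops toy_cong = {
        .ssthresh   = toy_ssthresh,
        .cong_avoid = toy_cong_avoid,
        .min_cwnd   = tcp_reno_min_cwnd,
        .name       = "toy",
        .owner      = THIS_MODULE,
};

A real module would pass &toy_cong to tcp_register_congestion_control() from its init function and unregister it on exit.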
@@ -1169,30 +700,34 @@ struct tcp_congestion_ops {
1169extern int tcp_register_congestion_control(struct tcp_congestion_ops *type); 700extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1170extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); 701extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1171 702
1172extern void tcp_init_congestion_control(struct tcp_sock *tp); 703extern void tcp_init_congestion_control(struct sock *sk);
1173extern void tcp_cleanup_congestion_control(struct tcp_sock *tp); 704extern void tcp_cleanup_congestion_control(struct sock *sk);
1174extern int tcp_set_default_congestion_control(const char *name); 705extern int tcp_set_default_congestion_control(const char *name);
1175extern void tcp_get_default_congestion_control(char *name); 706extern void tcp_get_default_congestion_control(char *name);
1176extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name); 707extern int tcp_set_congestion_control(struct sock *sk, const char *name);
1177 708
1178extern struct tcp_congestion_ops tcp_init_congestion_ops; 709extern struct tcp_congestion_ops tcp_init_congestion_ops;
1179extern u32 tcp_reno_ssthresh(struct tcp_sock *tp); 710extern u32 tcp_reno_ssthresh(struct sock *sk);
1180extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, 711extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
1181 u32 rtt, u32 in_flight, int flag); 712 u32 rtt, u32 in_flight, int flag);
1182extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp); 713extern u32 tcp_reno_min_cwnd(struct sock *sk);
1183extern struct tcp_congestion_ops tcp_reno; 714extern struct tcp_congestion_ops tcp_reno;
1184 715
1185static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) 716static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1186{ 717{
1187 if (tp->ca_ops->set_state) 718 struct inet_connection_sock *icsk = inet_csk(sk);
1188 tp->ca_ops->set_state(tp, ca_state); 719
1189 tp->ca_state = ca_state; 720 if (icsk->icsk_ca_ops->set_state)
721 icsk->icsk_ca_ops->set_state(sk, ca_state);
722 icsk->icsk_ca_state = ca_state;
1190} 723}
1191 724
1192static inline void tcp_ca_event(struct tcp_sock *tp, enum tcp_ca_event event) 725static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1193{ 726{
1194 if (tp->ca_ops->cwnd_event) 727 const struct inet_connection_sock *icsk = inet_csk(sk);
1195 tp->ca_ops->cwnd_event(tp, event); 728
729 if (icsk->icsk_ca_ops->cwnd_event)
730 icsk->icsk_ca_ops->cwnd_event(sk, event);
1196} 731}
1197 732
1198/* This determines how many packets are "in the network" to the best 733/* This determines how many packets are "in the network" to the best
@@ -1218,9 +753,10 @@ static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1218 * The exception is rate halving phase, when cwnd is decreasing towards 753 * The exception is rate halving phase, when cwnd is decreasing towards
1219 * ssthresh. 754 * ssthresh.
1220 */ 755 */
1221static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp) 756static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1222{ 757{
1223 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery)) 758 const struct tcp_sock *tp = tcp_sk(sk);
759 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
1224 return tp->snd_ssthresh; 760 return tp->snd_ssthresh;
1225 else 761 else
1226 return max(tp->snd_ssthresh, 762 return max(tp->snd_ssthresh,
@@ -1237,10 +773,13 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
1237} 773}
1238 774
1239/* Set slow start threshold and cwnd not falling to slow start */ 775/* Set slow start threshold and cwnd not falling to slow start */
1240static inline void __tcp_enter_cwr(struct tcp_sock *tp) 776static inline void __tcp_enter_cwr(struct sock *sk)
1241{ 777{
778 const struct inet_connection_sock *icsk = inet_csk(sk);
779 struct tcp_sock *tp = tcp_sk(sk);
780
1242 tp->undo_marker = 0; 781 tp->undo_marker = 0;
1243 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 782 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1244 tp->snd_cwnd = min(tp->snd_cwnd, 783 tp->snd_cwnd = min(tp->snd_cwnd,
1245 tcp_packets_in_flight(tp) + 1U); 784 tcp_packets_in_flight(tp) + 1U);
1246 tp->snd_cwnd_cnt = 0; 785 tp->snd_cwnd_cnt = 0;
@@ -1249,12 +788,14 @@ static inline void __tcp_enter_cwr(struct tcp_sock *tp)
1249 TCP_ECN_queue_cwr(tp); 788 TCP_ECN_queue_cwr(tp);
1250} 789}
1251 790
1252static inline void tcp_enter_cwr(struct tcp_sock *tp) 791static inline void tcp_enter_cwr(struct sock *sk)
1253{ 792{
793 struct tcp_sock *tp = tcp_sk(sk);
794
1254 tp->prior_ssthresh = 0; 795 tp->prior_ssthresh = 0;
1255 if (tp->ca_state < TCP_CA_CWR) { 796 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
1256 __tcp_enter_cwr(tp); 797 __tcp_enter_cwr(sk);
1257 tcp_set_ca_state(tp, TCP_CA_CWR); 798 tcp_set_ca_state(sk, TCP_CA_CWR);
1258 } 799 }
1259} 800}
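Note: entering CWR takes a fresh slow-start threshold from the congestion ops and clamps cwnd to one more than the packets currently in flight, so the sender cannot burst while backing off. A small numeric illustration of that arithmetic, assuming a Reno-style ssthresh of half the window:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Mirror of __tcp_enter_cwr()'s arithmetic: assume 10 packets
         * in flight, cwnd 16, and a Reno-style ssthresh of cwnd/2. */
        unsigned int cwnd = 16, in_flight = 10;
        unsigned int ssthresh = cwnd >> 1;          /* ops->ssthresh() */

        cwnd = min_u(cwnd, in_flight + 1);          /* don't burst */
        printf("ssthresh=%u cwnd=%u\n", ssthresh, cwnd); /* 8, 11 */
        return 0;
}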
1260 801
@@ -1277,8 +818,10 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
1277 818
1278static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) 819static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
1279{ 820{
1280 if (!tp->packets_out && !tp->pending) 821 const struct inet_connection_sock *icsk = inet_csk(sk);
1281 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto); 822 if (!tp->packets_out && !icsk->icsk_pending)
823 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
824 icsk->icsk_rto, TCP_RTO_MAX);
1282} 825}
1283 826
1284static __inline__ void tcp_push_pending_frames(struct sock *sk, 827static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1297,9 +840,6 @@ static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
1297 tp->snd_wl1 = seq; 840 tp->snd_wl1 = seq;
1298} 841}
1299 842
1300extern void tcp_destroy_sock(struct sock *sk);
1301
1302
1303/* 843/*
1304 * Calculate(/check) TCP checksum 844 * Calculate(/check) TCP checksum
1305 */ 845 */
@@ -1359,8 +899,10 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1359 tp->ucopy.memory = 0; 899 tp->ucopy.memory = 0;
1360 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { 900 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1361 wake_up_interruptible(sk->sk_sleep); 901 wake_up_interruptible(sk->sk_sleep);
1362 if (!tcp_ack_scheduled(tp)) 902 if (!inet_csk_ack_scheduled(sk))
1363 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4); 903 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
904 (3 * TCP_RTO_MIN) / 4,
905 TCP_RTO_MAX);
1364 } 906 }
1365 return 1; 907 return 1;
1366 } 908 }
@@ -1393,9 +935,9 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
1393 TCP_INC_STATS(TCP_MIB_ESTABRESETS); 935 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1394 936
1395 sk->sk_prot->unhash(sk); 937 sk->sk_prot->unhash(sk);
1396 if (tcp_sk(sk)->bind_hash && 938 if (inet_csk(sk)->icsk_bind_hash &&
1397 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 939 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1398 tcp_put_port(sk); 940 inet_put_port(&tcp_hashinfo, sk);
1399 /* fall through */ 941 /* fall through */
1400 default: 942 default:
1401 if (oldstate==TCP_ESTABLISHED) 943 if (oldstate==TCP_ESTABLISHED)
@@ -1422,7 +964,7 @@ static __inline__ void tcp_done(struct sock *sk)
1422 if (!sock_flag(sk, SOCK_DEAD)) 964 if (!sock_flag(sk, SOCK_DEAD))
1423 sk->sk_state_change(sk); 965 sk->sk_state_change(sk);
1424 else 966 else
1425 tcp_destroy_sock(sk); 967 inet_csk_destroy_sock(sk);
1426} 968}
1427 969
1428static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt) 970static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
@@ -1524,54 +1066,6 @@ static inline int tcp_full_space(const struct sock *sk)
1524 return tcp_win_from_space(sk->sk_rcvbuf); 1066 return tcp_win_from_space(sk->sk_rcvbuf);
1525} 1067}
1526 1068
1527static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
1528 struct sock *child)
1529{
1530 reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
1531}
1532
1533static inline void
1534tcp_synq_removed(struct sock *sk, struct request_sock *req)
1535{
1536 if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
1537 tcp_delete_keepalive_timer(sk);
1538}
1539
1540static inline void tcp_synq_added(struct sock *sk)
1541{
1542 if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
1543 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1544}
1545
1546static inline int tcp_synq_len(struct sock *sk)
1547{
1548 return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
1549}
1550
1551static inline int tcp_synq_young(struct sock *sk)
1552{
1553 return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
1554}
1555
1556static inline int tcp_synq_is_full(struct sock *sk)
1557{
1558 return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
1559}
1560
1561static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
1562 struct request_sock **prev)
1563{
1564 reqsk_queue_unlink(&tp->accept_queue, req, prev);
1565}
1566
1567static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
1568 struct request_sock **prev)
1569{
1570 tcp_synq_unlink(tcp_sk(sk), req, prev);
1571 tcp_synq_removed(sk, req);
1572 reqsk_free(req);
1573}
1574
1575static __inline__ void tcp_openreq_init(struct request_sock *req, 1069static __inline__ void tcp_openreq_init(struct request_sock *req,
1576 struct tcp_options_received *rx_opt, 1070 struct tcp_options_received *rx_opt,
1577 struct sk_buff *skb) 1071 struct sk_buff *skb)
@@ -1593,27 +1087,6 @@ static __inline__ void tcp_openreq_init(struct request_sock *req,
1593 1087
1594extern void tcp_enter_memory_pressure(void); 1088extern void tcp_enter_memory_pressure(void);
1595 1089
1596extern void tcp_listen_wlock(void);
1597
1598/* - We may sleep inside this lock.
1599 * - If sleeping is not required (or called from BH),
1600 * use plain read_(un)lock(&tcp_lhash_lock).
1601 */
1602
1603static inline void tcp_listen_lock(void)
1604{
1605	/* read_lock synchronizes reader candidates with writers */
1606 read_lock(&tcp_lhash_lock);
1607 atomic_inc(&tcp_lhash_users);
1608 read_unlock(&tcp_lhash_lock);
1609}
1610
1611static inline void tcp_listen_unlock(void)
1612{
1613 if (atomic_dec_and_test(&tcp_lhash_users))
1614 wake_up(&tcp_lhash_wait);
1615}
1616
1617static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1090static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1618{ 1091{
1619 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl; 1092 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
@@ -1624,12 +1097,13 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)
1624 return tp->keepalive_time ? : sysctl_tcp_keepalive_time; 1097 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1625} 1098}
1626 1099
1627static inline int tcp_fin_time(const struct tcp_sock *tp) 1100static inline int tcp_fin_time(const struct sock *sk)
1628{ 1101{
1629 int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout; 1102 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1103 const int rto = inet_csk(sk)->icsk_rto;
1630 1104
1631 if (fin_timeout < (tp->rto<<2) - (tp->rto>>1)) 1105 if (fin_timeout < (rto << 2) - (rto >> 1))
1632 fin_timeout = (tp->rto<<2) - (tp->rto>>1); 1106 fin_timeout = (rto << 2) - (rto >> 1);
1633 1107
1634 return fin_timeout; 1108 return fin_timeout;
1635} 1109}
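Note: the reworked tcp_fin_time() keeps the old floor: the FIN_WAIT2 timeout may never drop below (rto << 2) - (rto >> 1), which is 4*RTO - RTO/2 = 3.5*RTO. A quick userspace check of the shift arithmetic:

#include <stdio.h>

static int fin_time(int fin_timeout, int rto)
{
        /* (rto << 2) - (rto >> 1) == 4*rto - rto/2 == 3.5 * rto */
        int floor = (rto << 2) - (rto >> 1);

        return fin_timeout < floor ? floor : fin_timeout;
}

int main(void)
{
        /* with rto = 200 (e.g. jiffies), the floor is 700 */
        printf("%d\n", fin_time(60, 200));   /* prints 700 */
        printf("%d\n", fin_time(900, 200));  /* prints 900 */
        return 0;
}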
@@ -1658,15 +1132,6 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
1658 return 1; 1132 return 1;
1659} 1133}
1660 1134
1661static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
1662{
1663 sk->sk_route_caps = dst->dev->features;
1664 if (sk->sk_route_caps & NETIF_F_TSO) {
1665 if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
1666 sk->sk_route_caps &= ~NETIF_F_TSO;
1667 }
1668}
1669
1670#define TCP_CHECK_TIMER(sk) do { } while (0) 1135#define TCP_CHECK_TIMER(sk) do { } while (0)
1671 1136
1672static inline int tcp_use_frto(const struct sock *sk) 1137static inline int tcp_use_frto(const struct sock *sk)
@@ -1718,4 +1183,16 @@ struct tcp_iter_state {
1718extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo); 1183extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
1719extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); 1184extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
1720 1185
1186extern struct request_sock_ops tcp_request_sock_ops;
1187
1188extern int tcp_v4_destroy_sock(struct sock *sk);
1189
1190#ifdef CONFIG_PROC_FS
1191extern int tcp4_proc_init(void);
1192extern void tcp4_proc_exit(void);
1193#endif
1194
1195extern void tcp_v4_init(struct net_proto_family *ops);
1196extern void tcp_init(void);
1197
1721#endif /* _TCP_H */ 1198#endif /* _TCP_H */
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 64980ee8c92a..c6b84397448d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -88,7 +88,7 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
88 * it is surely a retransmit. It is not in the ECN RFC, 88 * it is surely a retransmit. It is not in the ECN RFC,
89 * but Linux follows this rule. */ 89 * but Linux follows this rule. */
90 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) 90 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
91 tcp_enter_quickack_mode(tp); 91 tcp_enter_quickack_mode((struct sock *)tp);
92 } 92 }
93} 93}
94 94
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
new file mode 100644
index 000000000000..b9d4176b2d15
--- /dev/null
+++ b/include/net/tcp_states.h
@@ -0,0 +1,34 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the TCP protocol sk_state field.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#ifndef _LINUX_TCP_STATES_H
14#define _LINUX_TCP_STATES_H
15
16enum {
17 TCP_ESTABLISHED = 1,
18 TCP_SYN_SENT,
19 TCP_SYN_RECV,
20 TCP_FIN_WAIT1,
21 TCP_FIN_WAIT2,
22 TCP_TIME_WAIT,
23 TCP_CLOSE,
24 TCP_CLOSE_WAIT,
25 TCP_LAST_ACK,
26 TCP_LISTEN,
27 TCP_CLOSING, /* Now a valid state */
28
29 TCP_MAX_STATES /* Leave at the end! */
30};
31
32#define TCP_STATE_MASK 0xF
33
34#endif /* _LINUX_TCP_STATES_H */
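Note: code that needs to test membership in a set of these states typically uses one-bit-per-state masks, the same (1 << state) pattern this patch uses for icsk_ca_state elsewhere. A userspace sketch; the TCPF_-style flag naming here is assumed for illustration, not taken from this header:

#include <stdio.h>

enum {
        TCP_ESTABLISHED = 1, TCP_SYN_SENT, TCP_SYN_RECV, TCP_FIN_WAIT1,
        TCP_FIN_WAIT2, TCP_TIME_WAIT, TCP_CLOSE, TCP_CLOSE_WAIT,
        TCP_LAST_ACK, TCP_LISTEN, TCP_CLOSING, TCP_MAX_STATES
};

/* one-bit-per-state masks (TCPF_*-style; names assumed for the sketch) */
#define F(state)      (1 << (state))
#define FLAGS_WAITING (F(TCP_FIN_WAIT1) | F(TCP_FIN_WAIT2) | F(TCP_TIME_WAIT))

int main(void)
{
        int sk_state = TCP_TIME_WAIT;

        /* membership in a set of states is a single AND */
        printf("%s\n", (F(sk_state) & FLAGS_WAITING) ? "waiting" : "other");
        return 0;
}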
diff --git a/include/net/udp.h b/include/net/udp.h
index ac229b761dbc..107b9d791a1f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -94,6 +94,11 @@ struct udp_iter_state {
94 struct seq_operations seq_ops; 94 struct seq_operations seq_ops;
95}; 95};
96 96
97#ifdef CONFIG_PROC_FS
97extern int udp_proc_register(struct udp_seq_afinfo *afinfo); 98extern int udp_proc_register(struct udp_seq_afinfo *afinfo);
98extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo); 99extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo);
100
101extern int udp4_proc_init(void);
102extern void udp4_proc_exit(void);
103#endif
99#endif /* _UDP_H */ 104#endif /* _UDP_H */
diff --git a/include/net/x25.h b/include/net/x25.h
index 8b39b98876e8..fee62ff8c194 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -175,7 +175,7 @@ extern void x25_kill_by_neigh(struct x25_neigh *);
175 175
176/* x25_dev.c */ 176/* x25_dev.c */
177extern void x25_send_frame(struct sk_buff *, struct x25_neigh *); 177extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
178extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *); 178extern int x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
179extern void x25_establish_link(struct x25_neigh *); 179extern void x25_establish_link(struct x25_neigh *);
180extern void x25_terminate_link(struct x25_neigh *); 180extern void x25_terminate_link(struct x25_neigh *);
181 181
diff --git a/include/net/x25device.h b/include/net/x25device.h
index d45ae883bd1d..1a318374faef 100644
--- a/include/net/x25device.h
+++ b/include/net/x25device.h
@@ -8,7 +8,6 @@
8static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev) 8static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev)
9{ 9{
10 skb->mac.raw = skb->data; 10 skb->mac.raw = skb->data;
11 skb->input_dev = skb->dev = dev;
12 skb->pkt_type = PACKET_HOST; 11 skb->pkt_type = PACKET_HOST;
13 12
14 return htons(ETH_P_X25); 13 return htons(ETH_P_X25);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 868ef88ef971..a9d0d8c5dfbf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -818,7 +818,6 @@ extern void xfrm6_init(void);
818extern void xfrm6_fini(void); 818extern void xfrm6_fini(void);
819extern void xfrm_state_init(void); 819extern void xfrm_state_init(void);
820extern void xfrm4_state_init(void); 820extern void xfrm4_state_init(void);
821extern void xfrm4_state_fini(void);
822extern void xfrm6_state_init(void); 821extern void xfrm6_state_init(void);
823extern void xfrm6_state_fini(void); 822extern void xfrm6_state_fini(void);
824 823
diff --git a/drivers/infiniband/include/ib_cache.h b/include/rdma/ib_cache.h
index 44ef6bb9b9df..5bf9834f7dca 100644
--- a/drivers/infiniband/include/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +37,7 @@
35#ifndef _IB_CACHE_H 37#ifndef _IB_CACHE_H
36#define _IB_CACHE_H 38#define _IB_CACHE_H
37 39
38#include <ib_verbs.h> 40#include <rdma/ib_verbs.h>
39 41
40/** 42/**
41 * ib_get_cached_gid - Returns a cached GID table entry 43 * ib_get_cached_gid - Returns a cached GID table entry
diff --git a/drivers/infiniband/include/ib_cm.h b/include/rdma/ib_cm.h
index da650115e79a..77fe9039209b 100644
--- a/drivers/infiniband/include/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -37,8 +37,8 @@
37#if !defined(IB_CM_H) 37#if !defined(IB_CM_H)
38#define IB_CM_H 38#define IB_CM_H
39 39
40#include <ib_mad.h> 40#include <rdma/ib_mad.h>
41#include <ib_sa.h> 41#include <rdma/ib_sa.h>
42 42
43enum ib_cm_state { 43enum ib_cm_state {
44 IB_CM_IDLE, 44 IB_CM_IDLE,
@@ -115,7 +115,7 @@ struct ib_cm_req_event_param {
115 struct ib_sa_path_rec *primary_path; 115 struct ib_sa_path_rec *primary_path;
116 struct ib_sa_path_rec *alternate_path; 116 struct ib_sa_path_rec *alternate_path;
117 117
118 u64 remote_ca_guid; 118 __be64 remote_ca_guid;
119 u32 remote_qkey; 119 u32 remote_qkey;
120 u32 remote_qpn; 120 u32 remote_qpn;
121 enum ib_qp_type qp_type; 121 enum ib_qp_type qp_type;
@@ -132,7 +132,7 @@ struct ib_cm_req_event_param {
132}; 132};
133 133
134struct ib_cm_rep_event_param { 134struct ib_cm_rep_event_param {
135 u64 remote_ca_guid; 135 __be64 remote_ca_guid;
136 u32 remote_qkey; 136 u32 remote_qkey;
137 u32 remote_qpn; 137 u32 remote_qpn;
138 u32 starting_psn; 138 u32 starting_psn;
@@ -146,39 +146,39 @@ struct ib_cm_rep_event_param {
146}; 146};
147 147
148enum ib_cm_rej_reason { 148enum ib_cm_rej_reason {
149 IB_CM_REJ_NO_QP = __constant_htons(1), 149 IB_CM_REJ_NO_QP = 1,
150 IB_CM_REJ_NO_EEC = __constant_htons(2), 150 IB_CM_REJ_NO_EEC = 2,
151 IB_CM_REJ_NO_RESOURCES = __constant_htons(3), 151 IB_CM_REJ_NO_RESOURCES = 3,
152 IB_CM_REJ_TIMEOUT = __constant_htons(4), 152 IB_CM_REJ_TIMEOUT = 4,
153 IB_CM_REJ_UNSUPPORTED = __constant_htons(5), 153 IB_CM_REJ_UNSUPPORTED = 5,
154 IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6), 154 IB_CM_REJ_INVALID_COMM_ID = 6,
155 IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7), 155 IB_CM_REJ_INVALID_COMM_INSTANCE = 7,
156 IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8), 156 IB_CM_REJ_INVALID_SERVICE_ID = 8,
157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9), 157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9,
158 IB_CM_REJ_STALE_CONN = __constant_htons(10), 158 IB_CM_REJ_STALE_CONN = 10,
159 IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11), 159 IB_CM_REJ_RDC_NOT_EXIST = 11,
160 IB_CM_REJ_INVALID_GID = __constant_htons(12), 160 IB_CM_REJ_INVALID_GID = 12,
161 IB_CM_REJ_INVALID_LID = __constant_htons(13), 161 IB_CM_REJ_INVALID_LID = 13,
162 IB_CM_REJ_INVALID_SL = __constant_htons(14), 162 IB_CM_REJ_INVALID_SL = 14,
163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15), 163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15,
164 IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16), 164 IB_CM_REJ_INVALID_HOP_LIMIT = 16,
165 IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17), 165 IB_CM_REJ_INVALID_PACKET_RATE = 17,
166 IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18), 166 IB_CM_REJ_INVALID_ALT_GID = 18,
167 IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19), 167 IB_CM_REJ_INVALID_ALT_LID = 19,
168 IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20), 168 IB_CM_REJ_INVALID_ALT_SL = 20,
169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21), 169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21,
170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22), 170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22,
171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23), 171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23,
172 IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24), 172 IB_CM_REJ_PORT_CM_REDIRECT = 24,
173 IB_CM_REJ_PORT_REDIRECT = __constant_htons(25), 173 IB_CM_REJ_PORT_REDIRECT = 25,
174 IB_CM_REJ_INVALID_MTU = __constant_htons(26), 174 IB_CM_REJ_INVALID_MTU = 26,
175 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27), 175 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27,
176 IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28), 176 IB_CM_REJ_CONSUMER_DEFINED = 28,
177 IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29), 177 IB_CM_REJ_INVALID_RNR_RETRY = 29,
178 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30), 178 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
179 IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31), 179 IB_CM_REJ_INVALID_CLASS_VERSION = 31,
180 IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32), 180 IB_CM_REJ_INVALID_FLOW_LABEL = 32,
181 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33) 181 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
182}; 182};
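Note: dropping __constant_htons() turns the reject codes into plain host-order values; the byte swap now happens exactly once, where a code is written to or read from the wire, instead of being baked into every enum constant. A userspace sketch of that boundary conversion with two of the codes:

#include <stdio.h>
#include <arpa/inet.h>

/* host-order values, as after the patch */
enum { REJ_NO_QP = 1, REJ_TIMEOUT = 4 };

int main(void)
{
        unsigned short wire = htons(REJ_TIMEOUT); /* convert when sending */

        /* ...and convert back exactly once when parsing */
        printf("%s\n", ntohs(wire) == REJ_TIMEOUT ? "ok" : "mismatch");
        return 0;
}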
183 183
184struct ib_cm_rej_event_param { 184struct ib_cm_rej_event_param {
@@ -222,8 +222,7 @@ struct ib_cm_sidr_req_event_param {
222 struct ib_cm_id *listen_id; 222 struct ib_cm_id *listen_id;
223 struct ib_device *device; 223 struct ib_device *device;
224 u8 port; 224 u8 port;
225 225 u16 pkey;
226 u16 pkey;
227}; 226};
228 227
229enum ib_cm_sidr_status { 228enum ib_cm_sidr_status {
@@ -285,12 +284,12 @@ typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
285struct ib_cm_id { 284struct ib_cm_id {
286 ib_cm_handler cm_handler; 285 ib_cm_handler cm_handler;
287 void *context; 286 void *context;
288 u64 service_id; 287 __be64 service_id;
289 u64 service_mask; 288 __be64 service_mask;
290 enum ib_cm_state state; /* internal CM/debug use */ 289 enum ib_cm_state state; /* internal CM/debug use */
291 enum ib_cm_lap_state lap_state; /* internal CM/debug use */ 290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
292 u32 local_id; 291 __be32 local_id;
293 u32 remote_id; 292 __be32 remote_id;
294}; 293};
295 294
296/** 295/**
@@ -330,13 +329,13 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
330 * IB_CM_ASSIGN_SERVICE_ID. 329 * IB_CM_ASSIGN_SERVICE_ID.
331 */ 330 */
332int ib_cm_listen(struct ib_cm_id *cm_id, 331int ib_cm_listen(struct ib_cm_id *cm_id,
333 u64 service_id, 332 __be64 service_id,
334 u64 service_mask); 333 __be64 service_mask);
335 334
336struct ib_cm_req_param { 335struct ib_cm_req_param {
337 struct ib_sa_path_rec *primary_path; 336 struct ib_sa_path_rec *primary_path;
338 struct ib_sa_path_rec *alternate_path; 337 struct ib_sa_path_rec *alternate_path;
339 u64 service_id; 338 __be64 service_id;
340 u32 qp_num; 339 u32 qp_num;
341 enum ib_qp_type qp_type; 340 enum ib_qp_type qp_type;
342 u32 starting_psn; 341 u32 starting_psn;
@@ -528,7 +527,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id,
528 527
529struct ib_cm_sidr_req_param { 528struct ib_cm_sidr_req_param {
530 struct ib_sa_path_rec *path; 529 struct ib_sa_path_rec *path;
531 u64 service_id; 530 __be64 service_id;
532 int timeout_ms; 531 int timeout_ms;
533 const void *private_data; 532 const void *private_data;
534 u8 private_data_len; 533 u8 private_data_len;
diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 6c9e24d6e144..86b7e93f198b 100644
--- a/drivers/infiniband/include/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -36,7 +36,7 @@
36#if !defined(IB_FMR_POOL_H) 36#if !defined(IB_FMR_POOL_H)
37#define IB_FMR_POOL_H 37#define IB_FMR_POOL_H
38 38
39#include <ib_verbs.h> 39#include <rdma/ib_verbs.h>
40 40
41struct ib_fmr_pool; 41struct ib_fmr_pool;
42 42
diff --git a/drivers/infiniband/include/ib_mad.h b/include/rdma/ib_mad.h
index 491b6f25b3b8..fc6b1c18ffc6 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -41,7 +41,7 @@
41 41
42#include <linux/pci.h> 42#include <linux/pci.h>
43 43
44#include <ib_verbs.h> 44#include <rdma/ib_verbs.h>
45 45
46/* Management base version */ 46/* Management base version */
47#define IB_MGMT_BASE_VERSION 1 47#define IB_MGMT_BASE_VERSION 1
@@ -90,6 +90,7 @@
90 90
91#define IB_MGMT_RMPP_STATUS_SUCCESS 0 91#define IB_MGMT_RMPP_STATUS_SUCCESS 0
92#define IB_MGMT_RMPP_STATUS_RESX 1 92#define IB_MGMT_RMPP_STATUS_RESX 1
93#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
93#define IB_MGMT_RMPP_STATUS_T2L 118 94#define IB_MGMT_RMPP_STATUS_T2L 118
94#define IB_MGMT_RMPP_STATUS_BAD_LEN 119 95#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
95#define IB_MGMT_RMPP_STATUS_BAD_SEG 120 96#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
@@ -100,6 +101,7 @@
100#define IB_MGMT_RMPP_STATUS_UNV 125 101#define IB_MGMT_RMPP_STATUS_UNV 125
101#define IB_MGMT_RMPP_STATUS_TMR 126 102#define IB_MGMT_RMPP_STATUS_TMR 126
102#define IB_MGMT_RMPP_STATUS_UNSPEC 127 103#define IB_MGMT_RMPP_STATUS_UNSPEC 127
104#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
103 105
104#define IB_QP0 0 106#define IB_QP0 0
105#define IB_QP1 __constant_htonl(1) 107#define IB_QP1 __constant_htonl(1)
@@ -111,12 +113,12 @@ struct ib_mad_hdr {
111 u8 mgmt_class; 113 u8 mgmt_class;
112 u8 class_version; 114 u8 class_version;
113 u8 method; 115 u8 method;
114 u16 status; 116 __be16 status;
115 u16 class_specific; 117 __be16 class_specific;
116 u64 tid; 118 __be64 tid;
117 u16 attr_id; 119 __be16 attr_id;
118 u16 resv; 120 __be16 resv;
119 u32 attr_mod; 121 __be32 attr_mod;
120}; 122};
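Note: the __be16/__be32/__be64 annotations let sparse flag any use of a wire field that skips the byte swap. Userspace has no bitwise-checked types, but the discipline can be mirrored with explicit conversions at the boundary; this sketch uses glibc's <endian.h> helpers (htobe16/htobe64/be64toh, an assumption outside strict POSIX):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>   /* htobe16/htobe64: glibc/BSD extension */

/* Mirrors the idea behind __be16/__be64: keep wire fields in a
 * distinct big-endian role and convert explicitly at the boundary. */
struct mad_hdr_sketch {
        uint16_t status;    /* big-endian on the wire */
        uint64_t tid;       /* big-endian on the wire */
};

int main(void)
{
        struct mad_hdr_sketch h;

        h.status = htobe16(0);            /* explicit conversion... */
        h.tid    = htobe64(0x1234ULL);    /* ...at the assignment */
        printf("tid (host order) = 0x%llx\n",
               (unsigned long long)be64toh(h.tid));
        return 0;
}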
121 123
122struct ib_rmpp_hdr { 124struct ib_rmpp_hdr {
@@ -124,8 +126,8 @@ struct ib_rmpp_hdr {
124 u8 rmpp_type; 126 u8 rmpp_type;
125 u8 rmpp_rtime_flags; 127 u8 rmpp_rtime_flags;
126 u8 rmpp_status; 128 u8 rmpp_status;
127 u32 seg_num; 129 __be32 seg_num;
128 u32 paylen_newwin; 130 __be32 paylen_newwin;
129}; 131};
130 132
131typedef u64 __bitwise ib_sa_comp_mask; 133typedef u64 __bitwise ib_sa_comp_mask;
@@ -139,9 +141,9 @@ typedef u64 __bitwise ib_sa_comp_mask;
139 * the wire so we can't change the layout) 141 * the wire so we can't change the layout)
140 */ 142 */
141struct ib_sa_hdr { 143struct ib_sa_hdr {
142 u64 sm_key; 144 __be64 sm_key;
143 u16 attr_offset; 145 __be16 attr_offset;
144 u16 reserved; 146 __be16 reserved;
145 ib_sa_comp_mask comp_mask; 147 ib_sa_comp_mask comp_mask;
146} __attribute__ ((packed)); 148} __attribute__ ((packed));
147 149
diff --git a/drivers/infiniband/include/ib_pack.h b/include/rdma/ib_pack.h
index fe480f3e8654..f926020d6331 100644
--- a/drivers/infiniband/include/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -35,7 +35,7 @@
35#ifndef IB_PACK_H 35#ifndef IB_PACK_H
36#define IB_PACK_H 36#define IB_PACK_H
37 37
38#include <ib_verbs.h> 38#include <rdma/ib_verbs.h>
39 39
40enum { 40enum {
41 IB_LRH_BYTES = 8, 41 IB_LRH_BYTES = 8,
diff --git a/drivers/infiniband/include/ib_sa.h b/include/rdma/ib_sa.h
index 6d999f7b5d93..c022edfc49da 100644
--- a/drivers/infiniband/include/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -38,8 +38,8 @@
38 38
39#include <linux/compiler.h> 39#include <linux/compiler.h>
40 40
41#include <ib_verbs.h> 41#include <rdma/ib_verbs.h>
42#include <ib_mad.h> 42#include <rdma/ib_mad.h>
43 43
44enum { 44enum {
45 IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */ 45 IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */
@@ -133,16 +133,16 @@ struct ib_sa_path_rec {
133 /* reserved */ 133 /* reserved */
134 union ib_gid dgid; 134 union ib_gid dgid;
135 union ib_gid sgid; 135 union ib_gid sgid;
136 u16 dlid; 136 __be16 dlid;
137 u16 slid; 137 __be16 slid;
138 int raw_traffic; 138 int raw_traffic;
139 /* reserved */ 139 /* reserved */
140 u32 flow_label; 140 __be32 flow_label;
141 u8 hop_limit; 141 u8 hop_limit;
142 u8 traffic_class; 142 u8 traffic_class;
143 int reversible; 143 int reversible;
144 u8 numb_path; 144 u8 numb_path;
145 u16 pkey; 145 __be16 pkey;
146 /* reserved */ 146 /* reserved */
147 u8 sl; 147 u8 sl;
148 u8 mtu_selector; 148 u8 mtu_selector;
@@ -176,18 +176,18 @@ struct ib_sa_path_rec {
176struct ib_sa_mcmember_rec { 176struct ib_sa_mcmember_rec {
177 union ib_gid mgid; 177 union ib_gid mgid;
178 union ib_gid port_gid; 178 union ib_gid port_gid;
179 u32 qkey; 179 __be32 qkey;
180 u16 mlid; 180 __be16 mlid;
181 u8 mtu_selector; 181 u8 mtu_selector;
182 u8 mtu; 182 u8 mtu;
183 u8 traffic_class; 183 u8 traffic_class;
184 u16 pkey; 184 __be16 pkey;
185 u8 rate_selector; 185 u8 rate_selector;
186 u8 rate; 186 u8 rate;
187 u8 packet_life_time_selector; 187 u8 packet_life_time_selector;
188 u8 packet_life_time; 188 u8 packet_life_time;
189 u8 sl; 189 u8 sl;
190 u32 flow_label; 190 __be32 flow_label;
191 u8 hop_limit; 191 u8 hop_limit;
192 u8 scope; 192 u8 scope;
193 u8 join_state; 193 u8 join_state;
@@ -238,7 +238,7 @@ struct ib_sa_mcmember_rec {
238struct ib_sa_service_rec { 238struct ib_sa_service_rec {
239 u64 id; 239 u64 id;
240 union ib_gid gid; 240 union ib_gid gid;
241 u16 pkey; 241 __be16 pkey;
242 /* reserved */ 242 /* reserved */
243 u32 lease; 243 u32 lease;
244 u8 key[16]; 244 u8 key[16];
diff --git a/drivers/infiniband/include/ib_smi.h b/include/rdma/ib_smi.h
index ca8216514963..87f60737f695 100644
--- a/drivers/infiniband/include/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -39,9 +39,7 @@
39#if !defined( IB_SMI_H ) 39#if !defined( IB_SMI_H )
40#define IB_SMI_H 40#define IB_SMI_H
41 41
42#include <ib_mad.h> 42#include <rdma/ib_mad.h>
43
44#define IB_LID_PERMISSIVE 0xFFFF
45 43
46#define IB_SMP_DATA_SIZE 64 44#define IB_SMP_DATA_SIZE 64
47#define IB_SMP_MAX_PATH_HOPS 64 45#define IB_SMP_MAX_PATH_HOPS 64
@@ -51,16 +49,16 @@ struct ib_smp {
51 u8 mgmt_class; 49 u8 mgmt_class;
52 u8 class_version; 50 u8 class_version;
53 u8 method; 51 u8 method;
54 u16 status; 52 __be16 status;
55 u8 hop_ptr; 53 u8 hop_ptr;
56 u8 hop_cnt; 54 u8 hop_cnt;
57 u64 tid; 55 __be64 tid;
58 u16 attr_id; 56 __be16 attr_id;
59 u16 resv; 57 __be16 resv;
60 u32 attr_mod; 58 __be32 attr_mod;
61 u64 mkey; 59 __be64 mkey;
62 u16 dr_slid; 60 __be16 dr_slid;
63 u16 dr_dlid; 61 __be16 dr_dlid;
64 u8 reserved[28]; 62 u8 reserved[28];
65 u8 data[IB_SMP_DATA_SIZE]; 63 u8 data[IB_SMP_DATA_SIZE];
66 u8 initial_path[IB_SMP_MAX_PATH_HOPS]; 64 u8 initial_path[IB_SMP_MAX_PATH_HOPS];
diff --git a/drivers/infiniband/include/ib_user_cm.h b/include/rdma/ib_user_cm.h
index 500b1af6ff77..72182d16778b 100644
--- a/drivers/infiniband/include/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -88,15 +88,15 @@ struct ib_ucm_attr_id {
88}; 88};
89 89
90struct ib_ucm_attr_id_resp { 90struct ib_ucm_attr_id_resp {
91 __u64 service_id; 91 __be64 service_id;
92 __u64 service_mask; 92 __be64 service_mask;
93 __u32 local_id; 93 __be32 local_id;
94 __u32 remote_id; 94 __be32 remote_id;
95}; 95};
96 96
97struct ib_ucm_listen { 97struct ib_ucm_listen {
98 __u64 service_id; 98 __be64 service_id;
99 __u64 service_mask; 99 __be64 service_mask;
100 __u32 id; 100 __u32 id;
101}; 101};
102 102
@@ -114,13 +114,13 @@ struct ib_ucm_private_data {
114struct ib_ucm_path_rec { 114struct ib_ucm_path_rec {
115 __u8 dgid[16]; 115 __u8 dgid[16];
116 __u8 sgid[16]; 116 __u8 sgid[16];
117 __u16 dlid; 117 __be16 dlid;
118 __u16 slid; 118 __be16 slid;
119 __u32 raw_traffic; 119 __u32 raw_traffic;
120 __u32 flow_label; 120 __be32 flow_label;
121 __u32 reversible; 121 __u32 reversible;
122 __u32 mtu; 122 __u32 mtu;
123 __u16 pkey; 123 __be16 pkey;
124 __u8 hop_limit; 124 __u8 hop_limit;
125 __u8 traffic_class; 125 __u8 traffic_class;
126 __u8 numb_path; 126 __u8 numb_path;
@@ -138,7 +138,7 @@ struct ib_ucm_req {
138 __u32 qpn; 138 __u32 qpn;
139 __u32 qp_type; 139 __u32 qp_type;
140 __u32 psn; 140 __u32 psn;
141 __u64 sid; 141 __be64 sid;
142 __u64 data; 142 __u64 data;
143 __u64 primary_path; 143 __u64 primary_path;
144 __u64 alternate_path; 144 __u64 alternate_path;
@@ -200,7 +200,7 @@ struct ib_ucm_lap {
200struct ib_ucm_sidr_req { 200struct ib_ucm_sidr_req {
201 __u32 id; 201 __u32 id;
202 __u32 timeout; 202 __u32 timeout;
203 __u64 sid; 203 __be64 sid;
204 __u64 data; 204 __u64 data;
205 __u64 path; 205 __u64 path;
206 __u16 pkey; 206 __u16 pkey;
@@ -237,7 +237,7 @@ struct ib_ucm_req_event_resp {
237 /* port */ 237 /* port */
238 struct ib_ucm_path_rec primary_path; 238 struct ib_ucm_path_rec primary_path;
239 struct ib_ucm_path_rec alternate_path; 239 struct ib_ucm_path_rec alternate_path;
240 __u64 remote_ca_guid; 240 __be64 remote_ca_guid;
241 __u32 remote_qkey; 241 __u32 remote_qkey;
242 __u32 remote_qpn; 242 __u32 remote_qpn;
243 __u32 qp_type; 243 __u32 qp_type;
@@ -253,7 +253,7 @@ struct ib_ucm_req_event_resp {
253}; 253};
254 254
255struct ib_ucm_rep_event_resp { 255struct ib_ucm_rep_event_resp {
256 __u64 remote_ca_guid; 256 __be64 remote_ca_guid;
257 __u32 remote_qkey; 257 __u32 remote_qkey;
258 __u32 remote_qpn; 258 __u32 remote_qpn;
259 __u32 starting_psn; 259 __u32 starting_psn;
diff --git a/drivers/infiniband/include/ib_user_mad.h b/include/rdma/ib_user_mad.h
index a9a56b50aacc..44537aa32e62 100644
--- a/drivers/infiniband/include/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -70,8 +70,6 @@
70 * @traffic_class - Traffic class in GRH 70 * @traffic_class - Traffic class in GRH
71 * @gid - Remote GID in GRH 71 * @gid - Remote GID in GRH
72 * @flow_label - Flow label in GRH 72 * @flow_label - Flow label in GRH
73 *
74 * All multi-byte quantities are stored in network (big endian) byte order.
75 */ 73 */
76struct ib_user_mad_hdr { 74struct ib_user_mad_hdr {
77 __u32 id; 75 __u32 id;
@@ -79,9 +77,9 @@ struct ib_user_mad_hdr {
79 __u32 timeout_ms; 77 __u32 timeout_ms;
80 __u32 retries; 78 __u32 retries;
81 __u32 length; 79 __u32 length;
82 __u32 qpn; 80 __be32 qpn;
83 __u32 qkey; 81 __be32 qkey;
84 __u16 lid; 82 __be16 lid;
85 __u8 sl; 83 __u8 sl;
86 __u8 path_bits; 84 __u8 path_bits;
87 __u8 grh_present; 85 __u8 grh_present;
@@ -89,7 +87,7 @@ struct ib_user_mad_hdr {
89 __u8 hop_limit; 87 __u8 hop_limit;
90 __u8 traffic_class; 88 __u8 traffic_class;
91 __u8 gid[16]; 89 __u8 gid[16];
92 __u32 flow_label; 90 __be32 flow_label;
93}; 91};
94 92
95/** 93/**
diff --git a/drivers/infiniband/include/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 7c613706af72..7ebb01c8f996 100644
--- a/drivers/infiniband/include/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -78,7 +78,12 @@ enum {
78 IB_USER_VERBS_CMD_POST_SEND, 78 IB_USER_VERBS_CMD_POST_SEND,
79 IB_USER_VERBS_CMD_POST_RECV, 79 IB_USER_VERBS_CMD_POST_RECV,
80 IB_USER_VERBS_CMD_ATTACH_MCAST, 80 IB_USER_VERBS_CMD_ATTACH_MCAST,
81 IB_USER_VERBS_CMD_DETACH_MCAST 81 IB_USER_VERBS_CMD_DETACH_MCAST,
82 IB_USER_VERBS_CMD_CREATE_SRQ,
83 IB_USER_VERBS_CMD_MODIFY_SRQ,
84 IB_USER_VERBS_CMD_QUERY_SRQ,
85 IB_USER_VERBS_CMD_DESTROY_SRQ,
86 IB_USER_VERBS_CMD_POST_SRQ_RECV
82}; 87};
83 88
84/* 89/*
@@ -143,8 +148,8 @@ struct ib_uverbs_query_device {
143 148
144struct ib_uverbs_query_device_resp { 149struct ib_uverbs_query_device_resp {
145 __u64 fw_ver; 150 __u64 fw_ver;
146 __u64 node_guid; 151 __be64 node_guid;
147 __u64 sys_image_guid; 152 __be64 sys_image_guid;
148 __u64 max_mr_size; 153 __u64 max_mr_size;
149 __u64 page_size_cap; 154 __u64 page_size_cap;
150 __u32 vendor_id; 155 __u32 vendor_id;
@@ -386,4 +391,32 @@ struct ib_uverbs_detach_mcast {
386 __u64 driver_data[0]; 391 __u64 driver_data[0];
387}; 392};
388 393
394struct ib_uverbs_create_srq {
395 __u64 response;
396 __u64 user_handle;
397 __u32 pd_handle;
398 __u32 max_wr;
399 __u32 max_sge;
400 __u32 srq_limit;
401 __u64 driver_data[0];
402};
403
404struct ib_uverbs_create_srq_resp {
405 __u32 srq_handle;
406};
407
408struct ib_uverbs_modify_srq {
409 __u32 srq_handle;
410 __u32 attr_mask;
411 __u32 max_wr;
412 __u32 max_sge;
413 __u32 srq_limit;
414 __u32 reserved;
415 __u64 driver_data[0];
416};
417
418struct ib_uverbs_destroy_srq {
419 __u32 srq_handle;
420};
421
389#endif /* IB_USER_VERBS_H */ 422#endif /* IB_USER_VERBS_H */
diff --git a/drivers/infiniband/include/ib_verbs.h b/include/rdma/ib_verbs.h
index 5d24edaa66e6..e16cf94870f2 100644
--- a/drivers/infiniband/include/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 * Copyright (c) 2005 Cisco Systems. All rights reserved.
8 * 9 *
9 * This software is available to you under a choice of one of two 10 * This software is available to you under a choice of one of two
@@ -50,8 +51,8 @@
50union ib_gid { 51union ib_gid {
51 u8 raw[16]; 52 u8 raw[16];
52 struct { 53 struct {
53 u64 subnet_prefix; 54 __be64 subnet_prefix;
54 u64 interface_id; 55 __be64 interface_id;
55 } global; 56 } global;
56}; 57};
57 58
@@ -87,8 +88,8 @@ enum ib_atomic_cap {
87 88
88struct ib_device_attr { 89struct ib_device_attr {
89 u64 fw_ver; 90 u64 fw_ver;
90 u64 node_guid; 91 __be64 node_guid;
91 u64 sys_image_guid; 92 __be64 sys_image_guid;
92 u64 max_mr_size; 93 u64 max_mr_size;
93 u64 page_size_cap; 94 u64 page_size_cap;
94 u32 vendor_id; 95 u32 vendor_id;
@@ -255,7 +256,10 @@ enum ib_event_type {
255 IB_EVENT_PORT_ERR, 256 IB_EVENT_PORT_ERR,
256 IB_EVENT_LID_CHANGE, 257 IB_EVENT_LID_CHANGE,
257 IB_EVENT_PKEY_CHANGE, 258 IB_EVENT_PKEY_CHANGE,
258 IB_EVENT_SM_CHANGE 259 IB_EVENT_SM_CHANGE,
260 IB_EVENT_SRQ_ERR,
261 IB_EVENT_SRQ_LIMIT_REACHED,
262 IB_EVENT_QP_LAST_WQE_REACHED
259}; 263};
260 264
261struct ib_event { 265struct ib_event {
@@ -263,6 +267,7 @@ struct ib_event {
263 union { 267 union {
264 struct ib_cq *cq; 268 struct ib_cq *cq;
265 struct ib_qp *qp; 269 struct ib_qp *qp;
270 struct ib_srq *srq;
266 u8 port_num; 271 u8 port_num;
267 } element; 272 } element;
268 enum ib_event_type event; 273 enum ib_event_type event;
@@ -290,8 +295,8 @@ struct ib_global_route {
290}; 295};
291 296
292struct ib_grh { 297struct ib_grh {
293 u32 version_tclass_flow; 298 __be32 version_tclass_flow;
294 u16 paylen; 299 __be16 paylen;
295 u8 next_hdr; 300 u8 next_hdr;
296 u8 hop_limit; 301 u8 hop_limit;
297 union ib_gid sgid; 302 union ib_gid sgid;
@@ -302,6 +307,8 @@ enum {
302 IB_MULTICAST_QPN = 0xffffff 307 IB_MULTICAST_QPN = 0xffffff
303}; 308};
304 309
310#define IB_LID_PERMISSIVE __constant_htons(0xFFFF)
311
305enum ib_ah_flags { 312enum ib_ah_flags {
306 IB_AH_GRH = 1 313 IB_AH_GRH = 1
307}; 314};
@@ -383,6 +390,23 @@ enum ib_cq_notify {
383 IB_CQ_NEXT_COMP 390 IB_CQ_NEXT_COMP
384}; 391};
385 392
393enum ib_srq_attr_mask {
394 IB_SRQ_MAX_WR = 1 << 0,
395 IB_SRQ_LIMIT = 1 << 1,
396};
397
398struct ib_srq_attr {
399 u32 max_wr;
400 u32 max_sge;
401 u32 srq_limit;
402};
403
404struct ib_srq_init_attr {
405 void (*event_handler)(struct ib_event *, void *);
406 void *srq_context;
407 struct ib_srq_attr attr;
408};
409
386struct ib_qp_cap { 410struct ib_qp_cap {
387 u32 max_send_wr; 411 u32 max_send_wr;
388 u32 max_recv_wr; 412 u32 max_recv_wr;
@@ -710,10 +734,11 @@ struct ib_cq {
710}; 734};
711 735
712struct ib_srq { 736struct ib_srq {
713 struct ib_device *device; 737 struct ib_device *device;
714 struct ib_uobject *uobject; 738 struct ib_pd *pd;
715 struct ib_pd *pd; 739 struct ib_uobject *uobject;
716 void *srq_context; 740 void (*event_handler)(struct ib_event *, void *);
741 void *srq_context;
717 atomic_t usecnt; 742 atomic_t usecnt;
718}; 743};
719 744
@@ -827,6 +852,18 @@ struct ib_device {
827 int (*query_ah)(struct ib_ah *ah, 852 int (*query_ah)(struct ib_ah *ah,
828 struct ib_ah_attr *ah_attr); 853 struct ib_ah_attr *ah_attr);
829 int (*destroy_ah)(struct ib_ah *ah); 854 int (*destroy_ah)(struct ib_ah *ah);
855 struct ib_srq * (*create_srq)(struct ib_pd *pd,
856 struct ib_srq_init_attr *srq_init_attr,
857 struct ib_udata *udata);
858 int (*modify_srq)(struct ib_srq *srq,
859 struct ib_srq_attr *srq_attr,
860 enum ib_srq_attr_mask srq_attr_mask);
861 int (*query_srq)(struct ib_srq *srq,
862 struct ib_srq_attr *srq_attr);
863 int (*destroy_srq)(struct ib_srq *srq);
864 int (*post_srq_recv)(struct ib_srq *srq,
865 struct ib_recv_wr *recv_wr,
866 struct ib_recv_wr **bad_recv_wr);
830 struct ib_qp * (*create_qp)(struct ib_pd *pd, 867 struct ib_qp * (*create_qp)(struct ib_pd *pd,
831 struct ib_qp_init_attr *qp_init_attr, 868 struct ib_qp_init_attr *qp_init_attr,
832 struct ib_udata *udata); 869 struct ib_udata *udata);
@@ -1039,6 +1076,65 @@ int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1039int ib_destroy_ah(struct ib_ah *ah); 1076int ib_destroy_ah(struct ib_ah *ah);
1040 1077
1041/** 1078/**
1079 * ib_create_srq - Creates a SRQ associated with the specified protection
1080 * domain.
1081 * @pd: The protection domain associated with the SRQ.
1082 * @srq_init_attr: A list of initial attributes required to create the SRQ.
1083 *
 1084 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1085 * requested size of the SRQ, and set to the actual values allocated
1086 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1087 * will always be at least as large as the requested values.
1088 */
1089struct ib_srq *ib_create_srq(struct ib_pd *pd,
1090 struct ib_srq_init_attr *srq_init_attr);
1091
1092/**
1093 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1094 * @srq: The SRQ to modify.
1095 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
1096 * the current values of selected SRQ attributes are returned.
1097 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1098 * are being modified.
1099 *
1100 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1101 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1102 * the number of receives queued drops below the limit.
1103 */
1104int ib_modify_srq(struct ib_srq *srq,
1105 struct ib_srq_attr *srq_attr,
1106 enum ib_srq_attr_mask srq_attr_mask);
1107
1108/**
1109 * ib_query_srq - Returns the attribute list and current values for the
1110 * specified SRQ.
1111 * @srq: The SRQ to query.
1112 * @srq_attr: The attributes of the specified SRQ.
1113 */
1114int ib_query_srq(struct ib_srq *srq,
1115 struct ib_srq_attr *srq_attr);
1116
1117/**
1118 * ib_destroy_srq - Destroys the specified SRQ.
1119 * @srq: The SRQ to destroy.
1120 */
1121int ib_destroy_srq(struct ib_srq *srq);
1122
1123/**
1124 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1125 * @srq: The SRQ to post the work request on.
1126 * @recv_wr: A list of work requests to post on the receive queue.
1127 * @bad_recv_wr: On an immediate failure, this parameter will reference
 1128 * the work request that failed to be posted on the SRQ.
1129 */
1130static inline int ib_post_srq_recv(struct ib_srq *srq,
1131 struct ib_recv_wr *recv_wr,
1132 struct ib_recv_wr **bad_recv_wr)
1133{
1134 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1135}
1136
1137/**
1042 * ib_create_qp - Creates a QP associated with the specified protection 1138 * ib_create_qp - Creates a QP associated with the specified protection
1043 * domain. 1139 * domain.
1044 * @pd: The protection domain associated with the QP. 1140 * @pd: The protection domain associated with the QP.
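A hypothetical consumer sketch of the SRQ verbs declared above; the example_* names and the numeric limits (256 work requests, limit 16) are illustrative assumptions, not part of this patch:

	/* assumes <rdma/ib_verbs.h> and <linux/err.h> */
	static void example_srq_event(struct ib_event *event, void *context)
	{
		if (event->event == IB_EVENT_SRQ_LIMIT_REACHED) {
			/* refill the SRQ with fresh receive buffers here */
		}
	}

	static struct ib_srq *example_create_srq(struct ib_pd *pd)
	{
		struct ib_srq_init_attr init_attr = {
			.event_handler	= example_srq_event,
			.srq_context	= NULL,
			.attr		= {
				.max_wr	 = 256,	/* may be rounded up on return */
				.max_sge = 1,
			},
		};
		struct ib_srq_attr attr = { .srq_limit = 16 };
		struct ib_srq *srq;

		srq = ib_create_srq(pd, &init_attr);
		if (IS_ERR(srq))
			return srq;

		/* arm IB_EVENT_SRQ_LIMIT_REACHED for when fewer than 16 WRs remain */
		if (ib_modify_srq(srq, &attr, IB_SRQ_LIMIT)) {
			ib_destroy_srq(srq);
			return ERR_PTR(-EINVAL);
		}
		return srq;
	}

Receive buffers are then posted with ib_post_srq_recv(), which, as the inline above shows, dispatches straight to the device driver's post_srq_recv method.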
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 1309c12b8f71..2857cf0472df 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/device.h>
29#include "pcm.h" 30#include "pcm.h"
30#include "control.h" 31#include "control.h"
31#include "info.h" 32#include "info.h"
@@ -374,6 +375,9 @@
374#define AC97_HAS_NO_PC_BEEP (1<<12) /* no PC Beep volume */ 375#define AC97_HAS_NO_PC_BEEP (1<<12) /* no PC Beep volume */
375#define AC97_HAS_NO_VIDEO (1<<13) /* no Video volume */ 376#define AC97_HAS_NO_VIDEO (1<<13) /* no Video volume */
376#define AC97_HAS_NO_CD (1<<14) /* no CD volume */ 377#define AC97_HAS_NO_CD (1<<14) /* no CD volume */
378#define AC97_HAS_NO_MIC (1<<15) /* no MIC volume */
379#define AC97_HAS_NO_TONE (1<<16) /* no Tone volume */
380#define AC97_HAS_NO_STD_PCM (1<<17) /* no standard AC97 PCM volume and mute */
377 381
378/* rates indexes */ 382/* rates indexes */
379#define AC97_RATES_FRONT_DAC 0 383#define AC97_RATES_FRONT_DAC 0
@@ -520,6 +524,7 @@ struct _snd_ac97 {
520 /* jack-sharing info */ 524 /* jack-sharing info */
521 unsigned char indep_surround; 525 unsigned char indep_surround;
522 unsigned char channel_mode; 526 unsigned char channel_mode;
527 struct device dev;
523}; 528};
524 529
525/* conditions */ 530/* conditions */
@@ -599,4 +604,8 @@ struct ac97_enum {
599 unsigned short mask; 604 unsigned short mask;
600 const char **texts; 605 const char **texts;
601}; 606};
607
608/* ad hoc AC97 device driver access */
609extern struct bus_type ac97_bus_type;
610
602#endif /* __SOUND_AC97_CODEC_H */ 611#endif /* __SOUND_AC97_CODEC_H */
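With the codec now embedding a struct device and the header exporting ac97_bus_type, codec-specific drivers can bind through the driver core. A minimal sketch; the driver name and probe body are illustrative assumptions:

	#include <linux/device.h>

	static int example_ac97_probe(struct device *dev)
	{
		/* dev is the struct device embedded in the snd_ac97 instance */
		return 0;
	}

	static struct device_driver example_ac97_driver = {
		.name	= "example-ac97",
		.bus	= &ac97_bus_type,
		.probe	= example_ac97_probe,
	};

	/* registered with driver_register(&example_ac97_driver) at module init */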
diff --git a/include/sound/ad1816a.h b/include/sound/ad1816a.h
index 395978e375cf..ca2e0e4fa937 100644
--- a/include/sound/ad1816a.h
+++ b/include/sound/ad1816a.h
@@ -138,6 +138,7 @@ struct _snd_ad1816a {
138 spinlock_t lock; 138 spinlock_t lock;
139 139
140 unsigned short mode; 140 unsigned short mode;
141 unsigned int clock_freq;
141 142
142 snd_card_t *card; 143 snd_card_t *card;
143 snd_pcm_t *pcm; 144 snd_pcm_t *pcm;
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 9974f83cca44..8e552d627fa5 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -560,7 +560,7 @@ enum {
560 * Timer section - /dev/snd/timer 560 * Timer section - /dev/snd/timer
561 */ 561 */
562 562
563#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4) 563#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 5)
564 564
565enum sndrv_timer_class { 565enum sndrv_timer_class {
566 SNDRV_TIMER_CLASS_NONE = -1, 566 SNDRV_TIMER_CLASS_NONE = -1,
@@ -693,11 +693,15 @@ enum sndrv_timer_event {
693 SNDRV_TIMER_EVENT_CONTINUE, /* val = resolution in ns */ 693 SNDRV_TIMER_EVENT_CONTINUE, /* val = resolution in ns */
694 SNDRV_TIMER_EVENT_PAUSE, /* val = 0 */ 694 SNDRV_TIMER_EVENT_PAUSE, /* val = 0 */
695 SNDRV_TIMER_EVENT_EARLY, /* val = 0, early event */ 695 SNDRV_TIMER_EVENT_EARLY, /* val = 0, early event */
696 SNDRV_TIMER_EVENT_SUSPEND, /* val = 0 */
697 SNDRV_TIMER_EVENT_RESUME, /* val = resolution in ns */
696 /* master timer events for slave timer instances */ 698 /* master timer events for slave timer instances */
697 SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10, 699 SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
698 SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10, 700 SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,
699 SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10, 701 SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
700 SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10, 702 SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10,
703 SNDRV_TIMER_EVENT_MSUSPEND = SNDRV_TIMER_EVENT_SUSPEND + 10,
704 SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
701}; 705};
702 706
703struct sndrv_timer_tread { 707struct sndrv_timer_tread {
diff --git a/include/sound/cs46xx.h b/include/sound/cs46xx.h
index 182dd276ee74..9b94510eda60 100644
--- a/include/sound/cs46xx.h
+++ b/include/sound/cs46xx.h
@@ -1748,7 +1748,7 @@ int snd_cs46xx_pcm(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
1748int snd_cs46xx_pcm_rear(cs46xx_t *chip, int device, snd_pcm_t **rpcm); 1748int snd_cs46xx_pcm_rear(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
1749int snd_cs46xx_pcm_iec958(cs46xx_t *chip, int device, snd_pcm_t **rpcm); 1749int snd_cs46xx_pcm_iec958(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
1750int snd_cs46xx_pcm_center_lfe(cs46xx_t *chip, int device, snd_pcm_t **rpcm); 1750int snd_cs46xx_pcm_center_lfe(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
1751int snd_cs46xx_mixer(cs46xx_t *chip); 1751int snd_cs46xx_mixer(cs46xx_t *chip, int spdif_device);
1752int snd_cs46xx_midi(cs46xx_t *chip, int device, snd_rawmidi_t **rmidi); 1752int snd_cs46xx_midi(cs46xx_t *chip, int device, snd_rawmidi_t **rmidi);
1753int snd_cs46xx_start_dsp(cs46xx_t *chip); 1753int snd_cs46xx_start_dsp(cs46xx_t *chip);
1754int snd_cs46xx_gameport(cs46xx_t *chip); 1754int snd_cs46xx_gameport(cs46xx_t *chip);
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index c2ef3f023687..4e3993dfcefe 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1178,7 +1178,7 @@ int snd_p16v_free(emu10k1_t * emu);
1178int snd_p16v_mixer(emu10k1_t * emu); 1178int snd_p16v_mixer(emu10k1_t * emu);
1179int snd_emu10k1_pcm_multi(emu10k1_t * emu, int device, snd_pcm_t ** rpcm); 1179int snd_emu10k1_pcm_multi(emu10k1_t * emu, int device, snd_pcm_t ** rpcm);
1180int snd_emu10k1_fx8010_pcm(emu10k1_t * emu, int device, snd_pcm_t ** rpcm); 1180int snd_emu10k1_fx8010_pcm(emu10k1_t * emu, int device, snd_pcm_t ** rpcm);
1181int snd_emu10k1_mixer(emu10k1_t * emu); 1181int snd_emu10k1_mixer(emu10k1_t * emu, int pcm_device, int multi_device);
1182int snd_emu10k1_timer(emu10k1_t * emu, int device); 1182int snd_emu10k1_timer(emu10k1_t * emu, int device);
1183int snd_emu10k1_fx8010_new(emu10k1_t *emu, int device, snd_hwdep_t ** rhwdep); 1183int snd_emu10k1_fx8010_new(emu10k1_t *emu, int device, snd_hwdep_t ** rhwdep);
1184 1184
diff --git a/include/sound/gus.h b/include/sound/gus.h
index b4b461ca173d..7000d9d9199d 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -512,13 +512,13 @@ extern void snd_gf1_ctrl_stop(snd_gus_card_t * gus, unsigned char reg);
512 512
513extern void snd_gf1_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data); 513extern void snd_gf1_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data);
514extern unsigned char snd_gf1_look8(snd_gus_card_t * gus, unsigned char reg); 514extern unsigned char snd_gf1_look8(snd_gus_card_t * gus, unsigned char reg);
515extern inline unsigned char snd_gf1_read8(snd_gus_card_t * gus, unsigned char reg) 515static inline unsigned char snd_gf1_read8(snd_gus_card_t * gus, unsigned char reg)
516{ 516{
517 return snd_gf1_look8(gus, reg | 0x80); 517 return snd_gf1_look8(gus, reg | 0x80);
518} 518}
519extern void snd_gf1_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data); 519extern void snd_gf1_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data);
520extern unsigned short snd_gf1_look16(snd_gus_card_t * gus, unsigned char reg); 520extern unsigned short snd_gf1_look16(snd_gus_card_t * gus, unsigned char reg);
521extern inline unsigned short snd_gf1_read16(snd_gus_card_t * gus, unsigned char reg) 521static inline unsigned short snd_gf1_read16(snd_gus_card_t * gus, unsigned char reg)
522{ 522{
523 return snd_gf1_look16(gus, reg | 0x80); 523 return snd_gf1_look16(gus, reg | 0x80);
524} 524}
@@ -532,12 +532,12 @@ extern void snd_gf1_i_ctrl_stop(snd_gus_card_t * gus, unsigned char reg);
532extern void snd_gf1_i_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data); 532extern void snd_gf1_i_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data);
533extern unsigned char snd_gf1_i_look8(snd_gus_card_t * gus, unsigned char reg); 533extern unsigned char snd_gf1_i_look8(snd_gus_card_t * gus, unsigned char reg);
534extern void snd_gf1_i_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data); 534extern void snd_gf1_i_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data);
535extern inline unsigned char snd_gf1_i_read8(snd_gus_card_t * gus, unsigned char reg) 535static inline unsigned char snd_gf1_i_read8(snd_gus_card_t * gus, unsigned char reg)
536{ 536{
537 return snd_gf1_i_look8(gus, reg | 0x80); 537 return snd_gf1_i_look8(gus, reg | 0x80);
538} 538}
539extern unsigned short snd_gf1_i_look16(snd_gus_card_t * gus, unsigned char reg); 539extern unsigned short snd_gf1_i_look16(snd_gus_card_t * gus, unsigned char reg);
540extern inline unsigned short snd_gf1_i_read16(snd_gus_card_t * gus, unsigned char reg) 540static inline unsigned short snd_gf1_i_read16(snd_gus_card_t * gus, unsigned char reg)
541{ 541{
542 return snd_gf1_i_look16(gus, reg | 0x80); 542 return snd_gf1_i_look16(gus, reg | 0x80);
543} 543}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index d935417575b5..fa23ebfb857a 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -379,7 +379,6 @@ struct _snd_pcm_substream {
379 unsigned int dma_buf_id; 379 unsigned int dma_buf_id;
380 size_t dma_max; 380 size_t dma_max;
381 /* -- hardware operations -- */ 381 /* -- hardware operations -- */
382 unsigned int open_flag: 1; /* lowlevel device has been opened */
383 snd_pcm_ops_t *ops; 382 snd_pcm_ops_t *ops;
384 /* -- runtime information -- */ 383 /* -- runtime information -- */
385 snd_pcm_runtime_t *runtime; 384 snd_pcm_runtime_t *runtime;
diff --git a/include/sound/version.h b/include/sound/version.h
index c085136f391f..8d19bfabb7e0 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
1/* include/version.h. Generated by configure. */ 1/* include/version.h. Generated by configure. */
2#define CONFIG_SND_VERSION "1.0.9b" 2#define CONFIG_SND_VERSION "1.0.10rc1"
3#define CONFIG_SND_DATE " (Thu Jul 28 12:20:13 2005 UTC)" 3#define CONFIG_SND_DATE " (Tue Aug 30 05:31:08 2005 UTC)"
diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
index 4b570684a6aa..9a3c1e6c820a 100644
--- a/include/sound/ymfpci.h
+++ b/include/sound/ymfpci.h
@@ -295,6 +295,7 @@ struct _snd_ymfpci_pcm {
295 unsigned int running: 1; 295 unsigned int running: 1;
296 unsigned int output_front: 1; 296 unsigned int output_front: 1;
297 unsigned int output_rear: 1; 297 unsigned int output_rear: 1;
298 unsigned int update_pcm_vol;
298 u32 period_size; /* cached from runtime->period_size */ 299 u32 period_size; /* cached from runtime->period_size */
299 u32 buffer_size; /* cached from runtime->buffer_size */ 300 u32 buffer_size; /* cached from runtime->buffer_size */
300 u32 period_pos; 301 u32 period_pos;
@@ -367,6 +368,11 @@ struct _snd_ymfpci {
367 int mode_dup4ch; 368 int mode_dup4ch;
368 int rear_opened; 369 int rear_opened;
369 int spdif_opened; 370 int spdif_opened;
371 struct {
372 u16 left;
373 u16 right;
374 snd_kcontrol_t *ctl;
375 } pcm_mixer[32];
370 376
371 spinlock_t reg_lock; 377 spinlock_t reg_lock;
372 spinlock_t voice_lock; 378 spinlock_t voice_lock;
diff --git a/init/main.c b/init/main.c
index c9c311cf1771..ff410063e4e1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -47,6 +47,7 @@
47#include <linux/rmap.h> 47#include <linux/rmap.h>
48#include <linux/mempolicy.h> 48#include <linux/mempolicy.h>
49#include <linux/key.h> 49#include <linux/key.h>
50#include <net/sock.h>
50 51
51#include <asm/io.h> 52#include <asm/io.h>
52#include <asm/bugs.h> 53#include <asm/bugs.h>
@@ -80,7 +81,6 @@
80static int init(void *); 81static int init(void *);
81 82
82extern void init_IRQ(void); 83extern void init_IRQ(void);
83extern void sock_init(void);
84extern void fork_init(unsigned long); 84extern void fork_init(unsigned long);
85extern void mca_init(void); 85extern void mca_init(void);
86extern void sbus_init(void); 86extern void sbus_init(void);
diff --git a/kernel/audit.c b/kernel/audit.c
index ef35166fdc29..7f0699790d46 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -514,7 +514,8 @@ static int __init audit_init(void)
514{ 514{
515 printk(KERN_INFO "audit: initializing netlink socket (%s)\n", 515 printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
516 audit_default ? "enabled" : "disabled"); 516 audit_default ? "enabled" : "disabled");
517 audit_sock = netlink_kernel_create(NETLINK_AUDIT, audit_receive); 517 audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive,
518 THIS_MODULE);
518 if (!audit_sock) 519 if (!audit_sock)
519 audit_panic("cannot initialize netlink socket"); 520 audit_panic("cannot initialize netlink socket");
520 521
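As both patched call sites show, netlink_kernel_create() now takes the number of multicast groups and the owning module in addition to the unit and input callback. A minimal sketch, assuming a hypothetical example_input() callback with the same shape as audit_receive():

	#include <linux/module.h>
	#include <linux/netlink.h>
	#include <net/sock.h>

	static struct sock *example_sock;

	/* placeholder input handler; same signature as audit_receive() */
	static void example_input(struct sock *sk, int len)
	{
	}

	static int __init example_init(void)
	{
		/* unit, multicast group count, input callback, owning module */
		example_sock = netlink_kernel_create(NETLINK_USERSOCK, 0,
						     example_input, THIS_MODULE);
		return example_sock ? 0 : -ENOMEM;
	}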
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e0d296c5b302..8ab1b4e518b8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -627,6 +627,14 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
627 * Call with cpuset_sem held. May nest a call to the 627 * Call with cpuset_sem held. May nest a call to the
628 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 628 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
629 */ 629 */
630
631/*
632 * Hack to avoid 2.6.13 partial node dynamic sched domain bug.
633 * Disable letting 'cpu_exclusive' cpusets define dynamic sched
634 * domains, until the sched domain can handle partial nodes.
635 * Remove this #if hackery when sched domains fixed.
636 */
637#if 0
630static void update_cpu_domains(struct cpuset *cur) 638static void update_cpu_domains(struct cpuset *cur)
631{ 639{
632 struct cpuset *c, *par = cur->parent; 640 struct cpuset *c, *par = cur->parent;
@@ -636,23 +644,6 @@ static void update_cpu_domains(struct cpuset *cur)
636 return; 644 return;
637 645
638 /* 646 /*
639 * Hack to avoid 2.6.13 partial node dynamic sched domain bug.
640 * Require the 'cpu_exclusive' cpuset to include all (or none)
641 * of the CPUs on each node, or return w/o changing sched domains.
642 * Remove this hack when dynamic sched domains fixed.
643 */
644 {
645 int i, j;
646
647 for_each_cpu_mask(i, cur->cpus_allowed) {
648 for_each_cpu_mask(j, node_to_cpumask(cpu_to_node(i))) {
649 if (!cpu_isset(j, cur->cpus_allowed))
650 return;
651 }
652 }
653 }
654
655 /*
656 * Get all cpus from parent's cpus_allowed not part of exclusive 647 * Get all cpus from parent's cpus_allowed not part of exclusive
657 * children 648 * children
658 */ 649 */
@@ -684,6 +675,11 @@ static void update_cpu_domains(struct cpuset *cur)
684 partition_sched_domains(&pspan, &cspan); 675 partition_sched_domains(&pspan, &cspan);
685 unlock_cpu_hotplug(); 676 unlock_cpu_hotplug();
686} 677}
678#else
679static void update_cpu_domains(struct cpuset *cur)
680{
681}
682#endif
687 683
688static int update_cpumask(struct cpuset *cs, char *buf) 684static int update_cpumask(struct cpuset *cs, char *buf)
689{ 685{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3e0bbee549ea..8e56e2495542 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -31,6 +31,7 @@
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/net.h>
34#include <linux/sysrq.h> 35#include <linux/sysrq.h>
35#include <linux/highuid.h> 36#include <linux/highuid.h>
36#include <linux/writeback.h> 37#include <linux/writeback.h>
@@ -136,9 +137,6 @@ static struct ctl_table_header root_table_header =
136 137
137static ctl_table kern_table[]; 138static ctl_table kern_table[];
138static ctl_table vm_table[]; 139static ctl_table vm_table[];
139#ifdef CONFIG_NET
140extern ctl_table net_table[];
141#endif
142static ctl_table proc_table[]; 140static ctl_table proc_table[];
143static ctl_table fs_table[]; 141static ctl_table fs_table[];
144static ctl_table debug_table[]; 142static ctl_table debug_table[];
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb429a52152..e43197efeb9c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -72,6 +72,9 @@ config TEXTSEARCH
72config TEXTSEARCH_KMP 72config TEXTSEARCH_KMP
73 tristate 73 tristate
74 74
75config TEXTSEARCH_BM
76 tristate
77
75config TEXTSEARCH_FSM 78config TEXTSEARCH_FSM
76 tristate 79 tristate
77 80
diff --git a/lib/Makefile b/lib/Makefile
index f28d9031303c..52f83380f704 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
38 38
39obj-$(CONFIG_TEXTSEARCH) += textsearch.o 39obj-$(CONFIG_TEXTSEARCH) += textsearch.o
40obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o 40obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
41obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
41obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o 42obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
42 43
43hostprogs-y := gen_crc32table 44hostprogs-y := gen_crc32table
diff --git a/lib/idr.c b/lib/idr.c
index c5be889de449..6415d053e2bf 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -207,7 +207,7 @@ build_up:
207} 207}
208 208
209/** 209/**
210 * idr_get_new_above - allocate new idr entry above a start id 210 * idr_get_new_above - allocate new idr entry above or equal to a start id
211 * @idp: idr handle 211 * @idp: idr handle
212 * @ptr: pointer you want associated with the ide 212 * @ptr: pointer you want associated with the ide
213 * @start_id: id to start search at 213 * @start_id: id to start search at
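Per the corrected kerneldoc, the start id is inclusive: the allocator may hand back start_id itself. A minimal usage sketch, assuming a hypothetical example_idr and the usual idr_pre_get()/idr_get_new_above() pairing:

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);	/* example_idr is illustrative */

	static int example_alloc_id(void *ptr)
	{
		int id;

		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		if (idr_get_new_above(&example_idr, ptr, 10, &id))
			return -EAGAIN;
		return id;	/* id >= 10; the start id itself is allowed */
	}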
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 8e49d21057e4..04ca4429ddfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -93,6 +93,7 @@ static int send_uevent(const char *signal, const char *obj,
93 } 93 }
94 } 94 }
95 95
96 NETLINK_CB(skb).dst_group = 1;
96 return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask); 97 return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
97} 98}
98 99
@@ -153,7 +154,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
153 154
154static int __init kobject_uevent_init(void) 155static int __init kobject_uevent_init(void)
155{ 156{
156 uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, NULL); 157 uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
158 THIS_MODULE);
157 159
158 if (!uevent_sock) { 160 if (!uevent_sock) {
159 printk(KERN_ERR 161 printk(KERN_ERR
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
new file mode 100644
index 000000000000..2cc79112ecc3
--- /dev/null
+++ b/lib/ts_bm.c
@@ -0,0 +1,185 @@
1/*
2 * lib/ts_bm.c Boyer-Moore text search implementation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Pablo Neira Ayuso <pablo@eurodev.net>
10 *
11 * ==========================================================================
12 *
13 * Implements Boyer-Moore string matching algorithm:
14 *
15 * [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
16 * Communications of the Association for Computing Machinery,
17 * 20(10), 1977, pp. 762-772.
18 * http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
19 *
20 * [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
21 * http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
22 *
 23 * Note: Since Boyer-Moore (BM) performs searches for matches from right
 24 * to left, it's still possible that a match could be spread over
 25 * multiple blocks, in which case this algorithm will not find it.
 26 *
 27 * If you need to ensure that such a thing can never happen, use the
 28 * Knuth-Morris-Pratt (KMP) implementation instead. In short, choose
 29 * the proper string search algorithm depending on your setting.
 30 *
 31 * If you're using the textsearch infrastructure for filtering, NIDS or
 32 * any similar security-focused purpose, go with KMP. Otherwise, if you
 33 * really care about performance, say you're classifying packets to
 34 * apply Quality of Service (QoS) policies, and you don't mind possible
 35 * matches being spread over multiple fragments, go with BM.
36 */
37
38#include <linux/config.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/types.h>
42#include <linux/string.h>
43#include <linux/textsearch.h>
44
45/* Alphabet size, use ASCII */
46#define ASIZE 256
47
48#if 0
49#define DEBUGP printk
50#else
51#define DEBUGP(args, format...)
52#endif
53
54struct ts_bm
55{
56 u8 * pattern;
57 unsigned int patlen;
58 unsigned int bad_shift[ASIZE];
59 unsigned int good_shift[0];
60};
61
62static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
63{
64 struct ts_bm *bm = ts_config_priv(conf);
65 unsigned int i, text_len, consumed = state->offset;
66 const u8 *text;
67 int shift = bm->patlen, bs;
68
69 for (;;) {
70 text_len = conf->get_next_block(consumed, &text, conf, state);
71
72 if (unlikely(text_len == 0))
73 break;
74
75 while (shift < text_len) {
76 DEBUGP("Searching in position %d (%c)\n",
77 shift, text[shift]);
78 for (i = 0; i < bm->patlen; i++)
79 if (text[shift-i] != bm->pattern[bm->patlen-1-i])
80 goto next;
81
82 /* London calling... */
83 DEBUGP("found!\n");
84 return consumed += (shift-(bm->patlen-1));
85
86next: bs = bm->bad_shift[text[shift-i]];
87
88 /* Now jumping to... */
89 shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
90 }
91 consumed += text_len;
92 }
93
94 return UINT_MAX;
95}
96
97static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
98 unsigned int len)
99{
100 int i, j, ended, l[ASIZE];
101
102 for (i = 0; i < ASIZE; i++)
103 bm->bad_shift[i] = len;
104 for (i = 0; i < len - 1; i++)
105 bm->bad_shift[pattern[i]] = len - 1 - i;
106
 107 /* Compute the good shift array, used to match reoccurrences
108 * of a subpattern */
109 for (i = 1; i < bm->patlen; i++) {
110 for (j = 0; j < bm->patlen && bm->pattern[bm->patlen - 1 - j]
111 == bm->pattern[bm->patlen - 1 - i - j]; j++);
112 l[i] = j;
113 }
114
115 bm->good_shift[0] = 1;
116 for (i = 1; i < bm->patlen; i++)
117 bm->good_shift[i] = bm->patlen;
118 for (i = bm->patlen - 1; i > 0; i--)
119 bm->good_shift[l[i]] = i;
120 ended = 0;
121 for (i = 0; i < bm->patlen; i++) {
122 if (l[i] == bm->patlen - 1 - i)
123 ended = i;
124 if (ended)
125 bm->good_shift[i] = ended;
126 }
127}
128
129static struct ts_config *bm_init(const void *pattern, unsigned int len,
130 int gfp_mask)
131{
132 struct ts_config *conf;
133 struct ts_bm *bm;
134 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
135 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
136
137 conf = alloc_ts_config(priv_size, gfp_mask);
138 if (IS_ERR(conf))
139 return conf;
140
141 bm = ts_config_priv(conf);
142 bm->patlen = len;
143 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
144 compute_prefix_tbl(bm, pattern, len);
145 memcpy(bm->pattern, pattern, len);
146
147 return conf;
148}
149
150static void *bm_get_pattern(struct ts_config *conf)
151{
152 struct ts_bm *bm = ts_config_priv(conf);
153 return bm->pattern;
154}
155
156static unsigned int bm_get_pattern_len(struct ts_config *conf)
157{
158 struct ts_bm *bm = ts_config_priv(conf);
159 return bm->patlen;
160}
161
162static struct ts_ops bm_ops = {
163 .name = "bm",
164 .find = bm_find,
165 .init = bm_init,
166 .get_pattern = bm_get_pattern,
167 .get_pattern_len = bm_get_pattern_len,
168 .owner = THIS_MODULE,
169 .list = LIST_HEAD_INIT(bm_ops.list)
170};
171
172static int __init init_bm(void)
173{
174 return textsearch_register(&bm_ops);
175}
176
177static void __exit exit_bm(void)
178{
179 textsearch_unregister(&bm_ops);
180}
181
182MODULE_LICENSE("GPL");
183
184module_init(init_bm);
185module_exit(exit_bm);
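The new algorithm is reached through the generic textsearch API rather than called directly. A minimal sketch, assuming a linear buffer and the hypothetical pattern "needle":

	#include <linux/kernel.h>
	#include <linux/err.h>
	#include <linux/textsearch.h>

	/* example_find() and the pattern are illustrative */
	static int example_find(const void *buf, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL,
					  TS_AUTOLOAD);
		if (IS_ERR(conf))
			return PTR_ERR(conf);

		pos = textsearch_find_continuous(conf, &state, buf, len);
		textsearch_destroy(conf);

		return pos == UINT_MAX ? -1 : pos;	/* offset of first match */
	}

For non-linear data (e.g. fragmented skbs) the caveat in the header comment applies: BM can miss a match that straddles two blocks handed back by get_next_block().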
diff --git a/mm/memory.c b/mm/memory.c
index e046b7e4b530..a596c1172248 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
498 unsigned long addr = vma->vm_start; 498 unsigned long addr = vma->vm_start;
499 unsigned long end = vma->vm_end; 499 unsigned long end = vma->vm_end;
500 500
501 /*
502 * Don't copy ptes where a page fault will fill them correctly.
503 * Fork becomes much lighter when there are big shared or private
504 * readonly mappings. The tradeoff is that copy_page_range is more
505 * efficient than faulting.
506 */
507 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
508 if (!vma->anon_vma)
509 return 0;
510 }
511
501 if (is_vm_hugetlb_page(vma)) 512 if (is_vm_hugetlb_page(vma))
502 return copy_hugetlb_page_range(dst_mm, src_mm, vma); 513 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
503 514
diff --git a/net/802/fc.c b/net/802/fc.c
index 640d34e026c2..282c4ab1abe6 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -87,7 +87,7 @@ static int fc_rebuild_header(struct sk_buff *skb)
87 struct fch_hdr *fch=(struct fch_hdr *)skb->data; 87 struct fch_hdr *fch=(struct fch_hdr *)skb->data;
88 struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr)); 88 struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr));
89 if(fcllc->ethertype != htons(ETH_P_IP)) { 89 if(fcllc->ethertype != htons(ETH_P_IP)) {
90 printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n",(unsigned int)htons(fcllc->ethertype)); 90 printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(fcllc->ethertype));
91 return 0; 91 return 0;
92 } 92 }
93#ifdef CONFIG_INET 93#ifdef CONFIG_INET
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 5ce24c4bb840..ac242a4bc346 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -108,8 +108,8 @@ static int fddi_rebuild_header(struct sk_buff *skb)
108 else 108 else
109#endif 109#endif
110 { 110 {
111 printk("%s: Don't know how to resolve type %02X addresses.\n", 111 printk("%s: Don't know how to resolve type %04X addresses.\n",
112 skb->dev->name, htons(fddi->hdr.llc_snap.ethertype)); 112 skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
113 return(0); 113 return(0);
114 } 114 }
115} 115}
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 051e8af56a77..6d7fed3dd99a 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -51,6 +51,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
51 unsigned len) 51 unsigned len)
52{ 52{
53 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); 53 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
54 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
54 55
55 if (!len){ 56 if (!len){
56 len = skb->len - HIPPI_HLEN; 57 len = skb->len - HIPPI_HLEN;
@@ -84,9 +85,10 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
84 if (daddr) 85 if (daddr)
85 { 86 {
86 memcpy(hip->le.dest_switch_addr, daddr + 3, 3); 87 memcpy(hip->le.dest_switch_addr, daddr + 3, 3);
87 memcpy(&skb->private.ifield, daddr + 2, 4); 88 memcpy(&hcb->ifield, daddr + 2, 4);
88 return HIPPI_HLEN; 89 return HIPPI_HLEN;
89 } 90 }
91 hcb->ifield = 0;
90 return -((int)HIPPI_HLEN); 92 return -((int)HIPPI_HLEN);
91} 93}
92 94
@@ -122,7 +124,7 @@ static int hippi_rebuild_header(struct sk_buff *skb)
122 * Determine the packet's protocol ID. 124 * Determine the packet's protocol ID.
123 */ 125 */
124 126
125unsigned short hippi_type_trans(struct sk_buff *skb, struct net_device *dev) 127__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
126{ 128{
127 struct hippi_hdr *hip; 129 struct hippi_hdr *hip;
128 130
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 5ae63416df6d..b24817c63ca8 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -35,7 +35,8 @@ static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
35struct datalink_proto *register_8022_client(unsigned char type, 35struct datalink_proto *register_8022_client(unsigned char type,
36 int (*func)(struct sk_buff *skb, 36 int (*func)(struct sk_buff *skb,
37 struct net_device *dev, 37 struct net_device *dev,
38 struct packet_type *pt)) 38 struct packet_type *pt,
39 struct net_device *orig_dev))
39{ 40{
40 struct datalink_proto *proto; 41 struct datalink_proto *proto;
41 42
diff --git a/net/802/p8023.c b/net/802/p8023.c
index a0b61b40225f..6368d3dce444 100644
--- a/net/802/p8023.c
+++ b/net/802/p8023.c
@@ -20,6 +20,7 @@
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21 21
22#include <net/datalink.h> 22#include <net/datalink.h>
23#include <net/p8022.h>
23 24
24/* 25/*
25 * Place an 802.3 header on a packet. The driver will do the mac 26 * Place an 802.3 header on a packet. The driver will do the mac
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 1053821ddf93..ab80b1fab53c 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -47,7 +47,7 @@ static struct datalink_proto *find_snap_client(unsigned char *desc)
47 * A SNAP packet has arrived 47 * A SNAP packet has arrived
48 */ 48 */
49static int snap_rcv(struct sk_buff *skb, struct net_device *dev, 49static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
50 struct packet_type *pt) 50 struct packet_type *pt, struct net_device *orig_dev)
51{ 51{
52 int rc = 1; 52 int rc = 1;
53 struct datalink_proto *proto; 53 struct datalink_proto *proto;
@@ -61,7 +61,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
61 /* Pass the frame on. */ 61 /* Pass the frame on. */
62 skb->h.raw += 5; 62 skb->h.raw += 5;
63 skb_pull(skb, 5); 63 skb_pull(skb, 5);
64 rc = proto->rcvfunc(skb, dev, &snap_packet_type); 64 rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
65 } else { 65 } else {
66 skb->sk = NULL; 66 skb->sk = NULL;
67 kfree_skb(skb); 67 kfree_skb(skb);
@@ -118,7 +118,8 @@ module_exit(snap_exit);
118struct datalink_proto *register_snap_client(unsigned char *desc, 118struct datalink_proto *register_snap_client(unsigned char *desc,
119 int (*rcvfunc)(struct sk_buff *, 119 int (*rcvfunc)(struct sk_buff *,
120 struct net_device *, 120 struct net_device *,
121 struct packet_type *)) 121 struct packet_type *,
122 struct net_device *))
122{ 123{
123 struct datalink_proto *proto = NULL; 124 struct datalink_proto *proto = NULL;
124 125
diff --git a/net/802/sysctl_net_802.c b/net/802/sysctl_net_802.c
index 36079630c49f..700129556c13 100644
--- a/net/802/sysctl_net_802.c
+++ b/net/802/sysctl_net_802.c
@@ -10,9 +10,10 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13#include <linux/config.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/if_tr.h>
14#include <linux/sysctl.h> 16#include <linux/sysctl.h>
15#include <linux/config.h>
16 17
17#ifdef CONFIG_TR 18#ifdef CONFIG_TR
18extern int sysctl_tr_rif_timeout; 19extern int sysctl_tr_rif_timeout;
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 508b1fa14546..9ae3a14dd016 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -51,7 +51,7 @@ struct net_device *__find_vlan_dev(struct net_device* real_dev,
51/* found in vlan_dev.c */ 51/* found in vlan_dev.c */
52int vlan_dev_rebuild_header(struct sk_buff *skb); 52int vlan_dev_rebuild_header(struct sk_buff *skb);
53int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, 53int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
54 struct packet_type* ptype); 54 struct packet_type *ptype, struct net_device *orig_dev);
55int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, 55int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
56 unsigned short type, void *daddr, void *saddr, 56 unsigned short type, void *daddr, void *saddr,
57 unsigned len); 57 unsigned len);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 49c487413518..145f5cde96cf 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -113,7 +113,7 @@ static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
113 * 113 *
114 */ 114 */
115int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, 115int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
116 struct packet_type* ptype) 116 struct packet_type* ptype, struct net_device *orig_dev)
117{ 117{
118 unsigned char *rawp = NULL; 118 unsigned char *rawp = NULL;
119 struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data); 119 struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
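Every protocol receive hook touched by this patch gains a struct net_device *orig_dev parameter, the device the frame originally arrived on. A minimal sketch of a handler with the extended signature; example_rcv and the ETH_P_ALL tap are illustrative assumptions:

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt, struct net_device *orig_dev)
	{
		/*
		 * orig_dev is the device the frame first arrived on; it differs
		 * from dev when the frame was re-injected (e.g. VLAN, bonding).
		 */
		kfree_skb(skb);
		return 0;
	}

	static struct packet_type example_packet_type = {
		.type = __constant_htons(ETH_P_ALL),
		.func = example_rcv,
	};

	/* registered with dev_add_pack(&example_packet_type) */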
diff --git a/net/Kconfig b/net/Kconfig
index 32327d0a56ad..2bdd5623fdd5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -147,6 +147,7 @@ source "net/bridge/netfilter/Kconfig"
147 147
148endif 148endif
149 149
150source "net/dccp/Kconfig"
150source "net/sctp/Kconfig" 151source "net/sctp/Kconfig"
151source "net/atm/Kconfig" 152source "net/atm/Kconfig"
152source "net/bridge/Kconfig" 153source "net/bridge/Kconfig"
@@ -205,6 +206,8 @@ config NET_PKTGEN
205 To compile this code as a module, choose M here: the 206 To compile this code as a module, choose M here: the
206 module will be called pktgen. 207 module will be called pktgen.
207 208
209source "net/netfilter/Kconfig"
210
208endmenu 211endmenu
209 212
210endmenu 213endmenu
diff --git a/net/Makefile b/net/Makefile
index 83bc52d87bae..4aa2f46d2a56 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NET) += $(tmp-y)
16obj-$(CONFIG_LLC) += llc/ 16obj-$(CONFIG_LLC) += llc/
17obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ 17obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/
18obj-$(CONFIG_INET) += ipv4/ 18obj-$(CONFIG_INET) += ipv4/
19obj-$(CONFIG_NETFILTER) += netfilter/
19obj-$(CONFIG_XFRM) += xfrm/ 20obj-$(CONFIG_XFRM) += xfrm/
20obj-$(CONFIG_UNIX) += unix/ 21obj-$(CONFIG_UNIX) += unix/
21ifneq ($(CONFIG_IPV6),) 22ifneq ($(CONFIG_IPV6),)
@@ -41,6 +42,7 @@ obj-$(CONFIG_ATM) += atm/
41obj-$(CONFIG_DECNET) += decnet/ 42obj-$(CONFIG_DECNET) += decnet/
42obj-$(CONFIG_ECONET) += econet/ 43obj-$(CONFIG_ECONET) += econet/
43obj-$(CONFIG_VLAN_8021Q) += 8021q/ 44obj-$(CONFIG_VLAN_8021Q) += 8021q/
45obj-$(CONFIG_IP_DCCP) += dccp/
44obj-$(CONFIG_IP_SCTP) += sctp/ 46obj-$(CONFIG_IP_SCTP) += sctp/
45obj-$(CONFIG_IEEE80211) += ieee80211/ 47obj-$(CONFIG_IEEE80211) += ieee80211/
46 48
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index c34614ea5fce..7076097debc2 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -698,7 +698,7 @@ static void __aarp_resolved(struct aarp_entry **list, struct aarp_entry *a,
698 * frame. We currently only support Ethernet. 698 * frame. We currently only support Ethernet.
699 */ 699 */
700static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, 700static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
701 struct packet_type *pt) 701 struct packet_type *pt, struct net_device *orig_dev)
702{ 702{
703 struct elapaarp *ea = aarp_hdr(skb); 703 struct elapaarp *ea = aarp_hdr(skb);
704 int hash, ret = 0; 704 int hash, ret = 0;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 192b529f86a4..1d31b3a3f1e5 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -53,12 +53,12 @@
53 53
54#include <linux/config.h> 54#include <linux/config.h>
55#include <linux/module.h> 55#include <linux/module.h>
56#include <linux/tcp.h>
57#include <linux/if_arp.h> 56#include <linux/if_arp.h>
58#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 57#include <linux/termios.h> /* For TIOCOUTQ/INQ */
59#include <net/datalink.h> 58#include <net/datalink.h>
60#include <net/psnap.h> 59#include <net/psnap.h>
61#include <net/sock.h> 60#include <net/sock.h>
61#include <net/tcp_states.h>
62#include <net/route.h> 62#include <net/route.h>
63#include <linux/atalk.h> 63#include <linux/atalk.h>
64 64
@@ -1390,7 +1390,7 @@ free_it:
1390 * [ie ARPHRD_ETHERTALK] 1390 * [ie ARPHRD_ETHERTALK]
1391 */ 1391 */
1392static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, 1392static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1393 struct packet_type *pt) 1393 struct packet_type *pt, struct net_device *orig_dev)
1394{ 1394{
1395 struct ddpehdr *ddp; 1395 struct ddpehdr *ddp;
1396 struct sock *sock; 1396 struct sock *sock;
@@ -1482,7 +1482,7 @@ freeit:
1482 * header and append a long one. 1482 * header and append a long one.
1483 */ 1483 */
1484static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, 1484static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
1485 struct packet_type *pt) 1485 struct packet_type *pt, struct net_device *orig_dev)
1486{ 1486{
1487 /* Expand any short form frames */ 1487 /* Expand any short form frames */
1488 if (skb->mac.raw[2] == 1) { 1488 if (skb->mac.raw[2] == 1) {
@@ -1528,7 +1528,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
1528 } 1528 }
1529 skb->h.raw = skb->data; 1529 skb->h.raw = skb->data;
1530 1530
1531 return atalk_rcv(skb, dev, pt); 1531 return atalk_rcv(skb, dev, pt, orig_dev);
1532freeit: 1532freeit:
1533 kfree_skb(skb); 1533 kfree_skb(skb);
1534 return 0; 1534 return 0;
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 181a3002d8ad..4b1faca5013f 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -34,7 +34,6 @@
34 34
35void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) 35void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
36{ 36{
37 struct sk_buff *skb;
38 unsigned long flags; 37 unsigned long flags;
39 struct sk_buff *skb_from = (struct sk_buff *) from; 38 struct sk_buff *skb_from = (struct sk_buff *) from;
40 struct sk_buff *skb_to = (struct sk_buff *) to; 39 struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
47 prev->next = skb_to; 46 prev->next = skb_to;
48 to->prev->next = from->next; 47 to->prev->next = from->next;
49 to->prev = from->prev; 48 to->prev = from->prev;
50 for (skb = from->next; skb != skb_to; skb = skb->next)
51 skb->list = to;
52 to->qlen += from->qlen; 49 to->qlen += from->qlen;
53 spin_unlock(&to->lock); 50 spin_unlock(&to->lock);
54 from->prev = skb_from; 51 from->prev = skb_from;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a5c94f11547c..ea43dfb774e2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -45,7 +45,7 @@
45#include <linux/sysctl.h> 45#include <linux/sysctl.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <net/tcp.h> 48#include <net/tcp_states.h>
49#include <net/ip.h> 49#include <net/ip.h>
50#include <net/arp.h> 50#include <net/arp.h>
51 51
diff --git a/net/ax25/ax25_ds_in.c b/net/ax25/ax25_ds_in.c
index 8adc0022cf58..edcaa897027c 100644
--- a/net/ax25/ax25_ds_in.c
+++ b/net/ax25/ax25_ds_in.c
@@ -22,8 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/ip.h> /* For ip_rcv */ 25#include <net/tcp_states.h>
26#include <net/tcp.h>
27#include <asm/uaccess.h> 26#include <asm/uaccess.h>
28#include <asm/system.h> 27#include <asm/system.h>
29#include <linux/fcntl.h> 28#include <linux/fcntl.h>
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 3a8b67316fc3..061083efc1dc 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -18,7 +18,7 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/net.h> 20#include <linux/net.h>
21#include <net/tcp.h> 21#include <net/tcp_states.h>
22#include <net/ax25.h> 22#include <net/ax25.h>
23#include <linux/inet.h> 23#include <linux/inet.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 3dc808fde33f..810c9c76c2e0 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -9,7 +9,6 @@
9 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) 9 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
10 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de) 10 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
11 */ 11 */
12#include <linux/config.h>
13#include <linux/errno.h> 12#include <linux/errno.h>
14#include <linux/types.h> 13#include <linux/types.h>
15#include <linux/socket.h> 14#include <linux/socket.h>
@@ -26,9 +25,7 @@
26#include <linux/skbuff.h> 25#include <linux/skbuff.h>
27#include <linux/netfilter.h> 26#include <linux/netfilter.h>
28#include <net/sock.h> 27#include <net/sock.h>
29#include <net/ip.h> /* For ip_rcv */ 28#include <net/tcp_states.h>
30#include <net/tcp.h>
31#include <net/arp.h> /* For arp_rcv */
32#include <asm/uaccess.h> 29#include <asm/uaccess.h>
33#include <asm/system.h> 30#include <asm/system.h>
34#include <linux/fcntl.h> 31#include <linux/fcntl.h>
@@ -114,7 +111,6 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
114 111
115 pid = *skb->data; 112 pid = *skb->data;
116 113
117#ifdef CONFIG_INET
118 if (pid == AX25_P_IP) { 114 if (pid == AX25_P_IP) {
119 /* working around a TCP bug to keep additional listeners 115 /* working around a TCP bug to keep additional listeners
120 * happy. TCP re-uses the buffer and destroys the original 116 * happy. TCP re-uses the buffer and destroys the original
@@ -132,10 +128,9 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
132 skb->dev = ax25->ax25_dev->dev; 128 skb->dev = ax25->ax25_dev->dev;
133 skb->pkt_type = PACKET_HOST; 129 skb->pkt_type = PACKET_HOST;
134 skb->protocol = htons(ETH_P_IP); 130 skb->protocol = htons(ETH_P_IP);
135 ip_rcv(skb, skb->dev, NULL); /* Wrong ptype */ 131 netif_rx(skb);
136 return 1; 132 return 1;
137 } 133 }
138#endif
139 if (pid == AX25_P_SEGMENT) { 134 if (pid == AX25_P_SEGMENT) {
140 skb_pull(skb, 1); /* Remove PID */ 135 skb_pull(skb, 1); /* Remove PID */
141 return ax25_rx_fragment(ax25, skb); 136 return ax25_rx_fragment(ax25, skb);
@@ -250,7 +245,6 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
250 245
251 /* Now we are pointing at the pid byte */ 246 /* Now we are pointing at the pid byte */
252 switch (skb->data[1]) { 247 switch (skb->data[1]) {
253#ifdef CONFIG_INET
254 case AX25_P_IP: 248 case AX25_P_IP:
255 skb_pull(skb,2); /* drop PID/CTRL */ 249 skb_pull(skb,2); /* drop PID/CTRL */
256 skb->h.raw = skb->data; 250 skb->h.raw = skb->data;
@@ -258,7 +252,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
258 skb->dev = dev; 252 skb->dev = dev;
259 skb->pkt_type = PACKET_HOST; 253 skb->pkt_type = PACKET_HOST;
260 skb->protocol = htons(ETH_P_IP); 254 skb->protocol = htons(ETH_P_IP);
261 ip_rcv(skb, dev, ptype); /* Note ptype here is the wrong one, fix me later */ 255 netif_rx(skb);
262 break; 256 break;
263 257
264 case AX25_P_ARP: 258 case AX25_P_ARP:
@@ -268,9 +262,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
268 skb->dev = dev; 262 skb->dev = dev;
269 skb->pkt_type = PACKET_HOST; 263 skb->pkt_type = PACKET_HOST;
270 skb->protocol = htons(ETH_P_ARP); 264 skb->protocol = htons(ETH_P_ARP);
271 arp_rcv(skb, dev, ptype); /* Note ptype here is wrong... */ 265 netif_rx(skb);
272 break; 266 break;
273#endif
274 case AX25_P_TEXT: 267 case AX25_P_TEXT:
275 /* Now find a suitable dgram socket */ 268 /* Now find a suitable dgram socket */
276 sk = ax25_get_socket(&dest, &src, SOCK_DGRAM); 269 sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
@@ -454,7 +447,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
454 * Receive an AX.25 frame via a SLIP interface. 447 * Receive an AX.25 frame via a SLIP interface.
455 */ 448 */
456int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, 449int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
457 struct packet_type *ptype) 450 struct packet_type *ptype, struct net_device *orig_dev)
458{ 451{
459 skb->sk = NULL; /* Initially we don't know who it's for */ 452 skb->sk = NULL; /* Initially we don't know who it's for */
460 skb->destructor = NULL; /* Who initializes this, dammit?! */ 453 skb->destructor = NULL; /* Who initializes this, dammit?! */
diff --git a/net/ax25/ax25_std_in.c b/net/ax25/ax25_std_in.c
index 7131873322c4..f6ed283e9de8 100644
--- a/net/ax25/ax25_std_in.c
+++ b/net/ax25/ax25_std_in.c
@@ -29,8 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/skbuff.h> 30#include <linux/skbuff.h>
31#include <net/sock.h> 31#include <net/sock.h>
32#include <net/ip.h> /* For ip_rcv */ 32#include <net/tcp_states.h>
33#include <net/tcp.h>
34#include <asm/uaccess.h> 33#include <asm/uaccess.h>
35#include <asm/system.h> 34#include <asm/system.h>
36#include <linux/fcntl.h> 35#include <linux/fcntl.h>
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 066897bc0749..a29c480a4dc1 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -24,7 +24,7 @@
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/tcp.h> 27#include <net/tcp_states.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/system.h> 29#include <asm/system.h>
30#include <linux/fcntl.h> 30#include <linux/fcntl.h>
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 99694b57f6f5..c41dbe5fadee 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -24,7 +24,7 @@
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/tcp.h> 27#include <net/tcp_states.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/system.h> 29#include <asm/system.h>
30#include <linux/fcntl.h> 30#include <linux/fcntl.h>
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
76 if (skb_prev == NULL) 76 if (skb_prev == NULL)
77 skb_queue_head(&ax25->write_queue, skb); 77 skb_queue_head(&ax25->write_queue, skb);
78 else 78 else
79 skb_append(skb_prev, skb); 79 skb_append(skb_prev, skb, &ax25->write_queue);
80 skb_prev = skb; 80 skb_prev = skb;
81 } 81 }
82} 82}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ffa26c10bfe8..55dc42eac92c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -191,7 +191,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
191 191
192 /* Special commands */ 192 /* Special commands */
193 while ((skb = skb_dequeue(&hdev->driver_init))) { 193 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 skb->pkt_type = HCI_COMMAND_PKT; 194 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
195 skb->dev = (void *) hdev; 195 skb->dev = (void *) hdev;
196 skb_queue_tail(&hdev->cmd_q, skb); 196 skb_queue_tail(&hdev->cmd_q, skb);
197 hci_sched_cmd(hdev); 197 hci_sched_cmd(hdev);
@@ -995,11 +995,11 @@ static int hci_send_frame(struct sk_buff *skb)
995 return -ENODEV; 995 return -ENODEV;
996 } 996 }
997 997
998 BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len); 998 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
999 999
1000 if (atomic_read(&hdev->promisc)) { 1000 if (atomic_read(&hdev->promisc)) {
1001 /* Time stamp */ 1001 /* Time stamp */
1002 do_gettimeofday(&skb->stamp); 1002 __net_timestamp(skb);
1003 1003
1004 hci_send_to_sock(hdev, skb); 1004 hci_send_to_sock(hdev, skb);
1005 } 1005 }
@@ -1034,7 +1034,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
1034 1034
1035 BT_DBG("skb len %d", skb->len); 1035 BT_DBG("skb len %d", skb->len);
1036 1036
1037 skb->pkt_type = HCI_COMMAND_PKT; 1037 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1038 skb->dev = (void *) hdev; 1038 skb->dev = (void *) hdev;
1039 skb_queue_tail(&hdev->cmd_q, skb); 1039 skb_queue_tail(&hdev->cmd_q, skb);
1040 hci_sched_cmd(hdev); 1040 hci_sched_cmd(hdev);
@@ -1081,7 +1081,7 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1081 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); 1081 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1082 1082
1083 skb->dev = (void *) hdev; 1083 skb->dev = (void *) hdev;
1084 skb->pkt_type = HCI_ACLDATA_PKT; 1084 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1085 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); 1085 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1086 1086
1087 if (!(list = skb_shinfo(skb)->frag_list)) { 1087 if (!(list = skb_shinfo(skb)->frag_list)) {
@@ -1103,7 +1103,7 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1103 skb = list; list = list->next; 1103 skb = list; list = list->next;
1104 1104
1105 skb->dev = (void *) hdev; 1105 skb->dev = (void *) hdev;
1106 skb->pkt_type = HCI_ACLDATA_PKT; 1106 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1107 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); 1107 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1108 1108
1109 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 1109 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1139,7 +1139,7 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1139 memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE); 1139 memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1140 1140
1141 skb->dev = (void *) hdev; 1141 skb->dev = (void *) hdev;
1142 skb->pkt_type = HCI_SCODATA_PKT; 1142 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1143 skb_queue_tail(&conn->data_q, skb); 1143 skb_queue_tail(&conn->data_q, skb);
1144 hci_sched_tx(hdev); 1144 hci_sched_tx(hdev);
1145 return 0; 1145 return 0;
@@ -1369,7 +1369,7 @@ void hci_rx_task(unsigned long arg)
1369 1369
1370 if (test_bit(HCI_INIT, &hdev->flags)) { 1370 if (test_bit(HCI_INIT, &hdev->flags)) {
 1371 /* Don't process data packets in this state. */ 1371 /* Don't process data packets in this state. */
1372 switch (skb->pkt_type) { 1372 switch (bt_cb(skb)->pkt_type) {
1373 case HCI_ACLDATA_PKT: 1373 case HCI_ACLDATA_PKT:
1374 case HCI_SCODATA_PKT: 1374 case HCI_SCODATA_PKT:
1375 kfree_skb(skb); 1375 kfree_skb(skb);
@@ -1378,7 +1378,7 @@ void hci_rx_task(unsigned long arg)
1378 } 1378 }
1379 1379
1380 /* Process frame */ 1380 /* Process frame */
1381 switch (skb->pkt_type) { 1381 switch (bt_cb(skb)->pkt_type) {
1382 case HCI_EVENT_PKT: 1382 case HCI_EVENT_PKT:
1383 hci_event_packet(hdev, skb); 1383 hci_event_packet(hdev, skb);
1384 break; 1384 break;
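
The hci_core.c hunks all make the same move: HCI's packet type leaves the generic skb->pkt_type field, which the network core owns, for Bluetooth's private control buffer. The accessor is just a cast over skb->cb; a sketch of the idiom, redeclared here only for illustration (the real definitions live in <net/bluetooth/bluetooth.h>, and the real struct carries more fields):

#include <linux/skbuff.h>
#include <net/bluetooth/hci.h>

/* Illustrative shape of the per-skb Bluetooth control block. */
struct bt_skb_cb_sketch {
        __u8 pkt_type;          /* HCI_COMMAND_PKT, HCI_ACLDATA_PKT, ... */
        __u8 incoming;          /* set on frames received from the controller */
};
#define bt_cb_sketch(skb) ((struct bt_skb_cb_sketch *)((skb)->cb))

/* Tag a command skb before queueing it towards the controller. */
static void tag_command(struct sk_buff *skb)
{
        bt_cb_sketch(skb)->pkt_type = HCI_COMMAND_PKT;
}
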
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 46367bd129c3..d6da0939216d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -484,14 +484,18 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
484/* Inquiry Result */ 484/* Inquiry Result */
485static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 485static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
486{ 486{
487 struct inquiry_data data;
487 struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1); 488 struct inquiry_info *info = (struct inquiry_info *) (skb->data + 1);
488 int num_rsp = *((__u8 *) skb->data); 489 int num_rsp = *((__u8 *) skb->data);
489 490
490 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 491 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
491 492
493 if (!num_rsp)
494 return;
495
492 hci_dev_lock(hdev); 496 hci_dev_lock(hdev);
497
493 for (; num_rsp; num_rsp--) { 498 for (; num_rsp; num_rsp--) {
494 struct inquiry_data data;
495 bacpy(&data.bdaddr, &info->bdaddr); 499 bacpy(&data.bdaddr, &info->bdaddr);
496 data.pscan_rep_mode = info->pscan_rep_mode; 500 data.pscan_rep_mode = info->pscan_rep_mode;
497 data.pscan_period_mode = info->pscan_period_mode; 501 data.pscan_period_mode = info->pscan_period_mode;
@@ -502,30 +506,55 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
502 info++; 506 info++;
503 hci_inquiry_cache_update(hdev, &data); 507 hci_inquiry_cache_update(hdev, &data);
504 } 508 }
509
505 hci_dev_unlock(hdev); 510 hci_dev_unlock(hdev);
506} 511}
507 512
508/* Inquiry Result With RSSI */ 513/* Inquiry Result With RSSI */
509static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 514static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
510{ 515{
511 struct inquiry_info_with_rssi *info = (struct inquiry_info_with_rssi *) (skb->data + 1); 516 struct inquiry_data data;
512 int num_rsp = *((__u8 *) skb->data); 517 int num_rsp = *((__u8 *) skb->data);
513 518
514 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 519 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
515 520
521 if (!num_rsp)
522 return;
523
516 hci_dev_lock(hdev); 524 hci_dev_lock(hdev);
517 for (; num_rsp; num_rsp--) { 525
518 struct inquiry_data data; 526 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
519 bacpy(&data.bdaddr, &info->bdaddr); 527 struct inquiry_info_with_rssi_and_pscan_mode *info =
520 data.pscan_rep_mode = info->pscan_rep_mode; 528 (struct inquiry_info_with_rssi_and_pscan_mode *) (skb->data + 1);
521 data.pscan_period_mode = info->pscan_period_mode; 529
522 data.pscan_mode = 0x00; 530 for (; num_rsp; num_rsp--) {
523 memcpy(data.dev_class, info->dev_class, 3); 531 bacpy(&data.bdaddr, &info->bdaddr);
524 data.clock_offset = info->clock_offset; 532 data.pscan_rep_mode = info->pscan_rep_mode;
525 data.rssi = info->rssi; 533 data.pscan_period_mode = info->pscan_period_mode;
526 info++; 534 data.pscan_mode = info->pscan_mode;
527 hci_inquiry_cache_update(hdev, &data); 535 memcpy(data.dev_class, info->dev_class, 3);
536 data.clock_offset = info->clock_offset;
537 data.rssi = info->rssi;
538 info++;
539 hci_inquiry_cache_update(hdev, &data);
540 }
541 } else {
542 struct inquiry_info_with_rssi *info =
543 (struct inquiry_info_with_rssi *) (skb->data + 1);
544
545 for (; num_rsp; num_rsp--) {
546 bacpy(&data.bdaddr, &info->bdaddr);
547 data.pscan_rep_mode = info->pscan_rep_mode;
548 data.pscan_period_mode = info->pscan_period_mode;
549 data.pscan_mode = 0x00;
550 memcpy(data.dev_class, info->dev_class, 3);
551 data.clock_offset = info->clock_offset;
552 data.rssi = info->rssi;
553 info++;
554 hci_inquiry_cache_update(hdev, &data);
555 }
528 } 556 }
557
529 hci_dev_unlock(hdev); 558 hci_dev_unlock(hdev);
530} 559}
531 560
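
The RSSI handler above now copes with two wire layouts of the same event, because some controllers append a page-scan-mode byte to every response. The discriminator is pure arithmetic: one count byte is followed by num_rsp fixed-size records, so the record size is (skb->len - 1) / num_rsp. The check in isolation (the parse_* helpers are hypothetical stand-ins for the two loops in the hunk):

/* Recover the per-response record size from the payload length. */
int entry_len = (skb->len - 1) / num_rsp;

if (entry_len == sizeof(struct inquiry_info_with_rssi_and_pscan_mode))
        parse_with_pscan_mode(skb, num_rsp);    /* extended records */
else
        parse_plain_rssi(skb, num_rsp);         /* standard records */
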
@@ -865,6 +894,24 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
865 hci_dev_unlock(hdev); 894 hci_dev_unlock(hdev);
866} 895}
867 896
897/* Page Scan Repetition Mode */
898static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
899{
900 struct hci_ev_pscan_rep_mode *ev = (struct hci_ev_pscan_rep_mode *) skb->data;
901 struct inquiry_entry *ie;
902
903 BT_DBG("%s", hdev->name);
904
905 hci_dev_lock(hdev);
906
907 if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) {
908 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
909 ie->timestamp = jiffies;
910 }
911
912 hci_dev_unlock(hdev);
913}
914
868void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 915void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
869{ 916{
870 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; 917 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data;
@@ -937,6 +984,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
937 hci_clock_offset_evt(hdev, skb); 984 hci_clock_offset_evt(hdev, skb);
938 break; 985 break;
939 986
987 case HCI_EV_PSCAN_REP_MODE:
988 hci_pscan_rep_mode_evt(hdev, skb);
989 break;
990
940 case HCI_EV_CMD_STATUS: 991 case HCI_EV_CMD_STATUS:
941 cs = (struct hci_ev_cmd_status *) skb->data; 992 cs = (struct hci_ev_cmd_status *) skb->data;
942 skb_pull(skb, sizeof(cs)); 993 skb_pull(skb, sizeof(cs));
@@ -1036,9 +1087,9 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
1036 memcpy(ev->data, data, dlen); 1087 memcpy(ev->data, data, dlen);
1037 1088
1038 bt_cb(skb)->incoming = 1; 1089 bt_cb(skb)->incoming = 1;
1039 do_gettimeofday(&skb->stamp); 1090 __net_timestamp(skb);
1040 1091
1041 skb->pkt_type = HCI_EVENT_PKT; 1092 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
1042 skb->dev = (void *) hdev; 1093 skb->dev = (void *) hdev;
1043 hci_send_to_sock(hdev, skb); 1094 hci_send_to_sock(hdev, skb);
1044 kfree_skb(skb); 1095 kfree_skb(skb);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ebdcce5e7ca0..32ef7975a139 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -110,11 +110,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
110 /* Apply filter */ 110 /* Apply filter */
111 flt = &hci_pi(sk)->filter; 111 flt = &hci_pi(sk)->filter;
112 112
113 if (!test_bit((skb->pkt_type == HCI_VENDOR_PKT) ? 113 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
114 0 : (skb->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) 114 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
115 continue; 115 continue;
116 116
117 if (skb->pkt_type == HCI_EVENT_PKT) { 117 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
118 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); 118 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
119 119
120 if (!hci_test_bit(evt, &flt->event_mask)) 120 if (!hci_test_bit(evt, &flt->event_mask))
@@ -131,7 +131,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
131 continue; 131 continue;
132 132
133 /* Put type byte before the data */ 133 /* Put type byte before the data */
134 memcpy(skb_push(nskb, 1), &nskb->pkt_type, 1); 134 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
135 135
136 if (sock_queue_rcv_skb(sk, nskb)) 136 if (sock_queue_rcv_skb(sk, nskb))
137 kfree_skb(nskb); 137 kfree_skb(nskb);
@@ -327,11 +327,17 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
327{ 327{
328 __u32 mask = hci_pi(sk)->cmsg_mask; 328 __u32 mask = hci_pi(sk)->cmsg_mask;
329 329
330 if (mask & HCI_CMSG_DIR) 330 if (mask & HCI_CMSG_DIR) {
331 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(int), &bt_cb(skb)->incoming); 331 int incoming = bt_cb(skb)->incoming;
332 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
333 }
334
335 if (mask & HCI_CMSG_TSTAMP) {
336 struct timeval tv;
332 337
333 if (mask & HCI_CMSG_TSTAMP) 338 skb_get_timestamp(skb, &tv);
334 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(skb->stamp), &skb->stamp); 339 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
340 }
335} 341}
336 342
337static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 343static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
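
hci_sock_cmsg() above stops copying skb->stamp byte-for-byte because the in-kernel timestamp representation changes in this series; skb_get_timestamp() converts whatever sk_buff stores into an ordinary struct timeval. The same helper works anywhere a receive time is needed, as in this minimal kernel-side sketch:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/time.h>

/* Report a frame's receive time without knowing how it is stored. */
static void report_rx_time(const struct sk_buff *skb)
{
        struct timeval tv;

        skb_get_timestamp(skb, &tv);
        printk(KERN_DEBUG "rx at %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
}
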
@@ -405,11 +411,11 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
405 goto drop; 411 goto drop;
406 } 412 }
407 413
408 skb->pkt_type = *((unsigned char *) skb->data); 414 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
409 skb_pull(skb, 1); 415 skb_pull(skb, 1);
410 skb->dev = (void *) hdev; 416 skb->dev = (void *) hdev;
411 417
412 if (skb->pkt_type == HCI_COMMAND_PKT) { 418 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
413 u16 opcode = __le16_to_cpu(get_unaligned((u16 *)skb->data)); 419 u16 opcode = __le16_to_cpu(get_unaligned((u16 *)skb->data));
414 u16 ogf = hci_opcode_ogf(opcode); 420 u16 ogf = hci_opcode_ogf(opcode);
415 u16 ocf = hci_opcode_ocf(opcode); 421 u16 ocf = hci_opcode_ocf(opcode);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 32fccfb5bfa5..d3d6bc547212 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -372,7 +372,7 @@ static struct proto l2cap_proto = {
372 .obj_size = sizeof(struct l2cap_pinfo) 372 .obj_size = sizeof(struct l2cap_pinfo)
373}; 373};
374 374
375static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio) 375static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
376{ 376{
377 struct sock *sk; 377 struct sock *sk;
378 378
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 27bf5047cd33..173f46e8cdae 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -21,10 +21,6 @@
21 SOFTWARE IS DISCLAIMED. 21 SOFTWARE IS DISCLAIMED.
22*/ 22*/
23 23
24/*
25 RPN support - Dirk Husemann <hud@zurich.ibm.com>
26*/
27
28/* 24/*
29 * Bluetooth RFCOMM core. 25 * Bluetooth RFCOMM core.
30 * 26 *
@@ -115,10 +111,10 @@ static void rfcomm_session_del(struct rfcomm_session *s);
115#define __get_mcc_len(b) ((b & 0xfe) >> 1) 111#define __get_mcc_len(b) ((b & 0xfe) >> 1)
116 112
117/* RPN macros */ 113/* RPN macros */
118#define __rpn_line_settings(data, stop, parity) ((data & 0x3) | ((stop & 0x1) << 2) | ((parity & 0x3) << 3)) 114#define __rpn_line_settings(data, stop, parity) ((data & 0x3) | ((stop & 0x1) << 2) | ((parity & 0x7) << 3))
119#define __get_rpn_data_bits(line) ((line) & 0x3) 115#define __get_rpn_data_bits(line) ((line) & 0x3)
120#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 116#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
121#define __get_rpn_parity(line) (((line) >> 3) & 0x3) 117#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
122 118
123static inline void rfcomm_schedule(uint event) 119static inline void rfcomm_schedule(uint event)
124{ 120{
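
The RPN macro fix above widens the parity field in the line-settings octet from two bits to three; the mark and space parity encodings need the third bit, so the old 0x3 mask silently mapped them onto odd and even. The packing, spelled out as a quick sanity sketch (constants as defined in the kernel's rfcomm.h):

/* line_settings layout after the fix:
 *   bits 0-1: data bits, bit 2: stop bits, bits 3-5: parity */
u8 line = __rpn_line_settings(RFCOMM_RPN_DATA_8,
                              RFCOMM_RPN_STOP_1,
                              RFCOMM_RPN_PARITY_ODD);
u8 parity = __get_rpn_parity(line);     /* reads bits 3-5 back out */
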
@@ -233,7 +229,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
233 d->rx_credits = RFCOMM_DEFAULT_CREDITS; 229 d->rx_credits = RFCOMM_DEFAULT_CREDITS;
234} 230}
235 231
236struct rfcomm_dlc *rfcomm_dlc_alloc(int prio) 232struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio)
237{ 233{
238 struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); 234 struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
239 if (!d) 235 if (!d)
@@ -780,10 +776,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
780 return rfcomm_send_frame(s, buf, ptr - buf); 776 return rfcomm_send_frame(s, buf, ptr - buf);
781} 777}
782 778
783static int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, 779int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
784 u8 bit_rate, u8 data_bits, u8 stop_bits, 780 u8 bit_rate, u8 data_bits, u8 stop_bits,
785 u8 parity, u8 flow_ctrl_settings, 781 u8 parity, u8 flow_ctrl_settings,
786 u8 xon_char, u8 xoff_char, u16 param_mask) 782 u8 xon_char, u8 xoff_char, u16 param_mask)
787{ 783{
788 struct rfcomm_hdr *hdr; 784 struct rfcomm_hdr *hdr;
789 struct rfcomm_mcc *mcc; 785 struct rfcomm_mcc *mcc;
@@ -791,9 +787,9 @@ static int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
791 u8 buf[16], *ptr = buf; 787 u8 buf[16], *ptr = buf;
792 788
793 BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x" 789 BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x"
794 "flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x", 790 " flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x",
795 s, cr, dlci, bit_rate, data_bits, stop_bits, parity, 791 s, cr, dlci, bit_rate, data_bits, stop_bits, parity,
796 flow_ctrl_settings, xon_char, xoff_char, param_mask); 792 flow_ctrl_settings, xon_char, xoff_char, param_mask);
797 793
798 hdr = (void *) ptr; ptr += sizeof(*hdr); 794 hdr = (void *) ptr; ptr += sizeof(*hdr);
799 hdr->addr = __addr(s->initiator, 0); 795 hdr->addr = __addr(s->initiator, 0);
@@ -1265,16 +1261,16 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1265 u8 xon_char = 0; 1261 u8 xon_char = 0;
1266 u8 xoff_char = 0; 1262 u8 xoff_char = 0;
1267 u16 rpn_mask = RFCOMM_RPN_PM_ALL; 1263 u16 rpn_mask = RFCOMM_RPN_PM_ALL;
1268 1264
1269 BT_DBG("dlci %d cr %d len 0x%x bitr 0x%x line 0x%x flow 0x%x xonc 0x%x xoffc 0x%x pm 0x%x", 1265 BT_DBG("dlci %d cr %d len 0x%x bitr 0x%x line 0x%x flow 0x%x xonc 0x%x xoffc 0x%x pm 0x%x",
1270 dlci, cr, len, rpn->bit_rate, rpn->line_settings, rpn->flow_ctrl, 1266 dlci, cr, len, rpn->bit_rate, rpn->line_settings, rpn->flow_ctrl,
1271 rpn->xon_char, rpn->xoff_char, rpn->param_mask); 1267 rpn->xon_char, rpn->xoff_char, rpn->param_mask);
1272 1268
1273 if (!cr) 1269 if (!cr)
1274 return 0; 1270 return 0;
1275 1271
1276 if (len == 1) { 1272 if (len == 1) {
1277 /* request: return default setting */ 1273 /* This is a request, return default settings */
1278 bit_rate = RFCOMM_RPN_BR_115200; 1274 bit_rate = RFCOMM_RPN_BR_115200;
1279 data_bits = RFCOMM_RPN_DATA_8; 1275 data_bits = RFCOMM_RPN_DATA_8;
1280 stop_bits = RFCOMM_RPN_STOP_1; 1276 stop_bits = RFCOMM_RPN_STOP_1;
@@ -1282,11 +1278,12 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1282 flow_ctrl = RFCOMM_RPN_FLOW_NONE; 1278 flow_ctrl = RFCOMM_RPN_FLOW_NONE;
1283 xon_char = RFCOMM_RPN_XON_CHAR; 1279 xon_char = RFCOMM_RPN_XON_CHAR;
1284 xoff_char = RFCOMM_RPN_XOFF_CHAR; 1280 xoff_char = RFCOMM_RPN_XOFF_CHAR;
1285
1286 goto rpn_out; 1281 goto rpn_out;
1287 } 1282 }
1288 /* check for sane values: ignore/accept bit_rate, 8 bits, 1 stop bit, no parity, 1283
1289 no flow control lines, normal XON/XOFF chars */ 1284 /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit,
1285 * no parity, no flow control lines, normal XON/XOFF chars */
1286
1290 if (rpn->param_mask & RFCOMM_RPN_PM_BITRATE) { 1287 if (rpn->param_mask & RFCOMM_RPN_PM_BITRATE) {
1291 bit_rate = rpn->bit_rate; 1288 bit_rate = rpn->bit_rate;
1292 if (bit_rate != RFCOMM_RPN_BR_115200) { 1289 if (bit_rate != RFCOMM_RPN_BR_115200) {
@@ -1295,6 +1292,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1295 rpn_mask ^= RFCOMM_RPN_PM_BITRATE; 1292 rpn_mask ^= RFCOMM_RPN_PM_BITRATE;
1296 } 1293 }
1297 } 1294 }
1295
1298 if (rpn->param_mask & RFCOMM_RPN_PM_DATA) { 1296 if (rpn->param_mask & RFCOMM_RPN_PM_DATA) {
1299 data_bits = __get_rpn_data_bits(rpn->line_settings); 1297 data_bits = __get_rpn_data_bits(rpn->line_settings);
1300 if (data_bits != RFCOMM_RPN_DATA_8) { 1298 if (data_bits != RFCOMM_RPN_DATA_8) {
@@ -1303,6 +1301,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1303 rpn_mask ^= RFCOMM_RPN_PM_DATA; 1301 rpn_mask ^= RFCOMM_RPN_PM_DATA;
1304 } 1302 }
1305 } 1303 }
1304
1306 if (rpn->param_mask & RFCOMM_RPN_PM_STOP) { 1305 if (rpn->param_mask & RFCOMM_RPN_PM_STOP) {
1307 stop_bits = __get_rpn_stop_bits(rpn->line_settings); 1306 stop_bits = __get_rpn_stop_bits(rpn->line_settings);
1308 if (stop_bits != RFCOMM_RPN_STOP_1) { 1307 if (stop_bits != RFCOMM_RPN_STOP_1) {
@@ -1311,6 +1310,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1311 rpn_mask ^= RFCOMM_RPN_PM_STOP; 1310 rpn_mask ^= RFCOMM_RPN_PM_STOP;
1312 } 1311 }
1313 } 1312 }
1313
1314 if (rpn->param_mask & RFCOMM_RPN_PM_PARITY) { 1314 if (rpn->param_mask & RFCOMM_RPN_PM_PARITY) {
1315 parity = __get_rpn_parity(rpn->line_settings); 1315 parity = __get_rpn_parity(rpn->line_settings);
1316 if (parity != RFCOMM_RPN_PARITY_NONE) { 1316 if (parity != RFCOMM_RPN_PARITY_NONE) {
@@ -1319,6 +1319,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1319 rpn_mask ^= RFCOMM_RPN_PM_PARITY; 1319 rpn_mask ^= RFCOMM_RPN_PM_PARITY;
1320 } 1320 }
1321 } 1321 }
1322
1322 if (rpn->param_mask & RFCOMM_RPN_PM_FLOW) { 1323 if (rpn->param_mask & RFCOMM_RPN_PM_FLOW) {
1323 flow_ctrl = rpn->flow_ctrl; 1324 flow_ctrl = rpn->flow_ctrl;
1324 if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { 1325 if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) {
@@ -1327,6 +1328,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1327 rpn_mask ^= RFCOMM_RPN_PM_FLOW; 1328 rpn_mask ^= RFCOMM_RPN_PM_FLOW;
1328 } 1329 }
1329 } 1330 }
1331
1330 if (rpn->param_mask & RFCOMM_RPN_PM_XON) { 1332 if (rpn->param_mask & RFCOMM_RPN_PM_XON) {
1331 xon_char = rpn->xon_char; 1333 xon_char = rpn->xon_char;
1332 if (xon_char != RFCOMM_RPN_XON_CHAR) { 1334 if (xon_char != RFCOMM_RPN_XON_CHAR) {
@@ -1335,6 +1337,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1335 rpn_mask ^= RFCOMM_RPN_PM_XON; 1337 rpn_mask ^= RFCOMM_RPN_PM_XON;
1336 } 1338 }
1337 } 1339 }
1340
1338 if (rpn->param_mask & RFCOMM_RPN_PM_XOFF) { 1341 if (rpn->param_mask & RFCOMM_RPN_PM_XOFF) {
1339 xoff_char = rpn->xoff_char; 1342 xoff_char = rpn->xoff_char;
1340 if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { 1343 if (xoff_char != RFCOMM_RPN_XOFF_CHAR) {
@@ -1345,9 +1348,8 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1345 } 1348 }
1346 1349
1347rpn_out: 1350rpn_out:
1348 rfcomm_send_rpn(s, 0, dlci, 1351 rfcomm_send_rpn(s, 0, dlci, bit_rate, data_bits, stop_bits,
1349 bit_rate, data_bits, stop_bits, parity, flow_ctrl, 1352 parity, flow_ctrl, xon_char, xoff_char, rpn_mask);
1350 xon_char, xoff_char, rpn_mask);
1351 1353
1352 return 0; 1354 return 0;
1353} 1355}
@@ -1358,14 +1360,13 @@ static int rfcomm_recv_rls(struct rfcomm_session *s, int cr, struct sk_buff *skb
1358 u8 dlci = __get_dlci(rls->dlci); 1360 u8 dlci = __get_dlci(rls->dlci);
1359 1361
1360 BT_DBG("dlci %d cr %d status 0x%x", dlci, cr, rls->status); 1362 BT_DBG("dlci %d cr %d status 0x%x", dlci, cr, rls->status);
1361 1363
1362 if (!cr) 1364 if (!cr)
1363 return 0; 1365 return 0;
1364 1366
1365 /* FIXME: We should probably do something with this 1367 /* We should probably do something with this information here. But
1366 information here. But for now it's sufficient just 1368 * for now it's sufficient just to reply -- Bluetooth 1.1 says it's
1367 to reply -- Bluetooth 1.1 says it's mandatory to 1369 * mandatory to recognise and respond to RLS */
1368 recognise and respond to RLS */
1369 1370
1370 rfcomm_send_rls(s, 0, dlci, rls->status); 1371 rfcomm_send_rls(s, 0, dlci, rls->status);
1371 1372
@@ -1381,7 +1382,7 @@ static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb
1381 BT_DBG("dlci %d cr %d v24 0x%x", dlci, cr, msc->v24_sig); 1382 BT_DBG("dlci %d cr %d v24 0x%x", dlci, cr, msc->v24_sig);
1382 1383
1383 d = rfcomm_dlc_get(s, dlci); 1384 d = rfcomm_dlc_get(s, dlci);
1384 if (!d) 1385 if (!d)
1385 return 0; 1386 return 0;
1386 1387
1387 if (cr) { 1388 if (cr) {
@@ -1389,7 +1390,7 @@ static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb
1389 set_bit(RFCOMM_TX_THROTTLED, &d->flags); 1390 set_bit(RFCOMM_TX_THROTTLED, &d->flags);
1390 else 1391 else
1391 clear_bit(RFCOMM_TX_THROTTLED, &d->flags); 1392 clear_bit(RFCOMM_TX_THROTTLED, &d->flags);
1392 1393
1393 rfcomm_dlc_lock(d); 1394 rfcomm_dlc_lock(d);
1394 if (d->modem_status) 1395 if (d->modem_status)
1395 d->modem_status(d, msc->v24_sig); 1396 d->modem_status(d, msc->v24_sig);
@@ -1398,7 +1399,7 @@ static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb
1398 rfcomm_send_msc(s, 0, dlci, msc->v24_sig); 1399 rfcomm_send_msc(s, 0, dlci, msc->v24_sig);
1399 1400
1400 d->mscex |= RFCOMM_MSCEX_RX; 1401 d->mscex |= RFCOMM_MSCEX_RX;
1401 } else 1402 } else
1402 d->mscex |= RFCOMM_MSCEX_TX; 1403 d->mscex |= RFCOMM_MSCEX_TX;
1403 1404
1404 return 0; 1405 return 0;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 63a123c5c41b..90e19eb6d3cc 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -284,7 +284,7 @@ static struct proto rfcomm_proto = {
284 .obj_size = sizeof(struct rfcomm_pinfo) 284 .obj_size = sizeof(struct rfcomm_pinfo)
285}; 285};
286 286
287static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, int prio) 287static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
288{ 288{
289 struct rfcomm_dlc *d; 289 struct rfcomm_dlc *d;
290 struct sock *sk; 290 struct sock *sk;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6304590fd36a..1bca860a6109 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -286,7 +286,7 @@ static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *de
286 skb->destructor = rfcomm_wfree; 286 skb->destructor = rfcomm_wfree;
287} 287}
288 288
289static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, int priority) 289static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority)
290{ 290{
291 if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { 291 if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) {
292 struct sk_buff *skb = alloc_skb(size, priority); 292 struct sk_buff *skb = alloc_skb(size, priority);
@@ -528,9 +528,14 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
528 struct rfcomm_dev *dev = dlc->owner; 528 struct rfcomm_dev *dev = dlc->owner;
529 if (!dev) 529 if (!dev)
530 return; 530 return;
531 531
532 BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); 532 BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
533 533
534 if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
535 if (dev->tty && !C_CLOCAL(dev->tty))
536 tty_hangup(dev->tty);
537 }
538
534 dev->modem_status = 539 dev->modem_status =
535 ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | 540 ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
536 ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | 541 ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
@@ -740,20 +745,143 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned
740 return -ENOIOCTLCMD; 745 return -ENOIOCTLCMD;
741} 746}
742 747
743#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
744
745static void rfcomm_tty_set_termios(struct tty_struct *tty, struct termios *old) 748static void rfcomm_tty_set_termios(struct tty_struct *tty, struct termios *old)
746{ 749{
747 BT_DBG("tty %p", tty); 750 struct termios *new = (struct termios *) tty->termios;
751 int old_baud_rate = tty_termios_baud_rate(old);
752 int new_baud_rate = tty_termios_baud_rate(new);
748 753
749 if ((tty->termios->c_cflag == old->c_cflag) && 754 u8 baud, data_bits, stop_bits, parity, x_on, x_off;
750 (RELEVANT_IFLAG(tty->termios->c_iflag) == RELEVANT_IFLAG(old->c_iflag))) 755 u16 changes = 0;
751 return; 756
757 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
758
759 BT_DBG("tty %p termios %p", tty, old);
760
761 /* Handle turning off CRTSCTS */
762 if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS))
763 BT_DBG("Turning off CRTSCTS unsupported");
764
765 /* Parity on/off and when on, odd/even */
766 if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) ||
767 ((old->c_cflag & PARODD) != (new->c_cflag & PARODD)) ) {
768 changes |= RFCOMM_RPN_PM_PARITY;
769 BT_DBG("Parity change detected.");
770 }
771
772 /* Mark and space parity are not supported! */
773 if (new->c_cflag & PARENB) {
774 if (new->c_cflag & PARODD) {
775 BT_DBG("Parity is ODD");
776 parity = RFCOMM_RPN_PARITY_ODD;
777 } else {
778 BT_DBG("Parity is EVEN");
779 parity = RFCOMM_RPN_PARITY_EVEN;
780 }
781 } else {
782 BT_DBG("Parity is OFF");
783 parity = RFCOMM_RPN_PARITY_NONE;
784 }
785
786 /* Setting the x_on / x_off characters */
787 if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) {
788 BT_DBG("XOFF custom");
789 x_on = new->c_cc[VSTOP];
790 changes |= RFCOMM_RPN_PM_XON;
791 } else {
792 BT_DBG("XOFF default");
793 x_on = RFCOMM_RPN_XON_CHAR;
794 }
795
796 if (old->c_cc[VSTART] != new->c_cc[VSTART]) {
797 BT_DBG("XON custom");
798 x_off = new->c_cc[VSTART];
799 changes |= RFCOMM_RPN_PM_XOFF;
800 } else {
801 BT_DBG("XON default");
802 x_off = RFCOMM_RPN_XOFF_CHAR;
803 }
804
805 /* Handle setting of stop bits */
806 if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB))
807 changes |= RFCOMM_RPN_PM_STOP;
808
809 /* POSIX does not support 1.5 stop bits and RFCOMM does not
810 * support 2 stop bits. So a request for 2 stop bits gets
811 * translated to 1.5 stop bits */
812 if (new->c_cflag & CSTOPB) {
813 stop_bits = RFCOMM_RPN_STOP_15;
814 } else {
815 stop_bits = RFCOMM_RPN_STOP_1;
816 }
817
818 /* Handle number of data bits [5-8] */
819 if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE))
820 changes |= RFCOMM_RPN_PM_DATA;
821
822 switch (new->c_cflag & CSIZE) {
823 case CS5:
824 data_bits = RFCOMM_RPN_DATA_5;
825 break;
826 case CS6:
827 data_bits = RFCOMM_RPN_DATA_6;
828 break;
829 case CS7:
830 data_bits = RFCOMM_RPN_DATA_7;
831 break;
832 case CS8:
833 data_bits = RFCOMM_RPN_DATA_8;
834 break;
835 default:
836 data_bits = RFCOMM_RPN_DATA_8;
837 break;
838 }
839
840 /* Handle baudrate settings */
841 if (old_baud_rate != new_baud_rate)
842 changes |= RFCOMM_RPN_PM_BITRATE;
752 843
753 /* handle turning off CRTSCTS */ 844 switch (new_baud_rate) {
754 if ((old->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { 845 case 2400:
755 BT_DBG("turning off CRTSCTS"); 846 baud = RFCOMM_RPN_BR_2400;
847 break;
848 case 4800:
849 baud = RFCOMM_RPN_BR_4800;
850 break;
851 case 7200:
852 baud = RFCOMM_RPN_BR_7200;
853 break;
854 case 9600:
855 baud = RFCOMM_RPN_BR_9600;
856 break;
857 case 19200:
858 baud = RFCOMM_RPN_BR_19200;
859 break;
860 case 38400:
861 baud = RFCOMM_RPN_BR_38400;
862 break;
863 case 57600:
864 baud = RFCOMM_RPN_BR_57600;
865 break;
866 case 115200:
867 baud = RFCOMM_RPN_BR_115200;
868 break;
869 case 230400:
870 baud = RFCOMM_RPN_BR_230400;
871 break;
872 default:
 873 /* 9600 is standard according to the RFCOMM specification */
874 baud = RFCOMM_RPN_BR_9600;
875 break;
876
756 } 877 }
878
879 if (changes)
880 rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud,
881 data_bits, stop_bits, parity,
882 RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes);
883
884 return;
757} 885}
758 886
759static void rfcomm_tty_throttle(struct tty_struct *tty) 887static void rfcomm_tty_throttle(struct tty_struct *tty)
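
rfcomm_send_rpn() loses its static qualifier earlier in this patch precisely so this new set_termios handler in tty.c can call it: an ordinary tcsetattr() on the RFCOMM tty is now translated into an RPN negotiation on the DLC. A hedged userspace sketch of what would trigger it (device path illustrative, error handling trimmed):

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
        struct termios tio;
        int fd = open("/dev/rfcomm0", O_RDWR | O_NOCTTY);

        if (fd < 0)
                return 1;
        tcgetattr(fd, &tio);
        cfsetospeed(&tio, B19200);      /* maps to RFCOMM_RPN_BR_19200 */
        cfsetispeed(&tio, B19200);
        tio.c_cflag = (tio.c_cflag & ~CSIZE) | CS7 | PARENB;  /* 7E1 */
        tcsetattr(fd, TCSANOW, &tio);   /* lands in rfcomm_tty_set_termios() */
        close(fd);
        return 0;
}
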
@@ -761,7 +889,7 @@ static void rfcomm_tty_throttle(struct tty_struct *tty)
761 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 889 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
762 890
763 BT_DBG("tty %p dev %p", tty, dev); 891 BT_DBG("tty %p dev %p", tty, dev);
764 892
765 rfcomm_dlc_throttle(dev->dlc); 893 rfcomm_dlc_throttle(dev->dlc);
766} 894}
767 895
@@ -770,7 +898,7 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty)
770 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 898 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
771 899
772 BT_DBG("tty %p dev %p", tty, dev); 900 BT_DBG("tty %p dev %p", tty, dev);
773 901
774 rfcomm_dlc_unthrottle(dev->dlc); 902 rfcomm_dlc_unthrottle(dev->dlc);
775} 903}
776 904
@@ -841,35 +969,35 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
841 969
842static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear) 970static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear)
843{ 971{
844 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 972 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
845 struct rfcomm_dlc *dlc = dev->dlc; 973 struct rfcomm_dlc *dlc = dev->dlc;
846 u8 v24_sig; 974 u8 v24_sig;
847 975
848 BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); 976 BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear);
849 977
850 rfcomm_dlc_get_modem_status(dlc, &v24_sig); 978 rfcomm_dlc_get_modem_status(dlc, &v24_sig);
851 979
852 if (set & TIOCM_DSR || set & TIOCM_DTR) 980 if (set & TIOCM_DSR || set & TIOCM_DTR)
853 v24_sig |= RFCOMM_V24_RTC; 981 v24_sig |= RFCOMM_V24_RTC;
854 if (set & TIOCM_RTS || set & TIOCM_CTS) 982 if (set & TIOCM_RTS || set & TIOCM_CTS)
855 v24_sig |= RFCOMM_V24_RTR; 983 v24_sig |= RFCOMM_V24_RTR;
856 if (set & TIOCM_RI) 984 if (set & TIOCM_RI)
857 v24_sig |= RFCOMM_V24_IC; 985 v24_sig |= RFCOMM_V24_IC;
858 if (set & TIOCM_CD) 986 if (set & TIOCM_CD)
859 v24_sig |= RFCOMM_V24_DV; 987 v24_sig |= RFCOMM_V24_DV;
860 988
861 if (clear & TIOCM_DSR || clear & TIOCM_DTR) 989 if (clear & TIOCM_DSR || clear & TIOCM_DTR)
862 v24_sig &= ~RFCOMM_V24_RTC; 990 v24_sig &= ~RFCOMM_V24_RTC;
863 if (clear & TIOCM_RTS || clear & TIOCM_CTS) 991 if (clear & TIOCM_RTS || clear & TIOCM_CTS)
864 v24_sig &= ~RFCOMM_V24_RTR; 992 v24_sig &= ~RFCOMM_V24_RTR;
865 if (clear & TIOCM_RI) 993 if (clear & TIOCM_RI)
866 v24_sig &= ~RFCOMM_V24_IC; 994 v24_sig &= ~RFCOMM_V24_IC;
867 if (clear & TIOCM_CD) 995 if (clear & TIOCM_CD)
868 v24_sig &= ~RFCOMM_V24_DV; 996 v24_sig &= ~RFCOMM_V24_DV;
869 997
870 rfcomm_dlc_set_modem_status(dlc, v24_sig); 998 rfcomm_dlc_set_modem_status(dlc, v24_sig);
871 999
872 return 0; 1000 return 0;
873} 1001}
874 1002
875/* ---- TTY structure ---- */ 1003/* ---- TTY structure ---- */
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 746c11fc017e..ce7ab7dfa0b2 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -418,7 +418,7 @@ static struct proto sco_proto = {
418 .obj_size = sizeof(struct sco_pinfo) 418 .obj_size = sizeof(struct sco_pinfo)
419}; 419};
420 420
421static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio) 421static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
422{ 422{
423 struct sock *sk; 423 struct sock *sk;
424 424
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e6c2200b7ca3..24396b914d11 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
23#include <asm/atomic.h> 23#include <asm/atomic.h>
24#include "br_private.h" 24#include "br_private.h"
25 25
26static kmem_cache_t *br_fdb_cache; 26static kmem_cache_t *br_fdb_cache __read_mostly;
27static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 27static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
28 const unsigned char *addr); 28 const unsigned char *addr);
29 29
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 02c632b4d325..c93d35ab95c0 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -23,10 +23,9 @@ static int ebt_target_mark(struct sk_buff **pskb, unsigned int hooknr,
23{ 23{
24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data; 24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
25 25
26 if ((*pskb)->nfmark != info->mark) { 26 if ((*pskb)->nfmark != info->mark)
27 (*pskb)->nfmark = info->mark; 27 (*pskb)->nfmark = info->mark;
28 (*pskb)->nfcache |= NFC_ALTERED; 28
29 }
30 return info->target; 29 return info->target;
31} 30}
32 31
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 01af4fcef26d..aae26ae2e61f 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -78,8 +78,8 @@ static void ulog_send(unsigned int nlgroup)
78 if (ub->qlen > 1) 78 if (ub->qlen > 1)
79 ub->lastnlh->nlmsg_type = NLMSG_DONE; 79 ub->lastnlh->nlmsg_type = NLMSG_DONE;
80 80
81 NETLINK_CB(ub->skb).dst_groups = 1 << nlgroup; 81 NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
82 netlink_broadcast(ebtulognl, ub->skb, 0, 1 << nlgroup, GFP_ATOMIC); 82 netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
83 83
84 ub->qlen = 0; 84 ub->qlen = 0;
85 ub->skb = NULL; 85 ub->skb = NULL;
@@ -162,7 +162,7 @@ static void ebt_ulog(const struct sk_buff *skb, unsigned int hooknr,
162 pm->version = EBT_ULOG_VERSION; 162 pm->version = EBT_ULOG_VERSION;
163 do_gettimeofday(&pm->stamp); 163 do_gettimeofday(&pm->stamp);
164 if (ub->qlen == 1) 164 if (ub->qlen == 1)
165 ub->skb->stamp = pm->stamp; 165 skb_set_timestamp(ub->skb, &pm->stamp);
166 pm->data_len = copy_len; 166 pm->data_len = copy_len;
167 pm->mark = skb->nfmark; 167 pm->mark = skb->nfmark;
168 pm->hook = hooknr; 168 pm->hook = hooknr;
@@ -258,7 +258,8 @@ static int __init init(void)
258 spin_lock_init(&ulog_buffers[i].lock); 258 spin_lock_init(&ulog_buffers[i].lock);
259 } 259 }
260 260
261 ebtulognl = netlink_kernel_create(NETLINK_NFLOG, NULL); 261 ebtulognl = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS,
262 NULL, THIS_MODULE);
262 if (!ebtulognl) 263 if (!ebtulognl)
263 ret = -ENOMEM; 264 ret = -ENOMEM;
264 else if ((ret = ebt_register_watcher(&ulog))) 265 else if ((ret = ebt_register_watcher(&ulog)))
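
netlink_kernel_create() grows two parameters in this series, visible in the hunk above: the number of multicast groups the socket supports and the owning module, so group allocation and module refcounting are handled centrally. A sketch of the new call under the same assumptions as ebt_ulog (no input callback, since the socket only broadcasts to userspace):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter_bridge/ebt_ulog.h>

static struct sock *nlsk;

static int __init ulog_like_init(void)
{
        /* unit, group count, input callback, owning module */
        nlsk = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS,
                                     NULL, THIS_MODULE);
        return nlsk ? 0 : -ENOMEM;
}
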
diff --git a/net/core/Makefile b/net/core/Makefile
index f5f5e58943e8..630da0f0579e 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,6 @@ obj-y += dev.o ethtool.o dev_mcast.o dst.o \
12 12
13obj-$(CONFIG_XFRM) += flow.o 13obj-$(CONFIG_XFRM) += flow.o
14obj-$(CONFIG_SYSFS) += net-sysfs.o 14obj-$(CONFIG_SYSFS) += net-sysfs.o
15obj-$(CONFIG_NETFILTER) += netfilter.o
16obj-$(CONFIG_NET_DIVERT) += dv.o 15obj-$(CONFIG_NET_DIVERT) += dv.o
17obj-$(CONFIG_NET_PKTGEN) += pktgen.o 16obj-$(CONFIG_NET_PKTGEN) += pktgen.o
18obj-$(CONFIG_NET_RADIO) += wireless.o 17obj-$(CONFIG_NET_RADIO) += wireless.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fcee054b6f75..da9bf71421a7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -43,7 +43,6 @@
43#include <linux/errno.h> 43#include <linux/errno.h>
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <linux/inet.h> 45#include <linux/inet.h>
46#include <linux/tcp.h>
47#include <linux/netdevice.h> 46#include <linux/netdevice.h>
48#include <linux/rtnetlink.h> 47#include <linux/rtnetlink.h>
49#include <linux/poll.h> 48#include <linux/poll.h>
@@ -51,9 +50,10 @@
51 50
52#include <net/protocol.h> 51#include <net/protocol.h>
53#include <linux/skbuff.h> 52#include <linux/skbuff.h>
54#include <net/sock.h>
55#include <net/checksum.h>
56 53
54#include <net/checksum.h>
55#include <net/sock.h>
56#include <net/tcp_states.h>
57 57
58/* 58/*
59 * Is a socket 'connection oriented' ? 59 * Is a socket 'connection oriented' ?
diff --git a/net/core/dev.c b/net/core/dev.c
index faf59b02c4bf..c01511e3d0c1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -267,10 +267,6 @@ void dev_add_pack(struct packet_type *pt)
267 spin_unlock_bh(&ptype_lock); 267 spin_unlock_bh(&ptype_lock);
268} 268}
269 269
270extern void linkwatch_run_queue(void);
271
272
273
274/** 270/**
275 * __dev_remove_pack - remove packet handler 271 * __dev_remove_pack - remove packet handler
276 * @pt: packet type declaration 272 * @pt: packet type declaration
@@ -1009,13 +1005,22 @@ void net_disable_timestamp(void)
1009 atomic_dec(&netstamp_needed); 1005 atomic_dec(&netstamp_needed);
1010} 1006}
1011 1007
1012static inline void net_timestamp(struct timeval *stamp) 1008void __net_timestamp(struct sk_buff *skb)
1009{
1010 struct timeval tv;
1011
1012 do_gettimeofday(&tv);
1013 skb_set_timestamp(skb, &tv);
1014}
1015EXPORT_SYMBOL(__net_timestamp);
1016
1017static inline void net_timestamp(struct sk_buff *skb)
1013{ 1018{
1014 if (atomic_read(&netstamp_needed)) 1019 if (atomic_read(&netstamp_needed))
1015 do_gettimeofday(stamp); 1020 __net_timestamp(skb);
1016 else { 1021 else {
1017 stamp->tv_sec = 0; 1022 skb->tstamp.off_sec = 0;
1018 stamp->tv_usec = 0; 1023 skb->tstamp.off_usec = 0;
1019 } 1024 }
1020} 1025}
1021 1026
@@ -1027,7 +1032,8 @@ static inline void net_timestamp(struct timeval *stamp)
1027void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 1032void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1028{ 1033{
1029 struct packet_type *ptype; 1034 struct packet_type *ptype;
1030 net_timestamp(&skb->stamp); 1035
1036 net_timestamp(skb);
1031 1037
1032 rcu_read_lock(); 1038 rcu_read_lock();
1033 list_for_each_entry_rcu(ptype, &ptype_all, list) { 1039 list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1058,7 +1064,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1058 1064
1059 skb2->h.raw = skb2->nh.raw; 1065 skb2->h.raw = skb2->nh.raw;
1060 skb2->pkt_type = PACKET_OUTGOING; 1066 skb2->pkt_type = PACKET_OUTGOING;
1061 ptype->func(skb2, skb->dev, ptype); 1067 ptype->func(skb2, skb->dev, ptype, skb->dev);
1062 } 1068 }
1063 } 1069 }
1064 rcu_read_unlock(); 1070 rcu_read_unlock();
@@ -1123,8 +1129,6 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1123#define illegal_highdma(dev, skb) (0) 1129#define illegal_highdma(dev, skb) (0)
1124#endif 1130#endif
1125 1131
1126extern void skb_release_data(struct sk_buff *);
1127
1128/* Keep head the same: replace data */ 1132/* Keep head the same: replace data */
1129int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) 1133int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
1130{ 1134{
@@ -1379,8 +1383,8 @@ int netif_rx(struct sk_buff *skb)
1379 if (netpoll_rx(skb)) 1383 if (netpoll_rx(skb))
1380 return NET_RX_DROP; 1384 return NET_RX_DROP;
1381 1385
1382 if (!skb->stamp.tv_sec) 1386 if (!skb->tstamp.off_sec)
1383 net_timestamp(&skb->stamp); 1387 net_timestamp(skb);
1384 1388
1385 /* 1389 /*
1386 * The code is rearranged so that the path is the most 1390 * The code is rearranged so that the path is the most
@@ -1425,14 +1429,14 @@ int netif_rx_ni(struct sk_buff *skb)
1425 1429
1426EXPORT_SYMBOL(netif_rx_ni); 1430EXPORT_SYMBOL(netif_rx_ni);
1427 1431
1428static __inline__ void skb_bond(struct sk_buff *skb) 1432static inline struct net_device *skb_bond(struct sk_buff *skb)
1429{ 1433{
1430 struct net_device *dev = skb->dev; 1434 struct net_device *dev = skb->dev;
1431 1435
1432 if (dev->master) { 1436 if (dev->master)
1433 skb->real_dev = skb->dev;
1434 skb->dev = dev->master; 1437 skb->dev = dev->master;
1435 } 1438
1439 return dev;
1436} 1440}
1437 1441
1438static void net_tx_action(struct softirq_action *h) 1442static void net_tx_action(struct softirq_action *h)
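
skb_bond() now returns the original device instead of stashing it in skb->real_dev, and every packet_type handler receives it as a fourth argument; the ax25_kiss_rcv hunk at the top of this section is the same change seen from the callee side. A sketch of a handler under the new signature (names and the ETH_P_ALL choice are illustrative):

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_rcv(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
{
        /* dev may be the bond master; orig_dev is the slave the
         * frame actually arrived on. deliver_skb() took a reference,
         * so the handler consumes it. */
        kfree_skb(skb);
        return 0;
}

static struct packet_type demo_pt = {
        .type = __constant_htons(ETH_P_ALL),
        .func = demo_rcv,
};

/* dev_add_pack(&demo_pt) registers it; dev_remove_pack() undoes it. */
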
@@ -1482,10 +1486,11 @@ static void net_tx_action(struct softirq_action *h)
1482} 1486}
1483 1487
1484static __inline__ int deliver_skb(struct sk_buff *skb, 1488static __inline__ int deliver_skb(struct sk_buff *skb,
1485 struct packet_type *pt_prev) 1489 struct packet_type *pt_prev,
1490 struct net_device *orig_dev)
1486{ 1491{
1487 atomic_inc(&skb->users); 1492 atomic_inc(&skb->users);
1488 return pt_prev->func(skb, skb->dev, pt_prev); 1493 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1489} 1494}
1490 1495
1491#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) 1496#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
@@ -1496,7 +1501,8 @@ struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1496void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent); 1501void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
1497 1502
1498static __inline__ int handle_bridge(struct sk_buff **pskb, 1503static __inline__ int handle_bridge(struct sk_buff **pskb,
1499 struct packet_type **pt_prev, int *ret) 1504 struct packet_type **pt_prev, int *ret,
1505 struct net_device *orig_dev)
1500{ 1506{
1501 struct net_bridge_port *port; 1507 struct net_bridge_port *port;
1502 1508
@@ -1505,14 +1511,14 @@ static __inline__ int handle_bridge(struct sk_buff **pskb,
1505 return 0; 1511 return 0;
1506 1512
1507 if (*pt_prev) { 1513 if (*pt_prev) {
1508 *ret = deliver_skb(*pskb, *pt_prev); 1514 *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
1509 *pt_prev = NULL; 1515 *pt_prev = NULL;
1510 } 1516 }
1511 1517
1512 return br_handle_frame_hook(port, pskb); 1518 return br_handle_frame_hook(port, pskb);
1513} 1519}
1514#else 1520#else
1515#define handle_bridge(skb, pt_prev, ret) (0) 1521#define handle_bridge(skb, pt_prev, ret, orig_dev) (0)
1516#endif 1522#endif
1517 1523
1518#ifdef CONFIG_NET_CLS_ACT 1524#ifdef CONFIG_NET_CLS_ACT
@@ -1534,17 +1540,14 @@ static int ing_filter(struct sk_buff *skb)
1534 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); 1540 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1535 if (MAX_RED_LOOP < ttl++) { 1541 if (MAX_RED_LOOP < ttl++) {
1536 printk("Redir loop detected Dropping packet (%s->%s)\n", 1542 printk("Redir loop detected Dropping packet (%s->%s)\n",
1537 skb->input_dev?skb->input_dev->name:"??",skb->dev->name); 1543 skb->input_dev->name, skb->dev->name);
1538 return TC_ACT_SHOT; 1544 return TC_ACT_SHOT;
1539 } 1545 }
1540 1546
1541 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl); 1547 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1542 1548
1543 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS); 1549 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1544 if (NULL == skb->input_dev) { 1550
1545 skb->input_dev = skb->dev;
1546 printk("ing_filter: fixed %s out %s\n",skb->input_dev->name,skb->dev->name);
1547 }
1548 spin_lock(&dev->ingress_lock); 1551 spin_lock(&dev->ingress_lock);
1549 if ((q = dev->qdisc_ingress) != NULL) 1552 if ((q = dev->qdisc_ingress) != NULL)
1550 result = q->enqueue(skb, q); 1553 result = q->enqueue(skb, q);
@@ -1559,6 +1562,7 @@ static int ing_filter(struct sk_buff *skb)
1559int netif_receive_skb(struct sk_buff *skb) 1562int netif_receive_skb(struct sk_buff *skb)
1560{ 1563{
1561 struct packet_type *ptype, *pt_prev; 1564 struct packet_type *ptype, *pt_prev;
1565 struct net_device *orig_dev;
1562 int ret = NET_RX_DROP; 1566 int ret = NET_RX_DROP;
1563 unsigned short type; 1567 unsigned short type;
1564 1568
@@ -1566,10 +1570,13 @@ int netif_receive_skb(struct sk_buff *skb)
1566 if (skb->dev->poll && netpoll_rx(skb)) 1570 if (skb->dev->poll && netpoll_rx(skb))
1567 return NET_RX_DROP; 1571 return NET_RX_DROP;
1568 1572
1569 if (!skb->stamp.tv_sec) 1573 if (!skb->tstamp.off_sec)
1570 net_timestamp(&skb->stamp); 1574 net_timestamp(skb);
1575
1576 if (!skb->input_dev)
1577 skb->input_dev = skb->dev;
1571 1578
1572 skb_bond(skb); 1579 orig_dev = skb_bond(skb);
1573 1580
1574 __get_cpu_var(netdev_rx_stat).total++; 1581 __get_cpu_var(netdev_rx_stat).total++;
1575 1582
@@ -1590,14 +1597,14 @@ int netif_receive_skb(struct sk_buff *skb)
1590 list_for_each_entry_rcu(ptype, &ptype_all, list) { 1597 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1591 if (!ptype->dev || ptype->dev == skb->dev) { 1598 if (!ptype->dev || ptype->dev == skb->dev) {
1592 if (pt_prev) 1599 if (pt_prev)
1593 ret = deliver_skb(skb, pt_prev); 1600 ret = deliver_skb(skb, pt_prev, orig_dev);
1594 pt_prev = ptype; 1601 pt_prev = ptype;
1595 } 1602 }
1596 } 1603 }
1597 1604
1598#ifdef CONFIG_NET_CLS_ACT 1605#ifdef CONFIG_NET_CLS_ACT
1599 if (pt_prev) { 1606 if (pt_prev) {
1600 ret = deliver_skb(skb, pt_prev); 1607 ret = deliver_skb(skb, pt_prev, orig_dev);
 1601 pt_prev = NULL; /* no one else should process this after */ 1608 pt_prev = NULL; /* no one else should process this after */
1602 } else { 1609 } else {
1603 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); 1610 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
@@ -1616,7 +1623,7 @@ ncls:
1616 1623
1617 handle_diverter(skb); 1624 handle_diverter(skb);
1618 1625
1619 if (handle_bridge(&skb, &pt_prev, &ret)) 1626 if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
1620 goto out; 1627 goto out;
1621 1628
1622 type = skb->protocol; 1629 type = skb->protocol;
@@ -1624,13 +1631,13 @@ ncls:
1624 if (ptype->type == type && 1631 if (ptype->type == type &&
1625 (!ptype->dev || ptype->dev == skb->dev)) { 1632 (!ptype->dev || ptype->dev == skb->dev)) {
1626 if (pt_prev) 1633 if (pt_prev)
1627 ret = deliver_skb(skb, pt_prev); 1634 ret = deliver_skb(skb, pt_prev, orig_dev);
1628 pt_prev = ptype; 1635 pt_prev = ptype;
1629 } 1636 }
1630 } 1637 }
1631 1638
1632 if (pt_prev) { 1639 if (pt_prev) {
1633 ret = pt_prev->func(skb, skb->dev, pt_prev); 1640 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1634 } else { 1641 } else {
1635 kfree_skb(skb); 1642 kfree_skb(skb);
 1636 /* Jamal, now you will not be able to escape explaining 1643 /* Jamal, now you will not be able to escape explaining
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a3eeb88e1c81..289c1b5a8e4a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -81,6 +81,18 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
81 return 0; 81 return 0;
82} 82}
83 83
84int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *addr, u8 *data)
85{
86 unsigned char len = dev->addr_len;
87 if ( addr->size < len )
88 return -ETOOSMALL;
89
90 addr->size = len;
91 memcpy(data, dev->perm_addr, len);
92 return 0;
93}
94
95
84/* Handlers for each ethtool command */ 96/* Handlers for each ethtool command */
85 97
86static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 98static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
@@ -683,6 +695,39 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
683 return ret; 695 return ret;
684} 696}
685 697
698static int ethtool_get_perm_addr(struct net_device *dev, void *useraddr)
699{
700 struct ethtool_perm_addr epaddr;
701 u8 *data;
702 int ret;
703
704 if (!dev->ethtool_ops->get_perm_addr)
705 return -EOPNOTSUPP;
706
707 if (copy_from_user(&epaddr,useraddr,sizeof(epaddr)))
708 return -EFAULT;
709
710 data = kmalloc(epaddr.size, GFP_USER);
711 if (!data)
712 return -ENOMEM;
713
714 ret = dev->ethtool_ops->get_perm_addr(dev,&epaddr,data);
715 if (ret)
716 return ret;
717
718 ret = -EFAULT;
719 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
720 goto out;
721 useraddr += sizeof(epaddr);
722 if (copy_to_user(useraddr, data, epaddr.size))
723 goto out;
724 ret = 0;
725
726 out:
727 kfree(data);
728 return ret;
729}
730
686/* The main entry point in this file. Called from net/core/dev.c */ 731/* The main entry point in this file. Called from net/core/dev.c */
687 732
688int dev_ethtool(struct ifreq *ifr) 733int dev_ethtool(struct ifreq *ifr)
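
The new ETHTOOL_GPERMADDR op rides the usual SIOCETHTOOL ioctl: userspace passes a struct ethtool_perm_addr with size set to its buffer length and gets the hardware's permanent MAC address back. A hedged userspace sketch, assuming the 2.6.14-era uapi headers and an Ethernet-sized address (interface name illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct ethtool_perm_addr *epa;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        /* Flexible data[] follows the header; 6 = Ethernet addr_len. */
        epa = calloc(1, sizeof(*epa) + 6);
        epa->cmd = ETHTOOL_GPERMADDR;
        epa->size = 6;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)epa;

        if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("perm addr %02x:%02x:%02x:%02x:%02x:%02x\n",
                       epa->data[0], epa->data[1], epa->data[2],
                       epa->data[3], epa->data[4], epa->data[5]);
        return 0;
}
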
@@ -806,6 +851,9 @@ int dev_ethtool(struct ifreq *ifr)
806 case ETHTOOL_GSTATS: 851 case ETHTOOL_GSTATS:
807 rc = ethtool_get_stats(dev, useraddr); 852 rc = ethtool_get_stats(dev, useraddr);
808 break; 853 break;
854 case ETHTOOL_GPERMADDR:
855 rc = ethtool_get_perm_addr(dev, useraddr);
856 break;
809 default: 857 default:
810 rc = -EOPNOTSUPP; 858 rc = -EOPNOTSUPP;
811 } 859 }
@@ -826,6 +874,7 @@ int dev_ethtool(struct ifreq *ifr)
826 874
827EXPORT_SYMBOL(dev_ethtool); 875EXPORT_SYMBOL(dev_ethtool);
828EXPORT_SYMBOL(ethtool_op_get_link); 876EXPORT_SYMBOL(ethtool_op_get_link);
877EXPORT_SYMBOL_GPL(ethtool_op_get_perm_addr);
829EXPORT_SYMBOL(ethtool_op_get_sg); 878EXPORT_SYMBOL(ethtool_op_get_sg);
830EXPORT_SYMBOL(ethtool_op_get_tso); 879EXPORT_SYMBOL(ethtool_op_get_tso);
831EXPORT_SYMBOL(ethtool_op_get_tx_csum); 880EXPORT_SYMBOL(ethtool_op_get_tx_csum);
diff --git a/net/core/flow.c b/net/core/flow.c
index f289570b15a3..7e95b39de9fd 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
42 42
43#define flow_table(cpu) (per_cpu(flow_tables, cpu)) 43#define flow_table(cpu) (per_cpu(flow_tables, cpu))
44 44
45static kmem_cache_t *flow_cachep; 45static kmem_cache_t *flow_cachep __read_mostly;
46 46
47static int flow_lwm, flow_hwm; 47static int flow_lwm, flow_hwm;
48 48
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1beb782ac41b..39fc55edf691 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1217,7 +1217,7 @@ static void neigh_proxy_process(unsigned long arg)
1217 1217
1218 while (skb != (struct sk_buff *)&tbl->proxy_queue) { 1218 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1219 struct sk_buff *back = skb; 1219 struct sk_buff *back = skb;
1220 long tdif = back->stamp.tv_usec - now; 1220 long tdif = NEIGH_CB(back)->sched_next - now;
1221 1221
1222 skb = skb->next; 1222 skb = skb->next;
1223 if (tdif <= 0) { 1223 if (tdif <= 0) {
@@ -1248,8 +1248,9 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1248 kfree_skb(skb); 1248 kfree_skb(skb);
1249 return; 1249 return;
1250 } 1250 }
1251 skb->stamp.tv_sec = LOCALLY_ENQUEUED; 1251
1252 skb->stamp.tv_usec = sched_next; 1252 NEIGH_CB(skb)->sched_next = sched_next;
1253 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1253 1254
1254 spin_lock(&tbl->proxy_queue.lock); 1255 spin_lock(&tbl->proxy_queue.lock);
1255 if (del_timer(&tbl->proxy_timer)) { 1256 if (del_timer(&tbl->proxy_timer)) {
@@ -2342,8 +2343,8 @@ void neigh_app_ns(struct neighbour *n)
2342 } 2343 }
2343 nlh = (struct nlmsghdr *)skb->data; 2344 nlh = (struct nlmsghdr *)skb->data;
2344 nlh->nlmsg_flags = NLM_F_REQUEST; 2345 nlh->nlmsg_flags = NLM_F_REQUEST;
2345 NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH; 2346 NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
2346 netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC); 2347 netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2347} 2348}
2348 2349
2349static void neigh_app_notify(struct neighbour *n) 2350static void neigh_app_notify(struct neighbour *n)
@@ -2360,8 +2361,8 @@ static void neigh_app_notify(struct neighbour *n)
2360 return; 2361 return;
2361 } 2362 }
2362 nlh = (struct nlmsghdr *)skb->data; 2363 nlh = (struct nlmsghdr *)skb->data;
2363 NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH; 2364 NETLINK_CB(skb).dst_group = RTNLGRP_NEIGH;
2364 netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC); 2365 netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2365} 2366}
2366 2367
2367#endif /* CONFIG_ARPD */ 2368#endif /* CONFIG_ARPD */
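
The neighbour hunks stop overloading skb->stamp as scratch space; proxy scheduling data now lives in the skb's control buffer behind NEIGH_CB(), with sched_next and flags used exactly as in neigh_proxy_process() and pneigh_enqueue() above. The shape of that control block, redeclared here only for illustration (field types and the flag value are assumptions):

#include <linux/skbuff.h>

struct neighbour_cb {
        unsigned long sched_next;       /* jiffies when the proxy reply is due */
        unsigned int flags;
};

#define LOCALLY_ENQUEUED 0x1            /* illustrative value */
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)

/* Queue a proxied ARP/NDISC reply for later delivery. */
static void mark_for_proxy(struct sk_buff *skb, unsigned long when)
{
        NEIGH_CB(skb)->sched_next = when;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
}
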
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
deleted file mode 100644
index 076c156d5eda..000000000000
--- a/net/core/netfilter.c
+++ /dev/null
@@ -1,648 +0,0 @@
1/* netfilter.c: look after the filters for various protocols.
2 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
3 *
4 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
5 * way.
6 *
7 * Rusty Russell (C)2000 -- This code is GPL.
8 *
9 * February 2000: Modified by James Morris to have 1 queue per protocol.
10 * 15-Mar-2000: Added NF_REPEAT --RR.
11 * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
12 */
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/netfilter.h>
16#include <net/protocol.h>
17#include <linux/init.h>
18#include <linux/skbuff.h>
19#include <linux/wait.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/if.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/tcp.h>
26#include <linux/udp.h>
27#include <linux/icmp.h>
28#include <net/sock.h>
29#include <net/route.h>
30#include <linux/ip.h>
31
32/* In this code, we can be waiting indefinitely for userspace to
33 * service a packet if a hook returns NF_QUEUE. We could keep a count
34 * of skbuffs queued for userspace, and not deregister a hook unless
35 * this is zero, but that sucks. Now, we simply check when the
36 * packets come back: if the hook is gone, the packet is discarded. */
37#ifdef CONFIG_NETFILTER_DEBUG
38#define NFDEBUG(format, args...) printk(format , ## args)
39#else
40#define NFDEBUG(format, args...)
41#endif
42
43/* Sockopts only registered and called from user context, so
44 net locking would be overkill. Also, [gs]etsockopt calls may
45 sleep. */
46static DECLARE_MUTEX(nf_sockopt_mutex);
47
48struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
49static LIST_HEAD(nf_sockopts);
50static DEFINE_SPINLOCK(nf_hook_lock);
51
52/*
53 * A queue handler may be registered for each protocol. Each is protected by
 54 * long term mutex. The handler must provide an outfn() to accept packets
55 * for queueing and must reinject all packets it receives, no matter what.
56 */
57static struct nf_queue_handler_t {
58 nf_queue_outfn_t outfn;
59 void *data;
60} queue_handler[NPROTO];
61static DEFINE_RWLOCK(queue_handler_lock);
62
63int nf_register_hook(struct nf_hook_ops *reg)
64{
65 struct list_head *i;
66
67 spin_lock_bh(&nf_hook_lock);
68 list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
69 if (reg->priority < ((struct nf_hook_ops *)i)->priority)
70 break;
71 }
72 list_add_rcu(&reg->list, i->prev);
73 spin_unlock_bh(&nf_hook_lock);
74
75 synchronize_net();
76 return 0;
77}
78
79void nf_unregister_hook(struct nf_hook_ops *reg)
80{
81 spin_lock_bh(&nf_hook_lock);
82 list_del_rcu(&reg->list);
83 spin_unlock_bh(&nf_hook_lock);
84
85 synchronize_net();
86}
87
88/* Do exclusive ranges overlap? */
89static inline int overlap(int min1, int max1, int min2, int max2)
90{
91 return max1 > min2 && min1 < max2;
92}
93
94/* Functions to register sockopt ranges (exclusive). */
95int nf_register_sockopt(struct nf_sockopt_ops *reg)
96{
97 struct list_head *i;
98 int ret = 0;
99
100 if (down_interruptible(&nf_sockopt_mutex) != 0)
101 return -EINTR;
102
103 list_for_each(i, &nf_sockopts) {
104 struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
105 if (ops->pf == reg->pf
106 && (overlap(ops->set_optmin, ops->set_optmax,
107 reg->set_optmin, reg->set_optmax)
108 || overlap(ops->get_optmin, ops->get_optmax,
109 reg->get_optmin, reg->get_optmax))) {
110 NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
111 ops->set_optmin, ops->set_optmax,
112 ops->get_optmin, ops->get_optmax,
113 reg->set_optmin, reg->set_optmax,
114 reg->get_optmin, reg->get_optmax);
115 ret = -EBUSY;
116 goto out;
117 }
118 }
119
120 list_add(&reg->list, &nf_sockopts);
121out:
122 up(&nf_sockopt_mutex);
123 return ret;
124}
125
126void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
127{
128 /* No point being interruptible: we're probably in cleanup_module() */
129 restart:
130 down(&nf_sockopt_mutex);
131 if (reg->use != 0) {
132 /* To be woken by nf_sockopt call... */
133 /* FIXME: Stuart Young's name appears gratuitously. */
134 set_current_state(TASK_UNINTERRUPTIBLE);
135 reg->cleanup_task = current;
136 up(&nf_sockopt_mutex);
137 schedule();
138 goto restart;
139 }
140 list_del(&reg->list);
141 up(&nf_sockopt_mutex);
142}
143
144/* Call get/setsockopt() */
145static int nf_sockopt(struct sock *sk, int pf, int val,
146 char __user *opt, int *len, int get)
147{
148 struct list_head *i;
149 struct nf_sockopt_ops *ops;
150 int ret;
151
152 if (down_interruptible(&nf_sockopt_mutex) != 0)
153 return -EINTR;
154
155 list_for_each(i, &nf_sockopts) {
156 ops = (struct nf_sockopt_ops *)i;
157 if (ops->pf == pf) {
158 if (get) {
159 if (val >= ops->get_optmin
160 && val < ops->get_optmax) {
161 ops->use++;
162 up(&nf_sockopt_mutex);
163 ret = ops->get(sk, val, opt, len);
164 goto out;
165 }
166 } else {
167 if (val >= ops->set_optmin
168 && val < ops->set_optmax) {
169 ops->use++;
170 up(&nf_sockopt_mutex);
171 ret = ops->set(sk, val, opt, *len);
172 goto out;
173 }
174 }
175 }
176 }
177 up(&nf_sockopt_mutex);
178 return -ENOPROTOOPT;
179
180 out:
181 down(&nf_sockopt_mutex);
182 ops->use--;
183 if (ops->cleanup_task)
184 wake_up_process(ops->cleanup_task);
185 up(&nf_sockopt_mutex);
186 return ret;
187}
188
189int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
190 int len)
191{
192 return nf_sockopt(sk, pf, val, opt, &len, 0);
193}
194
195int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
196{
197 return nf_sockopt(sk, pf, val, opt, len, 1);
198}
199
200static unsigned int nf_iterate(struct list_head *head,
201 struct sk_buff **skb,
202 int hook,
203 const struct net_device *indev,
204 const struct net_device *outdev,
205 struct list_head **i,
206 int (*okfn)(struct sk_buff *),
207 int hook_thresh)
208{
209 unsigned int verdict;
210
211 /*
212 * The caller must not block between calls to this
213 * function because of risk of continuing from deleted element.
214 */
215 list_for_each_continue_rcu(*i, head) {
216 struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
217
218 if (hook_thresh > elem->priority)
219 continue;
220
221 /* Optimization: we don't need to hold module
222 reference here, since function can't sleep. --RR */
223 verdict = elem->hook(hook, skb, indev, outdev, okfn);
224 if (verdict != NF_ACCEPT) {
225#ifdef CONFIG_NETFILTER_DEBUG
226 if (unlikely(verdict > NF_MAX_VERDICT)) {
227 NFDEBUG("Evil return from %p(%u).\n",
228 elem->hook, hook);
229 continue;
230 }
231#endif
232 if (verdict != NF_REPEAT)
233 return verdict;
234 *i = (*i)->prev;
235 }
236 }
237 return NF_ACCEPT;
238}
239
240int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
241{
242 int ret;
243
244 write_lock_bh(&queue_handler_lock);
245 if (queue_handler[pf].outfn)
246 ret = -EBUSY;
247 else {
248 queue_handler[pf].outfn = outfn;
249 queue_handler[pf].data = data;
250 ret = 0;
251 }
252 write_unlock_bh(&queue_handler_lock);
253
254 return ret;
255}
256
257/* The caller must flush their queue before this */
258int nf_unregister_queue_handler(int pf)
259{
260 write_lock_bh(&queue_handler_lock);
261 queue_handler[pf].outfn = NULL;
262 queue_handler[pf].data = NULL;
263 write_unlock_bh(&queue_handler_lock);
264
265 return 0;
266}
267
268/*
269 * Any packet that leaves via this function must come back
270 * through nf_reinject().
271 */
272static int nf_queue(struct sk_buff *skb,
273 struct list_head *elem,
274 int pf, unsigned int hook,
275 struct net_device *indev,
276 struct net_device *outdev,
277 int (*okfn)(struct sk_buff *))
278{
279 int status;
280 struct nf_info *info;
281#ifdef CONFIG_BRIDGE_NETFILTER
282 struct net_device *physindev = NULL;
283 struct net_device *physoutdev = NULL;
284#endif
285
286 /* QUEUE == DROP if no one is waiting, to be safe. */
287 read_lock(&queue_handler_lock);
288 if (!queue_handler[pf].outfn) {
289 read_unlock(&queue_handler_lock);
290 kfree_skb(skb);
291 return 1;
292 }
293
294 info = kmalloc(sizeof(*info), GFP_ATOMIC);
295 if (!info) {
296 if (net_ratelimit())
297 printk(KERN_ERR "OOM queueing packet %p\n",
298 skb);
299 read_unlock(&queue_handler_lock);
300 kfree_skb(skb);
301 return 1;
302 }
303
304 *info = (struct nf_info) {
305 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
306
307 /* If it's going away, ignore hook. */
308 if (!try_module_get(info->elem->owner)) {
309 read_unlock(&queue_handler_lock);
310 kfree(info);
311 return 0;
312 }
313
314 /* Bump dev refs so they don't vanish while packet is out */
315 if (indev) dev_hold(indev);
316 if (outdev) dev_hold(outdev);
317
318#ifdef CONFIG_BRIDGE_NETFILTER
319 if (skb->nf_bridge) {
320 physindev = skb->nf_bridge->physindev;
321 if (physindev) dev_hold(physindev);
322 physoutdev = skb->nf_bridge->physoutdev;
323 if (physoutdev) dev_hold(physoutdev);
324 }
325#endif
326
327 status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
328 read_unlock(&queue_handler_lock);
329
330 if (status < 0) {
331 /* James M doesn't say fuck enough. */
332 if (indev) dev_put(indev);
333 if (outdev) dev_put(outdev);
334#ifdef CONFIG_BRIDGE_NETFILTER
335 if (physindev) dev_put(physindev);
336 if (physoutdev) dev_put(physoutdev);
337#endif
338 module_put(info->elem->owner);
339 kfree(info);
340 kfree_skb(skb);
341 return 1;
342 }
343 return 1;
344}
345
346/* Returns 1 if okfn() needs to be executed by the caller,
347 * -EPERM for NF_DROP, 0 otherwise. */
348int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
349 struct net_device *indev,
350 struct net_device *outdev,
351 int (*okfn)(struct sk_buff *),
352 int hook_thresh)
353{
354 struct list_head *elem;
355 unsigned int verdict;
356 int ret = 0;
357
358 /* We may already have this, but read-locks nest anyway */
359 rcu_read_lock();
360
361 elem = &nf_hooks[pf][hook];
362next_hook:
363 verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
364 outdev, &elem, okfn, hook_thresh);
365 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
366 ret = 1;
367 goto unlock;
368 } else if (verdict == NF_DROP) {
369 kfree_skb(*pskb);
370 ret = -EPERM;
371 } else if (verdict == NF_QUEUE) {
372 NFDEBUG("nf_hook: Verdict = QUEUE.\n");
373 if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
374 goto next_hook;
375 }
376unlock:
377 rcu_read_unlock();
378 return ret;
379}
380
381void nf_reinject(struct sk_buff *skb, struct nf_info *info,
382 unsigned int verdict)
383{
384 struct list_head *elem = &info->elem->list;
385 struct list_head *i;
386
387 rcu_read_lock();
388
389 /* Release those devices we held, or Alexey will kill me. */
390 if (info->indev) dev_put(info->indev);
391 if (info->outdev) dev_put(info->outdev);
392#ifdef CONFIG_BRIDGE_NETFILTER
393 if (skb->nf_bridge) {
394 if (skb->nf_bridge->physindev)
395 dev_put(skb->nf_bridge->physindev);
396 if (skb->nf_bridge->physoutdev)
397 dev_put(skb->nf_bridge->physoutdev);
398 }
399#endif
400
401 /* Drop reference to owner of hook which queued us. */
402 module_put(info->elem->owner);
403
404 list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
405 if (i == elem)
406 break;
407 }
408
409 if (elem == &nf_hooks[info->pf][info->hook]) {
410 /* The module which sent it to userspace is gone. */
411 NFDEBUG("%s: module disappeared, dropping packet.\n",
412 __FUNCTION__);
413 verdict = NF_DROP;
414 }
415
416 /* Continue traversal iff userspace said ok... */
417 if (verdict == NF_REPEAT) {
418 elem = elem->prev;
419 verdict = NF_ACCEPT;
420 }
421
422 if (verdict == NF_ACCEPT) {
423 next_hook:
424 verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
425 &skb, info->hook,
426 info->indev, info->outdev, &elem,
427 info->okfn, INT_MIN);
428 }
429
430 switch (verdict) {
431 case NF_ACCEPT:
432 info->okfn(skb);
433 break;
434
435 case NF_QUEUE:
436 if (!nf_queue(skb, elem, info->pf, info->hook,
437 info->indev, info->outdev, info->okfn))
438 goto next_hook;
439 break;
440 }
441 rcu_read_unlock();
442
443 if (verdict == NF_DROP)
444 kfree_skb(skb);
445
446 kfree(info);
447 return;
448}
449
450#ifdef CONFIG_INET
451/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
452int ip_route_me_harder(struct sk_buff **pskb)
453{
454 struct iphdr *iph = (*pskb)->nh.iph;
455 struct rtable *rt;
456 struct flowi fl = {};
457 struct dst_entry *odst;
458 unsigned int hh_len;
459
460 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
461 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
462 */
463 if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
464 fl.nl_u.ip4_u.daddr = iph->daddr;
465 fl.nl_u.ip4_u.saddr = iph->saddr;
466 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
467 fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
468#ifdef CONFIG_IP_ROUTE_FWMARK
469 fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
470#endif
471 fl.proto = iph->protocol;
472 if (ip_route_output_key(&rt, &fl) != 0)
473 return -1;
474
475 /* Drop old route. */
476 dst_release((*pskb)->dst);
477 (*pskb)->dst = &rt->u.dst;
478 } else {
479 /* non-local src, find valid iif to satisfy
480 * rp-filter when calling ip_route_input. */
481 fl.nl_u.ip4_u.daddr = iph->saddr;
482 if (ip_route_output_key(&rt, &fl) != 0)
483 return -1;
484
485 odst = (*pskb)->dst;
486 if (ip_route_input(*pskb, iph->daddr, iph->saddr,
487 RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
488 dst_release(&rt->u.dst);
489 return -1;
490 }
491 dst_release(&rt->u.dst);
492 dst_release(odst);
493 }
494
495 if ((*pskb)->dst->error)
496 return -1;
497
498 /* Change in oif may mean change in hh_len. */
499 hh_len = (*pskb)->dst->dev->hard_header_len;
500 if (skb_headroom(*pskb) < hh_len) {
501 struct sk_buff *nskb;
502
503 nskb = skb_realloc_headroom(*pskb, hh_len);
504 if (!nskb)
505 return -1;
506 if ((*pskb)->sk)
507 skb_set_owner_w(nskb, (*pskb)->sk);
508 kfree_skb(*pskb);
509 *pskb = nskb;
510 }
511
512 return 0;
513}
514EXPORT_SYMBOL(ip_route_me_harder);
515
516int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
517{
518 struct sk_buff *nskb;
519
520 if (writable_len > (*pskb)->len)
521 return 0;
522
523 /* Not exclusive use of packet? Must copy. */
524 if (skb_shared(*pskb) || skb_cloned(*pskb))
525 goto copy_skb;
526
527 return pskb_may_pull(*pskb, writable_len);
528
529copy_skb:
530 nskb = skb_copy(*pskb, GFP_ATOMIC);
531 if (!nskb)
532 return 0;
533 BUG_ON(skb_is_nonlinear(nskb));
534
535 /* Rest of kernel will get very unhappy if we pass it a
536 suddenly-orphaned skbuff */
537 if ((*pskb)->sk)
538 skb_set_owner_w(nskb, (*pskb)->sk);
539 kfree_skb(*pskb);
540 *pskb = nskb;
541 return 1;
542}
543EXPORT_SYMBOL(skb_ip_make_writable);
544#endif /*CONFIG_INET*/
545
546/* Internal logging interface, which relies on the real
547 LOG target modules */
548
549#define NF_LOG_PREFIXLEN 128
550
551static nf_logfn *nf_logging[NPROTO]; /* = NULL */
552static int reported = 0;
553static DEFINE_SPINLOCK(nf_log_lock);
554
555int nf_log_register(int pf, nf_logfn *logfn)
556{
557 int ret = -EBUSY;
558
559 /* Any setup of logging members must be done before
560 * substituting pointer. */
561 spin_lock(&nf_log_lock);
562 if (!nf_logging[pf]) {
563 rcu_assign_pointer(nf_logging[pf], logfn);
564 ret = 0;
565 }
566 spin_unlock(&nf_log_lock);
567 return ret;
568}
569
570void nf_log_unregister(int pf, nf_logfn *logfn)
571{
572 spin_lock(&nf_log_lock);
573 if (nf_logging[pf] == logfn)
574 nf_logging[pf] = NULL;
575 spin_unlock(&nf_log_lock);
576
577 /* Give time to concurrent readers. */
578 synchronize_net();
579}
580
581void nf_log_packet(int pf,
582 unsigned int hooknum,
583 const struct sk_buff *skb,
584 const struct net_device *in,
585 const struct net_device *out,
586 const char *fmt, ...)
587{
588 va_list args;
589 char prefix[NF_LOG_PREFIXLEN];
590 nf_logfn *logfn;
591
592 rcu_read_lock();
593 logfn = rcu_dereference(nf_logging[pf]);
594 if (logfn) {
595 va_start(args, fmt);
596 vsnprintf(prefix, sizeof(prefix), fmt, args);
597 va_end(args);
598 /* We must read logging before nf_logfn[pf] */
599 logfn(hooknum, skb, in, out, prefix);
600 } else if (!reported) {
601 printk(KERN_WARNING "nf_log_packet: can\'t log yet, "
602 "no backend logging module loaded in!\n");
603 reported++;
604 }
605 rcu_read_unlock();
606}
607EXPORT_SYMBOL(nf_log_register);
608EXPORT_SYMBOL(nf_log_unregister);
609EXPORT_SYMBOL(nf_log_packet);
610
611/* This does not belong here, but locally generated errors need it if connection
612 tracking is in use: without this, the connection may not be in the hash table, and
613 hence manufactured ICMP or RST packets will not be associated with it. */
614void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
615
616void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
617{
618 void (*attach)(struct sk_buff *, struct sk_buff *);
619
620 if (skb->nfct && (attach = ip_ct_attach) != NULL) {
621 mb(); /* Just to be sure: must be read before executing this */
622 attach(new, skb);
623 }
624}
625
626void __init netfilter_init(void)
627{
628 int i, h;
629
630 for (i = 0; i < NPROTO; i++) {
631 for (h = 0; h < NF_MAX_HOOKS; h++)
632 INIT_LIST_HEAD(&nf_hooks[i][h]);
633 }
634}
635
636EXPORT_SYMBOL(ip_ct_attach);
637EXPORT_SYMBOL(nf_ct_attach);
638EXPORT_SYMBOL(nf_getsockopt);
639EXPORT_SYMBOL(nf_hook_slow);
640EXPORT_SYMBOL(nf_hooks);
641EXPORT_SYMBOL(nf_register_hook);
642EXPORT_SYMBOL(nf_register_queue_handler);
643EXPORT_SYMBOL(nf_register_sockopt);
644EXPORT_SYMBOL(nf_reinject);
645EXPORT_SYMBOL(nf_setsockopt);
646EXPORT_SYMBOL(nf_unregister_hook);
647EXPORT_SYMBOL(nf_unregister_queue_handler);
648EXPORT_SYMBOL(nf_unregister_sockopt);
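
The file removed above defined the core hook API (the code moves elsewhere in this series). For orientation, a minimal sketch of a module built against the nf_register_hook()/nf_hook_ops interface shown in the deleted code; the module name and hook body are illustrative, not part of this patch.

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

/* Hook signature as called by nf_iterate() above. */
static unsigned int example_hook(unsigned int hooknum,
				 struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* Accept everything; a real hook may return NF_DROP,
	 * NF_QUEUE (routed through nf_queue() above) or NF_REPEAT. */
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops = {
	.hook     = example_hook,
	.owner    = THIS_MODULE,
	.pf       = PF_INET,
	.hooknum  = NF_IP_PRE_ROUTING,
	.priority = NF_IP_PRI_FIRST,
};

static int __init example_init(void)
{
	/* Inserted into nf_hooks[pf][hooknum] in priority order. */
	return nf_register_hook(&example_ops);
}

static void __exit example_exit(void)
{
	nf_unregister_hook(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
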
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index bb55675f0685..b8203de5ff07 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -32,7 +32,6 @@
 32 * Further increasing requires changing the hash table size. 32 * Further increasing requires changing the hash table size.
33 */ 33 */
34int sysctl_max_syn_backlog = 256; 34int sysctl_max_syn_backlog = 256;
35EXPORT_SYMBOL(sysctl_max_syn_backlog);
36 35
37int reqsk_queue_alloc(struct request_sock_queue *queue, 36int reqsk_queue_alloc(struct request_sock_queue *queue,
38 const int nr_table_entries) 37 const int nr_table_entries)
@@ -53,6 +52,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
53 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); 52 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
54 rwlock_init(&queue->syn_wait_lock); 53 rwlock_init(&queue->syn_wait_lock);
55 queue->rskq_accept_head = queue->rskq_accept_head = NULL; 54 queue->rskq_accept_head = queue->rskq_accept_head = NULL;
55 queue->rskq_defer_accept = 0;
56 lopt->nr_table_entries = nr_table_entries;
56 57
57 write_lock_bh(&queue->syn_wait_lock); 58 write_lock_bh(&queue->syn_wait_lock);
58 queue->listen_opt = lopt; 59 queue->listen_opt = lopt;
@@ -62,3 +63,28 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
62} 63}
63 64
64EXPORT_SYMBOL(reqsk_queue_alloc); 65EXPORT_SYMBOL(reqsk_queue_alloc);
66
67void reqsk_queue_destroy(struct request_sock_queue *queue)
68{
69 /* make all the listen_opt local to us */
70 struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
71
72 if (lopt->qlen != 0) {
73 int i;
74
75 for (i = 0; i < lopt->nr_table_entries; i++) {
76 struct request_sock *req;
77
78 while ((req = lopt->syn_table[i]) != NULL) {
79 lopt->syn_table[i] = req->dl_next;
80 lopt->qlen--;
81 reqsk_free(req);
82 }
83 }
84 }
85
86 BUG_TRAP(lopt->qlen == 0);
87 kfree(lopt);
88}
89
90EXPORT_SYMBOL(reqsk_queue_destroy);
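
The new reqsk_queue_destroy() above walks every SYN-table bucket, unlinking requests via dl_next and dropping them with reqsk_free(). A hedged sketch of how a listen path might pair the two entry points; the wrapper functions here are hypothetical, not from this patch:

#include <net/request_sock.h>

/* Hypothetical wrappers around the entry points added above. */
static int example_listen_start(struct request_sock_queue *queue)
{
	/* nr_table_entries is now recorded in the listen_sock so that
	 * reqsk_queue_destroy() knows how many buckets to walk. */
	return reqsk_queue_alloc(queue, 512);
}

static void example_listen_stop(struct request_sock_queue *queue)
{
	/* Frees any embryonic requests still parked in the SYN table. */
	reqsk_queue_destroy(queue);
}
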
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4b1bb30e6381..9bed7569ce3f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -148,7 +148,7 @@ int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
148{ 148{
149 int err = 0; 149 int err = 0;
150 150
151 NETLINK_CB(skb).dst_groups = group; 151 NETLINK_CB(skb).dst_group = group;
152 if (echo) 152 if (echo)
153 atomic_inc(&skb->users); 153 atomic_inc(&skb->users);
154 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL); 154 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
@@ -458,8 +458,8 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
458 kfree_skb(skb); 458 kfree_skb(skb);
459 return; 459 return;
460 } 460 }
461 NETLINK_CB(skb).dst_groups = RTMGRP_LINK; 461 NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
462 netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_KERNEL); 462 netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_KERNEL);
463} 463}
464 464
465static int rtnetlink_done(struct netlink_callback *cb) 465static int rtnetlink_done(struct netlink_callback *cb)
@@ -708,7 +708,8 @@ void __init rtnetlink_init(void)
708 if (!rta_buf) 708 if (!rta_buf)
709 panic("rtnetlink_init: cannot allocate rta_buf\n"); 709 panic("rtnetlink_init: cannot allocate rta_buf\n");
710 710
711 rtnl = netlink_kernel_create(NETLINK_ROUTE, rtnetlink_rcv); 711 rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv,
712 THIS_MODULE);
712 if (rtnl == NULL) 713 if (rtnl == NULL)
713 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 714 panic("rtnetlink_init: cannot initialize rtnetlink\n");
714 netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV); 715 netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
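
These hunks are part of the switch from the RTMGRP_* bitmask constants to numbered RTNLGRP_* groups, with netlink_kernel_create() now told how many groups the family uses. On the userspace side, groups beyond the legacy 32-bit nl_groups mask are joined per group number; a sketch, noting that the sockopt constants come from the same netlink rework rather than this hunk, hence the fallback defines:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
#ifndef NETLINK_ADD_MEMBERSHIP
#define NETLINK_ADD_MEMBERSHIP 1
#endif

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct sockaddr_nl addr;
	int grp = RTNLGRP_LINK;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	/* Join RTNLGRP_LINK by group number, not by mask bit. */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		perror("NETLINK_ADD_MEMBERSHIP");
	return 0;
}
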
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7eab867ede59..f80a28785610 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -68,7 +68,10 @@
68#include <asm/uaccess.h> 68#include <asm/uaccess.h>
69#include <asm/system.h> 69#include <asm/system.h>
70 70
71static kmem_cache_t *skbuff_head_cache; 71static kmem_cache_t *skbuff_head_cache __read_mostly;
72static kmem_cache_t *skbuff_fclone_cache __read_mostly;
73
74struct timeval __read_mostly skb_tv_base;
72 75
73/* 76/*
74 * Keep out-of-line to prevent kernel bloat. 77 * Keep out-of-line to prevent kernel bloat.
@@ -118,7 +121,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
118 */ 121 */
119 122
120/** 123/**
121 * alloc_skb - allocate a network buffer 124 * __alloc_skb - allocate a network buffer
122 * @size: size to allocate 125 * @size: size to allocate
123 * @gfp_mask: allocation mask 126 * @gfp_mask: allocation mask
124 * 127 *
@@ -129,14 +132,20 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
129 * Buffers may only be allocated from interrupts using a @gfp_mask of 132 * Buffers may only be allocated from interrupts using a @gfp_mask of
130 * %GFP_ATOMIC. 133 * %GFP_ATOMIC.
131 */ 134 */
132struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask) 135struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask,
136 int fclone)
133{ 137{
134 struct sk_buff *skb; 138 struct sk_buff *skb;
135 u8 *data; 139 u8 *data;
136 140
137 /* Get the HEAD */ 141 /* Get the HEAD */
138 skb = kmem_cache_alloc(skbuff_head_cache, 142 if (fclone)
139 gfp_mask & ~__GFP_DMA); 143 skb = kmem_cache_alloc(skbuff_fclone_cache,
144 gfp_mask & ~__GFP_DMA);
145 else
146 skb = kmem_cache_alloc(skbuff_head_cache,
147 gfp_mask & ~__GFP_DMA);
148
140 if (!skb) 149 if (!skb)
141 goto out; 150 goto out;
142 151
@@ -153,7 +162,15 @@ struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
153 skb->data = data; 162 skb->data = data;
154 skb->tail = data; 163 skb->tail = data;
155 skb->end = data + size; 164 skb->end = data + size;
165 if (fclone) {
166 struct sk_buff *child = skb + 1;
167 atomic_t *fclone_ref = (atomic_t *) (child + 1);
156 168
169 skb->fclone = SKB_FCLONE_ORIG;
170 atomic_set(fclone_ref, 1);
171
172 child->fclone = SKB_FCLONE_UNAVAILABLE;
173 }
157 atomic_set(&(skb_shinfo(skb)->dataref), 1); 174 atomic_set(&(skb_shinfo(skb)->dataref), 1);
158 skb_shinfo(skb)->nr_frags = 0; 175 skb_shinfo(skb)->nr_frags = 0;
159 skb_shinfo(skb)->tso_size = 0; 176 skb_shinfo(skb)->tso_size = 0;
@@ -266,8 +283,34 @@ void skb_release_data(struct sk_buff *skb)
266 */ 283 */
267void kfree_skbmem(struct sk_buff *skb) 284void kfree_skbmem(struct sk_buff *skb)
268{ 285{
286 struct sk_buff *other;
287 atomic_t *fclone_ref;
288
269 skb_release_data(skb); 289 skb_release_data(skb);
270 kmem_cache_free(skbuff_head_cache, skb); 290 switch (skb->fclone) {
291 case SKB_FCLONE_UNAVAILABLE:
292 kmem_cache_free(skbuff_head_cache, skb);
293 break;
294
295 case SKB_FCLONE_ORIG:
296 fclone_ref = (atomic_t *) (skb + 2);
297 if (atomic_dec_and_test(fclone_ref))
298 kmem_cache_free(skbuff_fclone_cache, skb);
299 break;
300
301 case SKB_FCLONE_CLONE:
302 fclone_ref = (atomic_t *) (skb + 1);
303 other = skb - 1;
304
305 /* The clone portion is available for
306 * fast-cloning again.
307 */
308 skb->fclone = SKB_FCLONE_UNAVAILABLE;
309
310 if (atomic_dec_and_test(fclone_ref))
311 kmem_cache_free(skbuff_fclone_cache, other);
312 break;
313 };
271} 314}
272 315
273/** 316/**
@@ -281,8 +324,6 @@ void kfree_skbmem(struct sk_buff *skb)
281 324
282void __kfree_skb(struct sk_buff *skb) 325void __kfree_skb(struct sk_buff *skb)
283{ 326{
284 BUG_ON(skb->list != NULL);
285
286 dst_release(skb->dst); 327 dst_release(skb->dst);
287#ifdef CONFIG_XFRM 328#ifdef CONFIG_XFRM
288 secpath_put(skb->sp); 329 secpath_put(skb->sp);
@@ -302,7 +343,6 @@ void __kfree_skb(struct sk_buff *skb)
302 skb->tc_index = 0; 343 skb->tc_index = 0;
303#ifdef CONFIG_NET_CLS_ACT 344#ifdef CONFIG_NET_CLS_ACT
304 skb->tc_verd = 0; 345 skb->tc_verd = 0;
305 skb->tc_classid = 0;
306#endif 346#endif
307#endif 347#endif
308 348
@@ -325,19 +365,27 @@ void __kfree_skb(struct sk_buff *skb)
325 365
326struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) 366struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
327{ 367{
328 struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 368 struct sk_buff *n;
329 369
330 if (!n) 370 n = skb + 1;
331 return NULL; 371 if (skb->fclone == SKB_FCLONE_ORIG &&
372 n->fclone == SKB_FCLONE_UNAVAILABLE) {
373 atomic_t *fclone_ref = (atomic_t *) (n + 1);
374 n->fclone = SKB_FCLONE_CLONE;
375 atomic_inc(fclone_ref);
376 } else {
377 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
378 if (!n)
379 return NULL;
380 n->fclone = SKB_FCLONE_UNAVAILABLE;
381 }
332 382
333#define C(x) n->x = skb->x 383#define C(x) n->x = skb->x
334 384
335 n->next = n->prev = NULL; 385 n->next = n->prev = NULL;
336 n->list = NULL;
337 n->sk = NULL; 386 n->sk = NULL;
338 C(stamp); 387 C(tstamp);
339 C(dev); 388 C(dev);
340 C(real_dev);
341 C(h); 389 C(h);
342 C(nh); 390 C(nh);
343 C(mac); 391 C(mac);
@@ -361,7 +409,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
361 n->destructor = NULL; 409 n->destructor = NULL;
362#ifdef CONFIG_NETFILTER 410#ifdef CONFIG_NETFILTER
363 C(nfmark); 411 C(nfmark);
364 C(nfcache);
365 C(nfct); 412 C(nfct);
366 nf_conntrack_get(skb->nfct); 413 nf_conntrack_get(skb->nfct);
367 C(nfctinfo); 414 C(nfctinfo);
@@ -370,9 +417,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
370 nf_bridge_get(skb->nf_bridge); 417 nf_bridge_get(skb->nf_bridge);
371#endif 418#endif
372#endif /*CONFIG_NETFILTER*/ 419#endif /*CONFIG_NETFILTER*/
373#if defined(CONFIG_HIPPI)
374 C(private);
375#endif
376#ifdef CONFIG_NET_SCHED 420#ifdef CONFIG_NET_SCHED
377 C(tc_index); 421 C(tc_index);
378#ifdef CONFIG_NET_CLS_ACT 422#ifdef CONFIG_NET_CLS_ACT
@@ -380,7 +424,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
380 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); 424 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
381 n->tc_verd = CLR_TC_MUNGED(n->tc_verd); 425 n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
382 C(input_dev); 426 C(input_dev);
383 C(tc_classid);
384#endif 427#endif
385 428
386#endif 429#endif
@@ -404,10 +447,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
404 */ 447 */
405 unsigned long offset = new->data - old->data; 448 unsigned long offset = new->data - old->data;
406 449
407 new->list = NULL;
408 new->sk = NULL; 450 new->sk = NULL;
409 new->dev = old->dev; 451 new->dev = old->dev;
410 new->real_dev = old->real_dev;
411 new->priority = old->priority; 452 new->priority = old->priority;
412 new->protocol = old->protocol; 453 new->protocol = old->protocol;
413 new->dst = dst_clone(old->dst); 454 new->dst = dst_clone(old->dst);
@@ -419,12 +460,12 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
419 new->mac.raw = old->mac.raw + offset; 460 new->mac.raw = old->mac.raw + offset;
420 memcpy(new->cb, old->cb, sizeof(old->cb)); 461 memcpy(new->cb, old->cb, sizeof(old->cb));
421 new->local_df = old->local_df; 462 new->local_df = old->local_df;
463 new->fclone = SKB_FCLONE_UNAVAILABLE;
422 new->pkt_type = old->pkt_type; 464 new->pkt_type = old->pkt_type;
423 new->stamp = old->stamp; 465 new->tstamp = old->tstamp;
424 new->destructor = NULL; 466 new->destructor = NULL;
425#ifdef CONFIG_NETFILTER 467#ifdef CONFIG_NETFILTER
426 new->nfmark = old->nfmark; 468 new->nfmark = old->nfmark;
427 new->nfcache = old->nfcache;
428 new->nfct = old->nfct; 469 new->nfct = old->nfct;
429 nf_conntrack_get(old->nfct); 470 nf_conntrack_get(old->nfct);
430 new->nfctinfo = old->nfctinfo; 471 new->nfctinfo = old->nfctinfo;
@@ -1344,50 +1385,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1344 __skb_queue_tail(list, newsk); 1385 __skb_queue_tail(list, newsk);
1345 spin_unlock_irqrestore(&list->lock, flags); 1386 spin_unlock_irqrestore(&list->lock, flags);
1346} 1387}
1388
1347/** 1389/**
1348 * skb_unlink - remove a buffer from a list 1390 * skb_unlink - remove a buffer from a list
1349 * @skb: buffer to remove 1391 * @skb: buffer to remove
1392 * @list: list to use
1350 * 1393 *
1351 * Place a packet after a given packet in a list. The list locks are taken 1394 * Remove a packet from a list. The list locks are taken and this
1352 * and this function is atomic with respect to other list locked calls 1395 * function is atomic with respect to other list locked calls
1353 * 1396 *
1354 * Works even without knowing the list it is sitting on, which can be 1397 * You must know what list the SKB is on.
1355 * handy at times. It also means that THE LIST MUST EXIST when you
1356 * unlink. Thus a list must have its contents unlinked before it is
1357 * destroyed.
1358 */ 1398 */
1359void skb_unlink(struct sk_buff *skb) 1399void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1360{ 1400{
1361 struct sk_buff_head *list = skb->list; 1401 unsigned long flags;
1362
1363 if (list) {
1364 unsigned long flags;
1365 1402
1366 spin_lock_irqsave(&list->lock, flags); 1403 spin_lock_irqsave(&list->lock, flags);
1367 if (skb->list == list) 1404 __skb_unlink(skb, list);
1368 __skb_unlink(skb, skb->list); 1405 spin_unlock_irqrestore(&list->lock, flags);
1369 spin_unlock_irqrestore(&list->lock, flags);
1370 }
1371} 1406}
1372 1407
1373
1374/** 1408/**
1375 * skb_append - append a buffer 1409 * skb_append - append a buffer
1376 * @old: buffer to insert after 1410 * @old: buffer to insert after
1377 * @newsk: buffer to insert 1411 * @newsk: buffer to insert
1412 * @list: list to use
1378 * 1413 *
1379 * Place a packet after a given packet in a list. The list locks are taken 1414 * Place a packet after a given packet in a list. The list locks are taken
1380 * and this function is atomic with respect to other list locked calls. 1415 * and this function is atomic with respect to other list locked calls.
1381 * A buffer cannot be placed on two lists at the same time. 1416 * A buffer cannot be placed on two lists at the same time.
1382 */ 1417 */
1383 1418void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1384void skb_append(struct sk_buff *old, struct sk_buff *newsk)
1385{ 1419{
1386 unsigned long flags; 1420 unsigned long flags;
1387 1421
1388 spin_lock_irqsave(&old->list->lock, flags); 1422 spin_lock_irqsave(&list->lock, flags);
1389 __skb_append(old, newsk); 1423 __skb_append(old, newsk, list);
1390 spin_unlock_irqrestore(&old->list->lock, flags); 1424 spin_unlock_irqrestore(&list->lock, flags);
1391} 1425}
1392 1426
1393 1427
@@ -1395,19 +1429,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
1395 * skb_insert - insert a buffer 1429 * skb_insert - insert a buffer
1396 * @old: buffer to insert before 1430 * @old: buffer to insert before
1397 * @newsk: buffer to insert 1431 * @newsk: buffer to insert
1432 * @list: list to use
1433 *
1434 * Place a packet before a given packet in a list. The list locks are
1435 * taken and this function is atomic with respect to other list locked
1436 * calls.
1398 * 1437 *
1399 * Place a packet before a given packet in a list. The list locks are taken
1400 * and this function is atomic with respect to other list locked calls
1401 * A buffer cannot be placed on two lists at the same time. 1438 * A buffer cannot be placed on two lists at the same time.
1402 */ 1439 */
1403 1440void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1404void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
1405{ 1441{
1406 unsigned long flags; 1442 unsigned long flags;
1407 1443
1408 spin_lock_irqsave(&old->list->lock, flags); 1444 spin_lock_irqsave(&list->lock, flags);
1409 __skb_insert(newsk, old->prev, old, old->list); 1445 __skb_insert(newsk, old->prev, old, list);
1410 spin_unlock_irqrestore(&old->list->lock, flags); 1446 spin_unlock_irqrestore(&list->lock, flags);
1411} 1447}
1412 1448
1413#if 0 1449#if 0
@@ -1663,12 +1699,23 @@ void __init skb_init(void)
1663 NULL, NULL); 1699 NULL, NULL);
1664 if (!skbuff_head_cache) 1700 if (!skbuff_head_cache)
1665 panic("cannot create skbuff cache"); 1701 panic("cannot create skbuff cache");
1702
1703 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
1704 (2*sizeof(struct sk_buff)) +
1705 sizeof(atomic_t),
1706 0,
1707 SLAB_HWCACHE_ALIGN,
1708 NULL, NULL);
1709 if (!skbuff_fclone_cache)
1710 panic("cannot create skbuff cache");
1711
1712 do_gettimeofday(&skb_tv_base);
1666} 1713}
1667 1714
1668EXPORT_SYMBOL(___pskb_trim); 1715EXPORT_SYMBOL(___pskb_trim);
1669EXPORT_SYMBOL(__kfree_skb); 1716EXPORT_SYMBOL(__kfree_skb);
1670EXPORT_SYMBOL(__pskb_pull_tail); 1717EXPORT_SYMBOL(__pskb_pull_tail);
1671EXPORT_SYMBOL(alloc_skb); 1718EXPORT_SYMBOL(__alloc_skb);
1672EXPORT_SYMBOL(pskb_copy); 1719EXPORT_SYMBOL(pskb_copy);
1673EXPORT_SYMBOL(pskb_expand_head); 1720EXPORT_SYMBOL(pskb_expand_head);
1674EXPORT_SYMBOL(skb_checksum); 1721EXPORT_SYMBOL(skb_checksum);
@@ -1696,3 +1743,4 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
1696EXPORT_SYMBOL(skb_seq_read); 1743EXPORT_SYMBOL(skb_seq_read);
1697EXPORT_SYMBOL(skb_abort_seq_read); 1744EXPORT_SYMBOL(skb_abort_seq_read);
1698EXPORT_SYMBOL(skb_find_text); 1745EXPORT_SYMBOL(skb_find_text);
1746EXPORT_SYMBOL(skb_tv_base);
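
The fast-clone change above packs three things into one skbuff_fclone_cache object: the original sk_buff, a companion sk_buff immediately after it, and a shared reference count after that, which is why the code reaches them as skb + 1 and skb + 2. A userspace model of that lifecycle, with simplified stand-in types rather than kernel code:

#include <stdlib.h>

enum fclone_state { FCLONE_UNAVAILABLE, FCLONE_ORIG, FCLONE_CLONE };

struct mini_skb {
	enum fclone_state fclone;
	/* payload pointers elided */
};

/* One slab object: [orig][clone][shared refcount], mirroring
 * 2*sizeof(struct sk_buff) + sizeof(atomic_t) above. */
struct fclone_obj {
	struct mini_skb orig;
	struct mini_skb clone;	/* the kernel reaches this as skb + 1 */
	int refcnt;		/* and this as (atomic_t *)(skb + 2) */
};

int main(void)
{
	struct fclone_obj *obj = malloc(sizeof(*obj));

	/* __alloc_skb(..., fclone=1) */
	obj->orig.fclone  = FCLONE_ORIG;
	obj->clone.fclone = FCLONE_UNAVAILABLE;
	obj->refcnt = 1;

	/* skb_clone(): the companion is free, so no allocation needed */
	struct mini_skb *n = &obj->clone;
	if (obj->orig.fclone == FCLONE_ORIG &&
	    n->fclone == FCLONE_UNAVAILABLE) {
		n->fclone = FCLONE_CLONE;
		obj->refcnt++;
	}

	/* kfree_skbmem() on the clone: mark it reusable, free the
	 * slab object only when the shared count drops to zero */
	n->fclone = FCLONE_UNAVAILABLE;
	if (--obj->refcnt == 0)
		free(obj);

	/* kfree_skbmem() on the original */
	if (--obj->refcnt == 0)
		free(obj);
	return 0;
}
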
diff --git a/net/core/sock.c b/net/core/sock.c
index 12f6d9a2a522..ccd10fd65682 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -260,7 +260,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
260 260
261 if (val > sysctl_wmem_max) 261 if (val > sysctl_wmem_max)
262 val = sysctl_wmem_max; 262 val = sysctl_wmem_max;
263 263set_sndbuf:
264 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 264 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
265 if ((val * 2) < SOCK_MIN_SNDBUF) 265 if ((val * 2) < SOCK_MIN_SNDBUF)
266 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 266 sk->sk_sndbuf = SOCK_MIN_SNDBUF;
@@ -274,6 +274,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
274 sk->sk_write_space(sk); 274 sk->sk_write_space(sk);
275 break; 275 break;
276 276
277 case SO_SNDBUFFORCE:
278 if (!capable(CAP_NET_ADMIN)) {
279 ret = -EPERM;
280 break;
281 }
282 goto set_sndbuf;
283
277 case SO_RCVBUF: 284 case SO_RCVBUF:
278 /* Don't error on this BSD doesn't and if you think 285 /* Don't error on this BSD doesn't and if you think
279 about it this is right. Otherwise apps have to 286 about it this is right. Otherwise apps have to
@@ -282,7 +289,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
282 289
283 if (val > sysctl_rmem_max) 290 if (val > sysctl_rmem_max)
284 val = sysctl_rmem_max; 291 val = sysctl_rmem_max;
285 292set_rcvbuf:
286 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 293 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
287 /* FIXME: is this lower bound the right one? */ 294 /* FIXME: is this lower bound the right one? */
288 if ((val * 2) < SOCK_MIN_RCVBUF) 295 if ((val * 2) < SOCK_MIN_RCVBUF)
@@ -291,6 +298,13 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
291 sk->sk_rcvbuf = val * 2; 298 sk->sk_rcvbuf = val * 2;
292 break; 299 break;
293 300
301 case SO_RCVBUFFORCE:
302 if (!capable(CAP_NET_ADMIN)) {
303 ret = -EPERM;
304 break;
305 }
306 goto set_rcvbuf;
307
294 case SO_KEEPALIVE: 308 case SO_KEEPALIVE:
295#ifdef CONFIG_INET 309#ifdef CONFIG_INET
296 if (sk->sk_protocol == IPPROTO_TCP) 310 if (sk->sk_protocol == IPPROTO_TCP)
@@ -686,6 +700,80 @@ void sk_free(struct sock *sk)
686 module_put(owner); 700 module_put(owner);
687} 701}
688 702
703struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
704{
705 struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
706
707 if (newsk != NULL) {
708 struct sk_filter *filter;
709
710 memcpy(newsk, sk, sk->sk_prot->obj_size);
711
712 /* SANITY */
713 sk_node_init(&newsk->sk_node);
714 sock_lock_init(newsk);
715 bh_lock_sock(newsk);
716
717 atomic_set(&newsk->sk_rmem_alloc, 0);
718 atomic_set(&newsk->sk_wmem_alloc, 0);
719 atomic_set(&newsk->sk_omem_alloc, 0);
720 skb_queue_head_init(&newsk->sk_receive_queue);
721 skb_queue_head_init(&newsk->sk_write_queue);
722
723 rwlock_init(&newsk->sk_dst_lock);
724 rwlock_init(&newsk->sk_callback_lock);
725
726 newsk->sk_dst_cache = NULL;
727 newsk->sk_wmem_queued = 0;
728 newsk->sk_forward_alloc = 0;
729 newsk->sk_send_head = NULL;
730 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
731 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
732
733 sock_reset_flag(newsk, SOCK_DONE);
734 skb_queue_head_init(&newsk->sk_error_queue);
735
736 filter = newsk->sk_filter;
737 if (filter != NULL)
738 sk_filter_charge(newsk, filter);
739
740 if (unlikely(xfrm_sk_clone_policy(newsk))) {
741 /* It is still raw copy of parent, so invalidate
742 * destructor and make plain sk_free() */
743 newsk->sk_destruct = NULL;
744 sk_free(newsk);
745 newsk = NULL;
746 goto out;
747 }
748
749 newsk->sk_err = 0;
750 newsk->sk_priority = 0;
751 atomic_set(&newsk->sk_refcnt, 2);
752
753 /*
754 * Increment the counter in the same struct proto as the master
755 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
756 * is the same as sk->sk_prot->socks, as this field was copied
757 * with memcpy).
758 *
759 * This _changes_ the previous behaviour, where
760 * tcp_create_openreq_child always was incrementing the
761 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
762 * to be taken into account in all callers. -acme
763 */
764 sk_refcnt_debug_inc(newsk);
765 newsk->sk_socket = NULL;
766 newsk->sk_sleep = NULL;
767
768 if (newsk->sk_prot->sockets_allocated)
769 atomic_inc(newsk->sk_prot->sockets_allocated);
770 }
771out:
772 return newsk;
773}
774
775EXPORT_SYMBOL_GPL(sk_clone);
776
689void __init sk_init(void) 777void __init sk_init(void)
690{ 778{
691 if (num_physpages <= 4096) { 779 if (num_physpages <= 4096) {
@@ -1353,11 +1441,7 @@ void sk_common_release(struct sock *sk)
1353 1441
1354 xfrm_sk_free_policy(sk); 1442 xfrm_sk_free_policy(sk);
1355 1443
1356#ifdef INET_REFCNT_DEBUG 1444 sk_refcnt_debug_release(sk);
1357 if (atomic_read(&sk->sk_refcnt) != 1)
1358 printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
1359 sk, atomic_read(&sk->sk_refcnt));
1360#endif
1361 sock_put(sk); 1445 sock_put(sk);
1362} 1446}
1363 1447
@@ -1368,7 +1452,8 @@ static LIST_HEAD(proto_list);
1368 1452
1369int proto_register(struct proto *prot, int alloc_slab) 1453int proto_register(struct proto *prot, int alloc_slab)
1370{ 1454{
1371 char *request_sock_slab_name; 1455 char *request_sock_slab_name = NULL;
1456 char *timewait_sock_slab_name;
1372 int rc = -ENOBUFS; 1457 int rc = -ENOBUFS;
1373 1458
1374 if (alloc_slab) { 1459 if (alloc_slab) {
@@ -1399,6 +1484,23 @@ int proto_register(struct proto *prot, int alloc_slab)
1399 goto out_free_request_sock_slab_name; 1484 goto out_free_request_sock_slab_name;
1400 } 1485 }
1401 } 1486 }
1487
1488 if (prot->twsk_obj_size) {
1489 static const char mask[] = "tw_sock_%s";
1490
1491 timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1492
1493 if (timewait_sock_slab_name == NULL)
1494 goto out_free_request_sock_slab;
1495
1496 sprintf(timewait_sock_slab_name, mask, prot->name);
1497 prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
1498 prot->twsk_obj_size,
1499 0, SLAB_HWCACHE_ALIGN,
1500 NULL, NULL);
1501 if (prot->twsk_slab == NULL)
1502 goto out_free_timewait_sock_slab_name;
1503 }
1402 } 1504 }
1403 1505
1404 write_lock(&proto_list_lock); 1506 write_lock(&proto_list_lock);
@@ -1407,6 +1509,13 @@ int proto_register(struct proto *prot, int alloc_slab)
1407 rc = 0; 1509 rc = 0;
1408out: 1510out:
1409 return rc; 1511 return rc;
1512out_free_timewait_sock_slab_name:
1513 kfree(timewait_sock_slab_name);
1514out_free_request_sock_slab:
1515 if (prot->rsk_prot && prot->rsk_prot->slab) {
1516 kmem_cache_destroy(prot->rsk_prot->slab);
1517 prot->rsk_prot->slab = NULL;
1518 }
1410out_free_request_sock_slab_name: 1519out_free_request_sock_slab_name:
1411 kfree(request_sock_slab_name); 1520 kfree(request_sock_slab_name);
1412out_free_sock_slab: 1521out_free_sock_slab:
@@ -1434,6 +1543,14 @@ void proto_unregister(struct proto *prot)
1434 prot->rsk_prot->slab = NULL; 1543 prot->rsk_prot->slab = NULL;
1435 } 1544 }
1436 1545
1546 if (prot->twsk_slab != NULL) {
1547 const char *name = kmem_cache_name(prot->twsk_slab);
1548
1549 kmem_cache_destroy(prot->twsk_slab);
1550 kfree(name);
1551 prot->twsk_slab = NULL;
1552 }
1553
1437 list_del(&prot->node); 1554 list_del(&prot->node);
1438 write_unlock(&proto_list_lock); 1555 write_unlock(&proto_list_lock);
1439} 1556}
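
SO_SNDBUFFORCE and SO_RCVBUFFORCE let a CAP_NET_ADMIN caller jump past the wmem_max/rmem_max clamps via the set_sndbuf/set_rcvbuf labels added above. A userspace sketch; the fallback define carries the i386 value in case libc headers predate this series:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_SNDBUFFORCE
#define SO_SNDBUFFORCE 32	/* i386 value from this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 8 * 1024 * 1024;	/* may exceed sysctl_wmem_max */

	/* Without CAP_NET_ADMIN the kernel returns -EPERM. */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE,
		       &val, sizeof(val)) < 0)
		perror("SO_SNDBUFFORCE (needs CAP_NET_ADMIN)");
	return 0;
}
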
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 8f817ad9f546..2f278c8e4743 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -9,23 +9,18 @@
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <linux/config.h> 10#include <linux/config.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/socket.h>
13#include <net/sock.h>
12 14
13#ifdef CONFIG_SYSCTL 15#ifdef CONFIG_SYSCTL
14 16
15extern int netdev_max_backlog; 17extern int netdev_max_backlog;
16extern int netdev_budget;
17extern int weight_p; 18extern int weight_p;
18extern int net_msg_cost;
19extern int net_msg_burst;
20 19
21extern __u32 sysctl_wmem_max; 20extern __u32 sysctl_wmem_max;
22extern __u32 sysctl_rmem_max; 21extern __u32 sysctl_rmem_max;
23extern __u32 sysctl_wmem_default;
24extern __u32 sysctl_rmem_default;
25 22
26extern int sysctl_core_destroy_delay; 23extern int sysctl_core_destroy_delay;
27extern int sysctl_optmem_max;
28extern int sysctl_somaxconn;
29 24
30#ifdef CONFIG_NET_DIVERT 25#ifdef CONFIG_NET_DIVERT
31extern char sysctl_divert_version[]; 26extern char sysctl_divert_version[];
diff --git a/net/core/utils.c b/net/core/utils.c
index 88eb8b68e26b..7b5970fc9e40 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -16,7 +16,9 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/jiffies.h> 17#include <linux/jiffies.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/inet.h>
19#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/net.h>
20#include <linux/string.h> 22#include <linux/string.h>
21#include <linux/types.h> 23#include <linux/types.h>
22#include <linux/random.h> 24#include <linux/random.h>
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 3ff5639c0b78..5caae2399f3a 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -571,10 +571,6 @@ static int wireless_seq_show(struct seq_file *seq, void *v)
571 return 0; 571 return 0;
572} 572}
573 573
574extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
575extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
576extern void dev_seq_stop(struct seq_file *seq, void *v);
577
578static struct seq_operations wireless_seq_ops = { 574static struct seq_operations wireless_seq_ops = {
579 .start = dev_seq_start, 575 .start = dev_seq_start,
580 .next = dev_seq_next, 576 .next = dev_seq_next,
@@ -1144,8 +1140,8 @@ static inline void rtmsg_iwinfo(struct net_device * dev,
1144 kfree_skb(skb); 1140 kfree_skb(skb);
1145 return; 1141 return;
1146 } 1142 }
1147 NETLINK_CB(skb).dst_groups = RTMGRP_LINK; 1143 NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
1148 netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_ATOMIC); 1144 netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC);
1149} 1145}
1150#endif /* WE_EVENT_NETLINK */ 1146#endif /* WE_EVENT_NETLINK */
1151 1147
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
new file mode 100644
index 000000000000..187ac182e24b
--- /dev/null
+++ b/net/dccp/Kconfig
@@ -0,0 +1,50 @@
1menu "DCCP Configuration (EXPERIMENTAL)"
2 depends on INET && EXPERIMENTAL
3
4config IP_DCCP
5 tristate "The DCCP Protocol (EXPERIMENTAL)"
6 ---help---
7 Datagram Congestion Control Protocol
8
9 From draft-ietf-dccp-spec-11 <http://www.icir.org/kohler/dcp/draft-ietf-dccp-spec-11.txt>.
10
11 The Datagram Congestion Control Protocol (DCCP) is a transport
12 protocol that implements bidirectional, unicast connections of
13 congestion-controlled, unreliable datagrams. It should be suitable
14 for use by applications such as streaming media, Internet telephony,
15 and on-line games.
16
17 To compile this protocol support as a module, choose M here: the
18 module will be called dccp.
19
20 If in doubt, say N.
21
22config INET_DCCP_DIAG
23 depends on IP_DCCP && INET_DIAG
24 def_tristate y if (IP_DCCP = y && INET_DIAG = y)
25 def_tristate m
26
27source "net/dccp/ccids/Kconfig"
28
29menu "DCCP Kernel Hacking"
30 depends on IP_DCCP && DEBUG_KERNEL=y
31
32config IP_DCCP_DEBUG
33 bool "DCCP debug messages"
34 ---help---
35 Only use this if you're hacking DCCP.
36
37 Just say N.
38
39config IP_DCCP_UNLOAD_HACK
40 depends on IP_DCCP=m && IP_DCCP_CCID3=m
41 bool "DCCP control sock unload hack"
42 ---help---
43 Enable this to be able to unload the dccp module when it
44 has only one refcount held, the control sock one. Just execute
45 "rmmod dccp_ccid3 dccp"
46
47 Just say N.
48endmenu
49
50endmenu
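
For completeness, what using the new protocol looks like from userspace: a hedged sketch of opening a DCCP socket. The SOCK_DCCP and IPPROTO_DCCP values are introduced by this series and may not be in libc headers yet, hence the fallback defines.

#include <stdio.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0)
		perror("socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP)");
	return 0;
}
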
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
new file mode 100644
index 000000000000..fb97bb042455
--- /dev/null
+++ b/net/dccp/Makefile
@@ -0,0 +1,10 @@
1obj-$(CONFIG_IP_DCCP) += dccp.o
2
3dccp-y := ccid.o input.o ipv4.o minisocks.o options.o output.o proto.o \
4 timer.o
5
6obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
7
8dccp_diag-y := diag.o
9
10obj-y += ccids/
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
new file mode 100644
index 000000000000..9d8fc0e289ea
--- /dev/null
+++ b/net/dccp/ccid.c
@@ -0,0 +1,139 @@
1/*
2 * net/dccp/ccid.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * CCID infrastructure
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include "ccid.h"
15
16static struct ccid *ccids[CCID_MAX];
17#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
18static atomic_t ccids_lockct = ATOMIC_INIT(0);
19static DEFINE_SPINLOCK(ccids_lock);
20
21/*
22 * The strategy is: modifications to the ccids vector are short, do not sleep
23 * and are very rare, but read access should be free of any exclusive locks.
24 */
25static void ccids_write_lock(void)
26{
27 spin_lock(&ccids_lock);
28 while (atomic_read(&ccids_lockct) != 0) {
29 spin_unlock(&ccids_lock);
30 yield();
31 spin_lock(&ccids_lock);
32 }
33}
34
35static inline void ccids_write_unlock(void)
36{
37 spin_unlock(&ccids_lock);
38}
39
40static inline void ccids_read_lock(void)
41{
42 atomic_inc(&ccids_lockct);
43 spin_unlock_wait(&ccids_lock);
44}
45
46static inline void ccids_read_unlock(void)
47{
48 atomic_dec(&ccids_lockct);
49}
50
51#else
52#define ccids_write_lock() do { } while(0)
53#define ccids_write_unlock() do { } while(0)
54#define ccids_read_lock() do { } while(0)
55#define ccids_read_unlock() do { } while(0)
56#endif
57
58int ccid_register(struct ccid *ccid)
59{
60 int err;
61
62 if (ccid->ccid_init == NULL)
63 return -1;
64
65 ccids_write_lock();
66 err = -EEXIST;
67 if (ccids[ccid->ccid_id] == NULL) {
68 ccids[ccid->ccid_id] = ccid;
69 err = 0;
70 }
71 ccids_write_unlock();
72 if (err == 0)
73 pr_info("CCID: Registered CCID %d (%s)\n",
74 ccid->ccid_id, ccid->ccid_name);
75 return err;
76}
77
78EXPORT_SYMBOL_GPL(ccid_register);
79
80int ccid_unregister(struct ccid *ccid)
81{
82 ccids_write_lock();
83 ccids[ccid->ccid_id] = NULL;
84 ccids_write_unlock();
85 pr_info("CCID: Unregistered CCID %d (%s)\n",
86 ccid->ccid_id, ccid->ccid_name);
87 return 0;
88}
89
90EXPORT_SYMBOL_GPL(ccid_unregister);
91
92struct ccid *ccid_init(unsigned char id, struct sock *sk)
93{
94 struct ccid *ccid;
95
96#ifdef CONFIG_KMOD
97 if (ccids[id] == NULL)
98 request_module("net-dccp-ccid-%d", id);
99#endif
100 ccids_read_lock();
101
102 ccid = ccids[id];
103 if (ccid == NULL)
104 goto out;
105
106 if (!try_module_get(ccid->ccid_owner))
107 goto out_err;
108
109 if (ccid->ccid_init(sk) != 0)
110 goto out_module_put;
111out:
112 ccids_read_unlock();
113 return ccid;
114out_module_put:
115 module_put(ccid->ccid_owner);
116out_err:
117 ccid = NULL;
118 goto out;
119}
120
121EXPORT_SYMBOL_GPL(ccid_init);
122
123void ccid_exit(struct ccid *ccid, struct sock *sk)
124{
125 if (ccid == NULL)
126 return;
127
128 ccids_read_lock();
129
130 if (ccids[ccid->ccid_id] != NULL) {
131 if (ccid->ccid_exit != NULL)
132 ccid->ccid_exit(sk);
133 module_put(ccid->ccid_owner);
134 }
135
136 ccids_read_unlock();
137}
138
139EXPORT_SYMBOL_GPL(ccid_exit);
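
The locking comment above describes a "big reader" scheme: readers only bump an atomic counter and wait out any writer, while a writer takes the spinlock and yields until the reader count drains. A userspace model of the same pattern, using C11 atomics and a pthread mutex in place of the kernel primitives:

#include <sched.h>
#include <stdatomic.h>
#include <pthread.h>

static atomic_int lockct;	/* kernel: ccids_lockct */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void model_write_lock(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&lockct) != 0) {	/* wait for readers */
		pthread_mutex_unlock(&lock);
		sched_yield();			/* kernel: yield() */
		pthread_mutex_lock(&lock);
	}
	/* returns holding the lock, as ccids_write_lock() does */
}

static void model_write_unlock(void)
{
	pthread_mutex_unlock(&lock);
}

static void model_read_lock(void)
{
	atomic_fetch_add(&lockct, 1);
	/* kernel: spin_unlock_wait(&ccids_lock) — wait out any writer */
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}

static void model_read_unlock(void)
{
	atomic_fetch_sub(&lockct, 1);
}

int main(void)
{
	model_read_lock();
	model_read_unlock();
	model_write_lock();
	model_write_unlock();
	return 0;
}
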
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
new file mode 100644
index 000000000000..962f1e9e2f7e
--- /dev/null
+++ b/net/dccp/ccid.h
@@ -0,0 +1,180 @@
1#ifndef _CCID_H
2#define _CCID_H
3/*
4 * net/dccp/ccid.h
5 *
6 * An implementation of the DCCP protocol
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 *
9 * CCID infrastructure
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <net/sock.h>
17#include <linux/dccp.h>
18#include <linux/list.h>
19#include <linux/module.h>
20
21#define CCID_MAX 255
22
23struct ccid {
24 unsigned char ccid_id;
25 const char *ccid_name;
26 struct module *ccid_owner;
27 int (*ccid_init)(struct sock *sk);
28 void (*ccid_exit)(struct sock *sk);
29 int (*ccid_hc_rx_init)(struct sock *sk);
30 int (*ccid_hc_tx_init)(struct sock *sk);
31 void (*ccid_hc_rx_exit)(struct sock *sk);
32 void (*ccid_hc_tx_exit)(struct sock *sk);
33 void (*ccid_hc_rx_packet_recv)(struct sock *sk,
34 struct sk_buff *skb);
35 int (*ccid_hc_rx_parse_options)(struct sock *sk,
36 unsigned char option,
37 unsigned char len, u16 idx,
38 unsigned char* value);
39 void (*ccid_hc_rx_insert_options)(struct sock *sk,
40 struct sk_buff *skb);
41 void (*ccid_hc_tx_insert_options)(struct sock *sk,
42 struct sk_buff *skb);
43 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
44 struct sk_buff *skb);
45 int (*ccid_hc_tx_parse_options)(struct sock *sk,
46 unsigned char option,
47 unsigned char len, u16 idx,
48 unsigned char* value);
49 int (*ccid_hc_tx_send_packet)(struct sock *sk,
50 struct sk_buff *skb, int len);
51 void (*ccid_hc_tx_packet_sent)(struct sock *sk, int more,
52 int len);
53 void (*ccid_hc_rx_get_info)(struct sock *sk,
54 struct tcp_info *info);
55 void (*ccid_hc_tx_get_info)(struct sock *sk,
56 struct tcp_info *info);
57};
58
59extern int ccid_register(struct ccid *ccid);
60extern int ccid_unregister(struct ccid *ccid);
61
62extern struct ccid *ccid_init(unsigned char id, struct sock *sk);
63extern void ccid_exit(struct ccid *ccid, struct sock *sk);
64
65static inline void __ccid_get(struct ccid *ccid)
66{
67 __module_get(ccid->ccid_owner);
68}
69
70static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
71 struct sk_buff *skb, int len)
72{
73 int rc = 0;
74 if (ccid->ccid_hc_tx_send_packet != NULL)
75 rc = ccid->ccid_hc_tx_send_packet(sk, skb, len);
76 return rc;
77}
78
79static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
80 int more, int len)
81{
82 if (ccid->ccid_hc_tx_packet_sent != NULL)
83 ccid->ccid_hc_tx_packet_sent(sk, more, len);
84}
85
86static inline int ccid_hc_rx_init(struct ccid *ccid, struct sock *sk)
87{
88 int rc = 0;
89 if (ccid->ccid_hc_rx_init != NULL)
90 rc = ccid->ccid_hc_rx_init(sk);
91 return rc;
92}
93
94static inline int ccid_hc_tx_init(struct ccid *ccid, struct sock *sk)
95{
96 int rc = 0;
97 if (ccid->ccid_hc_tx_init != NULL)
98 rc = ccid->ccid_hc_tx_init(sk);
99 return rc;
100}
101
102static inline void ccid_hc_rx_exit(struct ccid *ccid, struct sock *sk)
103{
104 if (ccid->ccid_hc_rx_exit != NULL &&
105 dccp_sk(sk)->dccps_hc_rx_ccid_private != NULL)
106 ccid->ccid_hc_rx_exit(sk);
107}
108
109static inline void ccid_hc_tx_exit(struct ccid *ccid, struct sock *sk)
110{
111 if (ccid->ccid_hc_tx_exit != NULL &&
112 dccp_sk(sk)->dccps_hc_tx_ccid_private != NULL)
113 ccid->ccid_hc_tx_exit(sk);
114}
115
116static inline void ccid_hc_rx_packet_recv(struct ccid *ccid, struct sock *sk,
117 struct sk_buff *skb)
118{
119 if (ccid->ccid_hc_rx_packet_recv != NULL)
120 ccid->ccid_hc_rx_packet_recv(sk, skb);
121}
122
123static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
124 struct sk_buff *skb)
125{
126 if (ccid->ccid_hc_tx_packet_recv != NULL)
127 ccid->ccid_hc_tx_packet_recv(sk, skb);
128}
129
130static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
131 unsigned char option,
132 unsigned char len, u16 idx,
133 unsigned char* value)
134{
135 int rc = 0;
136 if (ccid->ccid_hc_tx_parse_options != NULL)
137 rc = ccid->ccid_hc_tx_parse_options(sk, option, len, idx,
138 value);
139 return rc;
140}
141
142static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
143 unsigned char option,
144 unsigned char len, u16 idx,
145 unsigned char* value)
146{
147 int rc = 0;
148 if (ccid->ccid_hc_rx_parse_options != NULL)
149 rc = ccid->ccid_hc_rx_parse_options(sk, option, len, idx, value);
150 return rc;
151}
152
153static inline void ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
154 struct sk_buff *skb)
155{
156 if (ccid->ccid_hc_tx_insert_options != NULL)
157 ccid->ccid_hc_tx_insert_options(sk, skb);
158}
159
160static inline void ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
161 struct sk_buff *skb)
162{
163 if (ccid->ccid_hc_rx_insert_options != NULL)
164 ccid->ccid_hc_rx_insert_options(sk, skb);
165}
166
167static inline void ccid_hc_rx_get_info(struct ccid *ccid, struct sock *sk,
168 struct tcp_info *info)
169{
170 if (ccid->ccid_hc_rx_get_info != NULL)
171 ccid->ccid_hc_rx_get_info(sk, info);
172}
173
174static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk,
175 struct tcp_info *info)
176{
177 if (ccid->ccid_hc_tx_get_info != NULL)
178 ccid->ccid_hc_tx_get_info(sk, info);
179}
180#endif /* _CCID_H */
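
Each wrapper above null-checks its slot, so a CCID fills in only the operations it implements. A minimal sketch of a module plugging into this table, modeled on the ccid3 module later in this patch; the id, name and function body are placeholders:

#include "ccid.h"

static int example_ccid_init(struct sock *sk)
{
	return 0;	/* per-socket setup would go here */
}

static struct ccid example_ccid = {
	.ccid_id    = 254,		/* placeholder id, < CCID_MAX */
	.ccid_name  = "example",
	.ccid_owner = THIS_MODULE,
	.ccid_init  = example_ccid_init, /* required by ccid_register() */
};

static int __init example_module_init(void)
{
	return ccid_register(&example_ccid);
}

static void __exit example_module_exit(void)
{
	ccid_unregister(&example_ccid);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
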
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
new file mode 100644
index 000000000000..7684d83946a4
--- /dev/null
+++ b/net/dccp/ccids/Kconfig
@@ -0,0 +1,29 @@
1menu "DCCP CCIDs Configuration (EXPERIMENTAL)"
2 depends on IP_DCCP && EXPERIMENTAL
3
4config IP_DCCP_CCID3
5 tristate "CCID3 (TFRC) (EXPERIMENTAL)"
6 depends on IP_DCCP
7 ---help---
8 CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
9 rate-controlled congestion control mechanism. TFRC is designed to
10 be reasonably fair when competing for bandwidth with TCP-like flows,
11 where a flow is "reasonably fair" if its sending rate is generally
12 within a factor of two of the sending rate of a TCP flow under the
13 same conditions. However, TFRC has a much lower variation of
14 throughput over time compared with TCP, which makes CCID 3 more
15 suitable than CCID 2 for applications such as streaming media where a
16 relatively smooth sending rate is of importance.
17
18 CCID 3 is further described in [CCID 3 PROFILE]. The TFRC
19 congestion control algorithms were initially described in RFC 3448.
20
21 This text was extracted from draft-ietf-dccp-spec-11.txt.
22
23 If in doubt, say M.
24
25config IP_DCCP_TFRC_LIB
26 depends on IP_DCCP_CCID3
27 def_tristate IP_DCCP_CCID3
28
29endmenu
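
For reference, since this help text only cites it: RFC 3448's TFRC throughput equation, which the tfrc library selected here (and tfrc_calc_x(), used by ccid3.c below) is built around:

X = \frac{s}{R\sqrt{2bp/3} + t_{RTO}\left(3\sqrt{3bp/8}\right)p\left(1 + 32p^{2}\right)}

where s is the segment size, R the round-trip time, p the loss event rate, b the number of packets acknowledged by a single ACK (typically 1), and t_RTO the retransmit timeout, usually approximated as 4R.
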
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile
new file mode 100644
index 000000000000..956f79f50743
--- /dev/null
+++ b/net/dccp/ccids/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o
2
3dccp_ccid3-y := ccid3.o
4
5obj-y += lib/
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
new file mode 100644
index 000000000000..7bf3b3a91e97
--- /dev/null
+++ b/net/dccp/ccids/ccid3.c
@@ -0,0 +1,1221 @@
1/*
2 * net/dccp/ccids/ccid3.c
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
6 *
7 * An implementation of the DCCP protocol
8 *
9 * This code has been developed by the University of Waikato WAND
10 * research group. For further information please see http://www.wand.net.nz/
11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors:
14 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
15 *
16 * Changes to meet Linux coding standards, to make it meet latest ccid3 draft
17 * and to make it work as a loadable module in the DCCP stack written by
18 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
19 *
20 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36
37#include <linux/config.h>
38#include "../ccid.h"
39#include "../dccp.h"
40#include "lib/packet_history.h"
41#include "lib/loss_interval.h"
42#include "lib/tfrc.h"
43#include "ccid3.h"
44
45/*
46 * The maths here uses a factor of 10 to avoid 32-bit overflow when a is big.
47 */
48static inline u32 usecs_div(const u32 a, const u32 b)
49{
50 const u32 tmp = a * (USEC_PER_SEC / 10);
51 return b > 20 ? tmp / (b / 10) : tmp;
52}
53
54static int ccid3_debug;
55
56#ifdef CCID3_DEBUG
57#define ccid3_pr_debug(format, a...) \
58 do { if (ccid3_debug) \
59 printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
60 } while (0)
61#else
62#define ccid3_pr_debug(format, a...)
63#endif
64
65static struct dccp_tx_hist *ccid3_tx_hist;
66static struct dccp_rx_hist *ccid3_rx_hist;
67static struct dccp_li_hist *ccid3_li_hist;
68
69static int ccid3_init(struct sock *sk)
70{
71 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
72 return 0;
73}
74
75static void ccid3_exit(struct sock *sk)
76{
77 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
78}
79
80/* TFRC sender states */
81enum ccid3_hc_tx_states {
82 TFRC_SSTATE_NO_SENT = 1,
83 TFRC_SSTATE_NO_FBACK,
84 TFRC_SSTATE_FBACK,
85 TFRC_SSTATE_TERM,
86};
87
88#ifdef CCID3_DEBUG
89static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
90{
91 static char *ccid3_state_names[] = {
92 [TFRC_SSTATE_NO_SENT] = "NO_SENT",
93 [TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
94 [TFRC_SSTATE_FBACK] = "FBACK",
95 [TFRC_SSTATE_TERM] = "TERM",
96 };
97
98 return ccid3_state_names[state];
99}
100#endif
101
102static inline void ccid3_hc_tx_set_state(struct sock *sk,
103 enum ccid3_hc_tx_states state)
104{
105 struct dccp_sock *dp = dccp_sk(sk);
106 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
107 enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;
108
109 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
110 dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
111 ccid3_tx_state_name(state));
112 WARN_ON(state == oldstate);
113 hctx->ccid3hctx_state = state;
114}
115
116/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
117static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx)
118{
119 /*
120	 * If no feedback has been received, the spec says t_ipi is 1 second
121	 * (set elsewhere), doubling after every no feedback timer expiry (separate function)
122 */
123 if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK)
124 hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s,
125 hctx->ccid3hctx_x);
126}
127
128/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
129static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx)
130{
131 hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
132 TFRC_OPSYS_HALF_TIME_GRAN);
133}
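/*
 * E.g. with HZ = 100, TFRC_OPSYS_HALF_TIME_GRAN is 1000000 / 200 = 5000us,
 * so for any t_ipi above 10ms delta is capped at half the scheduler
 * granularity, as per RFC 3448 section 4.6.
 */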
134
135/*
136 * Update X by
137 * If (p > 0)
138 * x_calc = calcX(s, R, p);
139 * X = max(min(X_calc, 2 * X_recv), s / t_mbi);
140 * Else
141 * If (now - tld >= R)
142 * X = max(min(2 * X, 2 * X_recv), s / R);
143 * tld = now;
144 */
145static void ccid3_hc_tx_update_x(struct sock *sk)
146{
147 struct dccp_sock *dp = dccp_sk(sk);
148 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
149
150 /* To avoid large error in calcX */
151 if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
152 hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
153 hctx->ccid3hctx_rtt,
154 hctx->ccid3hctx_p);
155 hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
156 2 * hctx->ccid3hctx_x_recv),
157 (hctx->ccid3hctx_s /
158 TFRC_MAX_BACK_OFF_TIME));
159 } else {
160 struct timeval now;
161
162 do_gettimeofday(&now);
163 if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
164 hctx->ccid3hctx_rtt) {
165 hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
166 hctx->ccid3hctx_x) * 2,
167 usecs_div(hctx->ccid3hctx_s,
168 hctx->ccid3hctx_rtt));
169 hctx->ccid3hctx_t_ld = now;
170 }
171 }
172}
173
174static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
175{
176 struct sock *sk = (struct sock *)data;
177 struct dccp_sock *dp = dccp_sk(sk);
178 unsigned long next_tmout = 0;
179 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
180
181 bh_lock_sock(sk);
182 if (sock_owned_by_user(sk)) {
183 /* Try again later. */
184 /* XXX: set some sensible MIB */
185 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
186 jiffies + HZ / 5);
187 goto out;
188 }
189
190 ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
191 ccid3_tx_state_name(hctx->ccid3hctx_state));
192
193 switch (hctx->ccid3hctx_state) {
194 case TFRC_SSTATE_TERM:
195 goto out;
196 case TFRC_SSTATE_NO_FBACK:
197 /* Halve send rate */
198 hctx->ccid3hctx_x /= 2;
199 if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
200 TFRC_MAX_BACK_OFF_TIME))
201 hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
202 TFRC_MAX_BACK_OFF_TIME);
203
204 ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
205 "bytes/s\n",
206 dccp_role(sk), sk,
207 ccid3_tx_state_name(hctx->ccid3hctx_state),
208 hctx->ccid3hctx_x);
209 next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s,
210 hctx->ccid3hctx_x),
211 TFRC_INITIAL_TIMEOUT);
212 /*
213	 * FIXME - not sure the above calculation is correct. See section
214	 * 5 of the CCID 3 draft (rev 11); we should really adjust t_ipi
215	 * and double that to achieve the backoff
216 */
217 break;
218 case TFRC_SSTATE_FBACK:
219 /*
220 * Check if IDLE since last timeout and recv rate is less than
221 * 4 packets per RTT
222 */
223 if (!hctx->ccid3hctx_idle ||
224 (hctx->ccid3hctx_x_recv >=
225 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
226 ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
227 dccp_role(sk), sk,
228 ccid3_tx_state_name(hctx->ccid3hctx_state));
229 /* Halve sending rate */
230
231 /* If (X_calc > 2 * X_recv)
232 * X_recv = max(X_recv / 2, s / (2 * t_mbi));
233 * Else
234 * X_recv = X_calc / 4;
235 */
236 BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
237 hctx->ccid3hctx_x_calc == 0);
238
239 /* check also if p is zero -> x_calc is infinity? */
240 if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
241 hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
242 hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
243 hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
244 else
245 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;
246
247 /* Update sending rate */
248 ccid3_hc_tx_update_x(sk);
249 }
250 /*
251 * Schedule no feedback timer to expire in
252 * max(4 * R, 2 * s / X)
253 */
254 next_tmout = max_t(u32, hctx->ccid3hctx_t_rto,
255 2 * usecs_div(hctx->ccid3hctx_s,
256 hctx->ccid3hctx_x));
257 break;
258 default:
259 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
260 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
261 dump_stack();
262 goto out;
263 }
264
265 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
266 jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
267 hctx->ccid3hctx_idle = 1;
268out:
269 bh_unlock_sock(sk);
270 sock_put(sk);
271}
272
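/*
 * Returns 0 when the packet may be sent immediately, a positive number of
 * milliseconds to wait until the nominal send time otherwise, or a
 * negative errno (-ENOTCONN, -ENOBUFS, -EINVAL) on failure.
 */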
273static int ccid3_hc_tx_send_packet(struct sock *sk,
274 struct sk_buff *skb, int len)
275{
276 struct dccp_sock *dp = dccp_sk(sk);
277 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
278 struct dccp_tx_hist_entry *new_packet;
279 struct timeval now;
280 long delay;
281 int rc = -ENOTCONN;
282
283	/* Check if pure ACK or terminating */
284
285 /*
286	 * XXX: We only call this function for DATA and DATAACK, and these
287	 * packets can have zero length, but why the comment about "pure ACK"?
288 */
289 if (hctx == NULL || len == 0 ||
290 hctx->ccid3hctx_state == TFRC_SSTATE_TERM)
291 goto out;
292
293 /* See if last packet allocated was not sent */
294 new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
295 if (new_packet == NULL || new_packet->dccphtx_sent) {
296 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
297 SLAB_ATOMIC);
298
299 rc = -ENOBUFS;
300 if (new_packet == NULL) {
301 ccid3_pr_debug("%s, sk=%p, not enough mem to add "
302 "to history, send refused\n",
303 dccp_role(sk), sk);
304 goto out;
305 }
306
307 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
308 }
309
310 do_gettimeofday(&now);
311
312 switch (hctx->ccid3hctx_state) {
313 case TFRC_SSTATE_NO_SENT:
314 ccid3_pr_debug("%s, sk=%p, first packet(%llu)\n",
315 dccp_role(sk), sk, dp->dccps_gss);
316
317 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
318 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
319 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
320 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
321 hctx->ccid3hctx_last_win_count = 0;
322 hctx->ccid3hctx_t_last_win_count = now;
323 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
324 hctx->ccid3hctx_t_ipi = TFRC_INITIAL_TIMEOUT;
325
326 /* Set nominal send time for initial packet */
327 hctx->ccid3hctx_t_nom = now;
328 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
329 hctx->ccid3hctx_t_ipi);
330 ccid3_calc_new_delta(hctx);
331 rc = 0;
332 break;
333 case TFRC_SSTATE_NO_FBACK:
334 case TFRC_SSTATE_FBACK:
335 delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) -
336 hctx->ccid3hctx_delta);
337 ccid3_pr_debug("send_packet delay=%ld\n", delay);
338 delay /= -1000;
339		/* dividing by -1000 converts to ms and gets the sign right */
340 rc = delay > 0 ? delay : 0;
341 break;
342 default:
343 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
344 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
345 dump_stack();
346 rc = -EINVAL;
347 break;
348 }
349
350	/* Can we send? If so, add options and add to packet history */
351 if (rc == 0)
352 new_packet->dccphtx_ccval =
353 DCCP_SKB_CB(skb)->dccpd_ccval =
354 hctx->ccid3hctx_last_win_count;
355out:
356 return rc;
357}
358
359static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
360{
361 struct dccp_sock *dp = dccp_sk(sk);
362 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
363 struct timeval now;
364
365 BUG_ON(hctx == NULL);
366
367 if (hctx->ccid3hctx_state == TFRC_SSTATE_TERM) {
368 ccid3_pr_debug("%s, sk=%p, while state is TFRC_SSTATE_TERM!\n",
369 dccp_role(sk), sk);
370 return;
371 }
372
373 do_gettimeofday(&now);
374
375 /* check if we have sent a data packet */
376 if (len > 0) {
377 unsigned long quarter_rtt;
378 struct dccp_tx_hist_entry *packet;
379
380 packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
381 if (packet == NULL) {
382			printk(KERN_CRIT "%s: packet doesn't exist in "
383 "history!\n", __FUNCTION__);
384 return;
385 }
386 if (packet->dccphtx_sent) {
387 printk(KERN_CRIT "%s: no unsent packet in history!\n",
388 __FUNCTION__);
389 return;
390 }
391 packet->dccphtx_tstamp = now;
392 packet->dccphtx_seqno = dp->dccps_gss;
393 /*
394		 * Check if win_count has changed.
395		 * Algorithm in "8.1. Window Counter Value" in
396 * draft-ietf-dccp-ccid3-11.txt
397 */
398 quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
399 if (likely(hctx->ccid3hctx_rtt > 8))
400 quarter_rtt /= hctx->ccid3hctx_rtt / 4;
401
402 if (quarter_rtt > 0) {
403 hctx->ccid3hctx_t_last_win_count = now;
404 hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
405 min_t(unsigned long, quarter_rtt, 5)) % 16;
406 ccid3_pr_debug("%s, sk=%p, window changed from "
407 "%u to %u!\n",
408 dccp_role(sk), sk,
409 packet->dccphtx_ccval,
410 hctx->ccid3hctx_last_win_count);
411 }
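		/*
		 * E.g. with rtt = 40000us and 30000us elapsed since
		 * t_last_win_count: quarter_rtt = 30000 / 10000 = 3, so the
		 * window counter advances by min(3, 5) modulo 16.
		 */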
412
413 hctx->ccid3hctx_idle = 0;
414 packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
415 packet->dccphtx_sent = 1;
416 } else
417 ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
418 dccp_role(sk), sk, dp->dccps_gss);
419
420 switch (hctx->ccid3hctx_state) {
421 case TFRC_SSTATE_NO_SENT:
422 /* if first wasn't pure ack */
423 if (len != 0)
424 printk(KERN_CRIT "%s: %s, First packet sent is noted "
425 "as a data packet\n",
426 __FUNCTION__, dccp_role(sk));
427 return;
428 case TFRC_SSTATE_NO_FBACK:
429 case TFRC_SSTATE_FBACK:
430 if (len > 0) {
431 hctx->ccid3hctx_t_nom = now;
432 ccid3_calc_new_t_ipi(hctx);
433 ccid3_calc_new_delta(hctx);
434 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
435 hctx->ccid3hctx_t_ipi);
436 }
437 break;
438 default:
439 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
440 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
441 dump_stack();
442 break;
443 }
444}
445
446static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
447{
448 struct dccp_sock *dp = dccp_sk(sk);
449 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
450 struct ccid3_options_received *opt_recv;
451 struct dccp_tx_hist_entry *packet;
452 unsigned long next_tmout;
453 u32 t_elapsed;
454 u32 pinv;
455 u32 x_recv;
456 u32 r_sample;
457
458 if (hctx == NULL)
459 return;
460
461 if (hctx->ccid3hctx_state == TFRC_SSTATE_TERM) {
462 ccid3_pr_debug("%s, sk=%p, received a packet when "
463 "terminating!\n", dccp_role(sk), sk);
464 return;
465 }
466
467 /* we are only interested in ACKs */
468 if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
469 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
470 return;
471
472 opt_recv = &hctx->ccid3hctx_options_received;
473
474 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time;
475 x_recv = opt_recv->ccid3or_receive_rate;
476 pinv = opt_recv->ccid3or_loss_event_rate;
477
478 switch (hctx->ccid3hctx_state) {
479 case TFRC_SSTATE_NO_SENT:
480 /* FIXME: what to do here? */
481 return;
482 case TFRC_SSTATE_NO_FBACK:
483 case TFRC_SSTATE_FBACK:
484 /* Calculate new round trip sample by
485 * R_sample = (now - t_recvdata) - t_delay */
486 /* get t_recvdata from history */
487 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
488 DCCP_SKB_CB(skb)->dccpd_ack_seq);
489 if (packet == NULL) {
490			ccid3_pr_debug("%s, sk=%p, seqno %llu(%s) doesn't "
491 "exist in history!\n",
492 dccp_role(sk), sk,
493 DCCP_SKB_CB(skb)->dccpd_ack_seq,
494 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
495 return;
496 }
497
498 /* Update RTT */
499 r_sample = timeval_now_delta(&packet->dccphtx_tstamp);
500 /* FIXME: */
501 // r_sample -= usecs_to_jiffies(t_elapsed * 10);
502
503 /* Update RTT estimate by
504 * If (No feedback recv)
505 * R = R_sample;
506 * Else
507 * R = q * R + (1 - q) * R_sample;
508 *
509		 * q is a constant, RFC 3448 recommends 0.9
510 */
511 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
512 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
513 hctx->ccid3hctx_rtt = r_sample;
514 } else
515 hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
516 r_sample / 10;
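		/*
		 * E.g. with a previous rtt of 100000us and an r_sample of
		 * 50000us, the new estimate is 90000 + 5000 = 95000us.
		 */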
517
518 ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
519			       "r_sample=%uus\n", dccp_role(sk), sk,
520 hctx->ccid3hctx_rtt, r_sample);
521
522 /* Update timeout interval */
523 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
524 USEC_PER_SEC);
525
526 /* Update receive rate */
527		hctx->ccid3hctx_x_recv = x_recv; /* X_recv in bytes per sec */
528
529 /* Update loss event rate */
530 if (pinv == ~0 || pinv == 0)
531 hctx->ccid3hctx_p = 0;
532 else {
533 hctx->ccid3hctx_p = 1000000 / pinv;
534
535 if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
536 hctx->ccid3hctx_p = TFRC_SMALLEST_P;
537 ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
538 dccp_role(sk), sk);
539 }
540 }
541
542 /* unschedule no feedback timer */
543 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);
544
545 /* Update sending rate */
546 ccid3_hc_tx_update_x(sk);
547
548 /* Update next send time */
549 timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
550 hctx->ccid3hctx_t_ipi);
551 ccid3_calc_new_t_ipi(hctx);
552 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
553 hctx->ccid3hctx_t_ipi);
554 ccid3_calc_new_delta(hctx);
555
556 /* remove all packets older than the one acked from history */
557 dccp_tx_hist_purge_older(ccid3_tx_hist,
558 &hctx->ccid3hctx_hist, packet);
559 /*
560		 * As we have calculated new ipi, delta and t_nom it is possible
561		 * that we can now send a packet, so wake up dccp_wait_for_ccids.
562 */
563 sk->sk_write_space(sk);
564
565 /*
566 * Schedule no feedback timer to expire in
567 * max(4 * R, 2 * s / X)
568 */
569 next_tmout = max(hctx->ccid3hctx_t_rto,
570 2 * usecs_div(hctx->ccid3hctx_s,
571 hctx->ccid3hctx_x));
572
573 ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
574 "expire in %lu jiffies (%luus)\n",
575 dccp_role(sk), sk,
576 usecs_to_jiffies(next_tmout), next_tmout);
577
578 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
579 jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
580
581 /* set idle flag */
582 hctx->ccid3hctx_idle = 1;
583 break;
584 default:
585 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
586 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
587 dump_stack();
588 break;
589 }
590}
591
592static void ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
593{
594 const struct dccp_sock *dp = dccp_sk(sk);
595 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
596
597 if (hctx == NULL || !(sk->sk_state == DCCP_OPEN ||
598 sk->sk_state == DCCP_PARTOPEN))
599 return;
600
601 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
602}
603
604static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
605 unsigned char len, u16 idx,
606 unsigned char *value)
607{
608 int rc = 0;
609 struct dccp_sock *dp = dccp_sk(sk);
610 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
611 struct ccid3_options_received *opt_recv;
612
613 if (hctx == NULL)
614 return 0;
615
616 opt_recv = &hctx->ccid3hctx_options_received;
617
618 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
619 opt_recv->ccid3or_seqno = dp->dccps_gsr;
620 opt_recv->ccid3or_loss_event_rate = ~0;
621 opt_recv->ccid3or_loss_intervals_idx = 0;
622 opt_recv->ccid3or_loss_intervals_len = 0;
623 opt_recv->ccid3or_receive_rate = 0;
624 }
625
626 switch (option) {
627 case TFRC_OPT_LOSS_EVENT_RATE:
628 if (len != 4) {
629 ccid3_pr_debug("%s, sk=%p, invalid len for "
630 "TFRC_OPT_LOSS_EVENT_RATE\n",
631 dccp_role(sk), sk);
632 rc = -EINVAL;
633 } else {
634 opt_recv->ccid3or_loss_event_rate = ntohl(*(u32 *)value);
635 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
636 dccp_role(sk), sk,
637 opt_recv->ccid3or_loss_event_rate);
638 }
639 break;
640 case TFRC_OPT_LOSS_INTERVALS:
641 opt_recv->ccid3or_loss_intervals_idx = idx;
642 opt_recv->ccid3or_loss_intervals_len = len;
643 ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
644 dccp_role(sk), sk,
645 opt_recv->ccid3or_loss_intervals_idx,
646 opt_recv->ccid3or_loss_intervals_len);
647 break;
648 case TFRC_OPT_RECEIVE_RATE:
649 if (len != 4) {
650 ccid3_pr_debug("%s, sk=%p, invalid len for "
651 "TFRC_OPT_RECEIVE_RATE\n",
652 dccp_role(sk), sk);
653 rc = -EINVAL;
654 } else {
655 opt_recv->ccid3or_receive_rate = ntohl(*(u32 *)value);
656 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
657 dccp_role(sk), sk,
658 opt_recv->ccid3or_receive_rate);
659 }
660 break;
661 }
662
663 return rc;
664}
665
666static int ccid3_hc_tx_init(struct sock *sk)
667{
668 struct dccp_sock *dp = dccp_sk(sk);
669 struct ccid3_hc_tx_sock *hctx;
670
671 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
672
673 hctx = dp->dccps_hc_tx_ccid_private = kmalloc(sizeof(*hctx),
674 gfp_any());
675 if (hctx == NULL)
676 return -ENOMEM;
677
678 memset(hctx, 0, sizeof(*hctx));
679
680 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
681 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
682 hctx->ccid3hctx_s = dp->dccps_packet_size;
683 else
684 hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;
685
686 /* Set transmission rate to 1 packet per second */
687 hctx->ccid3hctx_x = hctx->ccid3hctx_s;
688 hctx->ccid3hctx_t_rto = USEC_PER_SEC;
689 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
690 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
691 init_timer(&hctx->ccid3hctx_no_feedback_timer);
692
693 return 0;
694}
695
696static void ccid3_hc_tx_exit(struct sock *sk)
697{
698 struct dccp_sock *dp = dccp_sk(sk);
699 struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
700
701 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
702 BUG_ON(hctx == NULL);
703
704 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
705 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);
706
707 /* Empty packet history */
708 dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
709
710 kfree(dp->dccps_hc_tx_ccid_private);
711 dp->dccps_hc_tx_ccid_private = NULL;
712}
713
714/*
715 * RX Half Connection methods
716 */
717
718/* TFRC receiver states */
719enum ccid3_hc_rx_states {
720 TFRC_RSTATE_NO_DATA = 1,
721 TFRC_RSTATE_DATA,
722 TFRC_RSTATE_TERM = 127,
723};
724
725#ifdef CCID3_DEBUG
726static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
727{
728 static char *ccid3_rx_state_names[] = {
729 [TFRC_RSTATE_NO_DATA] = "NO_DATA",
730 [TFRC_RSTATE_DATA] = "DATA",
731 [TFRC_RSTATE_TERM] = "TERM",
732 };
733
734 return ccid3_rx_state_names[state];
735}
736#endif
737
738static inline void ccid3_hc_rx_set_state(struct sock *sk,
739 enum ccid3_hc_rx_states state)
740{
741 struct dccp_sock *dp = dccp_sk(sk);
742 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
743 enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;
744
745 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
746 dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
747 ccid3_rx_state_name(state));
748 WARN_ON(state == oldstate);
749 hcrx->ccid3hcrx_state = state;
750}
751
752static void ccid3_hc_rx_send_feedback(struct sock *sk)
753{
754 struct dccp_sock *dp = dccp_sk(sk);
755 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
756 struct dccp_rx_hist_entry *packet;
757 struct timeval now;
758
759 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
760
761 do_gettimeofday(&now);
762
763 switch (hcrx->ccid3hcrx_state) {
764 case TFRC_RSTATE_NO_DATA:
765 hcrx->ccid3hcrx_x_recv = 0;
766 break;
767 case TFRC_RSTATE_DATA: {
768 const u32 delta = timeval_delta(&now,
769 &hcrx->ccid3hcrx_tstamp_last_feedback);
770
771 hcrx->ccid3hcrx_x_recv = (hcrx->ccid3hcrx_bytes_recv *
772 USEC_PER_SEC);
773 if (likely(delta > 1))
774 hcrx->ccid3hcrx_x_recv /= delta;
775 }
776 break;
777 default:
778 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
779 __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
780 dump_stack();
781 return;
782 }
783
784 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
785 if (packet == NULL) {
786 printk(KERN_CRIT "%s: %s, sk=%p, no data packet in history!\n",
787 __FUNCTION__, dccp_role(sk), sk);
788 dump_stack();
789 return;
790 }
791
792 hcrx->ccid3hcrx_tstamp_last_feedback = now;
793 hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval;
794 hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno;
795 hcrx->ccid3hcrx_bytes_recv = 0;
796
797 /* Convert to multiples of 10us */
798 hcrx->ccid3hcrx_elapsed_time =
799 timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
800 if (hcrx->ccid3hcrx_p == 0)
801 hcrx->ccid3hcrx_pinv = ~0;
802 else
803 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
804 dccp_send_ack(sk);
805}
806
807static void ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
808{
809 const struct dccp_sock *dp = dccp_sk(sk);
810 u32 x_recv, pinv;
811 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
812
813 if (hcrx == NULL || !(sk->sk_state == DCCP_OPEN ||
814 sk->sk_state == DCCP_PARTOPEN))
815 return;
816
817 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;
818
819 if (dccp_packet_without_ack(skb))
820 return;
821
822 if (hcrx->ccid3hcrx_elapsed_time != 0)
823 dccp_insert_option_elapsed_time(sk, skb,
824 hcrx->ccid3hcrx_elapsed_time);
825 dccp_insert_option_timestamp(sk, skb);
826 x_recv = htonl(hcrx->ccid3hcrx_x_recv);
827 pinv = htonl(hcrx->ccid3hcrx_pinv);
828 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
829 &pinv, sizeof(pinv));
830 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
831 &x_recv, sizeof(x_recv));
832}
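/*
 * The feedback built above thus carries: an elapsed time option (when
 * non-zero), a timestamp option, the inverse loss event rate pinv in
 * option 192 (TFRC_OPT_LOSS_EVENT_RATE) and the receive rate in bytes/sec
 * in option 194 (TFRC_OPT_RECEIVE_RATE), both 32-bit network-order values.
 */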
833
834/* calculate first loss interval
835 *
836 * returns estimated loss interval in usecs */
837
838static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
839{
840 struct dccp_sock *dp = dccp_sk(sk);
841 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
842 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
843 u32 rtt, delta, x_recv, fval, p, tmp2;
844 struct timeval tstamp = { 0, };
845 int interval = 0;
846 int win_count = 0;
847 int step = 0;
848 u64 tmp1;
849
850 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
851 dccphrx_node) {
852 if (dccp_rx_hist_entry_data_packet(entry)) {
853 tail = entry;
854
855 switch (step) {
856 case 0:
857 tstamp = entry->dccphrx_tstamp;
858 win_count = entry->dccphrx_ccval;
859 step = 1;
860 break;
861 case 1:
862 interval = win_count - entry->dccphrx_ccval;
863 if (interval < 0)
864 interval += TFRC_WIN_COUNT_LIMIT;
865 if (interval > 4)
866 goto found;
867 break;
868 }
869 }
870 }
871
872 if (step == 0) {
873 printk(KERN_CRIT "%s: %s, sk=%p, packet history contains no "
874 "data packets!\n",
875 __FUNCTION__, dccp_role(sk), sk);
876 return ~0;
877 }
878
879 if (interval == 0) {
880 ccid3_pr_debug("%s, sk=%p, Could not find a win_count "
881 "interval > 0. Defaulting to 1\n",
882 dccp_role(sk), sk);
883 interval = 1;
884 }
885found:
886 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
887 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
888 dccp_role(sk), sk, rtt);
889 if (rtt == 0)
890 rtt = 1;
891
892 delta = timeval_now_delta(&hcrx->ccid3hcrx_tstamp_last_feedback);
893 x_recv = hcrx->ccid3hcrx_bytes_recv * USEC_PER_SEC;
894 if (likely(delta > 1))
895 x_recv /= delta;
896
897 tmp1 = (u64)x_recv * (u64)rtt;
898 do_div(tmp1,10000000);
899 tmp2 = (u32)tmp1;
900 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
901 /* do not alter order above or you will get overflow on 32 bit */
902 p = tfrc_calc_x_reverse_lookup(fval);
903 ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
904 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
905
906 if (p == 0)
907 return ~0;
908 else
909 return 1000000 / p;
910}
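/*
 * Fixed point note on the above: tmp1 = x_recv * rtt / 10^7 and
 * fval = s * 10^5 / tmp1, i.e. fval = s * 10^12 / (x_recv * rtt), which
 * is s / (X * R) - the f(p) term the TFRC equation is solved for -
 * apparently scaled by 10^6 for the reverse lookup. The 64-bit
 * intermediate and the ordering of the divides keep this within 32 bits.
 */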
911
912static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
913{
914 struct dccp_sock *dp = dccp_sk(sk);
915 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
916
917 if (seq_loss != DCCP_MAX_SEQNO + 1 &&
918 list_empty(&hcrx->ccid3hcrx_li_hist)) {
919 struct dccp_li_hist_entry *li_tail;
920
921 li_tail = dccp_li_hist_interval_new(ccid3_li_hist,
922 &hcrx->ccid3hcrx_li_hist,
923 seq_loss, win_loss);
924 if (li_tail == NULL)
925 return;
926 li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
927 }
928 /* FIXME: find end of interval */
929}
930
931static void ccid3_hc_rx_detect_loss(struct sock *sk)
932{
933 struct dccp_sock *dp = dccp_sk(sk);
934 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
935 u8 win_loss;
936 const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist,
937 &hcrx->ccid3hcrx_li_hist,
938 &win_loss);
939
940 ccid3_hc_rx_update_li(sk, seq_loss, win_loss);
941}
942
943static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
944{
945 struct dccp_sock *dp = dccp_sk(sk);
946 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
947 const struct dccp_options_received *opt_recv;
948 struct dccp_rx_hist_entry *packet;
949 struct timeval now;
950 u8 win_count;
951 u32 p_prev;
952 int ins;
953
954 if (hcrx == NULL)
955 return;
956
957 BUG_ON(!(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
958 hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA));
959
960 opt_recv = &dp->dccps_options_received;
961
962 switch (DCCP_SKB_CB(skb)->dccpd_type) {
963 case DCCP_PKT_ACK:
964 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
965 return;
966 case DCCP_PKT_DATAACK:
967 if (opt_recv->dccpor_timestamp_echo == 0)
968 break;
969 p_prev = hcrx->ccid3hcrx_rtt;
970 do_gettimeofday(&now);
971 hcrx->ccid3hcrx_rtt = timeval_usecs(&now) -
972 (opt_recv->dccpor_timestamp_echo -
973 opt_recv->dccpor_elapsed_time) * 10;
974 if (p_prev != hcrx->ccid3hcrx_rtt)
975 ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n",
976 dccp_role(sk), hcrx->ccid3hcrx_rtt,
977 opt_recv->dccpor_elapsed_time);
978 break;
979 case DCCP_PKT_DATA:
980 break;
981 default:
982 ccid3_pr_debug("%s, sk=%p, not DATA/DATAACK/ACK packet(%s)\n",
983 dccp_role(sk), sk,
984 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
985 return;
986 }
987
988 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, opt_recv->dccpor_ndp,
989 skb, SLAB_ATOMIC);
990 if (packet == NULL) {
991 ccid3_pr_debug("%s, sk=%p, Not enough mem to add rx packet "
992 "to history (consider it lost)!",
993 dccp_role(sk), sk);
994 return;
995 }
996
997 win_count = packet->dccphrx_ccval;
998
999 ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
1000 &hcrx->ccid3hcrx_li_hist, packet);
1001
1002 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
1003 return;
1004
1005 switch (hcrx->ccid3hcrx_state) {
1006 case TFRC_RSTATE_NO_DATA:
1007 ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
1008 "feedback\n",
1009 dccp_role(sk), sk,
1010 dccp_state_name(sk->sk_state), skb);
1011 ccid3_hc_rx_send_feedback(sk);
1012 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
1013 return;
1014 case TFRC_RSTATE_DATA:
1015 hcrx->ccid3hcrx_bytes_recv += skb->len -
1016 dccp_hdr(skb)->dccph_doff * 4;
1017 if (ins != 0)
1018 break;
1019
1020 do_gettimeofday(&now);
1021 if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
1022 hcrx->ccid3hcrx_rtt) {
1023 hcrx->ccid3hcrx_tstamp_last_ack = now;
1024 ccid3_hc_rx_send_feedback(sk);
1025 }
1026 return;
1027 default:
1028 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
1029 __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
1030 dump_stack();
1031 return;
1032 }
1033
1034 /* Dealing with packet loss */
1035 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
1036 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1037
1038 ccid3_hc_rx_detect_loss(sk);
1039 p_prev = hcrx->ccid3hcrx_p;
1040
1041 /* Calculate loss event rate */
1042 if (!list_empty(&hcrx->ccid3hcrx_li_hist))
1043 /* Scaling up by 1000000 as fixed decimal */
1044 hcrx->ccid3hcrx_p = 1000000 / dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
1045
1046 if (hcrx->ccid3hcrx_p > p_prev) {
1047 ccid3_hc_rx_send_feedback(sk);
1048 return;
1049 }
1050}
1051
1052static int ccid3_hc_rx_init(struct sock *sk)
1053{
1054 struct dccp_sock *dp = dccp_sk(sk);
1055 struct ccid3_hc_rx_sock *hcrx;
1056
1057 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
1058
1059 hcrx = dp->dccps_hc_rx_ccid_private = kmalloc(sizeof(*hcrx),
1060 gfp_any());
1061 if (hcrx == NULL)
1062 return -ENOMEM;
1063
1064 memset(hcrx, 0, sizeof(*hcrx));
1065
1066 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
1067 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
1068 hcrx->ccid3hcrx_s = dp->dccps_packet_size;
1069 else
1070 hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;
1071
1072 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
1073 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
1074 INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
1075 /*
1076	 * XXX this seems to be paranoid, need to think more about this, for
1077	 * now start with something different from zero. -acme
1078 */
1079 hcrx->ccid3hcrx_rtt = USEC_PER_SEC / 5;
1080 return 0;
1081}
1082
1083static void ccid3_hc_rx_exit(struct sock *sk)
1084{
1085 struct dccp_sock *dp = dccp_sk(sk);
1086 struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
1087
1088 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
1089
1090 if (hcrx == NULL)
1091 return;
1092
1093 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
1094
1095 /* Empty packet history */
1096 dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);
1097
1098 /* Empty loss interval history */
1099 dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
1100
1101 kfree(dp->dccps_hc_rx_ccid_private);
1102 dp->dccps_hc_rx_ccid_private = NULL;
1103}
1104
1105static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
1106{
1107 const struct dccp_sock *dp = dccp_sk(sk);
1108 const struct ccid3_hc_rx_sock *hcrx = dp->dccps_hc_rx_ccid_private;
1109
1110 if (hcrx == NULL)
1111 return;
1112
1113 info->tcpi_ca_state = hcrx->ccid3hcrx_state;
1114 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1115 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
1116}
1117
1118static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
1119{
1120 const struct dccp_sock *dp = dccp_sk(sk);
1121 const struct ccid3_hc_tx_sock *hctx = dp->dccps_hc_tx_ccid_private;
1122
1123 if (hctx == NULL)
1124 return;
1125
1126 info->tcpi_rto = hctx->ccid3hctx_t_rto;
1127 info->tcpi_rtt = hctx->ccid3hctx_rtt;
1128}
1129
1130static struct ccid ccid3 = {
1131 .ccid_id = 3,
1132 .ccid_name = "ccid3",
1133 .ccid_owner = THIS_MODULE,
1134 .ccid_init = ccid3_init,
1135 .ccid_exit = ccid3_exit,
1136 .ccid_hc_tx_init = ccid3_hc_tx_init,
1137 .ccid_hc_tx_exit = ccid3_hc_tx_exit,
1138 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
1139 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
1140 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
1141 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
1142 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
1143 .ccid_hc_rx_init = ccid3_hc_rx_init,
1144 .ccid_hc_rx_exit = ccid3_hc_rx_exit,
1145 .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
1146 .ccid_hc_rx_packet_recv = ccid3_hc_rx_packet_recv,
1147 .ccid_hc_rx_get_info = ccid3_hc_rx_get_info,
1148 .ccid_hc_tx_get_info = ccid3_hc_tx_get_info,
1149};
1150
1151module_param(ccid3_debug, int, 0444);
1152MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
1153
1154static __init int ccid3_module_init(void)
1155{
1156 int rc = -ENOBUFS;
1157
1158 ccid3_rx_hist = dccp_rx_hist_new("ccid3");
1159 if (ccid3_rx_hist == NULL)
1160 goto out;
1161
1162 ccid3_tx_hist = dccp_tx_hist_new("ccid3");
1163 if (ccid3_tx_hist == NULL)
1164 goto out_free_rx;
1165
1166 ccid3_li_hist = dccp_li_hist_new("ccid3");
1167 if (ccid3_li_hist == NULL)
1168 goto out_free_tx;
1169
1170 rc = ccid_register(&ccid3);
1171 if (rc != 0)
1172 goto out_free_loss_interval_history;
1173out:
1174 return rc;
1175
1176out_free_loss_interval_history:
1177 dccp_li_hist_delete(ccid3_li_hist);
1178 ccid3_li_hist = NULL;
1179out_free_tx:
1180 dccp_tx_hist_delete(ccid3_tx_hist);
1181 ccid3_tx_hist = NULL;
1182out_free_rx:
1183 dccp_rx_hist_delete(ccid3_rx_hist);
1184 ccid3_rx_hist = NULL;
1185 goto out;
1186}
1187module_init(ccid3_module_init);
1188
1189static __exit void ccid3_module_exit(void)
1190{
1191#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
1192 /*
1193 * Hack to use while developing, so that we get rid of the control
1194 * sock, that is what keeps a refcount on dccp.ko -acme
1195 */
1196 extern void dccp_ctl_sock_exit(void);
1197
1198 dccp_ctl_sock_exit();
1199#endif
1200 ccid_unregister(&ccid3);
1201
1202 if (ccid3_tx_hist != NULL) {
1203 dccp_tx_hist_delete(ccid3_tx_hist);
1204 ccid3_tx_hist = NULL;
1205 }
1206 if (ccid3_rx_hist != NULL) {
1207 dccp_rx_hist_delete(ccid3_rx_hist);
1208 ccid3_rx_hist = NULL;
1209 }
1210 if (ccid3_li_hist != NULL) {
1211 dccp_li_hist_delete(ccid3_li_hist);
1212 ccid3_li_hist = NULL;
1213 }
1214}
1215module_exit(ccid3_module_exit);
1216
1217MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
1218 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
1219MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
1220MODULE_LICENSE("GPL");
1221MODULE_ALIAS("net-dccp-ccid-3");
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
new file mode 100644
index 000000000000..ee8cbace6630
--- /dev/null
+++ b/net/dccp/ccids/ccid3.h
@@ -0,0 +1,137 @@
1/*
2 * net/dccp/ccids/ccid3.h
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 *
6 * An implementation of the DCCP protocol
7 *
8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors:
14 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
15 *
16 * Changes to meet Linux coding standards, to make it meet latest ccid3 draft
17 * and to make it work as a loadable module in the DCCP stack written by
18 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
19 *
20 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36#ifndef _DCCP_CCID3_H_
37#define _DCCP_CCID3_H_
38
39#include <linux/config.h>
40#include <linux/list.h>
41#include <linux/time.h>
42#include <linux/types.h>
43
44#define TFRC_MIN_PACKET_SIZE 16
45#define TFRC_STD_PACKET_SIZE 256
46#define TFRC_MAX_PACKET_SIZE 65535
47
48/* Two seconds as per CCID3 spec */
49#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC)
50
51/* In usecs - half the scheduling granularity as per RFC3448 4.6 */
52#define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ))
53
54/* In seconds */
55#define TFRC_MAX_BACK_OFF_TIME 64
56
57#define TFRC_SMALLEST_P 40
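/*
 * Loss event rates are handled as fixed point values scaled by 1000000
 * in this CCID, so TFRC_SMALLEST_P corresponds to p = 0.00004.
 */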
58
59enum ccid3_options {
60 TFRC_OPT_LOSS_EVENT_RATE = 192,
61 TFRC_OPT_LOSS_INTERVALS = 193,
62 TFRC_OPT_RECEIVE_RATE = 194,
63};
64
65struct ccid3_options_received {
66 u64 ccid3or_seqno:48,
67 ccid3or_loss_intervals_idx:16;
68 u16 ccid3or_loss_intervals_len;
69 u32 ccid3or_loss_event_rate;
70 u32 ccid3or_receive_rate;
71};
72
73/** struct ccid3_hc_tx_sock - CCID3 sender half connection sock
74 *
75 * @ccid3hctx_state - Sender state
76 * @ccid3hctx_x - Current sending rate
77 * @ccid3hctx_x_recv - Receive rate
78 * @ccid3hctx_x_calc - Calculated send (?) rate
79 * @ccid3hctx_s - Packet size
80 * @ccid3hctx_rtt - Estimate of current round trip time in usecs
81  * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
82 * @ccid3hctx_last_win_count - Last window counter sent
83 * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
84 * with last_win_count value sent
85 * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
86 * @ccid3hctx_idle - FIXME
87 * @ccid3hctx_t_ld - Time last doubled during slow start
88 * @ccid3hctx_t_nom - Nominal send time of next packet
89 * @ccid3hctx_t_ipi - Interpacket (send) interval
90 * @ccid3hctx_delta - Send timer delta
91 * @ccid3hctx_hist - Packet history
92 */
93struct ccid3_hc_tx_sock {
94 u32 ccid3hctx_x;
95 u32 ccid3hctx_x_recv;
96 u32 ccid3hctx_x_calc;
97 u16 ccid3hctx_s;
98 u32 ccid3hctx_rtt;
99 u32 ccid3hctx_p;
100 u8 ccid3hctx_state;
101 u8 ccid3hctx_last_win_count;
102 u8 ccid3hctx_idle;
103 struct timeval ccid3hctx_t_last_win_count;
104 struct timer_list ccid3hctx_no_feedback_timer;
105 struct timeval ccid3hctx_t_ld;
106 struct timeval ccid3hctx_t_nom;
107 u32 ccid3hctx_t_rto;
108 u32 ccid3hctx_t_ipi;
109 u32 ccid3hctx_delta;
110 struct list_head ccid3hctx_hist;
111 struct ccid3_options_received ccid3hctx_options_received;
112};
113
114struct ccid3_hc_rx_sock {
115 u64 ccid3hcrx_seqno_last_counter:48,
116 ccid3hcrx_state:8,
117 ccid3hcrx_last_counter:4;
118 unsigned long ccid3hcrx_rtt;
119 u32 ccid3hcrx_p;
120 u32 ccid3hcrx_bytes_recv;
121 struct timeval ccid3hcrx_tstamp_last_feedback;
122 struct timeval ccid3hcrx_tstamp_last_ack;
123 struct list_head ccid3hcrx_hist;
124 struct list_head ccid3hcrx_li_hist;
125 u16 ccid3hcrx_s;
126 u32 ccid3hcrx_pinv;
127 u32 ccid3hcrx_elapsed_time;
128 u32 ccid3hcrx_x_recv;
129};
130
131#define ccid3_hc_tx_field(s,field) (s->dccps_hc_tx_ccid_private == NULL ? 0 : \
132 ((struct ccid3_hc_tx_sock *)s->dccps_hc_tx_ccid_private)->ccid3hctx_##field)
133
134#define ccid3_hc_rx_field(s,field) (s->dccps_hc_rx_ccid_private == NULL ? 0 : \
135 ((struct ccid3_hc_rx_sock *)s->dccps_hc_rx_ccid_private)->ccid3hcrx_##field)
136
137#endif /* _DCCP_CCID3_H_ */
diff --git a/net/dccp/ccids/lib/Makefile b/net/dccp/ccids/lib/Makefile
new file mode 100644
index 000000000000..5f940a6cbaca
--- /dev/null
+++ b/net/dccp/ccids/lib/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_IP_DCCP_TFRC_LIB) += dccp_tfrc_lib.o
2
3dccp_tfrc_lib-y := loss_interval.o packet_history.o tfrc_equation.o
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
new file mode 100644
index 000000000000..4c01a54143ad
--- /dev/null
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -0,0 +1,144 @@
1/*
2 * net/dccp/ccids/lib/loss_interval.c
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16
17#include "loss_interval.h"
18
19struct dccp_li_hist *dccp_li_hist_new(const char *name)
20{
21 struct dccp_li_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
22 static const char dccp_li_hist_mask[] = "li_hist_%s";
23 char *slab_name;
24
25 if (hist == NULL)
26 goto out;
27
28 slab_name = kmalloc(strlen(name) + sizeof(dccp_li_hist_mask) - 1,
29 GFP_ATOMIC);
30 if (slab_name == NULL)
31 goto out_free_hist;
32
33 sprintf(slab_name, dccp_li_hist_mask, name);
34 hist->dccplih_slab = kmem_cache_create(slab_name,
35 sizeof(struct dccp_li_hist_entry),
36 0, SLAB_HWCACHE_ALIGN,
37 NULL, NULL);
38 if (hist->dccplih_slab == NULL)
39 goto out_free_slab_name;
40out:
41 return hist;
42out_free_slab_name:
43 kfree(slab_name);
44out_free_hist:
45 kfree(hist);
46 hist = NULL;
47 goto out;
48}
49
50EXPORT_SYMBOL_GPL(dccp_li_hist_new);
51
52void dccp_li_hist_delete(struct dccp_li_hist *hist)
53{
54 const char* name = kmem_cache_name(hist->dccplih_slab);
55
56 kmem_cache_destroy(hist->dccplih_slab);
57 kfree(name);
58 kfree(hist);
59}
60
61EXPORT_SYMBOL_GPL(dccp_li_hist_delete);
62
63void dccp_li_hist_purge(struct dccp_li_hist *hist, struct list_head *list)
64{
65 struct dccp_li_hist_entry *entry, *next;
66
67 list_for_each_entry_safe(entry, next, list, dccplih_node) {
68 list_del_init(&entry->dccplih_node);
69 kmem_cache_free(hist->dccplih_slab, entry);
70 }
71}
72
73EXPORT_SYMBOL_GPL(dccp_li_hist_purge);
74
75/* Weights used to calculate loss event rate */
76/*
77 * These are integers as per section 8 of RFC 3448. We can then divide by 4
78 * when we use it.
79 */
80static const int dccp_li_hist_w[DCCP_LI_HIST_IVAL_F_LENGTH] = {
81 4, 4, 4, 4, 3, 2, 1, 1,
82};
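/*
 * These approximate the RFC 3448 section 5.4 weights
 * { 1, 1, 1, 1, 0.8, 0.6, 0.4, 0.2 }, scaled by four and forced to
 * integers - hence the divide by 4 mentioned above.
 */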
83
84u32 dccp_li_hist_calc_i_mean(struct list_head *list)
85{
86 struct dccp_li_hist_entry *li_entry, *li_next;
87 int i = 0;
88 u32 i_tot;
89 u32 i_tot0 = 0;
90 u32 i_tot1 = 0;
91 u32 w_tot = 0;
92
93 list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
94 if (i < DCCP_LI_HIST_IVAL_F_LENGTH) {
95 i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
96 w_tot += dccp_li_hist_w[i];
97 }
98
99 if (i != 0)
100 i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
101
102 if (++i > DCCP_LI_HIST_IVAL_F_LENGTH)
103 break;
104 }
105
106 if (i != DCCP_LI_HIST_IVAL_F_LENGTH)
107 return 0;
108
109 i_tot = max(i_tot0, i_tot1);
110
111 /* FIXME: Why do we do this? -Ian McDonald */
112 if (i_tot * 4 < w_tot)
113 i_tot = w_tot * 4;
114
115 return i_tot * 4 / w_tot;
116}
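/*
 * The loop above computes the two candidate weighted sums from RFC 3448
 * section 5.4: i_tot0 includes the still-open current loss interval while
 * i_tot1 does not. Taking the maximum means the open interval is only
 * counted when it increases I_mean, i.e. when it lowers the loss event
 * rate.
 */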
117
118EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
119
120struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist,
121 struct list_head *list,
122 const u64 seq_loss,
123 const u8 win_loss)
124{
125 struct dccp_li_hist_entry *tail = NULL, *entry;
126 int i;
127
128 for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) {
129 entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
130 if (entry == NULL) {
131 dccp_li_hist_purge(hist, list);
132 return NULL;
133 }
134 if (tail == NULL)
135 tail = entry;
136 list_add(&entry->dccplih_node, list);
137 }
138
139 entry->dccplih_seqno = seq_loss;
140 entry->dccplih_win_count = win_loss;
141 return tail;
142}
143
144EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new);
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
new file mode 100644
index 000000000000..13ad47ba1420
--- /dev/null
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -0,0 +1,61 @@
1#ifndef _DCCP_LI_HIST_
2#define _DCCP_LI_HIST_
3/*
4 * net/dccp/ccids/lib/loss_interval.h
5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/time.h>
20
21#define DCCP_LI_HIST_IVAL_F_LENGTH 8
22
23struct dccp_li_hist {
24 kmem_cache_t *dccplih_slab;
25};
26
27extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
28extern void dccp_li_hist_delete(struct dccp_li_hist *hist);
29
30struct dccp_li_hist_entry {
31 struct list_head dccplih_node;
32 u64 dccplih_seqno:48,
33 dccplih_win_count:4;
34 u32 dccplih_interval;
35};
36
37static inline struct dccp_li_hist_entry *
38 dccp_li_hist_entry_new(struct dccp_li_hist *hist,
39 const unsigned int __nocast prio)
40{
41 return kmem_cache_alloc(hist->dccplih_slab, prio);
42}
43
44static inline void dccp_li_hist_entry_delete(struct dccp_li_hist *hist,
45 struct dccp_li_hist_entry *entry)
46{
47 if (entry != NULL)
48 kmem_cache_free(hist->dccplih_slab, entry);
49}
50
51extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
52 struct list_head *list);
53
54extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
55
56extern struct dccp_li_hist_entry *
57 dccp_li_hist_interval_new(struct dccp_li_hist *hist,
58 struct list_head *list,
59 const u64 seq_loss,
60 const u8 win_loss);
61#endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
new file mode 100644
index 000000000000..d3f9d2053830
--- /dev/null
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -0,0 +1,398 @@
1/*
2 * net/dccp/ccids/lib/packet_history.c
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 *
6 * An implementation of the DCCP protocol
7 *
8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors:
14 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
15 *
16 * Changes to meet Linux coding standards, to make it meet latest ccid3 draft
17 * and to make it work as a loadable module in the DCCP stack written by
18 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
19 *
20 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36
37#include <linux/config.h>
38#include <linux/module.h>
39#include <linux/string.h>
40
41#include "packet_history.h"
42
43struct dccp_rx_hist *dccp_rx_hist_new(const char *name)
44{
45 struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
46 static const char dccp_rx_hist_mask[] = "rx_hist_%s";
47 char *slab_name;
48
49 if (hist == NULL)
50 goto out;
51
52 slab_name = kmalloc(strlen(name) + sizeof(dccp_rx_hist_mask) - 1,
53 GFP_ATOMIC);
54 if (slab_name == NULL)
55 goto out_free_hist;
56
57 sprintf(slab_name, dccp_rx_hist_mask, name);
58 hist->dccprxh_slab = kmem_cache_create(slab_name,
59 sizeof(struct dccp_rx_hist_entry),
60 0, SLAB_HWCACHE_ALIGN,
61 NULL, NULL);
62 if (hist->dccprxh_slab == NULL)
63 goto out_free_slab_name;
64out:
65 return hist;
66out_free_slab_name:
67 kfree(slab_name);
68out_free_hist:
69 kfree(hist);
70 hist = NULL;
71 goto out;
72}
73
74EXPORT_SYMBOL_GPL(dccp_rx_hist_new);
75
76void dccp_rx_hist_delete(struct dccp_rx_hist *hist)
77{
78 const char* name = kmem_cache_name(hist->dccprxh_slab);
79
80 kmem_cache_destroy(hist->dccprxh_slab);
81 kfree(name);
82 kfree(hist);
83}
84
85EXPORT_SYMBOL_GPL(dccp_rx_hist_delete);
86
87void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
88{
89 struct dccp_rx_hist_entry *entry, *next;
90
91 list_for_each_entry_safe(entry, next, list, dccphrx_node) {
92 list_del_init(&entry->dccphrx_node);
93 kmem_cache_free(hist->dccprxh_slab, entry);
94 }
95}
96
97EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);
98
99struct dccp_rx_hist_entry *
100 dccp_rx_hist_find_data_packet(const struct list_head *list)
101{
102 struct dccp_rx_hist_entry *entry, *packet = NULL;
103
104 list_for_each_entry(entry, list, dccphrx_node)
105 if (entry->dccphrx_type == DCCP_PKT_DATA ||
106 entry->dccphrx_type == DCCP_PKT_DATAACK) {
107 packet = entry;
108 break;
109 }
110
111 return packet;
112}
113
114EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet);
115
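/*
 * dccp_rx_hist_add_packet inserts the new entry in descending seqno order
 * and trims old history. A nonzero return means the packet arrived after
 * TFRC_RECV_NUM_LATE_LOSS or more newer data packets and was dropped; the
 * CCID3 receiver treats that as an indication of loss.
 */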
116int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
117 struct list_head *rx_list,
118 struct list_head *li_list,
119 struct dccp_rx_hist_entry *packet)
120{
121 struct dccp_rx_hist_entry *entry, *next, *iter;
122 u8 num_later = 0;
123
124 iter = dccp_rx_hist_head(rx_list);
125 if (iter == NULL)
126 dccp_rx_hist_add_entry(rx_list, packet);
127 else {
128 const u64 seqno = packet->dccphrx_seqno;
129
130 if (after48(seqno, iter->dccphrx_seqno))
131 dccp_rx_hist_add_entry(rx_list, packet);
132 else {
133 if (dccp_rx_hist_entry_data_packet(iter))
134 num_later = 1;
135
136 list_for_each_entry_continue(iter, rx_list,
137 dccphrx_node) {
138 if (after48(seqno, iter->dccphrx_seqno)) {
139 dccp_rx_hist_add_entry(&iter->dccphrx_node,
140 packet);
141 goto trim_history;
142 }
143
144 if (dccp_rx_hist_entry_data_packet(iter))
145 num_later++;
146
147 if (num_later == TFRC_RECV_NUM_LATE_LOSS) {
148 dccp_rx_hist_entry_delete(hist, packet);
149 return 1;
150 }
151 }
152
153 if (num_later < TFRC_RECV_NUM_LATE_LOSS)
154 dccp_rx_hist_add_entry(rx_list, packet);
155 /*
156 * FIXME: else what? should we destroy the packet
157 * like above?
158 */
159 }
160 }
161
162trim_history:
163 /*
164 * Trim history (remove all packets after the NUM_LATE_LOSS + 1
165 * data packets)
166 */
167 num_later = TFRC_RECV_NUM_LATE_LOSS + 1;
168
169 if (!list_empty(li_list)) {
170 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
171 if (num_later == 0) {
172 list_del_init(&entry->dccphrx_node);
173 dccp_rx_hist_entry_delete(hist, entry);
174 } else if (dccp_rx_hist_entry_data_packet(entry))
175 --num_later;
176 }
177 } else {
178 int step = 0;
179		u8 win_count = 0; /* Not needed, but shuts up gcc */
180 int tmp;
181 /*
182		 * We have no loss interval history so we need at least one
183		 * RTT's worth of data packets to approximate the RTT.
184 */
185 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
186 if (num_later == 0) {
187 switch (step) {
188 case 0:
189 step = 1;
190 /* OK, find next data packet */
191 num_later = 1;
192 break;
193 case 1:
194 step = 2;
195 /* OK, find next data packet */
196 num_later = 1;
197 win_count = entry->dccphrx_ccval;
198 break;
199 case 2:
200 tmp = win_count - entry->dccphrx_ccval;
201 if (tmp < 0)
202 tmp += TFRC_WIN_COUNT_LIMIT;
203 if (tmp > TFRC_WIN_COUNT_PER_RTT + 1) {
204 /*
205 * We have found a packet older
206					 * than one RTT, remove the rest
207 */
208 step = 3;
209 } else /* OK, find next data packet */
210 num_later = 1;
211 break;
212 case 3:
213 list_del_init(&entry->dccphrx_node);
214 dccp_rx_hist_entry_delete(hist, entry);
215 break;
216 }
217 } else if (dccp_rx_hist_entry_data_packet(entry))
218 --num_later;
219 }
220 }
221
222 return 0;
223}
224
225EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
226
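/*
 * Loss detection here is NDP based: a hole in the sequence space only
 * counts as loss if the sender's NDP counts show that the missing
 * sequence numbers were not all non-data packets (see the delta vs ndp
 * comparison below).
 */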
227u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
228 struct list_head *li_list, u8 *win_loss)
229{
230 struct dccp_rx_hist_entry *entry, *next, *packet;
231 struct dccp_rx_hist_entry *a_loss = NULL;
232 struct dccp_rx_hist_entry *b_loss = NULL;
233 u64 seq_loss = DCCP_MAX_SEQNO + 1;
234 u8 num_later = TFRC_RECV_NUM_LATE_LOSS;
235
236 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
237 if (num_later == 0) {
238 b_loss = entry;
239 break;
240 } else if (dccp_rx_hist_entry_data_packet(entry))
241 --num_later;
242 }
243
244 if (b_loss == NULL)
245 goto out;
246
247 num_later = 1;
248 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
249 if (num_later == 0) {
250 a_loss = entry;
251 break;
252 } else if (dccp_rx_hist_entry_data_packet(entry))
253 --num_later;
254 }
255
256 if (a_loss == NULL) {
257 if (list_empty(li_list)) {
258			/* no loss event has occurred yet */
259 LIMIT_NETDEBUG("%s: TODO: find a lost data packet by "
260 "comparing to initial seqno\n",
261 __FUNCTION__);
262 goto out;
263 } else {
264 LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!",
265 __FUNCTION__);
266 goto out;
267 }
268 }
269
270 /* Locate a lost data packet */
271 entry = packet = b_loss;
272 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
273 u64 delta = dccp_delta_seqno(entry->dccphrx_seqno,
274 packet->dccphrx_seqno);
275
276 if (delta != 0) {
277 if (dccp_rx_hist_entry_data_packet(packet))
278 --delta;
279 /*
280 * FIXME: check this, probably this % usage is because
281 * in earlier drafts the ndp count was just 8 bits
282			 * long, but now it can be up to 24 bits long.
283 */
284#if 0
285 if (delta % DCCP_NDP_LIMIT !=
286 (packet->dccphrx_ndp -
287 entry->dccphrx_ndp) % DCCP_NDP_LIMIT)
288#endif
289 if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) {
290 seq_loss = entry->dccphrx_seqno;
291 dccp_inc_seqno(&seq_loss);
292 }
293 }
294 packet = entry;
295 if (packet == a_loss)
296 break;
297 }
298out:
299 if (seq_loss != DCCP_MAX_SEQNO + 1)
300 *win_loss = a_loss->dccphrx_ccval;
301 else
302 *win_loss = 0; /* Paranoia */
303
304 return seq_loss;
305}
306
307EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss);
308
309struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
310{
311 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
312 static const char dccp_tx_hist_mask[] = "tx_hist_%s";
313 char *slab_name;
314
315 if (hist == NULL)
316 goto out;
317
318 slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
319 GFP_ATOMIC);
320 if (slab_name == NULL)
321 goto out_free_hist;
322
323 sprintf(slab_name, dccp_tx_hist_mask, name);
324 hist->dccptxh_slab = kmem_cache_create(slab_name,
325 sizeof(struct dccp_tx_hist_entry),
326 0, SLAB_HWCACHE_ALIGN,
327 NULL, NULL);
328 if (hist->dccptxh_slab == NULL)
329 goto out_free_slab_name;
330out:
331 return hist;
332out_free_slab_name:
333 kfree(slab_name);
334out_free_hist:
335 kfree(hist);
336 hist = NULL;
337 goto out;
338}
339
340EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
341
342void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
343{
344 const char* name = kmem_cache_name(hist->dccptxh_slab);
345
346 kmem_cache_destroy(hist->dccptxh_slab);
347 kfree(name);
348 kfree(hist);
349}
350
351EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
352
353struct dccp_tx_hist_entry *
354 dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
355{
356 struct dccp_tx_hist_entry *packet = NULL, *entry;
357
358 list_for_each_entry(entry, list, dccphtx_node)
359 if (entry->dccphtx_seqno == seq) {
360 packet = entry;
361 break;
362 }
363
364 return packet;
365}
366
367EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
368
369void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
370 struct list_head *list,
371 struct dccp_tx_hist_entry *packet)
372{
373 struct dccp_tx_hist_entry *next;
374
375 list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
376 list_del_init(&packet->dccphtx_node);
377 dccp_tx_hist_entry_delete(hist, packet);
378 }
379}
380
381EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
382
383void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
384{
385 struct dccp_tx_hist_entry *entry, *next;
386
387 list_for_each_entry_safe(entry, next, list, dccphtx_node) {
388 list_del_init(&entry->dccphtx_node);
389 dccp_tx_hist_entry_delete(hist, entry);
390 }
391}
392
393EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
394
395MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, "
396 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
397MODULE_DESCRIPTION("DCCP TFRC library");
398MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
new file mode 100644
index 000000000000..fb90a91aa93d
--- /dev/null
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -0,0 +1,199 @@
1/*
2 * net/dccp/ccids/lib/packet_history.h
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 *
6 * An implementation of the DCCP protocol
7 *
8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz
11 *
12 * This code also uses code from Lulea University, re-released as GPL by its
13 * authors:
14 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
15 *
16 * Changes to meet Linux coding standards, to conform to the latest ccid3
17 * draft, and to make it work as a loadable module in the DCCP stack, written
18 * by Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
19 *
20 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36
37#ifndef _DCCP_PKT_HIST_
38#define _DCCP_PKT_HIST_
39
40#include <linux/config.h>
41#include <linux/list.h>
42#include <linux/slab.h>
43#include <linux/time.h>
44
45#include "../../dccp.h"
46
47/* Number of later packets received before one is considered lost */
48#define TFRC_RECV_NUM_LATE_LOSS 3
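/*
 * Worked example (illustrative): with TFRC_RECV_NUM_LATE_LOSS == 3,
 * if sequence numbers 1, 2, 4, 5, 6, 7 arrive in order, packet 3 is
 * only declared lost once 5, 6 and 7 -- three packets later than 3 --
 * have been received, mirroring TCP's three-duplicate-ACK heuristic.
 */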
49
50#define TFRC_WIN_COUNT_PER_RTT 4
51#define TFRC_WIN_COUNT_LIMIT 16
52
53struct dccp_tx_hist_entry {
54 struct list_head dccphtx_node;
55 u64 dccphtx_seqno:48,
56 dccphtx_ccval:4,
57 dccphtx_sent:1;
58 u32 dccphtx_rtt;
59 struct timeval dccphtx_tstamp;
60};
61
62struct dccp_rx_hist_entry {
63 struct list_head dccphrx_node;
64 u64 dccphrx_seqno:48,
65 dccphrx_ccval:4,
66 dccphrx_type:4;
67 u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */
68 struct timeval dccphrx_tstamp;
69};
70
71struct dccp_tx_hist {
72 kmem_cache_t *dccptxh_slab;
73};
74
75extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
76extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
77
78struct dccp_rx_hist {
79 kmem_cache_t *dccprxh_slab;
80};
81
82extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
83extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
84extern struct dccp_rx_hist_entry *
85 dccp_rx_hist_find_data_packet(const struct list_head *list);
86
87static inline struct dccp_tx_hist_entry *
88 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
89 const unsigned int __nocast prio)
90{
91 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
92 prio);
93
94 if (entry != NULL)
95 entry->dccphtx_sent = 0;
96
97 return entry;
98}
99
100static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
101 struct dccp_tx_hist_entry *entry)
102{
103 if (entry != NULL)
104 kmem_cache_free(hist->dccptxh_slab, entry);
105}
106
107extern struct dccp_tx_hist_entry *
108 dccp_tx_hist_find_entry(const struct list_head *list,
109 const u64 seq);
110
111static inline void dccp_tx_hist_add_entry(struct list_head *list,
112 struct dccp_tx_hist_entry *entry)
113{
114 list_add(&entry->dccphtx_node, list);
115}
116
117extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
118 struct list_head *list,
119 struct dccp_tx_hist_entry *next);
120
121extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
122 struct list_head *list);
123
124static inline struct dccp_tx_hist_entry *
125 dccp_tx_hist_head(struct list_head *list)
126{
127 struct dccp_tx_hist_entry *head = NULL;
128
129 if (!list_empty(list))
130 head = list_entry(list->next, struct dccp_tx_hist_entry,
131 dccphtx_node);
132 return head;
133}
134
135static inline struct dccp_rx_hist_entry *
136 dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
137 const u32 ndp,
138 const struct sk_buff *skb,
139 const unsigned int __nocast prio)
140{
141 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
142 prio);
143
144 if (entry != NULL) {
145 const struct dccp_hdr *dh = dccp_hdr(skb);
146
147 entry->dccphrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
148 entry->dccphrx_ccval = dh->dccph_ccval;
149 entry->dccphrx_type = dh->dccph_type;
150 entry->dccphrx_ndp = ndp;
151 do_gettimeofday(&(entry->dccphrx_tstamp));
152 }
153
154 return entry;
155}
156
157static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
158 struct dccp_rx_hist_entry *entry)
159{
160 if (entry != NULL)
161 kmem_cache_free(hist->dccprxh_slab, entry);
162}
163
164extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
165 struct list_head *list);
166
167static inline void dccp_rx_hist_add_entry(struct list_head *list,
168 struct dccp_rx_hist_entry *entry)
169{
170 list_add(&entry->dccphrx_node, list);
171}
172
173static inline struct dccp_rx_hist_entry *
174 dccp_rx_hist_head(struct list_head *list)
175{
176 struct dccp_rx_hist_entry *head = NULL;
177
178 if (!list_empty(list))
179 head = list_entry(list->next, struct dccp_rx_hist_entry,
180 dccphrx_node);
181 return head;
182}
183
184static inline int
185 dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry)
186{
187 return entry->dccphrx_type == DCCP_PKT_DATA ||
188 entry->dccphrx_type == DCCP_PKT_DATAACK;
189}
190
191extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
192 struct list_head *rx_list,
193 struct list_head *li_list,
194 struct dccp_rx_hist_entry *packet);
195
196extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
197 struct list_head *li_list, u8 *win_loss);
198
199#endif /* _DCCP_PKT_HIST_ */
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
new file mode 100644
index 000000000000..130c4c40cfe3
--- /dev/null
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -0,0 +1,22 @@
1#ifndef _TFRC_H_
2#define _TFRC_H_
3/*
4 * net/dccp/ccids/lib/tfrc.h
5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/types.h>
18
19extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
20extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
21
22#endif /* _TFRC_H_ */
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
new file mode 100644
index 000000000000..d2b5933b4510
--- /dev/null
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -0,0 +1,644 @@
1/*
2 * net/dccp/ccids/lib/tfrc_equation.c
3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17
18#include <asm/bug.h>
19#include <asm/div64.h>
20
21#include "tfrc.h"
22
23#define TFRC_CALC_X_ARRSIZE 500
24
25#define TFRC_CALC_X_SPLIT 50000
26/* equivalent to 0.05 */
27
28static const u32 tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE][2] = {
29 { 37172, 8172 },
30 { 53499, 11567 },
31 { 66664, 14180 },
32 { 78298, 16388 },
33 { 89021, 18339 },
34 { 99147, 20108 },
35 { 108858, 21738 },
36 { 118273, 23260 },
37 { 127474, 24693 },
38 { 136520, 26052 },
39 { 145456, 27348 },
40 { 154316, 28589 },
41 { 163130, 29783 },
42 { 171919, 30935 },
43 { 180704, 32049 },
44 { 189502, 33130 },
45 { 198328, 34180 },
46 { 207194, 35202 },
47 { 216114, 36198 },
48 { 225097, 37172 },
49 { 234153, 38123 },
50 { 243294, 39055 },
51 { 252527, 39968 },
52 { 261861, 40864 },
53 { 271305, 41743 },
54 { 280866, 42607 },
55 { 290553, 43457 },
56 { 300372, 44293 },
57 { 310333, 45117 },
58 { 320441, 45929 },
59 { 330705, 46729 },
60 { 341131, 47518 },
61 { 351728, 48297 },
62 { 362501, 49066 },
63 { 373460, 49826 },
64 { 384609, 50577 },
65 { 395958, 51320 },
66 { 407513, 52054 },
67 { 419281, 52780 },
68 { 431270, 53499 },
69 { 443487, 54211 },
70 { 455940, 54916 },
71 { 468635, 55614 },
72 { 481581, 56306 },
73 { 494785, 56991 },
74 { 508254, 57671 },
75 { 521996, 58345 },
76 { 536019, 59014 },
77 { 550331, 59677 },
78 { 564939, 60335 },
79 { 579851, 60988 },
80 { 595075, 61636 },
81 { 610619, 62279 },
82 { 626491, 62918 },
83 { 642700, 63553 },
84 { 659253, 64183 },
85 { 676158, 64809 },
86 { 693424, 65431 },
87 { 711060, 66050 },
88 { 729073, 66664 },
89 { 747472, 67275 },
90 { 766266, 67882 },
91 { 785464, 68486 },
92 { 805073, 69087 },
93 { 825103, 69684 },
94 { 845562, 70278 },
95 { 866460, 70868 },
96 { 887805, 71456 },
97 { 909606, 72041 },
98 { 931873, 72623 },
99 { 954614, 73202 },
100 { 977839, 73778 },
101 { 1001557, 74352 },
102 { 1025777, 74923 },
103 { 1050508, 75492 },
104 { 1075761, 76058 },
105 { 1101544, 76621 },
106 { 1127867, 77183 },
107 { 1154739, 77741 },
108 { 1182172, 78298 },
109 { 1210173, 78852 },
110 { 1238753, 79405 },
111 { 1267922, 79955 },
112 { 1297689, 80503 },
113 { 1328066, 81049 },
114 { 1359060, 81593 },
115 { 1390684, 82135 },
116 { 1422947, 82675 },
117 { 1455859, 83213 },
118 { 1489430, 83750 },
119 { 1523671, 84284 },
120 { 1558593, 84817 },
121 { 1594205, 85348 },
122 { 1630518, 85878 },
123 { 1667543, 86406 },
124 { 1705290, 86932 },
125 { 1743770, 87457 },
126 { 1782994, 87980 },
127 { 1822973, 88501 },
128 { 1863717, 89021 },
129 { 1905237, 89540 },
130 { 1947545, 90057 },
131 { 1990650, 90573 },
132 { 2034566, 91087 },
133 { 2079301, 91600 },
134 { 2124869, 92111 },
135 { 2171279, 92622 },
136 { 2218543, 93131 },
137 { 2266673, 93639 },
138 { 2315680, 94145 },
139 { 2365575, 94650 },
140 { 2416371, 95154 },
141 { 2468077, 95657 },
142 { 2520707, 96159 },
143 { 2574271, 96660 },
144 { 2628782, 97159 },
145 { 2684250, 97658 },
146 { 2740689, 98155 },
147 { 2798110, 98651 },
148 { 2856524, 99147 },
149 { 2915944, 99641 },
150 { 2976382, 100134 },
151 { 3037850, 100626 },
152 { 3100360, 101117 },
153 { 3163924, 101608 },
154 { 3228554, 102097 },
155 { 3294263, 102586 },
156 { 3361063, 103073 },
157 { 3428966, 103560 },
158 { 3497984, 104045 },
159 { 3568131, 104530 },
160 { 3639419, 105014 },
161 { 3711860, 105498 },
162 { 3785467, 105980 },
163 { 3860253, 106462 },
164 { 3936229, 106942 },
165 { 4013410, 107422 },
166 { 4091808, 107902 },
167 { 4171435, 108380 },
168 { 4252306, 108858 },
169 { 4334431, 109335 },
170 { 4417825, 109811 },
171 { 4502501, 110287 },
172 { 4588472, 110762 },
173 { 4675750, 111236 },
174 { 4764349, 111709 },
175 { 4854283, 112182 },
176 { 4945564, 112654 },
177 { 5038206, 113126 },
178 { 5132223, 113597 },
179 { 5227627, 114067 },
180 { 5324432, 114537 },
181 { 5422652, 115006 },
182 { 5522299, 115474 },
183 { 5623389, 115942 },
184 { 5725934, 116409 },
185 { 5829948, 116876 },
186 { 5935446, 117342 },
187 { 6042439, 117808 },
188 { 6150943, 118273 },
189 { 6260972, 118738 },
190 { 6372538, 119202 },
191 { 6485657, 119665 },
192 { 6600342, 120128 },
193 { 6716607, 120591 },
194 { 6834467, 121053 },
195 { 6953935, 121514 },
196 { 7075025, 121976 },
197 { 7197752, 122436 },
198 { 7322131, 122896 },
199 { 7448175, 123356 },
200 { 7575898, 123815 },
201 { 7705316, 124274 },
202 { 7836442, 124733 },
203 { 7969291, 125191 },
204 { 8103877, 125648 },
205 { 8240216, 126105 },
206 { 8378321, 126562 },
207 { 8518208, 127018 },
208 { 8659890, 127474 },
209 { 8803384, 127930 },
210 { 8948702, 128385 },
211 { 9095861, 128840 },
212 { 9244875, 129294 },
213 { 9395760, 129748 },
214 { 9548529, 130202 },
215 { 9703198, 130655 },
216 { 9859782, 131108 },
217 { 10018296, 131561 },
218 { 10178755, 132014 },
219 { 10341174, 132466 },
220 { 10505569, 132917 },
221 { 10671954, 133369 },
222 { 10840345, 133820 },
223 { 11010757, 134271 },
224 { 11183206, 134721 },
225 { 11357706, 135171 },
226 { 11534274, 135621 },
227 { 11712924, 136071 },
228 { 11893673, 136520 },
229 { 12076536, 136969 },
230 { 12261527, 137418 },
231 { 12448664, 137867 },
232 { 12637961, 138315 },
233 { 12829435, 138763 },
234 { 13023101, 139211 },
235 { 13218974, 139658 },
236 { 13417071, 140106 },
237 { 13617407, 140553 },
238 { 13819999, 140999 },
239 { 14024862, 141446 },
240 { 14232012, 141892 },
241 { 14441465, 142339 },
242 { 14653238, 142785 },
243 { 14867346, 143230 },
244 { 15083805, 143676 },
245 { 15302632, 144121 },
246 { 15523842, 144566 },
247 { 15747453, 145011 },
248 { 15973479, 145456 },
249 { 16201939, 145900 },
250 { 16432847, 146345 },
251 { 16666221, 146789 },
252 { 16902076, 147233 },
253 { 17140429, 147677 },
254 { 17381297, 148121 },
255 { 17624696, 148564 },
256 { 17870643, 149007 },
257 { 18119154, 149451 },
258 { 18370247, 149894 },
259 { 18623936, 150336 },
260 { 18880241, 150779 },
261 { 19139176, 151222 },
262 { 19400759, 151664 },
263 { 19665007, 152107 },
264 { 19931936, 152549 },
265 { 20201564, 152991 },
266 { 20473907, 153433 },
267 { 20748982, 153875 },
268 { 21026807, 154316 },
269 { 21307399, 154758 },
270 { 21590773, 155199 },
271 { 21876949, 155641 },
272 { 22165941, 156082 },
273 { 22457769, 156523 },
274 { 22752449, 156964 },
275 { 23049999, 157405 },
276 { 23350435, 157846 },
277 { 23653774, 158287 },
278 { 23960036, 158727 },
279 { 24269236, 159168 },
280 { 24581392, 159608 },
281 { 24896521, 160049 },
282 { 25214642, 160489 },
283 { 25535772, 160929 },
284 { 25859927, 161370 },
285 { 26187127, 161810 },
286 { 26517388, 162250 },
287 { 26850728, 162690 },
288 { 27187165, 163130 },
289 { 27526716, 163569 },
290 { 27869400, 164009 },
291 { 28215234, 164449 },
292 { 28564236, 164889 },
293 { 28916423, 165328 },
294 { 29271815, 165768 },
295 { 29630428, 166208 },
296 { 29992281, 166647 },
297 { 30357392, 167087 },
298 { 30725779, 167526 },
299 { 31097459, 167965 },
300 { 31472452, 168405 },
301 { 31850774, 168844 },
302 { 32232445, 169283 },
303 { 32617482, 169723 },
304 { 33005904, 170162 },
305 { 33397730, 170601 },
306 { 33792976, 171041 },
307 { 34191663, 171480 },
308 { 34593807, 171919 },
309 { 34999428, 172358 },
310 { 35408544, 172797 },
311 { 35821174, 173237 },
312 { 36237335, 173676 },
313 { 36657047, 174115 },
314 { 37080329, 174554 },
315 { 37507197, 174993 },
316 { 37937673, 175433 },
317 { 38371773, 175872 },
318 { 38809517, 176311 },
319 { 39250924, 176750 },
320 { 39696012, 177190 },
321 { 40144800, 177629 },
322 { 40597308, 178068 },
323 { 41053553, 178507 },
324 { 41513554, 178947 },
325 { 41977332, 179386 },
326 { 42444904, 179825 },
327 { 42916290, 180265 },
328 { 43391509, 180704 },
329 { 43870579, 181144 },
330 { 44353520, 181583 },
331 { 44840352, 182023 },
332 { 45331092, 182462 },
333 { 45825761, 182902 },
334 { 46324378, 183342 },
335 { 46826961, 183781 },
336 { 47333531, 184221 },
337 { 47844106, 184661 },
338 { 48358706, 185101 },
339 { 48877350, 185541 },
340 { 49400058, 185981 },
341 { 49926849, 186421 },
342 { 50457743, 186861 },
343 { 50992759, 187301 },
344 { 51531916, 187741 },
345 { 52075235, 188181 },
346 { 52622735, 188622 },
347 { 53174435, 189062 },
348 { 53730355, 189502 },
349 { 54290515, 189943 },
350 { 54854935, 190383 },
351 { 55423634, 190824 },
352 { 55996633, 191265 },
353 { 56573950, 191706 },
354 { 57155606, 192146 },
355 { 57741621, 192587 },
356 { 58332014, 193028 },
357 { 58926806, 193470 },
358 { 59526017, 193911 },
359 { 60129666, 194352 },
360 { 60737774, 194793 },
361 { 61350361, 195235 },
362 { 61967446, 195677 },
363 { 62589050, 196118 },
364 { 63215194, 196560 },
365 { 63845897, 197002 },
366 { 64481179, 197444 },
367 { 65121061, 197886 },
368 { 65765563, 198328 },
369 { 66414705, 198770 },
370 { 67068508, 199213 },
371 { 67726992, 199655 },
372 { 68390177, 200098 },
373 { 69058085, 200540 },
374 { 69730735, 200983 },
375 { 70408147, 201426 },
376 { 71090343, 201869 },
377 { 71777343, 202312 },
378 { 72469168, 202755 },
379 { 73165837, 203199 },
380 { 73867373, 203642 },
381 { 74573795, 204086 },
382 { 75285124, 204529 },
383 { 76001380, 204973 },
384 { 76722586, 205417 },
385 { 77448761, 205861 },
386 { 78179926, 206306 },
387 { 78916102, 206750 },
388 { 79657310, 207194 },
389 { 80403571, 207639 },
390 { 81154906, 208084 },
391 { 81911335, 208529 },
392 { 82672880, 208974 },
393 { 83439562, 209419 },
394 { 84211402, 209864 },
395 { 84988421, 210309 },
396 { 85770640, 210755 },
397 { 86558080, 211201 },
398 { 87350762, 211647 },
399 { 88148708, 212093 },
400 { 88951938, 212539 },
401 { 89760475, 212985 },
402 { 90574339, 213432 },
403 { 91393551, 213878 },
404 { 92218133, 214325 },
405 { 93048107, 214772 },
406 { 93883493, 215219 },
407 { 94724314, 215666 },
408 { 95570590, 216114 },
409 { 96422343, 216561 },
410 { 97279594, 217009 },
411 { 98142366, 217457 },
412 { 99010679, 217905 },
413 { 99884556, 218353 },
414 { 100764018, 218801 },
415 { 101649086, 219250 },
416 { 102539782, 219698 },
417 { 103436128, 220147 },
418 { 104338146, 220596 },
419 { 105245857, 221046 },
420 { 106159284, 221495 },
421 { 107078448, 221945 },
422 { 108003370, 222394 },
423 { 108934074, 222844 },
424 { 109870580, 223294 },
425 { 110812910, 223745 },
426 { 111761087, 224195 },
427 { 112715133, 224646 },
428 { 113675069, 225097 },
429 { 114640918, 225548 },
430 { 115612702, 225999 },
431 { 116590442, 226450 },
432 { 117574162, 226902 },
433 { 118563882, 227353 },
434 { 119559626, 227805 },
435 { 120561415, 228258 },
436 { 121569272, 228710 },
437 { 122583219, 229162 },
438 { 123603278, 229615 },
439 { 124629471, 230068 },
440 { 125661822, 230521 },
441 { 126700352, 230974 },
442 { 127745083, 231428 },
443 { 128796039, 231882 },
444 { 129853241, 232336 },
445 { 130916713, 232790 },
446 { 131986475, 233244 },
447 { 133062553, 233699 },
448 { 134144966, 234153 },
449 { 135233739, 234608 },
450 { 136328894, 235064 },
451 { 137430453, 235519 },
452 { 138538440, 235975 },
453 { 139652876, 236430 },
454 { 140773786, 236886 },
455 { 141901190, 237343 },
456 { 143035113, 237799 },
457 { 144175576, 238256 },
458 { 145322604, 238713 },
459 { 146476218, 239170 },
460 { 147636442, 239627 },
461 { 148803298, 240085 },
462 { 149976809, 240542 },
463 { 151156999, 241000 },
464 { 152343890, 241459 },
465 { 153537506, 241917 },
466 { 154737869, 242376 },
467 { 155945002, 242835 },
468 { 157158929, 243294 },
469 { 158379673, 243753 },
470 { 159607257, 244213 },
471 { 160841704, 244673 },
472 { 162083037, 245133 },
473 { 163331279, 245593 },
474 { 164586455, 246054 },
475 { 165848586, 246514 },
476 { 167117696, 246975 },
477 { 168393810, 247437 },
478 { 169676949, 247898 },
479 { 170967138, 248360 },
480 { 172264399, 248822 },
481 { 173568757, 249284 },
482 { 174880235, 249747 },
483 { 176198856, 250209 },
484 { 177524643, 250672 },
485 { 178857621, 251136 },
486 { 180197813, 251599 },
487 { 181545242, 252063 },
488 { 182899933, 252527 },
489 { 184261908, 252991 },
490 { 185631191, 253456 },
491 { 187007807, 253920 },
492 { 188391778, 254385 },
493 { 189783129, 254851 },
494 { 191181884, 255316 },
495 { 192588065, 255782 },
496 { 194001698, 256248 },
497 { 195422805, 256714 },
498 { 196851411, 257181 },
499 { 198287540, 257648 },
500 { 199731215, 258115 },
501 { 201182461, 258582 },
502 { 202641302, 259050 },
503 { 204107760, 259518 },
504 { 205581862, 259986 },
505 { 207063630, 260454 },
506 { 208553088, 260923 },
507 { 210050262, 261392 },
508 { 211555174, 261861 },
509 { 213067849, 262331 },
510 { 214588312, 262800 },
511 { 216116586, 263270 },
512 { 217652696, 263741 },
513 { 219196666, 264211 },
514 { 220748520, 264682 },
515 { 222308282, 265153 },
516 { 223875978, 265625 },
517 { 225451630, 266097 },
518 { 227035265, 266569 },
519 { 228626905, 267041 },
520 { 230226576, 267514 },
521 { 231834302, 267986 },
522 { 233450107, 268460 },
523 { 235074016, 268933 },
524 { 236706054, 269407 },
525 { 238346244, 269881 },
526 { 239994613, 270355 },
527 { 241651183, 270830 },
528 { 243315981, 271305 }
529};
530
531/* Calculate the send rate as per section 3.1 of RFC3448
532
533Returns send rate in bytes per second
534
535Integer maths and lookups are used as floating point is not allowed in the kernel
536
537The function for Xcalc as per section 3.1 of RFC3448 is:
538
539X = s
540 -------------------------------------------------------------
541 R*sqrt(2*b*p/3) + (t_RTO * (3*sqrt(3*b*p/8) * p * (1+32*p^2)))
542
543where
544X is the transmit rate in bytes/second
545s is the packet size in bytes
546R is the round trip time in seconds
547p is the loss event rate, between 0 and 1.0: the number of loss events
548 as a fraction of the number of packets transmitted
549t_RTO is the TCP retransmission timeout value in seconds
550b is the number of packets acknowledged by a single TCP acknowledgement
551
552we can assume that b = 1 and t_RTO is 4 * R. With this the equation becomes:
553
554X = s
555 -----------------------------------------------------------------------
556 R * sqrt(2 * p / 3) + (12 * R * (sqrt(3 * p / 8) * p * (1 + 32 * p^2)))
557
558
559which we can break down into:
560
561X = s
562 --------
563 R * f(p)
564
565where f(p) = sqrt(2 * p / 3) + (12 * sqrt(3 * p / 8) * p * (1 + 32 * p * p))
566
567Function parameters:
568s - bytes
569R - RTT in usecs
570p - loss rate (decimal fraction multiplied by 1,000,000)
571
572Returns Xcalc in bytes per second
573
574DON'T alter this code unless you run test cases against it as the code
575has been manipulated to stop underflow/overflow.
576
577*/
578u32 tfrc_calc_x(u16 s, u32 R, u32 p)
579{
580 int index;
581 u32 f;
582 u64 tmp1, tmp2;
583
584 if (p < TFRC_CALC_X_SPLIT)
585 index = (p / (TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE)) - 1;
586 else
587 index = (p / (1000000 / TFRC_CALC_X_ARRSIZE)) - 1;
588
589 if (index < 0)
590 /* p should be 0 unless there is a bug in my code */
591 index = 0;
592
593 if (R == 0)
594 R = 1; /* RTT can't be zero, or we would divide by zero */
595
596 BUG_ON(index >= TFRC_CALC_X_ARRSIZE);
597
598 if (p >= TFRC_CALC_X_SPLIT)
599 f = tfrc_calc_x_lookup[index][0];
600 else
601 f = tfrc_calc_x_lookup[index][1];
602
603 tmp1 = ((u64)s * 100000000);
604 tmp2 = ((u64)R * (u64)f);
605 do_div(tmp2, 10000);
606 do_div(tmp1, tmp2);
607 /* Don't alter above math unless you test due to overflow on 32 bit */
608
609 return (u32)tmp1;
610}
611
612EXPORT_SYMBOL_GPL(tfrc_calc_x);
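/*
 * Rough worked example (illustrative, not a test vector): with
 * s = 1460 bytes, R = 100000 usec (100 ms) and p = 10000 (i.e. 1%),
 * f(p) = sqrt(2p/3) + 12*sqrt(3p/8)*p*(1 + 32p^2) is about 0.089,
 * so X = s/(R*f(p)) ~= 1460 / (0.1 * 0.089) ~= 164 kbytes/sec; the
 * fixed-point code above reaches a comparable value through the
 * scaled lookup table.
 */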
613
614/*
615 * args: fvalue - function value to match
616 * returns: p closest to that value
617 *
618 * both fvalue and p are multiplied by 1,000,000 to use ints
619 */
620u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
621{
622 int ctr = 0;
623 int small;
624
625 if (fvalue < tfrc_calc_x_lookup[0][1])
626 return 0;
627
628 if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1])
629 small = 1;
630 else if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0])
631 return 1000000;
632 else
633 small = 0;
634
635 while (fvalue > tfrc_calc_x_lookup[ctr][small])
636 ctr++;
637
638 if (small)
639 return TFRC_CALC_X_SPLIT * ctr / TFRC_CALC_X_ARRSIZE;
640 else
641 return 1000000 * ctr / TFRC_CALC_X_ARRSIZE;
642}
643
644EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup);
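/*
 * Boundary behaviour of the reverse lookup (values taken from the
 * table above): an fvalue below tfrc_calc_x_lookup[0][1] (8172)
 * yields p == 0, while an fvalue above tfrc_calc_x_lookup[499][0]
 * (243315981) saturates at p == 1000000, i.e. a 100% loss rate.
 */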
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
new file mode 100644
index 000000000000..33456c0d5937
--- /dev/null
+++ b/net/dccp/dccp.h
@@ -0,0 +1,493 @@
1#ifndef _DCCP_H
2#define _DCCP_H
3/*
4 * net/dccp/dccp.h
5 *
6 * An implementation of the DCCP protocol
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/config.h>
16#include <linux/dccp.h>
17#include <net/snmp.h>
18#include <net/sock.h>
19#include <net/tcp.h>
20
21#ifdef CONFIG_IP_DCCP_DEBUG
22extern int dccp_debug;
23
24#define dccp_pr_debug(format, a...) \
25 do { if (dccp_debug) \
26 printk(KERN_DEBUG "%s: " format, __FUNCTION__ , ##a); \
27 } while (0)
28#define dccp_pr_debug_cat(format, a...) do { if (dccp_debug) \
29 printk(format, ##a); } while (0)
30#else
31#define dccp_pr_debug(format, a...)
32#define dccp_pr_debug_cat(format, a...)
33#endif
34
35extern struct inet_hashinfo dccp_hashinfo;
36
37extern atomic_t dccp_orphan_count;
38extern int dccp_tw_count;
39extern void dccp_tw_deschedule(struct inet_timewait_sock *tw);
40
41extern void dccp_time_wait(struct sock *sk, int state, int timeo);
42
43/* FIXME: Right size this */
44#define DCCP_MAX_OPT_LEN 128
45
46#define DCCP_MAX_PACKET_HDR 32
47
48#define MAX_DCCP_HEADER (DCCP_MAX_PACKET_HDR + DCCP_MAX_OPT_LEN + MAX_HEADER)
49
50#define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
51 * state, about 60 seconds */
52
53/* draft-ietf-dccp-spec-11.txt initial RTO value */
54#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
55
56/* Maximal interval between probes for local resources. */
57#define DCCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ / 2U))
58
59#define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
60
61extern struct proto dccp_v4_prot;
62
63/* is seq1 < seq2 ? */
64static inline int before48(const u64 seq1, const u64 seq2)
65{
66 return (s64)((seq1 << 16) - (seq2 << 16)) < 0;
67}
68
69/* is seq1 > seq2 ? */
70static inline int after48(const u64 seq1, const u64 seq2)
71{
72 return (s64)((seq2 << 16) - (seq1 << 16)) < 0;
73}
74
75/* is seq2 <= seq1 <= seq3 ? */
76static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
77{
78 return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
79}
80
81static inline u64 max48(const u64 seq1, const u64 seq2)
82{
83 return after48(seq1, seq2) ? seq1 : seq2;
84}
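/*
 * Worked example of the shift-by-16 trick above (illustrative):
 * sequence numbers are 48 bits wide, so shifting them into the top
 * 48 bits of a 64-bit word lets plain signed 64-bit subtraction do
 * the circular comparison. With seq1 == 0xffffffffffff (the maximum)
 * and seq2 == 0, (seq1 << 16) - (seq2 << 16) == 0xffffffffffff0000,
 * which is negative as an s64, so before48(seq1, seq2) is true:
 * sequence number 0 comes "after" the wrap point.
 */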
85
86enum {
87 DCCP_MIB_NUM = 0,
88 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
89 DCCP_MIB_ESTABRESETS, /* EstabResets */
90 DCCP_MIB_CURRESTAB, /* CurrEstab */
91 DCCP_MIB_OUTSEGS, /* OutSegs */
92 DCCP_MIB_OUTRSTS,
93 DCCP_MIB_ABORTONTIMEOUT,
94 DCCP_MIB_TIMEOUTS,
95 DCCP_MIB_ABORTFAILED,
96 DCCP_MIB_PASSIVEOPENS,
97 DCCP_MIB_ATTEMPTFAILS,
98 DCCP_MIB_OUTDATAGRAMS,
99 DCCP_MIB_INERRS,
100 DCCP_MIB_OPTMANDATORYERROR,
101 DCCP_MIB_INVALIDOPT,
102 __DCCP_MIB_MAX
103};
104
105#define DCCP_MIB_MAX __DCCP_MIB_MAX
106struct dccp_mib {
107 unsigned long mibs[DCCP_MIB_MAX];
108} __SNMP_MIB_ALIGN__;
109
110DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
111#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
112#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field)
113#define DCCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(dccp_statistics, field)
114#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field)
115#define DCCP_ADD_STATS_BH(field, val) \
116 SNMP_ADD_STATS_BH(dccp_statistics, field, val)
117#define DCCP_ADD_STATS_USER(field, val) \
118 SNMP_ADD_STATS_USER(dccp_statistics, field, val)
119
120extern int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb);
121extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb);
122
123extern int dccp_send_response(struct sock *sk);
124extern void dccp_send_ack(struct sock *sk);
125extern void dccp_send_delayed_ack(struct sock *sk);
126extern void dccp_send_sync(struct sock *sk, const u64 seq,
127 const enum dccp_pkt_type pkt_type);
128
129extern int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo);
130extern void dccp_write_space(struct sock *sk);
131
132extern void dccp_init_xmit_timers(struct sock *sk);
133static inline void dccp_clear_xmit_timers(struct sock *sk)
134{
135 inet_csk_clear_xmit_timers(sk);
136}
137
138extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
139
140extern const char *dccp_packet_name(const int type);
141extern const char *dccp_state_name(const int state);
142
143static inline void dccp_set_state(struct sock *sk, const int state)
144{
145 const int oldstate = sk->sk_state;
146
147 dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
148 dccp_role(sk), sk,
149 dccp_state_name(oldstate), dccp_state_name(state));
150 WARN_ON(state == oldstate);
151
152 switch (state) {
153 case DCCP_OPEN:
154 if (oldstate != DCCP_OPEN)
155 DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
156 break;
157
158 case DCCP_CLOSED:
159 if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
160 DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);
161
162 sk->sk_prot->unhash(sk);
163 if (inet_csk(sk)->icsk_bind_hash != NULL &&
164 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
165 inet_put_port(&dccp_hashinfo, sk);
166 /* fall through */
167 default:
168 if (oldstate == DCCP_OPEN)
169 DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
170 }
171
172 /* Change state AFTER socket is unhashed to avoid closed
173 * socket sitting in hash tables.
174 */
175 sk->sk_state = state;
176}
177
178static inline void dccp_done(struct sock *sk)
179{
180 dccp_set_state(sk, DCCP_CLOSED);
181 dccp_clear_xmit_timers(sk);
182
183 sk->sk_shutdown = SHUTDOWN_MASK;
184
185 if (!sock_flag(sk, SOCK_DEAD))
186 sk->sk_state_change(sk);
187 else
188 inet_csk_destroy_sock(sk);
189}
190
191static inline void dccp_openreq_init(struct request_sock *req,
192 struct dccp_sock *dp,
193 struct sk_buff *skb)
194{
195 /*
196 * FIXME: fill in the other req fields from the DCCP options
197 * received
198 */
199 inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
200 inet_rsk(req)->acked = 0;
201 req->rcv_wnd = 0;
202}
203
204extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
205
206extern struct sock *dccp_create_openreq_child(struct sock *sk,
207 const struct request_sock *req,
208 const struct sk_buff *skb);
209
210extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
211
212extern void dccp_v4_err(struct sk_buff *skb, u32);
213
214extern int dccp_v4_rcv(struct sk_buff *skb);
215
216extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
217 struct sk_buff *skb,
218 struct request_sock *req,
219 struct dst_entry *dst);
220extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
221 struct request_sock *req,
222 struct request_sock **prev);
223
224extern int dccp_child_process(struct sock *parent, struct sock *child,
225 struct sk_buff *skb);
226extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
227 struct dccp_hdr *dh, unsigned len);
228extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
229 const struct dccp_hdr *dh, const unsigned len);
230
231extern void dccp_close(struct sock *sk, long timeout);
232extern struct sk_buff *dccp_make_response(struct sock *sk,
233 struct dst_entry *dst,
234 struct request_sock *req);
235extern struct sk_buff *dccp_make_reset(struct sock *sk,
236 struct dst_entry *dst,
237 enum dccp_reset_codes code);
238
239extern int dccp_connect(struct sock *sk);
240extern int dccp_disconnect(struct sock *sk, int flags);
241extern int dccp_getsockopt(struct sock *sk, int level, int optname,
242 char __user *optval, int __user *optlen);
243extern int dccp_setsockopt(struct sock *sk, int level, int optname,
244 char __user *optval, int optlen);
245extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
246extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
247 struct msghdr *msg, size_t size);
248extern int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
249 struct msghdr *msg, size_t len, int nonblock,
250 int flags, int *addr_len);
251extern void dccp_shutdown(struct sock *sk, int how);
252
253extern int dccp_v4_checksum(const struct sk_buff *skb,
254 const u32 saddr, const u32 daddr);
255
256extern int dccp_v4_send_reset(struct sock *sk,
257 enum dccp_reset_codes code);
258extern void dccp_send_close(struct sock *sk, const int active);
259
260struct dccp_skb_cb {
261 __u8 dccpd_type;
262 __u8 dccpd_reset_code;
263 __u8 dccpd_service;
264 __u8 dccpd_ccval;
265 __u64 dccpd_seq;
266 __u64 dccpd_ack_seq;
267 int dccpd_opt_len;
268};
269
270#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))
271
272static inline int dccp_non_data_packet(const struct sk_buff *skb)
273{
274 const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
275
276 return type == DCCP_PKT_ACK ||
277 type == DCCP_PKT_CLOSE ||
278 type == DCCP_PKT_CLOSEREQ ||
279 type == DCCP_PKT_RESET ||
280 type == DCCP_PKT_SYNC ||
281 type == DCCP_PKT_SYNCACK;
282}
283
284static inline int dccp_packet_without_ack(const struct sk_buff *skb)
285{
286 const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
287
288 return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
289}
290
291#define DCCP_MAX_SEQNO ((((u64)1) << 48) - 1)
292#define DCCP_PKT_WITHOUT_ACK_SEQ (DCCP_MAX_SEQNO << 2)
293
294static inline void dccp_set_seqno(u64 *seqno, u64 value)
295{
296 if (value > DCCP_MAX_SEQNO)
297 value -= DCCP_MAX_SEQNO + 1;
298 *seqno = value;
299}
300
301static inline u64 dccp_delta_seqno(u64 seqno1, u64 seqno2)
302{
303 return ((seqno2 << 16) - (seqno1 << 16)) >> 16;
304}
305
306static inline void dccp_inc_seqno(u64 *seqno)
307{
308 if (++*seqno > DCCP_MAX_SEQNO)
309 *seqno = 0;
310}
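/*
 * Illustrative wrap-around behaviour of the three helpers above,
 * starting from seq == DCCP_MAX_SEQNO (2^48 - 1):
 * dccp_inc_seqno(&seq) leaves seq == 0, and
 * dccp_set_seqno(&seq, DCCP_MAX_SEQNO + 5) leaves seq == 4.
 * Likewise dccp_delta_seqno(DCCP_MAX_SEQNO, 2) == 3, the distance
 * being taken modulo 2^48 via the same shift-by-16 trick as in
 * before48().
 */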
311
312static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
313{
314 struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
315 sizeof(*dh));
316
317#if defined(__LITTLE_ENDIAN_BITFIELD)
318 dh->dccph_seq = htonl((gss >> 32)) >> 8;
319#elif defined(__BIG_ENDIAN_BITFIELD)
320 dh->dccph_seq = htonl((gss >> 32));
321#else
322#error "Adjust your <asm/byteorder.h> defines"
323#endif
324 dhx->dccph_seq_low = htonl(gss & 0xffffffff);
325}
326
327static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
328 const u64 gsr)
329{
330#if defined(__LITTLE_ENDIAN_BITFIELD)
331 dhack->dccph_ack_nr_high = htonl((gsr >> 32)) >> 8;
332#elif defined(__BIG_ENDIAN_BITFIELD)
333 dhack->dccph_ack_nr_high = htonl((gsr >> 32));
334#else
335#error "Adjust your <asm/byteorder.h> defines"
336#endif
337 dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff);
338}
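/*
 * Both setters above split a 48-bit number the same way: the high
 * 16 bits land in the bitfield of the fixed header, the low 32 bits
 * in the extension word. E.g. (illustrative) for
 * gss == 0x123456789abc, gss >> 32 == 0x1234 goes into dccph_seq and
 * htonl(0x56789abc) into dccph_seq_low.
 */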
339
340static inline void dccp_update_gsr(struct sock *sk, u64 seq)
341{
342 struct dccp_sock *dp = dccp_sk(sk);
343
344 dp->dccps_gsr = seq;
345 dccp_set_seqno(&dp->dccps_swl,
346 (dp->dccps_gsr + 1 -
347 (dp->dccps_options.dccpo_sequence_window / 4)));
348 dccp_set_seqno(&dp->dccps_swh,
349 (dp->dccps_gsr +
350 (3 * dp->dccps_options.dccpo_sequence_window) / 4));
351}
352
353static inline void dccp_update_gss(struct sock *sk, u64 seq)
354{
355 struct dccp_sock *dp = dccp_sk(sk);
356
357 dp->dccps_awh = dp->dccps_gss = seq;
358 dccp_set_seqno(&dp->dccps_awl,
359 (dp->dccps_gss -
360 dp->dccps_options.dccpo_sequence_window + 1));
361}
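/*
 * Illustrative numbers for the window updates above: with sequence
 * window W == 100 and GSR == 1000, dccp_update_gsr() yields
 * SWL == 1000 + 1 - 100/4 == 976 and SWH == 1000 + 3*100/4 == 1075,
 * i.e. the valid window straddles the last packet received in a
 * W/4 : 3W/4 split; dccp_update_gss() similarly sets
 * AWL == GSS - W + 1.
 */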
362
363extern void dccp_insert_options(struct sock *sk, struct sk_buff *skb);
364extern void dccp_insert_option_elapsed_time(struct sock *sk,
365 struct sk_buff *skb,
366 u32 elapsed_time);
367extern void dccp_insert_option_timestamp(struct sock *sk,
368 struct sk_buff *skb);
369extern void dccp_insert_option(struct sock *sk, struct sk_buff *skb,
370 unsigned char option,
371 const void *value, unsigned char len);
372
373extern struct socket *dccp_ctl_socket;
374
375#define DCCP_ACKPKTS_STATE_RECEIVED 0
376#define DCCP_ACKPKTS_STATE_ECN_MARKED (1 << 6)
377#define DCCP_ACKPKTS_STATE_NOT_RECEIVED (3 << 6)
378
379#define DCCP_ACKPKTS_STATE_MASK 0xC0 /* 11000000 */
380#define DCCP_ACKPKTS_LEN_MASK 0x3F /* 00111111 */
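/*
 * Each buffer byte encodes a state in its top two bits and a run
 * length in the low six, per the draft's Appendix A (a byte covers
 * run length + 1 packets). For example (illustrative), the byte
 * (DCCP_ACKPKTS_STATE_NOT_RECEIVED | 5) == 0xc5 stands for a run of
 * 6 consecutive packets that were not received.
 */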
381
382/** struct dccp_ackpkts - acknowledgeable packets
383 *
384 * This data structure is the one defined in the DCCP draft
385 * Appendix A.
386 *
387 * @dccpap_buf_head - circular buffer head
388 * @dccpap_buf_tail - circular buffer tail
389 * @dccpap_buf_ackno - ack # of the most recent packet acknowledgeable in the
390 * buffer (i.e. %dccpap_buf_head)
391 * @dccpap_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked
392 * by the buffer with State 0
393 *
394 * Additionally, the HC-Receiver must keep some information about the
395 * Ack Vectors it has recently sent. For each packet sent carrying an
396 * Ack Vector, it remembers four variables:
397 *
398 * @dccpap_ack_seqno - the Sequence Number used for the packet
399 * (HC-Receiver seqno)
400 * @dccpap_ack_ptr - the value of buf_head at the time of acknowledgement.
401 * @dccpap_ack_ackno - the Acknowledgement Number used for the packet
402 * (HC-Sender seqno)
403 * @dccpap_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
404 *
405 * @dccpap_buf_len - circular buffer length
406 * @dccpap_time - the time in usecs
407 * @dccpap_buf - circular buffer of acknowledgeable packets
408 */
409struct dccp_ackpkts {
410 unsigned int dccpap_buf_head;
411 unsigned int dccpap_buf_tail;
412 u64 dccpap_buf_ackno;
413 u64 dccpap_ack_seqno;
414 u64 dccpap_ack_ackno;
415 unsigned int dccpap_ack_ptr;
416 unsigned int dccpap_buf_vector_len;
417 unsigned int dccpap_ack_vector_len;
418 unsigned int dccpap_buf_len;
419 struct timeval dccpap_time;
420 u8 dccpap_buf_nonce;
421 u8 dccpap_ack_nonce;
422 u8 dccpap_buf[0];
423};
424
425extern struct dccp_ackpkts *
426 dccp_ackpkts_alloc(unsigned int len,
427 const unsigned int __nocast priority);
428extern void dccp_ackpkts_free(struct dccp_ackpkts *ap);
429extern int dccp_ackpkts_add(struct dccp_ackpkts *ap, u64 ackno, u8 state);
430extern void dccp_ackpkts_check_rcv_ackno(struct dccp_ackpkts *ap,
431 struct sock *sk, u64 ackno);
432
433static inline suseconds_t timeval_usecs(const struct timeval *tv)
434{
435 return tv->tv_sec * USEC_PER_SEC + tv->tv_usec;
436}
437
438static inline suseconds_t timeval_delta(const struct timeval *large,
439 const struct timeval *small)
440{
441 time_t secs = large->tv_sec - small->tv_sec;
442 suseconds_t usecs = large->tv_usec - small->tv_usec;
443
444 if (usecs < 0) {
445 secs--;
446 usecs += USEC_PER_SEC;
447 }
448 return secs * USEC_PER_SEC + usecs;
449}
450
451static inline void timeval_add_usecs(struct timeval *tv,
452 const suseconds_t usecs)
453{
454 tv->tv_usec += usecs;
455 while (tv->tv_usec >= USEC_PER_SEC) {
456 tv->tv_sec++;
457 tv->tv_usec -= USEC_PER_SEC;
458 }
459}
460
461static inline void timeval_sub_usecs(struct timeval *tv,
462 const suseconds_t usecs)
463{
464 tv->tv_usec -= usecs;
465 while (tv->tv_usec < 0) {
466 tv->tv_sec--;
467 tv->tv_usec += USEC_PER_SEC;
468 }
469}
470
471/*
472 * Returns the difference in usecs between the timeval
473 * passed in and the current time
474 */
475static inline suseconds_t timeval_now_delta(const struct timeval *tv)
476{
477 struct timeval now;
478 do_gettimeofday(&now);
479 return timeval_delta(&now, tv);
480}
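/*
 * Example (illustrative) for the helpers above: with
 * large == { .tv_sec = 2, .tv_usec = 100 } and
 * small == { .tv_sec = 1, .tv_usec = 900000 }, timeval_delta()
 * borrows from the seconds field and returns
 * 0 * USEC_PER_SEC + (100 - 900000 + USEC_PER_SEC) == 100100 usecs.
 */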
481
482#ifdef CONFIG_IP_DCCP_DEBUG
483extern void dccp_ackvector_print(const u64 ackno,
484 const unsigned char *vector, int len);
485extern void dccp_ackpkts_print(const struct dccp_ackpkts *ap);
486#else
487static inline void dccp_ackvector_print(const u64 ackno,
488 const unsigned char *vector,
489 int len) { }
490static inline void dccp_ackpkts_print(const struct dccp_ackpkts *ap) { }
491#endif
492
493#endif /* _DCCP_H */
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
new file mode 100644
index 000000000000..f675d8e642d3
--- /dev/null
+++ b/net/dccp/diag.c
@@ -0,0 +1,71 @@
1/*
2 * net/dccp/diag.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@mandriva.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/config.h>
13
14#include <linux/module.h>
15#include <linux/inet_diag.h>
16
17#include "ccid.h"
18#include "dccp.h"
19
20static void dccp_get_info(struct sock *sk, struct tcp_info *info)
21{
22 struct dccp_sock *dp = dccp_sk(sk);
23 const struct inet_connection_sock *icsk = inet_csk(sk);
24
25 memset(info, 0, sizeof(*info));
26
27 info->tcpi_state = sk->sk_state;
28 info->tcpi_retransmits = icsk->icsk_retransmits;
29 info->tcpi_probes = icsk->icsk_probes_out;
30 info->tcpi_backoff = icsk->icsk_backoff;
31 info->tcpi_pmtu = dp->dccps_pmtu_cookie;
32
33 if (dp->dccps_options.dccpo_send_ack_vector)
34 info->tcpi_options |= TCPI_OPT_SACK;
35
36 ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info);
37 ccid_hc_tx_get_info(dp->dccps_hc_tx_ccid, sk, info);
38}
39
40static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
41 void *_info)
42{
43 r->idiag_rqueue = r->idiag_wqueue = 0;
44
45 if (_info != NULL)
46 dccp_get_info(sk, _info);
47}
48
49static struct inet_diag_handler dccp_diag_handler = {
50 .idiag_hashinfo = &dccp_hashinfo,
51 .idiag_get_info = dccp_diag_get_info,
52 .idiag_type = DCCPDIAG_GETSOCK,
53 .idiag_info_size = sizeof(struct tcp_info),
54};
55
56static int __init dccp_diag_init(void)
57{
58 return inet_diag_register(&dccp_diag_handler);
59}
60
61static void __exit dccp_diag_fini(void)
62{
63 inet_diag_unregister(&dccp_diag_handler);
64}
65
66module_init(dccp_diag_init);
67module_exit(dccp_diag_fini);
68
69MODULE_LICENSE("GPL");
70MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
71MODULE_DESCRIPTION("DCCP inet_diag handler");
diff --git a/net/dccp/input.c b/net/dccp/input.c
new file mode 100644
index 000000000000..ef29cef1dafe
--- /dev/null
+++ b/net/dccp/input.c
@@ -0,0 +1,600 @@
1/*
2 * net/dccp/input.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/dccp.h>
15#include <linux/skbuff.h>
16
17#include <net/sock.h>
18
19#include "ccid.h"
20#include "dccp.h"
21
22static void dccp_fin(struct sock *sk, struct sk_buff *skb)
23{
24 sk->sk_shutdown |= RCV_SHUTDOWN;
25 sock_set_flag(sk, SOCK_DONE);
26 __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
27 __skb_queue_tail(&sk->sk_receive_queue, skb);
28 skb_set_owner_r(skb, sk);
29 sk->sk_data_ready(sk, 0);
30}
31
32static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
33{
34 dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
35 dccp_fin(sk, skb);
36 dccp_set_state(sk, DCCP_CLOSED);
37 sk_wake_async(sk, 1, POLL_HUP);
38}
39
40static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
41{
42 /*
43 * Step 7: Check for unexpected packet types
44 * If (S.is_server and P.type == CloseReq)
45 * Send Sync packet acknowledging P.seqno
46 * Drop packet and return
47 */
48 if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
49 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
50 return;
51 }
52
53 dccp_set_state(sk, DCCP_CLOSING);
54 dccp_send_close(sk, 0);
55}
56
57static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
58{
59 struct dccp_sock *dp = dccp_sk(sk);
60
61 if (dp->dccps_options.dccpo_send_ack_vector)
62 dccp_ackpkts_check_rcv_ackno(dp->dccps_hc_rx_ackpkts, sk,
63 DCCP_SKB_CB(skb)->dccpd_ack_seq);
64}
65
66static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
67{
68 const struct dccp_hdr *dh = dccp_hdr(skb);
69 struct dccp_sock *dp = dccp_sk(sk);
70 u64 lswl, lawl;
71
72 /*
73 * Step 5: Prepare sequence numbers for Sync
74 * If P.type == Sync or P.type == SyncAck,
75 * If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
76 * / * P is valid, so update sequence number variables
77 * accordingly. After this update, P will pass the tests
78 * in Step 6. A SyncAck is generated if necessary in
79 * Step 15 * /
80 * Update S.GSR, S.SWL, S.SWH
81 * Otherwise,
82 * Drop packet and return
83 */
84 if (dh->dccph_type == DCCP_PKT_SYNC ||
85 dh->dccph_type == DCCP_PKT_SYNCACK) {
86 if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
87 dp->dccps_awl, dp->dccps_awh) &&
88 !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
89 dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
90 else
91 return -1;
92 }
93
94 /*
95 * Step 6: Check sequence numbers
96 * Let LSWL = S.SWL and LAWL = S.AWL
97 * If P.type == CloseReq or P.type == Close or P.type == Reset,
98 * LSWL := S.GSR + 1, LAWL := S.GAR
99 * If LSWL <= P.seqno <= S.SWH
100 * and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
101 * Update S.GSR, S.SWL, S.SWH
102 * If P.type != Sync,
103 * Update S.GAR
104 * Otherwise,
105 * Send Sync packet acknowledging P.seqno
106 * Drop packet and return
107 */
108 lswl = dp->dccps_swl;
109 lawl = dp->dccps_awl;
110
111 if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
112 dh->dccph_type == DCCP_PKT_CLOSE ||
113 dh->dccph_type == DCCP_PKT_RESET) {
114 lswl = dp->dccps_gsr;
115 dccp_inc_seqno(&lswl);
116 lawl = dp->dccps_gar;
117 }
118
119 if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
120 (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
121 between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
122 lawl, dp->dccps_awh))) {
123 dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
124
125 if (dh->dccph_type != DCCP_PKT_SYNC &&
126 (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
127 DCCP_PKT_WITHOUT_ACK_SEQ))
128 dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
129 } else {
130 LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
131 "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
132 "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
133 "sending SYNC...\n",
134 dccp_packet_name(dh->dccph_type),
135 (unsigned long long) lswl,
136 (unsigned long long)
137 DCCP_SKB_CB(skb)->dccpd_seq,
138 (unsigned long long) dp->dccps_swh,
139 (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
140 DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
141 (unsigned long long) lawl,
142 (unsigned long long)
143 DCCP_SKB_CB(skb)->dccpd_ack_seq,
144 (unsigned long long) dp->dccps_awh);
145 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
146 return -1;
147 }
148
149 return 0;
150}
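/*
 * Illustrative walk-through of the Step 6 check above: with
 * SWL == 976, SWH == 1075 (see dccp_update_gsr()) and GSR == 1000,
 * an incoming Data packet carrying seqno 1001 and no ackno passes
 * and advances GSR, while a stale packet with seqno 950 fails the
 * test and triggers the Sync in the else branch.
 */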
151
152int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
153 const struct dccp_hdr *dh, const unsigned len)
154{
155 struct dccp_sock *dp = dccp_sk(sk);
156
157 if (dccp_check_seqno(sk, skb))
158 goto discard;
159
160 if (dccp_parse_options(sk, skb))
161 goto discard;
162
163 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
164 dccp_event_ack_recv(sk, skb);
165
166 /*
167 * FIXME: check ECN to see if we should use
168 * DCCP_ACKPKTS_STATE_ECN_MARKED
169 */
170 if (dp->dccps_options.dccpo_send_ack_vector) {
171 struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
172
173 if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
174 DCCP_SKB_CB(skb)->dccpd_seq,
175 DCCP_ACKPKTS_STATE_RECEIVED)) {
176 LIMIT_NETDEBUG(KERN_WARNING "DCCP: acknowledgeable "
177 "packets buffer full!\n");
178 ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
179 inet_csk_schedule_ack(sk);
180 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
181 TCP_DELACK_MIN,
182 DCCP_RTO_MAX);
183 goto discard;
184 }
185
186 /*
187 * FIXME: this activation is probably wrong, have to study more
188 * TCP delack machinery and how it fits into DCCP draft, but
189 * for now it kinda "works" 8)
190 */
191 if (!inet_csk_ack_scheduled(sk)) {
192 inet_csk_schedule_ack(sk);
193 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5 * HZ,
194 DCCP_RTO_MAX);
195 }
196 }
197
198 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
199 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
200
201 switch (dccp_hdr(skb)->dccph_type) {
202 case DCCP_PKT_DATAACK:
203 case DCCP_PKT_DATA:
204 /*
205 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
206 * option if it is.
207 */
208 __skb_pull(skb, dh->dccph_doff * 4);
209 __skb_queue_tail(&sk->sk_receive_queue, skb);
210 skb_set_owner_r(skb, sk);
211 sk->sk_data_ready(sk, 0);
212 return 0;
213 case DCCP_PKT_ACK:
214 goto discard;
215 case DCCP_PKT_RESET:
216 /*
217 * Step 9: Process Reset
218 * If P.type == Reset,
219 * Tear down connection
220 * S.state := TIMEWAIT
221 * Set TIMEWAIT timer
222 * Drop packet and return
223 */
224 dccp_fin(sk, skb);
225 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
226 return 0;
227 case DCCP_PKT_CLOSEREQ:
228 dccp_rcv_closereq(sk, skb);
229 goto discard;
230 case DCCP_PKT_CLOSE:
231 dccp_rcv_close(sk, skb);
232 return 0;
233 case DCCP_PKT_REQUEST:
234 /* Step 7
235 * or (S.is_server and P.type == Response)
236 * or (S.is_client and P.type == Request)
237 * or (S.state >= OPEN and P.type == Request
238 * and P.seqno >= S.OSR)
239 * or (S.state >= OPEN and P.type == Response
240 * and P.seqno >= S.OSR)
241 * or (S.state == RESPOND and P.type == Data),
242 * Send Sync packet acknowledging P.seqno
243 * Drop packet and return
244 */
245 if (dp->dccps_role != DCCP_ROLE_LISTEN)
246 goto send_sync;
247 goto check_seq;
248 case DCCP_PKT_RESPONSE:
249 if (dp->dccps_role != DCCP_ROLE_CLIENT)
250 goto send_sync;
251check_seq:
252 if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
253send_sync:
254 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
255 DCCP_PKT_SYNC);
256 }
257 break;
258 case DCCP_PKT_SYNC:
259 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
260 DCCP_PKT_SYNCACK);
261 /*
262 * From the draft:
263 *
264 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
265 * MAY have non-zero-length application data areas, whose
266 * contents receivers MUST ignore.
267 */
268 goto discard;
269 }
270
271 DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
272discard:
273 __kfree_skb(skb);
274 return 0;
275}
276
277static int dccp_rcv_request_sent_state_process(struct sock *sk,
278 struct sk_buff *skb,
279 const struct dccp_hdr *dh,
280 const unsigned len)
281{
282 /*
283 * Step 4: Prepare sequence numbers in REQUEST
284 * If S.state == REQUEST,
285 * If (P.type == Response or P.type == Reset)
286 * and S.AWL <= P.ackno <= S.AWH,
287 * / * Set sequence number variables corresponding to the
288 * other endpoint, so P will pass the tests in Step 6 * /
289 * Set S.GSR, S.ISR, S.SWL, S.SWH
290 * / * Response processing continues in Step 10; Reset
291 * processing continues in Step 9 * /
292 */
293 if (dh->dccph_type == DCCP_PKT_RESPONSE) {
294 const struct inet_connection_sock *icsk = inet_csk(sk);
295 struct dccp_sock *dp = dccp_sk(sk);
296
297 /* Stop the REQUEST timer */
298 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
299 BUG_TRAP(sk->sk_send_head != NULL);
300 __kfree_skb(sk->sk_send_head);
301 sk->sk_send_head = NULL;
302
303 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
304 dp->dccps_awl, dp->dccps_awh)) {
305 dccp_pr_debug("invalid ackno: S.AWL=%llu, "
306 "P.ackno=%llu, S.AWH=%llu \n",
307 (unsigned long long)dp->dccps_awl,
308 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
309 (unsigned long long)dp->dccps_awh);
310 goto out_invalid_packet;
311 }
312
313 dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
314 dccp_update_gsr(sk, dp->dccps_isr);
315 /*
316 * SWL and AWL are initially adjusted so that they are not less than
317 * the initial Sequence Numbers received and sent, respectively:
318 * SWL := max(GSR + 1 - floor(W/4), ISR),
319 * AWL := max(GSS - W' + 1, ISS).
320 * These adjustments MUST be applied only at the beginning of the
321 * connection.
322 *
323 * AWL was adjusted in dccp_v4_connect -acme
324 */
325 dccp_set_seqno(&dp->dccps_swl,
326 max48(dp->dccps_swl, dp->dccps_isr));
327
328 if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
329 ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
330 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
331 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
332 /* FIXME: send appropriate RESET code */
333 goto out_invalid_packet;
334 }
335
336 dccp_sync_mss(sk, dp->dccps_pmtu_cookie);
337
338 /*
339 * Step 10: Process REQUEST state (second part)
340 * If S.state == REQUEST,
341 * / * If we get here, P is a valid Response from the
342 * server (see Step 4), and we should move to
343 * PARTOPEN state. PARTOPEN means send an Ack,
344 * don't send Data packets, retransmit Acks
345 * periodically, and always include any Init Cookie
346 * from the Response * /
347 * S.state := PARTOPEN
348 * Set PARTOPEN timer
349 * Continue with S.state == PARTOPEN
350 * / * Step 12 will send the Ack completing the
351 * three-way handshake * /
352 */
353 dccp_set_state(sk, DCCP_PARTOPEN);
354
355 /* Make sure socket is routed, for correct metrics. */
356 inet_sk_rebuild_header(sk);
357
358 if (!sock_flag(sk, SOCK_DEAD)) {
359 sk->sk_state_change(sk);
360 sk_wake_async(sk, 0, POLL_OUT);
361 }
362
363 if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
364 icsk->icsk_accept_queue.rskq_defer_accept) {
365 /* Save one ACK. Data will be ready after
366 * several ticks, if write_pending is set.
367 *
368 * It may be deleted, but with this feature tcpdumps
369 * look so _wonderfully_ clever, that I was not able
370 * to stand against the temptation 8) --ANK
371 */
372 /*
373 * OK, in DCCP we can do a similar trick as well; it's
374 * even in the draft, but there is no need for us to
375 * schedule an ack here, as dccp_sendmsg does this for
376 * us, also stated in the draft. -acme
377 */
378 __kfree_skb(skb);
379 return 0;
380 }
381 dccp_send_ack(sk);
382 return -1;
383 }
384
385out_invalid_packet:
386 return 1; /* dccp_v4_do_rcv will send a reset, but...
387 FIXME: the reset code should be
388 DCCP_RESET_CODE_PACKET_ERROR */
389}
390
391static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
392 struct sk_buff *skb,
393 const struct dccp_hdr *dh,
394 const unsigned len)
395{
396 int queued = 0;
397
398 switch (dh->dccph_type) {
399 case DCCP_PKT_RESET:
400 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
401 break;
402 case DCCP_PKT_DATAACK:
403 case DCCP_PKT_ACK:
404 /*
405 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
406 * here, but only if we haven't used the DELACK timer for
407 * something else, like sending a delayed ack for a TIMESTAMP
408 * echo, etc. For now we're not clearing it; sending an extra
409 * ACK when there is nothing else to do in DELACK is not a big
410 * deal after all.
411 */
412
413 /* Stop the PARTOPEN timer */
414 if (sk->sk_state == DCCP_PARTOPEN)
415 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
416
417 dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
418 dccp_set_state(sk, DCCP_OPEN);
419
420 if (dh->dccph_type == DCCP_PKT_DATAACK) {
421 dccp_rcv_established(sk, skb, dh, len);
422 queued = 1; /* packet was queued
423 (by dccp_rcv_established) */
424 }
425 break;
426 }
427
428 return queued;
429}
430
431int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
432 struct dccp_hdr *dh, unsigned len)
433{
434 struct dccp_sock *dp = dccp_sk(sk);
435 const int old_state = sk->sk_state;
436 int queued = 0;
437
438 /*
439 * Step 3: Process LISTEN state
440 * (Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
441 *
442 * If S.state == LISTEN,
443 * If P.type == Request or P contains a valid Init Cookie
444 * option,
445 * * Must scan the packet's options to check for an Init
446 * Cookie. Only the Init Cookie is processed here,
447 * however; other options are processed in Step 8. This
448 * scan need only be performed if the endpoint uses Init
449 * Cookies *
450 * * Generate a new socket and switch to that socket *
451 * Set S := new socket for this port pair
452 * S.state = RESPOND
453 * Choose S.ISS (initial seqno) or set from Init Cookie
454 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
455 * Continue with S.state == RESPOND
456 * * A Response packet will be generated in Step 11 *
457 * Otherwise,
458 * Generate Reset(No Connection) unless P.type == Reset
459 * Drop packet and return
460 *
461 * NOTE: the check for the packet types is done in
462 * dccp_rcv_state_process
463 */
464 if (sk->sk_state == DCCP_LISTEN) {
465 if (dh->dccph_type == DCCP_PKT_REQUEST) {
466 if (dccp_v4_conn_request(sk, skb) < 0)
467 return 1;
468
469 /* FIXME: do congestion control initialization */
470 goto discard;
471 }
472 if (dh->dccph_type == DCCP_PKT_RESET)
473 goto discard;
474
475 /* Caller (dccp_v4_do_rcv) will send Reset(No Connection)*/
476 return 1;
477 }
478
479 if (sk->sk_state != DCCP_REQUESTING) {
480 if (dccp_check_seqno(sk, skb))
481 goto discard;
482
483 /*
484 * Step 8: Process options and mark acknowledgeable
485 */
486 if (dccp_parse_options(sk, skb))
487 goto discard;
488
489 if (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
490 DCCP_PKT_WITHOUT_ACK_SEQ)
491 dccp_event_ack_recv(sk, skb);
492
493 ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
494 ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
495
496 /*
497 * FIXME: check ECN to see if we should use
498 * DCCP_ACKPKTS_STATE_ECN_MARKED
499 */
500 if (dp->dccps_options.dccpo_send_ack_vector) {
501 if (dccp_ackpkts_add(dp->dccps_hc_rx_ackpkts,
502 DCCP_SKB_CB(skb)->dccpd_seq,
503 DCCP_ACKPKTS_STATE_RECEIVED))
504 goto discard;
505 /*
506 * FIXME: this activation is probably wrong, have to
507 * study more TCP delack machinery and how it fits into
508 * DCCP draft, but for now it kinda "works" 8)
509 */
510 if ((dp->dccps_hc_rx_ackpkts->dccpap_ack_seqno ==
511 DCCP_MAX_SEQNO + 1) &&
512 !inet_csk_ack_scheduled(sk)) {
513 inet_csk_schedule_ack(sk);
514 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
515 TCP_DELACK_MIN,
516 DCCP_RTO_MAX);
517 }
518 }
519 }
520
521 /*
522 * Step 9: Process Reset
523 * If P.type == Reset,
524 * Tear down connection
525 * S.state := TIMEWAIT
526 * Set TIMEWAIT timer
527 * Drop packet and return
528 */
529 if (dh->dccph_type == DCCP_PKT_RESET) {
530 /*
531 * Queue the equivalent of TCP fin so that dccp_recvmsg
532 * exits the loop
533 */
534 dccp_fin(sk, skb);
535 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
536 return 0;
537 /*
538 * Step 7: Check for unexpected packet types
539 * If (S.is_server and P.type == CloseReq)
540 * or (S.is_server and P.type == Response)
541 * or (S.is_client and P.type == Request)
542 * or (S.state == RESPOND and P.type == Data),
543 * Send Sync packet acknowledging P.seqno
544 * Drop packet and return
545 */
546 } else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
547 (dh->dccph_type == DCCP_PKT_RESPONSE ||
548 dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
549 (dp->dccps_role == DCCP_ROLE_CLIENT &&
550 dh->dccph_type == DCCP_PKT_REQUEST) ||
551 (sk->sk_state == DCCP_RESPOND &&
552 dh->dccph_type == DCCP_PKT_DATA)) {
553 dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
554 DCCP_PKT_SYNC);
555 goto discard;
556 } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
557 dccp_rcv_closereq(sk, skb);
558 goto discard;
559 } else if (dh->dccph_type == DCCP_PKT_CLOSE) {
560 dccp_rcv_close(sk, skb);
561 return 0;
562 }
563
564 switch (sk->sk_state) {
565 case DCCP_CLOSED:
566 return 1;
567
568 case DCCP_REQUESTING:
569 /* FIXME: do congestion control initialization */
570
571 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
572 if (queued >= 0)
573 return queued;
574
575 __kfree_skb(skb);
576 return 0;
577
578 case DCCP_RESPOND:
579 case DCCP_PARTOPEN:
580 queued = dccp_rcv_respond_partopen_state_process(sk, skb,
581 dh, len);
582 break;
583 }
584
585 if (dh->dccph_type == DCCP_PKT_ACK ||
586 dh->dccph_type == DCCP_PKT_DATAACK) {
587 switch (old_state) {
588 case DCCP_PARTOPEN:
589 sk->sk_state_change(sk);
590 sk_wake_async(sk, 0, POLL_OUT);
591 break;
592 }
593 }
594
595 if (!queued) {
596discard:
597 __kfree_skb(skb);
598 }
599 return 0;
600}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
new file mode 100644
index 000000000000..3fc75dbee4b8
--- /dev/null
+++ b/net/dccp/ipv4.c
@@ -0,0 +1,1356 @@
1/*
2 * net/dccp/ipv4.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/dccp.h>
15#include <linux/icmp.h>
16#include <linux/module.h>
17#include <linux/skbuff.h>
18#include <linux/random.h>
19
20#include <net/icmp.h>
21#include <net/inet_hashtables.h>
22#include <net/sock.h>
23#include <net/tcp_states.h>
24#include <net/xfrm.h>
25
26#include "ccid.h"
27#include "dccp.h"
28
29struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
30 .lhash_lock = RW_LOCK_UNLOCKED,
31 .lhash_users = ATOMIC_INIT(0),
32 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
33 .portalloc_lock = SPIN_LOCK_UNLOCKED,
34 .port_rover = 1024 - 1,
35};
36
37EXPORT_SYMBOL_GPL(dccp_hashinfo);
38
39static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
40{
41 return inet_csk_get_port(&dccp_hashinfo, sk, snum);
42}
43
44static void dccp_v4_hash(struct sock *sk)
45{
46 inet_hash(&dccp_hashinfo, sk);
47}
48
49static void dccp_v4_unhash(struct sock *sk)
50{
51 inet_unhash(&dccp_hashinfo, sk);
52}
53
54/* called with local bh disabled */
55static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
56 struct inet_timewait_sock **twp)
57{
58 struct inet_sock *inet = inet_sk(sk);
59 const u32 daddr = inet->rcv_saddr;
60 const u32 saddr = inet->daddr;
61 const int dif = sk->sk_bound_dev_if;
62 INET_ADDR_COOKIE(acookie, saddr, daddr)
63 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
64 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport,
65 dccp_hashinfo.ehash_size);
66 struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash];
67 const struct sock *sk2;
68 const struct hlist_node *node;
69 struct inet_timewait_sock *tw;
70
71 write_lock(&head->lock);
72
73 /* Check TIME-WAIT sockets first. */
74 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
75 tw = inet_twsk(sk2);
76
77 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif))
78 goto not_unique;
79 }
80 tw = NULL;
81
82 /* And established part... */
83 sk_for_each(sk2, node, &head->chain) {
84 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
85 goto not_unique;
86 }
87
88	/* Must record num and sport now. Otherwise we will see
89	 * the socket in the hash table with a funny identity. */
90 inet->num = lport;
91 inet->sport = htons(lport);
92 sk->sk_hashent = hash;
93 BUG_TRAP(sk_unhashed(sk));
94 __sk_add_node(sk, &head->chain);
95 sock_prot_inc_use(sk->sk_prot);
96 write_unlock(&head->lock);
97
98 if (twp != NULL) {
99 *twp = tw;
100 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
101 } else if (tw != NULL) {
102 /* Silly. Should hash-dance instead... */
103 inet_twsk_deschedule(tw, &dccp_death_row);
104 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
105
106 inet_twsk_put(tw);
107 }
108
109 return 0;
110
111not_unique:
112 write_unlock(&head->lock);
113 return -EADDRNOTAVAIL;
114}
115
116/*
117 * Bind a port for a connect operation and hash it.
118 */
119static int dccp_v4_hash_connect(struct sock *sk)
120{
121 const unsigned short snum = inet_sk(sk)->num;
122 struct inet_bind_hashbucket *head;
123 struct inet_bind_bucket *tb;
124 int ret;
125
126 if (snum == 0) {
127 int rover;
128 int low = sysctl_local_port_range[0];
129 int high = sysctl_local_port_range[1];
130 int remaining = (high - low) + 1;
131 struct hlist_node *node;
132 struct inet_timewait_sock *tw = NULL;
133
134 local_bh_disable();
135
136		/* TODO. Actually it is not such a bad idea to remove
137		 * dccp_hashinfo.portalloc_lock before the next submission to
138		 * Linus.
139		 * As soon as we touch this place at all it is time to think.
140		 *
141		 * Right now it protects a single _advisory_ variable,
142		 * dccp_hashinfo.port_rover, hence it is mostly useless.
143		 * The code will work nicely if we just delete it, but
144		 * I am afraid that in the contended case it will work no
145		 * better, or even worse: another cpu will just hit the same
146		 * bucket and spin there.
147		 * So some per-cpu salt could remove both the contention and
148		 * the memory ping-pong. Any ideas how to do this in a nice way?
149 */
150 spin_lock(&dccp_hashinfo.portalloc_lock);
151 rover = dccp_hashinfo.port_rover;
152
153 do {
154 rover++;
155 if ((rover < low) || (rover > high))
156 rover = low;
157 head = &dccp_hashinfo.bhash[inet_bhashfn(rover,
158 dccp_hashinfo.bhash_size)];
159 spin_lock(&head->lock);
160
161 /* Does not bother with rcv_saddr checks,
162 * because the established check is already
163 * unique enough.
164 */
165 inet_bind_bucket_for_each(tb, node, &head->chain) {
166 if (tb->port == rover) {
167 BUG_TRAP(!hlist_empty(&tb->owners));
168 if (tb->fastreuse >= 0)
169 goto next_port;
170 if (!__dccp_v4_check_established(sk,
171 rover,
172 &tw))
173 goto ok;
174 goto next_port;
175 }
176 }
177
178 tb = inet_bind_bucket_create(dccp_hashinfo.bind_bucket_cachep,
179 head, rover);
180 if (tb == NULL) {
181 spin_unlock(&head->lock);
182 break;
183 }
184 tb->fastreuse = -1;
185 goto ok;
186
187 next_port:
188 spin_unlock(&head->lock);
189 } while (--remaining > 0);
190 dccp_hashinfo.port_rover = rover;
191 spin_unlock(&dccp_hashinfo.portalloc_lock);
192
193 local_bh_enable();
194
195 return -EADDRNOTAVAIL;
196
197ok:
198 /* All locks still held and bhs disabled */
199 dccp_hashinfo.port_rover = rover;
200 spin_unlock(&dccp_hashinfo.portalloc_lock);
201
202 inet_bind_hash(sk, tb, rover);
203 if (sk_unhashed(sk)) {
204 inet_sk(sk)->sport = htons(rover);
205 __inet_hash(&dccp_hashinfo, sk, 0);
206 }
207 spin_unlock(&head->lock);
208
209 if (tw != NULL) {
210 inet_twsk_deschedule(tw, &dccp_death_row);
211 inet_twsk_put(tw);
212 }
213
214 ret = 0;
215 goto out;
216 }
217
218 head = &dccp_hashinfo.bhash[inet_bhashfn(snum,
219 dccp_hashinfo.bhash_size)];
220 tb = inet_csk(sk)->icsk_bind_hash;
221 spin_lock_bh(&head->lock);
222 if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) {
223 __inet_hash(&dccp_hashinfo, sk, 0);
224 spin_unlock_bh(&head->lock);
225 return 0;
226 } else {
227 spin_unlock(&head->lock);
228 /* No definite answer... Walk to established hash table */
229 ret = __dccp_v4_check_established(sk, snum, NULL);
230out:
231 local_bh_enable();
232 return ret;
233 }
234}
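
The rover scan above is the classic ephemeral-port search: start one past the port handed out last time, wrap at the range boundaries, and give up once every port has been visited. A stripped-down userspace sketch of the same loop, with all locking removed and a hypothetical is_free() callback standing in for the bind-bucket walk:

static int pick_local_port(int low, int high, int *rover,
			   int (*is_free)(int port))
{
	int remaining = (high - low) + 1;
	int port = *rover;

	do {
		if (++port < low || port > high)
			port = low;		/* wrap back into the range */
		if (is_free(port)) {		/* hypothetical bucket check */
			*rover = port;		/* start here next time */
			return port;
		}
	} while (--remaining > 0);

	return -1;	/* every port in the range was busy: -EADDRNOTAVAIL */
}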
235
236static int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
237 int addr_len)
238{
239 struct inet_sock *inet = inet_sk(sk);
240 struct dccp_sock *dp = dccp_sk(sk);
241 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
242 struct rtable *rt;
243 u32 daddr, nexthop;
244 int tmp;
245 int err;
246
247 dp->dccps_role = DCCP_ROLE_CLIENT;
248
249 if (addr_len < sizeof(struct sockaddr_in))
250 return -EINVAL;
251
252 if (usin->sin_family != AF_INET)
253 return -EAFNOSUPPORT;
254
255 nexthop = daddr = usin->sin_addr.s_addr;
256 if (inet->opt != NULL && inet->opt->srr) {
257 if (daddr == 0)
258 return -EINVAL;
259 nexthop = inet->opt->faddr;
260 }
261
262 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
263 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
264 IPPROTO_DCCP,
265 inet->sport, usin->sin_port, sk);
266 if (tmp < 0)
267 return tmp;
268
269 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
270 ip_rt_put(rt);
271 return -ENETUNREACH;
272 }
273
274 if (inet->opt == NULL || !inet->opt->srr)
275 daddr = rt->rt_dst;
276
277 if (inet->saddr == 0)
278 inet->saddr = rt->rt_src;
279 inet->rcv_saddr = inet->saddr;
280
281 inet->dport = usin->sin_port;
282 inet->daddr = daddr;
283
284 dp->dccps_ext_header_len = 0;
285 if (inet->opt != NULL)
286 dp->dccps_ext_header_len = inet->opt->optlen;
287 /*
288 * Socket identity is still unknown (sport may be zero).
289	 * However we set the state to DCCP_REQUESTING and, without releasing
290	 * the socket lock, select a source port, enter ourselves into the
291	 * hash tables and complete the initialization afterwards.
292 */
293 dccp_set_state(sk, DCCP_REQUESTING);
294 err = dccp_v4_hash_connect(sk);
295 if (err != 0)
296 goto failure;
297
298 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
299 if (err != 0)
300 goto failure;
301
302 /* OK, now commit destination to socket. */
303 sk_setup_caps(sk, &rt->u.dst);
304
305 dp->dccps_gar =
306 dp->dccps_iss = secure_dccp_sequence_number(inet->saddr,
307 inet->daddr,
308 inet->sport,
309 usin->sin_port);
310 dccp_update_gss(sk, dp->dccps_iss);
311
312 /*
313 * SWL and AWL are initially adjusted so that they are not less than
314 * the initial Sequence Numbers received and sent, respectively:
315 * SWL := max(GSR + 1 - floor(W/4), ISR),
316 * AWL := max(GSS - W' + 1, ISS).
317 * These adjustments MUST be applied only at the beginning of the
318 * connection.
319 */
320 dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
321
322 inet->id = dp->dccps_iss ^ jiffies;
323
324 err = dccp_connect(sk);
325 rt = NULL;
326 if (err != 0)
327 goto failure;
328out:
329 return err;
330failure:
331 /*
332 * This unhashes the socket and releases the local port, if necessary.
333 */
334 dccp_set_state(sk, DCCP_CLOSED);
335 ip_rt_put(rt);
336 sk->sk_route_caps = 0;
337 inet->dport = 0;
338 goto out;
339}
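
The AWL clamp at the end of dccp_v4_connect implements the quoted rule with 48-bit circular arithmetic. A minimal sketch of the helpers involved; after48() and max48() here are assumptions written to mirror the kernel's 48-bit sequence macros, not the in-tree definitions:

#include <stdint.h>

#define DCCP_SEQ_MASK	0xFFFFFFFFFFFFULL	/* 48-bit sequence space */

/* Circular "a is after b": shift both 48-bit values into the top 48
 * bits of a 64-bit word so signed subtraction wraps correctly
 * (assumed equivalent of the kernel's after48()). */
static int after48(uint64_t a, uint64_t b)
{
	return (int64_t)((a << 16) - (b << 16)) > 0;
}

static uint64_t max48(uint64_t a, uint64_t b)
{
	return after48(a, b) ? a : b;
}

/* AWL := max(GSS - W' + 1, ISS), all arithmetic mod 2^48 */
static uint64_t initial_awl(uint64_t gss, uint64_t w_prime, uint64_t iss)
{
	return max48((gss - w_prime + 1) & DCCP_SEQ_MASK, iss);
}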
340
341/*
342 * This routine does path mtu discovery as defined in RFC1191.
343 */
344static inline void dccp_do_pmtu_discovery(struct sock *sk,
345 const struct iphdr *iph,
346 u32 mtu)
347{
348 struct dst_entry *dst;
349 const struct inet_sock *inet = inet_sk(sk);
350 const struct dccp_sock *dp = dccp_sk(sk);
351
352 /* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
353	 * sent out by Linux are always < 576 bytes so they should go through
354 * unfragmented).
355 */
356 if (sk->sk_state == DCCP_LISTEN)
357 return;
358
359	/* We don't check in the dst entry if pmtu discovery is forbidden
360	 * on this route. We just assume that no packet-too-big packets
361	 * are sent back when pmtu discovery is not active.
362 * There is a small race when the user changes this flag in the
363 * route, but I think that's acceptable.
364 */
365 if ((dst = __sk_dst_check(sk, 0)) == NULL)
366 return;
367
368 dst->ops->update_pmtu(dst, mtu);
369
370	/* Something is about to go wrong... Remember the soft error
371	 * for the case that this connection will not be able to recover.
372 */
373 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
374 sk->sk_err_soft = EMSGSIZE;
375
376 mtu = dst_mtu(dst);
377
378 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
379 dp->dccps_pmtu_cookie > mtu) {
380 dccp_sync_mss(sk, mtu);
381
382 /*
383 * From: draft-ietf-dccp-spec-11.txt
384 *
385 * DCCP-Sync packets are the best choice for upward
386 * probing, since DCCP-Sync probes do not risk application
387 * data loss.
388 */
389 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
390 } /* else let the usual retransmit timer handle it */
391}
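
In short, the routine acts only when PMTU discovery is enabled on the socket and the reported MTU undercuts the cached path MTU; it then shrinks the MSS and probes upward with a DCCP-Sync, which risks no application data. A compressed sketch of that decision, with hypothetical field names:

static void pmtu_event(unsigned int reported_mtu,
		       unsigned int *cached_pmtu, int pmtud_enabled)
{
	if (!pmtud_enabled || reported_mtu >= *cached_pmtu)
		return;			/* nothing to do */

	*cached_pmtu = reported_mtu;	/* resync the MSS to this... */
	/* ...and send a DCCP-Sync as the upward probe */
}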
392
393static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
394{
395 int err;
396 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
397 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
398 sizeof(struct dccp_hdr_ext) +
399 sizeof(struct dccp_hdr_ack_bits);
400 struct sk_buff *skb;
401
402 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
403 return;
404
405 skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
406 if (skb == NULL)
407 return;
408
409 /* Reserve space for headers. */
410 skb_reserve(skb, MAX_DCCP_HEADER);
411
412 skb->dst = dst_clone(rxskb->dst);
413
414 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
415 dh = dccp_hdr(skb);
416 memset(dh, 0, dccp_hdr_ack_len);
417
418 /* Build DCCP header and checksum it. */
419 dh->dccph_type = DCCP_PKT_ACK;
420 dh->dccph_sport = rxdh->dccph_dport;
421 dh->dccph_dport = rxdh->dccph_sport;
422 dh->dccph_doff = dccp_hdr_ack_len / 4;
423 dh->dccph_x = 1;
424
425 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
426 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
427 DCCP_SKB_CB(rxskb)->dccpd_seq);
428
429 bh_lock_sock(dccp_ctl_socket->sk);
430 err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
431 rxskb->nh.iph->daddr,
432 rxskb->nh.iph->saddr, NULL);
433 bh_unlock_sock(dccp_ctl_socket->sk);
434
435 if (err == NET_XMIT_CN || err == 0) {
436 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
437 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
438 }
439}
440
441static void dccp_v4_reqsk_send_ack(struct sk_buff *skb,
442 struct request_sock *req)
443{
444 dccp_v4_ctl_send_ack(skb);
445}
446
447static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
448 struct dst_entry *dst)
449{
450 int err = -1;
451 struct sk_buff *skb;
452
453 /* First, grab a route. */
454
455 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
456 goto out;
457
458 skb = dccp_make_response(sk, dst, req);
459 if (skb != NULL) {
460 const struct inet_request_sock *ireq = inet_rsk(req);
461
462 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
463 ireq->rmt_addr,
464 ireq->opt);
465 if (err == NET_XMIT_CN)
466 err = 0;
467 }
468
469out:
470 dst_release(dst);
471 return err;
472}
473
474/*
475 * This routine is called by the ICMP module when it gets some sort of error
476 * condition. If err < 0 then the socket should be closed and the error
477 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
478 * After adjustment the header points to the first 8 bytes of the DCCP header. We
479 * need to find the appropriate port.
480 *
481 * The locking strategy used here is very "optimistic". When someone else
482 * accesses the socket the ICMP is just dropped and for some paths there is no
483 * check at all. A more general error queue to queue errors for later handling
484 * is probably better.
485 */
486void dccp_v4_err(struct sk_buff *skb, u32 info)
487{
488 const struct iphdr *iph = (struct iphdr *)skb->data;
489 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +
490 (iph->ihl << 2));
491 struct dccp_sock *dp;
492 struct inet_sock *inet;
493 const int type = skb->h.icmph->type;
494 const int code = skb->h.icmph->code;
495 struct sock *sk;
496 __u64 seq;
497 int err;
498
499 if (skb->len < (iph->ihl << 2) + 8) {
500 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
501 return;
502 }
503
504 sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport,
505 iph->saddr, dh->dccph_sport, inet_iif(skb));
506 if (sk == NULL) {
507 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
508 return;
509 }
510
511 if (sk->sk_state == DCCP_TIME_WAIT) {
512 inet_twsk_put((struct inet_timewait_sock *)sk);
513 return;
514 }
515
516 bh_lock_sock(sk);
517 /* If too many ICMPs get dropped on busy
518 * servers this needs to be solved differently.
519 */
520 if (sock_owned_by_user(sk))
521 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
522
523 if (sk->sk_state == DCCP_CLOSED)
524 goto out;
525
526 dp = dccp_sk(sk);
527 seq = dccp_hdr_seq(skb);
528 if (sk->sk_state != DCCP_LISTEN &&
529 !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
530 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
531 goto out;
532 }
533
534 switch (type) {
535 case ICMP_SOURCE_QUENCH:
536 /* Just silently ignore these. */
537 goto out;
538 case ICMP_PARAMETERPROB:
539 err = EPROTO;
540 break;
541 case ICMP_DEST_UNREACH:
542 if (code > NR_ICMP_UNREACH)
543 goto out;
544
545 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
546 if (!sock_owned_by_user(sk))
547 dccp_do_pmtu_discovery(sk, iph, info);
548 goto out;
549 }
550
551 err = icmp_err_convert[code].errno;
552 break;
553 case ICMP_TIME_EXCEEDED:
554 err = EHOSTUNREACH;
555 break;
556 default:
557 goto out;
558 }
559
560 switch (sk->sk_state) {
561	struct request_sock *req, **prev;
562 case DCCP_LISTEN:
563 if (sock_owned_by_user(sk))
564 goto out;
565 req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
566 iph->daddr, iph->saddr);
567 if (!req)
568 goto out;
569
570 /*
571 * ICMPs are not backlogged, hence we cannot get an established
572 * socket here.
573 */
574 BUG_TRAP(!req->sk);
575
576 if (seq != dccp_rsk(req)->dreq_iss) {
577 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
578 goto out;
579 }
580 /*
581 * Still in RESPOND, just remove it silently.
582 * There is no good way to pass the error to the newly
583 * created socket, and POSIX does not want network
584 * errors returned from accept().
585 */
586 inet_csk_reqsk_queue_drop(sk, req, prev);
587 goto out;
588
589 case DCCP_REQUESTING:
590 case DCCP_RESPOND:
591 if (!sock_owned_by_user(sk)) {
592 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
593 sk->sk_err = err;
594
595 sk->sk_error_report(sk);
596
597 dccp_done(sk);
598 } else
599 sk->sk_err_soft = err;
600 goto out;
601 }
602
603 /* If we've already connected we will keep trying
604 * until we time out, or the user gives up.
605 *
606	 * rfc1122 4.2.3.9 allows us to consider as hard errors
607	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
608	 * but it is obsoleted by pmtu discovery).
609	 *
610	 * Note that in the modern internet, where routing is unreliable
611	 * and broken firewalls sit in every dark corner, sending random
612	 * errors ordered by their masters, even these two messages finally
613	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
614 *
615 * Now we are in compliance with RFCs.
616 * --ANK (980905)
617 */
618
619 inet = inet_sk(sk);
620 if (!sock_owned_by_user(sk) && inet->recverr) {
621 sk->sk_err = err;
622 sk->sk_error_report(sk);
623 } else /* Only an error on timeout */
624 sk->sk_err_soft = err;
625out:
626 bh_unlock_sock(sk);
627 sock_put(sk);
628}
629
630int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
631{
632 struct sk_buff *skb;
633 /*
634 * FIXME: what if rebuild_header fails?
635 * Should we be doing a rebuild_header here?
636 */
637 int err = inet_sk_rebuild_header(sk);
638
639 if (err != 0)
640 return err;
641
642 skb = dccp_make_reset(sk, sk->sk_dst_cache, code);
643 if (skb != NULL) {
644 const struct dccp_sock *dp = dccp_sk(sk);
645 const struct inet_sock *inet = inet_sk(sk);
646
647 err = ip_build_and_send_pkt(skb, sk,
648 inet->saddr, inet->daddr, NULL);
649 if (err == NET_XMIT_CN)
650 err = 0;
651
652 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
653 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
654 }
655
656 return err;
657}
658
659static inline u64 dccp_v4_init_sequence(const struct sock *sk,
660 const struct sk_buff *skb)
661{
662 return secure_dccp_sequence_number(skb->nh.iph->daddr,
663 skb->nh.iph->saddr,
664 dccp_hdr(skb)->dccph_dport,
665 dccp_hdr(skb)->dccph_sport);
666}
667
668int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
669{
670 struct inet_request_sock *ireq;
671 struct dccp_sock dp;
672 struct request_sock *req;
673 struct dccp_request_sock *dreq;
674 const __u32 saddr = skb->nh.iph->saddr;
675 const __u32 daddr = skb->nh.iph->daddr;
676 struct dst_entry *dst = NULL;
677
678	/* Never answer DCCP_PKT_REQUESTs sent to broadcast or multicast */
679 if (((struct rtable *)skb->dst)->rt_flags &
680 (RTCF_BROADCAST | RTCF_MULTICAST))
681 goto drop;
682
683 /*
684 * TW buckets are converted to open requests without
685	 * limitations; they conserve resources and the peer is
686	 * evidently a real one.
687 */
688 if (inet_csk_reqsk_queue_is_full(sk))
689 goto drop;
690
691 /*
692 * Accept backlog is full. If we have already queued enough
693	 * warm entries in the syn queue, drop this request. It is better
694	 * than clogging the syn queue with openreqs with exponentially
695	 * increasing timeouts.
696 */
697 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
698 goto drop;
699
700 req = reqsk_alloc(sk->sk_prot->rsk_prot);
701 if (req == NULL)
702 goto drop;
703
704 /* FIXME: process options */
705
706 dccp_openreq_init(req, &dp, skb);
707
708 ireq = inet_rsk(req);
709 ireq->loc_addr = daddr;
710 ireq->rmt_addr = saddr;
711 /* FIXME: Merge Aristeu's option parsing code when ready */
712 req->rcv_wnd = 100; /* Fake, option parsing will get the
713 right value */
714 ireq->opt = NULL;
715
716 /*
717 * Step 3: Process LISTEN state
718 *
719 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
720 *
721 * In fact we defer setting S.GSR, S.SWL, S.SWH to
722 * dccp_create_openreq_child.
723 */
724 dreq = dccp_rsk(req);
725 dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
726 dreq->dreq_iss = dccp_v4_init_sequence(sk, skb);
727 dreq->dreq_service = dccp_hdr_request(skb)->dccph_req_service;
728
729 if (dccp_v4_send_response(sk, req, dst))
730 goto drop_and_free;
731
732 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
733 return 0;
734
735drop_and_free:
736 /*
737 * FIXME: should be reqsk_free after implementing req->rsk_ops
738 */
739 __reqsk_free(req);
740drop:
741 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
742 return -1;
743}
744
745/*
746 * The three way handshake has completed - we got a valid ACK or DATAACK -
747 * now create the new socket.
748 *
749 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
750 */
751struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
752 struct request_sock *req,
753 struct dst_entry *dst)
754{
755 struct inet_request_sock *ireq;
756 struct inet_sock *newinet;
757 struct dccp_sock *newdp;
758 struct sock *newsk;
759
760 if (sk_acceptq_is_full(sk))
761 goto exit_overflow;
762
763 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
764 goto exit;
765
766 newsk = dccp_create_openreq_child(sk, req, skb);
767 if (newsk == NULL)
768 goto exit;
769
770 sk_setup_caps(newsk, dst);
771
772 newdp = dccp_sk(newsk);
773 newinet = inet_sk(newsk);
774 ireq = inet_rsk(req);
775 newinet->daddr = ireq->rmt_addr;
776 newinet->rcv_saddr = ireq->loc_addr;
777 newinet->saddr = ireq->loc_addr;
778 newinet->opt = ireq->opt;
779 ireq->opt = NULL;
780 newinet->mc_index = inet_iif(skb);
781 newinet->mc_ttl = skb->nh.iph->ttl;
782 newinet->id = jiffies;
783
784 dccp_sync_mss(newsk, dst_mtu(dst));
785
786 __inet_hash(&dccp_hashinfo, newsk, 0);
787 __inet_inherit_port(&dccp_hashinfo, sk, newsk);
788
789 return newsk;
790
791exit_overflow:
792 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
793exit:
794 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
795 dst_release(dst);
796 return NULL;
797}
798
799static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
800{
801 const struct dccp_hdr *dh = dccp_hdr(skb);
802 const struct iphdr *iph = skb->nh.iph;
803 struct sock *nsk;
804 struct request_sock **prev;
805 /* Find possible connection requests. */
806 struct request_sock *req = inet_csk_search_req(sk, &prev,
807 dh->dccph_sport,
808 iph->saddr, iph->daddr);
809 if (req != NULL)
810 return dccp_check_req(sk, skb, req, prev);
811
812 nsk = __inet_lookup_established(&dccp_hashinfo,
813 iph->saddr, dh->dccph_sport,
814 iph->daddr, ntohs(dh->dccph_dport),
815 inet_iif(skb));
816 if (nsk != NULL) {
817 if (nsk->sk_state != DCCP_TIME_WAIT) {
818 bh_lock_sock(nsk);
819 return nsk;
820 }
821 inet_twsk_put((struct inet_timewait_sock *)nsk);
822 return NULL;
823 }
824
825 return sk;
826}
827
828int dccp_v4_checksum(const struct sk_buff *skb, const u32 saddr,
829 const u32 daddr)
830{
831 const struct dccp_hdr* dh = dccp_hdr(skb);
832 int checksum_len;
833 u32 tmp;
834
835 if (dh->dccph_cscov == 0)
836 checksum_len = skb->len;
837 else {
838 checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
839 checksum_len = checksum_len < skb->len ? checksum_len :
840 skb->len;
841 }
842
843 tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
844 return csum_tcpudp_magic(saddr, daddr, checksum_len,
845 IPPROTO_DCCP, tmp);
846}
847
848static int dccp_v4_verify_checksum(struct sk_buff *skb,
849 const u32 saddr, const u32 daddr)
850{
851 struct dccp_hdr *dh = dccp_hdr(skb);
852 int checksum_len;
853 u32 tmp;
854
855 if (dh->dccph_cscov == 0)
856 checksum_len = skb->len;
857 else {
858 checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
859 checksum_len = checksum_len < skb->len ? checksum_len :
860 skb->len;
861 }
862 tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
863 return csum_tcpudp_magic(saddr, daddr, checksum_len,
864 IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
865}
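
Both checksum routines above derive the coverage length the same way: CsCov == 0 covers the whole packet, otherwise (CsCov + X) 32-bit words, capped at the packet length. A standalone restatement of that formula, handy for unit tests:

#include <stddef.h>
#include <stdint.h>

/* Checksum coverage in bytes, mirroring the formula used above. */
static size_t dccp_csum_coverage(size_t pkt_len, unsigned int cscov,
				 unsigned int x)
{
	size_t len;

	if (cscov == 0)
		return pkt_len;		/* cover everything */
	len = (cscov + x) * sizeof(uint32_t);
	return len < pkt_len ? len : pkt_len;
}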
866
867static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
868 struct sk_buff *skb)
869{
870 struct rtable *rt;
871 struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,
872 .nl_u = { .ip4_u =
873 { .daddr = skb->nh.iph->saddr,
874 .saddr = skb->nh.iph->daddr,
875 .tos = RT_CONN_FLAGS(sk) } },
876 .proto = sk->sk_protocol,
877 .uli_u = { .ports =
878 { .sport = dccp_hdr(skb)->dccph_dport,
879 .dport = dccp_hdr(skb)->dccph_sport }
880 }
881 };
882
883 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
884 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
885 return NULL;
886 }
887
888 return &rt->u.dst;
889}
890
891static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
892{
893 int err;
894 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
895 const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
896 sizeof(struct dccp_hdr_ext) +
897 sizeof(struct dccp_hdr_reset);
898 struct sk_buff *skb;
899 struct dst_entry *dst;
900 u64 seqno;
901
902 /* Never send a reset in response to a reset. */
903 if (rxdh->dccph_type == DCCP_PKT_RESET)
904 return;
905
906 if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
907 return;
908
909 dst = dccp_v4_route_skb(dccp_ctl_socket->sk, rxskb);
910 if (dst == NULL)
911 return;
912
913 skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
914 if (skb == NULL)
915 goto out;
916
917 /* Reserve space for headers. */
918 skb_reserve(skb, MAX_DCCP_HEADER);
919 skb->dst = dst_clone(dst);
920
921 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
922 dh = dccp_hdr(skb);
923 memset(dh, 0, dccp_hdr_reset_len);
924
925 /* Build DCCP header and checksum it. */
926 dh->dccph_type = DCCP_PKT_RESET;
927 dh->dccph_sport = rxdh->dccph_dport;
928 dh->dccph_dport = rxdh->dccph_sport;
929 dh->dccph_doff = dccp_hdr_reset_len / 4;
930 dh->dccph_x = 1;
931 dccp_hdr_reset(skb)->dccph_reset_code =
932 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
933
934 /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
935 seqno = 0;
936 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
937 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
938
939 dccp_hdr_set_seq(dh, seqno);
940 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
941 DCCP_SKB_CB(rxskb)->dccpd_seq);
942
943 dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
944 rxskb->nh.iph->daddr);
945
946 bh_lock_sock(dccp_ctl_socket->sk);
947 err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
948 rxskb->nh.iph->daddr,
949 rxskb->nh.iph->saddr, NULL);
950 bh_unlock_sock(dccp_ctl_socket->sk);
951
952 if (err == NET_XMIT_CN || err == 0) {
953 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
954 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
955 }
956out:
957 dst_release(dst);
958}
959
960int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
961{
962 struct dccp_hdr *dh = dccp_hdr(skb);
963
964 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
965 if (dccp_rcv_established(sk, skb, dh, skb->len))
966 goto reset;
967 return 0;
968 }
969
970 /*
971 * Step 3: Process LISTEN state
972 * If S.state == LISTEN,
973 * If P.type == Request or P contains a valid Init Cookie
974 * option,
975 * * Must scan the packet's options to check for an Init
976 * Cookie. Only the Init Cookie is processed here,
977 * however; other options are processed in Step 8. This
978 * scan need only be performed if the endpoint uses Init
979 * Cookies *
980 * * Generate a new socket and switch to that socket *
981 * Set S := new socket for this port pair
982 * S.state = RESPOND
983 * Choose S.ISS (initial seqno) or set from Init Cookie
984 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
985 * Continue with S.state == RESPOND
986 * * A Response packet will be generated in Step 11 *
987 * Otherwise,
988 * Generate Reset(No Connection) unless P.type == Reset
989 * Drop packet and return
990 *
991 * NOTE: the check for the packet types is done in
992 * dccp_rcv_state_process
993 */
994 if (sk->sk_state == DCCP_LISTEN) {
995 struct sock *nsk = dccp_v4_hnd_req(sk, skb);
996
997 if (nsk == NULL)
998 goto discard;
999
1000 if (nsk != sk) {
1001 if (dccp_child_process(sk, nsk, skb))
1002 goto reset;
1003 return 0;
1004 }
1005 }
1006
1007 if (dccp_rcv_state_process(sk, skb, dh, skb->len))
1008 goto reset;
1009 return 0;
1010
1011reset:
1012 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
1013 dccp_v4_ctl_send_reset(skb);
1014discard:
1015 kfree_skb(skb);
1016 return 0;
1017}
1018
1019static inline int dccp_invalid_packet(struct sk_buff *skb)
1020{
1021 const struct dccp_hdr *dh;
1022
1023 if (skb->pkt_type != PACKET_HOST)
1024 return 1;
1025
1026 if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
1027 LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n");
1028 return 1;
1029 }
1030
1031 dh = dccp_hdr(skb);
1032
1033 /* If the packet type is not understood, drop packet and return */
1034 if (dh->dccph_type >= DCCP_PKT_INVALID) {
1035 LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n");
1036 return 1;
1037 }
1038
1039 /*
1040 * If P.Data Offset is too small for packet type, or too large for
1041 * packet, drop packet and return
1042 */
1043 if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
1044 LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
1045 "too small 1\n",
1046 dh->dccph_doff);
1047 return 1;
1048 }
1049
1050 if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
1051 LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
1052 "too small 2\n",
1053 dh->dccph_doff);
1054 return 1;
1055 }
1056
1057 dh = dccp_hdr(skb);
1058
1059 /*
1060 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
1061 * has short sequence numbers), drop packet and return
1062 */
1063 if (dh->dccph_x == 0 &&
1064 dh->dccph_type != DCCP_PKT_DATA &&
1065 dh->dccph_type != DCCP_PKT_ACK &&
1066 dh->dccph_type != DCCP_PKT_DATAACK) {
1067 LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack "
1068 "nor DataAck and P.X == 0\n",
1069 dccp_packet_name(dh->dccph_type));
1070 return 1;
1071 }
1072
1073 /* If the header checksum is incorrect, drop packet and return */
1074 if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
1075 skb->nh.iph->daddr) < 0) {
1076 LIMIT_NETDEBUG(KERN_WARNING "DCCP: header checksum is "
1077 "incorrect\n");
1078 return 1;
1079 }
1080
1081 return 0;
1082}
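
The two Data Offset tests in dccp_invalid_packet reduce to a single invariant: doff, counted in 32-bit words, must at least cover the type-specific fixed header and must not run past the packet. A hedged restatement, assuming hdr_len is what dccp_hdr_len() would return:

#include <stddef.h>

static int dccp_doff_valid(unsigned int doff_words, size_t hdr_len,
			   size_t pkt_len)
{
	const size_t doff_bytes = doff_words * 4;	/* words -> bytes */

	return doff_bytes >= hdr_len && doff_bytes <= pkt_len;
}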
1083
1084/* this is called when real data arrives */
1085int dccp_v4_rcv(struct sk_buff *skb)
1086{
1087 const struct dccp_hdr *dh;
1088 struct sock *sk;
1089 int rc;
1090
1091 /* Step 1: Check header basics: */
1092
1093 if (dccp_invalid_packet(skb))
1094 goto discard_it;
1095
1096 dh = dccp_hdr(skb);
1097#if 0
1098 /*
1099 * Use something like this to simulate some DATA/DATAACK loss to test
1100 * dccp_ackpkts_add, you'll get something like this on a session that
1101 * sends 10 DATA/DATAACK packets:
1102 *
1103 * ackpkts_print: 281473596467422 |0,0|3,0|0,0|3,0|0,0|3,0|0,0|3,0|0,1|
1104 *
1105 * 0, 0 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == just this packet
1106 * 0, 1 means: DCCP_ACKPKTS_STATE_RECEIVED, RLE == two adjacent packets
1107 * with the same state
1108 * 3, 0 means: DCCP_ACKPKTS_STATE_NOT_RECEIVED, RLE == just this packet
1109 *
1110 * So...
1111 *
1112 * 281473596467422 was received
1113 * 281473596467421 was not received
1114 * 281473596467420 was received
1115 * 281473596467419 was not received
1116 * 281473596467418 was received
1117 * 281473596467417 was not received
1118 * 281473596467416 was received
1119 * 281473596467415 was not received
1120 * 281473596467414 was received
1121 * 281473596467413 was received (this one was the 3way handshake
1122 * RESPONSE)
1123 *
1124 */
1125 if (dh->dccph_type == DCCP_PKT_DATA ||
1126 dh->dccph_type == DCCP_PKT_DATAACK) {
1127 static int discard = 0;
1128
1129 if (discard) {
1130 discard = 0;
1131 goto discard_it;
1132 }
1133 discard = 1;
1134 }
1135#endif
1136 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1137 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1138
1139 dccp_pr_debug("%8.8s "
1140 "src=%u.%u.%u.%u@%-5d "
1141 "dst=%u.%u.%u.%u@%-5d seq=%llu",
1142 dccp_packet_name(dh->dccph_type),
1143 NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport),
1144 NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport),
1145 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
1146
1147 if (dccp_packet_without_ack(skb)) {
1148 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1149 dccp_pr_debug_cat("\n");
1150 } else {
1151 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1152 dccp_pr_debug_cat(", ack=%llu\n",
1153 (unsigned long long)
1154 DCCP_SKB_CB(skb)->dccpd_ack_seq);
1155 }
1156
1157 /* Step 2:
1158 * Look up flow ID in table and get corresponding socket */
1159 sk = __inet_lookup(&dccp_hashinfo,
1160 skb->nh.iph->saddr, dh->dccph_sport,
1161 skb->nh.iph->daddr, ntohs(dh->dccph_dport),
1162 inet_iif(skb));
1163
1164 /*
1165 * Step 2:
1166 * If no socket ...
1167 * Generate Reset(No Connection) unless P.type == Reset
1168 * Drop packet and return
1169 */
1170 if (sk == NULL) {
1171 dccp_pr_debug("failed to look up flow ID in table and "
1172 "get corresponding socket\n");
1173 goto no_dccp_socket;
1174 }
1175
1176 /*
1177 * Step 2:
1178 * ... or S.state == TIMEWAIT,
1179 * Generate Reset(No Connection) unless P.type == Reset
1180 * Drop packet and return
1181 */
1182
1183 if (sk->sk_state == DCCP_TIME_WAIT) {
1184 dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "
1185 "do_time_wait\n");
1186 goto do_time_wait;
1187 }
1188
1189 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
1190 dccp_pr_debug("xfrm4_policy_check failed\n");
1191 goto discard_and_relse;
1192 }
1193
1194 if (sk_filter(sk, skb, 0)) {
1195 dccp_pr_debug("sk_filter failed\n");
1196 goto discard_and_relse;
1197 }
1198
1199 skb->dev = NULL;
1200
1201 bh_lock_sock(sk);
1202 rc = 0;
1203 if (!sock_owned_by_user(sk))
1204 rc = dccp_v4_do_rcv(sk, skb);
1205 else
1206 sk_add_backlog(sk, skb);
1207 bh_unlock_sock(sk);
1208
1209 sock_put(sk);
1210 return rc;
1211
1212no_dccp_socket:
1213 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1214 goto discard_it;
1215 /*
1216 * Step 2:
1217 * Generate Reset(No Connection) unless P.type == Reset
1218 * Drop packet and return
1219 */
1220 if (dh->dccph_type != DCCP_PKT_RESET) {
1221 DCCP_SKB_CB(skb)->dccpd_reset_code =
1222 DCCP_RESET_CODE_NO_CONNECTION;
1223 dccp_v4_ctl_send_reset(skb);
1224 }
1225
1226discard_it:
1227 /* Discard frame. */
1228 kfree_skb(skb);
1229 return 0;
1230
1231discard_and_relse:
1232 sock_put(sk);
1233 goto discard_it;
1234
1235do_time_wait:
1236 inet_twsk_put((struct inet_timewait_sock *)sk);
1237 goto no_dccp_socket;
1238}
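
The #if 0 block in dccp_v4_rcv documents the ack vector bytes behind the |0,0|3,0|... debug output: each byte carries a state in its top bits (0 = received, 3 = not received) and a run length in its low bits, where run length N covers N + 1 consecutive packets counting downward from the ack number. A hedged decoder sketch; the 2-bit state / 6-bit run-length split is an assumption taken from the DCCP draft, not from this file:

#include <stdint.h>
#include <stdio.h>

static void ackvec_decode(uint64_t ack_ackno, const uint8_t *vec, int len)
{
	while (len-- > 0) {
		const uint8_t state = *vec >> 6;	/* assumed 2-bit state */
		unsigned int run = (*vec++ & 0x3F) + 1;	/* N means N+1 pkts */

		while (run-- > 0) {
			printf("%llu was %s\n",
			       (unsigned long long)ack_ackno,
			       state == 0 ? "received" : "not received");
			ack_ackno = (ack_ackno - 1) & 0xFFFFFFFFFFFFULL;
		}
	}
}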
1239
1240static int dccp_v4_init_sock(struct sock *sk)
1241{
1242 struct dccp_sock *dp = dccp_sk(sk);
1243 static int dccp_ctl_socket_init = 1;
1244
1245 dccp_options_init(&dp->dccps_options);
1246
1247 if (dp->dccps_options.dccpo_send_ack_vector) {
1248 dp->dccps_hc_rx_ackpkts =
1249 dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN,
1250 GFP_KERNEL);
1251
1252 if (dp->dccps_hc_rx_ackpkts == NULL)
1253 return -ENOMEM;
1254 }
1255
1256 /*
1257 * FIXME: We're hardcoding the CCID, and doing this at this point makes
1258 * the listening (master) sock get CCID control blocks, which is not
1259 * necessary, but for now, to not mess with the test userspace apps,
1260	 * let's leave it here; later the real solution is to do this in a
1261 * setsockopt(CCIDs-I-want/accept). -acme
1262 */
1263 if (likely(!dccp_ctl_socket_init)) {
1264 dp->dccps_hc_rx_ccid = ccid_init(dp->dccps_options.dccpo_ccid,
1265 sk);
1266 dp->dccps_hc_tx_ccid = ccid_init(dp->dccps_options.dccpo_ccid,
1267 sk);
1268 if (dp->dccps_hc_rx_ccid == NULL ||
1269 dp->dccps_hc_tx_ccid == NULL) {
1270 ccid_exit(dp->dccps_hc_rx_ccid, sk);
1271 ccid_exit(dp->dccps_hc_tx_ccid, sk);
1272 dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts);
1273 dp->dccps_hc_rx_ackpkts = NULL;
1274 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
1275 return -ENOMEM;
1276 }
1277 } else
1278 dccp_ctl_socket_init = 0;
1279
1280 dccp_init_xmit_timers(sk);
1281 inet_csk(sk)->icsk_rto = DCCP_TIMEOUT_INIT;
1282 sk->sk_state = DCCP_CLOSED;
1283 sk->sk_write_space = dccp_write_space;
1284 dp->dccps_mss_cache = 536;
1285 dp->dccps_role = DCCP_ROLE_UNDEFINED;
1286
1287 return 0;
1288}
1289
1290static int dccp_v4_destroy_sock(struct sock *sk)
1291{
1292 struct dccp_sock *dp = dccp_sk(sk);
1293
1294 /*
1295	 * DCCP doesn't use sk_write_queue, just sk_send_head
1296 * for retransmissions
1297 */
1298 if (sk->sk_send_head != NULL) {
1299 kfree_skb(sk->sk_send_head);
1300 sk->sk_send_head = NULL;
1301 }
1302
1303 /* Clean up a referenced DCCP bind bucket. */
1304 if (inet_csk(sk)->icsk_bind_hash != NULL)
1305 inet_put_port(&dccp_hashinfo, sk);
1306
1307 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
1308 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
1309 dccp_ackpkts_free(dp->dccps_hc_rx_ackpkts);
1310 dp->dccps_hc_rx_ackpkts = NULL;
1311 ccid_exit(dp->dccps_hc_rx_ccid, sk);
1312 ccid_exit(dp->dccps_hc_tx_ccid, sk);
1313 dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
1314
1315 return 0;
1316}
1317
1318static void dccp_v4_reqsk_destructor(struct request_sock *req)
1319{
1320 kfree(inet_rsk(req)->opt);
1321}
1322
1323static struct request_sock_ops dccp_request_sock_ops = {
1324 .family = PF_INET,
1325 .obj_size = sizeof(struct dccp_request_sock),
1326 .rtx_syn_ack = dccp_v4_send_response,
1327 .send_ack = dccp_v4_reqsk_send_ack,
1328 .destructor = dccp_v4_reqsk_destructor,
1329 .send_reset = dccp_v4_ctl_send_reset,
1330};
1331
1332struct proto dccp_v4_prot = {
1333 .name = "DCCP",
1334 .owner = THIS_MODULE,
1335 .close = dccp_close,
1336 .connect = dccp_v4_connect,
1337 .disconnect = dccp_disconnect,
1338 .ioctl = dccp_ioctl,
1339 .init = dccp_v4_init_sock,
1340 .setsockopt = dccp_setsockopt,
1341 .getsockopt = dccp_getsockopt,
1342 .sendmsg = dccp_sendmsg,
1343 .recvmsg = dccp_recvmsg,
1344 .backlog_rcv = dccp_v4_do_rcv,
1345 .hash = dccp_v4_hash,
1346 .unhash = dccp_v4_unhash,
1347 .accept = inet_csk_accept,
1348 .get_port = dccp_v4_get_port,
1349 .shutdown = dccp_shutdown,
1350 .destroy = dccp_v4_destroy_sock,
1351 .orphan_count = &dccp_orphan_count,
1352 .max_header = MAX_DCCP_HEADER,
1353 .obj_size = sizeof(struct dccp_sock),
1354 .rsk_prot = &dccp_request_sock_ops,
1355 .twsk_obj_size = sizeof(struct inet_timewait_sock),
1356};
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
new file mode 100644
index 000000000000..ce5dff4ac22e
--- /dev/null
+++ b/net/dccp/minisocks.c
@@ -0,0 +1,264 @@
1/*
2 * net/dccp/minisocks.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/dccp.h>
15#include <linux/skbuff.h>
16#include <linux/timer.h>
17
18#include <net/sock.h>
19#include <net/xfrm.h>
20#include <net/inet_timewait_sock.h>
21
22#include "ccid.h"
23#include "dccp.h"
24
25struct inet_timewait_death_row dccp_death_row = {
26 .sysctl_max_tw_buckets = NR_FILE * 2,
27 .period = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
28 .death_lock = SPIN_LOCK_UNLOCKED,
29 .hashinfo = &dccp_hashinfo,
30 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
31 (unsigned long)&dccp_death_row),
32 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
33 inet_twdr_twkill_work,
34 &dccp_death_row),
35/* Short-time timewait calendar */
36
37 .twcal_hand = -1,
38 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
39 (unsigned long)&dccp_death_row),
40};
41
42void dccp_time_wait(struct sock *sk, int state, int timeo)
43{
44 struct inet_timewait_sock *tw = NULL;
45
46 if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
47 tw = inet_twsk_alloc(sk, state);
48
49 if (tw != NULL) {
50 const struct inet_connection_sock *icsk = inet_csk(sk);
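		/* (rto << 2) - (rto >> 1) = 4*RTO - RTO/2 = 3.5*RTO,
		 * the same minimum TIME-WAIT period TCP uses */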
51 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
52
53 /* Linkage updates. */
54 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
55
56 /* Get the TIME_WAIT timeout firing. */
57 if (timeo < rto)
58 timeo = rto;
59
60 tw->tw_timeout = DCCP_TIMEWAIT_LEN;
61 if (state == DCCP_TIME_WAIT)
62 timeo = DCCP_TIMEWAIT_LEN;
63
64 inet_twsk_schedule(tw, &dccp_death_row, timeo,
65 DCCP_TIMEWAIT_LEN);
66 inet_twsk_put(tw);
67 } else {
68 /* Sorry, if we're out of memory, just CLOSE this
69 * socket up. We've got bigger problems than
70 * non-graceful socket closings.
71 */
72 LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
73 "table overflow\n");
74 }
75
76 dccp_done(sk);
77}
78
79struct sock *dccp_create_openreq_child(struct sock *sk,
80 const struct request_sock *req,
81 const struct sk_buff *skb)
82{
83 /*
84 * Step 3: Process LISTEN state
85 *
86 * // Generate a new socket and switch to that socket
87 * Set S := new socket for this port pair
88 */
89 struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
90
91 if (newsk != NULL) {
92 const struct dccp_request_sock *dreq = dccp_rsk(req);
93		struct inet_connection_sock *newicsk = inet_csk(newsk);
94 struct dccp_sock *newdp = dccp_sk(newsk);
95
96 newdp->dccps_hc_rx_ackpkts = NULL;
97 newdp->dccps_role = DCCP_ROLE_SERVER;
98 newicsk->icsk_rto = DCCP_TIMEOUT_INIT;
99
100 if (newdp->dccps_options.dccpo_send_ack_vector) {
101 newdp->dccps_hc_rx_ackpkts =
102 dccp_ackpkts_alloc(DCCP_MAX_ACK_VECTOR_LEN,
103 GFP_ATOMIC);
104 /*
105 * XXX: We're using the same CCIDs set on the parent,
106 * i.e. sk_clone copied the master sock and left the
107 * CCID pointers for this child, that is why we do the
108 * __ccid_get calls.
109 */
110 if (unlikely(newdp->dccps_hc_rx_ackpkts == NULL))
111 goto out_free;
112 }
113
114 if (unlikely(ccid_hc_rx_init(newdp->dccps_hc_rx_ccid,
115 newsk) != 0 ||
116 ccid_hc_tx_init(newdp->dccps_hc_tx_ccid,
117 newsk) != 0)) {
118 dccp_ackpkts_free(newdp->dccps_hc_rx_ackpkts);
119 ccid_hc_rx_exit(newdp->dccps_hc_rx_ccid, newsk);
120 ccid_hc_tx_exit(newdp->dccps_hc_tx_ccid, newsk);
121out_free:
122			/* It is still a raw copy of the parent, so invalidate
123			 * the destructor and do a plain sk_free() */
124 newsk->sk_destruct = NULL;
125 sk_free(newsk);
126 return NULL;
127 }
128
129 __ccid_get(newdp->dccps_hc_rx_ccid);
130 __ccid_get(newdp->dccps_hc_tx_ccid);
131
132 /*
133 * Step 3: Process LISTEN state
134 *
135 * Choose S.ISS (initial seqno) or set from Init Cookie
136 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
137 * Cookie
138 */
139
140 /* See dccp_v4_conn_request */
141 newdp->dccps_options.dccpo_sequence_window = req->rcv_wnd;
142
143 newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
144 dccp_update_gsr(newsk, dreq->dreq_isr);
145
146 newdp->dccps_iss = dreq->dreq_iss;
147 dccp_update_gss(newsk, dreq->dreq_iss);
148
149 /*
150 * SWL and AWL are initially adjusted so that they are not less than
151 * the initial Sequence Numbers received and sent, respectively:
152 * SWL := max(GSR + 1 - floor(W/4), ISR),
153 * AWL := max(GSS - W' + 1, ISS).
154 * These adjustments MUST be applied only at the beginning of the
155 * connection.
156 */
157 dccp_set_seqno(&newdp->dccps_swl,
158 max48(newdp->dccps_swl, newdp->dccps_isr));
159 dccp_set_seqno(&newdp->dccps_awl,
160 max48(newdp->dccps_awl, newdp->dccps_iss));
161
162 dccp_init_xmit_timers(newsk);
163
164 DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
165 }
166 return newsk;
167}
168
169/*
170 * Process an incoming packet for RESPOND sockets represented
171 * as a request_sock.
172 */
173struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
174 struct request_sock *req,
175 struct request_sock **prev)
176{
177 struct sock *child = NULL;
178
179 /* Check for retransmitted REQUEST */
180 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
181 if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
182 dccp_rsk(req)->dreq_isr)) {
183 struct dccp_request_sock *dreq = dccp_rsk(req);
184
185 dccp_pr_debug("Retransmitted REQUEST\n");
186 /* Send another RESPONSE packet */
187 dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
188 dccp_set_seqno(&dreq->dreq_isr,
189 DCCP_SKB_CB(skb)->dccpd_seq);
190 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
191 }
192 /* Network Duplicate, discard packet */
193 return NULL;
194 }
195
196 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
197
198 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
199 dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
200 goto drop;
201
202 /* Invalid ACK */
203 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dccp_rsk(req)->dreq_iss) {
204 dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
205 "dreq_iss=%llu\n",
206 (unsigned long long)
207 DCCP_SKB_CB(skb)->dccpd_ack_seq,
208 (unsigned long long)
209 dccp_rsk(req)->dreq_iss);
210 goto drop;
211 }
212
213 child = dccp_v4_request_recv_sock(sk, skb, req, NULL);
214 if (child == NULL)
215 goto listen_overflow;
216
217 /* FIXME: deal with options */
218
219 inet_csk_reqsk_queue_unlink(sk, req, prev);
220 inet_csk_reqsk_queue_removed(sk, req);
221 inet_csk_reqsk_queue_add(sk, req, child);
222out:
223 return child;
224listen_overflow:
225 dccp_pr_debug("listen_overflow!\n");
226 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
227drop:
228 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
229 req->rsk_ops->send_reset(skb);
230
231 inet_csk_reqsk_queue_drop(sk, req, prev);
232 goto out;
233}
234
235/*
236 * Queue segment on the new socket if the new socket is active,
237 * otherwise we just short-circuit this and continue with
238 * the new socket.
239 */
240int dccp_child_process(struct sock *parent, struct sock *child,
241 struct sk_buff *skb)
242{
243 int ret = 0;
244 const int state = child->sk_state;
245
246 if (!sock_owned_by_user(child)) {
247 ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
248 skb->len);
249
250 /* Wakeup parent, send SIGIO */
251 if (state == DCCP_RESPOND && child->sk_state != state)
252 parent->sk_data_ready(parent, 0);
253 } else {
254		/* Alas, it is possible again, because we do the lookup
255		 * in the main socket hash table and the lock on the listening
256		 * socket does not protect us any more.
257 */
258 sk_add_backlog(child, skb);
259 }
260
261 bh_unlock_sock(child);
262 sock_put(child);
263 return ret;
264}
diff --git a/net/dccp/options.c b/net/dccp/options.c
new file mode 100644
index 000000000000..382c5894acb2
--- /dev/null
+++ b/net/dccp/options.c
@@ -0,0 +1,855 @@
1/*
2 * net/dccp/options.c
3 *
4 * An implementation of the DCCP protocol
5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14#include <linux/config.h>
15#include <linux/dccp.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/skbuff.h>
20
21#include "ccid.h"
22#include "dccp.h"
23
24static void dccp_ackpkts_check_rcv_ackvector(struct dccp_ackpkts *ap,
25 struct sock *sk,
26 const u64 ackno,
27 const unsigned char len,
28 const unsigned char *vector);
29
30/* Stores the default values for a new connection. May be changed with sysctl */
31static const struct dccp_options dccpo_default_values = {
32 .dccpo_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW,
33 .dccpo_ccid = DCCPF_INITIAL_CCID,
34 .dccpo_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR,
35 .dccpo_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT,
36};
37
38void dccp_options_init(struct dccp_options *dccpo)
39{
40 memcpy(dccpo, &dccpo_default_values, sizeof(*dccpo));
41}
42
43static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len)
44{
45 u32 value = 0;
46
47 if (len > 3)
48 value += *bf++ << 24;
49 if (len > 2)
50 value += *bf++ << 16;
51 if (len > 1)
52 value += *bf++ << 8;
53 if (len > 0)
54 value += *bf;
55
56 return value;
57}
58
59int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
60{
61 struct dccp_sock *dp = dccp_sk(sk);
62#ifdef CONFIG_IP_DCCP_DEBUG
63 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
64 "CLIENT rx opt: " : "server rx opt: ";
65#endif
66 const struct dccp_hdr *dh = dccp_hdr(skb);
67 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
68 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
69 unsigned char *opt_ptr = options;
70 const unsigned char *opt_end = (unsigned char *)dh +
71 (dh->dccph_doff * 4);
72 struct dccp_options_received *opt_recv = &dp->dccps_options_received;
73 unsigned char opt, len;
74 unsigned char *value;
75
76 memset(opt_recv, 0, sizeof(*opt_recv));
77
78 while (opt_ptr != opt_end) {
79 opt = *opt_ptr++;
80 len = 0;
81 value = NULL;
82
83 /* Check if this isn't a single byte option */
84 if (opt > DCCPO_MAX_RESERVED) {
85 if (opt_ptr == opt_end)
86 goto out_invalid_option;
87
88 len = *opt_ptr++;
89 if (len < 3)
90 goto out_invalid_option;
91 /*
92 * Remove the type and len fields, leaving
93 * just the value size
94 */
95 len -= 2;
96 value = opt_ptr;
97 opt_ptr += len;
98
99 if (opt_ptr > opt_end)
100 goto out_invalid_option;
101 }
102
103 switch (opt) {
104 case DCCPO_PADDING:
105 break;
106 case DCCPO_NDP_COUNT:
107 if (len > 3)
108 goto out_invalid_option;
109
110 opt_recv->dccpor_ndp = dccp_decode_value_var(value, len);
111 dccp_pr_debug("%sNDP count=%d\n", debug_prefix,
112 opt_recv->dccpor_ndp);
113 break;
114 case DCCPO_ACK_VECTOR_0:
115 if (len > DCCP_MAX_ACK_VECTOR_LEN)
116 goto out_invalid_option;
117
118 if (pkt_type == DCCP_PKT_DATA)
119 continue;
120
121 opt_recv->dccpor_ack_vector_len = len;
122 opt_recv->dccpor_ack_vector_idx = value - options;
123
124 dccp_pr_debug("%sACK vector 0, len=%d, ack_ackno=%llu\n",
125 debug_prefix, len,
126 (unsigned long long)
127 DCCP_SKB_CB(skb)->dccpd_ack_seq);
128 dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq,
129 value, len);
130 dccp_ackpkts_check_rcv_ackvector(dp->dccps_hc_rx_ackpkts,
131 sk,
132 DCCP_SKB_CB(skb)->dccpd_ack_seq,
133 len, value);
134 break;
135 case DCCPO_TIMESTAMP:
136 if (len != 4)
137 goto out_invalid_option;
138
139 opt_recv->dccpor_timestamp = ntohl(*(u32 *)value);
140
141 dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
142 do_gettimeofday(&dp->dccps_timestamp_time);
143
144 dccp_pr_debug("%sTIMESTAMP=%u, ackno=%llu\n",
145 debug_prefix, opt_recv->dccpor_timestamp,
146 (unsigned long long)
147 DCCP_SKB_CB(skb)->dccpd_ack_seq);
148 break;
149 case DCCPO_TIMESTAMP_ECHO:
150 if (len != 4 && len != 6 && len != 8)
151 goto out_invalid_option;
152
153 opt_recv->dccpor_timestamp_echo = ntohl(*(u32 *)value);
154
155 dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ",
156 debug_prefix,
157 opt_recv->dccpor_timestamp_echo,
158 len + 2,
159 (unsigned long long)
160 DCCP_SKB_CB(skb)->dccpd_ack_seq);
161
162 if (len > 4) {
163 if (len == 6)
164 opt_recv->dccpor_elapsed_time =
165 ntohs(*(u16 *)(value + 4));
166 else
167 opt_recv->dccpor_elapsed_time =
168 ntohl(*(u32 *)(value + 4));
169
170 dccp_pr_debug("%sTIMESTAMP_ECHO ELAPSED_TIME=%d\n",
171 debug_prefix,
172 opt_recv->dccpor_elapsed_time);
173 }
174 break;
175 case DCCPO_ELAPSED_TIME:
176 if (len != 2 && len != 4)
177 goto out_invalid_option;
178
179 if (pkt_type == DCCP_PKT_DATA)
180 continue;
181
182 if (len == 2)
183 opt_recv->dccpor_elapsed_time =
184 ntohs(*(u16 *)value);
185 else
186 opt_recv->dccpor_elapsed_time =
187 ntohl(*(u32 *)value);
188
189 dccp_pr_debug("%sELAPSED_TIME=%d\n", debug_prefix,
190 opt_recv->dccpor_elapsed_time);
191 break;
192 /*
193 * From draft-ietf-dccp-spec-11.txt:
194 *
195 * Option numbers 128 through 191 are for
196 * options sent from the HC-Sender to the
197 * HC-Receiver; option numbers 192 through 255
198 * are for options sent from the HC-Receiver to
199 * the HC-Sender.
200 */
201 case 128 ... 191: {
202 const u16 idx = value - options;
203
204 if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
205 opt, len, idx,
206 value) != 0)
207 goto out_invalid_option;
208 }
209 break;
210 case 192 ... 255: {
211 const u16 idx = value - options;
212
213 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
214 opt, len, idx,
215 value) != 0)
216 goto out_invalid_option;
217 }
218 break;
219 default:
220 pr_info("DCCP(%p): option %d(len=%d) not "
221 "implemented, ignoring\n",
222 sk, opt, len);
223 break;
224 }
225 }
226
227 return 0;
228
229out_invalid_option:
230 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
231 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
232 pr_info("DCCP(%p): invalid option %d, len=%d\n", sk, opt, len);
233 return -1;
234}
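
The parse loop above follows the DCCP option layout: types up to DCCPO_MAX_RESERVED are single bytes, while anything above carries a length octet that counts the type and length octets themselves, leaving len - 2 value bytes. A stripped-down walker under those rules; the literal 31 for DCCPO_MAX_RESERVED is an assumption:

#include <stdint.h>
#include <stdio.h>

static int walk_dccp_options(const uint8_t *p, const uint8_t *end)
{
	while (p != end) {
		const uint8_t type = *p++;
		uint8_t len = 0;

		if (type > 31) {	/* assumed DCCPO_MAX_RESERVED */
			if (p == end)
				return -1;	/* no length octet */
			len = *p++;
			if (len < 3)
				return -1;	/* must cover type+len+value */
			len -= 2;		/* keep only the value size */
			if (end - p < len)
				return -1;	/* value past packet end */
			p += len;
		}
		printf("option %u, %u value byte(s)\n", type, len);
	}
	return 0;
}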
235
236static void dccp_encode_value_var(const u32 value, unsigned char *to,
237 const unsigned int len)
238{
239 if (len > 3)
240 *to++ = (value & 0xFF000000) >> 24;
241 if (len > 2)
242 *to++ = (value & 0xFF0000) >> 16;
243 if (len > 1)
244 *to++ = (value & 0xFF00) >> 8;
245 if (len > 0)
246 *to++ = (value & 0xFF);
247}
248
249static inline int dccp_ndp_len(const int ndp)
250{
251 return likely(ndp <= 0xFF) ? 1 : ndp <= 0xFFFF ? 2 : 3;
252}
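
dccp_encode_value_var() and dccp_decode_value_var() form a big-endian, variable-width integer codec, and dccp_ndp_len() picks the narrowest width that still fits. A small round-trip check restating the three under those assumptions:

#include <assert.h>
#include <stdint.h>

static void ndp_roundtrip_check(void)
{
	unsigned char buf[3];
	const uint32_t ndp = 0x1234;
	/* width selection as in dccp_ndp_len() */
	const int len = ndp <= 0xFF ? 1 : ndp <= 0xFFFF ? 2 : 3;

	assert(len == 2);

	/* encode most-significant byte first, as above */
	buf[0] = (ndp & 0xFF00) >> 8;
	buf[1] = ndp & 0xFF;

	/* decode as dccp_decode_value_var() would */
	assert(((uint32_t)buf[0] << 8 | buf[1]) == ndp);
}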
253
254void dccp_insert_option(struct sock *sk, struct sk_buff *skb,
255 const unsigned char option,
256 const void *value, const unsigned char len)
257{
258 unsigned char *to;
259
260 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 2 > DCCP_MAX_OPT_LEN) {
261 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to insert "
262 "%d option!\n", option);
263 return;
264 }
265
266 DCCP_SKB_CB(skb)->dccpd_opt_len += len + 2;
267
268 to = skb_push(skb, len + 2);
269 *to++ = option;
270 *to++ = len + 2;
271
272 memcpy(to, value, len);
273}
274
275EXPORT_SYMBOL_GPL(dccp_insert_option);
276
277static void dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
278{
279 struct dccp_sock *dp = dccp_sk(sk);
280 int ndp = dp->dccps_ndp_count;
281
282 if (dccp_non_data_packet(skb))
283 ++dp->dccps_ndp_count;
284 else
285 dp->dccps_ndp_count = 0;
286
287 if (ndp > 0) {
288 unsigned char *ptr;
289 const int ndp_len = dccp_ndp_len(ndp);
290 const int len = ndp_len + 2;
291
292 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
293 return;
294
295 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
296
297 ptr = skb_push(skb, len);
298 *ptr++ = DCCPO_NDP_COUNT;
299 *ptr++ = len;
300 dccp_encode_value_var(ndp, ptr, ndp_len);
301 }
302}
303
304static inline int dccp_elapsed_time_len(const u32 elapsed_time)
305{
306 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
307}
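/*
 * The elapsed time is kept in units of 10 usec, so the 2-byte form covers
 * up to 0xFFFF * 10 usec, roughly 0.65 seconds; anything longer needs the
 * 4-byte encoding.
 */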
308
309void dccp_insert_option_elapsed_time(struct sock *sk,
310 struct sk_buff *skb,
311 u32 elapsed_time)
312{
313#ifdef CONFIG_IP_DCCP_DEBUG
314 struct dccp_sock *dp = dccp_sk(sk);
315 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
316 "CLIENT TX opt: " : "server TX opt: ";
317#endif
318 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
319 const int len = 2 + elapsed_time_len;
320 unsigned char *to;
321
322 if (elapsed_time_len == 0)
323 return;
324
325 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
326 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to "
327 "insert elapsed time!\n");
328 return;
329 }
330
331 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
332
333 to = skb_push(skb, len);
334 *to++ = DCCPO_ELAPSED_TIME;
335 *to++ = len;
336
337 if (elapsed_time_len == 2) {
338 const u16 var16 = htons((u16)elapsed_time);
339 memcpy(to, &var16, 2);
340 } else {
341 const u32 var32 = htonl(elapsed_time);
342 memcpy(to, &var32, 4);
343 }
344
345 dccp_pr_debug("%sELAPSED_TIME=%u, len=%d, seqno=%llu\n",
346 debug_prefix, elapsed_time,
347 len,
348 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
349}
350
351EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
352
353static void dccp_insert_option_ack_vector(struct sock *sk, struct sk_buff *skb)
354{
355 struct dccp_sock *dp = dccp_sk(sk);
356#ifdef CONFIG_IP_DCCP_DEBUG
357 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
358 "CLIENT TX opt: " : "server TX opt: ";
359#endif
360 struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
361 int len = ap->dccpap_buf_vector_len + 2;
362 const u32 elapsed_time = timeval_now_delta(&ap->dccpap_time) / 10;
363 unsigned char *to, *from;
364
365 if (elapsed_time != 0)
366 dccp_insert_option_elapsed_time(sk, skb, elapsed_time);
367
368 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
369 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to "
370 "insert ACK Vector!\n");
371 return;
372 }
373
374 /*
375 * XXX: now we have just one ack vector sent record, so
376 * we have to wait for it to be cleared.
377 *
378 * Of course this is not acceptable, but this is just for
379 * basic testing now.
380 */
381 if (ap->dccpap_ack_seqno != DCCP_MAX_SEQNO + 1)
382 return;
383
384 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
385
386 to = skb_push(skb, len);
387 *to++ = DCCPO_ACK_VECTOR_0;
388 *to++ = len;
389
390 len = ap->dccpap_buf_vector_len;
391 from = ap->dccpap_buf + ap->dccpap_buf_head;
392
393 /* Check if buf_head wraps */
394 if (ap->dccpap_buf_head + len > ap->dccpap_buf_len) {
395 const unsigned int tailsize = (ap->dccpap_buf_len -
396 ap->dccpap_buf_head);
397
398 memcpy(to, from, tailsize);
399 to += tailsize;
400 len -= tailsize;
401 from = ap->dccpap_buf;
402 }
403
404 memcpy(to, from, len);
405 /*
406 * From draft-ietf-dccp-spec-11.txt:
407 *
408 * For each acknowledgement it sends, the HC-Receiver will add an
409 * acknowledgement record. ack_seqno will equal the HC-Receiver
410 * sequence number it used for the ack packet; ack_ptr will equal
411 * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
412 * equal buf_nonce.
413 *
414 * This implementation uses just one ack record for now.
415 */
416 ap->dccpap_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
417 ap->dccpap_ack_ptr = ap->dccpap_buf_head;
418 ap->dccpap_ack_ackno = ap->dccpap_buf_ackno;
419 ap->dccpap_ack_nonce = ap->dccpap_buf_nonce;
420 ap->dccpap_ack_vector_len = ap->dccpap_buf_vector_len;
421
422 dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, "
423 "ack_ackno=%llu\n",
424 debug_prefix, ap->dccpap_ack_vector_len,
425 (unsigned long long) ap->dccpap_ack_seqno,
426 (unsigned long long) ap->dccpap_ack_ackno);
427}
428
429void dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb)
430{
431 struct timeval tv;
432 u32 now;
433
434 do_gettimeofday(&tv);
435 now = (tv.tv_sec * USEC_PER_SEC + tv.tv_usec) / 10;
436 /* yes this will overflow, but that is the point: we want a
437 * 10 usec 32 bit timer, which means it wraps every 11.9 hours */
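/* (2^32 * 10 usec = 42949.67 sec, i.e. roughly 11.93 hours) */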
438
439 now = htonl(now);
440 dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now));
441}
442
443EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp);
444
445static void dccp_insert_option_timestamp_echo(struct sock *sk,
446 struct sk_buff *skb)
447{
448 struct dccp_sock *dp = dccp_sk(sk);
449#ifdef CONFIG_IP_DCCP_DEBUG
450 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
451 "CLIENT TX opt: " : "server TX opt: ";
452#endif
453 u32 tstamp_echo;
454 const u32 elapsed_time =
455 timeval_now_delta(&dp->dccps_timestamp_time) / 10;
456 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
457 const int len = 6 + elapsed_time_len;
458 unsigned char *to;
459
460 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
461 LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small to insert "
462 "timestamp echo!\n");
463 return;
464 }
465
466 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
467
468 to = skb_push(skb, len);
469 *to++ = DCCPO_TIMESTAMP_ECHO;
470 *to++ = len;
471
472 tstamp_echo = htonl(dp->dccps_timestamp_echo);
473 memcpy(to, &tstamp_echo, 4);
474 to += 4;
475
476 if (elapsed_time_len == 2) {
477 const u16 var16 = htons((u16)elapsed_time);
478 memcpy(to, &var16, 2);
479 } else if (elapsed_time_len == 4) {
480 const u32 var32 = htonl(elapsed_time);
481 memcpy(to, &var32, 4);
482 }
483
484 dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, seqno=%llu\n",
485 debug_prefix, dp->dccps_timestamp_echo,
486 len,
487 (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
488
489 dp->dccps_timestamp_echo = 0;
490 dp->dccps_timestamp_time.tv_sec = 0;
491 dp->dccps_timestamp_time.tv_usec = 0;
492}
493
494void dccp_insert_options(struct sock *sk, struct sk_buff *skb)
495{
496 struct dccp_sock *dp = dccp_sk(sk);
497
498 DCCP_SKB_CB(skb)->dccpd_opt_len = 0;
499
500 if (dp->dccps_options.dccpo_send_ndp_count)
501 dccp_insert_option_ndp(sk, skb);
502
503 if (!dccp_packet_without_ack(skb)) {
504 if (dp->dccps_options.dccpo_send_ack_vector &&
505 (dp->dccps_hc_rx_ackpkts->dccpap_buf_ackno !=
506 DCCP_MAX_SEQNO + 1))
507 dccp_insert_option_ack_vector(sk, skb);
508
509 if (dp->dccps_timestamp_echo != 0)
510 dccp_insert_option_timestamp_echo(sk, skb);
511 }
512
513 ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb);
514 ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb);
515
516 /* XXX: insert other options when appropriate */
517
518 if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) {
519 /* The length of all options has to be a multiple of 4 */
520 int padding = DCCP_SKB_CB(skb)->dccpd_opt_len % 4;
521
522 if (padding != 0) {
523 padding = 4 - padding;
524 memset(skb_push(skb, padding), 0, padding);
525 DCCP_SKB_CB(skb)->dccpd_opt_len += padding;
526 }
527 }
528}
529
530struct dccp_ackpkts *dccp_ackpkts_alloc(const unsigned int len,
531 const unsigned int __nocast priority)
532{
533 struct dccp_ackpkts *ap = kmalloc(sizeof(*ap) + len, priority);
534
535 if (ap != NULL) {
536#ifdef CONFIG_IP_DCCP_DEBUG
537 memset(ap->dccpap_buf, 0xFF, len);
538#endif
539 ap->dccpap_buf_len = len;
540 ap->dccpap_buf_head =
541 ap->dccpap_buf_tail =
542 ap->dccpap_buf_len - 1;
543 ap->dccpap_buf_ackno =
544 ap->dccpap_ack_ackno =
545 ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
546 ap->dccpap_buf_nonce = ap->dccpap_ack_nonce = 0;
547 ap->dccpap_ack_ptr = 0;
548 ap->dccpap_time.tv_sec = 0;
549 ap->dccpap_time.tv_usec = 0;
550 ap->dccpap_buf_vector_len = ap->dccpap_ack_vector_len = 0;
551 }
552
553 return ap;
554}
555
556void dccp_ackpkts_free(struct dccp_ackpkts *ap)
557{
558 if (ap != NULL) {
559#ifdef CONFIG_IP_DCCP_DEBUG
560 memset(ap, 0xFF, sizeof(*ap) + ap->dccpap_buf_len);
561#endif
562 kfree(ap);
563 }
564}
565
566static inline u8 dccp_ackpkts_state(const struct dccp_ackpkts *ap,
567 const unsigned int index)
568{
569 return ap->dccpap_buf[index] & DCCP_ACKPKTS_STATE_MASK;
570}
571
572static inline u8 dccp_ackpkts_len(const struct dccp_ackpkts *ap,
573 const unsigned int index)
574{
575 return ap->dccpap_buf[index] & DCCP_ACKPKTS_LEN_MASK;
576}
577
578/*
579 * If several packets are missing, the HC-Receiver may prefer to enter multiple
580 * bytes with run length 0, rather than a single byte with a larger run length;
581 * this simplifies table updates if one of the missing packets arrives.
582 */
583static inline int dccp_ackpkts_set_buf_head_state(struct dccp_ackpkts *ap,
584 const unsigned int packets,
585 const unsigned char state)
586{
587 unsigned int gap;
588 signed long new_head;
589
590 if (ap->dccpap_buf_vector_len + packets > ap->dccpap_buf_len)
591 return -ENOBUFS;
592
593 gap = packets - 1;
594 new_head = ap->dccpap_buf_head - packets;
595
596 if (new_head < 0) {
597 if (gap > 0) {
598 memset(ap->dccpap_buf, DCCP_ACKPKTS_STATE_NOT_RECEIVED,
599 gap + new_head + 1);
600 gap = -new_head - 1;
601 }
602 new_head += ap->dccpap_buf_len;
603 }
604
605 ap->dccpap_buf_head = new_head;
606
607 if (gap > 0)
608 memset(ap->dccpap_buf + ap->dccpap_buf_head + 1,
609 DCCP_ACKPKTS_STATE_NOT_RECEIVED, gap);
610
611 ap->dccpap_buf[ap->dccpap_buf_head] = state;
612 ap->dccpap_buf_vector_len += packets;
613 return 0;
614}
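/*
 * Worked example with made-up numbers: for buf_len = 8, buf_head = 2 and
 * packets = 4, gap is 3 and new_head starts at -2. The first memset marks
 * indexes 0 and 1 as not received, the head wraps around to index 6, the
 * second memset marks index 7, and buf[6] then receives the new state, so
 * the three gap entries sit exactly between the new head and the old one.
 */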
615
616/*
617 * Implements the draft-ietf-dccp-spec-11.txt Appendix A
618 */
619int dccp_ackpkts_add(struct dccp_ackpkts *ap, u64 ackno, u8 state)
620{
621 /*
622 * Check at the right places if the buffer is full; if it is, tell the
623 * caller to start dropping packets till the HC-Sender acks our ACK
624 * vectors, at which point we will free up space in dccpap_buf.
625 *
626 * We may well decide to do buffer compression, etc, but for now let's
627 * just drop.
628 *
629 * From Appendix A:
630 *
631 * Of course, the circular buffer may overflow, either when the
632 * HC-Sender is sending data at a very high rate, when the
633 * HC-Receiver's acknowledgements are not reaching the HC-Sender,
634 * or when the HC-Sender is forgetting to acknowledge those acks
635 * (so the HC-Receiver is unable to clean up old state). In this
636 * case, the HC-Receiver should either compress the buffer (by
637 * increasing run lengths when possible), transfer its state to
638 * a larger buffer, or, as a last resort, drop all received
639 * packets, without processing them whatsoever, until its buffer
640 * shrinks again.
641 */
642
643 /* See if this is the first ackno being inserted */
644 if (ap->dccpap_buf_vector_len == 0) {
645 ap->dccpap_buf[ap->dccpap_buf_head] = state;
646 ap->dccpap_buf_vector_len = 1;
647 } else if (after48(ackno, ap->dccpap_buf_ackno)) {
648 const u64 delta = dccp_delta_seqno(ap->dccpap_buf_ackno,
649 ackno);
650
651 /*
652 * Check whether this packet's state matches the previous
653 * ackno's and, if so, whether we can bump the head run length.
654 */
655 if (delta == 1 &&
656 dccp_ackpkts_state(ap, ap->dccpap_buf_head) == state &&
657 (dccp_ackpkts_len(ap, ap->dccpap_buf_head) <
658 DCCP_ACKPKTS_LEN_MASK))
659 ap->dccpap_buf[ap->dccpap_buf_head]++;
660 else if (dccp_ackpkts_set_buf_head_state(ap, delta, state))
661 return -ENOBUFS;
662 } else {
663 /*
664 * A.1.2. Old Packets
665 *
666 * When a packet with Sequence Number S arrives, and
667 * S <= buf_ackno, the HC-Receiver will scan the table
668 * for the byte corresponding to S. (Indexing structures
669 * could reduce the complexity of this scan.)
670 */
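/*
 * A hypothetical walk of that scan: say buf_ackno is 10 and the vector
 * holds a head entry with run length 1 (covering seqnos 10 and 9) followed
 * by an entry with run length 2 (covering 8, 7 and 6). For an arriving
 * ackno of 7, delta starts at 3; the head entry consumes 2, leaving
 * delta == 1, which is < 2 + 1 at the second entry, i.e. seqno 7 is
 * already represented there and the packet is reported as a duplicate.
 */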
671 u64 delta = dccp_delta_seqno(ackno, ap->dccpap_buf_ackno);
672 unsigned int index = ap->dccpap_buf_head;
673
674 while (1) {
675 const u8 len = dccp_ackpkts_len(ap, index);
676 const u8 state = dccp_ackpkts_state(ap, index);
677 /*
678 * valid packets not yet in dccpap_buf have a reserved
679 * entry, with a len equal to 0.
680 */
681 if (state == DCCP_ACKPKTS_STATE_NOT_RECEIVED &&
682 len == 0 && delta == 0) {
683 /* Found our reserved seat! */
684 dccp_pr_debug("Found %llu reserved seat!\n",
685 (unsigned long long) ackno);
686 ap->dccpap_buf[index] = state;
687 goto out;
688 }
689 /* len == 0 means one packet */
690 if (delta < len + 1)
691 goto out_duplicate;
692
693 delta -= len + 1;
694 if (++index == ap->dccpap_buf_len)
695 index = 0;
696 }
697 }
698
699 ap->dccpap_buf_ackno = ackno;
700 do_gettimeofday(&ap->dccpap_time);
701out:
702 dccp_pr_debug("");
703 dccp_ackpkts_print(ap);
704 return 0;
705
706out_duplicate:
707 /* Duplicate packet */
708 dccp_pr_debug("Received a dup or already considered lost "
709 "packet: %llu\n", (unsigned long long) ackno);
710 return -EILSEQ;
711}
712
713#ifdef CONFIG_IP_DCCP_DEBUG
714void dccp_ackvector_print(const u64 ackno, const unsigned char *vector,
715 int len)
716{
717 if (!dccp_debug)
718 return;
719
720 printk("ACK vector len=%d, ackno=%llu |", len,
721 (unsigned long long) ackno);
722
723 while (len--) {
724 const u8 state = (*vector & DCCP_ACKPKTS_STATE_MASK) >> 6;
725 const u8 rl = (*vector & DCCP_ACKPKTS_LEN_MASK);
726
727 printk("%d,%d|", state, rl);
728 ++vector;
729 }
730
731 printk("\n");
732}
733
734void dccp_ackpkts_print(const struct dccp_ackpkts *ap)
735{
736 dccp_ackvector_print(ap->dccpap_buf_ackno,
737 ap->dccpap_buf + ap->dccpap_buf_head,
738 ap->dccpap_buf_vector_len);
739}
740#endif
741
742static void dccp_ackpkts_throw_away_ack_record(struct dccp_ackpkts *ap)
743{
744 /*
745 * As we're keeping track of the ack vector size
746 * (dccpap_buf_vector_len) and the sent ack vector size
747 * (dccpap_ack_vector_len) we don't need dccpap_buf_tail at all, but
748 * keep this code here as in the future we'll implement a vector of
749 * ack records, as suggested in draft-ietf-dccp-spec-11.txt
750 * Appendix A. -acme
751 */
752#if 0
753 ap->dccpap_buf_tail = ap->dccpap_ack_ptr + 1;
754 if (ap->dccpap_buf_tail >= ap->dccpap_buf_len)
755 ap->dccpap_buf_tail -= ap->dccpap_buf_len;
756#endif
757 ap->dccpap_buf_vector_len -= ap->dccpap_ack_vector_len;
758}
759
760void dccp_ackpkts_check_rcv_ackno(struct dccp_ackpkts *ap, struct sock *sk,
761 u64 ackno)
762{
763 /* Check if we actually sent an ACK vector */
764 if (ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1)
765 return;
766
767 if (ackno == ap->dccpap_ack_seqno) {
768#ifdef CONFIG_IP_DCCP_DEBUG
769 struct dccp_sock *dp = dccp_sk(sk);
770 const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
771 "CLIENT rx ack: " : "server rx ack: ";
772#endif
773 dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, "
774 "ack_ackno=%llu, ACKED!\n",
775 debug_prefix, 1,
776 (unsigned long long) ap->dccpap_ack_seqno,
777 (unsigned long long) ap->dccpap_ack_ackno);
778 dccp_ackpkts_throw_away_ack_record(ap);
779 ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
780 }
781}
782
783static void dccp_ackpkts_check_rcv_ackvector(struct dccp_ackpkts *ap,
784 struct sock *sk, u64 ackno,
785 const unsigned char len,
786 const unsigned char *vector)
787{
788 unsigned char i;
789
790 /* Check if we actually sent an ACK vector */
791 if (ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1)
792 return;
793 /*
794 * We're in the receiver half connection, so if the received ACK
795 * vector ackno (e.g. 50) is before dccpap_ack_seqno (e.g. 52),
796 * we're not interested.
797 *
798 * Extra explanation with example:
799 *
800 * if we received an ACK vector with ackno 50, it can only be acking
801 * 50, 49, 48, etc, not 52 (the seqno for the ACK vector we sent).
802 */
803 /* dccp_pr_debug("is %llu < %llu? ", ackno, ap->dccpap_ack_seqno); */
804 if (before48(ackno, ap->dccpap_ack_seqno)) {
805 /* dccp_pr_debug_cat("yes\n"); */
806 return;
807 }
808 /* dccp_pr_debug_cat("no\n"); */
809
810 i = len;
811 while (i--) {
812 const u8 rl = (*vector & DCCP_ACKPKTS_LEN_MASK);
813 u64 ackno_end_rl;
814
815 dccp_set_seqno(&ackno_end_rl, ackno - rl);
816
817 /*
818 * dccp_pr_debug("is %llu <= %llu <= %llu? ", ackno_end_rl,
819 * ap->dccpap_ack_seqno, ackno);
820 */
821 if (between48(ap->dccpap_ack_seqno, ackno_end_rl, ackno)) {
822 const u8 state = (*vector &
823 DCCP_ACKPKTS_STATE_MASK) >> 6;
824 /* dccp_pr_debug_cat("yes\n"); */
825
826 if (state != DCCP_ACKPKTS_STATE_NOT_RECEIVED) {
827#ifdef CONFIG_IP_DCCP_DEBUG
828 struct dccp_sock *dp = dccp_sk(sk);
829 const char *debug_prefix =
830 dp->dccps_role == DCCP_ROLE_CLIENT ?
831 "CLIENT rx ack: " : "server rx ack: ";
832#endif
833 dccp_pr_debug("%sACK vector 0, len=%d, "
834 "ack_seqno=%llu, ack_ackno=%llu, "
835 "ACKED!\n",
836 debug_prefix, len,
837 (unsigned long long)
838 ap->dccpap_ack_seqno,
839 (unsigned long long)
840 ap->dccpap_ack_ackno);
841 dccp_ackpkts_throw_away_ack_record(ap);
842 }
843 /*
844 * If dccpap_ack_seqno was not received, no problem:
845 * we'll send another ACK vector.
846 */
847 ap->dccpap_ack_seqno = DCCP_MAX_SEQNO + 1;
848 break;
849 }
850 /* dccp_pr_debug_cat("no\n"); */
851
852 dccp_set_seqno(&ackno, ackno_end_rl - 1);
853 ++vector;
854 }
855}
diff --git a/net/dccp/output.c b/net/dccp/output.c
new file mode 100644
index 000000000000..28de157a4326
--- /dev/null
+++ b/net/dccp/output.c
@@ -0,0 +1,528 @@
1/*
2 * net/dccp/output.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/dccp.h>
15#include <linux/skbuff.h>
16
17#include <net/sock.h>
18
19#include "ccid.h"
20#include "dccp.h"
21
22static inline void dccp_event_ack_sent(struct sock *sk)
23{
24 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
25}
26
27/*
28 * All SKB's seen here are completely headerless. It is our
29 * job to build the DCCP header, and pass the packet down to
30 * IP so it can do the same plus pass the packet off to the
31 * device.
32 */
33int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
34{
35 if (likely(skb != NULL)) {
36 const struct inet_sock *inet = inet_sk(sk);
37 struct dccp_sock *dp = dccp_sk(sk);
38 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
39 struct dccp_hdr *dh;
40 /* XXX For now we're using only 48 bits sequence numbers */
41 const int dccp_header_size = sizeof(*dh) +
42 sizeof(struct dccp_hdr_ext) +
43 dccp_packet_hdr_len(dcb->dccpd_type);
44 int err, set_ack = 1;
45 u64 ackno = dp->dccps_gsr;
46
47 dccp_inc_seqno(&dp->dccps_gss);
48
49 switch (dcb->dccpd_type) {
50 case DCCP_PKT_DATA:
51 set_ack = 0;
52 break;
53 case DCCP_PKT_SYNC:
54 case DCCP_PKT_SYNCACK:
55 ackno = dcb->dccpd_seq;
56 break;
57 }
58
59 dcb->dccpd_seq = dp->dccps_gss;
60 dccp_insert_options(sk, skb);
61
62 skb->h.raw = skb_push(skb, dccp_header_size);
63 dh = dccp_hdr(skb);
64 /*
65 * Data packets are not cloned as they are never retransmitted
66 */
67 if (skb_cloned(skb))
68 skb_set_owner_w(skb, sk);
69
70 /* Build DCCP header and checksum it. */
71 memset(dh, 0, dccp_header_size);
72 dh->dccph_type = dcb->dccpd_type;
73 dh->dccph_sport = inet->sport;
74 dh->dccph_dport = inet->dport;
75 dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
76 dh->dccph_ccval = dcb->dccpd_ccval;
77 /* XXX For now we're using only 48 bits sequence numbers */
78 dh->dccph_x = 1;
79
80 dp->dccps_awh = dp->dccps_gss;
81 dccp_hdr_set_seq(dh, dp->dccps_gss);
82 if (set_ack)
83 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
84
85 switch (dcb->dccpd_type) {
86 case DCCP_PKT_REQUEST:
87 dccp_hdr_request(skb)->dccph_req_service =
88 dcb->dccpd_service;
89 break;
90 case DCCP_PKT_RESET:
91 dccp_hdr_reset(skb)->dccph_reset_code =
92 dcb->dccpd_reset_code;
93 break;
94 }
95
96 dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
97 inet->daddr);
98
99 if (set_ack)
100 dccp_event_ack_sent(sk);
101
102 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
103
104 err = ip_queue_xmit(skb, 0);
105 if (err <= 0)
106 return err;
107
108 /* NET_XMIT_CN is special. It does not guarantee
109 * that this packet is lost. It tells us that the device
110 * is about to start dropping packets, or already drops
111 * some packets of the same priority, and asks us
112 * to send less aggressively.
113 */
114 return err == NET_XMIT_CN ? 0 : err;
115 }
116 return -ENOBUFS;
117}
118
119unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
120{
121 struct dccp_sock *dp = dccp_sk(sk);
122 int mss_now;
123
124 /*
125 * FIXME: we really should be using the af_specific thing to support
126 * IPv6.
127 * mss_now = pmtu - tp->af_specific->net_header_len -
128 * sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
129 */
130 mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
131 sizeof(struct dccp_hdr_ext);
132
133 /* Now subtract optional transport overhead */
134 mss_now -= dp->dccps_ext_header_len;
135
136 /*
137 * FIXME: this should come from the CCID infrastructure, where, say,
138 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc; for now let's
139 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
140 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
141 * make it a multiple of 4
142 */
143
144 mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
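/* (the sum above is 42 bytes, which the truncating division makes 40) */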
145
146 /* And store cached results */
147 dp->dccps_pmtu_cookie = pmtu;
148 dp->dccps_mss_cache = mss_now;
149
150 return mss_now;
151}
152
153void dccp_write_space(struct sock *sk)
154{
155 read_lock(&sk->sk_callback_lock);
156
157 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
158 wake_up_interruptible(sk->sk_sleep);
159 /* Should agree with poll, otherwise some programs break */
160 if (sock_writeable(sk))
161 sk_wake_async(sk, 2, POLL_OUT);
162
163 read_unlock(&sk->sk_callback_lock);
164}
165
166/**
167 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
168 * @sk: socket to wait for
169 * @skb: packet we are trying to send
170 * @timeo: for how long to wait
170 */
171static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
172 long *timeo)
173{
174 struct dccp_sock *dp = dccp_sk(sk);
175 DEFINE_WAIT(wait);
176 long delay;
177 int rc;
178
179 while (1) {
180 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
181
182 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
183 goto do_error;
184 if (!*timeo)
185 goto do_nonblock;
186 if (signal_pending(current))
187 goto do_interrupted;
188
189 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
190 skb->len);
191 if (rc <= 0)
192 break;
193 delay = msecs_to_jiffies(rc);
194 if (delay > *timeo || delay < 0)
195 goto do_nonblock;
196
197 sk->sk_write_pending++;
198 release_sock(sk);
199 *timeo -= schedule_timeout(delay);
200 lock_sock(sk);
201 sk->sk_write_pending--;
202 }
203out:
204 finish_wait(sk->sk_sleep, &wait);
205 return rc;
206
207do_error:
208 rc = -EPIPE;
209 goto out;
210do_nonblock:
211 rc = -EAGAIN;
212 goto out;
213do_interrupted:
214 rc = sock_intr_errno(*timeo);
215 goto out;
216}
217
218int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
219{
220 const struct dccp_sock *dp = dccp_sk(sk);
221 int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
222 skb->len);
223
224 if (err > 0)
225 err = dccp_wait_for_ccid(sk, skb, timeo);
226
227 if (err == 0) {
228 const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
229 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
230 const int len = skb->len;
231
232 if (sk->sk_state == DCCP_PARTOPEN) {
233 /* See 8.1.5. Handshake Completion */
234 inet_csk_schedule_ack(sk);
235 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
236 inet_csk(sk)->icsk_rto,
237 DCCP_RTO_MAX);
238 dcb->dccpd_type = DCCP_PKT_DATAACK;
239 /*
240 * FIXME: we really should have a
241 * dccps_ack_pending or use icsk.
242 */
243 } else if (inet_csk_ack_scheduled(sk) ||
244 dp->dccps_timestamp_echo != 0 ||
245 (dp->dccps_options.dccpo_send_ack_vector &&
246 ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
247 ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
248 dcb->dccpd_type = DCCP_PKT_DATAACK;
249 else
250 dcb->dccpd_type = DCCP_PKT_DATA;
251
252 err = dccp_transmit_skb(sk, skb);
253 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
254 }
255
256 return err;
257}
258
259int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
260{
261 if (inet_sk_rebuild_header(sk) != 0)
262 return -EHOSTUNREACH; /* Routing failure or similar. */
263
264 return dccp_transmit_skb(sk, (skb_cloned(skb) ?
265 pskb_copy(skb, GFP_ATOMIC):
266 skb_clone(skb, GFP_ATOMIC)));
267}
268
269struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
270 struct request_sock *req)
271{
272 struct dccp_hdr *dh;
273 const int dccp_header_size = sizeof(struct dccp_hdr) +
274 sizeof(struct dccp_hdr_ext) +
275 sizeof(struct dccp_hdr_response);
276 struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
277 dccp_header_size, 1,
278 GFP_ATOMIC);
279 if (skb == NULL)
280 return NULL;
281
282 /* Reserve space for headers. */
283 skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
284
285 skb->dst = dst_clone(dst);
286 skb->csum = 0;
287
288 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
289 DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss;
290 dccp_insert_options(sk, skb);
291
292 skb->h.raw = skb_push(skb, dccp_header_size);
293
294 dh = dccp_hdr(skb);
295 memset(dh, 0, dccp_header_size);
296
297 dh->dccph_sport = inet_sk(sk)->sport;
298 dh->dccph_dport = inet_rsk(req)->rmt_port;
299 dh->dccph_doff = (dccp_header_size +
300 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
301 dh->dccph_type = DCCP_PKT_RESPONSE;
302 dh->dccph_x = 1;
303 dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
304 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);
305
306 dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
307 inet_rsk(req)->rmt_addr);
308
309 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
310 return skb;
311}
312
313struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
314 const enum dccp_reset_codes code)
315
316{
317 struct dccp_hdr *dh;
318 struct dccp_sock *dp = dccp_sk(sk);
319 const int dccp_header_size = sizeof(struct dccp_hdr) +
320 sizeof(struct dccp_hdr_ext) +
321 sizeof(struct dccp_hdr_reset);
322 struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
323 dccp_header_size, 1,
324 GFP_ATOMIC);
325 if (skb == NULL)
326 return NULL;
327
328 /* Reserve space for headers. */
329 skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
330
331 skb->dst = dst_clone(dst);
332 skb->csum = 0;
333
334 dccp_inc_seqno(&dp->dccps_gss);
335
336 DCCP_SKB_CB(skb)->dccpd_reset_code = code;
337 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
338 DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss;
339 dccp_insert_options(sk, skb);
340
341 skb->h.raw = skb_push(skb, dccp_header_size);
342
343 dh = dccp_hdr(skb);
344 memset(dh, 0, dccp_header_size);
345
346 dh->dccph_sport = inet_sk(sk)->sport;
347 dh->dccph_dport = inet_sk(sk)->dport;
348 dh->dccph_doff = (dccp_header_size +
349 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
350 dh->dccph_type = DCCP_PKT_RESET;
351 dh->dccph_x = 1;
352 dccp_hdr_set_seq(dh, dp->dccps_gss);
353 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
354
355 dccp_hdr_reset(skb)->dccph_reset_code = code;
356
357 dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
358 inet_sk(sk)->daddr);
359
360 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
361 return skb;
362}
363
364/*
365 * Do all connect socket setups that can be done AF independent.
366 */
367static inline void dccp_connect_init(struct sock *sk)
368{
369 struct dst_entry *dst = __sk_dst_get(sk);
370 struct inet_connection_sock *icsk = inet_csk(sk);
371
372 sk->sk_err = 0;
373 sock_reset_flag(sk, SOCK_DONE);
374
375 dccp_sync_mss(sk, dst_mtu(dst));
376
377 /*
378 * FIXME: set dp->{dccps_swh,dccps_swl}, with
379 * something like dccp_inc_seq
380 */
381
382 icsk->icsk_retransmits = 0;
383}
384
385int dccp_connect(struct sock *sk)
386{
387 struct sk_buff *skb;
388 struct inet_connection_sock *icsk = inet_csk(sk);
389
390 dccp_connect_init(sk);
391
392 skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
393 if (unlikely(skb == NULL))
394 return -ENOBUFS;
395
396 /* Reserve space for headers. */
397 skb_reserve(skb, MAX_DCCP_HEADER);
398
399 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
400 /* FIXME: set service to something meaningful, coming
401 * from userspace */
402 DCCP_SKB_CB(skb)->dccpd_service = 0;
403 skb->csum = 0;
404 skb_set_owner_w(skb, sk);
405
406 BUG_TRAP(sk->sk_send_head == NULL);
407 sk->sk_send_head = skb;
408 dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
409 DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);
410
411 /* Timer for repeating the REQUEST until an answer. */
412 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
413 icsk->icsk_rto, DCCP_RTO_MAX);
414 return 0;
415}
416
417void dccp_send_ack(struct sock *sk)
418{
419 /* If we have been reset, we may not send again. */
420 if (sk->sk_state != DCCP_CLOSED) {
421 struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
422
423 if (skb == NULL) {
424 inet_csk_schedule_ack(sk);
425 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
426 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
427 TCP_DELACK_MAX,
428 DCCP_RTO_MAX);
429 return;
430 }
431
432 /* Reserve space for headers */
433 skb_reserve(skb, MAX_DCCP_HEADER);
434 skb->csum = 0;
435 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
436 skb_set_owner_w(skb, sk);
437 dccp_transmit_skb(sk, skb);
438 }
439}
440
441EXPORT_SYMBOL_GPL(dccp_send_ack);
442
443void dccp_send_delayed_ack(struct sock *sk)
444{
445 struct inet_connection_sock *icsk = inet_csk(sk);
446 /*
447 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
448 * with using 2s, and active senders also piggyback the ACK into a
449 * DATAACK packet, so this is really for quiescent senders.
450 */
451 unsigned long timeout = jiffies + 2 * HZ;
452
453 /* Use the new timeout only if there wasn't an older one already. */
454 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
455 /* If delack timer was blocked or is about to expire,
456 * send ACK now.
457 *
458 * FIXME: check the "about to expire" part
459 */
460 if (icsk->icsk_ack.blocked) {
461 dccp_send_ack(sk);
462 return;
463 }
464
465 if (!time_before(timeout, icsk->icsk_ack.timeout))
466 timeout = icsk->icsk_ack.timeout;
467 }
468 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
469 icsk->icsk_ack.timeout = timeout;
470 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
471}
472
473void dccp_send_sync(struct sock *sk, const u64 seq,
474 const enum dccp_pkt_type pkt_type)
475{
476 /*
477 * We are not putting this on the write queue, so
478 * dccp_transmit_skb() will set the ownership to this
479 * sock.
480 */
481 struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
482
483 if (skb == NULL)
484 /* FIXME: how to make sure the sync is sent? */
485 return;
486
487 /* Reserve space for headers and prepare control bits. */
488 skb_reserve(skb, MAX_DCCP_HEADER);
489 skb->csum = 0;
490 DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
491 DCCP_SKB_CB(skb)->dccpd_seq = seq;
492
493 skb_set_owner_w(skb, sk);
494 dccp_transmit_skb(sk, skb);
495}
496
497/*
498 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
499 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
500 * any circumstances.
501 */
502void dccp_send_close(struct sock *sk, const int active)
503{
504 struct dccp_sock *dp = dccp_sk(sk);
505 struct sk_buff *skb;
506 const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
507
508 skb = alloc_skb(sk->sk_prot->max_header, prio);
509 if (skb == NULL)
510 return;
511
512 /* Reserve space for headers and prepare control bits. */
513 skb_reserve(skb, sk->sk_prot->max_header);
514 skb->csum = 0;
515 DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
516 DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
517
518 skb_set_owner_w(skb, sk);
519 if (active) {
520 BUG_TRAP(sk->sk_send_head == NULL);
521 sk->sk_send_head = skb;
522 dccp_transmit_skb(sk, skb_clone(skb, prio));
523 } else
524 dccp_transmit_skb(sk, skb);
525
526 ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
527 ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
528}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
new file mode 100644
index 000000000000..18a0e69c9dc7
--- /dev/null
+++ b/net/dccp/proto.c
@@ -0,0 +1,826 @@
1/*
2 * net/dccp/proto.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/config.h>
13#include <linux/dccp.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/skbuff.h>
19#include <linux/netdevice.h>
20#include <linux/in.h>
21#include <linux/if_arp.h>
22#include <linux/init.h>
23#include <linux/random.h>
24#include <net/checksum.h>
25
26#include <net/inet_common.h>
27#include <net/ip.h>
28#include <net/protocol.h>
29#include <net/sock.h>
30#include <net/xfrm.h>
31
32#include <asm/semaphore.h>
33#include <linux/spinlock.h>
34#include <linux/timer.h>
35#include <linux/delay.h>
36#include <linux/poll.h>
37#include <linux/dccp.h>
38
39#include "ccid.h"
40#include "dccp.h"
41
42DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
43
44atomic_t dccp_orphan_count = ATOMIC_INIT(0);
45
46static struct net_protocol dccp_protocol = {
47 .handler = dccp_v4_rcv,
48 .err_handler = dccp_v4_err,
49};
50
51const char *dccp_packet_name(const int type)
52{
53 static const char *dccp_packet_names[] = {
54 [DCCP_PKT_REQUEST] = "REQUEST",
55 [DCCP_PKT_RESPONSE] = "RESPONSE",
56 [DCCP_PKT_DATA] = "DATA",
57 [DCCP_PKT_ACK] = "ACK",
58 [DCCP_PKT_DATAACK] = "DATAACK",
59 [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
60 [DCCP_PKT_CLOSE] = "CLOSE",
61 [DCCP_PKT_RESET] = "RESET",
62 [DCCP_PKT_SYNC] = "SYNC",
63 [DCCP_PKT_SYNCACK] = "SYNCACK",
64 };
65
66 if (type >= DCCP_NR_PKT_TYPES)
67 return "INVALID";
68 else
69 return dccp_packet_names[type];
70}
71
72EXPORT_SYMBOL_GPL(dccp_packet_name);
73
74const char *dccp_state_name(const int state)
75{
76 static char *dccp_state_names[] = {
77 [DCCP_OPEN] = "OPEN",
78 [DCCP_REQUESTING] = "REQUESTING",
79 [DCCP_PARTOPEN] = "PARTOPEN",
80 [DCCP_LISTEN] = "LISTEN",
81 [DCCP_RESPOND] = "RESPOND",
82 [DCCP_CLOSING] = "CLOSING",
83 [DCCP_TIME_WAIT] = "TIME_WAIT",
84 [DCCP_CLOSED] = "CLOSED",
85 };
86
87 if (state >= DCCP_MAX_STATES)
88 return "INVALID STATE!";
89 else
90 return dccp_state_names[state];
91}
92
93EXPORT_SYMBOL_GPL(dccp_state_name);
94
95static inline int dccp_listen_start(struct sock *sk)
96{
97 dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
98 return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
99}
100
101int dccp_disconnect(struct sock *sk, int flags)
102{
103 struct inet_connection_sock *icsk = inet_csk(sk);
104 struct inet_sock *inet = inet_sk(sk);
105 int err = 0;
106 const int old_state = sk->sk_state;
107
108 if (old_state != DCCP_CLOSED)
109 dccp_set_state(sk, DCCP_CLOSED);
110
111 /* ABORT function of RFC793 */
112 if (old_state == DCCP_LISTEN) {
113 inet_csk_listen_stop(sk);
114 /* FIXME: do the active reset thing */
115 } else if (old_state == DCCP_REQUESTING)
116 sk->sk_err = ECONNRESET;
117
118 dccp_clear_xmit_timers(sk);
119 __skb_queue_purge(&sk->sk_receive_queue);
120 if (sk->sk_send_head != NULL) {
121 __kfree_skb(sk->sk_send_head);
122 sk->sk_send_head = NULL;
123 }
124
125 inet->dport = 0;
126
127 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
128 inet_reset_saddr(sk);
129
130 sk->sk_shutdown = 0;
131 sock_reset_flag(sk, SOCK_DONE);
132
133 icsk->icsk_backoff = 0;
134 inet_csk_delack_init(sk);
135 __sk_dst_reset(sk);
136
137 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
138
139 sk->sk_error_report(sk);
140 return err;
141}
142
143/*
144 * Wait for a DCCP event.
145 *
146 * Note that we don't need to lock the socket, as the upper poll layers
147 * take care of normal races (between the test and the event) and we don't
148 * go look at any of the socket buffers directly.
149 */
150static unsigned int dccp_poll(struct file *file, struct socket *sock,
151 poll_table *wait)
152{
153 unsigned int mask;
154 struct sock *sk = sock->sk;
155
156 poll_wait(file, sk->sk_sleep, wait);
157 if (sk->sk_state == DCCP_LISTEN)
158 return inet_csk_listen_poll(sk);
159
160 /* Socket is not locked. We are protected from async events
161 by the poll logic, and correct handling of state changes
162 made by other threads is impossible in any case.
163 */
164
165 mask = 0;
166 if (sk->sk_err)
167 mask = POLLERR;
168
169 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
170 mask |= POLLHUP;
171 if (sk->sk_shutdown & RCV_SHUTDOWN)
172 mask |= POLLIN | POLLRDNORM;
173
174 /* Connected? */
175 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
176 if (atomic_read(&sk->sk_rmem_alloc) > 0)
177 mask |= POLLIN | POLLRDNORM;
178
179 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
180 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
181 mask |= POLLOUT | POLLWRNORM;
182 } else { /* send SIGIO later */
183 set_bit(SOCK_ASYNC_NOSPACE,
184 &sk->sk_socket->flags);
185 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
186
187 /* Race breaker. If space is freed after
188 * wspace test but before the flags are set,
189 * IO signal will be lost.
190 */
191 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
192 mask |= POLLOUT | POLLWRNORM;
193 }
194 }
195 }
196 return mask;
197}
198
199int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
200{
201 dccp_pr_debug("entry\n");
202 return -ENOIOCTLCMD;
203}
204
205int dccp_setsockopt(struct sock *sk, int level, int optname,
206 char __user *optval, int optlen)
207{
208 struct dccp_sock *dp;
209 int err;
210 int val;
211
212 if (level != SOL_DCCP)
213 return ip_setsockopt(sk, level, optname, optval, optlen);
214
215 if (optlen < sizeof(int))
216 return -EINVAL;
217
218 if (get_user(val, (int __user *)optval))
219 return -EFAULT;
220
221 lock_sock(sk);
222
223 dp = dccp_sk(sk);
224 err = 0;
225
226 switch (optname) {
227 case DCCP_SOCKOPT_PACKET_SIZE:
228 dp->dccps_packet_size = val;
229 break;
230 default:
231 err = -ENOPROTOOPT;
232 break;
233 }
234
235 release_sock(sk);
236 return err;
237}
238
239int dccp_getsockopt(struct sock *sk, int level, int optname,
240 char __user *optval, int __user *optlen)
241{
242 struct dccp_sock *dp;
243 int val, len;
244
245 if (level != SOL_DCCP)
246 return ip_getsockopt(sk, level, optname, optval, optlen);
247
248 if (get_user(len, optlen))
249 return -EFAULT;
250
251 len = min_t(unsigned int, len, sizeof(int));
252 if (len < 0)
253 return -EINVAL;
254
255 dp = dccp_sk(sk);
256
257 switch (optname) {
258 case DCCP_SOCKOPT_PACKET_SIZE:
259 val = dp->dccps_packet_size;
260 break;
261 default:
262 return -ENOPROTOOPT;
263 }
264
265 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
266 return -EFAULT;
267
268 return 0;
269}
270
271int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
272 size_t len)
273{
274 const struct dccp_sock *dp = dccp_sk(sk);
275 const int flags = msg->msg_flags;
276 const int noblock = flags & MSG_DONTWAIT;
277 struct sk_buff *skb;
278 int rc, size;
279 long timeo;
280
281 if (len > dp->dccps_mss_cache)
282 return -EMSGSIZE;
283
284 lock_sock(sk);
285 timeo = sock_sndtimeo(sk, noblock);
286
287 /*
288 * We have to use sk_stream_wait_connect here to set sk_write_pending,
289 * so that the trick in dccp_rcv_request_sent_state_process works.
290 */
291 /* Wait for a connection to finish. */
292 if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
293 if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
294 goto out_release;
295
296 size = sk->sk_prot->max_header + len;
297 release_sock(sk);
298 skb = sock_alloc_send_skb(sk, size, noblock, &rc);
299 lock_sock(sk);
300 if (skb == NULL)
301 goto out_release;
302
303 skb_reserve(skb, sk->sk_prot->max_header);
304 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
305 if (rc != 0)
306 goto out_discard;
307
308 rc = dccp_write_xmit(sk, skb, &timeo);
309 /*
310 * XXX we don't use sk_write_queue, so just discard the packet.
311 * Current plan however is to _use_ sk_write_queue with
312 * an algorithm similar to tcp_sendmsg, where the main difference
313 * is that in DCCP we have to respect packet boundaries, so
314 * no coalescing of skbs.
315 *
316 * This bug was _quickly_ found & fixed by just looking at an OSTRA
317 * generated callgraph 8) -acme
318 */
319 if (rc != 0)
320 goto out_discard;
321out_release:
322 release_sock(sk);
323 return rc ? : len;
324out_discard:
325 kfree_skb(skb);
326 goto out_release;
327}
328
329int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
330 size_t len, int nonblock, int flags, int *addr_len)
331{
332 const struct dccp_hdr *dh;
333 long timeo;
334
335 lock_sock(sk);
336
337 if (sk->sk_state == DCCP_LISTEN) {
338 len = -ENOTCONN;
339 goto out;
340 }
341
342 timeo = sock_rcvtimeo(sk, nonblock);
343
344 do {
345 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
346
347 if (skb == NULL)
348 goto verify_sock_status;
349
350 dh = dccp_hdr(skb);
351
352 if (dh->dccph_type == DCCP_PKT_DATA ||
353 dh->dccph_type == DCCP_PKT_DATAACK)
354 goto found_ok_skb;
355
356 if (dh->dccph_type == DCCP_PKT_RESET ||
357 dh->dccph_type == DCCP_PKT_CLOSE) {
358 dccp_pr_debug("found fin ok!\n");
359 len = 0;
360 goto found_fin_ok;
361 }
362 dccp_pr_debug("packet_type=%s\n",
363 dccp_packet_name(dh->dccph_type));
364 sk_eat_skb(sk, skb);
365verify_sock_status:
366 if (sock_flag(sk, SOCK_DONE)) {
367 len = 0;
368 break;
369 }
370
371 if (sk->sk_err) {
372 len = sock_error(sk);
373 break;
374 }
375
376 if (sk->sk_shutdown & RCV_SHUTDOWN) {
377 len = 0;
378 break;
379 }
380
381 if (sk->sk_state == DCCP_CLOSED) {
382 if (!sock_flag(sk, SOCK_DONE)) {
383 /* This occurs when user tries to read
384 * from a never connected socket.
385 */
386 len = -ENOTCONN;
387 break;
388 }
389 len = 0;
390 break;
391 }
392
393 if (!timeo) {
394 len = -EAGAIN;
395 break;
396 }
397
398 if (signal_pending(current)) {
399 len = sock_intr_errno(timeo);
400 break;
401 }
402
403 sk_wait_data(sk, &timeo);
404 continue;
405 found_ok_skb:
406 if (len > skb->len)
407 len = skb->len;
408 else if (len < skb->len)
409 msg->msg_flags |= MSG_TRUNC;
410
411 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
412 /* Exception. Bailout! */
413 len = -EFAULT;
414 break;
415 }
416 found_fin_ok:
417 if (!(flags & MSG_PEEK))
418 sk_eat_skb(sk, skb);
419 break;
420 } while (1);
421out:
422 release_sock(sk);
423 return len;
424}
425
426static int inet_dccp_listen(struct socket *sock, int backlog)
427{
428 struct sock *sk = sock->sk;
429 unsigned char old_state;
430 int err;
431
432 lock_sock(sk);
433
434 err = -EINVAL;
435 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
436 goto out;
437
438 old_state = sk->sk_state;
439 if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
440 goto out;
441
442 /* Really, if the socket is already in listen state
443 * we can only allow the backlog to be adjusted.
444 */
445 if (old_state != DCCP_LISTEN) {
446 /*
447 * FIXME: here it probably should be sk->sk_prot->listen_start
448 * see tcp_listen_start
449 */
450 err = dccp_listen_start(sk);
451 if (err)
452 goto out;
453 }
454 sk->sk_max_ack_backlog = backlog;
455 err = 0;
456
457out:
458 release_sock(sk);
459 return err;
460}
461
462static const unsigned char dccp_new_state[] = {
463 /* current state: new state: action: */
464 [0] = DCCP_CLOSED,
465 [DCCP_OPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
466 [DCCP_REQUESTING] = DCCP_CLOSED,
467 [DCCP_PARTOPEN] = DCCP_CLOSING | DCCP_ACTION_FIN,
468 [DCCP_LISTEN] = DCCP_CLOSED,
469 [DCCP_RESPOND] = DCCP_CLOSED,
470 [DCCP_CLOSING] = DCCP_CLOSED,
471 [DCCP_TIME_WAIT] = DCCP_CLOSED,
472 [DCCP_CLOSED] = DCCP_CLOSED,
473};
474
475static int dccp_close_state(struct sock *sk)
476{
477 const int next = dccp_new_state[sk->sk_state];
478 const int ns = next & DCCP_STATE_MASK;
479
480 if (ns != sk->sk_state)
481 dccp_set_state(sk, ns);
482
483 return next & DCCP_ACTION_FIN;
484}
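/*
 * For example, closing a sock in DCCP_OPEN looks up
 * DCCP_CLOSING | DCCP_ACTION_FIN in the table above: the sock moves to
 * DCCP_CLOSING and the non-zero return tells dccp_close() to send a
 * CLOSE/CLOSEREQ, while a sock still in DCCP_REQUESTING just drops to
 * DCCP_CLOSED with nothing sent.
 */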
485
486void dccp_close(struct sock *sk, long timeout)
487{
488 struct sk_buff *skb;
489
490 lock_sock(sk);
491
492 sk->sk_shutdown = SHUTDOWN_MASK;
493
494 if (sk->sk_state == DCCP_LISTEN) {
495 dccp_set_state(sk, DCCP_CLOSED);
496
497 /* Special case. */
498 inet_csk_listen_stop(sk);
499
500 goto adjudge_to_death;
501 }
502
503 /*
504 * We need to flush the recv. buffs. We do this only on the
505 * descriptor close, not protocol-sourced closes, because the
506 * reader process may not have drained the data yet!
507 */
508 /* FIXME: check for unread data */
509 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
510 __kfree_skb(skb);
511 }
512
513 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
514 /* Check zero linger _after_ checking for unread data. */
515 sk->sk_prot->disconnect(sk, 0);
516 } else if (dccp_close_state(sk)) {
517 dccp_send_close(sk, 1);
518 }
519
520 sk_stream_wait_close(sk, timeout);
521
522adjudge_to_death:
523 /*
524 * This is the last release_sock in its life. It will remove the backlog.
525 */
526 release_sock(sk);
527 /*
528 * Now socket is owned by kernel and we acquire BH lock
529 * to finish close. No need to check for user refs.
530 */
531 local_bh_disable();
532 bh_lock_sock(sk);
533 BUG_TRAP(!sock_owned_by_user(sk));
534
535 sock_hold(sk);
536 sock_orphan(sk);
537
538 /*
539 * The last release_sock may have processed the CLOSE or RESET
540 * packet moving the sock to CLOSED state; if not, we have to fire
541 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
542 * in draft-ietf-dccp-spec-11. -acme
543 */
544 if (sk->sk_state == DCCP_CLOSING) {
545 /* FIXME: should start at 2 * RTT */
546 /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
547 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
548 inet_csk(sk)->icsk_rto,
549 DCCP_RTO_MAX);
550#if 0
551 /* Yeah, we should use sk->sk_prot->orphan_count, etc */
552 dccp_set_state(sk, DCCP_CLOSED);
553#endif
554 }
555
556 atomic_inc(sk->sk_prot->orphan_count);
557 if (sk->sk_state == DCCP_CLOSED)
558 inet_csk_destroy_sock(sk);
559
560 /* Otherwise, socket is reprieved until protocol close. */
561
562 bh_unlock_sock(sk);
563 local_bh_enable();
564 sock_put(sk);
565}
566
567void dccp_shutdown(struct sock *sk, int how)
568{
569 dccp_pr_debug("entry\n");
570}
571
572static struct proto_ops inet_dccp_ops = {
573 .family = PF_INET,
574 .owner = THIS_MODULE,
575 .release = inet_release,
576 .bind = inet_bind,
577 .connect = inet_stream_connect,
578 .socketpair = sock_no_socketpair,
579 .accept = inet_accept,
580 .getname = inet_getname,
581 /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
582 .poll = dccp_poll,
583 .ioctl = inet_ioctl,
584 /* FIXME: work on inet_listen to rename it to sock_common_listen */
585 .listen = inet_dccp_listen,
586 .shutdown = inet_shutdown,
587 .setsockopt = sock_common_setsockopt,
588 .getsockopt = sock_common_getsockopt,
589 .sendmsg = inet_sendmsg,
590 .recvmsg = sock_common_recvmsg,
591 .mmap = sock_no_mmap,
592 .sendpage = sock_no_sendpage,
593};
594
595extern struct net_proto_family inet_family_ops;
596
597static struct inet_protosw dccp_v4_protosw = {
598 .type = SOCK_DCCP,
599 .protocol = IPPROTO_DCCP,
600 .prot = &dccp_v4_prot,
601 .ops = &inet_dccp_ops,
602 .capability = -1,
603 .no_check = 0,
604 .flags = 0,
605};
606
607/*
608 * This is the global socket data structure used for responding to
609 * the Out-of-the-blue (OOTB) packets. A control sock will be created
610 * for this socket at the initialization time.
611 */
612struct socket *dccp_ctl_socket;
613
614static char dccp_ctl_socket_err_msg[] __initdata =
615 KERN_ERR "DCCP: Failed to create the control socket.\n";
616
617static int __init dccp_ctl_sock_init(void)
618{
619 int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
620 &dccp_ctl_socket);
621 if (rc < 0)
622 printk(dccp_ctl_socket_err_msg);
623 else {
624 dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
625 inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;
626
627 /* Unhash it so that IP input processing does not even
628 * see it; we do not wish this socket to see incoming
629 * packets.
630 */
631 dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
632 }
633
634 return rc;
635}
636
637#ifdef CONFIG_IP_DCCP_UNLOAD_HACK
638void dccp_ctl_sock_exit(void)
639{
640 if (dccp_ctl_socket != NULL) {
641 sock_release(dccp_ctl_socket);
642 dccp_ctl_socket = NULL;
643 }
644}
645
646EXPORT_SYMBOL_GPL(dccp_ctl_sock_exit);
647#endif
648
649static int __init init_dccp_v4_mibs(void)
650{
651 int rc = -ENOMEM;
652
653 dccp_statistics[0] = alloc_percpu(struct dccp_mib);
654 if (dccp_statistics[0] == NULL)
655 goto out;
656
657 dccp_statistics[1] = alloc_percpu(struct dccp_mib);
658 if (dccp_statistics[1] == NULL)
659 goto out_free_one;
660
661 rc = 0;
662out:
663 return rc;
664out_free_one:
665 free_percpu(dccp_statistics[0]);
666 dccp_statistics[0] = NULL;
667 goto out;
668
669}
670
671static int thash_entries;
672module_param(thash_entries, int, 0444);
673MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
674
675#ifdef CONFIG_IP_DCCP_DEBUG
676int dccp_debug;
677module_param(dccp_debug, int, 0444);
678MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
679#endif
680
681static int __init dccp_init(void)
682{
683 unsigned long goal;
684 int ehash_order, bhash_order, i;
685 int rc = proto_register(&dccp_v4_prot, 1);
686
687 if (rc)
688 goto out;
689
690 dccp_hashinfo.bind_bucket_cachep =
691 kmem_cache_create("dccp_bind_bucket",
692 sizeof(struct inet_bind_bucket), 0,
693 SLAB_HWCACHE_ALIGN, NULL, NULL);
694 if (!dccp_hashinfo.bind_bucket_cachep)
695 goto out_proto_unregister;
696
697 /*
698 * Size and allocate the main established and bind bucket
699 * hash tables.
700 *
701 * The methodology is similar to that of the buffer cache.
702 */
703 if (num_physpages >= (128 * 1024))
704 goal = num_physpages >> (21 - PAGE_SHIFT);
705 else
706 goal = num_physpages >> (23 - PAGE_SHIFT);
707
708 if (thash_entries)
709 goal = (thash_entries *
710 sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
711 for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
712 ;
713 do {
714 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
715 sizeof(struct inet_ehash_bucket);
716 dccp_hashinfo.ehash_size >>= 1;
717 while (dccp_hashinfo.ehash_size &
718 (dccp_hashinfo.ehash_size - 1))
719 dccp_hashinfo.ehash_size--;
720 dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
721 __get_free_pages(GFP_ATOMIC, ehash_order);
722 } while (!dccp_hashinfo.ehash && --ehash_order > 0);
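/*
 * Rough illustration, assuming 4 KiB pages and 512 MiB of memory:
 * num_physpages is 131072, goal becomes 131072 >> 9 = 256 pages and
 * ehash_order ends up as 8; the loop above then carves the largest
 * power-of-two number of buckets (halved once) that fits in those pages,
 * falling back to smaller orders if the allocation fails.
 */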
723
724 if (!dccp_hashinfo.ehash) {
725 printk(KERN_CRIT "Failed to allocate DCCP "
726 "established hash table\n");
727 goto out_free_bind_bucket_cachep;
728 }
729
730 for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
731 rwlock_init(&dccp_hashinfo.ehash[i].lock);
732 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
733 }
734
735 bhash_order = ehash_order;
736
737 do {
738 dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
739 sizeof(struct inet_bind_hashbucket);
740 if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
741 bhash_order > 0)
742 continue;
743 dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
744 __get_free_pages(GFP_ATOMIC, bhash_order);
745 } while (!dccp_hashinfo.bhash && --bhash_order >= 0);
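/*
 * Note that the continue above jumps to the while condition, so a bind
 * hash that would exceed 64K buckets simply shrinks bhash_order and
 * retries instead of allocating.
 */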
746
747 if (!dccp_hashinfo.bhash) {
748 printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
749 goto out_free_dccp_ehash;
750 }
751
752 for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
753 spin_lock_init(&dccp_hashinfo.bhash[i].lock);
754 INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
755 }
756
757 if (init_dccp_v4_mibs())
758 goto out_free_dccp_bhash;
759
760 rc = -EAGAIN;
761 if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
762 goto out_free_dccp_v4_mibs;
763
764 inet_register_protosw(&dccp_v4_protosw);
765
766 rc = dccp_ctl_sock_init();
767 if (rc)
768 goto out_unregister_protosw;
769out:
770 return rc;
771out_unregister_protosw:
772 inet_unregister_protosw(&dccp_v4_protosw);
773 inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
774out_free_dccp_v4_mibs:
775 free_percpu(dccp_statistics[0]);
776 free_percpu(dccp_statistics[1]);
777 dccp_statistics[0] = dccp_statistics[1] = NULL;
778out_free_dccp_bhash:
779 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
780 dccp_hashinfo.bhash = NULL;
781out_free_dccp_ehash:
782 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
783 dccp_hashinfo.ehash = NULL;
784out_free_bind_bucket_cachep:
785 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
786 dccp_hashinfo.bind_bucket_cachep = NULL;
787out_proto_unregister:
788 proto_unregister(&dccp_v4_prot);
789 goto out;
790}
791
792static const char dccp_del_proto_err_msg[] __exitdata =
793 KERN_ERR "can't remove dccp net_protocol\n";
794
795static void __exit dccp_fini(void)
796{
797 inet_unregister_protosw(&dccp_v4_protosw);
798
799 if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
800 printk(dccp_del_proto_err_msg);
801
802 free_percpu(dccp_statistics[0]);
803 free_percpu(dccp_statistics[1]);
804 free_pages((unsigned long)dccp_hashinfo.bhash,
805 get_order(dccp_hashinfo.bhash_size *
806 sizeof(struct inet_bind_hashbucket)));
807 free_pages((unsigned long)dccp_hashinfo.ehash,
808 get_order(dccp_hashinfo.ehash_size *
809 sizeof(struct inet_ehash_bucket)));
810 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
811 proto_unregister(&dccp_v4_prot);
812}
813
814module_init(dccp_init);
815module_exit(dccp_fini);
816
817/*
818 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
819 * values directly. Also cover the case where the protocol is not specified,
820 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
821 */
822MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
823MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
824MODULE_LICENSE("GPL");
825MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
826MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
new file mode 100644
index 000000000000..aa34b576e228
--- /dev/null
+++ b/net/dccp/timer.c
@@ -0,0 +1,255 @@
1/*
2 * net/dccp/timer.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/dccp.h>
15#include <linux/skbuff.h>
16
17#include "dccp.h"
18
19static void dccp_write_timer(unsigned long data);
20static void dccp_keepalive_timer(unsigned long data);
21static void dccp_delack_timer(unsigned long data);
22
23void dccp_init_xmit_timers(struct sock *sk)
24{
25 inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
26 &dccp_keepalive_timer);
27}
28
29static void dccp_write_err(struct sock *sk)
30{
31 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
32 sk->sk_error_report(sk);
33
34 dccp_v4_send_reset(sk, DCCP_RESET_CODE_ABORTED);
35 dccp_done(sk);
36 DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
37}
38
39/* A write timeout has occurred. Process the after effects. */
40static int dccp_write_timeout(struct sock *sk)
41{
42 const struct inet_connection_sock *icsk = inet_csk(sk);
43 int retry_until;
44
45 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
46 if (icsk->icsk_retransmits != 0)
47 dst_negative_advice(&sk->sk_dst_cache);
48 retry_until = icsk->icsk_syn_retries ? :
49 /* FIXME! */ 3 /* FIXME! sysctl_tcp_syn_retries */;
50 } else {
51 if (icsk->icsk_retransmits >=
52 /* FIXME! sysctl_tcp_retries1 */ 5 /* FIXME! */) {
53 /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
54 black hole detection. :-(
55
56 This is the place to make it. It is not made. I do not want
57 to make it. It is disgusting. It does not work in any
58 case. Let me cite the same draft, which requires
59 us to implement this:
60
61 "The one security concern raised by this memo is that ICMP black holes
62 are often caused by over-zealous security administrators who block
63 all ICMP messages. It is vitally important that those who design and
64 deploy security systems understand the impact of strict filtering on
65 upper-layer protocols. The safest web site in the world is worthless
66 if most TCP implementations cannot transfer data from it. It would
67 be far nicer to have all of the black holes fixed rather than fixing
68 all of the TCP implementations."
69
70 Golden words :-).
71 */
72
73 dst_negative_advice(&sk->sk_dst_cache);
74 }
75
76 retry_until = /* FIXME! */ 15 /* FIXME! sysctl_tcp_retries2 */;
77 /*
78 * FIXME: see tcp_write_timeout and tcp_out_of_resources
79 */
80 }
81
82 if (icsk->icsk_retransmits >= retry_until) {
83 /* Has it gone just too far? */
84 dccp_write_err(sk);
85 return 1;
86 }
87 return 0;
88}
89
90/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
91static void dccp_delack_timer(unsigned long data)
92{
93 struct sock *sk = (struct sock *)data;
94 struct inet_connection_sock *icsk = inet_csk(sk);
95
96 bh_lock_sock(sk);
97 if (sock_owned_by_user(sk)) {
98 /* Try again later. */
99 icsk->icsk_ack.blocked = 1;
100 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
101 sk_reset_timer(sk, &icsk->icsk_delack_timer,
102 jiffies + TCP_DELACK_MIN);
103 goto out;
104 }
105
106 if (sk->sk_state == DCCP_CLOSED ||
107 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
108 goto out;
109 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
110 sk_reset_timer(sk, &icsk->icsk_delack_timer,
111 icsk->icsk_ack.timeout);
112 goto out;
113 }
114
115 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
116
117 if (inet_csk_ack_scheduled(sk)) {
118 if (!icsk->icsk_ack.pingpong) {
119 /* Delayed ACK missed: inflate ATO. */
120 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
121 icsk->icsk_rto);
122 } else {
123 /* Delayed ACK missed: leave pingpong mode and
124 * deflate ATO.
125 */
126 icsk->icsk_ack.pingpong = 0;
127 icsk->icsk_ack.ato = TCP_ATO_MIN;
128 }
129 dccp_send_ack(sk);
130 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
131 }
132out:
133 bh_unlock_sock(sk);
134 sock_put(sk);
135}
136
137/*
138 * The DCCP retransmit timer.
139 */
140static void dccp_retransmit_timer(struct sock *sk)
141{
142 struct inet_connection_sock *icsk = inet_csk(sk);
143
144 /*
145 * sk->sk_send_head has to have one skb with
146 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
147 * packet types (REQUEST, RESPONSE, the ACK in the 3-way handshake
148 * (PARTOPEN timer), etc.).
149 */
150 BUG_TRAP(sk->sk_send_head != NULL);
151
152 /*
153 * More than 4MSL (8 minutes) has passed and a RESET(aborted) was
154 * sent; no need to retransmit, this sock is dead.
155 */
156 if (dccp_write_timeout(sk))
157 goto out;
158
159 /*
160 * We want to know the number of packets retransmitted, not the
161 * total number of retransmissions of clones of original packets.
162 */
163 if (icsk->icsk_retransmits == 0)
164 DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);
165
166 if (dccp_retransmit_skb(sk, sk->sk_send_head) < 0) {
167 /*
168 * Retransmission failed because of local congestion,
169 * do not backoff.
170 */
171 if (icsk->icsk_retransmits == 0)
172 icsk->icsk_retransmits = 1;
173 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
174 min(icsk->icsk_rto,
175 TCP_RESOURCE_PROBE_INTERVAL),
176 DCCP_RTO_MAX);
177 goto out;
178 }
179
180 icsk->icsk_backoff++;
181 icsk->icsk_retransmits++;
182
183 icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
184 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
185 DCCP_RTO_MAX);
186 if (icsk->icsk_retransmits > 3 /* FIXME: sysctl_dccp_retries1 */)
187 __sk_dst_reset(sk);
188out:;
189}
190
191static void dccp_write_timer(unsigned long data)
192{
193 struct sock *sk = (struct sock *)data;
194 struct inet_connection_sock *icsk = inet_csk(sk);
195 int event = 0;
196
197 bh_lock_sock(sk);
198 if (sock_owned_by_user(sk)) {
199 /* Try again later */
200 sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
201 jiffies + (HZ / 20));
202 goto out;
203 }
204
205 if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
206 goto out;
207
208 if (time_after(icsk->icsk_timeout, jiffies)) {
209 sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
210 icsk->icsk_timeout);
211 goto out;
212 }
213
214 event = icsk->icsk_pending;
215 icsk->icsk_pending = 0;
216
217 switch (event) {
218 case ICSK_TIME_RETRANS:
219 dccp_retransmit_timer(sk);
220 break;
221 }
222out:
223 bh_unlock_sock(sk);
224 sock_put(sk);
225}
226
227/*
228 * Timer for listening sockets
229 */
230static void dccp_response_timer(struct sock *sk)
231{
232 inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
233 DCCP_RTO_MAX);
234}
235
236static void dccp_keepalive_timer(unsigned long data)
237{
238 struct sock *sk = (struct sock *)data;
239
240 /* Only process if socket is not in use. */
241 bh_lock_sock(sk);
242 if (sock_owned_by_user(sk)) {
243 /* Try again later. */
244 inet_csk_reset_keepalive_timer(sk, HZ / 20);
245 goto out;
246 }
247
248 if (sk->sk_state == DCCP_LISTEN) {
249 dccp_response_timer(sk);
250 goto out;
251 }
252out:
253 bh_unlock_sock(sk);
254 sock_put(sk);
255}
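dccp_retransmit_timer() above backs off exactly as TCP does: on every expiry the RTO doubles and is clamped, and the socket is killed once dccp_write_timeout() decides enough retries have elapsed. A standalone sketch of that backoff arithmetic, with HZ and DCCP_RTO_MAX values assumed for illustration (the real constants live in net/dccp/dccp.h):

    /* Illustrative walk of icsk_rto = min(icsk_rto << 1, DCCP_RTO_MAX)
     * from dccp_retransmit_timer(); all constants here are assumed. */
    #include <stdio.h>

    #define HZ		1000
    #define DCCP_RTO_MAX	(64 * HZ)	/* assumed, as for TCP_RTO_MAX */

    int main(void)
    {
    	unsigned long rto = 3 * HZ;	/* assumed initial RTO */
    	int n;

    	for (n = 1; n <= 8; n++) {
    		rto <<= 1;
    		if (rto > DCCP_RTO_MAX)
    			rto = DCCP_RTO_MAX;
    		printf("retransmit %d: next expiry in %lu ticks\n", n, rto);
    	}
    	return 0;
    }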
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acdd18e6adb2..621680f127af 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -118,7 +118,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
118#include <linux/netfilter.h> 118#include <linux/netfilter.h>
119#include <linux/seq_file.h> 119#include <linux/seq_file.h>
120#include <net/sock.h> 120#include <net/sock.h>
121#include <net/tcp.h> 121#include <net/tcp_states.h>
122#include <net/flow.h> 122#include <net/flow.h>
123#include <asm/system.h> 123#include <asm/system.h>
124#include <asm/ioctls.h> 124#include <asm/ioctls.h>
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1763 nskb = skb->next; 1763 nskb = skb->next;
1764 1764
1765 if (skb->len == 0) { 1765 if (skb->len == 0) {
1766 skb_unlink(skb); 1766 skb_unlink(skb, queue);
1767 kfree_skb(skb); 1767 kfree_skb(skb);
1768 /* 1768 /*
1769 * N.B. Don't refer to skb or cb after this point 1769 * N.B. Don't refer to skb or cb after this point
@@ -2064,7 +2064,7 @@ static struct notifier_block dn_dev_notifier = {
2064 .notifier_call = dn_device_event, 2064 .notifier_call = dn_device_event,
2065}; 2065};
2066 2066
2067extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *); 2067extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2068 2068
2069static struct packet_type dn_dix_packet_type = { 2069static struct packet_type dn_dix_packet_type = {
2070 .type = __constant_htons(ETH_P_DNA_RT), 2070 .type = __constant_htons(ETH_P_DNA_RT),
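The skb_unlink() conversion in dn_recvmsg() above (and in dn_nsp_out.c and af_econet.c below) follows an API change: the helper now takes the owning queue explicitly instead of deriving it from the skb, so it can grab that queue's lock. A hedged sketch of the new calling convention, assuming the usual sk_buff_head helpers and a caller that serializes against other users of the queue:

    /* Sketch of the two-argument skb_unlink() convention this patch
     * converts callers to: name the queue so the helper can take the
     * right lock before delinking. */
    #include <linux/skbuff.h>

    static void drop_empty_head(struct sk_buff_head *queue)
    {
    	struct sk_buff *skb = skb_peek(queue);

    	if (skb != NULL && skb->len == 0) {
    		skb_unlink(skb, queue);	/* detach from this queue... */
    		kfree_skb(skb);		/* ...then release it */
    	}
    }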
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 00233ecbc9cb..5610bb16dbf9 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -752,16 +752,16 @@ static void rtmsg_ifa(int event, struct dn_ifaddr *ifa)
752 752
753 skb = alloc_skb(size, GFP_KERNEL); 753 skb = alloc_skb(size, GFP_KERNEL);
754 if (!skb) { 754 if (!skb) {
755 netlink_set_err(rtnl, 0, RTMGRP_DECnet_IFADDR, ENOBUFS); 755 netlink_set_err(rtnl, 0, RTNLGRP_DECnet_IFADDR, ENOBUFS);
756 return; 756 return;
757 } 757 }
758 if (dn_dev_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) { 758 if (dn_dev_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
759 kfree_skb(skb); 759 kfree_skb(skb);
760 netlink_set_err(rtnl, 0, RTMGRP_DECnet_IFADDR, EINVAL); 760 netlink_set_err(rtnl, 0, RTNLGRP_DECnet_IFADDR, EINVAL);
761 return; 761 return;
762 } 762 }
763 NETLINK_CB(skb).dst_groups = RTMGRP_DECnet_IFADDR; 763 NETLINK_CB(skb).dst_group = RTNLGRP_DECnet_IFADDR;
764 netlink_broadcast(rtnl, skb, 0, RTMGRP_DECnet_IFADDR, GFP_KERNEL); 764 netlink_broadcast(rtnl, skb, 0, RTNLGRP_DECnet_IFADDR, GFP_KERNEL);
765} 765}
766 766
767static int dn_dev_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 767static int dn_dev_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
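The rtmsg_ifa() hunk above shows the shape of the rtnetlink multicast rework running through this patch: RTMGRP_* bitmask flags become numbered RTNLGRP_* groups, and NETLINK_CB(skb).dst_groups (a mask) becomes a single dst_group. A condensed sketch of the resulting notify pattern, using only the calls visible in the hunk:

    /* Post-conversion rtnetlink notify pattern, condensed from the
     * hunk above: one numbered group per message, not a bitmask. */
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    static void dn_notify_ifaddr(struct sock *rtnl, struct sk_buff *skb)
    {
    	NETLINK_CB(skb).dst_group = RTNLGRP_DECnet_IFADDR;
    	netlink_broadcast(rtnl, skb, 0, RTNLGRP_DECnet_IFADDR, GFP_KERNEL);
    }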
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 202dbde9850d..369f25b60f3f 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -60,7 +60,7 @@
60#include <linux/inet.h> 60#include <linux/inet.h>
61#include <linux/route.h> 61#include <linux/route.h>
62#include <net/sock.h> 62#include <net/sock.h>
63#include <net/tcp.h> 63#include <net/tcp_states.h>
64#include <asm/system.h> 64#include <asm/system.h>
65#include <linux/fcntl.h> 65#include <linux/fcntl.h>
66#include <linux/mm.h> 66#include <linux/mm.h>
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8cce1fdbda90..e0bebf4bbcad 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
479 xmit_count = cb2->xmit_count; 479 xmit_count = cb2->xmit_count;
480 segnum = cb2->segnum; 480 segnum = cb2->segnum;
481 /* Remove and drop ack'ed packet */ 481 /* Remove and drop ack'ed packet */
482 skb_unlink(ack); 482 skb_unlink(ack, q);
483 kfree_skb(ack); 483 kfree_skb(ack);
484 ack = NULL; 484 ack = NULL;
485 485
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 2399fa8a3f86..2c915f305be3 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -572,7 +572,7 @@ static int dn_route_ptp_hello(struct sk_buff *skb)
572 return NET_RX_SUCCESS; 572 return NET_RX_SUCCESS;
573} 573}
574 574
575int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 575int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
576{ 576{
577 struct dn_skb_cb *cb; 577 struct dn_skb_cb *cb;
578 unsigned char flags = 0; 578 unsigned char flags = 0;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 28ba5777a25a..eeba56f99323 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
79static DEFINE_RWLOCK(dn_fib_tables_lock); 79static DEFINE_RWLOCK(dn_fib_tables_lock);
80struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1]; 80struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
81 81
82static kmem_cache_t *dn_hash_kmem; 82static kmem_cache_t *dn_hash_kmem __read_mostly;
83static int dn_fib_hash_zombies; 83static int dn_fib_hash_zombies;
84 84
85static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) 85static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -349,10 +349,10 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, int tb_id,
349 kfree_skb(skb); 349 kfree_skb(skb);
350 return; 350 return;
351 } 351 }
352 NETLINK_CB(skb).dst_groups = RTMGRP_DECnet_ROUTE; 352 NETLINK_CB(skb).dst_group = RTNLGRP_DECnet_ROUTE;
353 if (nlh->nlmsg_flags & NLM_F_ECHO) 353 if (nlh->nlmsg_flags & NLM_F_ECHO)
354 atomic_inc(&skb->users); 354 atomic_inc(&skb->users);
355 netlink_broadcast(rtnl, skb, pid, RTMGRP_DECnet_ROUTE, GFP_KERNEL); 355 netlink_broadcast(rtnl, skb, pid, RTNLGRP_DECnet_ROUTE, GFP_KERNEL);
356 if (nlh->nlmsg_flags & NLM_F_ECHO) 356 if (nlh->nlmsg_flags & NLM_F_ECHO)
357 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); 357 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
358} 358}
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 284a9998e53d..1ab94c6e22ed 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -19,6 +19,7 @@
19#include <linux/netfilter.h> 19#include <linux/netfilter.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/netlink.h> 21#include <linux/netlink.h>
22#include <linux/netfilter_decnet.h>
22 23
23#include <net/sock.h> 24#include <net/sock.h>
24#include <net/flow.h> 25#include <net/flow.h>
@@ -71,10 +72,10 @@ static void dnrmg_send_peer(struct sk_buff *skb)
71 72
72 switch(flags & DN_RT_CNTL_MSK) { 73 switch(flags & DN_RT_CNTL_MSK) {
73 case DN_RT_PKT_L1RT: 74 case DN_RT_PKT_L1RT:
74 group = DNRMG_L1_GROUP; 75 group = DNRNG_NLGRP_L1;
75 break; 76 break;
76 case DN_RT_PKT_L2RT: 77 case DN_RT_PKT_L2RT:
77 group = DNRMG_L2_GROUP; 78 group = DNRNG_NLGRP_L2;
78 break; 79 break;
79 default: 80 default:
80 return; 81 return;
@@ -83,7 +84,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
83 skb2 = dnrmg_build_message(skb, &status); 84 skb2 = dnrmg_build_message(skb, &status);
84 if (skb2 == NULL) 85 if (skb2 == NULL)
85 return; 86 return;
86 NETLINK_CB(skb2).dst_groups = group; 87 NETLINK_CB(skb2).dst_group = group;
87 netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); 88 netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC);
88} 89}
89 90
@@ -138,7 +139,8 @@ static int __init init(void)
138{ 139{
139 int rv = 0; 140 int rv = 0;
140 141
141 dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, dnrmg_receive_user_sk); 142 dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
143 dnrmg_receive_user_sk, THIS_MODULE);
142 if (dnrmg == NULL) { 144 if (dnrmg == NULL) {
143 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); 145 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
144 return -ENOMEM; 146 return -ENOMEM;
@@ -162,6 +164,7 @@ static void __exit fini(void)
162MODULE_DESCRIPTION("DECnet Routing Message Grabulator"); 164MODULE_DESCRIPTION("DECnet Routing Message Grabulator");
163MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>"); 165MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>");
164MODULE_LICENSE("GPL"); 166MODULE_LICENSE("GPL");
167MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG);
165 168
166module_init(init); 169module_init(init);
167module_exit(fini); 170module_exit(fini);
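The init() change above reflects netlink_kernel_create() growing two parameters: the number of multicast groups the socket serves and the owning module. A hedged sketch of the widened call, reusing the names from the hunk; the input callback's (struct sock *, int) signature is assumed from the era's convention:

    /* Sketch of the four-argument netlink_kernel_create() this patch
     * converts to; the callback signature is an assumption. */
    #include <linux/netlink.h>
    #include <linux/module.h>

    static void dnrmg_receive_user_sk(struct sock *sk, int len);
    static struct sock *dnrmg;

    static int __init dnrmg_example_init(void)
    {
    	dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
    				      dnrmg_receive_user_sk, THIS_MODULE);
    	return dnrmg ? 0 : -ENOMEM;
    }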
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index de691e119e17..4a62093eb343 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -159,7 +159,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
159 err = memcpy_toiovec(msg->msg_iov, skb->data, copied); 159 err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
160 if (err) 160 if (err)
161 goto out_free; 161 goto out_free;
162 sk->sk_stamp = skb->stamp; 162 skb_get_timestamp(skb, &sk->sk_stamp);
163 163
164 if (msg->msg_name) 164 if (msg->msg_name)
165 memcpy(msg->msg_name, skb->cb, msg->msg_namelen); 165 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
869 869
870foundit: 870foundit:
871 tx_result(skb->sk, eb->cookie, result); 871 tx_result(skb->sk, eb->cookie, result);
872 skb_unlink(skb); 872 skb_unlink(skb, &aun_queue);
873 spin_unlock_irqrestore(&aun_queue_lock, flags); 873 spin_unlock_irqrestore(&aun_queue_lock, flags);
874 kfree_skb(skb); 874 kfree_skb(skb);
875} 875}
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
947 { 947 {
948 tx_result(skb->sk, eb->cookie, 948 tx_result(skb->sk, eb->cookie,
949 ECTYPE_TRANSMIT_NOT_PRESENT); 949 ECTYPE_TRANSMIT_NOT_PRESENT);
950 skb_unlink(skb); 950 skb_unlink(skb, &aun_queue);
951 kfree_skb(skb); 951 kfree_skb(skb);
952 } 952 }
953 skb = newskb; 953 skb = newskb;
@@ -1009,7 +1009,7 @@ release:
1009 * Receive an Econet frame from a device. 1009 * Receive an Econet frame from a device.
1010 */ 1010 */
1011 1011
1012static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 1012static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
1013{ 1013{
1014 struct ec_framehdr *hdr; 1014 struct ec_framehdr *hdr;
1015 struct sock *sk; 1015 struct sock *sk;
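econet_recvmsg() above stops poking skb->stamp directly and goes through skb_get_timestamp(), which copies the receive time into a caller-supplied struct timeval. A minimal sketch of the accessor as used here, assuming sk->sk_stamp stays a timeval as the converted line implies:

    /* Sketch of the skb_get_timestamp() accessor this hunk adopts;
     * assumes sk->sk_stamp is a struct timeval. */
    #include <net/sock.h>
    #include <linux/skbuff.h>

    static void stamp_sock_from_skb(struct sock *sk, const struct sk_buff *skb)
    {
    	skb_get_timestamp(skb, &sk->sk_stamp);
    }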
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f6dbfb99b14d..87a052a9a84f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -62,8 +62,6 @@
62#include <asm/system.h> 62#include <asm/system.h>
63#include <asm/checksum.h> 63#include <asm/checksum.h>
64 64
65extern int __init netdev_boot_setup(char *str);
66
67__setup("ether=", netdev_boot_setup); 65__setup("ether=", netdev_boot_setup);
68 66
69/* 67/*
@@ -163,7 +161,6 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
163 skb->mac.raw=skb->data; 161 skb->mac.raw=skb->data;
164 skb_pull(skb,ETH_HLEN); 162 skb_pull(skb,ETH_HLEN);
165 eth = eth_hdr(skb); 163 eth = eth_hdr(skb);
166 skb->input_dev = dev;
167 164
168 if(*eth->h_dest&1) 165 if(*eth->h_dest&1)
169 { 166 {
diff --git a/net/ethernet/sysctl_net_ether.c b/net/ethernet/sysctl_net_ether.c
index b81a6d532342..66b39fc342d2 100644
--- a/net/ethernet/sysctl_net_ether.c
+++ b/net/ethernet/sysctl_net_ether.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <linux/if_ether.h>
10 11
11ctl_table ether_table[] = { 12ctl_table ether_table[] = {
12 {0} 13 {0}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 0b3d9f1d8069..e55136ae09f4 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -413,20 +413,19 @@ config INET_TUNNEL
413 413
414 If unsure, say Y. 414 If unsure, say Y.
415 415
416config IP_TCPDIAG 416config INET_DIAG
417 tristate "IP: TCP socket monitoring interface" 417 tristate "INET: socket monitoring interface"
418 default y 418 default y
419 ---help--- 419 ---help---
420 Support for TCP socket monitoring interface used by native Linux 420 Support for INET (TCP, DCCP, etc) socket monitoring interface used by
421 tools such as ss. ss is included in iproute2, currently downloadable 421 native Linux tools such as ss. ss is included in iproute2, currently
422 at <http://developer.osdl.org/dev/iproute2>. If you want IPv6 support 422 downloadable at <http://developer.osdl.org/dev/iproute2>.
423 and have selected IPv6 as a module, you need to build this as a
424 module too.
425 423
426 If unsure, say Y. 424 If unsure, say Y.
427 425
428config IP_TCPDIAG_IPV6 426config INET_TCP_DIAG
429 def_bool (IP_TCPDIAG=y && IPV6=y) || (IP_TCPDIAG=m && IPV6) 427 depends on INET_DIAG
428 def_tristate INET_DIAG
430 429
431config TCP_CONG_ADVANCED 430config TCP_CONG_ADVANCED
432 bool "TCP: advanced congestion control" 431 bool "TCP: advanced congestion control"
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 55dc6cca1e7b..f0435d00db6b 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -4,11 +4,12 @@
4 4
5obj-y := route.o inetpeer.o protocol.o \ 5obj-y := route.o inetpeer.o protocol.o \
6 ip_input.o ip_fragment.o ip_forward.o ip_options.o \ 6 ip_input.o ip_fragment.o ip_forward.o ip_options.o \
7 ip_output.o ip_sockglue.o \ 7 ip_output.o ip_sockglue.o inet_hashtables.o \
8 inet_timewait_sock.o inet_connection_sock.o \
8 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ 9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
9 tcp_minisocks.o tcp_cong.o \ 10 tcp_minisocks.o tcp_cong.o \
10 datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \ 11 datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
11 sysctl_net_ipv4.o fib_frontend.o fib_semantics.o 12 sysctl_net_ipv4.o fib_frontend.o fib_semantics.o netfilter.o
12 13
13obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o 14obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
14obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o 15obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
@@ -29,8 +30,9 @@ obj-$(CONFIG_IP_ROUTE_MULTIPATH_WRANDOM) += multipath_wrandom.o
29obj-$(CONFIG_IP_ROUTE_MULTIPATH_DRR) += multipath_drr.o 30obj-$(CONFIG_IP_ROUTE_MULTIPATH_DRR) += multipath_drr.o
30obj-$(CONFIG_NETFILTER) += netfilter/ 31obj-$(CONFIG_NETFILTER) += netfilter/
31obj-$(CONFIG_IP_VS) += ipvs/ 32obj-$(CONFIG_IP_VS) += ipvs/
32obj-$(CONFIG_IP_TCPDIAG) += tcp_diag.o 33obj-$(CONFIG_INET_DIAG) += inet_diag.o
33obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o 34obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o
35obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
34obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o 36obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
35obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o 37obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o
36obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o 38obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 163ae4068b5f..bf147f8db399 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -99,6 +99,7 @@
99#include <net/arp.h> 99#include <net/arp.h>
100#include <net/route.h> 100#include <net/route.h>
101#include <net/ip_fib.h> 101#include <net/ip_fib.h>
102#include <net/inet_connection_sock.h>
102#include <net/tcp.h> 103#include <net/tcp.h>
103#include <net/udp.h> 104#include <net/udp.h>
104#include <linux/skbuff.h> 105#include <linux/skbuff.h>
@@ -112,11 +113,7 @@
112#include <linux/mroute.h> 113#include <linux/mroute.h>
113#endif 114#endif
114 115
115DEFINE_SNMP_STAT(struct linux_mib, net_statistics); 116DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly;
116
117#ifdef INET_REFCNT_DEBUG
118atomic_t inet_sock_nr;
119#endif
120 117
121extern void ip_mc_drop_socket(struct sock *sk); 118extern void ip_mc_drop_socket(struct sock *sk);
122 119
@@ -153,11 +150,7 @@ void inet_sock_destruct(struct sock *sk)
153 if (inet->opt) 150 if (inet->opt)
154 kfree(inet->opt); 151 kfree(inet->opt);
155 dst_release(sk->sk_dst_cache); 152 dst_release(sk->sk_dst_cache);
156#ifdef INET_REFCNT_DEBUG 153 sk_refcnt_debug_dec(sk);
157 atomic_dec(&inet_sock_nr);
158 printk(KERN_DEBUG "INET socket %p released, %d are still alive\n",
159 sk, atomic_read(&inet_sock_nr));
160#endif
161} 154}
162 155
163/* 156/*
@@ -210,7 +203,7 @@ int inet_listen(struct socket *sock, int backlog)
210 * we can only allow the backlog to be adjusted. 203 * we can only allow the backlog to be adjusted.
211 */ 204 */
212 if (old_state != TCP_LISTEN) { 205 if (old_state != TCP_LISTEN) {
213 err = tcp_listen_start(sk); 206 err = inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
214 if (err) 207 if (err)
215 goto out; 208 goto out;
216 } 209 }
@@ -235,12 +228,14 @@ static int inet_create(struct socket *sock, int protocol)
235 struct proto *answer_prot; 228 struct proto *answer_prot;
236 unsigned char answer_flags; 229 unsigned char answer_flags;
237 char answer_no_check; 230 char answer_no_check;
238 int err; 231 int try_loading_module = 0;
232 int err = -ESOCKTNOSUPPORT;
239 233
240 sock->state = SS_UNCONNECTED; 234 sock->state = SS_UNCONNECTED;
241 235
242 /* Look for the requested type/protocol pair. */ 236 /* Look for the requested type/protocol pair. */
243 answer = NULL; 237 answer = NULL;
238lookup_protocol:
244 rcu_read_lock(); 239 rcu_read_lock();
245 list_for_each_rcu(p, &inetsw[sock->type]) { 240 list_for_each_rcu(p, &inetsw[sock->type]) {
246 answer = list_entry(p, struct inet_protosw, list); 241 answer = list_entry(p, struct inet_protosw, list);
@@ -261,9 +256,28 @@ static int inet_create(struct socket *sock, int protocol)
261 answer = NULL; 256 answer = NULL;
262 } 257 }
263 258
264 err = -ESOCKTNOSUPPORT; 259 if (unlikely(answer == NULL)) {
265 if (!answer) 260 if (try_loading_module < 2) {
266 goto out_rcu_unlock; 261 rcu_read_unlock();
262 /*
263 * Be more specific, e.g. net-pf-2-proto-132-type-1
264 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
265 */
266 if (++try_loading_module == 1)
267 request_module("net-pf-%d-proto-%d-type-%d",
268 PF_INET, protocol, sock->type);
269 /*
270 * Fall back to generic, e.g. net-pf-2-proto-132
271 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
272 */
273 else
274 request_module("net-pf-%d-proto-%d",
275 PF_INET, protocol);
276 goto lookup_protocol;
277 } else
278 goto out_rcu_unlock;
279 }
280
267 err = -EPERM; 281 err = -EPERM;
268 if (answer->capability > 0 && !capable(answer->capability)) 282 if (answer->capability > 0 && !capable(answer->capability))
269 goto out_rcu_unlock; 283 goto out_rcu_unlock;
@@ -317,9 +331,7 @@ static int inet_create(struct socket *sock, int protocol)
317 inet->mc_index = 0; 331 inet->mc_index = 0;
318 inet->mc_list = NULL; 332 inet->mc_list = NULL;
319 333
320#ifdef INET_REFCNT_DEBUG 334 sk_refcnt_debug_inc(sk);
321 atomic_inc(&inet_sock_nr);
322#endif
323 335
324 if (inet->num) { 336 if (inet->num) {
325 /* It assumes that any protocol which allows 337 /* It assumes that any protocol which allows
@@ -847,10 +859,6 @@ static struct net_proto_family inet_family_ops = {
847 .owner = THIS_MODULE, 859 .owner = THIS_MODULE,
848}; 860};
849 861
850
851extern void tcp_init(void);
852extern void tcp_v4_init(struct net_proto_family *);
853
854/* Upon startup we insert all the elements in inetsw_array[] into 862/* Upon startup we insert all the elements in inetsw_array[] into
855 * the linked list inetsw. 863 * the linked list inetsw.
856 */ 864 */
@@ -961,6 +969,119 @@ void inet_unregister_protosw(struct inet_protosw *p)
961 } 969 }
962} 970}
963 971
972/*
973 * Shall we try to damage output packets if routing dev changes?
974 */
975
976int sysctl_ip_dynaddr;
977
978static int inet_sk_reselect_saddr(struct sock *sk)
979{
980 struct inet_sock *inet = inet_sk(sk);
981 int err;
982 struct rtable *rt;
983 __u32 old_saddr = inet->saddr;
984 __u32 new_saddr;
985 __u32 daddr = inet->daddr;
986
987 if (inet->opt && inet->opt->srr)
988 daddr = inet->opt->faddr;
989
990 /* Query new route. */
991 err = ip_route_connect(&rt, daddr, 0,
992 RT_CONN_FLAGS(sk),
993 sk->sk_bound_dev_if,
994 sk->sk_protocol,
995 inet->sport, inet->dport, sk);
996 if (err)
997 return err;
998
999 sk_setup_caps(sk, &rt->u.dst);
1000
1001 new_saddr = rt->rt_src;
1002
1003 if (new_saddr == old_saddr)
1004 return 0;
1005
1006 if (sysctl_ip_dynaddr > 1) {
1007 printk(KERN_INFO "%s(): shifting inet->"
1008 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1009 __FUNCTION__,
1010 NIPQUAD(old_saddr),
1011 NIPQUAD(new_saddr));
1012 }
1013
1014 inet->saddr = inet->rcv_saddr = new_saddr;
1015
1016 /*
1017 * XXX The only one ugly spot where we need to
1018 * XXX really change the sockets identity after
1019 * XXX it has entered the hashes. -DaveM
1020 *
1021 * Besides that, it does not check for connection
1022 * uniqueness. Wait for troubles.
1023 */
1024 __sk_prot_rehash(sk);
1025 return 0;
1026}
1027
1028int inet_sk_rebuild_header(struct sock *sk)
1029{
1030 struct inet_sock *inet = inet_sk(sk);
1031 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1032 u32 daddr;
1033 int err;
1034
1035 /* Route is OK, nothing to do. */
1036 if (rt)
1037 return 0;
1038
1039 /* Reroute. */
1040 daddr = inet->daddr;
1041 if (inet->opt && inet->opt->srr)
1042 daddr = inet->opt->faddr;
1043{
1044 struct flowi fl = {
1045 .oif = sk->sk_bound_dev_if,
1046 .nl_u = {
1047 .ip4_u = {
1048 .daddr = daddr,
1049 .saddr = inet->saddr,
1050 .tos = RT_CONN_FLAGS(sk),
1051 },
1052 },
1053 .proto = sk->sk_protocol,
1054 .uli_u = {
1055 .ports = {
1056 .sport = inet->sport,
1057 .dport = inet->dport,
1058 },
1059 },
1060 };
1061
1062 err = ip_route_output_flow(&rt, &fl, sk, 0);
1063}
1064 if (!err)
1065 sk_setup_caps(sk, &rt->u.dst);
1066 else {
1067 /* Routing failed... */
1068 sk->sk_route_caps = 0;
1069 /*
1070 * Other protocols have to map their equivalent state to TCP_SYN_SENT.
1071 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
1072 */
1073 if (!sysctl_ip_dynaddr ||
1074 sk->sk_state != TCP_SYN_SENT ||
1075 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1076 (err = inet_sk_reselect_saddr(sk)) != 0)
1077 sk->sk_err_soft = -err;
1078 }
1079
1080 return err;
1081}
1082
1083EXPORT_SYMBOL(inet_sk_rebuild_header);
1084
964#ifdef CONFIG_IP_MULTICAST 1085#ifdef CONFIG_IP_MULTICAST
965static struct net_protocol igmp_protocol = { 1086static struct net_protocol igmp_protocol = {
966 .handler = igmp_rcv, 1087 .handler = igmp_rcv,
@@ -1007,7 +1128,6 @@ static int __init init_ipv4_mibs(void)
1007} 1128}
1008 1129
1009static int ipv4_proc_init(void); 1130static int ipv4_proc_init(void);
1010extern void ipfrag_init(void);
1011 1131
1012/* 1132/*
1013 * IP protocol layer initialiser 1133 * IP protocol layer initialiser
@@ -1128,19 +1248,10 @@ module_init(inet_init);
1128/* ------------------------------------------------------------------------ */ 1248/* ------------------------------------------------------------------------ */
1129 1249
1130#ifdef CONFIG_PROC_FS 1250#ifdef CONFIG_PROC_FS
1131extern int fib_proc_init(void);
1132extern void fib_proc_exit(void);
1133#ifdef CONFIG_IP_FIB_TRIE 1251#ifdef CONFIG_IP_FIB_TRIE
1134extern int fib_stat_proc_init(void); 1252extern int fib_stat_proc_init(void);
1135extern void fib_stat_proc_exit(void); 1253extern void fib_stat_proc_exit(void);
1136#endif 1254#endif
1137extern int ip_misc_proc_init(void);
1138extern int raw_proc_init(void);
1139extern void raw_proc_exit(void);
1140extern int tcp4_proc_init(void);
1141extern void tcp4_proc_exit(void);
1142extern int udp4_proc_init(void);
1143extern void udp4_proc_exit(void);
1144 1255
1145static int __init ipv4_proc_init(void) 1256static int __init ipv4_proc_init(void)
1146{ 1257{
@@ -1205,7 +1316,3 @@ EXPORT_SYMBOL(inet_stream_ops);
1205EXPORT_SYMBOL(inet_unregister_protosw); 1316EXPORT_SYMBOL(inet_unregister_protosw);
1206EXPORT_SYMBOL(net_statistics); 1317EXPORT_SYMBOL(net_statistics);
1207EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); 1318EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
1208
1209#ifdef INET_REFCNT_DEBUG
1210EXPORT_SYMBOL(inet_sock_nr);
1211#endif
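inet_sk_rebuild_header() above re-resolves the route by building a struct flowi key from the socket and handing it to ip_route_output_flow(). A condensed sketch of just that lookup, with the field names taken verbatim from the hunk:

    /* Flow-keyed reroute condensed from inet_sk_rebuild_header()
     * above; all fields are as in the patch. */
    #include <net/ip.h>
    #include <net/route.h>

    static int inet_reroute(struct sock *sk, struct rtable **rtp, u32 daddr)
    {
    	struct inet_sock *inet = inet_sk(sk);
    	struct flowi fl = {
    		.oif = sk->sk_bound_dev_if,
    		.nl_u = { .ip4_u = { .daddr = daddr,
    				     .saddr = inet->saddr,
    				     .tos = RT_CONN_FLAGS(sk) } },
    		.proto = sk->sk_protocol,
    		.uli_u = { .ports = { .sport = inet->sport,
    				      .dport = inet->dport } },
    	};

    	return ip_route_output_flow(rtp, &fl, sk, 0);
    }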
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a642fd612853..8bf312bdea13 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -700,7 +700,7 @@ void arp_send(int type, int ptype, u32 dest_ip,
700static void parp_redo(struct sk_buff *skb) 700static void parp_redo(struct sk_buff *skb)
701{ 701{
702 nf_reset(skb); 702 nf_reset(skb);
703 arp_rcv(skb, skb->dev, NULL); 703 arp_rcv(skb, skb->dev, NULL, skb->dev);
704} 704}
705 705
706/* 706/*
@@ -865,7 +865,7 @@ static int arp_process(struct sk_buff *skb)
865 if (n) 865 if (n)
866 neigh_release(n); 866 neigh_release(n);
867 867
868 if (skb->stamp.tv_sec == LOCALLY_ENQUEUED || 868 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
869 skb->pkt_type == PACKET_HOST || 869 skb->pkt_type == PACKET_HOST ||
870 in_dev->arp_parms->proxy_delay == 0) { 870 in_dev->arp_parms->proxy_delay == 0) {
871 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 871 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
@@ -927,7 +927,7 @@ out:
927 * Receive an arp request from the device layer. 927 * Receive an arp request from the device layer.
928 */ 928 */
929 929
930int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 930int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
931{ 931{
932 struct arphdr *arp; 932 struct arphdr *arp;
933 933
@@ -948,6 +948,8 @@ int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
948 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 948 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
949 goto out_of_mem; 949 goto out_of_mem;
950 950
951 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
952
951 return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); 953 return NF_HOOK(NF_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
952 954
953freeskb: 955freeskb:
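arp_rcv() above replaces the old trick of smuggling state through skb->stamp.tv_sec with a typed per-skb control block: zero NEIGH_CB() on entry, then communicate via flag bits. A short sketch of that pattern; struct neighbour_cb and LOCALLY_ENQUEUED are assumed to come from the neighbour rework elsewhere in this series:

    /* Control-block pattern from the arp.c hunk above; the
     * neighbour_cb definition is assumed from this series. */
    #include <linux/string.h>
    #include <linux/skbuff.h>
    #include <net/neighbour.h>

    static void mark_locally_enqueued(struct sk_buff *skb)
    {
    	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
    	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
    }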
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index b1db561f2542..c1b42b5257f8 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -16,9 +16,10 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/in.h> 18#include <linux/in.h>
19#include <net/ip.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/tcp.h>
21#include <net/route.h> 21#include <net/route.h>
22#include <net/tcp_states.h>
22 23
23int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 24int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 25{
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index d8a10e3dd77d..ba2895ae8151 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1111,13 +1111,12 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
1111 struct sk_buff *skb = alloc_skb(size, GFP_KERNEL); 1111 struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
1112 1112
1113 if (!skb) 1113 if (!skb)
1114 netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS); 1114 netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS);
1115 else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) { 1115 else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
1116 kfree_skb(skb); 1116 kfree_skb(skb);
1117 netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL); 1117 netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL);
1118 } else { 1118 } else {
1119 NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_IFADDR; 1119 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_IFADDR, GFP_KERNEL);
1120 netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV4_IFADDR, GFP_KERNEL);
1121 } 1120 }
1122} 1121}
1123 1122
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ba57446d5d1f..b31ffc5053d2 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -331,8 +331,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
331 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 331 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
332 if (!x) 332 if (!x)
333 return; 333 return;
334 NETDEBUG(printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 334 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
335 ntohl(esph->spi), ntohl(iph->daddr))); 335 ntohl(esph->spi), ntohl(iph->daddr));
336 xfrm_state_put(x); 336 xfrm_state_put(x);
337} 337}
338 338
@@ -395,10 +395,10 @@ static int esp_init_state(struct xfrm_state *x)
395 395
396 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 396 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
397 crypto_tfm_alg_digestsize(esp->auth.tfm)) { 397 crypto_tfm_alg_digestsize(esp->auth.tfm)) {
398 NETDEBUG(printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", 398 NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
399 x->aalg->alg_name, 399 x->aalg->alg_name,
400 crypto_tfm_alg_digestsize(esp->auth.tfm), 400 crypto_tfm_alg_digestsize(esp->auth.tfm),
401 aalg_desc->uinfo.auth.icv_fullbits/8)); 401 aalg_desc->uinfo.auth.icv_fullbits/8);
402 goto error; 402 goto error;
403 } 403 }
404 404
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cd8e45ab9580..4e1379f71269 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -558,16 +558,15 @@ static void nl_fib_input(struct sock *sk, int len)
558 nl_fib_lookup(frn, tb); 558 nl_fib_lookup(frn, tb);
559 559
560 pid = nlh->nlmsg_pid; /*pid of sending process */ 560 pid = nlh->nlmsg_pid; /*pid of sending process */
561 NETLINK_CB(skb).groups = 0; /* not in mcast group */
562 NETLINK_CB(skb).pid = 0; /* from kernel */ 561 NETLINK_CB(skb).pid = 0; /* from kernel */
563 NETLINK_CB(skb).dst_pid = pid; 562 NETLINK_CB(skb).dst_pid = pid;
564 NETLINK_CB(skb).dst_groups = 0; /* unicast */ 563 NETLINK_CB(skb).dst_group = 0; /* unicast */
565 netlink_unicast(sk, skb, pid, MSG_DONTWAIT); 564 netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
566} 565}
567 566
568static void nl_fib_lookup_init(void) 567static void nl_fib_lookup_init(void)
569{ 568{
570 netlink_kernel_create(NETLINK_FIB_LOOKUP, nl_fib_input); 569 netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, THIS_MODULE);
571} 570}
572 571
573static void fib_disable_ip(struct net_device *dev, int force) 572static void fib_disable_ip(struct net_device *dev, int force)
@@ -662,5 +661,4 @@ void __init ip_fib_init(void)
662} 661}
663 662
664EXPORT_SYMBOL(inet_addr_type); 663EXPORT_SYMBOL(inet_addr_type);
665EXPORT_SYMBOL(ip_dev_find);
666EXPORT_SYMBOL(ip_rt_ioctl); 664EXPORT_SYMBOL(ip_rt_ioctl);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index b10d6bb5ef3d..2a8c9afc3695 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
45 45
46#include "fib_lookup.h" 46#include "fib_lookup.h"
47 47
48static kmem_cache_t *fn_hash_kmem; 48static kmem_cache_t *fn_hash_kmem __read_mostly;
49static kmem_cache_t *fn_alias_kmem; 49static kmem_cache_t *fn_alias_kmem __read_mostly;
50 50
51struct fib_node { 51struct fib_node {
52 struct hlist_node fn_hash; 52 struct hlist_node fn_hash;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index b729d97cfa93..ef6609ea0eb7 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -7,6 +7,7 @@
7 7
8struct fib_alias { 8struct fib_alias {
9 struct list_head fa_list; 9 struct list_head fa_list;
10 struct rcu_head rcu;
10 struct fib_info *fa_info; 11 struct fib_info *fa_info;
11 u8 fa_tos; 12 u8 fa_tos;
12 u8 fa_type; 13 u8 fa_type;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e278cb9d0075..d41219e8037c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -290,10 +290,10 @@ void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
290 kfree_skb(skb); 290 kfree_skb(skb);
291 return; 291 return;
292 } 292 }
293 NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE; 293 NETLINK_CB(skb).dst_group = RTNLGRP_IPV4_ROUTE;
294 if (n->nlmsg_flags&NLM_F_ECHO) 294 if (n->nlmsg_flags&NLM_F_ECHO)
295 atomic_inc(&skb->users); 295 atomic_inc(&skb->users);
296 netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL); 296 netlink_broadcast(rtnl, skb, pid, RTNLGRP_IPV4_ROUTE, GFP_KERNEL);
297 if (n->nlmsg_flags&NLM_F_ECHO) 297 if (n->nlmsg_flags&NLM_F_ECHO)
298 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); 298 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
299} 299}
@@ -854,6 +854,7 @@ failure:
854 return NULL; 854 return NULL;
855} 855}
856 856
857/* Note! fib_semantic_match intentionally uses RCU list functions. */
857int fib_semantic_match(struct list_head *head, const struct flowi *flp, 858int fib_semantic_match(struct list_head *head, const struct flowi *flp,
858 struct fib_result *res, __u32 zone, __u32 mask, 859 struct fib_result *res, __u32 zone, __u32 mask,
859 int prefixlen) 860 int prefixlen)
@@ -861,7 +862,7 @@ int fib_semantic_match(struct list_head *head, const struct flowi *flp,
861 struct fib_alias *fa; 862 struct fib_alias *fa;
862 int nh_sel = 0; 863 int nh_sel = 0;
863 864
864 list_for_each_entry(fa, head, fa_list) { 865 list_for_each_entry_rcu(fa, head, fa_list) {
865 int err; 866 int err;
866 867
867 if (fa->fa_tos && 868 if (fa->fa_tos &&
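The switch to list_for_each_entry_rcu() above means fib_semantic_match() may now run with only rcu_read_lock() held, while writers defer frees through call_rcu() (see __alias_free_mem() in the fib_trie.c hunk below). A minimal read-side sketch over the same fib_alias list, with fa_list/fa_tos as declared in the fib_lookup.h hunk above:

    /* RCU read-side walk mirroring the pattern fib_semantic_match()
     * adopts; nothing from the list may be used after unlock. */
    #include <linux/list.h>
    #include <linux/rcupdate.h>

    static int alias_has_tos(struct list_head *head, u8 tos)
    {
    	struct fib_alias *fa;
    	int hit = 0;

    	rcu_read_lock();
    	list_for_each_entry_rcu(fa, head, fa_list)
    		if (fa->fa_tos == tos) {
    			hit = 1;
    			break;
    		}
    	rcu_read_unlock();	/* fa must not be dereferenced past here */
    	return hit;
    }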
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index a701405fab0b..b2dea4e5da77 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -43,7 +43,7 @@
43 * 2 of the License, or (at your option) any later version. 43 * 2 of the License, or (at your option) any later version.
44 */ 44 */
45 45
46#define VERSION "0.325" 46#define VERSION "0.402"
47 47
48#include <linux/config.h> 48#include <linux/config.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
@@ -62,6 +62,7 @@
62#include <linux/netdevice.h> 62#include <linux/netdevice.h>
63#include <linux/if_arp.h> 63#include <linux/if_arp.h>
64#include <linux/proc_fs.h> 64#include <linux/proc_fs.h>
65#include <linux/rcupdate.h>
65#include <linux/skbuff.h> 66#include <linux/skbuff.h>
66#include <linux/netlink.h> 67#include <linux/netlink.h>
67#include <linux/init.h> 68#include <linux/init.h>
@@ -77,56 +78,55 @@
77#undef CONFIG_IP_FIB_TRIE_STATS 78#undef CONFIG_IP_FIB_TRIE_STATS
78#define MAX_CHILDS 16384 79#define MAX_CHILDS 16384
79 80
80#define EXTRACT(p, n, str) ((str)<<(p)>>(32-(n)))
81#define KEYLENGTH (8*sizeof(t_key)) 81#define KEYLENGTH (8*sizeof(t_key))
82#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l)) 82#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l))
83#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset)) 83#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset))
84 84
85static DEFINE_RWLOCK(fib_lock);
86
87typedef unsigned int t_key; 85typedef unsigned int t_key;
88 86
89#define T_TNODE 0 87#define T_TNODE 0
90#define T_LEAF 1 88#define T_LEAF 1
91#define NODE_TYPE_MASK 0x1UL 89#define NODE_TYPE_MASK 0x1UL
92#define NODE_PARENT(_node) \ 90#define NODE_PARENT(node) \
93 ((struct tnode *)((_node)->_parent & ~NODE_TYPE_MASK)) 91 ((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK)))
94#define NODE_SET_PARENT(_node, _ptr) \ 92
95 ((_node)->_parent = (((unsigned long)(_ptr)) | \ 93#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96 ((_node)->_parent & NODE_TYPE_MASK))) 94
97#define NODE_INIT_PARENT(_node, _type) \ 95#define NODE_SET_PARENT(node, ptr) \
98 ((_node)->_parent = (_type)) 96 rcu_assign_pointer((node)->parent, \
99#define NODE_TYPE(_node) \ 97 ((unsigned long)(ptr)) | NODE_TYPE(node))
100 ((_node)->_parent & NODE_TYPE_MASK) 98
101 99#define IS_TNODE(n) (!(n->parent & T_LEAF))
102#define IS_TNODE(n) (!(n->_parent & T_LEAF)) 100#define IS_LEAF(n) (n->parent & T_LEAF)
103#define IS_LEAF(n) (n->_parent & T_LEAF)
104 101
105struct node { 102struct node {
106 t_key key; 103 t_key key;
107 unsigned long _parent; 104 unsigned long parent;
108}; 105};
109 106
110struct leaf { 107struct leaf {
111 t_key key; 108 t_key key;
112 unsigned long _parent; 109 unsigned long parent;
113 struct hlist_head list; 110 struct hlist_head list;
111 struct rcu_head rcu;
114}; 112};
115 113
116struct leaf_info { 114struct leaf_info {
117 struct hlist_node hlist; 115 struct hlist_node hlist;
116 struct rcu_head rcu;
118 int plen; 117 int plen;
119 struct list_head falh; 118 struct list_head falh;
120}; 119};
121 120
122struct tnode { 121struct tnode {
123 t_key key; 122 t_key key;
124 unsigned long _parent; 123 unsigned long parent;
125 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */ 124 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
126 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */ 125 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
127 unsigned short full_children; /* KEYLENGTH bits needed */ 126 unsigned short full_children; /* KEYLENGTH bits needed */
128 unsigned short empty_children; /* KEYLENGTH bits needed */ 127 unsigned short empty_children; /* KEYLENGTH bits needed */
129 struct node *child[0]; 128 struct rcu_head rcu;
129 struct node *child[0];
130}; 130};
131 131
132#ifdef CONFIG_IP_FIB_TRIE_STATS 132#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -150,77 +150,45 @@ struct trie_stat {
150}; 150};
151 151
152struct trie { 152struct trie {
153 struct node *trie; 153 struct node *trie;
154#ifdef CONFIG_IP_FIB_TRIE_STATS 154#ifdef CONFIG_IP_FIB_TRIE_STATS
155 struct trie_use_stats stats; 155 struct trie_use_stats stats;
156#endif 156#endif
157 int size; 157 int size;
158 unsigned int revision; 158 unsigned int revision;
159}; 159};
160 160
161static int trie_debug = 0;
162
163static int tnode_full(struct tnode *tn, struct node *n);
164static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n); 161static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
165static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull); 162static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
166static int tnode_child_length(struct tnode *tn);
167static struct node *resize(struct trie *t, struct tnode *tn); 163static struct node *resize(struct trie *t, struct tnode *tn);
168static struct tnode *inflate(struct trie *t, struct tnode *tn, int *err); 164static struct tnode *inflate(struct trie *t, struct tnode *tn);
169static struct tnode *halve(struct trie *t, struct tnode *tn, int *err); 165static struct tnode *halve(struct trie *t, struct tnode *tn);
170static void tnode_free(struct tnode *tn); 166static void tnode_free(struct tnode *tn);
171static void trie_dump_seq(struct seq_file *seq, struct trie *t); 167static void trie_dump_seq(struct seq_file *seq, struct trie *t);
172extern struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
173extern int fib_detect_death(struct fib_info *fi, int order,
174 struct fib_info **last_resort, int *last_idx, int *dflt);
175
176extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa, int z, int tb_id,
177 struct nlmsghdr *n, struct netlink_skb_parms *req);
178 168
179static kmem_cache_t *fn_alias_kmem; 169static kmem_cache_t *fn_alias_kmem __read_mostly;
180static struct trie *trie_local = NULL, *trie_main = NULL; 170static struct trie *trie_local = NULL, *trie_main = NULL;
181 171
182static void trie_bug(char *err) 172
183{ 173/* rcu_read_lock needs to be held by the caller on the read side */
184 printk("Trie Bug: %s\n", err);
185 BUG();
186}
187 174
188static inline struct node *tnode_get_child(struct tnode *tn, int i) 175static inline struct node *tnode_get_child(struct tnode *tn, int i)
189{ 176{
190 if (i >= 1<<tn->bits) 177 BUG_ON(i >= 1 << tn->bits);
191 trie_bug("tnode_get_child");
192 178
193 return tn->child[i]; 179 return rcu_dereference(tn->child[i]);
194} 180}
195 181
196static inline int tnode_child_length(struct tnode *tn) 182static inline int tnode_child_length(const struct tnode *tn)
197{ 183{
198 return 1<<tn->bits; 184 return 1 << tn->bits;
199} 185}
200 186
201/*
202 _________________________________________________________________
203 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
204 ----------------------------------------------------------------
205 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
206
207 _________________________________________________________________
208 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
209 -----------------------------------------------------------------
210 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
211
212 tp->pos = 7
213 tp->bits = 3
214 n->pos = 15
215 n->bits=4
216 KEYLENGTH=32
217*/
218
219static inline t_key tkey_extract_bits(t_key a, int offset, int bits) 187static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
220{ 188{
221 if (offset < KEYLENGTH) 189 if (offset < KEYLENGTH)
222 return ((t_key)(a << offset)) >> (KEYLENGTH - bits); 190 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
223 else 191 else
224 return 0; 192 return 0;
225} 193}
226 194
@@ -233,8 +201,8 @@ static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
233{ 201{
234 if (bits == 0 || offset >= KEYLENGTH) 202 if (bits == 0 || offset >= KEYLENGTH)
235 return 1; 203 return 1;
236 bits = bits > KEYLENGTH ? KEYLENGTH : bits; 204 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
237 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0; 205 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
238} 206}
239 207
240static inline int tkey_mismatch(t_key a, int offset, t_key b) 208static inline int tkey_mismatch(t_key a, int offset, t_key b)
@@ -249,14 +217,6 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
249 return i; 217 return i;
250} 218}
251 219
252/* Candiate for fib_semantics */
253
254static void fn_free_alias(struct fib_alias *fa)
255{
256 fib_release_info(fa->fa_info);
257 kmem_cache_free(fn_alias_kmem, fa);
258}
259
260/* 220/*
261 To understand this stuff, an understanding of keys and all their bits is 221 To understand this stuff, an understanding of keys and all their bits is
262 necessary. Every node in the trie has a key associated with it, but not 222 necessary. Every node in the trie has a key associated with it, but not
@@ -295,7 +255,7 @@ static void fn_free_alias(struct fib_alias *fa)
295 tp->pos = 7 255 tp->pos = 7
296 tp->bits = 3 256 tp->bits = 3
297 n->pos = 15 257 n->pos = 15
298 n->bits=4 258 n->bits = 4
299 259
300 First, let's just ignore the bits that come before the parent tp, that is 260 First, let's just ignore the bits that come before the parent tp, that is
301 the bits from 0 to (tp->pos-1). They are *known* but at this point we do 261 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
@@ -320,60 +280,65 @@ static void fn_free_alias(struct fib_alias *fa)
320 280
321*/ 281*/
322 282
323static void check_tnode(struct tnode *tn) 283static inline void check_tnode(const struct tnode *tn)
324{ 284{
325 if (tn && tn->pos+tn->bits > 32) { 285 WARN_ON(tn && tn->pos+tn->bits > 32);
326 printk("TNODE ERROR tn=%p, pos=%d, bits=%d\n", tn, tn->pos, tn->bits);
327 }
328} 286}
329 287
330static int halve_threshold = 25; 288static int halve_threshold = 25;
331static int inflate_threshold = 50; 289static int inflate_threshold = 50;
332 290
333static struct leaf *leaf_new(void) 291
292static void __alias_free_mem(struct rcu_head *head)
334{ 293{
335 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL); 294 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
336 if (l) { 295 kmem_cache_free(fn_alias_kmem, fa);
337 NODE_INIT_PARENT(l, T_LEAF);
338 INIT_HLIST_HEAD(&l->list);
339 }
340 return l;
341} 296}
342 297
343static struct leaf_info *leaf_info_new(int plen) 298static inline void alias_free_mem_rcu(struct fib_alias *fa)
344{ 299{
345 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL); 300 call_rcu(&fa->rcu, __alias_free_mem);
346 if (li) { 301}
347 li->plen = plen; 302
348 INIT_LIST_HEAD(&li->falh); 303static void __leaf_free_rcu(struct rcu_head *head)
349 } 304{
350 return li; 305 kfree(container_of(head, struct leaf, rcu));
306}
307
308static inline void free_leaf(struct leaf *leaf)
309{
310 call_rcu(&leaf->rcu, __leaf_free_rcu);
351} 311}
352 312
353static inline void free_leaf(struct leaf *l) 313static void __leaf_info_free_rcu(struct rcu_head *head)
354{ 314{
355 kfree(l); 315 kfree(container_of(head, struct leaf_info, rcu));
356} 316}
357 317
358static inline void free_leaf_info(struct leaf_info *li) 318static inline void free_leaf_info(struct leaf_info *leaf)
359{ 319{
360 kfree(li); 320 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
361} 321}
362 322
363static struct tnode *tnode_alloc(unsigned int size) 323static struct tnode *tnode_alloc(unsigned int size)
364{ 324{
365 if (size <= PAGE_SIZE) { 325 struct page *pages;
366 return kmalloc(size, GFP_KERNEL); 326
367 } else { 327 if (size <= PAGE_SIZE)
368 return (struct tnode *) 328 return kcalloc(size, 1, GFP_KERNEL);
369 __get_free_pages(GFP_KERNEL, get_order(size)); 329
370 } 330 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
331 if (!pages)
332 return NULL;
333
334 return page_address(pages);
371} 335}
372 336
373static void __tnode_free(struct tnode *tn) 337static void __tnode_free_rcu(struct rcu_head *head)
374{ 338{
339 struct tnode *tn = container_of(head, struct tnode, rcu);
375 unsigned int size = sizeof(struct tnode) + 340 unsigned int size = sizeof(struct tnode) +
376 (1<<tn->bits) * sizeof(struct node *); 341 (1 << tn->bits) * sizeof(struct node *);
377 342
378 if (size <= PAGE_SIZE) 343 if (size <= PAGE_SIZE)
379 kfree(tn); 344 kfree(tn);
@@ -381,15 +346,40 @@ static void __tnode_free(struct tnode *tn)
381 free_pages((unsigned long)tn, get_order(size)); 346 free_pages((unsigned long)tn, get_order(size));
382} 347}
383 348
349static inline void tnode_free(struct tnode *tn)
350{
351 call_rcu(&tn->rcu, __tnode_free_rcu);
352}
353
354static struct leaf *leaf_new(void)
355{
356 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
357 if (l) {
358 l->parent = T_LEAF;
359 INIT_HLIST_HEAD(&l->list);
360 }
361 return l;
362}
363
364static struct leaf_info *leaf_info_new(int plen)
365{
366 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
367 if (li) {
368 li->plen = plen;
369 INIT_LIST_HEAD(&li->falh);
370 }
371 return li;
372}
373
384static struct tnode* tnode_new(t_key key, int pos, int bits) 374static struct tnode* tnode_new(t_key key, int pos, int bits)
385{ 375{
386 int nchildren = 1<<bits; 376 int nchildren = 1<<bits;
387 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *); 377 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
388 struct tnode *tn = tnode_alloc(sz); 378 struct tnode *tn = tnode_alloc(sz);
389 379
390 if (tn) { 380 if (tn) {
391 memset(tn, 0, sz); 381 memset(tn, 0, sz);
392 NODE_INIT_PARENT(tn, T_TNODE); 382 tn->parent = T_TNODE;
393 tn->pos = pos; 383 tn->pos = pos;
394 tn->bits = bits; 384 tn->bits = bits;
395 tn->key = key; 385 tn->key = key;
@@ -397,38 +387,17 @@ static struct tnode* tnode_new(t_key key, int pos, int bits)
397 tn->empty_children = 1<<bits; 387 tn->empty_children = 1<<bits;
398 } 388 }
399 389
400 if (trie_debug > 0) 390 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
401 printk("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode), 391 (unsigned int) (sizeof(struct node) * 1<<bits));
402 (unsigned int) (sizeof(struct node) * 1<<bits));
403 return tn; 392 return tn;
404} 393}
405 394
406static void tnode_free(struct tnode *tn)
407{
408 if (!tn) {
409 trie_bug("tnode_free\n");
410 }
411 if (IS_LEAF(tn)) {
412 free_leaf((struct leaf *)tn);
413 if (trie_debug > 0 )
414 printk("FL %p \n", tn);
415 }
416 else if (IS_TNODE(tn)) {
417 __tnode_free(tn);
418 if (trie_debug > 0 )
419 printk("FT %p \n", tn);
420 }
421 else {
422 trie_bug("tnode_free\n");
423 }
424}
425
426/* 395/*
427 * Check whether a tnode 'n' is "full", i.e. it is an internal node 396 * Check whether a tnode 'n' is "full", i.e. it is an internal node
428 * and no bits are skipped. See discussion in dyntree paper p. 6 397 * and no bits are skipped. See discussion in dyntree paper p. 6
429 */ 398 */
430 399
431static inline int tnode_full(struct tnode *tn, struct node *n) 400static inline int tnode_full(const struct tnode *tn, const struct node *n)
432{ 401{
433 if (n == NULL || IS_LEAF(n)) 402 if (n == NULL || IS_LEAF(n))
434 return 0; 403 return 0;
@@ -448,15 +417,11 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i, struct nod
448 417
449static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull) 418static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
450{ 419{
451 struct node *chi; 420 struct node *chi = tn->child[i];
452 int isfull; 421 int isfull;
453 422
454 if (i >= 1<<tn->bits) { 423 BUG_ON(i >= 1<<tn->bits);
455 printk("bits=%d, i=%d\n", tn->bits, i); 424
456 trie_bug("tnode_put_child_reorg bits");
457 }
458 write_lock_bh(&fib_lock);
459 chi = tn->child[i];
460 425
461 /* update emptyChildren */ 426 /* update emptyChildren */
462 if (n == NULL && chi != NULL) 427 if (n == NULL && chi != NULL)
@@ -465,33 +430,32 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int w
465 tn->empty_children--; 430 tn->empty_children--;
466 431
467 /* update fullChildren */ 432 /* update fullChildren */
468 if (wasfull == -1) 433 if (wasfull == -1)
469 wasfull = tnode_full(tn, chi); 434 wasfull = tnode_full(tn, chi);
470 435
471 isfull = tnode_full(tn, n); 436 isfull = tnode_full(tn, n);
472 if (wasfull && !isfull) 437 if (wasfull && !isfull)
473 tn->full_children--; 438 tn->full_children--;
474
475 else if (!wasfull && isfull) 439 else if (!wasfull && isfull)
476 tn->full_children++; 440 tn->full_children++;
441
477 if (n) 442 if (n)
478 NODE_SET_PARENT(n, tn); 443 NODE_SET_PARENT(n, tn);
479 444
480 tn->child[i] = n; 445 rcu_assign_pointer(tn->child[i], n);
481 write_unlock_bh(&fib_lock);
482} 446}
483 447
484static struct node *resize(struct trie *t, struct tnode *tn) 448static struct node *resize(struct trie *t, struct tnode *tn)
485{ 449{
486 int i; 450 int i;
487 int err = 0; 451 int err = 0;
452 struct tnode *old_tn;
488 453
489 if (!tn) 454 if (!tn)
490 return NULL; 455 return NULL;
491 456
492 if (trie_debug) 457 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
493 printk("In tnode_resize %p inflate_threshold=%d threshold=%d\n", 458 tn, inflate_threshold, halve_threshold);
494 tn, inflate_threshold, halve_threshold);
495 459
496 /* No children */ 460 /* No children */
497 if (tn->empty_children == tnode_child_length(tn)) { 461 if (tn->empty_children == tnode_child_length(tn)) {
@@ -501,20 +465,16 @@ static struct node *resize(struct trie *t, struct tnode *tn)
501 /* One child */ 465 /* One child */
502 if (tn->empty_children == tnode_child_length(tn) - 1) 466 if (tn->empty_children == tnode_child_length(tn) - 1)
503 for (i = 0; i < tnode_child_length(tn); i++) { 467 for (i = 0; i < tnode_child_length(tn); i++) {
468 struct node *n;
504 469
505 write_lock_bh(&fib_lock); 470 n = tn->child[i];
506 if (tn->child[i] != NULL) { 471 if (!n)
507 472 continue;
508 /* compress one level */
509 struct node *n = tn->child[i];
510 if (n)
511 NODE_INIT_PARENT(n, NODE_TYPE(n));
512 473
513 write_unlock_bh(&fib_lock); 474 /* compress one level */
514 tnode_free(tn); 475 NODE_SET_PARENT(n, NULL);
515 return n; 476 tnode_free(tn);
516 } 477 return n;
517 write_unlock_bh(&fib_lock);
518 } 478 }
519 /* 479 /*
520 * Double as long as the resulting node has a number of 480 * Double as long as the resulting node has a number of
@@ -566,16 +526,16 @@ static struct node *resize(struct trie *t, struct tnode *tn)
566 * 526 *
567 * expand not_to_be_doubled and to_be_doubled, and shorten: 527 * expand not_to_be_doubled and to_be_doubled, and shorten:
568 * 100 * (tnode_child_length(tn) - tn->empty_children + 528 * 100 * (tnode_child_length(tn) - tn->empty_children +
569 * tn->full_children ) >= inflate_threshold * new_child_length 529 * tn->full_children) >= inflate_threshold * new_child_length
570 * 530 *
571 * expand new_child_length: 531 * expand new_child_length:
572 * 100 * (tnode_child_length(tn) - tn->empty_children + 532 * 100 * (tnode_child_length(tn) - tn->empty_children +
573 * tn->full_children ) >= 533 * tn->full_children) >=
574 * inflate_threshold * tnode_child_length(tn) * 2 534 * inflate_threshold * tnode_child_length(tn) * 2
575 * 535 *
576 * shorten again: 536 * shorten again:
577 * 50 * (tn->full_children + tnode_child_length(tn) - 537 * 50 * (tn->full_children + tnode_child_length(tn) -
578 * tn->empty_children ) >= inflate_threshold * 538 * tn->empty_children) >= inflate_threshold *
579 * tnode_child_length(tn) 539 * tnode_child_length(tn)
580 * 540 *
581 */ 541 */
@@ -587,9 +547,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
587 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= 547 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
588 inflate_threshold * tnode_child_length(tn))) { 548 inflate_threshold * tnode_child_length(tn))) {
589 549
590 tn = inflate(t, tn, &err); 550 old_tn = tn;
591 551 tn = inflate(t, tn);
592 if (err) { 552 if (IS_ERR(tn)) {
553 tn = old_tn;
593#ifdef CONFIG_IP_FIB_TRIE_STATS 554#ifdef CONFIG_IP_FIB_TRIE_STATS
594 t->stats.resize_node_skipped++; 555 t->stats.resize_node_skipped++;
595#endif 556#endif
@@ -609,9 +570,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
609 100 * (tnode_child_length(tn) - tn->empty_children) < 570 100 * (tnode_child_length(tn) - tn->empty_children) <
610 halve_threshold * tnode_child_length(tn)) { 571 halve_threshold * tnode_child_length(tn)) {
611 572
612 tn = halve(t, tn, &err); 573 old_tn = tn;
613 574 tn = halve(t, tn);
614 if (err) { 575 if (IS_ERR(tn)) {
576 tn = old_tn;
615#ifdef CONFIG_IP_FIB_TRIE_STATS 577#ifdef CONFIG_IP_FIB_TRIE_STATS
616 t->stats.resize_node_skipped++; 578 t->stats.resize_node_skipped++;
617#endif 579#endif
@@ -621,44 +583,37 @@ static struct node *resize(struct trie *t, struct tnode *tn)
621 583
622 584
623 /* Only one child remains */ 585 /* Only one child remains */
624
625 if (tn->empty_children == tnode_child_length(tn) - 1) 586 if (tn->empty_children == tnode_child_length(tn) - 1)
626 for (i = 0; i < tnode_child_length(tn); i++) { 587 for (i = 0; i < tnode_child_length(tn); i++) {
627 588 struct node *n;
628 write_lock_bh(&fib_lock); 589
629 if (tn->child[i] != NULL) { 590 n = tn->child[i];
630 /* compress one level */ 591 if (!n)
631 struct node *n = tn->child[i]; 592 continue;
632 593
633 if (n) 594 /* compress one level */
634 NODE_INIT_PARENT(n, NODE_TYPE(n)); 595
635 596 NODE_SET_PARENT(n, NULL);
636 write_unlock_bh(&fib_lock); 597 tnode_free(tn);
637 tnode_free(tn); 598 return n;
638 return n;
639 }
640 write_unlock_bh(&fib_lock);
641 } 599 }
642 600
643 return (struct node *) tn; 601 return (struct node *) tn;
644} 602}
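
The threshold comments above reduce used_slots/new_size >= inflate_threshold/100 to pure integer arithmetic. A standalone sketch of the two resulting predicates (plain C; the real code reads full_children and empty_children straight off the tnode):

/* Doubling a node with 2^bits slots gives 2^(bits+1) slots, and each
 * full child expands into two of them, so the doubled node starts out
 * with (child_length - empty) + full occupied slots. Multiplying both
 * sides by 100 (and folding in the factor of 2) keeps it integer-only. */
static int should_inflate(int child_length, int empty, int full,
			  int inflate_threshold)
{
	return 50 * (full + child_length - empty) >=
	       inflate_threshold * child_length;
}

/* Halving is the mirror image: shrink only while occupancy of the
 * halved node would stay below halve_threshold percent. */
static int should_halve(int child_length, int empty, int halve_threshold)
{
	return 100 * (child_length - empty) <
	       halve_threshold * child_length;
}
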
645 603
646static struct tnode *inflate(struct trie *t, struct tnode *tn, int *err) 604static struct tnode *inflate(struct trie *t, struct tnode *tn)
647{ 605{
648 struct tnode *inode; 606 struct tnode *inode;
649 struct tnode *oldtnode = tn; 607 struct tnode *oldtnode = tn;
650 int olen = tnode_child_length(tn); 608 int olen = tnode_child_length(tn);
651 int i; 609 int i;
652 610
653 if (trie_debug) 611 pr_debug("In inflate\n");
654 printk("In inflate\n");
655 612
656 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1); 613 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
657 614
658 if (!tn) { 615 if (!tn)
659 *err = -ENOMEM; 616 return ERR_PTR(-ENOMEM);
660 return oldtnode;
661 }
662 617
663 /* 618 /*
664 * Preallocate and store tnodes before the actual work so we 619 * Preallocate and store tnodes before the actual work so we
@@ -666,8 +621,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn, int *err)
666 * fails. In case of failure we return the oldnode and inflate 621 * fails. In case of failure we return the oldnode and inflate
667 * of tnode is ignored. 622 * of tnode is ignored.
668 */ 623 */
669 624
670 for(i = 0; i < olen; i++) { 625 for (i = 0; i < olen; i++) {
671 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i); 626 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
672 627
673 if (inode && 628 if (inode &&
@@ -675,46 +630,30 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn, int *err)
675 inode->pos == oldtnode->pos + oldtnode->bits && 630 inode->pos == oldtnode->pos + oldtnode->bits &&
676 inode->bits > 1) { 631 inode->bits > 1) {
677 struct tnode *left, *right; 632 struct tnode *left, *right;
678
679 t_key m = TKEY_GET_MASK(inode->pos, 1); 633 t_key m = TKEY_GET_MASK(inode->pos, 1);
680 634
681 left = tnode_new(inode->key&(~m), inode->pos + 1, 635 left = tnode_new(inode->key&(~m), inode->pos + 1,
682 inode->bits - 1); 636 inode->bits - 1);
637 if (!left)
638 goto nomem;
683 639
684 if (!left) {
685 *err = -ENOMEM;
686 break;
687 }
688
689 right = tnode_new(inode->key|m, inode->pos + 1, 640 right = tnode_new(inode->key|m, inode->pos + 1,
690 inode->bits - 1); 641 inode->bits - 1);
691 642
692 if (!right) { 643 if (!right) {
693 *err = -ENOMEM; 644 tnode_free(left);
694 break; 645 goto nomem;
695 } 646 }
696 647
697 put_child(t, tn, 2*i, (struct node *) left); 648 put_child(t, tn, 2*i, (struct node *) left);
698 put_child(t, tn, 2*i+1, (struct node *) right); 649 put_child(t, tn, 2*i+1, (struct node *) right);
699 } 650 }
700 } 651 }
701 652
702 if (*err) { 653 for (i = 0; i < olen; i++) {
703 int size = tnode_child_length(tn);
704 int j;
705
706 for(j = 0; j < size; j++)
707 if (tn->child[j])
708 tnode_free((struct tnode *)tn->child[j]);
709
710 tnode_free(tn);
711
712 *err = -ENOMEM;
713 return oldtnode;
714 }
715
716 for(i = 0; i < olen; i++) {
717 struct node *node = tnode_get_child(oldtnode, i); 654 struct node *node = tnode_get_child(oldtnode, i);
655 struct tnode *left, *right;
656 int size, j;
718 657
719 /* An empty child */ 658 /* An empty child */
720 if (node == NULL) 659 if (node == NULL)
@@ -740,76 +679,82 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn, int *err)
740 put_child(t, tn, 2*i+1, inode->child[1]); 679 put_child(t, tn, 2*i+1, inode->child[1]);
741 680
742 tnode_free(inode); 681 tnode_free(inode);
682 continue;
743 } 683 }
744 684
745 /* An internal node with more than two children */ 685 /* An internal node with more than two children */
746 else { 686
747 struct tnode *left, *right; 687 /* We will replace this node 'inode' with two new
748 int size, j; 688 * ones, 'left' and 'right', each with half of the
749 689 * original children. The two new nodes will have
750 /* We will replace this node 'inode' with two new 690 * a position one bit further down the key and this
751 * ones, 'left' and 'right', each with half of the 691 * means that the "significant" part of their keys
752 * original children. The two new nodes will have 692 * (see the discussion near the top of this file)
753 * a position one bit further down the key and this 693 * will differ by one bit, which will be "0" in
754 * means that the "significant" part of their keys 694 * left's key and "1" in right's key. Since we are
755 * (see the discussion near the top of this file) 695 * moving the key position by one step, the bit that
756 * will differ by one bit, which will be "0" in 696 * we are moving away from - the bit at position
757 * left's key and "1" in right's key. Since we are 697 * (inode->pos) - is the one that will differ between
758 * moving the key position by one step, the bit that 698 * left and right. So... we synthesize that bit in the
759 * we are moving away from - the bit at position 699 * two new keys.
760 * (inode->pos) - is the one that will differ between 700 * The mask 'm' below will be a single "one" bit at
761 * left and right. So... we synthesize that bit in the 701 * the position (inode->pos)
762 * two new keys. 702 */
763 * The mask 'm' below will be a single "one" bit at
764 * the position (inode->pos)
765 */
766
767 /* Use the old key, but set the new significant
768 * bit to zero.
769 */
770 703
771 left = (struct tnode *) tnode_get_child(tn, 2*i); 704 /* Use the old key, but set the new significant
772 put_child(t, tn, 2*i, NULL); 705 * bit to zero.
706 */
773 707
774 if (!left) 708 left = (struct tnode *) tnode_get_child(tn, 2*i);
775 BUG(); 709 put_child(t, tn, 2*i, NULL);
776 710
777 right = (struct tnode *) tnode_get_child(tn, 2*i+1); 711 BUG_ON(!left);
778 put_child(t, tn, 2*i+1, NULL);
779 712
780 if (!right) 713 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
781 BUG(); 714 put_child(t, tn, 2*i+1, NULL);
782 715
783 size = tnode_child_length(left); 716 BUG_ON(!right);
784 for(j = 0; j < size; j++) {
785 put_child(t, left, j, inode->child[j]);
786 put_child(t, right, j, inode->child[j + size]);
787 }
788 put_child(t, tn, 2*i, resize(t, left));
789 put_child(t, tn, 2*i+1, resize(t, right));
790 717
791 tnode_free(inode); 718 size = tnode_child_length(left);
719 for (j = 0; j < size; j++) {
720 put_child(t, left, j, inode->child[j]);
721 put_child(t, right, j, inode->child[j + size]);
792 } 722 }
723 put_child(t, tn, 2*i, resize(t, left));
724 put_child(t, tn, 2*i+1, resize(t, right));
725
726 tnode_free(inode);
793 } 727 }
794 tnode_free(oldtnode); 728 tnode_free(oldtnode);
795 return tn; 729 return tn;
730nomem:
731 {
732 int size = tnode_child_length(tn);
733 int j;
734
735 for (j = 0; j < size; j++)
736 if (tn->child[j])
737 tnode_free((struct tnode *)tn->child[j]);
738
739 tnode_free(tn);
740
741 return ERR_PTR(-ENOMEM);
742 }
796} 743}
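
inflate() no longer reports failure through an int *err out-parameter; it encodes the error in the returned pointer. A minimal sketch of the ERR_PTR()/IS_ERR() idiom that resize() now relies on (the allocation helper here is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct tnode;

/* On failure, fold the errno into the pointer itself; error values
 * occupy the top of the address space, so IS_ERR() can tell them
 * apart from real objects. */
static struct tnode *alloc_tnode(size_t sz)
{
	struct tnode *tn = kmalloc(sz, GFP_KERNEL);

	if (!tn)
		return ERR_PTR(-ENOMEM);
	return tn;
}

/* The caller keeps the old pointer around so it can fall back,
 * exactly as resize() does with old_tn when inflate()/halve() fail. */
static struct tnode *try_resize(struct tnode *old, size_t sz)
{
	struct tnode *tn = alloc_tnode(sz);

	if (IS_ERR(tn))
		return old;	/* allocation failed, keep the old node */
	return tn;
}
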
797 744
798static struct tnode *halve(struct trie *t, struct tnode *tn, int *err) 745static struct tnode *halve(struct trie *t, struct tnode *tn)
799{ 746{
800 struct tnode *oldtnode = tn; 747 struct tnode *oldtnode = tn;
801 struct node *left, *right; 748 struct node *left, *right;
802 int i; 749 int i;
803 int olen = tnode_child_length(tn); 750 int olen = tnode_child_length(tn);
804 751
805 if (trie_debug) printk("In halve\n"); 752 pr_debug("In halve\n");
806 753
807 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1); 754 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
808 755
809 if (!tn) { 756 if (!tn)
810 *err = -ENOMEM; 757 return ERR_PTR(-ENOMEM);
811 return oldtnode;
812 }
813 758
814 /* 759 /*
815 * Preallocate and store tnodes before the actual work so we 760 * Preallocate and store tnodes before the actual work so we
@@ -818,38 +763,27 @@ static struct tnode *halve(struct trie *t, struct tnode *tn, int *err)
818 * of tnode is ignored. 763 * of tnode is ignored.
819 */ 764 */
820 765
821 for(i = 0; i < olen; i += 2) { 766 for (i = 0; i < olen; i += 2) {
822 left = tnode_get_child(oldtnode, i); 767 left = tnode_get_child(oldtnode, i);
823 right = tnode_get_child(oldtnode, i+1); 768 right = tnode_get_child(oldtnode, i+1);
824 769
825 /* Two nonempty children */ 770 /* Two nonempty children */
826 if (left && right) { 771 if (left && right) {
827 struct tnode *newBinNode = 772 struct tnode *newn;
828 tnode_new(left->key, tn->pos + tn->bits, 1);
829 773
830 if (!newBinNode) { 774 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
831 *err = -ENOMEM;
832 break;
833 }
834 put_child(t, tn, i/2, (struct node *)newBinNode);
835 }
836 }
837 775
838 if (*err) { 776 if (!newn)
839 int size = tnode_child_length(tn); 777 goto nomem;
840 int j;
841 778
842 for(j = 0; j < size; j++) 779 put_child(t, tn, i/2, (struct node *)newn);
843 if (tn->child[j]) 780 }
844 tnode_free((struct tnode *)tn->child[j]);
845 781
846 tnode_free(tn);
847
848 *err = -ENOMEM;
849 return oldtnode;
850 } 782 }
851 783
852 for(i = 0; i < olen; i += 2) { 784 for (i = 0; i < olen; i += 2) {
785 struct tnode *newBinNode;
786
853 left = tnode_get_child(oldtnode, i); 787 left = tnode_get_child(oldtnode, i);
854 right = tnode_get_child(oldtnode, i+1); 788 right = tnode_get_child(oldtnode, i+1);
855 789
@@ -858,88 +792,99 @@ static struct tnode *halve(struct trie *t, struct tnode *tn, int *err)
858 if (right == NULL) /* Both are empty */ 792 if (right == NULL) /* Both are empty */
859 continue; 793 continue;
860 put_child(t, tn, i/2, right); 794 put_child(t, tn, i/2, right);
861 } else if (right == NULL) 795 continue;
796 }
797
798 if (right == NULL) {
862 put_child(t, tn, i/2, left); 799 put_child(t, tn, i/2, left);
800 continue;
801 }
863 802
864 /* Two nonempty children */ 803 /* Two nonempty children */
865 else { 804 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
866 struct tnode *newBinNode = 805 put_child(t, tn, i/2, NULL);
867 (struct tnode *) tnode_get_child(tn, i/2); 806 put_child(t, newBinNode, 0, left);
868 put_child(t, tn, i/2, NULL); 807 put_child(t, newBinNode, 1, right);
869 808 put_child(t, tn, i/2, resize(t, newBinNode));
870 if (!newBinNode)
871 BUG();
872
873 put_child(t, newBinNode, 0, left);
874 put_child(t, newBinNode, 1, right);
875 put_child(t, tn, i/2, resize(t, newBinNode));
876 }
877 } 809 }
878 tnode_free(oldtnode); 810 tnode_free(oldtnode);
879 return tn; 811 return tn;
812nomem:
813 {
814 int size = tnode_child_length(tn);
815 int j;
816
817 for (j = 0; j < size; j++)
818 if (tn->child[j])
819 tnode_free((struct tnode *)tn->child[j]);
820
821 tnode_free(tn);
822
823 return ERR_PTR(-ENOMEM);
824 }
880} 825}
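
halve() pairs up children i and i+1 of the old node into slot i/2 of the new one, interposing a one-bit tnode only when both are non-NULL. A tiny sketch of that index mapping over plain pointer arrays (make_binary_node() is a hypothetical stand-in for the tnode_new() plus two put_child() calls):

struct node;

/* hypothetical helper: build a 1-bit tnode holding the pair */
struct node *make_binary_node(struct node *l, struct node *r);

static void fold_pairs(struct node **old, struct node **new, int olen)
{
	int i;

	for (i = 0; i < olen; i += 2) {
		if (old[i] && old[i + 1])	/* two nonempty children */
			new[i / 2] = make_binary_node(old[i], old[i + 1]);
		else				/* at most one child: move it */
			new[i / 2] = old[i] ? old[i] : old[i + 1];
	}
}
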
881 826
882static void *trie_init(struct trie *t) 827static void trie_init(struct trie *t)
883{ 828{
884 if (t) { 829 if (!t)
885 t->size = 0; 830 return;
886 t->trie = NULL; 831
887 t->revision = 0; 832 t->size = 0;
833 rcu_assign_pointer(t->trie, NULL);
834 t->revision = 0;
888#ifdef CONFIG_IP_FIB_TRIE_STATS 835#ifdef CONFIG_IP_FIB_TRIE_STATS
889 memset(&t->stats, 0, sizeof(struct trie_use_stats)); 836 memset(&t->stats, 0, sizeof(struct trie_use_stats));
890#endif 837#endif
891 }
892 return t;
893} 838}
894 839
 840/* readers must use rcu_read_lock(); currently only the dump
 841 routines, via get_fa_head and dump, read the trie */
842
895static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen) 843static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
896{ 844{
897 struct hlist_node *node; 845 struct hlist_node *node;
898 struct leaf_info *li; 846 struct leaf_info *li;
899 847
900 hlist_for_each_entry(li, node, head, hlist) { 848 hlist_for_each_entry_rcu(li, node, head, hlist)
901 if (li->plen == plen) 849 if (li->plen == plen)
902 return li; 850 return li;
903 } 851
904 return NULL; 852 return NULL;
905} 853}
906 854
907static inline struct list_head * get_fa_head(struct leaf *l, int plen) 855static inline struct list_head * get_fa_head(struct leaf *l, int plen)
908{ 856{
909 struct list_head *fa_head = NULL;
910 struct leaf_info *li = find_leaf_info(&l->list, plen); 857 struct leaf_info *li = find_leaf_info(&l->list, plen);
911 858
912 if (li) 859 if (!li)
913 fa_head = &li->falh; 860 return NULL;
914 861
915 return fa_head; 862 return &li->falh;
916} 863}
917 864
918static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) 865static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
919{ 866{
920 struct leaf_info *li = NULL, *last = NULL; 867 struct leaf_info *li = NULL, *last = NULL;
921 struct hlist_node *node, *tmp; 868 struct hlist_node *node;
922 869
923 write_lock_bh(&fib_lock); 870 if (hlist_empty(head)) {
924 871 hlist_add_head_rcu(&new->hlist, head);
925 if (hlist_empty(head)) 872 } else {
926 hlist_add_head(&new->hlist, head); 873 hlist_for_each_entry(li, node, head, hlist) {
927 else { 874 if (new->plen > li->plen)
928 hlist_for_each_entry_safe(li, node, tmp, head, hlist) { 875 break;
929 876
930 if (new->plen > li->plen) 877 last = li;
931 break; 878 }
932 879 if (last)
933 last = li; 880 hlist_add_after_rcu(&last->hlist, &new->hlist);
934 } 881 else
935 if (last) 882 hlist_add_before_rcu(&new->hlist, &li->hlist);
936 hlist_add_after(&last->hlist, &new->hlist); 883 }
937 else
938 hlist_add_before(&new->hlist, &li->hlist);
939 }
940 write_unlock_bh(&fib_lock);
941} 884}
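
insert_leaf_info() keeps each leaf's list of leaf_info entries sorted by decreasing prefix length, and every link update now goes through the _rcu hlist helpers so a concurrent reader never sees a half-stitched chain. A reduced sketch of the same ordered insert (hlist API of this kernel generation; the struct is trimmed to the fields used):

#include <linux/list.h>
#include <linux/rcupdate.h>

struct leaf_info {
	struct hlist_node hlist;
	int plen;
};

static void insert_sorted(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
		return;
	}

	/* stop at the first entry with a smaller plen; 'last' then
	 * points at the final entry that should precede 'new' */
	hlist_for_each_entry(li, node, head, hlist) {
		if (new->plen > li->plen)
			break;
		last = li;
	}

	if (last)
		hlist_add_after_rcu(&last->hlist, &new->hlist);
	else
		hlist_add_before_rcu(&new->hlist, &li->hlist);
}
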
942 885
 886/* rcu_read_lock needs to be held by the caller on the read side */
887
943static struct leaf * 888static struct leaf *
944fib_find_node(struct trie *t, u32 key) 889fib_find_node(struct trie *t, u32 key)
945{ 890{
@@ -948,61 +893,43 @@ fib_find_node(struct trie *t, u32 key)
948 struct node *n; 893 struct node *n;
949 894
950 pos = 0; 895 pos = 0;
951 n = t->trie; 896 n = rcu_dereference(t->trie);
952 897
953 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 898 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
954 tn = (struct tnode *) n; 899 tn = (struct tnode *) n;
955 900
956 check_tnode(tn); 901 check_tnode(tn);
957 902
958 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { 903 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
959 pos=tn->pos + tn->bits; 904 pos = tn->pos + tn->bits;
960 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); 905 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
961 } 906 } else
962 else
963 break; 907 break;
964 } 908 }
 965 /* We have found a leaf. Compare prefixes */ 909 /* We have found a leaf. Compare prefixes */
966 910
967 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) { 911 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
968 struct leaf *l = (struct leaf *) n; 912 return (struct leaf *)n;
969 return l; 913
970 }
971 return NULL; 914 return NULL;
972} 915}
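
Each step of the descent above indexes the child array with tkey_extract_bits(key, tn->pos, tn->bits). A self-contained sketch of what that extraction computes on the 32-bit keys used here (KEYLENGTH is 32 in this file; the guard mirrors the real helper):

typedef unsigned int t_key;
#define KEYLENGTH 32

/* Take 'bits' bits out of 'key', starting 'pos' bits from the most
 * significant end: shifting left drops the already-matched prefix,
 * shifting right isolates the chunk used as the child index. */
static t_key extract_bits(t_key key, int pos, int bits)
{
	if (pos >= KEYLENGTH || bits == 0)
		return 0;
	return (key << pos) >> (KEYLENGTH - bits);
}

With pos = tn->pos and bits = tn->bits this yields the child slot at every level, so a tnode with bits = 4 resolves four key bits in a single step.
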
973 916
974static struct node *trie_rebalance(struct trie *t, struct tnode *tn) 917static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
975{ 918{
976 int i = 0;
977 int wasfull; 919 int wasfull;
978 t_key cindex, key; 920 t_key cindex, key;
979 struct tnode *tp = NULL; 921 struct tnode *tp = NULL;
980 922
981 if (!tn)
982 BUG();
983
984 key = tn->key; 923 key = tn->key;
985 i = 0;
986 924
987 while (tn != NULL && NODE_PARENT(tn) != NULL) { 925 while (tn != NULL && NODE_PARENT(tn) != NULL) {
988 926
989 if (i > 10) {
990 printk("Rebalance tn=%p \n", tn);
991 if (tn) printk("tn->parent=%p \n", NODE_PARENT(tn));
992
993 printk("Rebalance tp=%p \n", tp);
994 if (tp) printk("tp->parent=%p \n", NODE_PARENT(tp));
995 }
996
997 if (i > 12) BUG();
998 i++;
999
1000 tp = NODE_PARENT(tn); 927 tp = NODE_PARENT(tn);
1001 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 928 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1002 wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); 929 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
1003 tn = (struct tnode *) resize(t, (struct tnode *)tn); 930 tn = (struct tnode *) resize(t, (struct tnode *)tn);
1004 tnode_put_child_reorg((struct tnode *)tp, cindex, (struct node *)tn, wasfull); 931 tnode_put_child_reorg((struct tnode *)tp, cindex, (struct node *)tn, wasfull);
1005 932
1006 if (!NODE_PARENT(tn)) 933 if (!NODE_PARENT(tn))
1007 break; 934 break;
1008 935
@@ -1015,6 +942,8 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
1015 return (struct node*) tn; 942 return (struct node*) tn;
1016} 943}
1017 944
 945/* only used from the updater side */
946
1018static struct list_head * 947static struct list_head *
1019fib_insert_node(struct trie *t, int *err, u32 key, int plen) 948fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1020{ 949{
@@ -1050,20 +979,16 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1050 979
1051 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 980 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1052 tn = (struct tnode *) n; 981 tn = (struct tnode *) n;
1053 982
1054 check_tnode(tn); 983 check_tnode(tn);
1055 984
1056 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { 985 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1057 tp = tn; 986 tp = tn;
1058 pos=tn->pos + tn->bits; 987 pos = tn->pos + tn->bits;
1059 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); 988 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1060 989
1061 if (n && NODE_PARENT(n) != tn) { 990 BUG_ON(n && NODE_PARENT(n) != tn);
1062 printk("BUG tn=%p, n->parent=%p\n", tn, NODE_PARENT(n)); 991 } else
1063 BUG();
1064 }
1065 }
1066 else
1067 break; 992 break;
1068 } 993 }
1069 994
@@ -1073,17 +998,15 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1073 * tp is n's (parent) ----> NULL or TNODE 998 * tp is n's (parent) ----> NULL or TNODE
1074 */ 999 */
1075 1000
1076 if (tp && IS_LEAF(tp)) 1001 BUG_ON(tp && IS_LEAF(tp));
1077 BUG();
1078
1079 1002
1080 /* Case 1: n is a leaf. Compare prefixes */ 1003 /* Case 1: n is a leaf. Compare prefixes */
1081 1004
1082 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) { 1005 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1083 struct leaf *l = ( struct leaf *) n; 1006 struct leaf *l = (struct leaf *) n;
1084 1007
1085 li = leaf_info_new(plen); 1008 li = leaf_info_new(plen);
1086 1009
1087 if (!li) { 1010 if (!li) {
1088 *err = -ENOMEM; 1011 *err = -ENOMEM;
1089 goto err; 1012 goto err;
@@ -1113,35 +1036,29 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1113 fa_head = &li->falh; 1036 fa_head = &li->falh;
1114 insert_leaf_info(&l->list, li); 1037 insert_leaf_info(&l->list, li);
1115 1038
1116 /* Case 2: n is NULL, and will just insert a new leaf */
1117 if (t->trie && n == NULL) { 1039 if (t->trie && n == NULL) {
1040 /* Case 2: n is NULL, and will just insert a new leaf */
1118 1041
1119 NODE_SET_PARENT(l, tp); 1042 NODE_SET_PARENT(l, tp);
1120
1121 if (!tp)
1122 BUG();
1123 1043
1124 else { 1044 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1125 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1045 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1126 put_child(t, (struct tnode *)tp, cindex, (struct node *)l); 1046 } else {
1127 } 1047 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1128 }
1129 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1130 else {
1131 /* 1048 /*
1132 * Add a new tnode here 1049 * Add a new tnode here
1133 * the first tnode needs some special handling 1050 * the first tnode needs some special handling
1134 */ 1051 */
1135 1052
1136 if (tp) 1053 if (tp)
1137 pos=tp->pos+tp->bits; 1054 pos = tp->pos+tp->bits;
1138 else 1055 else
1139 pos=0; 1056 pos = 0;
1057
1140 if (n) { 1058 if (n) {
1141 newpos = tkey_mismatch(key, pos, n->key); 1059 newpos = tkey_mismatch(key, pos, n->key);
1142 tn = tnode_new(n->key, newpos, 1); 1060 tn = tnode_new(n->key, newpos, 1);
1143 } 1061 } else {
1144 else {
1145 newpos = 0; 1062 newpos = 0;
1146 tn = tnode_new(key, newpos, 1); /* First tnode */ 1063 tn = tnode_new(key, newpos, 1); /* First tnode */
1147 } 1064 }
@@ -1151,32 +1068,33 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1151 tnode_free((struct tnode *) l); 1068 tnode_free((struct tnode *) l);
1152 *err = -ENOMEM; 1069 *err = -ENOMEM;
1153 goto err; 1070 goto err;
1154 } 1071 }
1155 1072
1156 NODE_SET_PARENT(tn, tp); 1073 NODE_SET_PARENT(tn, tp);
1157 1074
1158 missbit=tkey_extract_bits(key, newpos, 1); 1075 missbit = tkey_extract_bits(key, newpos, 1);
1159 put_child(t, tn, missbit, (struct node *)l); 1076 put_child(t, tn, missbit, (struct node *)l);
1160 put_child(t, tn, 1-missbit, n); 1077 put_child(t, tn, 1-missbit, n);
1161 1078
1162 if (tp) { 1079 if (tp) {
1163 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1080 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1164 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn); 1081 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1165 } 1082 } else {
1166 else { 1083 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1167 t->trie = (struct node*) tn; /* First tnode */
1168 tp = tn; 1084 tp = tn;
1169 } 1085 }
1170 } 1086 }
1171 if (tp && tp->pos+tp->bits > 32) { 1087
1088 if (tp && tp->pos + tp->bits > 32)
1172 printk("ERROR tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", 1089 printk("ERROR tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1173 tp, tp->pos, tp->bits, key, plen); 1090 tp, tp->pos, tp->bits, key, plen);
1174 } 1091
1175 /* Rebalance the trie */ 1092 /* Rebalance the trie */
1176 t->trie = trie_rebalance(t, tp); 1093
1094 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1177done: 1095done:
1178 t->revision++; 1096 t->revision++;
1179err:; 1097err:
1180 return fa_head; 1098 return fa_head;
1181} 1099}
1182 1100
@@ -1204,17 +1122,18 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1204 1122
1205 key = ntohl(key); 1123 key = ntohl(key);
1206 1124
1207 if (trie_debug) 1125 pr_debug("Insert table=%d %08x/%d\n", tb->tb_id, key, plen);
1208 printk("Insert table=%d %08x/%d\n", tb->tb_id, key, plen);
1209 1126
1210 mask = ntohl( inet_make_mask(plen) ); 1127 mask = ntohl(inet_make_mask(plen));
1211 1128
1212 if (key & ~mask) 1129 if (key & ~mask)
1213 return -EINVAL; 1130 return -EINVAL;
1214 1131
1215 key = key & mask; 1132 key = key & mask;
1216 1133
1217 if ((fi = fib_create_info(r, rta, nlhdr, &err)) == NULL) 1134 fi = fib_create_info(r, rta, nlhdr, &err);
1135
1136 if (!fi)
1218 goto err; 1137 goto err;
1219 1138
1220 l = fib_find_node(t, key); 1139 l = fib_find_node(t, key);
@@ -1236,8 +1155,7 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1236 * and we need to allocate a new one of those as well. 1155 * and we need to allocate a new one of those as well.
1237 */ 1156 */
1238 1157
1239 if (fa && 1158 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1240 fa->fa_info->fib_priority == fi->fib_priority) {
1241 struct fib_alias *fa_orig; 1159 struct fib_alias *fa_orig;
1242 1160
1243 err = -EEXIST; 1161 err = -EEXIST;
@@ -1248,22 +1166,27 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1248 struct fib_info *fi_drop; 1166 struct fib_info *fi_drop;
1249 u8 state; 1167 u8 state;
1250 1168
1251 write_lock_bh(&fib_lock); 1169 err = -ENOBUFS;
1170 new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
1171 if (new_fa == NULL)
1172 goto out;
1252 1173
1253 fi_drop = fa->fa_info; 1174 fi_drop = fa->fa_info;
1254 fa->fa_info = fi; 1175 new_fa->fa_tos = fa->fa_tos;
1255 fa->fa_type = type; 1176 new_fa->fa_info = fi;
1256 fa->fa_scope = r->rtm_scope; 1177 new_fa->fa_type = type;
1178 new_fa->fa_scope = r->rtm_scope;
1257 state = fa->fa_state; 1179 state = fa->fa_state;
1258 fa->fa_state &= ~FA_S_ACCESSED; 1180 new_fa->fa_state &= ~FA_S_ACCESSED;
1259 1181
1260 write_unlock_bh(&fib_lock); 1182 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1183 alias_free_mem_rcu(fa);
1261 1184
1262 fib_release_info(fi_drop); 1185 fib_release_info(fi_drop);
1263 if (state & FA_S_ACCESSED) 1186 if (state & FA_S_ACCESSED)
1264 rt_cache_flush(-1); 1187 rt_cache_flush(-1);
1265 1188
1266 goto succeeded; 1189 goto succeeded;
1267 } 1190 }
1268 /* Error if we find a perfect match which 1191 /* Error if we find a perfect match which
1269 * uses the same scope, type, and nexthop 1192 * uses the same scope, type, and nexthop
@@ -1285,7 +1208,7 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1285 fa = fa_orig; 1208 fa = fa_orig;
1286 } 1209 }
1287 err = -ENOENT; 1210 err = -ENOENT;
1288 if (!(nlhdr->nlmsg_flags&NLM_F_CREATE)) 1211 if (!(nlhdr->nlmsg_flags & NLM_F_CREATE))
1289 goto out; 1212 goto out;
1290 1213
1291 err = -ENOBUFS; 1214 err = -ENOBUFS;
@@ -1298,9 +1221,6 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1298 new_fa->fa_type = type; 1221 new_fa->fa_type = type;
1299 new_fa->fa_scope = r->rtm_scope; 1222 new_fa->fa_scope = r->rtm_scope;
1300 new_fa->fa_state = 0; 1223 new_fa->fa_state = 0;
1301#if 0
1302 new_fa->dst = NULL;
1303#endif
1304 /* 1224 /*
1305 * Insert new entry to the list. 1225 * Insert new entry to the list.
1306 */ 1226 */
@@ -1312,12 +1232,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1312 goto out_free_new_fa; 1232 goto out_free_new_fa;
1313 } 1233 }
1314 1234
1315 write_lock_bh(&fib_lock); 1235 list_add_tail_rcu(&new_fa->fa_list,
1316 1236 (fa ? &fa->fa_list : fa_head));
1317 list_add_tail(&new_fa->fa_list,
1318 (fa ? &fa->fa_list : fa_head));
1319
1320 write_unlock_bh(&fib_lock);
1321 1237
1322 rt_cache_flush(-1); 1238 rt_cache_flush(-1);
1323 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req); 1239 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req);
@@ -1328,38 +1244,40 @@ out_free_new_fa:
1328 kmem_cache_free(fn_alias_kmem, new_fa); 1244 kmem_cache_free(fn_alias_kmem, new_fa);
1329out: 1245out:
1330 fib_release_info(fi); 1246 fib_release_info(fi);
1331err:; 1247err:
1332 return err; 1248 return err;
1333} 1249}
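
The replacement path above is the core of the locking change: instead of editing the matched fib_alias in place under fib_lock, the RCU version builds a complete new alias and swaps it in with list_replace_rcu(), so a reader walking the list sees either the old entry or the new one, never a mixture. A reduced sketch of that copy-and-swap shape (fields trimmed, FA_S_ACCESSED as in fib_lookup.h, cache type spelled as in later kernels):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define FA_S_ACCESSED 0x01

struct fib_alias {
	struct list_head fa_list;
	int fa_type;
	u8 fa_state;
};

static int replace_alias(struct fib_alias *old, int new_type,
			 struct kmem_cache *cachep)
{
	struct fib_alias *new_fa;

	new_fa = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!new_fa)
		return -ENOBUFS;

	/* build the replacement completely before it becomes visible */
	*new_fa = *old;
	new_fa->fa_type = new_type;
	new_fa->fa_state &= ~FA_S_ACCESSED;

	/* one pointer swap publishes it; the old entry may only be
	 * freed after a grace period, as alias_free_mem_rcu() does */
	list_replace_rcu(&old->fa_list, &new_fa->fa_list);
	return 0;
}
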
1334 1250
1335static inline int check_leaf(struct trie *t, struct leaf *l, t_key key, int *plen, const struct flowi *flp, 1251
1336 struct fib_result *res, int *err) 1252/* should be called with rcu_read_lock held */
1253static inline int check_leaf(struct trie *t, struct leaf *l,
1254 t_key key, int *plen, const struct flowi *flp,
1255 struct fib_result *res)
1337{ 1256{
1338 int i; 1257 int err, i;
1339 t_key mask; 1258 t_key mask;
1340 struct leaf_info *li; 1259 struct leaf_info *li;
1341 struct hlist_head *hhead = &l->list; 1260 struct hlist_head *hhead = &l->list;
1342 struct hlist_node *node; 1261 struct hlist_node *node;
1343 1262
1344 hlist_for_each_entry(li, node, hhead, hlist) { 1263 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1345
1346 i = li->plen; 1264 i = li->plen;
1347 mask = ntohl(inet_make_mask(i)); 1265 mask = ntohl(inet_make_mask(i));
1348 if (l->key != (key & mask)) 1266 if (l->key != (key & mask))
1349 continue; 1267 continue;
1350 1268
1351 if (((*err) = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) == 0) { 1269 if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
1352 *plen = i; 1270 *plen = i;
1353#ifdef CONFIG_IP_FIB_TRIE_STATS 1271#ifdef CONFIG_IP_FIB_TRIE_STATS
1354 t->stats.semantic_match_passed++; 1272 t->stats.semantic_match_passed++;
1355#endif 1273#endif
1356 return 1; 1274 return err;
1357 } 1275 }
1358#ifdef CONFIG_IP_FIB_TRIE_STATS 1276#ifdef CONFIG_IP_FIB_TRIE_STATS
1359 t->stats.semantic_match_miss++; 1277 t->stats.semantic_match_miss++;
1360#endif 1278#endif
1361 } 1279 }
1362 return 0; 1280 return 1;
1363} 1281}
1364 1282
1365static int 1283static int
@@ -1370,13 +1288,17 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
1370 struct node *n; 1288 struct node *n;
1371 struct tnode *pn; 1289 struct tnode *pn;
1372 int pos, bits; 1290 int pos, bits;
1373 t_key key=ntohl(flp->fl4_dst); 1291 t_key key = ntohl(flp->fl4_dst);
1374 int chopped_off; 1292 int chopped_off;
1375 t_key cindex = 0; 1293 t_key cindex = 0;
1376 int current_prefix_length = KEYLENGTH; 1294 int current_prefix_length = KEYLENGTH;
1377 n = t->trie; 1295 struct tnode *cn;
1296 t_key node_prefix, key_prefix, pref_mismatch;
1297 int mp;
1298
1299 rcu_read_lock();
1378 1300
1379 read_lock(&fib_lock); 1301 n = rcu_dereference(t->trie);
1380 if (!n) 1302 if (!n)
1381 goto failed; 1303 goto failed;
1382 1304
@@ -1386,15 +1308,14 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
1386 1308
1387 /* Just a leaf? */ 1309 /* Just a leaf? */
1388 if (IS_LEAF(n)) { 1310 if (IS_LEAF(n)) {
1389 if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret)) 1311 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1390 goto found; 1312 goto found;
1391 goto failed; 1313 goto failed;
1392 } 1314 }
1393 pn = (struct tnode *) n; 1315 pn = (struct tnode *) n;
1394 chopped_off = 0; 1316 chopped_off = 0;
1395 1317
1396 while (pn) { 1318 while (pn) {
1397
1398 pos = pn->pos; 1319 pos = pn->pos;
1399 bits = pn->bits; 1320 bits = pn->bits;
1400 1321
@@ -1410,130 +1331,129 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
1410 goto backtrace; 1331 goto backtrace;
1411 } 1332 }
1412 1333
1413 if (IS_TNODE(n)) { 1334 if (IS_LEAF(n)) {
1335 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1336 goto found;
1337 else
1338 goto backtrace;
1339 }
1340
1414#define HL_OPTIMIZE 1341#define HL_OPTIMIZE
1415#ifdef HL_OPTIMIZE 1342#ifdef HL_OPTIMIZE
1416 struct tnode *cn = (struct tnode *)n; 1343 cn = (struct tnode *)n;
1417 t_key node_prefix, key_prefix, pref_mismatch;
1418 int mp;
1419 1344
1420 /* 1345 /*
1421 * It's a tnode, and we can do some extra checks here if we 1346 * It's a tnode, and we can do some extra checks here if we
1422 * like, to avoid descending into a dead-end branch. 1347 * like, to avoid descending into a dead-end branch.
1423 * This tnode is in the parent's child array at index 1348 * This tnode is in the parent's child array at index
1424 * key[p_pos..p_pos+p_bits] but potentially with some bits 1349 * key[p_pos..p_pos+p_bits] but potentially with some bits
1425 * chopped off, so in reality the index may be just a 1350 * chopped off, so in reality the index may be just a
1426 * subprefix, padded with zero at the end. 1351 * subprefix, padded with zero at the end.
1427 * We can also take a look at any skipped bits in this 1352 * We can also take a look at any skipped bits in this
1428 * tnode - everything up to p_pos is supposed to be ok, 1353 * and the non-chopped bits of the index (see previous
1429 * and the non-chopped bits of the index (see previous 1354 * paragraph) are also guaranteed ok, but the rest is
1430 * paragraph) are also guaranteed ok, but the rest is 1355 * paragraph) are also guaranteed ok, but the rest is
1431 * considered unknown. 1356 * considered unknown.
1432 * 1357 *
1433 * The skipped bits are key[pos+bits..cn->pos]. 1358 * The skipped bits are key[pos+bits..cn->pos].
1434 */ 1359 */
1435
1436 /* If current_prefix_length < pos+bits, we are already doing
1437 * actual prefix matching, which means everything from
1438 * pos+(bits-chopped_off) onward must be zero along some
1439 * branch of this subtree - otherwise there is *no* valid
1440 * prefix present. Here we can only check the skipped
1441 * bits. Remember, since we have already indexed into the
1442 * parent's child array, we know that the bits we chopped off
1443 * *are* zero.
1444 */
1445 1360
1446 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */ 1361 /* If current_prefix_length < pos+bits, we are already doing
1447 1362 * actual prefix matching, which means everything from
1448 if (current_prefix_length < pos+bits) { 1363 * pos+(bits-chopped_off) onward must be zero along some
1449 if (tkey_extract_bits(cn->key, current_prefix_length, 1364 * branch of this subtree - otherwise there is *no* valid
1450 cn->pos - current_prefix_length) != 0 || 1365 * prefix present. Here we can only check the skipped
1451 !(cn->child[0])) 1366 * bits. Remember, since we have already indexed into the
1452 goto backtrace; 1367 * parent's child array, we know that the bits we chopped off
1453 } 1368 * *are* zero.
1369 */
1454 1370
1455 /* 1371 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1456 * If chopped_off=0, the index is fully validated and we
1457 * only need to look at the skipped bits for this, the new,
1458 * tnode. What we actually want to do is to find out if
1459 * these skipped bits match our key perfectly, or if we will
1460 * have to count on finding a matching prefix further down,
1461 * because if we do, we would like to have some way of
1462 * verifying the existence of such a prefix at this point.
1463 */
1464 1372
1465 /* The only thing we can do at this point is to verify that 1373 if (current_prefix_length < pos+bits) {
1466 * any such matching prefix can indeed be a prefix to our 1374 if (tkey_extract_bits(cn->key, current_prefix_length,
1467 * key, and if the bits in the node we are inspecting that 1375 cn->pos - current_prefix_length) != 0 ||
1468 * do not match our key are not ZERO, this cannot be true. 1376 !(cn->child[0]))
1469 * Thus, find out where there is a mismatch (before cn->pos) 1377 goto backtrace;
1470 * and verify that all the mismatching bits are zero in the 1378 }
1471 * new tnode's key.
1472 */
1473 1379
1474 /* Note: We aren't very concerned about the piece of the key 1380 /*
1475 * that precedes pn->pos+pn->bits, since these have already been 1381 * If chopped_off=0, the index is fully validated and we
1476 * checked. The bits after cn->pos aren't checked since these are 1382 * only need to look at the skipped bits for this, the new,
1477 * by definition "unknown" at this point. Thus, what we want to 1383 * tnode. What we actually want to do is to find out if
1478 * see is if we are about to enter the "prefix matching" state, 1384 * these skipped bits match our key perfectly, or if we will
1479 * and in that case verify that the skipped bits that will prevail 1385 * have to count on finding a matching prefix further down,
1480 * throughout this subtree are zero, as they have to be if we are 1386 * because if we do, we would like to have some way of
1481 * to find a matching prefix. 1387 * verifying the existence of such a prefix at this point.
1482 */ 1388 */
1483 1389
1484 node_prefix = MASK_PFX(cn->key, cn->pos); 1390 /* The only thing we can do at this point is to verify that
1485 key_prefix = MASK_PFX(key, cn->pos); 1391 * any such matching prefix can indeed be a prefix to our
1486 pref_mismatch = key_prefix^node_prefix; 1392 * key, and if the bits in the node we are inspecting that
1487 mp = 0; 1393 * do not match our key are not ZERO, this cannot be true.
1394 * Thus, find out where there is a mismatch (before cn->pos)
1395 * and verify that all the mismatching bits are zero in the
1396 * new tnode's key.
1397 */
1488 1398
1489 /* In short: If skipped bits in this node do not match the search 1399 /* Note: We aren't very concerned about the piece of the key
1490 * key, enter the "prefix matching" state directly. 1400 * that precedes pn->pos+pn->bits, since these have already been
1491 */ 1401 * checked. The bits after cn->pos aren't checked since these are
1492 if (pref_mismatch) { 1402 * by definition "unknown" at this point. Thus, what we want to
1493 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) { 1403 * see is if we are about to enter the "prefix matching" state,
1494 mp++; 1404 * and in that case verify that the skipped bits that will prevail
1495 pref_mismatch = pref_mismatch <<1; 1405 * throughout this subtree are zero, as they have to be if we are
1496 } 1406 * to find a matching prefix.
1497 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp); 1407 */
1498 1408
1499 if (key_prefix != 0) 1409 node_prefix = MASK_PFX(cn->key, cn->pos);
1500 goto backtrace; 1410 key_prefix = MASK_PFX(key, cn->pos);
1501 1411 pref_mismatch = key_prefix^node_prefix;
1502 if (current_prefix_length >= cn->pos) 1412 mp = 0;
1503 current_prefix_length=mp; 1413
1504 } 1414 /* In short: If skipped bits in this node do not match the search
1505#endif 1415 * key, enter the "prefix matching" state directly.
1506 pn = (struct tnode *)n; /* Descend */ 1416 */
1507 chopped_off = 0; 1417 if (pref_mismatch) {
1508 continue; 1418 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1419 mp++;
1420 pref_mismatch = pref_mismatch <<1;
1421 }
1422 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1423
1424 if (key_prefix != 0)
1425 goto backtrace;
1426
1427 if (current_prefix_length >= cn->pos)
1428 current_prefix_length = mp;
1509 } 1429 }
1510 if (IS_LEAF(n)) { 1430#endif
1511 if (check_leaf(t, (struct leaf *)n, key, &plen, flp, res, &ret)) 1431 pn = (struct tnode *)n; /* Descend */
1512 goto found; 1432 chopped_off = 0;
1513 } 1433 continue;
1434
1514backtrace: 1435backtrace:
1515 chopped_off++; 1436 chopped_off++;
1516 1437
1517 /* Chopping zero bits doesn't change the child key (cindex) */ 1438 /* Chopping zero bits doesn't change the child key (cindex) */
1518 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1)))) { 1439 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1519 chopped_off++; 1440 chopped_off++;
1520 }
1521 1441
1522 /* Decrease current_... with bits chopped off */ 1442 /* Decrease current_... with bits chopped off */
1523 if (current_prefix_length > pn->pos + pn->bits - chopped_off) 1443 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1524 current_prefix_length = pn->pos + pn->bits - chopped_off; 1444 current_prefix_length = pn->pos + pn->bits - chopped_off;
1525 1445
1526 /* 1446 /*
1527 * Either we do the actual chop off, or, if we have 1447 * Either we do the actual chop off, or, if we have
1528 * chopped off all bits in this tnode, walk up to our parent. 1448 * chopped off all bits in this tnode, walk up to our parent.
1529 */ 1449 */
1530 1450
1531 if (chopped_off <= pn->bits) 1451 if (chopped_off <= pn->bits) {
1532 cindex &= ~(1 << (chopped_off-1)); 1452 cindex &= ~(1 << (chopped_off-1));
1533 else { 1453 } else {
1534 if (NODE_PARENT(pn) == NULL) 1454 if (NODE_PARENT(pn) == NULL)
1535 goto failed; 1455 goto failed;
1536 1456
1537 /* Get Child's index */ 1457 /* Get Child's index */
1538 cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits); 1458 cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits);
1539 pn = NODE_PARENT(pn); 1459 pn = NODE_PARENT(pn);
@@ -1548,10 +1468,11 @@ backtrace:
1548failed: 1468failed:
1549 ret = 1; 1469 ret = 1;
1550found: 1470found:
1551 read_unlock(&fib_lock); 1471 rcu_read_unlock();
1552 return ret; 1472 return ret;
1553} 1473}
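
The backtrace label implements the retry: keep chopping trailing bits off the child index until a set bit can be cleared, and climb to the parent once every bit of the current tnode is exhausted. The chopping arithmetic on its own, as a standalone sketch (illustrative names):

/* Advance past zero bits first: clearing a zero cannot produce a
 * new, smaller index to try. */
static int chop_next(unsigned int *cindex, int *chopped_off, int bits)
{
	(*chopped_off)++;
	while (*chopped_off <= bits &&
	       !(*cindex & (1u << (*chopped_off - 1))))
		(*chopped_off)++;

	if (*chopped_off > bits)
		return 0;	/* exhausted: caller walks up to the parent */

	*cindex &= ~(1u << (*chopped_off - 1));
	return 1;		/* retry the descent with the shorter index */
}
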
1554 1474
 1475/* only called from the updater side */
1555static int trie_leaf_remove(struct trie *t, t_key key) 1476static int trie_leaf_remove(struct trie *t, t_key key)
1556{ 1477{
1557 t_key cindex; 1478 t_key cindex;
@@ -1559,24 +1480,20 @@ static int trie_leaf_remove(struct trie *t, t_key key)
1559 struct node *n = t->trie; 1480 struct node *n = t->trie;
1560 struct leaf *l; 1481 struct leaf *l;
1561 1482
1562 if (trie_debug) 1483 pr_debug("entering trie_leaf_remove(%p)\n", n);
1563 printk("entering trie_leaf_remove(%p)\n", n);
1564 1484
1565 /* Note that in the case of skipped bits, those bits are *not* checked! 1485 /* Note that in the case of skipped bits, those bits are *not* checked!
1566 * When we finish this, we will have NULL or a T_LEAF, and the 1486 * When we finish this, we will have NULL or a T_LEAF, and the
1567 * T_LEAF may or may not match our key. 1487 * T_LEAF may or may not match our key.
1568 */ 1488 */
1569 1489
1570 while (n != NULL && IS_TNODE(n)) { 1490 while (n != NULL && IS_TNODE(n)) {
1571 struct tnode *tn = (struct tnode *) n; 1491 struct tnode *tn = (struct tnode *) n;
1572 check_tnode(tn); 1492 check_tnode(tn);
1573 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); 1493 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1574 1494
1575 if (n && NODE_PARENT(n) != tn) { 1495 BUG_ON(n && NODE_PARENT(n) != tn);
1576 printk("BUG tn=%p, n->parent=%p\n", tn, NODE_PARENT(n)); 1496 }
1577 BUG();
1578 }
1579 }
1580 l = (struct leaf *) n; 1497 l = (struct leaf *) n;
1581 1498
1582 if (!n || !tkey_equals(l->key, key)) 1499 if (!n || !tkey_equals(l->key, key))
@@ -1590,23 +1507,24 @@ static int trie_leaf_remove(struct trie *t, t_key key)
1590 t->revision++; 1507 t->revision++;
1591 t->size--; 1508 t->size--;
1592 1509
1510 preempt_disable();
1593 tp = NODE_PARENT(n); 1511 tp = NODE_PARENT(n);
1594 tnode_free((struct tnode *) n); 1512 tnode_free((struct tnode *) n);
1595 1513
1596 if (tp) { 1514 if (tp) {
1597 cindex = tkey_extract_bits(key, tp->pos, tp->bits); 1515 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1598 put_child(t, (struct tnode *)tp, cindex, NULL); 1516 put_child(t, (struct tnode *)tp, cindex, NULL);
1599 t->trie = trie_rebalance(t, tp); 1517 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1600 } 1518 } else
1601 else 1519 rcu_assign_pointer(t->trie, NULL);
1602 t->trie = NULL; 1520 preempt_enable();
1603 1521
1604 return 1; 1522 return 1;
1605} 1523}
1606 1524
1607static int 1525static int
1608fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta, 1526fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1609 struct nlmsghdr *nlhdr, struct netlink_skb_parms *req) 1527 struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
1610{ 1528{
1611 struct trie *t = (struct trie *) tb->tb_data; 1529 struct trie *t = (struct trie *) tb->tb_data;
1612 u32 key, mask; 1530 u32 key, mask;
@@ -1615,6 +1533,8 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1615 struct fib_alias *fa, *fa_to_delete; 1533 struct fib_alias *fa, *fa_to_delete;
1616 struct list_head *fa_head; 1534 struct list_head *fa_head;
1617 struct leaf *l; 1535 struct leaf *l;
1536 struct leaf_info *li;
1537
1618 1538
1619 if (plen > 32) 1539 if (plen > 32)
1620 return -EINVAL; 1540 return -EINVAL;
@@ -1624,7 +1544,7 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1624 memcpy(&key, rta->rta_dst, 4); 1544 memcpy(&key, rta->rta_dst, 4);
1625 1545
1626 key = ntohl(key); 1546 key = ntohl(key);
1627 mask = ntohl( inet_make_mask(plen) ); 1547 mask = ntohl(inet_make_mask(plen));
1628 1548
1629 if (key & ~mask) 1549 if (key & ~mask)
1630 return -EINVAL; 1550 return -EINVAL;
@@ -1641,11 +1561,11 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1641 if (!fa) 1561 if (!fa)
1642 return -ESRCH; 1562 return -ESRCH;
1643 1563
1644 if (trie_debug) 1564 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1645 printk("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1646 1565
1647 fa_to_delete = NULL; 1566 fa_to_delete = NULL;
1648 fa_head = fa->fa_list.prev; 1567 fa_head = fa->fa_list.prev;
1568
1649 list_for_each_entry(fa, fa_head, fa_list) { 1569 list_for_each_entry(fa, fa_head, fa_list) {
1650 struct fib_info *fi = fa->fa_info; 1570 struct fib_info *fi = fa->fa_info;
1651 1571
@@ -1664,39 +1584,31 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1664 } 1584 }
1665 } 1585 }
1666 1586
1667 if (fa_to_delete) { 1587 if (!fa_to_delete)
1668 int kill_li = 0; 1588 return -ESRCH;
1669 struct leaf_info *li;
1670
1671 fa = fa_to_delete;
1672 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);
1673 1589
1674 l = fib_find_node(t, key); 1590 fa = fa_to_delete;
1675 li = find_leaf_info(&l->list, plen); 1591 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);
1676 1592
1677 write_lock_bh(&fib_lock); 1593 l = fib_find_node(t, key);
1594 li = find_leaf_info(&l->list, plen);
1678 1595
1679 list_del(&fa->fa_list); 1596 list_del_rcu(&fa->fa_list);
1680 1597
1681 if (list_empty(fa_head)) { 1598 if (list_empty(fa_head)) {
1682 hlist_del(&li->hlist); 1599 hlist_del_rcu(&li->hlist);
1683 kill_li = 1; 1600 free_leaf_info(li);
1684 } 1601 }
1685 write_unlock_bh(&fib_lock);
1686
1687 if (kill_li)
1688 free_leaf_info(li);
1689 1602
1690 if (hlist_empty(&l->list)) 1603 if (hlist_empty(&l->list))
1691 trie_leaf_remove(t, key); 1604 trie_leaf_remove(t, key);
1692 1605
1693 if (fa->fa_state & FA_S_ACCESSED) 1606 if (fa->fa_state & FA_S_ACCESSED)
1694 rt_cache_flush(-1); 1607 rt_cache_flush(-1);
1695 1608
1696 fn_free_alias(fa); 1609 fib_release_info(fa->fa_info);
1697 return 0; 1610 alias_free_mem_rcu(fa);
1698 } 1611 return 0;
1699 return -ESRCH;
1700} 1612}
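
Deletion above splits into unlink-now, free-later: list_del_rcu() leaves the removed entry's forward pointer intact for readers mid-traversal, and alias_free_mem_rcu() defers the actual kmem_cache_free() past a grace period. A sketch of that shape, assuming the rcu_head is embedded in the alias as the patch does:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct fib_alias {
	struct list_head fa_list;
	struct rcu_head rcu;
};

static struct kmem_cache *fn_alias_kmem;

static void alias_free_rcu(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);

	kmem_cache_free(fn_alias_kmem, fa);
}

static void delete_alias(struct fib_alias *fa)
{
	list_del_rcu(&fa->fa_list);	/* readers can still step over fa */
	call_rcu(&fa->rcu, alias_free_rcu);
}
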
1701 1613
1702static int trie_flush_list(struct trie *t, struct list_head *head) 1614static int trie_flush_list(struct trie *t, struct list_head *head)
@@ -1706,14 +1618,11 @@ static int trie_flush_list(struct trie *t, struct list_head *head)
1706 1618
1707 list_for_each_entry_safe(fa, fa_node, head, fa_list) { 1619 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1708 struct fib_info *fi = fa->fa_info; 1620 struct fib_info *fi = fa->fa_info;
1709
1710 if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
1711
1712 write_lock_bh(&fib_lock);
1713 list_del(&fa->fa_list);
1714 write_unlock_bh(&fib_lock);
1715 1621
1716 fn_free_alias(fa); 1622 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1623 list_del_rcu(&fa->fa_list);
1624 fib_release_info(fa->fa_info);
1625 alias_free_mem_rcu(fa);
1717 found++; 1626 found++;
1718 } 1627 }
1719 } 1628 }
@@ -1728,37 +1637,34 @@ static int trie_flush_leaf(struct trie *t, struct leaf *l)
1728 struct leaf_info *li = NULL; 1637 struct leaf_info *li = NULL;
1729 1638
1730 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { 1639 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1731
1732 found += trie_flush_list(t, &li->falh); 1640 found += trie_flush_list(t, &li->falh);
1733 1641
1734 if (list_empty(&li->falh)) { 1642 if (list_empty(&li->falh)) {
1735 1643 hlist_del_rcu(&li->hlist);
1736 write_lock_bh(&fib_lock);
1737 hlist_del(&li->hlist);
1738 write_unlock_bh(&fib_lock);
1739
1740 free_leaf_info(li); 1644 free_leaf_info(li);
1741 } 1645 }
1742 } 1646 }
1743 return found; 1647 return found;
1744} 1648}
1745 1649
 1650/* rcu_read_lock needs to be held by the caller on the read side */
1651
1746static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf) 1652static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1747{ 1653{
1748 struct node *c = (struct node *) thisleaf; 1654 struct node *c = (struct node *) thisleaf;
1749 struct tnode *p; 1655 struct tnode *p;
1750 int idx; 1656 int idx;
1657 struct node *trie = rcu_dereference(t->trie);
1751 1658
1752 if (c == NULL) { 1659 if (c == NULL) {
1753 if (t->trie == NULL) 1660 if (trie == NULL)
1754 return NULL; 1661 return NULL;
1755 1662
1756 if (IS_LEAF(t->trie)) /* trie w. just a leaf */ 1663 if (IS_LEAF(trie)) /* trie w. just a leaf */
1757 return (struct leaf *) t->trie; 1664 return (struct leaf *) trie;
1758 1665
1759 p = (struct tnode*) t->trie; /* Start */ 1666 p = (struct tnode*) trie; /* Start */
1760 } 1667 } else
1761 else
1762 p = (struct tnode *) NODE_PARENT(c); 1668 p = (struct tnode *) NODE_PARENT(c);
1763 1669
1764 while (p) { 1670 while (p) {
@@ -1771,29 +1677,31 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1771 pos = 0; 1677 pos = 0;
1772 1678
1773 last = 1 << p->bits; 1679 last = 1 << p->bits;
1774 for(idx = pos; idx < last ; idx++) { 1680 for (idx = pos; idx < last ; idx++) {
1775 if (p->child[idx]) { 1681 c = rcu_dereference(p->child[idx]);
1776 1682
1777 /* Descend if tnode */ 1683 if (!c)
1778 1684 continue;
1779 while (IS_TNODE(p->child[idx])) { 1685
1780 p = (struct tnode*) p->child[idx]; 1686 /* Descend if tnode */
1781 idx = 0; 1687 while (IS_TNODE(c)) {
1782 1688 p = (struct tnode *) c;
1783 /* Rightmost non-NULL branch */ 1689 idx = 0;
1784 if (p && IS_TNODE(p)) 1690
1785 while (p->child[idx] == NULL && idx < (1 << p->bits)) idx++; 1691 /* Rightmost non-NULL branch */
1786 1692 if (p && IS_TNODE(p))
1787 /* Done with this tnode? */ 1693 while (!(c = rcu_dereference(p->child[idx]))
1788 if (idx >= (1 << p->bits) || p->child[idx] == NULL ) 1694 && idx < (1<<p->bits)) idx++;
1789 goto up; 1695
1790 } 1696 /* Done with this tnode? */
1791 return (struct leaf*) p->child[idx]; 1697 if (idx >= (1 << p->bits) || !c)
1698 goto up;
1792 } 1699 }
1700 return (struct leaf *) c;
1793 } 1701 }
1794up: 1702up:
1795 /* No more children go up one step */ 1703 /* No more children go up one step */
1796 c = (struct node*) p; 1704 c = (struct node *) p;
1797 p = (struct tnode *) NODE_PARENT(p); 1705 p = (struct tnode *) NODE_PARENT(p);
1798 } 1706 }
1799 return NULL; /* Ready. Root of trie */ 1707 return NULL; /* Ready. Root of trie */
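
Because nextleaf() now chases child pointers with rcu_dereference(), any walk over the leaves must itself run under rcu_read_lock(), which is exactly what fn_trie_flush() below adds. A hypothetical caller, for illustration:

/* count all leaves; nothing may block between lock and unlock,
 * and no leaf pointer may be kept once the lock is dropped */
static int count_leaves(struct trie *t)
{
	struct leaf *l = NULL;
	int n = 0;

	rcu_read_lock();
	while ((l = nextleaf(t, l)) != NULL)
		n++;
	rcu_read_unlock();

	return n;
}
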
@@ -1807,23 +1715,24 @@ static int fn_trie_flush(struct fib_table *tb)
1807 1715
1808 t->revision++; 1716 t->revision++;
1809 1717
1810 for (h=0; (l = nextleaf(t, l)) != NULL; h++) { 1718 rcu_read_lock();
1719 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1811 found += trie_flush_leaf(t, l); 1720 found += trie_flush_leaf(t, l);
1812 1721
1813 if (ll && hlist_empty(&ll->list)) 1722 if (ll && hlist_empty(&ll->list))
1814 trie_leaf_remove(t, ll->key); 1723 trie_leaf_remove(t, ll->key);
1815 ll = l; 1724 ll = l;
1816 } 1725 }
1726 rcu_read_unlock();
1817 1727
1818 if (ll && hlist_empty(&ll->list)) 1728 if (ll && hlist_empty(&ll->list))
1819 trie_leaf_remove(t, ll->key); 1729 trie_leaf_remove(t, ll->key);
1820 1730
1821 if (trie_debug) 1731 pr_debug("trie_flush found=%d\n", found);
1822 printk("trie_flush found=%d\n", found);
1823 return found; 1732 return found;
1824} 1733}
1825 1734
1826static int trie_last_dflt=-1; 1735static int trie_last_dflt = -1;
1827 1736
1828static void 1737static void
1829fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) 1738fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
@@ -1840,7 +1749,7 @@ fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
1840 last_resort = NULL; 1749 last_resort = NULL;
1841 order = -1; 1750 order = -1;
1842 1751
1843 read_lock(&fib_lock); 1752 rcu_read_lock();
1844 1753
1845 l = fib_find_node(t, 0); 1754 l = fib_find_node(t, 0);
1846 if (!l) 1755 if (!l)
@@ -1853,20 +1762,20 @@ fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
1853 if (list_empty(fa_head)) 1762 if (list_empty(fa_head))
1854 goto out; 1763 goto out;
1855 1764
1856 list_for_each_entry(fa, fa_head, fa_list) { 1765 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1857 struct fib_info *next_fi = fa->fa_info; 1766 struct fib_info *next_fi = fa->fa_info;
1858 1767
1859 if (fa->fa_scope != res->scope || 1768 if (fa->fa_scope != res->scope ||
1860 fa->fa_type != RTN_UNICAST) 1769 fa->fa_type != RTN_UNICAST)
1861 continue; 1770 continue;
1862 1771
1863 if (next_fi->fib_priority > res->fi->fib_priority) 1772 if (next_fi->fib_priority > res->fi->fib_priority)
1864 break; 1773 break;
1865 if (!next_fi->fib_nh[0].nh_gw || 1774 if (!next_fi->fib_nh[0].nh_gw ||
1866 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) 1775 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1867 continue; 1776 continue;
1868 fa->fa_state |= FA_S_ACCESSED; 1777 fa->fa_state |= FA_S_ACCESSED;
1869 1778
1870 if (fi == NULL) { 1779 if (fi == NULL) {
1871 if (next_fi != res->fi) 1780 if (next_fi != res->fi)
1872 break; 1781 break;
@@ -1904,7 +1813,7 @@ fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
1904 } 1813 }
1905 trie_last_dflt = last_idx; 1814 trie_last_dflt = last_idx;
1906 out:; 1815 out:;
1907 read_unlock(&fib_lock); 1816 rcu_read_unlock();
1908} 1817}
1909 1818
1910static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb, 1819static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
@@ -1913,12 +1822,14 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fi
1913 int i, s_i; 1822 int i, s_i;
1914 struct fib_alias *fa; 1823 struct fib_alias *fa;
1915 1824
1916 u32 xkey=htonl(key); 1825 u32 xkey = htonl(key);
1917 1826
1918 s_i=cb->args[3]; 1827 s_i = cb->args[3];
1919 i = 0; 1828 i = 0;
1920 1829
1921 list_for_each_entry(fa, fah, fa_list) { 1830 /* rcu_read_lock is held by the caller */
1831
1832 list_for_each_entry_rcu(fa, fah, fa_list) {
1922 if (i < s_i) { 1833 if (i < s_i) {
1923 i++; 1834 i++;
1924 continue; 1835 continue;
@@ -1946,10 +1857,10 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fi
1946 fa->fa_info, 0) < 0) { 1857 fa->fa_info, 0) < 0) {
1947 cb->args[3] = i; 1858 cb->args[3] = i;
1948 return -1; 1859 return -1;
1949 } 1860 }
1950 i++; 1861 i++;
1951 } 1862 }
1952 cb->args[3]=i; 1863 cb->args[3] = i;
1953 return skb->len; 1864 return skb->len;
1954} 1865}
1955 1866
@@ -1959,10 +1870,10 @@ static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, str
1959 int h, s_h; 1870 int h, s_h;
1960 struct list_head *fa_head; 1871 struct list_head *fa_head;
1961 struct leaf *l = NULL; 1872 struct leaf *l = NULL;
1962 s_h=cb->args[2];
1963 1873
1964 for (h=0; (l = nextleaf(t, l)) != NULL; h++) { 1874 s_h = cb->args[2];
1965 1875
1876 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1966 if (h < s_h) 1877 if (h < s_h)
1967 continue; 1878 continue;
1968 if (h > s_h) 1879 if (h > s_h)
@@ -1970,7 +1881,7 @@ static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, str
1970 sizeof(cb->args) - 3*sizeof(cb->args[0])); 1881 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1971 1882
1972 fa_head = get_fa_head(l, plen); 1883 fa_head = get_fa_head(l, plen);
1973 1884
1974 if (!fa_head) 1885 if (!fa_head)
1975 continue; 1886 continue;
1976 1887
@@ -1978,11 +1889,11 @@ static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, str
1978 continue; 1889 continue;
1979 1890
1980 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) { 1891 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1981 cb->args[2]=h; 1892 cb->args[2] = h;
1982 return -1; 1893 return -1;
1983 } 1894 }
1984 } 1895 }
1985 cb->args[2]=h; 1896 cb->args[2] = h;
1986 return skb->len; 1897 return skb->len;
1987} 1898}
1988 1899
@@ -1993,25 +1904,24 @@ static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin
1993 1904
1994 s_m = cb->args[1]; 1905 s_m = cb->args[1];
1995 1906
1996 read_lock(&fib_lock); 1907 rcu_read_lock();
1997 for (m=0; m<=32; m++) { 1908 for (m = 0; m <= 32; m++) {
1998
1999 if (m < s_m) 1909 if (m < s_m)
2000 continue; 1910 continue;
2001 if (m > s_m) 1911 if (m > s_m)
2002 memset(&cb->args[2], 0, 1912 memset(&cb->args[2], 0,
2003 sizeof(cb->args) - 2*sizeof(cb->args[0])); 1913 sizeof(cb->args) - 2*sizeof(cb->args[0]));
2004 1914
2005 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) { 1915 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
2006 cb->args[1] = m; 1916 cb->args[1] = m;
2007 goto out; 1917 goto out;
2008 } 1918 }
2009 } 1919 }
2010 read_unlock(&fib_lock); 1920 rcu_read_unlock();
2011 cb->args[1] = m; 1921 cb->args[1] = m;
2012 return skb->len; 1922 return skb->len;
2013 out: 1923out:
2014 read_unlock(&fib_lock); 1924 rcu_read_unlock();
2015 return -1; 1925 return -1;
2016} 1926}
2017 1927
@@ -2051,9 +1961,9 @@ struct fib_table * __init fib_hash_init(int id)
2051 trie_init(t); 1961 trie_init(t);
2052 1962
2053 if (id == RT_TABLE_LOCAL) 1963 if (id == RT_TABLE_LOCAL)
2054 trie_local = t; 1964 trie_local = t;
2055 else if (id == RT_TABLE_MAIN) 1965 else if (id == RT_TABLE_MAIN)
2056 trie_main = t; 1966 trie_main = t;
2057 1967
2058 if (id == RT_TABLE_LOCAL) 1968 if (id == RT_TABLE_LOCAL)
2059 printk("IPv4 FIB: Using LC-trie version %s\n", VERSION); 1969 printk("IPv4 FIB: Using LC-trie version %s\n", VERSION);
@@ -2065,7 +1975,8 @@ struct fib_table * __init fib_hash_init(int id)
2065 1975
2066static void putspace_seq(struct seq_file *seq, int n) 1976static void putspace_seq(struct seq_file *seq, int n)
2067{ 1977{
2068 while (n--) seq_printf(seq, " "); 1978 while (n--)
1979 seq_printf(seq, " ");
2069} 1980}
2070 1981
2071static void printbin_seq(struct seq_file *seq, unsigned int v, int bits) 1982static void printbin_seq(struct seq_file *seq, unsigned int v, int bits)
@@ -2086,29 +1997,22 @@ static void printnode_seq(struct seq_file *seq, int indent, struct node *n,
2086 seq_printf(seq, "%d/", cindex); 1997 seq_printf(seq, "%d/", cindex);
2087 printbin_seq(seq, cindex, bits); 1998 printbin_seq(seq, cindex, bits);
2088 seq_printf(seq, ": "); 1999 seq_printf(seq, ": ");
2089 } 2000 } else
2090 else
2091 seq_printf(seq, "<root>: "); 2001 seq_printf(seq, "<root>: ");
2092 seq_printf(seq, "%s:%p ", IS_LEAF(n)?"Leaf":"Internal node", n); 2002 seq_printf(seq, "%s:%p ", IS_LEAF(n)?"Leaf":"Internal node", n);
2093 2003
2094 if (IS_LEAF(n))
2095 seq_printf(seq, "key=%d.%d.%d.%d\n",
2096 n->key >> 24, (n->key >> 16) % 256, (n->key >> 8) % 256, n->key % 256);
2097 else {
2098 int plen = ((struct tnode *)n)->pos;
2099 t_key prf=MASK_PFX(n->key, plen);
2100 seq_printf(seq, "key=%d.%d.%d.%d/%d\n",
2101 prf >> 24, (prf >> 16) % 256, (prf >> 8) % 256, prf % 256, plen);
2102 }
2103 if (IS_LEAF(n)) { 2004 if (IS_LEAF(n)) {
2104 struct leaf *l=(struct leaf *)n; 2005 struct leaf *l = (struct leaf *)n;
2105 struct fib_alias *fa; 2006 struct fib_alias *fa;
2106 int i; 2007 int i;
2107 for (i=32; i>=0; i--) 2008
2108 if (find_leaf_info(&l->list, i)) { 2009 seq_printf(seq, "key=%d.%d.%d.%d\n",
2109 2010 n->key >> 24, (n->key >> 16) % 256, (n->key >> 8) % 256, n->key % 256);
2011
2012 for (i = 32; i >= 0; i--)
2013 if (find_leaf_info(&l->list, i)) {
2110 struct list_head *fa_head = get_fa_head(l, i); 2014 struct list_head *fa_head = get_fa_head(l, i);
2111 2015
2112 if (!fa_head) 2016 if (!fa_head)
2113 continue; 2017 continue;
2114 2018
@@ -2118,17 +2022,16 @@ static void printnode_seq(struct seq_file *seq, int indent, struct node *n,
2118 putspace_seq(seq, indent+2); 2022 putspace_seq(seq, indent+2);
2119 seq_printf(seq, "{/%d...dumping}\n", i); 2023 seq_printf(seq, "{/%d...dumping}\n", i);
2120 2024
2121 2025 list_for_each_entry_rcu(fa, fa_head, fa_list) {
2122 list_for_each_entry(fa, fa_head, fa_list) {
2123 putspace_seq(seq, indent+2); 2026 putspace_seq(seq, indent+2);
2124 if (fa->fa_info->fib_nh == NULL) {
2125 seq_printf(seq, "Error _fib_nh=NULL\n");
2126 continue;
2127 }
2128 if (fa->fa_info == NULL) { 2027 if (fa->fa_info == NULL) {
2129 seq_printf(seq, "Error fa_info=NULL\n"); 2028 seq_printf(seq, "Error fa_info=NULL\n");
2130 continue; 2029 continue;
2131 } 2030 }
2031 if (fa->fa_info->fib_nh == NULL) {
2032 seq_printf(seq, "Error _fib_nh=NULL\n");
2033 continue;
2034 }
2132 2035
2133 seq_printf(seq, "{type=%d scope=%d TOS=%d}\n", 2036 seq_printf(seq, "{type=%d scope=%d TOS=%d}\n",
2134 fa->fa_type, 2037 fa->fa_type,
@@ -2136,11 +2039,16 @@ static void printnode_seq(struct seq_file *seq, int indent, struct node *n,
2136 fa->fa_tos); 2039 fa->fa_tos);
2137 } 2040 }
2138 } 2041 }
2139 } 2042 } else {
2140 else if (IS_TNODE(n)) {
2141 struct tnode *tn = (struct tnode *)n; 2043 struct tnode *tn = (struct tnode *)n;
2044 int plen = ((struct tnode *)n)->pos;
2045 t_key prf = MASK_PFX(n->key, plen);
2046
2047 seq_printf(seq, "key=%d.%d.%d.%d/%d\n",
2048 prf >> 24, (prf >> 16) % 256, (prf >> 8) % 256, prf % 256, plen);
2049
2142 putspace_seq(seq, indent); seq_printf(seq, "| "); 2050 putspace_seq(seq, indent); seq_printf(seq, "| ");
2143 seq_printf(seq, "{key prefix=%08x/", tn->key&TKEY_GET_MASK(0, tn->pos)); 2051 seq_printf(seq, "{key prefix=%08x/", tn->key & TKEY_GET_MASK(0, tn->pos));
2144 printbin_seq(seq, tkey_extract_bits(tn->key, 0, tn->pos), tn->pos); 2052 printbin_seq(seq, tkey_extract_bits(tn->key, 0, tn->pos), tn->pos);
2145 seq_printf(seq, "}\n"); 2053 seq_printf(seq, "}\n");
2146 putspace_seq(seq, indent); seq_printf(seq, "| "); 2054 putspace_seq(seq, indent); seq_printf(seq, "| ");
@@ -2154,194 +2062,196 @@ static void printnode_seq(struct seq_file *seq, int indent, struct node *n,
2154 2062
2155static void trie_dump_seq(struct seq_file *seq, struct trie *t) 2063static void trie_dump_seq(struct seq_file *seq, struct trie *t)
2156{ 2064{
2157 struct node *n = t->trie; 2065 struct node *n;
2158 int cindex=0; 2066 int cindex = 0;
2159 int indent=1; 2067 int indent = 1;
2160 int pend=0; 2068 int pend = 0;
2161 int depth = 0; 2069 int depth = 0;
2070 struct tnode *tn;
2162 2071
2163 read_lock(&fib_lock); 2072 rcu_read_lock();
2164 2073 n = rcu_dereference(t->trie);
2165 seq_printf(seq, "------ trie_dump of t=%p ------\n", t); 2074 seq_printf(seq, "------ trie_dump of t=%p ------\n", t);
2166 if (n) {
2167 printnode_seq(seq, indent, n, pend, cindex, 0);
2168 if (IS_TNODE(n)) {
2169 struct tnode *tn = (struct tnode *)n;
2170 pend = tn->pos+tn->bits;
2171 putspace_seq(seq, indent); seq_printf(seq, "\\--\n");
2172 indent += 3;
2173 depth++;
2174
2175 while (tn && cindex < (1 << tn->bits)) {
2176 if (tn->child[cindex]) {
2177
2178 /* Got a child */
2179
2180 printnode_seq(seq, indent, tn->child[cindex], pend, cindex, tn->bits);
2181 if (IS_LEAF(tn->child[cindex])) {
2182 cindex++;
2183
2184 }
2185 else {
2186 /*
2187 * New tnode. Descend one level
2188 */
2189
2190 depth++;
2191 n = tn->child[cindex];
2192 tn = (struct tnode *)n;
2193 pend = tn->pos+tn->bits;
2194 putspace_seq(seq, indent); seq_printf(seq, "\\--\n");
2195 indent+=3;
2196 cindex=0;
2197 }
2198 }
2199 else
2200 cindex++;
2201 2075
2076 if (!n) {
2077 seq_printf(seq, "------ trie is empty\n");
2078
2079 rcu_read_unlock();
2080 return;
2081 }
2082
2083 printnode_seq(seq, indent, n, pend, cindex, 0);
2084
2085 if (!IS_TNODE(n)) {
2086 rcu_read_unlock();
2087 return;
2088 }
2089
2090 tn = (struct tnode *)n;
2091 pend = tn->pos+tn->bits;
2092 putspace_seq(seq, indent); seq_printf(seq, "\\--\n");
2093 indent += 3;
2094 depth++;
2095
2096 while (tn && cindex < (1 << tn->bits)) {
2097 struct node *child = rcu_dereference(tn->child[cindex]);
2098 if (!child)
2099 cindex++;
2100 else {
2101 /* Got a child */
2102 printnode_seq(seq, indent, child, pend,
2103 cindex, tn->bits);
2104
2105 if (IS_LEAF(child))
2106 cindex++;
2107
2108 else {
2202 /* 2109 /*
2203 * Test if we are done 2110 * New tnode. Descend one level
2204 */ 2111 */
2205
2206 while (cindex >= (1 << tn->bits)) {
2207 2112
2208 /* 2113 depth++;
2209 * Move upwards and test for root 2114 n = child;
2210 * pop off all traversed nodes 2115 tn = (struct tnode *)n;
2211 */ 2116 pend = tn->pos+tn->bits;
2212 2117 putspace_seq(seq, indent);
2213 if (NODE_PARENT(tn) == NULL) { 2118 seq_printf(seq, "\\--\n");
2214 tn = NULL; 2119 indent += 3;
2215 n = NULL; 2120 cindex = 0;
2216 break;
2217 }
2218 else {
2219 cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
2220 tn = NODE_PARENT(tn);
2221 cindex++;
2222 n = (struct node *)tn;
2223 pend = tn->pos+tn->bits;
2224 indent-=3;
2225 depth--;
2226 }
2227 }
2228 } 2121 }
2229 } 2122 }
2230 else n = NULL;
2231 }
2232 else seq_printf(seq, "------ trie is empty\n");
2233 2123
2234 read_unlock(&fib_lock); 2124 /*
2125 * Test if we are done
2126 */
2127
2128 while (cindex >= (1 << tn->bits)) {
2129 /*
2130 * Move upwards and test for root
2131 * pop off all traversed nodes
2132 */
2133
2134 if (NODE_PARENT(tn) == NULL) {
2135 tn = NULL;
2136 break;
2137 }
2138
2139 cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
2140 cindex++;
2141 tn = NODE_PARENT(tn);
2142 pend = tn->pos + tn->bits;
2143 indent -= 3;
2144 depth--;
2145 }
2146 }
2147 rcu_read_unlock();
2235} 2148}
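The rewritten trie_dump_seq() above flattens the traversal: descend into the first non-NULL child of each tnode, and when the child index runs past the node's fanout, climb back up through NODE_PARENT(), recomputing the resume index from the key bits. A minimal user-space sketch of the same parent-pointer walk, reduced to a binary trie (the two-child layout and names are illustrative, not the kernel's):

/* --- editor's sketch, not part of the patch --- */
#include <stdio.h>

struct tnode2 {
	struct tnode2 *parent;
	struct tnode2 *child[2];	/* NULL slots allowed */
	int is_leaf;
	int key;
};

static void walk(struct tnode2 *root)
{
	struct tnode2 *n = root;
	int cindex = 0;

	if (!n)
		return;
	if (n->is_leaf) {
		printf("leaf key=%d\n", n->key);
		return;
	}
	while (n) {
		if (cindex < 2 && n->child[cindex]) {
			struct tnode2 *ch = n->child[cindex];

			if (ch->is_leaf) {
				printf("leaf key=%d\n", ch->key);
				cindex++;
			} else {
				n = ch;		/* descend one level */
				cindex = 0;
			}
		} else if (cindex < 2) {
			cindex++;		/* skip a NULL slot */
		} else {
			/* Done here: move upward, resume past our slot. */
			struct tnode2 *p = n->parent;

			if (!p)
				break;
			cindex = (p->child[0] == n) ? 1 : 2;
			n = p;
		}
	}
}

The kernel recovers the resume index arithmetically with tkey_extract_bits() on the node's key, since a tnode has 2^bits children rather than two.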
2236 2149
2237static struct trie_stat *trie_stat_new(void) 2150static struct trie_stat *trie_stat_new(void)
2238{ 2151{
2239 struct trie_stat *s = kmalloc(sizeof(struct trie_stat), GFP_KERNEL); 2152 struct trie_stat *s;
2240 int i; 2153 int i;
2241 2154
2242 if (s) { 2155 s = kmalloc(sizeof(struct trie_stat), GFP_KERNEL);
2243 s->totdepth = 0; 2156 if (!s)
2244 s->maxdepth = 0; 2157 return NULL;
2245 s->tnodes = 0; 2158
2246 s->leaves = 0; 2159 s->totdepth = 0;
2247 s->nullpointers = 0; 2160 s->maxdepth = 0;
2248 2161 s->tnodes = 0;
2249 for(i=0; i< MAX_CHILDS; i++) 2162 s->leaves = 0;
2250 s->nodesizes[i] = 0; 2163 s->nullpointers = 0;
2251 } 2164
2165 for (i = 0; i < MAX_CHILDS; i++)
2166 s->nodesizes[i] = 0;
2167
2252 return s; 2168 return s;
2253} 2169}
2254 2170
2255static struct trie_stat *trie_collect_stats(struct trie *t) 2171static struct trie_stat *trie_collect_stats(struct trie *t)
2256{ 2172{
2257 struct node *n = t->trie; 2173 struct node *n;
2258 struct trie_stat *s = trie_stat_new(); 2174 struct trie_stat *s = trie_stat_new();
2259 int cindex = 0; 2175 int cindex = 0;
2260 int indent = 1;
2261 int pend = 0; 2176 int pend = 0;
2262 int depth = 0; 2177 int depth = 0;
2263 2178
2264 read_lock(&fib_lock); 2179 if (!s)
2180 return NULL;
2265 2181
2266 if (s) { 2182 rcu_read_lock();
2267 if (n) { 2183 n = rcu_dereference(t->trie);
2268 if (IS_TNODE(n)) {
2269 struct tnode *tn = (struct tnode *)n;
2270 pend = tn->pos+tn->bits;
2271 indent += 3;
2272 s->nodesizes[tn->bits]++;
2273 depth++;
2274 2184
2275 while (tn && cindex < (1 << tn->bits)) { 2185 if (!n)
2276 if (tn->child[cindex]) { 2186 return s;
2277 /* Got a child */ 2187
2278 2188 if (IS_TNODE(n)) {
2279 if (IS_LEAF(tn->child[cindex])) { 2189 struct tnode *tn = (struct tnode *)n;
2280 cindex++; 2190 pend = tn->pos+tn->bits;
2281 2191 s->nodesizes[tn->bits]++;
2282 /* stats */ 2192 depth++;
2283 if (depth > s->maxdepth) 2193
2284 s->maxdepth = depth; 2194 while (tn && cindex < (1 << tn->bits)) {
2285 s->totdepth += depth; 2195 struct node *ch = rcu_dereference(tn->child[cindex]);
2286 s->leaves++; 2196 if (ch) {
2287 }
2288
2289 else {
2290 /*
2291 * New tnode. Descend one level
2292 */
2293
2294 s->tnodes++;
2295 s->nodesizes[tn->bits]++;
2296 depth++;
2297
2298 n = tn->child[cindex];
2299 tn = (struct tnode *)n;
2300 pend = tn->pos+tn->bits;
2301
2302 indent += 3;
2303 cindex = 0;
2304 }
2305 }
2306 else {
2307 cindex++;
2308 s->nullpointers++;
2309 }
2310 2197
2198 /* Got a child */
2199
2200 if (IS_LEAF(tn->child[cindex])) {
2201 cindex++;
2202
2203 /* stats */
2204 if (depth > s->maxdepth)
2205 s->maxdepth = depth;
2206 s->totdepth += depth;
2207 s->leaves++;
2208 } else {
2311 /* 2209 /*
2312 * Test if we are done 2210 * New tnode. Descend one level
2313 */ 2211 */
2314 2212
2315 while (cindex >= (1 << tn->bits)) { 2213 s->tnodes++;
2316 2214 s->nodesizes[tn->bits]++;
2317 /* 2215 depth++;
2318 * Move upwards and test for root 2216
2319 * pop off all traversed nodes 2217 n = ch;
2320 */ 2218 tn = (struct tnode *)n;
2321 2219 pend = tn->pos+tn->bits;
2322 2220
2323 if (NODE_PARENT(tn) == NULL) { 2221 cindex = 0;
2324 tn = NULL;
2325 n = NULL;
2326 break;
2327 }
2328 else {
2329 cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
2330 tn = NODE_PARENT(tn);
2331 cindex++;
2332 n = (struct node *)tn;
2333 pend = tn->pos+tn->bits;
2334 indent -= 3;
2335 depth--;
2336 }
2337 }
2338 } 2222 }
2223 } else {
2224 cindex++;
2225 s->nullpointers++;
2339 } 2226 }
2340 else n = NULL; 2227
2228 /*
2229 * Test if we are done
2230 */
2231
2232 while (cindex >= (1 << tn->bits)) {
2233 /*
2234 * Move upwards and test for root
2235 * pop off all traversed nodes
2236 */
2237
2238 if (NODE_PARENT(tn) == NULL) {
2239 tn = NULL;
2240 n = NULL;
2241 break;
2242 }
2243
2244 cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
2245 tn = NODE_PARENT(tn);
2246 cindex++;
2247 n = (struct node *)tn;
2248 pend = tn->pos+tn->bits;
2249 depth--;
2250 }
2341 } 2251 }
2342 } 2252 }
2343 2253
2344 read_unlock(&fib_lock); 2254 rcu_read_unlock();
2345 return s; 2255 return s;
2346} 2256}
2347 2257
@@ -2359,17 +2269,22 @@ static struct fib_alias *fib_triestat_get_next(struct seq_file *seq)
2359 2269
2360static void *fib_triestat_seq_start(struct seq_file *seq, loff_t *pos) 2270static void *fib_triestat_seq_start(struct seq_file *seq, loff_t *pos)
2361{ 2271{
2362 void *v = NULL; 2272 if (!ip_fib_main_table)
2273 return NULL;
2363 2274
2364 if (ip_fib_main_table) 2275 if (*pos)
2365 v = *pos ? fib_triestat_get_next(seq) : SEQ_START_TOKEN; 2276 return fib_triestat_get_next(seq);
2366 return v; 2277 else
2278 return SEQ_START_TOKEN;
2367} 2279}
2368 2280
2369static void *fib_triestat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2281static void *fib_triestat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2370{ 2282{
2371 ++*pos; 2283 ++*pos;
2372 return v == SEQ_START_TOKEN ? fib_triestat_get_first(seq) : fib_triestat_get_next(seq); 2284 if (v == SEQ_START_TOKEN)
2285 return fib_triestat_get_first(seq);
2286 else
2287 return fib_triestat_get_next(seq);
2373} 2288}
2374 2289
2375static void fib_triestat_seq_stop(struct seq_file *seq, void *v) 2290static void fib_triestat_seq_stop(struct seq_file *seq, void *v)
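For context, start/next/stop/show quadruples like the one above are consumed by the seq_file core; the usual wiring for this kernel era looks as follows (a sketch of the standard pattern, with assumed ops/fops names, not lines from this patch):

/* --- editor's sketch of the usual seq_file wiring, not part of the patch --- */
static struct seq_operations fib_triestat_seq_ops = {
	.start = fib_triestat_seq_start,
	.next  = fib_triestat_seq_next,
	.stop  = fib_triestat_seq_stop,
	.show  = fib_triestat_seq_show,
};

static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fib_triestat_seq_ops);
}

static struct file_operations fib_triestat_fops = {
	.owner	 = THIS_MODULE,
	.open	 = fib_triestat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};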
@@ -2388,22 +2303,22 @@ static void collect_and_show(struct trie *t, struct seq_file *seq)
2388{ 2303{
2389 int bytes = 0; /* How many bytes are used, a ref is 4 bytes */ 2304 int bytes = 0; /* How many bytes are used, a ref is 4 bytes */
2390 int i, max, pointers; 2305 int i, max, pointers;
2391 struct trie_stat *stat; 2306 struct trie_stat *stat;
2392 int avdepth; 2307 int avdepth;
2393 2308
2394 stat = trie_collect_stats(t); 2309 stat = trie_collect_stats(t);
2395 2310
2396 bytes=0; 2311 bytes = 0;
2397 seq_printf(seq, "trie=%p\n", t); 2312 seq_printf(seq, "trie=%p\n", t);
2398 2313
2399 if (stat) { 2314 if (stat) {
2400 if (stat->leaves) 2315 if (stat->leaves)
2401 avdepth=stat->totdepth*100 / stat->leaves; 2316 avdepth = stat->totdepth*100 / stat->leaves;
2402 else 2317 else
2403 avdepth=0; 2318 avdepth = 0;
2404 seq_printf(seq, "Aver depth: %d.%02d\n", avdepth / 100, avdepth % 100 ); 2319 seq_printf(seq, "Aver depth: %d.%02d\n", avdepth / 100, avdepth % 100);
2405 seq_printf(seq, "Max depth: %4d\n", stat->maxdepth); 2320 seq_printf(seq, "Max depth: %4d\n", stat->maxdepth);
2406 2321
2407 seq_printf(seq, "Leaves: %d\n", stat->leaves); 2322 seq_printf(seq, "Leaves: %d\n", stat->leaves);
2408 bytes += sizeof(struct leaf) * stat->leaves; 2323 bytes += sizeof(struct leaf) * stat->leaves;
2409 seq_printf(seq, "Internal nodes: %d\n", stat->tnodes); 2324 seq_printf(seq, "Internal nodes: %d\n", stat->tnodes);
@@ -2455,11 +2370,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2455 2370
2456 if (trie_main) 2371 if (trie_main)
2457 collect_and_show(trie_main, seq); 2372 collect_and_show(trie_main, seq);
2458 } 2373 } else {
2459 else { 2374 snprintf(bf, sizeof(bf), "*\t%08X\t%08X", 200, 400);
2460 snprintf(bf, sizeof(bf), 2375
2461 "*\t%08X\t%08X", 200, 400);
2462
2463 seq_printf(seq, "%-127s\n", bf); 2376 seq_printf(seq, "%-127s\n", bf);
2464 } 2377 }
2465 return 0; 2378 return 0;
@@ -2520,22 +2433,27 @@ static struct fib_alias *fib_trie_get_next(struct seq_file *seq)
2520 2433
2521static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos) 2434static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2522{ 2435{
2523 void *v = NULL; 2436 if (!ip_fib_main_table)
2437 return NULL;
2524 2438
2525 if (ip_fib_main_table) 2439 if (*pos)
2526 v = *pos ? fib_trie_get_next(seq) : SEQ_START_TOKEN; 2440 return fib_trie_get_next(seq);
2527 return v; 2441 else
2442 return SEQ_START_TOKEN;
2528} 2443}
2529 2444
2530static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2445static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2531{ 2446{
2532 ++*pos; 2447 ++*pos;
2533 return v == SEQ_START_TOKEN ? fib_trie_get_first(seq) : fib_trie_get_next(seq); 2448 if (v == SEQ_START_TOKEN)
2449 return fib_trie_get_first(seq);
2450 else
2451 return fib_trie_get_next(seq);
2452
2534} 2453}
2535 2454
2536static void fib_trie_seq_stop(struct seq_file *seq, void *v) 2455static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2537{ 2456{
2538
2539} 2457}
2540 2458
2541/* 2459/*
@@ -2555,9 +2473,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
2555 2473
2556 if (trie_main) 2474 if (trie_main)
2557 trie_dump_seq(seq, trie_main); 2475 trie_dump_seq(seq, trie_main);
2558 } 2476 } else {
2559
2560 else {
2561 snprintf(bf, sizeof(bf), 2477 snprintf(bf, sizeof(bf),
2562 "*\t%08X\t%08X", 200, 400); 2478 "*\t%08X\t%08X", 200, 400);
2563 seq_printf(seq, "%-127s\n", bf); 2479 seq_printf(seq, "%-127s\n", bf);
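Stepping back, the fib_trie.c hunks shown here are one mechanical conversion: every read_lock(&fib_lock)/read_unlock(&fib_lock) pair becomes an RCU read-side critical section, and every load of a shared trie pointer goes through rcu_dereference(). The reader-side shape after the conversion, distilled (an editor's sketch, not a further excerpt from the patch):

/* --- editor's sketch, not part of the patch --- */
rcu_read_lock();			/* cheap: no shared cacheline is written */
n = rcu_dereference(t->trie);		/* ordered load of the shared root */
if (n)
	do_lookup(n);			/* child pointers are likewise loaded
					 * via rcu_dereference() on the way down */
rcu_read_unlock();

Writers still serialize among themselves and must defer freeing replaced nodes until all pre-existing readers have drained, typically via call_rcu(); that side is outside this excerpt.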
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index badfc5849973..24eb56ae1b5a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
114/* 114/*
115 * Statistics 115 * Statistics
116 */ 116 */
117DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); 117DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly;
118 118
119/* An array of errno for error messages from dest unreach. */ 119/* An array of errno for error messages from dest unreach. */
120/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 120/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
@@ -627,11 +627,10 @@ static void icmp_unreach(struct sk_buff *skb)
627 break; 627 break;
628 case ICMP_FRAG_NEEDED: 628 case ICMP_FRAG_NEEDED:
629 if (ipv4_config.no_pmtu_disc) { 629 if (ipv4_config.no_pmtu_disc) {
630 LIMIT_NETDEBUG( 630 LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: "
631 printk(KERN_INFO "ICMP: %u.%u.%u.%u: "
632 "fragmentation needed " 631 "fragmentation needed "
633 "and DF set.\n", 632 "and DF set.\n",
634 NIPQUAD(iph->daddr))); 633 NIPQUAD(iph->daddr));
635 } else { 634 } else {
636 info = ip_rt_frag_needed(iph, 635 info = ip_rt_frag_needed(iph,
637 ntohs(icmph->un.frag.mtu)); 636 ntohs(icmph->un.frag.mtu));
@@ -640,10 +639,9 @@ static void icmp_unreach(struct sk_buff *skb)
640 } 639 }
641 break; 640 break;
642 case ICMP_SR_FAILED: 641 case ICMP_SR_FAILED:
643 LIMIT_NETDEBUG( 642 LIMIT_NETDEBUG(KERN_INFO "ICMP: %u.%u.%u.%u: Source "
644 printk(KERN_INFO "ICMP: %u.%u.%u.%u: Source "
645 "Route Failed.\n", 643 "Route Failed.\n",
646 NIPQUAD(iph->daddr))); 644 NIPQUAD(iph->daddr));
647 break; 645 break;
648 default: 646 default:
649 break; 647 break;
@@ -936,7 +934,7 @@ int icmp_rcv(struct sk_buff *skb)
936 case CHECKSUM_HW: 934 case CHECKSUM_HW:
937 if (!(u16)csum_fold(skb->csum)) 935 if (!(u16)csum_fold(skb->csum))
938 break; 936 break;
939 LIMIT_NETDEBUG(printk(KERN_DEBUG "icmp v4 hw csum failure\n")); 937 LIMIT_NETDEBUG(KERN_DEBUG "icmp v4 hw csum failure\n");
940 case CHECKSUM_NONE: 938 case CHECKSUM_NONE:
941 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) 939 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
942 goto error; 940 goto error;
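The icmp.c hunks (and the igmp.c one below) track an interface change to the debug macros: LIMIT_NETDEBUG() and NETDEBUG() now take a printk-style format directly instead of wrapping an entire printk() call. Schematically, from the hunk above:

/* --- before: the macro wrapped a whole statement --- */
LIMIT_NETDEBUG(printk(KERN_DEBUG "icmp v4 hw csum failure\n"));

/* --- after: the macro is itself printk-like --- */
LIMIT_NETDEBUG(KERN_DEBUG "icmp v4 hw csum failure\n");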
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5088f90835ae..44607f4767b8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -904,7 +904,7 @@ int igmp_rcv(struct sk_buff *skb)
904 case IGMP_MTRACE_RESP: 904 case IGMP_MTRACE_RESP:
905 break; 905 break;
906 default: 906 default:
907 NETDEBUG(printk(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type)); 907 NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
908 } 908 }
909 in_dev_put(in_dev); 909 in_dev_put(in_dev);
910 kfree_skb(skb); 910 kfree_skb(skb);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
new file mode 100644
index 000000000000..fe3c6d3d0c91
--- /dev/null
+++ b/net/ipv4/inet_connection_sock.c
@@ -0,0 +1,641 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Support for INET connection oriented protocols.
7 *
8 * Authors: See the TCP sources
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or(at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/jhash.h>
19
20#include <net/inet_connection_sock.h>
21#include <net/inet_hashtables.h>
22#include <net/inet_timewait_sock.h>
23#include <net/ip.h>
24#include <net/route.h>
25#include <net/tcp_states.h>
26#include <net/xfrm.h>
27
28#ifdef INET_CSK_DEBUG
29const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
30EXPORT_SYMBOL(inet_csk_timer_bug_msg);
31#endif
32
33/*
34 * This array holds the first and last local port number.
35 * For high-usage systems, use sysctl to change this to
36 * 32768-61000
37 */
38int sysctl_local_port_range[2] = { 1024, 4999 };
39
40static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
41{
42 const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
43 struct sock *sk2;
44 struct hlist_node *node;
45 int reuse = sk->sk_reuse;
46
47 sk_for_each_bound(sk2, node, &tb->owners) {
48 if (sk != sk2 &&
49 !inet_v6_ipv6only(sk2) &&
50 (!sk->sk_bound_dev_if ||
51 !sk2->sk_bound_dev_if ||
52 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
53 if (!reuse || !sk2->sk_reuse ||
54 sk2->sk_state == TCP_LISTEN) {
55 const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
56 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
57 sk2_rcv_saddr == sk_rcv_saddr)
58 break;
59 }
60 }
61 }
62 return node != NULL;
63}
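inet_csk_bind_conflict() above encodes the classic bind(2) rules: two sockets sharing a port only clash if they can see the same device, reuse is not granted by both sides (or the existing socket is listening), and their receive addresses overlap. The decision, distilled into a pure function (an editor's sketch with illustrative names; saddr == 0 plays the INADDR_ANY wildcard):

/* --- editor's sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdint.h>

static bool bind_conflict(bool reuse1, bool reuse2, bool sk2_listening,
			  int dev1, int dev2,
			  uint32_t saddr1, uint32_t saddr2)
{
	if (dev1 && dev2 && dev1 != dev2)
		return false;	/* bound to different devices: no clash */
	if (reuse1 && reuse2 && !sk2_listening)
		return false;	/* both set SO_REUSEADDR, none listening */
	/* A wildcard overlaps everything; equal addresses overlap too. */
	return !saddr1 || !saddr2 || saddr1 == saddr2;
}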
64
65/* Obtain a reference to a local port for the given sock;
66 * if snum is zero, select any available local port.
67 */
68int inet_csk_get_port(struct inet_hashinfo *hashinfo,
69 struct sock *sk, unsigned short snum)
70{
71 struct inet_bind_hashbucket *head;
72 struct hlist_node *node;
73 struct inet_bind_bucket *tb;
74 int ret;
75
76 local_bh_disable();
77 if (!snum) {
78 int low = sysctl_local_port_range[0];
79 int high = sysctl_local_port_range[1];
80 int remaining = (high - low) + 1;
81 int rover;
82
83 spin_lock(&hashinfo->portalloc_lock);
84 if (hashinfo->port_rover < low)
85 rover = low;
86 else
87 rover = hashinfo->port_rover;
88 do {
89 rover++;
90 if (rover > high)
91 rover = low;
92 head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
93 spin_lock(&head->lock);
94 inet_bind_bucket_for_each(tb, node, &head->chain)
95 if (tb->port == rover)
96 goto next;
97 break;
98 next:
99 spin_unlock(&head->lock);
100 } while (--remaining > 0);
101 hashinfo->port_rover = rover;
102 spin_unlock(&hashinfo->portalloc_lock);
103
104 /* Exhausted local port range during search? It is not
105 * possible for us to be holding one of the bind hash
106 * locks if this test triggers, because if 'remaining'
107 * drops to zero, we broke out of the do/while loop at
108 * the top level, not from the 'break;' statement.
109 */
110 ret = 1;
111 if (remaining <= 0)
112 goto fail;
113
114 /* OK, here is the one we will use. HEAD is
115 * non-NULL and we hold its mutex.
116 */
117 snum = rover;
118 } else {
119 head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
120 spin_lock(&head->lock);
121 inet_bind_bucket_for_each(tb, node, &head->chain)
122 if (tb->port == snum)
123 goto tb_found;
124 }
125 tb = NULL;
126 goto tb_not_found;
127tb_found:
128 if (!hlist_empty(&tb->owners)) {
129 if (sk->sk_reuse > 1)
130 goto success;
131 if (tb->fastreuse > 0 &&
132 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
133 goto success;
134 } else {
135 ret = 1;
136 if (inet_csk_bind_conflict(sk, tb))
137 goto fail_unlock;
138 }
139 }
140tb_not_found:
141 ret = 1;
142 if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
143 goto fail_unlock;
144 if (hlist_empty(&tb->owners)) {
145 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
146 tb->fastreuse = 1;
147 else
148 tb->fastreuse = 0;
149 } else if (tb->fastreuse &&
150 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
151 tb->fastreuse = 0;
152success:
153 if (!inet_csk(sk)->icsk_bind_hash)
154 inet_bind_hash(sk, tb, snum);
155 BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
156 ret = 0;
157
158fail_unlock:
159 spin_unlock(&head->lock);
160fail:
161 local_bh_enable();
162 return ret;
163}
164
165EXPORT_SYMBOL_GPL(inet_csk_get_port);
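The autobind branch of inet_csk_get_port() walks a persistent rover cursor once around [low, high], probing each candidate's bind-hash chain until it finds a port with no bucket at all. Isolated from the locking, the search looks like this (an editor's sketch; in_use() stands in for the bhash chain walk):

/* --- editor's sketch, not part of the patch --- */
#include <stdbool.h>

/* Returns a free port, or 0 once the whole range has been probed. */
static int pick_port(int low, int high, int *rover, bool (*in_use)(int))
{
	int remaining = high - low + 1;
	int r = *rover < low ? low : *rover;

	do {
		if (++r > high)
			r = low;	/* wrap past the top of the range */
		if (!in_use(r))
			break;
	} while (--remaining > 0);

	*rover = r;	/* the next search resumes after this port */
	return remaining > 0 ? r : 0;
}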
166
167/*
168 * Wait for an incoming connection, avoid race conditions. This must be called
169 * with the socket locked.
170 */
171static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
172{
173 struct inet_connection_sock *icsk = inet_csk(sk);
174 DEFINE_WAIT(wait);
175 int err;
176
177 /*
178 * True wake-one mechanism for incoming connections: only
179 * one process gets woken up, not the 'whole herd'.
180 * Since we do not 'race & poll' for established sockets
181 * anymore, the common case will execute the loop only once.
182 *
183 * Subtle issue: "add_wait_queue_exclusive()" will be added
184 * after any current non-exclusive waiters, and we know that
185 * it will always _stay_ after any new non-exclusive waiters
186 * because all non-exclusive waiters are added at the
187 * beginning of the wait-queue. As such, it's ok to "drop"
188 * our exclusiveness temporarily when we get woken up without
189 * having to remove and re-insert ourselves on the wait queue.
190 */
191 for (;;) {
192 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
193 TASK_INTERRUPTIBLE);
194 release_sock(sk);
195 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
196 timeo = schedule_timeout(timeo);
197 lock_sock(sk);
198 err = 0;
199 if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
200 break;
201 err = -EINVAL;
202 if (sk->sk_state != TCP_LISTEN)
203 break;
204 err = sock_intr_errno(timeo);
205 if (signal_pending(current))
206 break;
207 err = -EAGAIN;
208 if (!timeo)
209 break;
210 }
211 finish_wait(sk->sk_sleep, &wait);
212 return err;
213}
214
215/*
216 * This will accept the next outstanding connection.
217 */
218struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
219{
220 struct inet_connection_sock *icsk = inet_csk(sk);
221 struct sock *newsk;
222 int error;
223
224 lock_sock(sk);
225
226 /* We need to make sure that this socket is listening,
227 * and that it has something pending.
228 */
229 error = -EINVAL;
230 if (sk->sk_state != TCP_LISTEN)
231 goto out_err;
232
233 /* Find already established connection */
234 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
235 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
236
237 /* If this is a non blocking socket don't sleep */
238 error = -EAGAIN;
239 if (!timeo)
240 goto out_err;
241
242 error = inet_csk_wait_for_connect(sk, timeo);
243 if (error)
244 goto out_err;
245 }
246
247 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
248 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
249out:
250 release_sock(sk);
251 return newsk;
252out_err:
253 newsk = NULL;
254 *err = error;
255 goto out;
256}
257
258EXPORT_SYMBOL(inet_csk_accept);
259
260/*
261 * Using different timers for retransmit, delayed acks and probes
262 * We may wish to use just one timer, maintaining a list of expiry jiffies
263 * to optimize.
264 */
265void inet_csk_init_xmit_timers(struct sock *sk,
266 void (*retransmit_handler)(unsigned long),
267 void (*delack_handler)(unsigned long),
268 void (*keepalive_handler)(unsigned long))
269{
270 struct inet_connection_sock *icsk = inet_csk(sk);
271
272 init_timer(&icsk->icsk_retransmit_timer);
273 init_timer(&icsk->icsk_delack_timer);
274 init_timer(&sk->sk_timer);
275
276 icsk->icsk_retransmit_timer.function = retransmit_handler;
277 icsk->icsk_delack_timer.function = delack_handler;
278 sk->sk_timer.function = keepalive_handler;
279
280 icsk->icsk_retransmit_timer.data =
281 icsk->icsk_delack_timer.data =
282 sk->sk_timer.data = (unsigned long)sk;
283
284 icsk->icsk_pending = icsk->icsk_ack.pending = 0;
285}
286
287EXPORT_SYMBOL(inet_csk_init_xmit_timers);
288
289void inet_csk_clear_xmit_timers(struct sock *sk)
290{
291 struct inet_connection_sock *icsk = inet_csk(sk);
292
293 icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
294
295 sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
296 sk_stop_timer(sk, &icsk->icsk_delack_timer);
297 sk_stop_timer(sk, &sk->sk_timer);
298}
299
300EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
301
302void inet_csk_delete_keepalive_timer(struct sock *sk)
303{
304 sk_stop_timer(sk, &sk->sk_timer);
305}
306
307EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
308
309void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
310{
311 sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
312}
313
314EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
315
316struct dst_entry* inet_csk_route_req(struct sock *sk,
317 const struct request_sock *req)
318{
319 struct rtable *rt;
320 const struct inet_request_sock *ireq = inet_rsk(req);
321 struct ip_options *opt = inet_rsk(req)->opt;
322 struct flowi fl = { .oif = sk->sk_bound_dev_if,
323 .nl_u = { .ip4_u =
324 { .daddr = ((opt && opt->srr) ?
325 opt->faddr :
326 ireq->rmt_addr),
327 .saddr = ireq->loc_addr,
328 .tos = RT_CONN_FLAGS(sk) } },
329 .proto = sk->sk_protocol,
330 .uli_u = { .ports =
331 { .sport = inet_sk(sk)->sport,
332 .dport = ireq->rmt_port } } };
333
334 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
335 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
336 return NULL;
337 }
338 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
339 ip_rt_put(rt);
340 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
341 return NULL;
342 }
343 return &rt->u.dst;
344}
345
346EXPORT_SYMBOL_GPL(inet_csk_route_req);
347
348static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
349 const u32 rnd, const u16 synq_hsize)
350{
351 return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
352}
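inet_synq_hash() mixes the peer's address and port with a per-listener random value and masks the result down to the table, which therefore must be a power of two in size. The same shape with a stand-in mixer (jhash_2words() is the kernel's real one; the arithmetic below is only illustrative):

/* --- editor's sketch, not part of the patch --- */
#include <stdint.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t seed)
{
	/* Stand-in for jhash_2words(): any decent 3-input mixer works. */
	uint64_t x = ((uint64_t)a << 32 | b) ^ (seed * 0x9e3779b97f4a7c15ULL);

	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (uint32_t)x;
}

static uint32_t synq_hash(uint32_t raddr, uint16_t rport,
			  uint32_t rnd, uint16_t synq_hsize)
{
	/* synq_hsize must be a power of two for the mask to act as modulo */
	return mix3(raddr, rport, rnd) & (synq_hsize - 1);
}

The random seed keeps remote peers from steering all their embryonic connections into one chain.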
353
354#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
355#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
356#else
357#define AF_INET_FAMILY(fam) 1
358#endif
359
360struct request_sock *inet_csk_search_req(const struct sock *sk,
361 struct request_sock ***prevp,
362 const __u16 rport, const __u32 raddr,
363 const __u32 laddr)
364{
365 const struct inet_connection_sock *icsk = inet_csk(sk);
366 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
367 struct request_sock *req, **prev;
368
369 for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
370 lopt->nr_table_entries)];
371 (req = *prev) != NULL;
372 prev = &req->dl_next) {
373 const struct inet_request_sock *ireq = inet_rsk(req);
374
375 if (ireq->rmt_port == rport &&
376 ireq->rmt_addr == raddr &&
377 ireq->loc_addr == laddr &&
378 AF_INET_FAMILY(req->rsk_ops->family)) {
379 BUG_TRAP(!req->sk);
380 *prevp = prev;
381 break;
382 }
383 }
384
385 return req;
386}
387
388EXPORT_SYMBOL_GPL(inet_csk_search_req);
389
390void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
391 const unsigned timeout)
392{
393 struct inet_connection_sock *icsk = inet_csk(sk);
394 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
395 const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
396 lopt->hash_rnd, lopt->nr_table_entries);
397
398 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
399 inet_csk_reqsk_queue_added(sk, timeout);
400}
401
402/* The only thing we need from tcp.h */
403extern int sysctl_tcp_synack_retries;
404
405EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
406
407void inet_csk_reqsk_queue_prune(struct sock *parent,
408 const unsigned long interval,
409 const unsigned long timeout,
410 const unsigned long max_rto)
411{
412 struct inet_connection_sock *icsk = inet_csk(parent);
413 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
414 struct listen_sock *lopt = queue->listen_opt;
415 int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
416 int thresh = max_retries;
417 unsigned long now = jiffies;
418 struct request_sock **reqp, *req;
419 int i, budget;
420
421 if (lopt == NULL || lopt->qlen == 0)
422 return;
423
 424 /* Normally all the openreqs are young and become mature
 425 * (i.e. converted to an established socket) within the first timeout.
 426 * If the synack was not acknowledged for 3 seconds, it means
 427 * one of the following: the synack was lost, the ack was lost,
 428 * the rtt is high, or nobody planned to ack (i.e. synflood).
 429 * When the server is somewhat loaded, the queue is populated with old
 430 * open requests, reducing the effective size of the queue.
 431 * When the server is heavily loaded, the queue size reduces to zero
 432 * after several minutes of work. That is not synflood,
 433 * it is normal operation. The solution is to prune entries
 434 * that are too old, overriding the normal timeout, when
 435 * the situation becomes dangerous.
 436 *
 437 * Essentially, we reserve half of the room for young
 438 * embryos, and abort old ones without pity if they
 439 * are about to clog our table.
440 */
441 if (lopt->qlen>>(lopt->max_qlen_log-1)) {
442 int young = (lopt->qlen_young<<1);
443
444 while (thresh > 2) {
445 if (lopt->qlen < young)
446 break;
447 thresh--;
448 young <<= 1;
449 }
450 }
451
452 if (queue->rskq_defer_accept)
453 max_retries = queue->rskq_defer_accept;
454
455 budget = 2 * (lopt->nr_table_entries / (timeout / interval));
456 i = lopt->clock_hand;
457
458 do {
459 reqp=&lopt->syn_table[i];
460 while ((req = *reqp) != NULL) {
461 if (time_after_eq(now, req->expires)) {
462 if ((req->retrans < thresh ||
463 (inet_rsk(req)->acked && req->retrans < max_retries))
464 && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
465 unsigned long timeo;
466
467 if (req->retrans++ == 0)
468 lopt->qlen_young--;
469 timeo = min((timeout << req->retrans), max_rto);
470 req->expires = now + timeo;
471 reqp = &req->dl_next;
472 continue;
473 }
474
475 /* Drop this request */
476 inet_csk_reqsk_queue_unlink(parent, req, reqp);
477 reqsk_queue_removed(queue, req);
478 reqsk_free(req);
479 continue;
480 }
481 reqp = &req->dl_next;
482 }
483
484 i = (i + 1) & (lopt->nr_table_entries - 1);
485
486 } while (--budget > 0);
487
488 lopt->clock_hand = i;
489
490 if (lopt->qlen)
491 inet_csk_reset_keepalive_timer(parent, interval);
492}
493
494EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
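The thresh computation at the top of inet_csk_reqsk_queue_prune() implements the comment's reservation policy: once the queue is more than half full, every doubling of total entries relative to young ones costs one retransmission from the budget, down to a floor of 2. Pulled out as a pure function (an editor's sketch; the names mirror the listen_sock fields above):

/* --- editor's sketch, not part of the patch --- */
static int prune_thresh(int qlen, int qlen_young,
			int max_qlen_log, int max_retries)
{
	int thresh = max_retries;

	if (qlen >> (max_qlen_log - 1)) {	/* more than half full? */
		int young = qlen_young << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	return thresh;
}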
495
496struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
497 const unsigned int __nocast priority)
498{
499 struct sock *newsk = sk_clone(sk, priority);
500
501 if (newsk != NULL) {
502 struct inet_connection_sock *newicsk = inet_csk(newsk);
503
504 newsk->sk_state = TCP_SYN_RECV;
505 newicsk->icsk_bind_hash = NULL;
506
507 inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
508 newsk->sk_write_space = sk_stream_write_space;
509
510 newicsk->icsk_retransmits = 0;
511 newicsk->icsk_backoff = 0;
512 newicsk->icsk_probes_out = 0;
513
514 /* Deinitialize accept_queue to trap illegal accesses. */
515 memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
516 }
517 return newsk;
518}
519
520EXPORT_SYMBOL_GPL(inet_csk_clone);
521
522/*
523 * At this point, there should be no process reference to this
524 * socket, and thus no user references at all. Therefore we
525 * can assume the socket waitqueue is inactive and nobody will
526 * try to jump onto it.
527 */
528void inet_csk_destroy_sock(struct sock *sk)
529{
530 BUG_TRAP(sk->sk_state == TCP_CLOSE);
531 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
532
533 /* It cannot be in hash table! */
534 BUG_TRAP(sk_unhashed(sk));
535
 536 /* If inet_sk(sk)->num is non-zero, it must be bound */
537 BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
538
539 sk->sk_prot->destroy(sk);
540
541 sk_stream_kill_queues(sk);
542
543 xfrm_sk_free_policy(sk);
544
545 sk_refcnt_debug_release(sk);
546
547 atomic_dec(sk->sk_prot->orphan_count);
548 sock_put(sk);
549}
550
551EXPORT_SYMBOL(inet_csk_destroy_sock);
552
553int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
554{
555 struct inet_sock *inet = inet_sk(sk);
556 struct inet_connection_sock *icsk = inet_csk(sk);
557 int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
558
559 if (rc != 0)
560 return rc;
561
562 sk->sk_max_ack_backlog = 0;
563 sk->sk_ack_backlog = 0;
564 inet_csk_delack_init(sk);
565
 566 /* There is a race window here: we announce ourselves listening,
 567 * but this transition is still not validated by get_port().
 568 * It is OK, because this socket enters the hash table only
569 * after validation is complete.
570 */
571 sk->sk_state = TCP_LISTEN;
572 if (!sk->sk_prot->get_port(sk, inet->num)) {
573 inet->sport = htons(inet->num);
574
575 sk_dst_reset(sk);
576 sk->sk_prot->hash(sk);
577
578 return 0;
579 }
580
581 sk->sk_state = TCP_CLOSE;
582 __reqsk_queue_destroy(&icsk->icsk_accept_queue);
583 return -EADDRINUSE;
584}
585
586EXPORT_SYMBOL_GPL(inet_csk_listen_start);
587
588/*
589 * This routine closes sockets which have been at least partially
590 * opened, but not yet accepted.
591 */
592void inet_csk_listen_stop(struct sock *sk)
593{
594 struct inet_connection_sock *icsk = inet_csk(sk);
595 struct request_sock *acc_req;
596 struct request_sock *req;
597
598 inet_csk_delete_keepalive_timer(sk);
599
600 /* make all the listen_opt local to us */
601 acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
602
 603 /* Following the specs, it would be better either to send a FIN
 604 * (and enter FIN-WAIT-1; that is a normal close)
 605 * or to send an active reset (abort).
 606 * Certainly, that is pretty dangerous during a synflood, but it is
 607 * a bad justification for our negligence 8)
 608 * To be honest, we are not able to implement either
 609 * of the variants now. --ANK
610 */
611 reqsk_queue_destroy(&icsk->icsk_accept_queue);
612
613 while ((req = acc_req) != NULL) {
614 struct sock *child = req->sk;
615
616 acc_req = req->dl_next;
617
618 local_bh_disable();
619 bh_lock_sock(child);
620 BUG_TRAP(!sock_owned_by_user(child));
621 sock_hold(child);
622
623 sk->sk_prot->disconnect(child, O_NONBLOCK);
624
625 sock_orphan(child);
626
627 atomic_inc(sk->sk_prot->orphan_count);
628
629 inet_csk_destroy_sock(child);
630
631 bh_unlock_sock(child);
632 local_bh_enable();
633 sock_put(child);
634
635 sk_acceptq_removed(sk);
636 __reqsk_free(req);
637 }
638 BUG_TRAP(!sk->sk_ack_backlog);
639}
640
641EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
new file mode 100644
index 000000000000..71f3c7350c6e
--- /dev/null
+++ b/net/ipv4/inet_diag.c
@@ -0,0 +1,868 @@
1/*
 2 * inet_diag.c Module for monitoring INET transport protocol sockets.
3 *
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/fcntl.h>
18#include <linux/random.h>
19#include <linux/cache.h>
20#include <linux/init.h>
21#include <linux/time.h>
22
23#include <net/icmp.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/inet_common.h>
27#include <net/inet_connection_sock.h>
28#include <net/inet_hashtables.h>
29#include <net/inet_timewait_sock.h>
30#include <net/inet6_hashtables.h>
31
32#include <linux/inet.h>
33#include <linux/stddef.h>
34
35#include <linux/inet_diag.h>
36
37static const struct inet_diag_handler **inet_diag_table;
38
39struct inet_diag_entry {
40 u32 *saddr;
41 u32 *daddr;
42 u16 sport;
43 u16 dport;
44 u16 family;
45 u16 userlocks;
46};
47
48static struct sock *idiagnl;
49
50#define INET_DIAG_PUT(skb, attrtype, attrlen) \
51 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
52
53static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
54 int ext, u32 pid, u32 seq, u16 nlmsg_flags,
55 const struct nlmsghdr *unlh)
56{
57 const struct inet_sock *inet = inet_sk(sk);
58 const struct inet_connection_sock *icsk = inet_csk(sk);
59 struct inet_diag_msg *r;
60 struct nlmsghdr *nlh;
61 void *info = NULL;
62 struct inet_diag_meminfo *minfo = NULL;
63 unsigned char *b = skb->tail;
64 const struct inet_diag_handler *handler;
65
66 handler = inet_diag_table[unlh->nlmsg_type];
67 BUG_ON(handler == NULL);
68
69 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
70 nlh->nlmsg_flags = nlmsg_flags;
71
72 r = NLMSG_DATA(nlh);
73 if (sk->sk_state != TCP_TIME_WAIT) {
74 if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
75 minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO,
76 sizeof(*minfo));
77 if (ext & (1 << (INET_DIAG_INFO - 1)))
78 info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
79 handler->idiag_info_size);
80
81 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
82 size_t len = strlen(icsk->icsk_ca_ops->name);
83 strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
84 icsk->icsk_ca_ops->name);
85 }
86 }
87 r->idiag_family = sk->sk_family;
88 r->idiag_state = sk->sk_state;
89 r->idiag_timer = 0;
90 r->idiag_retrans = 0;
91
92 r->id.idiag_if = sk->sk_bound_dev_if;
93 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
94 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
95
96 if (r->idiag_state == TCP_TIME_WAIT) {
97 const struct inet_timewait_sock *tw = inet_twsk(sk);
98 long tmo = tw->tw_ttd - jiffies;
99 if (tmo < 0)
100 tmo = 0;
101
102 r->id.idiag_sport = tw->tw_sport;
103 r->id.idiag_dport = tw->tw_dport;
104 r->id.idiag_src[0] = tw->tw_rcv_saddr;
105 r->id.idiag_dst[0] = tw->tw_daddr;
106 r->idiag_state = tw->tw_substate;
107 r->idiag_timer = 3;
108 r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
109 r->idiag_rqueue = 0;
110 r->idiag_wqueue = 0;
111 r->idiag_uid = 0;
112 r->idiag_inode = 0;
113#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
114 if (r->idiag_family == AF_INET6) {
115 const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
116
117 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
118 &tcp6tw->tw_v6_rcv_saddr);
119 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
120 &tcp6tw->tw_v6_daddr);
121 }
122#endif
123 nlh->nlmsg_len = skb->tail - b;
124 return skb->len;
125 }
126
127 r->id.idiag_sport = inet->sport;
128 r->id.idiag_dport = inet->dport;
129 r->id.idiag_src[0] = inet->rcv_saddr;
130 r->id.idiag_dst[0] = inet->daddr;
131
132#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
133 if (r->idiag_family == AF_INET6) {
134 struct ipv6_pinfo *np = inet6_sk(sk);
135
136 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
137 &np->rcv_saddr);
138 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
139 &np->daddr);
140 }
141#endif
142
143#define EXPIRES_IN_MS(tmo) ((tmo - jiffies) * 1000 + HZ - 1) / HZ
144
145 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
146 r->idiag_timer = 1;
147 r->idiag_retrans = icsk->icsk_retransmits;
148 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
149 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
150 r->idiag_timer = 4;
151 r->idiag_retrans = icsk->icsk_probes_out;
152 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
153 } else if (timer_pending(&sk->sk_timer)) {
154 r->idiag_timer = 2;
155 r->idiag_retrans = icsk->icsk_probes_out;
156 r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
157 } else {
158 r->idiag_timer = 0;
159 r->idiag_expires = 0;
160 }
161#undef EXPIRES_IN_MS
162
163 r->idiag_uid = sock_i_uid(sk);
164 r->idiag_inode = sock_i_ino(sk);
165
166 if (minfo) {
167 minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
168 minfo->idiag_wmem = sk->sk_wmem_queued;
169 minfo->idiag_fmem = sk->sk_forward_alloc;
170 minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
171 }
172
173 handler->idiag_get_info(sk, r, info);
174
175 if (sk->sk_state < TCP_TIME_WAIT &&
176 icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
177 icsk->icsk_ca_ops->get_info(sk, ext, skb);
178
179 nlh->nlmsg_len = skb->tail - b;
180 return skb->len;
181
182rtattr_failure:
183nlmsg_failure:
184 skb_trim(skb, b - skb->data);
185 return -1;
186}
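The idiag_cookie pair written above round-trips a kernel pointer through two u32s, and the odd (>> 31) >> 1 is a portable 32-bit shift: shifting an unsigned long by its full width is undefined in C, so on 32-bit machines a plain >> 32 would be UB while the split shift simply yields 0. The trick in isolation (an editor's sketch, not part of the patch):

/* --- editor's sketch, not part of the patch --- */
#include <stdint.h>

static void cookie_from_ptr(unsigned long p, uint32_t c[2])
{
	c[0] = (uint32_t)p;
	c[1] = (uint32_t)((p >> 31) >> 1);	/* == p >> 32 when long is
						 * 64-bit, 0 when 32-bit,
						 * and never undefined */
}

static int cookie_matches(unsigned long p, const uint32_t c[2])
{
	uint32_t mine[2];

	cookie_from_ptr(p, mine);
	return mine[0] == c[0] && mine[1] == c[1];
}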
187
188static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
189{
190 int err;
191 struct sock *sk;
192 struct inet_diag_req *req = NLMSG_DATA(nlh);
193 struct sk_buff *rep;
194 struct inet_hashinfo *hashinfo;
195 const struct inet_diag_handler *handler;
196
197 handler = inet_diag_table[nlh->nlmsg_type];
198 BUG_ON(handler == NULL);
199 hashinfo = handler->idiag_hashinfo;
200
201 if (req->idiag_family == AF_INET) {
202 sk = inet_lookup(hashinfo, req->id.idiag_dst[0],
203 req->id.idiag_dport, req->id.idiag_src[0],
204 req->id.idiag_sport, req->id.idiag_if);
205 }
206#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
207 else if (req->idiag_family == AF_INET6) {
208 sk = inet6_lookup(hashinfo,
209 (struct in6_addr *)req->id.idiag_dst,
210 req->id.idiag_dport,
211 (struct in6_addr *)req->id.idiag_src,
212 req->id.idiag_sport,
213 req->id.idiag_if);
214 }
215#endif
216 else {
217 return -EINVAL;
218 }
219
220 if (sk == NULL)
221 return -ENOENT;
222
223 err = -ESTALE;
224 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
225 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
226 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
227 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
228 goto out;
229
230 err = -ENOMEM;
231 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
232 sizeof(struct inet_diag_meminfo) +
233 handler->idiag_info_size + 64)),
234 GFP_KERNEL);
235 if (!rep)
236 goto out;
237
238 if (inet_diag_fill(rep, sk, req->idiag_ext,
239 NETLINK_CB(in_skb).pid,
240 nlh->nlmsg_seq, 0, nlh) <= 0)
241 BUG();
242
243 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
244 MSG_DONTWAIT);
245 if (err > 0)
246 err = 0;
247
248out:
249 if (sk) {
250 if (sk->sk_state == TCP_TIME_WAIT)
251 inet_twsk_put((struct inet_timewait_sock *)sk);
252 else
253 sock_put(sk);
254 }
255 return err;
256}
257
258static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
259{
260 int words = bits >> 5;
261
262 bits &= 0x1f;
263
264 if (words) {
265 if (memcmp(a1, a2, words << 2))
266 return 0;
267 }
268 if (bits) {
269 __u32 w1, w2;
270 __u32 mask;
271
272 w1 = a1[words];
273 w2 = a2[words];
274
275 mask = htonl((0xffffffff) << (32 - bits));
276
277 if ((w1 ^ w2) & mask)
278 return 0;
279 }
280
281 return 1;
282}
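bitstring_match() compares two network-byte-order address arrays up to a prefix length: whole 32-bit words by memcmp(), then the final partial word under an htonl()'d mask. A user-space restatement plus a concrete /20 check (an editor's sketch; the addresses are chosen purely for illustration):

/* --- editor's sketch, not part of the patch --- */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static int prefix_match(const uint32_t *a1, const uint32_t *a2, int bits)
{
	int words = bits >> 5;

	if (words && memcmp(a1, a2, words << 2))
		return 0;
	bits &= 0x1f;
	if (bits) {
		uint32_t mask = htonl(0xffffffffu << (32 - bits));

		if ((a1[words] ^ a2[words]) & mask)
			return 0;
	}
	return 1;
}

/* 10.1.16.0/20 covers 10.1.31.7: the first 20 bits agree.
 *	uint32_t a = htonl(0x0a011000), b = htonl(0x0a011f07);
 *	prefix_match(&a, &b, 20) == 1
 */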
283
284
285static int inet_diag_bc_run(const void *bc, int len,
286 const struct inet_diag_entry *entry)
287{
288 while (len > 0) {
289 int yes = 1;
290 const struct inet_diag_bc_op *op = bc;
291
292 switch (op->code) {
293 case INET_DIAG_BC_NOP:
294 break;
295 case INET_DIAG_BC_JMP:
296 yes = 0;
297 break;
298 case INET_DIAG_BC_S_GE:
299 yes = entry->sport >= op[1].no;
300 break;
301 case INET_DIAG_BC_S_LE:
 302 yes = entry->sport <= op[1].no;
303 break;
304 case INET_DIAG_BC_D_GE:
305 yes = entry->dport >= op[1].no;
306 break;
307 case INET_DIAG_BC_D_LE:
308 yes = entry->dport <= op[1].no;
309 break;
310 case INET_DIAG_BC_AUTO:
311 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
312 break;
313 case INET_DIAG_BC_S_COND:
314 case INET_DIAG_BC_D_COND: {
315 struct inet_diag_hostcond *cond;
316 u32 *addr;
317
318 cond = (struct inet_diag_hostcond *)(op + 1);
319 if (cond->port != -1 &&
320 cond->port != (op->code == INET_DIAG_BC_S_COND ?
321 entry->sport : entry->dport)) {
322 yes = 0;
323 break;
324 }
325
326 if (cond->prefix_len == 0)
327 break;
328
329 if (op->code == INET_DIAG_BC_S_COND)
330 addr = entry->saddr;
331 else
332 addr = entry->daddr;
333
334 if (bitstring_match(addr, cond->addr, cond->prefix_len))
335 break;
336 if (entry->family == AF_INET6 &&
337 cond->family == AF_INET) {
338 if (addr[0] == 0 && addr[1] == 0 &&
339 addr[2] == htonl(0xffff) &&
340 bitstring_match(addr + 3, cond->addr,
341 cond->prefix_len))
342 break;
343 }
344 yes = 0;
345 break;
346 }
347 }
348
349 if (yes) {
350 len -= op->yes;
351 bc += op->yes;
352 } else {
353 len -= op->no;
354 bc += op->no;
355 }
356 }
357 return (len == 0);
358}
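Each bytecode op carries two forward byte offsets: yes falls through to the next condition, no jumps ahead, and a program accepts exactly when the walk lands on len == 0. Rejection is a no offset that deliberately overshoots the end by up to 4 bytes, which is why inet_diag_bc_audit() below tolerates offsets as large as len + 4. A worked trace for "sport >= 1024 AND dport <= 443", with two 8-byte condition pairs (an editor's sketch; the opcode spellings are illustrative):

/* --- editor's sketch, not part of the patch ---
 * program (len = 16):
 *	op0 at 0:  S_GE 1024	yes =  8   no = 20	(20 == len + 4)
 *	op1 at 8:  D_LE  443	yes =  8   no = 12	(12 == remaining 8 + 4)
 *
 * both hold:	len 16 -(yes 8)-> 8 -(yes 8)-> 0	-> accept (len == 0)
 * op0 fails:	len 16 -(no 20)-> -4			-> reject (len != 0)
 * op1 fails:	len  8 -(no 12)-> -4			-> reject
 */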
359
360static int valid_cc(const void *bc, int len, int cc)
361{
362 while (len >= 0) {
363 const struct inet_diag_bc_op *op = bc;
364
365 if (cc > len)
366 return 0;
367 if (cc == len)
368 return 1;
369 if (op->yes < 4)
370 return 0;
371 len -= op->yes;
372 bc += op->yes;
373 }
374 return 0;
375}
376
377static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
378{
379 const unsigned char *bc = bytecode;
380 int len = bytecode_len;
381
382 while (len > 0) {
383 struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
384
385//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
386 switch (op->code) {
387 case INET_DIAG_BC_AUTO:
388 case INET_DIAG_BC_S_COND:
389 case INET_DIAG_BC_D_COND:
390 case INET_DIAG_BC_S_GE:
391 case INET_DIAG_BC_S_LE:
392 case INET_DIAG_BC_D_GE:
393 case INET_DIAG_BC_D_LE:
394 if (op->yes < 4 || op->yes > len + 4)
395 return -EINVAL;
396 case INET_DIAG_BC_JMP:
397 if (op->no < 4 || op->no > len + 4)
398 return -EINVAL;
399 if (op->no < len &&
400 !valid_cc(bytecode, bytecode_len, len - op->no))
401 return -EINVAL;
402 break;
403 case INET_DIAG_BC_NOP:
404 if (op->yes < 4 || op->yes > len + 4)
405 return -EINVAL;
406 break;
407 default:
408 return -EINVAL;
409 }
410 bc += op->yes;
411 len -= op->yes;
412 }
413 return len == 0 ? 0 : -EINVAL;
414}
415
416static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk,
417 struct netlink_callback *cb)
418{
419 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
420
421 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
422 struct inet_diag_entry entry;
423 struct rtattr *bc = (struct rtattr *)(r + 1);
424 struct inet_sock *inet = inet_sk(sk);
425
426 entry.family = sk->sk_family;
427#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
428 if (entry.family == AF_INET6) {
429 struct ipv6_pinfo *np = inet6_sk(sk);
430
431 entry.saddr = np->rcv_saddr.s6_addr32;
432 entry.daddr = np->daddr.s6_addr32;
433 } else
434#endif
435 {
436 entry.saddr = &inet->rcv_saddr;
437 entry.daddr = &inet->daddr;
438 }
439 entry.sport = inet->num;
440 entry.dport = ntohs(inet->dport);
441 entry.userlocks = sk->sk_userlocks;
442
443 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
444 return 0;
445 }
446
447 return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid,
448 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
449}
450
451static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
452 struct request_sock *req,
453 u32 pid, u32 seq,
454 const struct nlmsghdr *unlh)
455{
456 const struct inet_request_sock *ireq = inet_rsk(req);
457 struct inet_sock *inet = inet_sk(sk);
458 unsigned char *b = skb->tail;
459 struct inet_diag_msg *r;
460 struct nlmsghdr *nlh;
461 long tmo;
462
463 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
464 nlh->nlmsg_flags = NLM_F_MULTI;
465 r = NLMSG_DATA(nlh);
466
467 r->idiag_family = sk->sk_family;
468 r->idiag_state = TCP_SYN_RECV;
469 r->idiag_timer = 1;
470 r->idiag_retrans = req->retrans;
471
472 r->id.idiag_if = sk->sk_bound_dev_if;
473 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
474 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
475
476 tmo = req->expires - jiffies;
477 if (tmo < 0)
478 tmo = 0;
479
480 r->id.idiag_sport = inet->sport;
481 r->id.idiag_dport = ireq->rmt_port;
482 r->id.idiag_src[0] = ireq->loc_addr;
483 r->id.idiag_dst[0] = ireq->rmt_addr;
484 r->idiag_expires = jiffies_to_msecs(tmo);
485 r->idiag_rqueue = 0;
486 r->idiag_wqueue = 0;
487 r->idiag_uid = sock_i_uid(sk);
488 r->idiag_inode = 0;
489#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
490 if (r->idiag_family == AF_INET6) {
491 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
492 &tcp6_rsk(req)->loc_addr);
493 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
494 &tcp6_rsk(req)->rmt_addr);
495 }
496#endif
497 nlh->nlmsg_len = skb->tail - b;
498
499 return skb->len;
500
501nlmsg_failure:
502 skb_trim(skb, b - skb->data);
503 return -1;
504}
505
506static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
507 struct netlink_callback *cb)
508{
509 struct inet_diag_entry entry;
510 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
511 struct inet_connection_sock *icsk = inet_csk(sk);
512 struct listen_sock *lopt;
513 struct rtattr *bc = NULL;
514 struct inet_sock *inet = inet_sk(sk);
515 int j, s_j;
516 int reqnum, s_reqnum;
517 int err = 0;
518
519 s_j = cb->args[3];
520 s_reqnum = cb->args[4];
521
522 if (s_j > 0)
523 s_j--;
524
525 entry.family = sk->sk_family;
526
527 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
528
529 lopt = icsk->icsk_accept_queue.listen_opt;
530 if (!lopt || !lopt->qlen)
531 goto out;
532
533 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
534 bc = (struct rtattr *)(r + 1);
535 entry.sport = inet->num;
536 entry.userlocks = sk->sk_userlocks;
537 }
538
539 for (j = s_j; j < lopt->nr_table_entries; j++) {
540 struct request_sock *req, *head = lopt->syn_table[j];
541
542 reqnum = 0;
543 for (req = head; req; reqnum++, req = req->dl_next) {
544 struct inet_request_sock *ireq = inet_rsk(req);
545
546 if (reqnum < s_reqnum)
547 continue;
548 if (r->id.idiag_dport != ireq->rmt_port &&
549 r->id.idiag_dport)
550 continue;
551
552 if (bc) {
553 entry.saddr =
554#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
555 (entry.family == AF_INET6) ?
556 tcp6_rsk(req)->loc_addr.s6_addr32 :
557#endif
558 &ireq->loc_addr;
559 entry.daddr =
560#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
561 (entry.family == AF_INET6) ?
562 tcp6_rsk(req)->rmt_addr.s6_addr32 :
563#endif
564 &ireq->rmt_addr;
565 entry.dport = ntohs(ireq->rmt_port);
566
567 if (!inet_diag_bc_run(RTA_DATA(bc),
568 RTA_PAYLOAD(bc), &entry))
569 continue;
570 }
571
572 err = inet_diag_fill_req(skb, sk, req,
573 NETLINK_CB(cb->skb).pid,
574 cb->nlh->nlmsg_seq, cb->nlh);
575 if (err < 0) {
576 cb->args[3] = j + 1;
577 cb->args[4] = reqnum;
578 goto out;
579 }
580 }
581
582 s_reqnum = 0;
583 }
584
585out:
586 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
587
588 return err;
589}
590
591static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
592{
593 int i, num;
594 int s_i, s_num;
595 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
596 const struct inet_diag_handler *handler;
597 struct inet_hashinfo *hashinfo;
598
599 handler = inet_diag_table[cb->nlh->nlmsg_type];
600 BUG_ON(handler == NULL);
601 hashinfo = handler->idiag_hashinfo;
602
603 s_i = cb->args[1];
604 s_num = num = cb->args[2];
605
606 if (cb->args[0] == 0) {
607 if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
608 goto skip_listen_ht;
609
610 inet_listen_lock(hashinfo);
611 for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
612 struct sock *sk;
613 struct hlist_node *node;
614
615 num = 0;
616 sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
617 struct inet_sock *inet = inet_sk(sk);
618
619 if (num < s_num) {
620 num++;
621 continue;
622 }
623
624 if (r->id.idiag_sport != inet->sport &&
625 r->id.idiag_sport)
626 goto next_listen;
627
628 if (!(r->idiag_states & TCPF_LISTEN) ||
629 r->id.idiag_dport ||
630 cb->args[3] > 0)
631 goto syn_recv;
632
633 if (inet_diag_dump_sock(skb, sk, cb) < 0) {
634 inet_listen_unlock(hashinfo);
635 goto done;
636 }
637
638syn_recv:
639 if (!(r->idiag_states & TCPF_SYN_RECV))
640 goto next_listen;
641
642 if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
643 inet_listen_unlock(hashinfo);
644 goto done;
645 }
646
647next_listen:
648 cb->args[3] = 0;
649 cb->args[4] = 0;
650 ++num;
651 }
652
653 s_num = 0;
654 cb->args[3] = 0;
655 cb->args[4] = 0;
656 }
657 inet_listen_unlock(hashinfo);
658skip_listen_ht:
659 cb->args[0] = 1;
660 s_i = num = s_num = 0;
661 }
662
663 if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
664 return skb->len;
665
666 for (i = s_i; i < hashinfo->ehash_size; i++) {
667 struct inet_ehash_bucket *head = &hashinfo->ehash[i];
668 struct sock *sk;
669 struct hlist_node *node;
670
671 if (i > s_i)
672 s_num = 0;
673
674 read_lock_bh(&head->lock);
675
676 num = 0;
677 sk_for_each(sk, node, &head->chain) {
678 struct inet_sock *inet = inet_sk(sk);
679
680 if (num < s_num)
681 goto next_normal;
682 if (!(r->idiag_states & (1 << sk->sk_state)))
683 goto next_normal;
684 if (r->id.idiag_sport != inet->sport &&
685 r->id.idiag_sport)
686 goto next_normal;
687 if (r->id.idiag_dport != inet->dport && r->id.idiag_dport)
688 goto next_normal;
689 if (inet_diag_dump_sock(skb, sk, cb) < 0) {
690 read_unlock_bh(&head->lock);
691 goto done;
692 }
693next_normal:
694 ++num;
695 }
696
697 if (r->idiag_states & TCPF_TIME_WAIT) {
698 sk_for_each(sk, node,
699 &hashinfo->ehash[i + hashinfo->ehash_size].chain) {
700 struct inet_sock *inet = inet_sk(sk);
701
702 if (num < s_num)
703 goto next_dying;
704 if (r->id.idiag_sport != inet->sport &&
705 r->id.idiag_sport)
706 goto next_dying;
707 if (r->id.idiag_dport != inet->dport &&
708 r->id.idiag_dport)
709 goto next_dying;
710 if (inet_diag_dump_sock(skb, sk, cb) < 0) {
711 read_unlock_bh(&head->lock);
712 goto done;
713 }
714next_dying:
715 ++num;
716 }
717 }
718 read_unlock_bh(&head->lock);
719 }
720
721done:
722 cb->args[1] = i;
723 cb->args[2] = num;
724 return skb->len;
725}
726
727static int inet_diag_dump_done(struct netlink_callback *cb)
728{
729 return 0;
730}
731
732
733static __inline__ int
734inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
735{
736 if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
737 return 0;
738
739 if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX)
740 goto err_inval;
741
742 if (inet_diag_table[nlh->nlmsg_type] == NULL)
743 return -ENOENT;
744
745 if (NLMSG_LENGTH(sizeof(struct inet_diag_req)) > skb->len)
746 goto err_inval;
747
748 if (nlh->nlmsg_flags&NLM_F_DUMP) {
749 if (nlh->nlmsg_len >
750 (4 + NLMSG_SPACE(sizeof(struct inet_diag_req)))) {
751 struct rtattr *rta = (void *)(NLMSG_DATA(nlh) +
752 sizeof(struct inet_diag_req));
753 if (rta->rta_type != INET_DIAG_REQ_BYTECODE ||
754 rta->rta_len < 8 ||
755 rta->rta_len >
756 (nlh->nlmsg_len -
757 NLMSG_SPACE(sizeof(struct inet_diag_req))))
758 goto err_inval;
759 if (inet_diag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
760 goto err_inval;
761 }
762 return netlink_dump_start(idiagnl, skb, nlh,
763 inet_diag_dump,
764 inet_diag_dump_done);
765 } else {
766 return inet_diag_get_exact(skb, nlh);
767 }
768
769err_inval:
770 return -EINVAL;
771}
772
773
774static inline void inet_diag_rcv_skb(struct sk_buff *skb)
775{
776 int err;
777 struct nlmsghdr * nlh;
778
779 if (skb->len >= NLMSG_SPACE(0)) {
780 nlh = (struct nlmsghdr *)skb->data;
781 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
782 return;
783 err = inet_diag_rcv_msg(skb, nlh);
784 if (err || nlh->nlmsg_flags & NLM_F_ACK)
785 netlink_ack(skb, nlh, err);
786 }
787}
788
789static void inet_diag_rcv(struct sock *sk, int len)
790{
791 struct sk_buff *skb;
792 unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
793
794 while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
795 inet_diag_rcv_skb(skb);
796 kfree_skb(skb);
797 }
798}
799
800static DEFINE_SPINLOCK(inet_diag_register_lock);
801
802int inet_diag_register(const struct inet_diag_handler *h)
803{
804 const __u16 type = h->idiag_type;
805 int err = -EINVAL;
806
807 if (type >= INET_DIAG_GETSOCK_MAX)
808 goto out;
809
810 spin_lock(&inet_diag_register_lock);
811 err = -EEXIST;
812 if (inet_diag_table[type] == NULL) {
813 inet_diag_table[type] = h;
814 err = 0;
815 }
816 spin_unlock(&inet_diag_register_lock);
817out:
818 return err;
819}
820EXPORT_SYMBOL_GPL(inet_diag_register);
821
822void inet_diag_unregister(const struct inet_diag_handler *h)
823{
824 const __u16 type = h->idiag_type;
825
826 if (type >= INET_DIAG_GETSOCK_MAX)
827 return;
828
829 spin_lock(&inet_diag_register_lock);
830 inet_diag_table[type] = NULL;
831 spin_unlock(&inet_diag_register_lock);
832
833 synchronize_rcu();
834}
835EXPORT_SYMBOL_GPL(inet_diag_unregister);
836
837static int __init inet_diag_init(void)
838{
839 const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
840 sizeof(struct inet_diag_handler *));
841 int err = -ENOMEM;
842
843 inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL);
844 if (!inet_diag_table)
845 goto out;
846
847 memset(inet_diag_table, 0, inet_diag_table_size);
848 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
849 THIS_MODULE);
850 if (idiagnl == NULL)
851 goto out_free_table;
852 err = 0;
853out:
854 return err;
855out_free_table:
856 kfree(inet_diag_table);
857 goto out;
858}
859
860static void __exit inet_diag_exit(void)
861{
862 sock_release(idiagnl->sk_socket);
863 kfree(inet_diag_table);
864}
865
866module_init(inet_diag_init);
867module_exit(inet_diag_exit);
868MODULE_LICENSE("GPL");
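/*
 * A hedged, standalone sketch (not part of this patch): how a user-space
 * dump request could reach inet_diag_rcv_msg() above. NETLINK_INET_DIAG,
 * struct inet_diag_req and the NLM_F_* flags all appear in this file; the
 * TCPDIAG_GETSOCK constant and the <linux/inet_diag.h> header are assumed
 * from the companion headers of this series, which are not shown here.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/inet_diag.h>

static int request_tcp_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req req;
	} msg;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_INET_DIAG);

	if (fd < 0)
		return -1;
	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = TCPDIAG_GETSOCK;	/* assumed type; indexes inet_diag_table[] */
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; /* takes the netlink_dump_start() path */
	msg.req.idiag_family = AF_INET;
	msg.req.idiag_states = ~0U;		/* match sockets in every state */
	if (send(fd, &msg, sizeof(msg), 0) < 0)
		return -1;
	return fd;			/* caller recv()s the dumped records */
}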
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
new file mode 100644
index 000000000000..e8d29fe736d2
--- /dev/null
+++ b/net/ipv4/inet_hashtables.c
@@ -0,0 +1,165 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic INET transport hashtables
7 *
8 * Authors: Lotsa people, from code originally in tcp
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/wait.h>
21
22#include <net/inet_connection_sock.h>
23#include <net/inet_hashtables.h>
24
25/*
26 * Allocate and initialize a new local port bind bucket.
27 * The bindhash mutex for snum's hash chain must be held here.
28 */
29struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
30 struct inet_bind_hashbucket *head,
31 const unsigned short snum)
32{
33 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
34
35 if (tb != NULL) {
36 tb->port = snum;
37 tb->fastreuse = 0;
38 INIT_HLIST_HEAD(&tb->owners);
39 hlist_add_head(&tb->node, &head->chain);
40 }
41 return tb;
42}
43
44EXPORT_SYMBOL(inet_bind_bucket_create);
45
46/*
47 * Caller must hold hashbucket lock for this tb with local BH disabled
48 */
49void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
50{
51 if (hlist_empty(&tb->owners)) {
52 __hlist_del(&tb->node);
53 kmem_cache_free(cachep, tb);
54 }
55}
56
57void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
58 const unsigned short snum)
59{
60 inet_sk(sk)->num = snum;
61 sk_add_bind_node(sk, &tb->owners);
62 inet_csk(sk)->icsk_bind_hash = tb;
63}
64
65EXPORT_SYMBOL(inet_bind_hash);
66
67/*
68 * Get rid of any references to a local port held by the given sock.
69 */
70static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
71{
72 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
73 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
74 struct inet_bind_bucket *tb;
75
76 spin_lock(&head->lock);
77 tb = inet_csk(sk)->icsk_bind_hash;
78 __sk_del_bind_node(sk);
79 inet_csk(sk)->icsk_bind_hash = NULL;
80 inet_sk(sk)->num = 0;
81 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
82 spin_unlock(&head->lock);
83}
84
85void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
86{
87 local_bh_disable();
88 __inet_put_port(hashinfo, sk);
89 local_bh_enable();
90}
91
92EXPORT_SYMBOL(inet_put_port);
93
94/*
 95 * Without WQ_FLAG_EXCLUSIVE this lock is fine on UP but can be very bad on SMP.
 96 * Look, when several writers sleep and the reader wakes them up, all but one
 97 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
 98 * this, _but_ remember, it adds useless work on UP machines (a wake-up on each
 99 * exclusive lock release). It should really be ifdefed.
100 */
101void inet_listen_wlock(struct inet_hashinfo *hashinfo)
102{
103 write_lock(&hashinfo->lhash_lock);
104
105 if (atomic_read(&hashinfo->lhash_users)) {
106 DEFINE_WAIT(wait);
107
108 for (;;) {
109 prepare_to_wait_exclusive(&hashinfo->lhash_wait,
110 &wait, TASK_UNINTERRUPTIBLE);
111 if (!atomic_read(&hashinfo->lhash_users))
112 break;
113 write_unlock_bh(&hashinfo->lhash_lock);
114 schedule();
115 write_lock_bh(&hashinfo->lhash_lock);
116 }
117
118 finish_wait(&hashinfo->lhash_wait, &wait);
119 }
120}
121
122EXPORT_SYMBOL(inet_listen_wlock);
123
124/*
 125 * Don't inline this cruft. There are some nice properties to exploit here: the
 126 * BSD API does not allow a listening sock to specify the remote port or the
 127 * remote address for the connection, so always assume those are both
 128 * wildcarded during the search, since they can never be otherwise.
129 */
130struct sock *__inet_lookup_listener(const struct hlist_head *head, const u32 daddr,
131 const unsigned short hnum, const int dif)
132{
133 struct sock *result = NULL, *sk;
134 const struct hlist_node *node;
135 int hiscore = -1;
136
137 sk_for_each(sk, node, head) {
138 const struct inet_sock *inet = inet_sk(sk);
139
140 if (inet->num == hnum && !ipv6_only_sock(sk)) {
141 const __u32 rcv_saddr = inet->rcv_saddr;
142 int score = sk->sk_family == PF_INET ? 1 : 0;
143
144 if (rcv_saddr) {
145 if (rcv_saddr != daddr)
146 continue;
147 score += 2;
148 }
149 if (sk->sk_bound_dev_if) {
150 if (sk->sk_bound_dev_if != dif)
151 continue;
152 score += 2;
153 }
154 if (score == 5)
155 return sk;
156 if (score > hiscore) {
157 hiscore = score;
158 result = sk;
159 }
160 }
161 }
162 return result;
163}
164
165EXPORT_SYMBOL_GPL(__inet_lookup_listener);
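/*
 * A hedged, standalone sketch (not part of this patch) of the best-match
 * scoring used by __inet_lookup_listener() above: +1 for a PF_INET socket,
 * +2 each for a matching bound address and bound device, so 5 is a perfect
 * match and ends the walk early. Plain types stand in for the kernel's.
 */
struct listener_sketch {
	unsigned int rcv_saddr;	/* 0 means wildcard address */
	int bound_dev_if;	/* 0 means any device */
	int is_pf_inet;		/* PF_INET rather than PF_INET6 */
};

static int listener_score(const struct listener_sketch *l,
			  unsigned int daddr, int dif)
{
	int score = l->is_pf_inet ? 1 : 0;

	if (l->rcv_saddr) {
		if (l->rcv_saddr != daddr)
			return -1;	/* bound elsewhere: not a candidate */
		score += 2;
	}
	if (l->bound_dev_if) {
		if (l->bound_dev_if != dif)
			return -1;
		score += 2;
	}
	return score;			/* 5 == perfect, early-exit match */
}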
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
new file mode 100644
index 000000000000..4d1502a49852
--- /dev/null
+++ b/net/ipv4/inet_timewait_sock.c
@@ -0,0 +1,384 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic TIME_WAIT sockets functions
7 *
 8 * From code originally in TCP
9 */
10
11#include <linux/config.h>
12
13#include <net/inet_hashtables.h>
14#include <net/inet_timewait_sock.h>
15#include <net/ip.h>
16
17/* Must be called with locally disabled BHs. */
18void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
19{
20 struct inet_bind_hashbucket *bhead;
21 struct inet_bind_bucket *tb;
22 /* Unlink from established hashes. */
23 struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent];
24
25 write_lock(&ehead->lock);
26 if (hlist_unhashed(&tw->tw_node)) {
27 write_unlock(&ehead->lock);
28 return;
29 }
30 __hlist_del(&tw->tw_node);
31 sk_node_init(&tw->tw_node);
32 write_unlock(&ehead->lock);
33
34 /* Disassociate with bind bucket. */
35 bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
36 spin_lock(&bhead->lock);
37 tb = tw->tw_tb;
38 __hlist_del(&tw->tw_bind_node);
39 tw->tw_tb = NULL;
40 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
41 spin_unlock(&bhead->lock);
42#ifdef SOCK_REFCNT_DEBUG
43 if (atomic_read(&tw->tw_refcnt) != 1) {
44 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
45 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
46 }
47#endif
48 inet_twsk_put(tw);
49}
50
51EXPORT_SYMBOL_GPL(__inet_twsk_kill);
52
53/*
54 * Enter the time wait state. This is called with locally disabled BH.
55 * Essentially we whip up a timewait bucket, copy the relevant info into it
56 * from the SK, and mess with hash chains and list linkage.
57 */
58void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
59 struct inet_hashinfo *hashinfo)
60{
61 const struct inet_sock *inet = inet_sk(sk);
62 const struct inet_connection_sock *icsk = inet_csk(sk);
63 struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
64 struct inet_bind_hashbucket *bhead;
65 /* Step 1: Put TW into bind hash. Original socket stays there too.
66 Note, that any socket with inet->num != 0 MUST be bound in
67 binding cache, even if it is closed.
68 */
69 bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
70 spin_lock(&bhead->lock);
71 tw->tw_tb = icsk->icsk_bind_hash;
72 BUG_TRAP(icsk->icsk_bind_hash);
73 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
74 spin_unlock(&bhead->lock);
75
76 write_lock(&ehead->lock);
77
78 /* Step 2: Remove SK from established hash. */
79 if (__sk_del_node_init(sk))
80 sock_prot_dec_use(sk->sk_prot);
81
82 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
83 inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain);
84 atomic_inc(&tw->tw_refcnt);
85
86 write_unlock(&ehead->lock);
87}
88
89EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
90
91struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
92{
93 struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab,
94 SLAB_ATOMIC);
95 if (tw != NULL) {
96 const struct inet_sock *inet = inet_sk(sk);
97
98 /* Give us an identity. */
99 tw->tw_daddr = inet->daddr;
100 tw->tw_rcv_saddr = inet->rcv_saddr;
101 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
102 tw->tw_num = inet->num;
103 tw->tw_state = TCP_TIME_WAIT;
104 tw->tw_substate = state;
105 tw->tw_sport = inet->sport;
106 tw->tw_dport = inet->dport;
107 tw->tw_family = sk->sk_family;
108 tw->tw_reuse = sk->sk_reuse;
109 tw->tw_hashent = sk->sk_hashent;
110 tw->tw_ipv6only = 0;
111 tw->tw_prot = sk->sk_prot_creator;
112 atomic_set(&tw->tw_refcnt, 1);
113 inet_twsk_dead_node_init(tw);
114 }
115
116 return tw;
117}
118
119EXPORT_SYMBOL_GPL(inet_twsk_alloc);
120
121/* Returns non-zero if quota exceeded. */
122static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
123 const int slot)
124{
125 struct inet_timewait_sock *tw;
126 struct hlist_node *node;
127 unsigned int killed;
128 int ret;
129
 130 /* NOTE: compare this to the previous version, where the lock
 131 * was released after detaching the chain. That was racy,
 132 * because tw buckets are scheduled in a non-serialized context
 133 * in 2.3 (with netfilter), and with softnet it is common, because
 134 * soft irqs are not sequenced.
135 */
136 killed = 0;
137 ret = 0;
138rescan:
139 inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
140 __inet_twsk_del_dead_node(tw);
141 spin_unlock(&twdr->death_lock);
142 __inet_twsk_kill(tw, twdr->hashinfo);
143 inet_twsk_put(tw);
144 killed++;
145 spin_lock(&twdr->death_lock);
146 if (killed > INET_TWDR_TWKILL_QUOTA) {
147 ret = 1;
148 break;
149 }
150
151 /* While we dropped twdr->death_lock, another cpu may have
152 * killed off the next TW bucket in the list, therefore
153 * do a fresh re-read of the hlist head node with the
154 * lock reacquired. We still use the hlist traversal
155 * macro in order to get the prefetches.
156 */
157 goto rescan;
158 }
159
160 twdr->tw_count -= killed;
161 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
162
163 return ret;
164}
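/*
 * A hedged, user-space analogue (not part of this patch) of the quota'd
 * rescan loop above: unlink one node under the lock, drop the lock for
 * the expensive kill, then retake it and restart from the head, which
 * may have changed in the meantime.
 */
#include <pthread.h>

struct node_sketch { struct node_sketch *next; };

static struct node_sketch *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void kill_node(struct node_sketch *n)
{
	(void)n;	/* expensive teardown, done with the lock dropped */
}

static int reap(int quota)
{
	int killed = 0;

	pthread_mutex_lock(&list_lock);
	while (head && killed < quota) {
		struct node_sketch *n = head;

		head = n->next;			/* unlink under the lock */
		pthread_mutex_unlock(&list_lock);
		kill_node(n);
		killed++;
		pthread_mutex_lock(&list_lock);	/* rescan: head may differ */
	}
	pthread_mutex_unlock(&list_lock);
	return killed;		/* caller reschedules if the quota was hit */
}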
165
166void inet_twdr_hangman(unsigned long data)
167{
168 struct inet_timewait_death_row *twdr;
 169 unsigned int need_timer;
170
171 twdr = (struct inet_timewait_death_row *)data;
172 spin_lock(&twdr->death_lock);
173
174 if (twdr->tw_count == 0)
175 goto out;
176
177 need_timer = 0;
178 if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
179 twdr->thread_slots |= (1 << twdr->slot);
180 mb();
181 schedule_work(&twdr->twkill_work);
182 need_timer = 1;
183 } else {
184 /* We purged the entire slot, anything left? */
185 if (twdr->tw_count)
186 need_timer = 1;
187 }
188 twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
189 if (need_timer)
190 mod_timer(&twdr->tw_timer, jiffies + twdr->period);
191out:
192 spin_unlock(&twdr->death_lock);
193}
194
195EXPORT_SYMBOL_GPL(inet_twdr_hangman);
196
197extern void twkill_slots_invalid(void);
198
199void inet_twdr_twkill_work(void *data)
200{
201 struct inet_timewait_death_row *twdr = data;
202 int i;
203
204 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
205 twkill_slots_invalid();
206
207 while (twdr->thread_slots) {
208 spin_lock_bh(&twdr->death_lock);
209 for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
210 if (!(twdr->thread_slots & (1 << i)))
211 continue;
212
213 while (inet_twdr_do_twkill_work(twdr, i) != 0) {
214 if (need_resched()) {
215 spin_unlock_bh(&twdr->death_lock);
216 schedule();
217 spin_lock_bh(&twdr->death_lock);
218 }
219 }
220
221 twdr->thread_slots &= ~(1 << i);
222 }
223 spin_unlock_bh(&twdr->death_lock);
224 }
225}
226
227EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
228
229/* These are always called from BH context. See callers in
230 * tcp_input.c to verify this.
231 */
232
233/* This is for handling early-kills of TIME_WAIT sockets. */
234void inet_twsk_deschedule(struct inet_timewait_sock *tw,
235 struct inet_timewait_death_row *twdr)
236{
237 spin_lock(&twdr->death_lock);
238 if (inet_twsk_del_dead_node(tw)) {
239 inet_twsk_put(tw);
240 if (--twdr->tw_count == 0)
241 del_timer(&twdr->tw_timer);
242 }
243 spin_unlock(&twdr->death_lock);
244 __inet_twsk_kill(tw, twdr->hashinfo);
245}
246
247EXPORT_SYMBOL(inet_twsk_deschedule);
248
249void inet_twsk_schedule(struct inet_timewait_sock *tw,
250 struct inet_timewait_death_row *twdr,
251 const int timeo, const int timewait_len)
252{
253 struct hlist_head *list;
254 int slot;
255
256 /* timeout := RTO * 3.5
257 *
258 * 3.5 = 1+2+0.5 to wait for two retransmits.
259 *
 260 * RATIONALE: if the FIN arrived and we entered TIME-WAIT state,
 261 * our ACK acking that FIN can be lost. If N subsequent retransmitted
 262 * FINs (or previous segments) are lost (the probability of such an
 263 * event is p^(N+1), where p is the probability of losing a single
 264 * packet, and the time to detect the loss is about RTO*(2^N - 1) with
 265 * exponential backoff). The normal timewait length is calculated so
 266 * that we wait at least for one retransmitted FIN (maximal RTO is 120 sec).
 267 * [ BTW Linux, following BSD, violates this requirement, waiting
 268 * only for 60 sec; we should wait at least 240 sec.
 269 * Well, 240 consumes too many resources 8)
 270 * ]
 271 * This interval is not reduced to catch old duplicates and
 272 * responses to our wandering segments living for two MSLs.
273 * However, if we use PAWS to detect
274 * old duplicates, we can reduce the interval to bounds required
275 * by RTO, rather than MSL. So, if peer understands PAWS, we
276 * kill tw bucket after 3.5*RTO (it is important that this number
277 * is greater than TS tick!) and detect old duplicates with help
278 * of PAWS.
279 */
280 slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
281
282 spin_lock(&twdr->death_lock);
283
284 /* Unlink it, if it was scheduled */
285 if (inet_twsk_del_dead_node(tw))
286 twdr->tw_count--;
287 else
288 atomic_inc(&tw->tw_refcnt);
289
290 if (slot >= INET_TWDR_RECYCLE_SLOTS) {
291 /* Schedule to slow timer */
292 if (timeo >= timewait_len) {
293 slot = INET_TWDR_TWKILL_SLOTS - 1;
294 } else {
295 slot = (timeo + twdr->period - 1) / twdr->period;
296 if (slot >= INET_TWDR_TWKILL_SLOTS)
297 slot = INET_TWDR_TWKILL_SLOTS - 1;
298 }
299 tw->tw_ttd = jiffies + timeo;
300 slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
301 list = &twdr->cells[slot];
302 } else {
303 tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
304
305 if (twdr->twcal_hand < 0) {
306 twdr->twcal_hand = 0;
307 twdr->twcal_jiffie = jiffies;
308 twdr->twcal_timer.expires = twdr->twcal_jiffie +
309 (slot << INET_TWDR_RECYCLE_TICK);
310 add_timer(&twdr->twcal_timer);
311 } else {
312 if (time_after(twdr->twcal_timer.expires,
313 jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
314 mod_timer(&twdr->twcal_timer,
315 jiffies + (slot << INET_TWDR_RECYCLE_TICK));
316 slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
317 }
318 list = &twdr->twcal_row[slot];
319 }
320
321 hlist_add_head(&tw->tw_death_node, list);
322
323 if (twdr->tw_count++ == 0)
324 mod_timer(&twdr->tw_timer, jiffies + twdr->period);
325 spin_unlock(&twdr->death_lock);
326}
327
328EXPORT_SYMBOL_GPL(inet_twsk_schedule);
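/*
 * A hedged, standalone demo (not part of this patch) of the slot
 * arithmetic in inet_twsk_schedule() above, slightly simplified. The
 * tick, slot and period values are example numbers; in the real headers
 * they are HZ-dependent.
 */
#include <stdio.h>

#define RECYCLE_TICK	9	/* assumed: 2^9 jiffies per recycle slot */
#define RECYCLE_SLOTS	32
#define TWKILL_SLOTS	8
#define PERIOD		(60 * 1000 / TWKILL_SLOTS)	/* example slow-timer period */

static void classify(int timeo)
{
	/* Round the timeout up to recycle-tick granularity, as above. */
	int slot = (timeo + (1 << RECYCLE_TICK) - 1) >> RECYCLE_TICK;

	if (slot < RECYCLE_SLOTS) {
		printf("timeo=%d -> recycle row, slot %d\n", timeo, slot);
	} else {
		slot = (timeo + PERIOD - 1) / PERIOD;
		if (slot >= TWKILL_SLOTS)
			slot = TWKILL_SLOTS - 1;
		printf("timeo=%d -> slow timer, slot %d\n", timeo, slot);
	}
}

int main(void)
{
	classify(3 * 1000);	/* a short 3.5*RTO-style timeout */
	classify(60 * 1000);	/* a full 60 s timewait */
	return 0;
}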
329
330void inet_twdr_twcal_tick(unsigned long data)
331{
332 struct inet_timewait_death_row *twdr;
333 int n, slot;
334 unsigned long j;
335 unsigned long now = jiffies;
336 int killed = 0;
337 int adv = 0;
338
339 twdr = (struct inet_timewait_death_row *)data;
340
341 spin_lock(&twdr->death_lock);
342 if (twdr->twcal_hand < 0)
343 goto out;
344
345 slot = twdr->twcal_hand;
346 j = twdr->twcal_jiffie;
347
348 for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
349 if (time_before_eq(j, now)) {
350 struct hlist_node *node, *safe;
351 struct inet_timewait_sock *tw;
352
353 inet_twsk_for_each_inmate_safe(tw, node, safe,
354 &twdr->twcal_row[slot]) {
355 __inet_twsk_del_dead_node(tw);
356 __inet_twsk_kill(tw, twdr->hashinfo);
357 inet_twsk_put(tw);
358 killed++;
359 }
360 } else {
361 if (!adv) {
362 adv = 1;
363 twdr->twcal_jiffie = j;
364 twdr->twcal_hand = slot;
365 }
366
367 if (!hlist_empty(&twdr->twcal_row[slot])) {
368 mod_timer(&twdr->twcal_timer, j);
369 goto out;
370 }
371 }
372 j += 1 << INET_TWDR_RECYCLE_TICK;
373 slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
374 }
375 twdr->twcal_hand = -1;
376
377out:
378 if ((twdr->tw_count -= killed) == 0)
379 del_timer(&twdr->tw_timer);
380 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
381 spin_unlock(&twdr->death_lock);
382}
383
384EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index ab18a853d7ce..f84ba9c96551 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/net.h> 22#include <linux/net.h>
23#include <net/ip.h>
23#include <net/inetpeer.h> 24#include <net/inetpeer.h>
24 25
25/* 26/*
@@ -72,7 +73,7 @@
72/* Exported for inet_getid inline function. */ 73/* Exported for inet_getid inline function. */
73DEFINE_SPINLOCK(inet_peer_idlock); 74DEFINE_SPINLOCK(inet_peer_idlock);
74 75
75static kmem_cache_t *peer_cachep; 76static kmem_cache_t *peer_cachep __read_mostly;
76 77
77#define node_height(x) x->avl_height 78#define node_height(x) x->avl_height
78static struct inet_peer peer_fake_node = { 79static struct inet_peer peer_fake_node = {
@@ -459,5 +460,3 @@ static void peer_check_expire(unsigned long dummy)
459 peer_total / inet_peer_threshold * HZ; 460 peer_total / inet_peer_threshold * HZ;
460 add_timer(&peer_periodic_timer); 461 add_timer(&peer_periodic_timer);
461} 462}
462
463EXPORT_SYMBOL(inet_peer_idlock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 77094aac6c28..0923add122b4 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -76,16 +76,12 @@ int ip_forward(struct sk_buff *skb)
76 * that reaches zero, we must reply an ICMP control message telling 76 * that reaches zero, we must reply an ICMP control message telling
77 * that the packet's lifetime expired. 77 * that the packet's lifetime expired.
78 */ 78 */
79 79 if (skb->nh.iph->ttl <= 1)
80 iph = skb->nh.iph;
81
82 if (iph->ttl <= 1)
83 goto too_many_hops; 80 goto too_many_hops;
84 81
85 if (!xfrm4_route_forward(skb)) 82 if (!xfrm4_route_forward(skb))
86 goto drop; 83 goto drop;
87 84
88 iph = skb->nh.iph;
89 rt = (struct rtable*)skb->dst; 85 rt = (struct rtable*)skb->dst;
90 86
91 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 87 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index eb377ae15305..9e6e683cc34d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -377,7 +377,7 @@ static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
377 return ip_frag_intern(hash, qp); 377 return ip_frag_intern(hash, qp);
378 378
379out_nomem: 379out_nomem:
380 LIMIT_NETDEBUG(printk(KERN_ERR "ip_frag_create: no memory left !\n")); 380 LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
381 return NULL; 381 return NULL;
382} 382}
383 383
@@ -533,7 +533,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
533 if (skb->dev) 533 if (skb->dev)
534 qp->iif = skb->dev->ifindex; 534 qp->iif = skb->dev->ifindex;
535 skb->dev = NULL; 535 skb->dev = NULL;
536 qp->stamp = skb->stamp; 536 skb_get_timestamp(skb, &qp->stamp);
537 qp->meat += skb->len; 537 qp->meat += skb->len;
538 atomic_add(skb->truesize, &ip_frag_mem); 538 atomic_add(skb->truesize, &ip_frag_mem);
539 if (offset == 0) 539 if (offset == 0)
@@ -615,7 +615,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
615 615
616 head->next = NULL; 616 head->next = NULL;
617 head->dev = dev; 617 head->dev = dev;
618 head->stamp = qp->stamp; 618 skb_set_timestamp(head, &qp->stamp);
619 619
620 iph = head->nh.iph; 620 iph = head->nh.iph;
621 iph->frag_off = 0; 621 iph->frag_off = 0;
@@ -625,8 +625,8 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
625 return head; 625 return head;
626 626
627out_nomem: 627out_nomem:
628 LIMIT_NETDEBUG(printk(KERN_ERR "IP: queue_glue: no memory for gluing " 628 LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
629 "queue %p\n", qp)); 629 "queue %p\n", qp);
630 goto out_fail; 630 goto out_fail;
631out_oversize: 631out_oversize:
632 if (net_ratelimit()) 632 if (net_ratelimit())
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c703528e0bcd..473d0f2b2e0d 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -150,7 +150,7 @@
150 * SNMP management statistics 150 * SNMP management statistics
151 */ 151 */
152 152
153DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics); 153DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
154 154
155/* 155/*
156 * Process Router Attention IP option 156 * Process Router Attention IP option
@@ -225,8 +225,8 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
 225 /* If there may be a raw socket we must check - if not we 225 /* If there may be a raw socket we must check - if not we
 226 * couldn't care less 226 * couldn't care less
227 */ 227 */
228 if (raw_sk) 228 if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash))
229 raw_v4_input(skb, skb->nh.iph, hash); 229 raw_sk = NULL;
230 230
231 if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) { 231 if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
232 int ret; 232 int ret;
@@ -279,18 +279,70 @@ int ip_local_deliver(struct sk_buff *skb)
279 ip_local_deliver_finish); 279 ip_local_deliver_finish);
280} 280}
281 281
282static inline int ip_rcv_finish(struct sk_buff *skb) 282static inline int ip_rcv_options(struct sk_buff *skb)
283{ 283{
284 struct ip_options *opt;
285 struct iphdr *iph;
284 struct net_device *dev = skb->dev; 286 struct net_device *dev = skb->dev;
287
 288 /* It looks like overkill, because not all
 289 IP options require packet mangling.
 290 But it is the easiest for now, especially taking
 291 into account that the combination of IP options
 292 and a running sniffer is an extremely rare condition.
293 --ANK (980813)
294 */
295 if (skb_cow(skb, skb_headroom(skb))) {
296 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
297 goto drop;
298 }
299
300 iph = skb->nh.iph;
301
302 if (ip_options_compile(NULL, skb)) {
303 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
304 goto drop;
305 }
306
307 opt = &(IPCB(skb)->opt);
308 if (unlikely(opt->srr)) {
309 struct in_device *in_dev = in_dev_get(dev);
310 if (in_dev) {
311 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
312 if (IN_DEV_LOG_MARTIANS(in_dev) &&
313 net_ratelimit())
314 printk(KERN_INFO "source route option "
315 "%u.%u.%u.%u -> %u.%u.%u.%u\n",
316 NIPQUAD(iph->saddr),
317 NIPQUAD(iph->daddr));
318 in_dev_put(in_dev);
319 goto drop;
320 }
321
322 in_dev_put(in_dev);
323 }
324
325 if (ip_options_rcv_srr(skb))
326 goto drop;
327 }
328
329 return 0;
330drop:
331 return -1;
332}
333
334static inline int ip_rcv_finish(struct sk_buff *skb)
335{
285 struct iphdr *iph = skb->nh.iph; 336 struct iphdr *iph = skb->nh.iph;
286 int err;
287 337
288 /* 338 /*
289 * Initialise the virtual path cache for the packet. It describes 339 * Initialise the virtual path cache for the packet. It describes
290 * how the packet travels inside Linux networking. 340 * how the packet travels inside Linux networking.
291 */ 341 */
292 if (skb->dst == NULL) { 342 if (likely(skb->dst == NULL)) {
293 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { 343 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
344 skb->dev);
345 if (unlikely(err)) {
294 if (err == -EHOSTUNREACH) 346 if (err == -EHOSTUNREACH)
295 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); 347 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
296 goto drop; 348 goto drop;
@@ -298,7 +350,7 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
298 } 350 }
299 351
300#ifdef CONFIG_NET_CLS_ROUTE 352#ifdef CONFIG_NET_CLS_ROUTE
301 if (skb->dst->tclassid) { 353 if (unlikely(skb->dst->tclassid)) {
302 struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id(); 354 struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
303 u32 idx = skb->dst->tclassid; 355 u32 idx = skb->dst->tclassid;
304 st[idx&0xFF].o_packets++; 356 st[idx&0xFF].o_packets++;
@@ -308,48 +360,11 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
308 } 360 }
309#endif 361#endif
310 362
311 if (iph->ihl > 5) { 363 if (iph->ihl > 5 && ip_rcv_options(skb))
312 struct ip_options *opt; 364 goto drop;
313
314 /* It looks as overkill, because not all
315 IP options require packet mangling.
316 But it is the easiest for now, especially taking
317 into account that combination of IP options
318 and running sniffer is extremely rare condition.
319 --ANK (980813)
320 */
321
322 if (skb_cow(skb, skb_headroom(skb))) {
323 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
324 goto drop;
325 }
326 iph = skb->nh.iph;
327
328 if (ip_options_compile(NULL, skb))
329 goto inhdr_error;
330
331 opt = &(IPCB(skb)->opt);
332 if (opt->srr) {
333 struct in_device *in_dev = in_dev_get(dev);
334 if (in_dev) {
335 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
336 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
337 printk(KERN_INFO "source route option %u.%u.%u.%u -> %u.%u.%u.%u\n",
338 NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
339 in_dev_put(in_dev);
340 goto drop;
341 }
342 in_dev_put(in_dev);
343 }
344 if (ip_options_rcv_srr(skb))
345 goto drop;
346 }
347 }
348 365
349 return dst_input(skb); 366 return dst_input(skb);
350 367
351inhdr_error:
352 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
353drop: 368drop:
354 kfree_skb(skb); 369 kfree_skb(skb);
355 return NET_RX_DROP; 370 return NET_RX_DROP;
@@ -358,9 +373,10 @@ drop:
358/* 373/*
359 * Main IP Receive routine. 374 * Main IP Receive routine.
360 */ 375 */
361int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 376int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
362{ 377{
363 struct iphdr *iph; 378 struct iphdr *iph;
379 u32 len;
364 380
365 /* When the interface is in promisc. mode, drop all the crap 381 /* When the interface is in promisc. mode, drop all the crap
366 * that it receives, do not try to analyse it. 382 * that it receives, do not try to analyse it.
@@ -392,29 +408,27 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
392 */ 408 */
393 409
394 if (iph->ihl < 5 || iph->version != 4) 410 if (iph->ihl < 5 || iph->version != 4)
395 goto inhdr_error; 411 goto inhdr_error;
396 412
397 if (!pskb_may_pull(skb, iph->ihl*4)) 413 if (!pskb_may_pull(skb, iph->ihl*4))
398 goto inhdr_error; 414 goto inhdr_error;
399 415
400 iph = skb->nh.iph; 416 iph = skb->nh.iph;
401 417
402 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) 418 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
403 goto inhdr_error; 419 goto inhdr_error;
404 420
405 { 421 len = ntohs(iph->tot_len);
406 __u32 len = ntohs(iph->tot_len); 422 if (skb->len < len || len < (iph->ihl*4))
407 if (skb->len < len || len < (iph->ihl<<2)) 423 goto inhdr_error;
408 goto inhdr_error;
409 424
410 /* Our transport medium may have padded the buffer out. Now we know it 425 /* Our transport medium may have padded the buffer out. Now we know it
411 * is IP we can trim to the true length of the frame. 426 * is IP we can trim to the true length of the frame.
412 * Note this now means skb->len holds ntohs(iph->tot_len). 427 * Note this now means skb->len holds ntohs(iph->tot_len).
413 */ 428 */
414 if (pskb_trim_rcsum(skb, len)) { 429 if (pskb_trim_rcsum(skb, len)) {
415 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); 430 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
416 goto drop; 431 goto drop;
417 }
418 } 432 }
419 433
420 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, 434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
@@ -428,5 +442,4 @@ out:
428 return NET_RX_DROP; 442 return NET_RX_DROP;
429} 443}
430 444
431EXPORT_SYMBOL(ip_rcv);
432EXPORT_SYMBOL(ip_statistics); 445EXPORT_SYMBOL(ip_statistics);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 6d89f3f3e701..bce4e875193b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -489,23 +489,18 @@ void ip_options_undo(struct ip_options * opt)
489 } 489 }
490} 490}
491 491
492int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user) 492static struct ip_options *ip_options_get_alloc(const int optlen)
493{ 493{
494 struct ip_options *opt; 494 struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
495 GFP_KERNEL);
496 if (opt)
497 memset(opt, 0, sizeof(*opt));
498 return opt;
499}
495 500
496 opt = kmalloc(sizeof(struct ip_options)+((optlen+3)&~3), GFP_KERNEL); 501static int ip_options_get_finish(struct ip_options **optp,
497 if (!opt) 502 struct ip_options *opt, int optlen)
498 return -ENOMEM; 503{
499 memset(opt, 0, sizeof(struct ip_options));
500 if (optlen) {
501 if (user) {
502 if (copy_from_user(opt->__data, data, optlen)) {
503 kfree(opt);
504 return -EFAULT;
505 }
506 } else
507 memcpy(opt->__data, data, optlen);
508 }
509 while (optlen & 3) 504 while (optlen & 3)
510 opt->__data[optlen++] = IPOPT_END; 505 opt->__data[optlen++] = IPOPT_END;
511 opt->optlen = optlen; 506 opt->optlen = optlen;
@@ -521,6 +516,30 @@ int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, in
521 return 0; 516 return 0;
522} 517}
523 518
519int ip_options_get_from_user(struct ip_options **optp, unsigned char __user *data, int optlen)
520{
521 struct ip_options *opt = ip_options_get_alloc(optlen);
522
523 if (!opt)
524 return -ENOMEM;
525 if (optlen && copy_from_user(opt->__data, data, optlen)) {
526 kfree(opt);
527 return -EFAULT;
528 }
529 return ip_options_get_finish(optp, opt, optlen);
530}
531
532int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen)
533{
534 struct ip_options *opt = ip_options_get_alloc(optlen);
535
536 if (!opt)
537 return -ENOMEM;
538 if (optlen)
539 memcpy(opt->__data, data, optlen);
540 return ip_options_get_finish(optp, opt, optlen);
541}
542
524void ip_forward_options(struct sk_buff *skb) 543void ip_forward_options(struct sk_buff *skb)
525{ 544{
526 struct ip_options * opt = &(IPCB(skb)->opt); 545 struct ip_options * opt = &(IPCB(skb)->opt);
@@ -620,6 +639,3 @@ int ip_options_rcv_srr(struct sk_buff *skb)
620 } 639 }
621 return 0; 640 return 0;
622} 641}
623
624EXPORT_SYMBOL(ip_options_compile);
625EXPORT_SYMBOL(ip_options_undo);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 80d13103b2b0..3f1a263e1249 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -69,13 +69,10 @@
69#include <net/ip.h> 69#include <net/ip.h>
70#include <net/protocol.h> 70#include <net/protocol.h>
71#include <net/route.h> 71#include <net/route.h>
72#include <net/tcp.h>
73#include <net/udp.h>
74#include <linux/skbuff.h> 72#include <linux/skbuff.h>
75#include <net/sock.h> 73#include <net/sock.h>
76#include <net/arp.h> 74#include <net/arp.h>
77#include <net/icmp.h> 75#include <net/icmp.h>
78#include <net/raw.h>
79#include <net/checksum.h> 76#include <net/checksum.h>
80#include <net/inetpeer.h> 77#include <net/inetpeer.h>
81#include <net/checksum.h> 78#include <net/checksum.h>
@@ -84,12 +81,8 @@
84#include <linux/netfilter_bridge.h> 81#include <linux/netfilter_bridge.h>
85#include <linux/mroute.h> 82#include <linux/mroute.h>
86#include <linux/netlink.h> 83#include <linux/netlink.h>
84#include <linux/tcp.h>
87 85
88/*
89 * Shall we try to damage output packets if routing dev changes?
90 */
91
92int sysctl_ip_dynaddr;
93int sysctl_ip_default_ttl = IPDEFTTL; 86int sysctl_ip_default_ttl = IPDEFTTL;
94 87
95/* Generate a checksum for an outgoing IP datagram. */ 88/* Generate a checksum for an outgoing IP datagram. */
@@ -165,6 +158,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
165 dst_output); 158 dst_output);
166} 159}
167 160
161EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
162
168static inline int ip_finish_output2(struct sk_buff *skb) 163static inline int ip_finish_output2(struct sk_buff *skb)
169{ 164{
170 struct dst_entry *dst = skb->dst; 165 struct dst_entry *dst = skb->dst;
@@ -205,7 +200,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
205 return -EINVAL; 200 return -EINVAL;
206} 201}
207 202
208int ip_finish_output(struct sk_buff *skb) 203static inline int ip_finish_output(struct sk_buff *skb)
209{ 204{
210 struct net_device *dev = skb->dst->dev; 205 struct net_device *dev = skb->dst->dev;
211 206
@@ -329,8 +324,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
329 if (ip_route_output_flow(&rt, &fl, sk, 0)) 324 if (ip_route_output_flow(&rt, &fl, sk, 0))
330 goto no_route; 325 goto no_route;
331 } 326 }
332 __sk_dst_set(sk, &rt->u.dst); 327 sk_setup_caps(sk, &rt->u.dst);
333 tcp_v4_setup_caps(sk, &rt->u.dst);
334 } 328 }
335 skb->dst = dst_clone(&rt->u.dst); 329 skb->dst = dst_clone(&rt->u.dst);
336 330
@@ -392,7 +386,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
392#endif 386#endif
393#ifdef CONFIG_NETFILTER 387#ifdef CONFIG_NETFILTER
394 to->nfmark = from->nfmark; 388 to->nfmark = from->nfmark;
395 to->nfcache = from->nfcache;
396 /* Connection association is same as pre-frag packet */ 389 /* Connection association is same as pre-frag packet */
397 nf_conntrack_put(to->nfct); 390 nf_conntrack_put(to->nfct);
398 to->nfct = from->nfct; 391 to->nfct = from->nfct;
@@ -580,7 +573,7 @@ slow_path:
580 */ 573 */
581 574
582 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) { 575 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
583 NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n")); 576 NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
584 err = -ENOMEM; 577 err = -ENOMEM;
585 goto fail; 578 goto fail;
586 } 579 }
@@ -1329,12 +1322,7 @@ void __init ip_init(void)
1329#endif 1322#endif
1330} 1323}
1331 1324
1332EXPORT_SYMBOL(ip_finish_output);
1333EXPORT_SYMBOL(ip_fragment); 1325EXPORT_SYMBOL(ip_fragment);
1334EXPORT_SYMBOL(ip_generic_getfrag); 1326EXPORT_SYMBOL(ip_generic_getfrag);
1335EXPORT_SYMBOL(ip_queue_xmit); 1327EXPORT_SYMBOL(ip_queue_xmit);
1336EXPORT_SYMBOL(ip_send_check); 1328EXPORT_SYMBOL(ip_send_check);
1337
1338#ifdef CONFIG_SYSCTL
1339EXPORT_SYMBOL(sysctl_ip_default_ttl);
1340#endif
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ff4bd067b397..2f0b47da5b37 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -153,7 +153,7 @@ int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc)
153 switch (cmsg->cmsg_type) { 153 switch (cmsg->cmsg_type) {
154 case IP_RETOPTS: 154 case IP_RETOPTS:
155 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 155 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
156 err = ip_options_get(&ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40, 0); 156 err = ip_options_get(&ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40);
157 if (err) 157 if (err)
158 return err; 158 return err;
159 break; 159 break;
@@ -425,7 +425,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
425 struct ip_options * opt = NULL; 425 struct ip_options * opt = NULL;
426 if (optlen > 40 || optlen < 0) 426 if (optlen > 40 || optlen < 0)
427 goto e_inval; 427 goto e_inval;
428 err = ip_options_get(&opt, optval, optlen, 1); 428 err = ip_options_get_from_user(&opt, optval, optlen);
429 if (err) 429 if (err)
430 break; 430 break;
431 if (sk->sk_type == SOCK_STREAM) { 431 if (sk->sk_type == SOCK_STREAM) {
@@ -614,7 +614,6 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
614 } 614 }
615 case IP_MSFILTER: 615 case IP_MSFILTER:
616 { 616 {
617 extern int sysctl_optmem_max;
618 extern int sysctl_igmp_max_msf; 617 extern int sysctl_igmp_max_msf;
619 struct ip_msfilter *msf; 618 struct ip_msfilter *msf;
620 619
@@ -769,7 +768,6 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
769 } 768 }
770 case MCAST_MSFILTER: 769 case MCAST_MSFILTER:
771 { 770 {
772 extern int sysctl_optmem_max;
773 extern int sysctl_igmp_max_msf; 771 extern int sysctl_igmp_max_msf;
774 struct sockaddr_in *psin; 772 struct sockaddr_in *psin;
775 struct ip_msfilter *msf = NULL; 773 struct ip_msfilter *msf = NULL;
@@ -1090,7 +1088,5 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
1090 1088
1091EXPORT_SYMBOL(ip_cmsg_recv); 1089EXPORT_SYMBOL(ip_cmsg_recv);
1092 1090
1093#ifdef CONFIG_IP_SCTP_MODULE
1094EXPORT_SYMBOL(ip_getsockopt); 1091EXPORT_SYMBOL(ip_getsockopt);
1095EXPORT_SYMBOL(ip_setsockopt); 1092EXPORT_SYMBOL(ip_setsockopt);
1096#endif
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 7ded6e60f43a..dcb7ee6c4858 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -214,8 +214,8 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
214 spi, IPPROTO_COMP, AF_INET); 214 spi, IPPROTO_COMP, AF_INET);
215 if (!x) 215 if (!x)
216 return; 216 return;
217 NETDEBUG(printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n", 217 NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
218 spi, NIPQUAD(iph->daddr))); 218 spi, NIPQUAD(iph->daddr));
219 xfrm_state_put(x); 219 xfrm_state_put(x);
220} 220}
221 221
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index d2bf8e1930a3..63e106605f28 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -393,7 +393,7 @@ static int __init ic_defaults(void)
393 393
394#ifdef IPCONFIG_RARP 394#ifdef IPCONFIG_RARP
395 395
396static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt); 396static int ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
397 397
398static struct packet_type rarp_packet_type __initdata = { 398static struct packet_type rarp_packet_type __initdata = {
399 .type = __constant_htons(ETH_P_RARP), 399 .type = __constant_htons(ETH_P_RARP),
@@ -414,7 +414,7 @@ static inline void ic_rarp_cleanup(void)
414 * Process received RARP packet. 414 * Process received RARP packet.
415 */ 415 */
416static int __init 416static int __init
417ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 417ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
418{ 418{
419 struct arphdr *rarp; 419 struct arphdr *rarp;
420 unsigned char *rarp_ptr; 420 unsigned char *rarp_ptr;
@@ -555,7 +555,7 @@ struct bootp_pkt { /* BOOTP packet format */
555#define DHCPRELEASE 7 555#define DHCPRELEASE 7
556#define DHCPINFORM 8 556#define DHCPINFORM 8
557 557
558static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt); 558static int ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev);
559 559
560static struct packet_type bootp_packet_type __initdata = { 560static struct packet_type bootp_packet_type __initdata = {
561 .type = __constant_htons(ETH_P_IP), 561 .type = __constant_htons(ETH_P_IP),
@@ -823,7 +823,7 @@ static void __init ic_do_bootp_ext(u8 *ext)
823/* 823/*
824 * Receive BOOTP reply. 824 * Receive BOOTP reply.
825 */ 825 */
826static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 826static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
827{ 827{
828 struct bootp_pkt *b; 828 struct bootp_pkt *b;
829 struct iphdr *h; 829 struct iphdr *h;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index dc806b578427..9dbf5909f3a6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
103 In this case data path is free of exclusive locks at all. 103 In this case data path is free of exclusive locks at all.
104 */ 104 */
105 105
106static kmem_cache_t *mrt_cachep; 106static kmem_cache_t *mrt_cachep __read_mostly;
107 107
108static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); 108static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
109static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); 109static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index d9212addd193..6e092dadb388 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -26,6 +26,7 @@
26#include <linux/in.h> 26#include <linux/in.h>
27#include <linux/ip.h> 27#include <linux/ip.h>
28#include <net/protocol.h> 28#include <net/protocol.h>
29#include <net/tcp.h>
29#include <asm/system.h> 30#include <asm/system.h>
30#include <linux/stat.h> 31#include <linux/stat.h>
31#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index d0145a8b1551..e11952ea17af 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -40,7 +40,7 @@
40static struct list_head *ip_vs_conn_tab; 40static struct list_head *ip_vs_conn_tab;
41 41
42/* SLAB cache for IPVS connections */ 42/* SLAB cache for IPVS connections */
43static kmem_cache_t *ip_vs_conn_cachep; 43static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
44 44
45/* counter for current IPVS connections */ 45/* counter for current IPVS connections */
46static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); 46static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 5fb257dd07cb..3ac7eeca04ac 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -22,6 +22,7 @@
22 * 22 *
23 * Changes: 23 * Changes:
24 * Paul `Rusty' Russell properly handle non-linear skbs 24 * Paul `Rusty' Russell properly handle non-linear skbs
25 * Harald Welte don't use nfcache
25 * 26 *
26 */ 27 */
27 28
@@ -529,7 +530,7 @@ static unsigned int ip_vs_post_routing(unsigned int hooknum,
529 const struct net_device *out, 530 const struct net_device *out,
530 int (*okfn)(struct sk_buff *)) 531 int (*okfn)(struct sk_buff *))
531{ 532{
532 if (!((*pskb)->nfcache & NFC_IPVS_PROPERTY)) 533 if (!((*pskb)->ipvs_property))
533 return NF_ACCEPT; 534 return NF_ACCEPT;
534 535
535 /* The packet was sent from IPVS, exit this chain */ 536 /* The packet was sent from IPVS, exit this chain */
@@ -701,7 +702,7 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related)
701 /* do the statistics and put it back */ 702 /* do the statistics and put it back */
702 ip_vs_out_stats(cp, skb); 703 ip_vs_out_stats(cp, skb);
703 704
704 skb->nfcache |= NFC_IPVS_PROPERTY; 705 skb->ipvs_property = 1;
705 verdict = NF_ACCEPT; 706 verdict = NF_ACCEPT;
706 707
707 out: 708 out:
@@ -739,7 +740,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
739 740
740 EnterFunction(11); 741 EnterFunction(11);
741 742
742 if (skb->nfcache & NFC_IPVS_PROPERTY) 743 if (skb->ipvs_property)
743 return NF_ACCEPT; 744 return NF_ACCEPT;
744 745
745 iph = skb->nh.iph; 746 iph = skb->nh.iph;
@@ -821,7 +822,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
821 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); 822 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
822 ip_vs_conn_put(cp); 823 ip_vs_conn_put(cp);
823 824
824 skb->nfcache |= NFC_IPVS_PROPERTY; 825 skb->ipvs_property = 1;
825 826
826 LeaveFunction(11); 827 LeaveFunction(11);
827 return NF_ACCEPT; 828 return NF_ACCEPT;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 7d99ede2ef79..2d66848e7aa0 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -1598,7 +1598,7 @@ static ctl_table vs_table[] = {
1598 { .ctl_name = 0 } 1598 { .ctl_name = 0 }
1599}; 1599};
1600 1600
1601static ctl_table ipv4_table[] = { 1601static ctl_table ipvs_ipv4_table[] = {
1602 { 1602 {
1603 .ctl_name = NET_IPV4, 1603 .ctl_name = NET_IPV4,
1604 .procname = "ipv4", 1604 .procname = "ipv4",
@@ -1613,7 +1613,7 @@ static ctl_table vs_root_table[] = {
1613 .ctl_name = CTL_NET, 1613 .ctl_name = CTL_NET,
1614 .procname = "net", 1614 .procname = "net",
1615 .mode = 0555, 1615 .mode = 0555,
1616 .child = ipv4_table, 1616 .child = ipvs_ipv4_table,
1617 }, 1617 },
1618 { .ctl_name = 0 } 1618 { .ctl_name = 0 }
1619}; 1619};
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index c035838b780a..561cda326fa8 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -131,7 +131,7 @@ static ctl_table vs_table[] = {
131 { .ctl_name = 0 } 131 { .ctl_name = 0 }
132}; 132};
133 133
134static ctl_table ipv4_table[] = { 134static ctl_table ipvs_ipv4_table[] = {
135 { 135 {
136 .ctl_name = NET_IPV4, 136 .ctl_name = NET_IPV4,
137 .procname = "ipv4", 137 .procname = "ipv4",
@@ -146,7 +146,7 @@ static ctl_table lblc_root_table[] = {
146 .ctl_name = CTL_NET, 146 .ctl_name = CTL_NET,
147 .procname = "net", 147 .procname = "net",
148 .mode = 0555, 148 .mode = 0555,
149 .child = ipv4_table 149 .child = ipvs_ipv4_table
150 }, 150 },
151 { .ctl_name = 0 } 151 { .ctl_name = 0 }
152}; 152};
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 22b5dd55d271..ce456dbf09a5 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -320,7 +320,7 @@ static ctl_table vs_table[] = {
320 { .ctl_name = 0 } 320 { .ctl_name = 0 }
321}; 321};
322 322
323static ctl_table ipv4_table[] = { 323static ctl_table ipvs_ipv4_table[] = {
324 { 324 {
325 .ctl_name = NET_IPV4, 325 .ctl_name = NET_IPV4,
326 .procname = "ipv4", 326 .procname = "ipv4",
@@ -335,7 +335,7 @@ static ctl_table lblcr_root_table[] = {
335 .ctl_name = CTL_NET, 335 .ctl_name = CTL_NET,
336 .procname = "net", 336 .procname = "net",
337 .mode = 0555, 337 .mode = 0555,
338 .child = ipv4_table 338 .child = ipvs_ipv4_table
339 }, 339 },
340 { .ctl_name = 0 } 340 { .ctl_name = 0 }
341}; 341};
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index e65de675da74..c19408973c09 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -604,14 +604,14 @@ void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
604} 604}
605 605
606 606
607static void tcp_init(struct ip_vs_protocol *pp) 607static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
608{ 608{
609 IP_VS_INIT_HASH_TABLE(tcp_apps); 609 IP_VS_INIT_HASH_TABLE(tcp_apps);
610 pp->timeout_table = tcp_timeouts; 610 pp->timeout_table = tcp_timeouts;
611} 611}
612 612
613 613
614static void tcp_exit(struct ip_vs_protocol *pp) 614static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
615{ 615{
616} 616}
617 617
@@ -621,8 +621,8 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
621 .protocol = IPPROTO_TCP, 621 .protocol = IPPROTO_TCP,
622 .dont_defrag = 0, 622 .dont_defrag = 0,
623 .appcnt = ATOMIC_INIT(0), 623 .appcnt = ATOMIC_INIT(0),
624 .init = tcp_init, 624 .init = ip_vs_tcp_init,
625 .exit = tcp_exit, 625 .exit = ip_vs_tcp_exit,
626 .register_app = tcp_register_app, 626 .register_app = tcp_register_app,
627 .unregister_app = tcp_unregister_app, 627 .unregister_app = tcp_unregister_app,
628 .conn_schedule = tcp_conn_schedule, 628 .conn_schedule = tcp_conn_schedule,
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index a8512a3fd08a..3b87482049cf 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -127,7 +127,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
127 127
128#define IP_VS_XMIT(skb, rt) \ 128#define IP_VS_XMIT(skb, rt) \
129do { \ 129do { \
130 (skb)->nfcache |= NFC_IPVS_PROPERTY; \ 130 (skb)->ipvs_property = 1; \
131 (skb)->ip_summed = CHECKSUM_NONE; \ 131 (skb)->ip_summed = CHECKSUM_NONE; \
132 NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \ 132 NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \
133 (rt)->u.dst.dev, dst_output); \ 133 (rt)->u.dst.dev, dst_output); \
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
index c9cf8726051d..db67373f9b34 100644
--- a/net/ipv4/multipath_drr.c
+++ b/net/ipv4/multipath_drr.c
@@ -107,7 +107,7 @@ static int drr_dev_event(struct notifier_block *this,
107 return NOTIFY_DONE; 107 return NOTIFY_DONE;
108} 108}
109 109
110struct notifier_block drr_dev_notifier = { 110static struct notifier_block drr_dev_notifier = {
111 .notifier_call = drr_dev_event, 111 .notifier_call = drr_dev_event,
112}; 112};
113 113
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
new file mode 100644
index 000000000000..ae0779d82c5d
--- /dev/null
+++ b/net/ipv4/netfilter.c
@@ -0,0 +1,139 @@
1/* IPv4 specific functions of netfilter core */
2
3#include <linux/config.h>
4#ifdef CONFIG_NETFILTER
5
6#include <linux/kernel.h>
7#include <linux/netfilter.h>
8#include <linux/netfilter_ipv4.h>
9
10#include <linux/tcp.h>
11#include <linux/udp.h>
12#include <linux/icmp.h>
13#include <net/route.h>
14#include <linux/ip.h>
15
16/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
17int ip_route_me_harder(struct sk_buff **pskb)
18{
19 struct iphdr *iph = (*pskb)->nh.iph;
20 struct rtable *rt;
21 struct flowi fl = {};
22 struct dst_entry *odst;
23 unsigned int hh_len;
24
25 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
26 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
27 */
28 if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
29 fl.nl_u.ip4_u.daddr = iph->daddr;
30 fl.nl_u.ip4_u.saddr = iph->saddr;
31 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
32 fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
33#ifdef CONFIG_IP_ROUTE_FWMARK
34 fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
35#endif
36 fl.proto = iph->protocol;
37 if (ip_route_output_key(&rt, &fl) != 0)
38 return -1;
39
40 /* Drop old route. */
41 dst_release((*pskb)->dst);
42 (*pskb)->dst = &rt->u.dst;
43 } else {
44 /* non-local src, find valid iif to satisfy
45 * rp-filter when calling ip_route_input. */
46 fl.nl_u.ip4_u.daddr = iph->saddr;
47 if (ip_route_output_key(&rt, &fl) != 0)
48 return -1;
49
50 odst = (*pskb)->dst;
51 if (ip_route_input(*pskb, iph->daddr, iph->saddr,
52 RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
53 dst_release(&rt->u.dst);
54 return -1;
55 }
56 dst_release(&rt->u.dst);
57 dst_release(odst);
58 }
59
60 if ((*pskb)->dst->error)
61 return -1;
62
63 /* Change in oif may mean change in hh_len. */
64 hh_len = (*pskb)->dst->dev->hard_header_len;
65 if (skb_headroom(*pskb) < hh_len) {
66 struct sk_buff *nskb;
67
68 nskb = skb_realloc_headroom(*pskb, hh_len);
69 if (!nskb)
70 return -1;
71 if ((*pskb)->sk)
72 skb_set_owner_w(nskb, (*pskb)->sk);
73 kfree_skb(*pskb);
74 *pskb = nskb;
75 }
76
77 return 0;
78}
79EXPORT_SYMBOL(ip_route_me_harder);
80
81/*
 82 * Extra routing may be needed on local out, as the QUEUE target never
83 * returns control to the table.
84 */
85
86struct ip_rt_info {
87 u_int32_t daddr;
88 u_int32_t saddr;
89 u_int8_t tos;
90};
91
92static void queue_save(const struct sk_buff *skb, struct nf_info *info)
93{
94 struct ip_rt_info *rt_info = nf_info_reroute(info);
95
96 if (info->hook == NF_IP_LOCAL_OUT) {
97 const struct iphdr *iph = skb->nh.iph;
98
99 rt_info->tos = iph->tos;
100 rt_info->daddr = iph->daddr;
101 rt_info->saddr = iph->saddr;
102 }
103}
104
105static int queue_reroute(struct sk_buff **pskb, const struct nf_info *info)
106{
107 const struct ip_rt_info *rt_info = nf_info_reroute(info);
108
109 if (info->hook == NF_IP_LOCAL_OUT) {
110 struct iphdr *iph = (*pskb)->nh.iph;
111
112 if (!(iph->tos == rt_info->tos
113 && iph->daddr == rt_info->daddr
114 && iph->saddr == rt_info->saddr))
115 return ip_route_me_harder(pskb);
116 }
117 return 0;
118}
119
120static struct nf_queue_rerouter ip_reroute = {
121 .rer_size = sizeof(struct ip_rt_info),
122 .save = queue_save,
123 .reroute = queue_reroute,
124};
125
126static int init(void)
127{
128 return nf_register_queue_rerouter(PF_INET, &ip_reroute);
129}
130
131static void fini(void)
132{
133 nf_unregister_queue_rerouter(PF_INET);
134}
135
136module_init(init);
137module_exit(fini);
138
139#endif /* CONFIG_NETFILTER */
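
ip_route_me_harder() above is meant for hooks that change addressing after the routing decision has already been made. A minimal sketch of such a caller, assuming a hypothetical NF_IP_LOCAL_OUT hook function (the rewrite address is purely illustrative, not part of this patch):

#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>

/* Hypothetical hook: rewrite the destination, then redo routing. */
static unsigned int rewrite_and_reroute(unsigned int hooknum,
					struct sk_buff **pskb,
					const struct net_device *in,
					const struct net_device *out,
					int (*okfn)(struct sk_buff *))
{
	struct iphdr *iph = (*pskb)->nh.iph;

	iph->daddr = htonl(0x7f000001);	/* illustrative: 127.0.0.1 */
	ip_send_check(iph);		/* header changed, refresh checksum */

	/* The routing decision predates the rewrite; redo it. */
	if (ip_route_me_harder(pskb) != 0)
		return NF_DROP;

	return NF_ACCEPT;
}
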
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 46d4cb1c06f0..e046f5521814 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -40,6 +40,16 @@ config IP_NF_CONNTRACK_MARK
40 of packets, but this mark value is kept in the conntrack session 40 of packets, but this mark value is kept in the conntrack session
41 instead of the individual packets. 41 instead of the individual packets.
42 42
43config IP_NF_CONNTRACK_EVENTS
44 bool "Connection tracking events"
45 depends on IP_NF_CONNTRACK
46 help
47 If this option is enabled, the connection tracking code will
48 provide a notifier chain that can be used by other kernel code
49 to get notified about changes in the connection tracking state.
50
 51	  If unsure, say `N'.
52
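
A minimal sketch of a consumer of this notifier chain, assuming direct access to the ip_conntrack_chain pointer declared in ip_conntrack_core.c below and the IPCT_NEW event flag used elsewhere in this patch (the real series wraps registration in helper functions; the handler here is illustrative):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>

static int my_ct_event(struct notifier_block *nb,
		       unsigned long events, void *ptr)
{
	struct ip_conntrack *ct = ptr;

	if (events & IPCT_NEW)
		printk(KERN_DEBUG "new conntrack %p\n", ct);
	return NOTIFY_DONE;
}

static struct notifier_block my_ct_nb = {
	.notifier_call	= my_ct_event,
};

static int __init my_init(void)
{
	/* pre-2.6.17 notifier API, matching the raw chain above */
	return notifier_chain_register(&ip_conntrack_chain, &my_ct_nb);
}
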
43config IP_NF_CT_PROTO_SCTP 53config IP_NF_CT_PROTO_SCTP
44 tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' 54 tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
45 depends on IP_NF_CONNTRACK && EXPERIMENTAL 55 depends on IP_NF_CONNTRACK && EXPERIMENTAL
@@ -100,11 +110,15 @@ config IP_NF_AMANDA
100 To compile it as a module, choose M here. If unsure, say Y. 110 To compile it as a module, choose M here. If unsure, say Y.
101 111
102config IP_NF_QUEUE 112config IP_NF_QUEUE
103 tristate "Userspace queueing via NETLINK" 113 tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
104 help 114 help
105 Netfilter has the ability to queue packets to user space: the 115 Netfilter has the ability to queue packets to user space: the
106 netlink device can be used to access them using this driver. 116 netlink device can be used to access them using this driver.
107 117
118 This option enables the old IPv4-only "ip_queue" implementation
119 which has been obsoleted by the new "nfnetlink_queue" code (see
120 CONFIG_NETFILTER_NETLINK_QUEUE).
121
108 To compile it as a module, choose M here. If unsure, say N. 122 To compile it as a module, choose M here. If unsure, say N.
109 123
110config IP_NF_IPTABLES 124config IP_NF_IPTABLES
@@ -340,6 +354,17 @@ config IP_NF_MATCH_SCTP
340 If you want to compile it as a module, say M here and read 354 If you want to compile it as a module, say M here and read
341 <file:Documentation/modules.txt>. If unsure, say `N'. 355 <file:Documentation/modules.txt>. If unsure, say `N'.
342 356
357config IP_NF_MATCH_DCCP
358 tristate 'DCCP protocol match support'
359 depends on IP_NF_IPTABLES
360 help
361 With this option enabled, you will be able to use the iptables
362 `dccp' match in order to match on DCCP source/destination ports
363 and DCCP flags.
364
365 If you want to compile it as a module, say M here and read
366 <file:Documentation/modules.txt>. If unsure, say `N'.
367
343config IP_NF_MATCH_COMMENT 368config IP_NF_MATCH_COMMENT
344 tristate 'comment match support' 369 tristate 'comment match support'
345 depends on IP_NF_IPTABLES 370 depends on IP_NF_IPTABLES
@@ -361,6 +386,16 @@ config IP_NF_MATCH_CONNMARK
361 <file:Documentation/modules.txt>. The module will be called 386 <file:Documentation/modules.txt>. The module will be called
362 ipt_connmark.o. If unsure, say `N'. 387 ipt_connmark.o. If unsure, say `N'.
363 388
389config IP_NF_MATCH_CONNBYTES
390 tristate 'Connection byte/packet counter match support'
391 depends on IP_NF_CT_ACCT && IP_NF_IPTABLES
392 help
393 This option adds a `connbytes' match, which allows you to match the
394 number of bytes and/or packets for each direction within a connection.
395
396 If you want to compile it as a module, say M here and read
397 <file:Documentation/modules.txt>. If unsure, say `N'.
398
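
A hedged sketch of the kind of test such a match performs, reading the per-direction ct->counters[] that IP_NF_CT_ACCT maintains (the same fields ctnetlink_dump_counters() serializes later in this patch); the byte budget is illustrative:

#include <linux/netfilter_ipv4/ip_conntrack.h>

/* Illustrative: has this connection exceeded a byte budget? */
static int conn_over_byte_limit(const struct ip_conntrack *ct,
				u_int64_t limit)
{
	u_int64_t total;

	total = ct->counters[IP_CT_DIR_ORIGINAL].bytes +
		ct->counters[IP_CT_DIR_REPLY].bytes;
	return total > limit;
}
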
364config IP_NF_MATCH_HASHLIMIT 399config IP_NF_MATCH_HASHLIMIT
365 tristate 'hashlimit match support' 400 tristate 'hashlimit match support'
366 depends on IP_NF_IPTABLES 401 depends on IP_NF_IPTABLES
@@ -375,6 +410,19 @@ config IP_NF_MATCH_HASHLIMIT
375 destination IP' or `500pps from any given source IP' with a single 410 destination IP' or `500pps from any given source IP' with a single
376 IPtables rule. 411 IPtables rule.
377 412
413config IP_NF_MATCH_STRING
414 tristate 'string match support'
415 depends on IP_NF_IPTABLES
416 select TEXTSEARCH
417 select TEXTSEARCH_KMP
418 select TEXTSEARCH_BM
419 select TEXTSEARCH_FSM
420 help
421 This option adds a `string' match, which allows you to look for
422	  string patterns in packets.
423
424 To compile it as a module, choose M here. If unsure, say N.
425
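
A hedged sketch of the textsearch infrastructure this option selects, used the way a string match would: prepare a pattern once, then scan the skb. The pattern, the "bm" algorithm name and the search window are illustrative:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>

static int skb_contains_pattern(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("bm", "GET /", 5, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return 0;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);

	/* skb_find_text() returns UINT_MAX when nothing matched */
	return pos != UINT_MAX;
}
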
378# `filter', generic and specific targets 426# `filter', generic and specific targets
379config IP_NF_FILTER 427config IP_NF_FILTER
380 tristate "Packet filtering" 428 tristate "Packet filtering"
@@ -616,6 +664,20 @@ config IP_NF_TARGET_CLASSIFY
616 664
617 To compile it as a module, choose M here. If unsure, say N. 665 To compile it as a module, choose M here. If unsure, say N.
618 666
667config IP_NF_TARGET_TTL
668 tristate 'TTL target support'
669 depends on IP_NF_MANGLE
670 help
671 This option adds a `TTL' target, which enables the user to modify
672 the TTL value of the IP header.
673
674 While it is safe to decrement/lower the TTL, this target also enables
675 functionality to increment and set the TTL value of the IP header to
676 arbitrary values. This is EXTREMELY DANGEROUS since you can easily
677 create immortal packets that loop forever on the network.
678
679 To compile it as a module, choose M here. If unsure, say N.
680
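
The core of such a rewrite is small; the important detail is that the IP header checksum covers the TTL field, so it must be refreshed after any change. A minimal sketch using a full recompute via ip_send_check(), which this patch also uses in ip_ct_gather_frags() (the in-tree target can use an incremental checksum update instead):

#include <linux/ip.h>
#include <net/ip.h>

/* Illustrative: set the TTL and refresh the header checksum. */
static void set_ttl(struct iphdr *iph, u_int8_t new_ttl)
{
	if (iph->ttl != new_ttl) {
		iph->ttl = new_ttl;
		ip_send_check(iph);	/* TTL is covered by the checksum */
	}
}
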
619config IP_NF_TARGET_CONNMARK 681config IP_NF_TARGET_CONNMARK
620 tristate 'CONNMARK target support' 682 tristate 'CONNMARK target support'
621 depends on IP_NF_CONNTRACK_MARK && IP_NF_MANGLE 683 depends on IP_NF_CONNTRACK_MARK && IP_NF_MANGLE
@@ -692,5 +754,11 @@ config IP_NF_ARP_MANGLE
692 Allows altering the ARP packet payload: source and destination 754 Allows altering the ARP packet payload: source and destination
693 hardware and network addresses. 755 hardware and network addresses.
694 756
757config IP_NF_CONNTRACK_NETLINK
758 tristate 'Connection tracking netlink interface'
759 depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
760 help
761	  This option enables support for a netlink-based userspace interface to the connection tracking subsystem.
762
695endmenu 763endmenu
696 764
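
The conntrack netlink interface enabled by IP_NF_CONNTRACK_NETLINK above is reached from userspace over an AF_NETLINK socket of the NETLINK_NETFILTER protocol. A rough userspace sketch of subscribing to new-connection events, assuming NETLINK_NETFILTER == 12 and NFNLGRP_CONNTRACK_NEW == 1 as in the headers of this era; error handling is elided:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_NETFILTER
#define NETLINK_NETFILTER 12		/* assumed: see linux/netlink.h */
#endif

int open_ctnetlink(void)
{
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	/* group bit for NFNLGRP_CONNTRACK_NEW (assumed == 1) */
	snl.nl_groups = 1UL << (1 - 1);
	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
	return fd;
}
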
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 45796d5924dd..a7bd38f50522 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -9,6 +9,10 @@ iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helpe
9# connection tracking 9# connection tracking
10obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o 10obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
11 11
12# conntrack netlink interface
13obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o
14
15
12# SCTP protocol connection tracking 16# SCTP protocol connection tracking
13obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o 17obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o
14 18
@@ -38,6 +42,7 @@ obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
38obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o 42obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
39obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o 43obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
40obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o 44obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
45obj-$(CONFIG_IP_NF_MATCH_DCCP) += ipt_dccp.o
41obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o 46obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
42obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o 47obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
43obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o 48obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
@@ -54,11 +59,13 @@ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
54obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o 59obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
55obj-$(CONFIG_IP_NF_MATCH_CONNMARK) += ipt_connmark.o 60obj-$(CONFIG_IP_NF_MATCH_CONNMARK) += ipt_connmark.o
56obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o 61obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
62obj-$(CONFIG_IP_NF_MATCH_CONNBYTES) += ipt_connbytes.o
57obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o 63obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
58obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o 64obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
59obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o 65obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
60obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o 66obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
61obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o 67obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
68obj-$(CONFIG_IP_NF_MATCH_STRING) += ipt_string.o
62 69
63# targets 70# targets
64obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o 71obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
@@ -78,6 +85,7 @@ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
78obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o 85obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
79obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o 86obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
80obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 87obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
88obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
81 89
82# generic ARP tables 90# generic ARP tables
83obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o 91obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
@@ -87,3 +95,4 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
87obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o 95obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
88 96
89obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o 97obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
98obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ipt_NFQUEUE.o
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index 01e1b58322a9..be4c9eb3243f 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(master_timeout, "timeout for the master connection");
40static char *conns[] = { "DATA ", "MESG ", "INDEX " }; 40static char *conns[] = { "DATA ", "MESG ", "INDEX " };
41 41
42/* This is slow, but it's simple. --RR */ 42/* This is slow, but it's simple. --RR */
43static char amanda_buffer[65536]; 43static char *amanda_buffer;
44static DEFINE_SPINLOCK(amanda_buffer_lock); 44static DEFINE_SPINLOCK(amanda_buffer_lock);
45 45
46unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb, 46unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb,
@@ -153,11 +153,23 @@ static struct ip_conntrack_helper amanda_helper = {
153static void __exit fini(void) 153static void __exit fini(void)
154{ 154{
155 ip_conntrack_helper_unregister(&amanda_helper); 155 ip_conntrack_helper_unregister(&amanda_helper);
156 kfree(amanda_buffer);
156} 157}
157 158
158static int __init init(void) 159static int __init init(void)
159{ 160{
160 return ip_conntrack_helper_register(&amanda_helper); 161 int ret;
162
163 amanda_buffer = kmalloc(65536, GFP_KERNEL);
164 if (!amanda_buffer)
165 return -ENOMEM;
166
167 ret = ip_conntrack_helper_register(&amanda_helper);
168 if (ret < 0) {
169 kfree(amanda_buffer);
170 return ret;
171 }
172 return 0;
161} 173}
162 174
163module_init(init); 175module_init(init);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index a7f0c821a9b2..a0648600190e 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -37,6 +37,7 @@
37#include <linux/err.h> 37#include <linux/err.h>
38#include <linux/percpu.h> 38#include <linux/percpu.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/notifier.h>
40 41
41/* ip_conntrack_lock protects the main hash table, protocol/helper/expected 42/* ip_conntrack_lock protects the main hash table, protocol/helper/expected
42 registrations, conntrack timers*/ 43 registrations, conntrack timers*/
@@ -49,7 +50,7 @@
49#include <linux/netfilter_ipv4/ip_conntrack_core.h> 50#include <linux/netfilter_ipv4/ip_conntrack_core.h>
50#include <linux/netfilter_ipv4/listhelp.h> 51#include <linux/netfilter_ipv4/listhelp.h>
51 52
52#define IP_CONNTRACK_VERSION "2.1" 53#define IP_CONNTRACK_VERSION "2.3"
53 54
54#if 0 55#if 0
55#define DEBUGP printk 56#define DEBUGP printk
@@ -69,22 +70,81 @@ static LIST_HEAD(helpers);
69unsigned int ip_conntrack_htable_size = 0; 70unsigned int ip_conntrack_htable_size = 0;
70int ip_conntrack_max; 71int ip_conntrack_max;
71struct list_head *ip_conntrack_hash; 72struct list_head *ip_conntrack_hash;
72static kmem_cache_t *ip_conntrack_cachep; 73static kmem_cache_t *ip_conntrack_cachep __read_mostly;
73static kmem_cache_t *ip_conntrack_expect_cachep; 74static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
74struct ip_conntrack ip_conntrack_untracked; 75struct ip_conntrack ip_conntrack_untracked;
75unsigned int ip_ct_log_invalid; 76unsigned int ip_ct_log_invalid;
76static LIST_HEAD(unconfirmed); 77static LIST_HEAD(unconfirmed);
77static int ip_conntrack_vmalloc; 78static int ip_conntrack_vmalloc;
78 79
79DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); 80static unsigned int ip_conntrack_next_id = 1;
81static unsigned int ip_conntrack_expect_next_id = 1;
82#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
83struct notifier_block *ip_conntrack_chain;
84struct notifier_block *ip_conntrack_expect_chain;
85
86DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
80 87
81void 88/* deliver cached events and clear cache entry - must be called with locally
82ip_conntrack_put(struct ip_conntrack *ct) 89 * disabled softirqs */
90static inline void
91__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
83{ 92{
84 IP_NF_ASSERT(ct); 93 DEBUGP("ecache: delivering events for %p\n", ecache->ct);
85 nf_conntrack_put(&ct->ct_general); 94 if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
95 notifier_call_chain(&ip_conntrack_chain, ecache->events,
96 ecache->ct);
97 ecache->events = 0;
98 ip_conntrack_put(ecache->ct);
99 ecache->ct = NULL;
86} 100}
87 101
102/* Deliver all cached events for a particular conntrack. This is called
103 * by code prior to async packet handling or freeing the skb */
104void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
105{
106 struct ip_conntrack_ecache *ecache;
107
108 local_bh_disable();
109 ecache = &__get_cpu_var(ip_conntrack_ecache);
110 if (ecache->ct == ct)
111 __ip_ct_deliver_cached_events(ecache);
112 local_bh_enable();
113}
114
115void __ip_ct_event_cache_init(struct ip_conntrack *ct)
116{
117 struct ip_conntrack_ecache *ecache;
118
119 /* take care of delivering potentially old events */
120 ecache = &__get_cpu_var(ip_conntrack_ecache);
121 BUG_ON(ecache->ct == ct);
122 if (ecache->ct)
123 __ip_ct_deliver_cached_events(ecache);
124 /* initialize for this conntrack/packet */
125 ecache->ct = ct;
126 nf_conntrack_get(&ct->ct_general);
127}
128
129/* flush the event cache - touches other CPUs' data and must not be called while
130 * packets are still passing through the code */
131static void ip_ct_event_cache_flush(void)
132{
133 struct ip_conntrack_ecache *ecache;
134 int cpu;
135
136 for_each_cpu(cpu) {
137 ecache = &per_cpu(ip_conntrack_ecache, cpu);
138 if (ecache->ct)
139 ip_conntrack_put(ecache->ct);
140 }
141}
142#else
143static inline void ip_ct_event_cache_flush(void) {}
144#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
145
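
A hedged sketch of the producer side of this cache: packet-path code records events against the skb's conntrack with ip_conntrack_event_cache(), as the confirm and refresh paths do later in this patch, and the cached bits are pushed onto ip_conntrack_chain by __ip_ct_deliver_cached_events(). The wrapper function here is illustrative:

/* Illustrative caller in the packet path. */
static void note_status_change(struct sk_buff *skb)
{
	/* Queues IPCT_STATUS in this CPU's ip_conntrack_ecache entry;
	 * it is delivered later, with softirqs disabled. */
	ip_conntrack_event_cache(IPCT_STATUS, skb);
}
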
146DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
147
88static int ip_conntrack_hash_rnd_initted; 148static int ip_conntrack_hash_rnd_initted;
89static unsigned int ip_conntrack_hash_rnd; 149static unsigned int ip_conntrack_hash_rnd;
90 150
@@ -144,6 +204,13 @@ static void unlink_expect(struct ip_conntrack_expect *exp)
144 list_del(&exp->list); 204 list_del(&exp->list);
145 CONNTRACK_STAT_INC(expect_delete); 205 CONNTRACK_STAT_INC(expect_delete);
146 exp->master->expecting--; 206 exp->master->expecting--;
207 ip_conntrack_expect_put(exp);
208}
209
210void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp)
211{
212 unlink_expect(exp);
213 ip_conntrack_expect_put(exp);
147} 214}
148 215
149static void expectation_timed_out(unsigned long ul_expect) 216static void expectation_timed_out(unsigned long ul_expect)
@@ -156,6 +223,33 @@ static void expectation_timed_out(unsigned long ul_expect)
156 ip_conntrack_expect_put(exp); 223 ip_conntrack_expect_put(exp);
157} 224}
158 225
226struct ip_conntrack_expect *
227__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
228{
229 struct ip_conntrack_expect *i;
230
231 list_for_each_entry(i, &ip_conntrack_expect_list, list) {
232 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
233 atomic_inc(&i->use);
234 return i;
235 }
236 }
237 return NULL;
238}
239
240/* Just find a expectation corresponding to a tuple. */
241struct ip_conntrack_expect *
242ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
243{
244 struct ip_conntrack_expect *i;
245
246 read_lock_bh(&ip_conntrack_lock);
247 i = __ip_conntrack_expect_find(tuple);
248 read_unlock_bh(&ip_conntrack_lock);
249
250 return i;
251}
252
159/* If an expectation for this connection is found, it gets deleted from 253/* If an expectation for this connection is found, it gets deleted from
160 * the global list and then returned. */ 254 * the global list and then returned. */
161static struct ip_conntrack_expect * 255static struct ip_conntrack_expect *
@@ -180,7 +274,7 @@ find_expectation(const struct ip_conntrack_tuple *tuple)
180} 274}
181 275
182/* delete all expectations for this conntrack */ 276/* delete all expectations for this conntrack */
183static void remove_expectations(struct ip_conntrack *ct) 277void ip_ct_remove_expectations(struct ip_conntrack *ct)
184{ 278{
185 struct ip_conntrack_expect *i, *tmp; 279 struct ip_conntrack_expect *i, *tmp;
186 280
@@ -210,7 +304,7 @@ clean_from_lists(struct ip_conntrack *ct)
210 LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]); 304 LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
211 305
212 /* Destroy all pending expectations */ 306 /* Destroy all pending expectations */
213 remove_expectations(ct); 307 ip_ct_remove_expectations(ct);
214} 308}
215 309
216static void 310static void
@@ -223,10 +317,13 @@ destroy_conntrack(struct nf_conntrack *nfct)
223 IP_NF_ASSERT(atomic_read(&nfct->use) == 0); 317 IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
224 IP_NF_ASSERT(!timer_pending(&ct->timeout)); 318 IP_NF_ASSERT(!timer_pending(&ct->timeout));
225 319
320 ip_conntrack_event(IPCT_DESTROY, ct);
321 set_bit(IPS_DYING_BIT, &ct->status);
322
226 /* To make sure we don't get any weird locking issues here: 323 /* To make sure we don't get any weird locking issues here:
227 * destroy_conntrack() MUST NOT be called with a write lock 324 * destroy_conntrack() MUST NOT be called with a write lock
228 * to ip_conntrack_lock!!! -HW */ 325 * to ip_conntrack_lock!!! -HW */
229 proto = ip_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); 326 proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
230 if (proto && proto->destroy) 327 if (proto && proto->destroy)
231 proto->destroy(ct); 328 proto->destroy(ct);
232 329
@@ -238,7 +335,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
238 * except TFTP can create an expectation on the first packet, 335 * except TFTP can create an expectation on the first packet,
239 * before connection is in the list, so we need to clean here, 336 * before connection is in the list, so we need to clean here,
240 * too. */ 337 * too. */
241 remove_expectations(ct); 338 ip_ct_remove_expectations(ct);
242 339
243 /* We overload first tuple to link into unconfirmed list. */ 340 /* We overload first tuple to link into unconfirmed list. */
244 if (!is_confirmed(ct)) { 341 if (!is_confirmed(ct)) {
@@ -253,8 +350,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
253 ip_conntrack_put(ct->master); 350 ip_conntrack_put(ct->master);
254 351
255 DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct); 352 DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
256 kmem_cache_free(ip_conntrack_cachep, ct); 353 ip_conntrack_free(ct);
257 atomic_dec(&ip_conntrack_count);
258} 354}
259 355
260static void death_by_timeout(unsigned long ul_conntrack) 356static void death_by_timeout(unsigned long ul_conntrack)
@@ -280,7 +376,7 @@ conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
280 && ip_ct_tuple_equal(tuple, &i->tuple); 376 && ip_ct_tuple_equal(tuple, &i->tuple);
281} 377}
282 378
283static struct ip_conntrack_tuple_hash * 379struct ip_conntrack_tuple_hash *
284__ip_conntrack_find(const struct ip_conntrack_tuple *tuple, 380__ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
285 const struct ip_conntrack *ignored_conntrack) 381 const struct ip_conntrack *ignored_conntrack)
286{ 382{
@@ -315,6 +411,29 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
315 return h; 411 return h;
316} 412}
317 413
414static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
415 unsigned int hash,
416 unsigned int repl_hash)
417{
418 ct->id = ++ip_conntrack_next_id;
419 list_prepend(&ip_conntrack_hash[hash],
420 &ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
421 list_prepend(&ip_conntrack_hash[repl_hash],
422 &ct->tuplehash[IP_CT_DIR_REPLY].list);
423}
424
425void ip_conntrack_hash_insert(struct ip_conntrack *ct)
426{
427 unsigned int hash, repl_hash;
428
429 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
430 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
431
432 write_lock_bh(&ip_conntrack_lock);
433 __ip_conntrack_hash_insert(ct, hash, repl_hash);
434 write_unlock_bh(&ip_conntrack_lock);
435}
436
318/* Confirm a connection given skb; places it in hash table */ 437/* Confirm a connection given skb; places it in hash table */
319int 438int
320__ip_conntrack_confirm(struct sk_buff **pskb) 439__ip_conntrack_confirm(struct sk_buff **pskb)
@@ -361,10 +480,7 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
361 /* Remove from unconfirmed list */ 480 /* Remove from unconfirmed list */
362 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); 481 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
363 482
364 list_prepend(&ip_conntrack_hash[hash], 483 __ip_conntrack_hash_insert(ct, hash, repl_hash);
365 &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
366 list_prepend(&ip_conntrack_hash[repl_hash],
367 &ct->tuplehash[IP_CT_DIR_REPLY]);
368 /* Timer relative to confirmation time, not original 484 /* Timer relative to confirmation time, not original
369 setting time, otherwise we'd get timer wrap in 485 setting time, otherwise we'd get timer wrap in
370 weird delay cases. */ 486 weird delay cases. */
@@ -374,6 +490,16 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
374 set_bit(IPS_CONFIRMED_BIT, &ct->status); 490 set_bit(IPS_CONFIRMED_BIT, &ct->status);
375 CONNTRACK_STAT_INC(insert); 491 CONNTRACK_STAT_INC(insert);
376 write_unlock_bh(&ip_conntrack_lock); 492 write_unlock_bh(&ip_conntrack_lock);
493 if (ct->helper)
494 ip_conntrack_event_cache(IPCT_HELPER, *pskb);
495#ifdef CONFIG_IP_NF_NAT_NEEDED
496 if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
497 test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
498 ip_conntrack_event_cache(IPCT_NATINFO, *pskb);
499#endif
500 ip_conntrack_event_cache(master_ct(ct) ?
501 IPCT_RELATED : IPCT_NEW, *pskb);
502
377 return NF_ACCEPT; 503 return NF_ACCEPT;
378 } 504 }
379 505
@@ -438,34 +564,84 @@ static inline int helper_cmp(const struct ip_conntrack_helper *i,
438 return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask); 564 return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
439} 565}
440 566
441static struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple) 567static struct ip_conntrack_helper *
568__ip_conntrack_helper_find(const struct ip_conntrack_tuple *tuple)
442{ 569{
443 return LIST_FIND(&helpers, helper_cmp, 570 return LIST_FIND(&helpers, helper_cmp,
444 struct ip_conntrack_helper *, 571 struct ip_conntrack_helper *,
445 tuple); 572 tuple);
446} 573}
447 574
448/* Allocate a new conntrack: we return -ENOMEM if classification 575struct ip_conntrack_helper *
449 failed due to stress. Otherwise it really is unclassifiable. */ 576ip_conntrack_helper_find_get(const struct ip_conntrack_tuple *tuple)
450static struct ip_conntrack_tuple_hash * 577{
451init_conntrack(const struct ip_conntrack_tuple *tuple, 578 struct ip_conntrack_helper *helper;
452 struct ip_conntrack_protocol *protocol, 579
453 struct sk_buff *skb) 580 /* need ip_conntrack_lock to assure that helper exists until
581 * try_module_get() is called */
582 read_lock_bh(&ip_conntrack_lock);
583
584 helper = __ip_conntrack_helper_find(tuple);
585 if (helper) {
586 /* need to increase module usage count to assure helper will
587 * not go away while the caller is e.g. busy putting a
588 * conntrack in the hash that uses the helper */
589 if (!try_module_get(helper->me))
590 helper = NULL;
591 }
592
593 read_unlock_bh(&ip_conntrack_lock);
594
595 return helper;
596}
597
598void ip_conntrack_helper_put(struct ip_conntrack_helper *helper)
599{
600 module_put(helper->me);
601}
602
603struct ip_conntrack_protocol *
604__ip_conntrack_proto_find(u_int8_t protocol)
605{
606 return ip_ct_protos[protocol];
607}
608
609/* this is guaranteed to always return a valid protocol helper, since
610 * it falls back to generic_protocol */
611struct ip_conntrack_protocol *
612ip_conntrack_proto_find_get(u_int8_t protocol)
613{
614 struct ip_conntrack_protocol *p;
615
616 preempt_disable();
617 p = __ip_conntrack_proto_find(protocol);
618 if (p) {
619 if (!try_module_get(p->me))
620 p = &ip_conntrack_generic_protocol;
621 }
622 preempt_enable();
623
624 return p;
625}
626
627void ip_conntrack_proto_put(struct ip_conntrack_protocol *p)
628{
629 module_put(p->me);
630}
631
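
The intended pattern for these lookup helpers is get/use/put, holding the protocol module's reference across the use. A minimal sketch (IPPROTO_TCP is just an example):

#include <linux/in.h>

static void inspect_tcp_proto(void)
{
	struct ip_conntrack_protocol *p;

	p = ip_conntrack_proto_find_get(IPPROTO_TCP);
	/* p is usable here: the protocol module cannot unload */
	ip_conntrack_proto_put(p);
}
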
632struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
633 struct ip_conntrack_tuple *repl)
454{ 634{
455 struct ip_conntrack *conntrack; 635 struct ip_conntrack *conntrack;
456 struct ip_conntrack_tuple repl_tuple;
457 size_t hash;
458 struct ip_conntrack_expect *exp;
459 636
460 if (!ip_conntrack_hash_rnd_initted) { 637 if (!ip_conntrack_hash_rnd_initted) {
461 get_random_bytes(&ip_conntrack_hash_rnd, 4); 638 get_random_bytes(&ip_conntrack_hash_rnd, 4);
462 ip_conntrack_hash_rnd_initted = 1; 639 ip_conntrack_hash_rnd_initted = 1;
463 } 640 }
464 641
465 hash = hash_conntrack(tuple);
466
467 if (ip_conntrack_max 642 if (ip_conntrack_max
468 && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) { 643 && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
644 unsigned int hash = hash_conntrack(orig);
469 /* Try dropping from this hash chain. */ 645 /* Try dropping from this hash chain. */
470 if (!early_drop(&ip_conntrack_hash[hash])) { 646 if (!early_drop(&ip_conntrack_hash[hash])) {
471 if (net_ratelimit()) 647 if (net_ratelimit())
@@ -476,11 +652,6 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
476 } 652 }
477 } 653 }
478 654
479 if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) {
480 DEBUGP("Can't invert tuple.\n");
481 return NULL;
482 }
483
484 conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC); 655 conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
485 if (!conntrack) { 656 if (!conntrack) {
486 DEBUGP("Can't allocate conntrack.\n"); 657 DEBUGP("Can't allocate conntrack.\n");
@@ -490,17 +661,50 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
490 memset(conntrack, 0, sizeof(*conntrack)); 661 memset(conntrack, 0, sizeof(*conntrack));
491 atomic_set(&conntrack->ct_general.use, 1); 662 atomic_set(&conntrack->ct_general.use, 1);
492 conntrack->ct_general.destroy = destroy_conntrack; 663 conntrack->ct_general.destroy = destroy_conntrack;
493 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple; 664 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
494 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple; 665 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
495 if (!protocol->new(conntrack, skb)) {
496 kmem_cache_free(ip_conntrack_cachep, conntrack);
497 return NULL;
498 }
499 /* Don't set timer yet: wait for confirmation */ 666 /* Don't set timer yet: wait for confirmation */
500 init_timer(&conntrack->timeout); 667 init_timer(&conntrack->timeout);
501 conntrack->timeout.data = (unsigned long)conntrack; 668 conntrack->timeout.data = (unsigned long)conntrack;
502 conntrack->timeout.function = death_by_timeout; 669 conntrack->timeout.function = death_by_timeout;
503 670
671 atomic_inc(&ip_conntrack_count);
672
673 return conntrack;
674}
675
676void
677ip_conntrack_free(struct ip_conntrack *conntrack)
678{
679 atomic_dec(&ip_conntrack_count);
680 kmem_cache_free(ip_conntrack_cachep, conntrack);
681}
682
683/* Allocate a new conntrack: we return -ENOMEM if classification
684 * failed due to stress. Otherwise it really is unclassifiable */
685static struct ip_conntrack_tuple_hash *
686init_conntrack(struct ip_conntrack_tuple *tuple,
687 struct ip_conntrack_protocol *protocol,
688 struct sk_buff *skb)
689{
690 struct ip_conntrack *conntrack;
691 struct ip_conntrack_tuple repl_tuple;
692 struct ip_conntrack_expect *exp;
693
694 if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) {
695 DEBUGP("Can't invert tuple.\n");
696 return NULL;
697 }
698
699 conntrack = ip_conntrack_alloc(tuple, &repl_tuple);
700 if (conntrack == NULL || IS_ERR(conntrack))
701 return (struct ip_conntrack_tuple_hash *)conntrack;
702
703 if (!protocol->new(conntrack, skb)) {
704 ip_conntrack_free(conntrack);
705 return NULL;
706 }
707
504 write_lock_bh(&ip_conntrack_lock); 708 write_lock_bh(&ip_conntrack_lock);
505 exp = find_expectation(tuple); 709 exp = find_expectation(tuple);
506 710
@@ -521,7 +725,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
521 nf_conntrack_get(&conntrack->master->ct_general); 725 nf_conntrack_get(&conntrack->master->ct_general);
522 CONNTRACK_STAT_INC(expect_new); 726 CONNTRACK_STAT_INC(expect_new);
523 } else { 727 } else {
524 conntrack->helper = ip_ct_find_helper(&repl_tuple); 728 conntrack->helper = __ip_conntrack_helper_find(&repl_tuple);
525 729
526 CONNTRACK_STAT_INC(new); 730 CONNTRACK_STAT_INC(new);
527 } 731 }
@@ -529,7 +733,6 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
529 /* Overload tuple linked list to put us in unconfirmed list. */ 733 /* Overload tuple linked list to put us in unconfirmed list. */
530 list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); 734 list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
531 735
532 atomic_inc(&ip_conntrack_count);
533 write_unlock_bh(&ip_conntrack_lock); 736 write_unlock_bh(&ip_conntrack_lock);
534 737
535 if (exp) { 738 if (exp) {
@@ -607,7 +810,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
607 struct ip_conntrack *ct; 810 struct ip_conntrack *ct;
608 enum ip_conntrack_info ctinfo; 811 enum ip_conntrack_info ctinfo;
609 struct ip_conntrack_protocol *proto; 812 struct ip_conntrack_protocol *proto;
610 int set_reply; 813 int set_reply = 0;
611 int ret; 814 int ret;
612 815
613 /* Previously seen (loopback or untracked)? Ignore. */ 816 /* Previously seen (loopback or untracked)? Ignore. */
@@ -625,9 +828,6 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
625 return NF_DROP; 828 return NF_DROP;
626 } 829 }
627 830
628 /* FIXME: Do this right please. --RR */
629 (*pskb)->nfcache |= NFC_UNKNOWN;
630
631/* Doesn't cover locally-generated broadcast, so not worth it. */ 831/* Doesn't cover locally-generated broadcast, so not worth it. */
632#if 0 832#if 0
633 /* Ignore broadcast: no `connection'. */ 833 /* Ignore broadcast: no `connection'. */
@@ -643,7 +843,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
643 } 843 }
644#endif 844#endif
645 845
646 proto = ip_ct_find_proto((*pskb)->nh.iph->protocol); 846 proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);
647 847
648 /* It may be an special packet, error, unclean... 848 /* It may be an special packet, error, unclean...
649 * inverse of the return code tells to the netfilter 849 * inverse of the return code tells to the netfilter
@@ -679,8 +879,8 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
679 return -ret; 879 return -ret;
680 } 880 }
681 881
682 if (set_reply) 882 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
683 set_bit(IPS_SEEN_REPLY_BIT, &ct->status); 883 ip_conntrack_event_cache(IPCT_STATUS, *pskb);
684 884
685 return ret; 885 return ret;
686} 886}
@@ -689,7 +889,7 @@ int invert_tuplepr(struct ip_conntrack_tuple *inverse,
689 const struct ip_conntrack_tuple *orig) 889 const struct ip_conntrack_tuple *orig)
690{ 890{
691 return ip_ct_invert_tuple(inverse, orig, 891 return ip_ct_invert_tuple(inverse, orig,
692 ip_ct_find_proto(orig->dst.protonum)); 892 __ip_conntrack_proto_find(orig->dst.protonum));
693} 893}
694 894
695/* Would two expected things clash? */ 895/* Would two expected things clash? */
@@ -769,6 +969,8 @@ static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
769 exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ; 969 exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
770 add_timer(&exp->timeout); 970 add_timer(&exp->timeout);
771 971
972 exp->id = ++ip_conntrack_expect_next_id;
973 atomic_inc(&exp->use);
772 CONNTRACK_STAT_INC(expect_create); 974 CONNTRACK_STAT_INC(expect_create);
773} 975}
774 976
@@ -827,6 +1029,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
827 evict_oldest_expect(expect->master); 1029 evict_oldest_expect(expect->master);
828 1030
829 ip_conntrack_expect_insert(expect); 1031 ip_conntrack_expect_insert(expect);
1032 ip_conntrack_expect_event(IPEXP_NEW, expect);
830 ret = 0; 1033 ret = 0;
831out: 1034out:
832 write_unlock_bh(&ip_conntrack_lock); 1035 write_unlock_bh(&ip_conntrack_lock);
@@ -847,7 +1050,7 @@ void ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
847 1050
848 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; 1051 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
849 if (!conntrack->master && conntrack->expecting == 0) 1052 if (!conntrack->master && conntrack->expecting == 0)
850 conntrack->helper = ip_ct_find_helper(newreply); 1053 conntrack->helper = __ip_conntrack_helper_find(newreply);
851 write_unlock_bh(&ip_conntrack_lock); 1054 write_unlock_bh(&ip_conntrack_lock);
852} 1055}
853 1056
@@ -861,11 +1064,26 @@ int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
861 return 0; 1064 return 0;
862} 1065}
863 1066
1067struct ip_conntrack_helper *
1068__ip_conntrack_helper_find_byname(const char *name)
1069{
1070 struct ip_conntrack_helper *h;
1071
1072 list_for_each_entry(h, &helpers, list) {
1073 if (!strcmp(h->name, name))
1074 return h;
1075 }
1076
1077 return NULL;
1078}
1079
864static inline int unhelp(struct ip_conntrack_tuple_hash *i, 1080static inline int unhelp(struct ip_conntrack_tuple_hash *i,
865 const struct ip_conntrack_helper *me) 1081 const struct ip_conntrack_helper *me)
866{ 1082{
867 if (tuplehash_to_ctrack(i)->helper == me) 1083 if (tuplehash_to_ctrack(i)->helper == me) {
1084 ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
868 tuplehash_to_ctrack(i)->helper = NULL; 1085 tuplehash_to_ctrack(i)->helper = NULL;
1086 }
869 return 0; 1087 return 0;
870} 1088}
871 1089
@@ -927,12 +1145,46 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
927 if (del_timer(&ct->timeout)) { 1145 if (del_timer(&ct->timeout)) {
928 ct->timeout.expires = jiffies + extra_jiffies; 1146 ct->timeout.expires = jiffies + extra_jiffies;
929 add_timer(&ct->timeout); 1147 add_timer(&ct->timeout);
1148 ip_conntrack_event_cache(IPCT_REFRESH, skb);
930 } 1149 }
931 ct_add_counters(ct, ctinfo, skb); 1150 ct_add_counters(ct, ctinfo, skb);
932 write_unlock_bh(&ip_conntrack_lock); 1151 write_unlock_bh(&ip_conntrack_lock);
933 } 1152 }
934} 1153}
935 1154
1155#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
1156 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
1157/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
1158 * in ip_conntrack_core, since we don't want the protocols to autoload
1159 * or depend on ctnetlink */
1160int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb,
1161 const struct ip_conntrack_tuple *tuple)
1162{
1163 NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
1164 &tuple->src.u.tcp.port);
1165 NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
1166 &tuple->dst.u.tcp.port);
1167 return 0;
1168
1169nfattr_failure:
1170 return -1;
1171}
1172
1173int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
1174 struct ip_conntrack_tuple *t)
1175{
1176 if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
1177 return -EINVAL;
1178
1179 t->src.u.tcp.port =
1180 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
1181 t->dst.u.tcp.port =
1182 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
1183
1184 return 0;
1185}
1186#endif
1187
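
Port-based protocols are expected to plug these shared converters into their ip_conntrack_protocol, so ctnetlink can (de)serialize their tuples without per-protocol code. A hedged sketch showing only the relevant fields (the protocol choice and name are illustrative):

static struct ip_conntrack_protocol example_proto = {
	.proto			= IPPROTO_UDP,	/* illustrative */
	.name			= "example",
	.tuple_to_nfattr	= ip_ct_port_tuple_to_nfattr,
	.nfattr_to_tuple	= ip_ct_port_nfattr_to_tuple,
};
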
936/* Returns new sk_buff, or NULL */ 1188/* Returns new sk_buff, or NULL */
937struct sk_buff * 1189struct sk_buff *
938ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) 1190ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
@@ -943,10 +1195,8 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
943 skb = ip_defrag(skb, user); 1195 skb = ip_defrag(skb, user);
944 local_bh_enable(); 1196 local_bh_enable();
945 1197
946 if (skb) { 1198 if (skb)
947 ip_send_check(skb->nh.iph); 1199 ip_send_check(skb->nh.iph);
948 skb->nfcache |= NFC_ALTERED;
949 }
950 return skb; 1200 return skb;
951} 1201}
952 1202
@@ -1096,16 +1346,14 @@ static void free_conntrack_hash(void)
1096 * ip_conntrack_htable_size)); 1346 * ip_conntrack_htable_size));
1097} 1347}
1098 1348
1099/* Mishearing the voices in his head, our hero wonders how he's 1349void ip_conntrack_flush()
1100 supposed to kill the mall. */
1101void ip_conntrack_cleanup(void)
1102{ 1350{
1103 ip_ct_attach = NULL;
1104 /* This makes sure all current packets have passed through 1351 /* This makes sure all current packets have passed through
1105 netfilter framework. Roll on, two-stage module 1352 netfilter framework. Roll on, two-stage module
1106 delete... */ 1353 delete... */
1107 synchronize_net(); 1354 synchronize_net();
1108 1355
1356 ip_ct_event_cache_flush();
1109 i_see_dead_people: 1357 i_see_dead_people:
1110 ip_ct_iterate_cleanup(kill_all, NULL); 1358 ip_ct_iterate_cleanup(kill_all, NULL);
1111 if (atomic_read(&ip_conntrack_count) != 0) { 1359 if (atomic_read(&ip_conntrack_count) != 0) {
@@ -1115,7 +1363,14 @@ void ip_conntrack_cleanup(void)
1115 /* wait until all references to ip_conntrack_untracked are dropped */ 1363 /* wait until all references to ip_conntrack_untracked are dropped */
1116 while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1) 1364 while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
1117 schedule(); 1365 schedule();
1366}
1118 1367
1368/* Mishearing the voices in his head, our hero wonders how he's
1369 supposed to kill the mall. */
1370void ip_conntrack_cleanup(void)
1371{
1372 ip_ct_attach = NULL;
1373 ip_conntrack_flush();
1119 kmem_cache_destroy(ip_conntrack_cachep); 1374 kmem_cache_destroy(ip_conntrack_cachep);
1120 kmem_cache_destroy(ip_conntrack_expect_cachep); 1375 kmem_cache_destroy(ip_conntrack_expect_cachep);
1121 free_conntrack_hash(); 1376 free_conntrack_hash();
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 7a3b773be3f9..3a2627db1729 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -25,8 +25,7 @@ MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
25MODULE_DESCRIPTION("ftp connection tracking helper"); 25MODULE_DESCRIPTION("ftp connection tracking helper");
26 26
27/* This is slow, but it's simple. --RR */ 27/* This is slow, but it's simple. --RR */
28static char ftp_buffer[65536]; 28static char *ftp_buffer;
29
30static DEFINE_SPINLOCK(ip_ftp_lock); 29static DEFINE_SPINLOCK(ip_ftp_lock);
31 30
32#define MAX_PORTS 8 31#define MAX_PORTS 8
@@ -262,7 +261,8 @@ static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
262} 261}
263 262
264/* We don't update if it's older than what we have. */ 263/* We don't update if it's older than what we have. */
265static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir) 264static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
265 struct sk_buff *skb)
266{ 266{
267 unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; 267 unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
268 268
@@ -276,10 +276,13 @@ static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir)
276 oldest = i; 276 oldest = i;
277 } 277 }
278 278
279 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) 279 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
280 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; 280 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
281 else if (oldest != NUM_SEQ_TO_REMEMBER) 281 ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
282 } else if (oldest != NUM_SEQ_TO_REMEMBER) {
282 info->seq_aft_nl[dir][oldest] = nl_seq; 283 info->seq_aft_nl[dir][oldest] = nl_seq;
284 ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
285 }
283} 286}
284 287
285static int help(struct sk_buff **pskb, 288static int help(struct sk_buff **pskb,
@@ -439,7 +442,7 @@ out_update_nl:
439 /* Now if this ends in \n, update ftp info. Seq may have been 442 /* Now if this ends in \n, update ftp info. Seq may have been
440 * adjusted by NAT code. */ 443 * adjusted by NAT code. */
441 if (ends_in_nl) 444 if (ends_in_nl)
442 update_nl_seq(seq, ct_ftp_info,dir); 445 update_nl_seq(seq, ct_ftp_info,dir, *pskb);
443 out: 446 out:
444 spin_unlock_bh(&ip_ftp_lock); 447 spin_unlock_bh(&ip_ftp_lock);
445 return ret; 448 return ret;
@@ -457,6 +460,8 @@ static void fini(void)
457 ports[i]); 460 ports[i]);
458 ip_conntrack_helper_unregister(&ftp[i]); 461 ip_conntrack_helper_unregister(&ftp[i]);
459 } 462 }
463
464 kfree(ftp_buffer);
460} 465}
461 466
462static int __init init(void) 467static int __init init(void)
@@ -464,6 +469,10 @@ static int __init init(void)
464 int i, ret; 469 int i, ret;
465 char *tmpname; 470 char *tmpname;
466 471
472 ftp_buffer = kmalloc(65536, GFP_KERNEL);
473 if (!ftp_buffer)
474 return -ENOMEM;
475
467 if (ports_c == 0) 476 if (ports_c == 0)
468 ports[ports_c++] = FTP_PORT; 477 ports[ports_c++] = FTP_PORT;
469 478
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
index 4a28f297d502..25438eec21a1 100644
--- a/net/ipv4/netfilter/ip_conntrack_irc.c
+++ b/net/ipv4/netfilter/ip_conntrack_irc.c
@@ -39,7 +39,7 @@ static int ports_c;
39static int max_dcc_channels = 8; 39static int max_dcc_channels = 8;
40static unsigned int dcc_timeout = 300; 40static unsigned int dcc_timeout = 300;
41/* This is slow, but it's simple. --RR */ 41/* This is slow, but it's simple. --RR */
42static char irc_buffer[65536]; 42static char *irc_buffer;
43static DEFINE_SPINLOCK(irc_buffer_lock); 43static DEFINE_SPINLOCK(irc_buffer_lock);
44 44
45unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb, 45unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb,
@@ -257,6 +257,10 @@ static int __init init(void)
257 printk("ip_conntrack_irc: dcc_timeout must be a positive integer\n"); 257 printk("ip_conntrack_irc: dcc_timeout must be a positive integer\n");
258 return -EBUSY; 258 return -EBUSY;
259 } 259 }
260
261 irc_buffer = kmalloc(65536, GFP_KERNEL);
262 if (!irc_buffer)
263 return -ENOMEM;
260 264
261 /* If no port given, default to standard irc port */ 265 /* If no port given, default to standard irc port */
262 if (ports_c == 0) 266 if (ports_c == 0)
@@ -304,6 +308,7 @@ static void fini(void)
304 ports[i]); 308 ports[i]);
305 ip_conntrack_helper_unregister(&irc_helpers[i]); 309 ip_conntrack_helper_unregister(&irc_helpers[i]);
306 } 310 }
311 kfree(irc_buffer);
307} 312}
308 313
309module_init(init); 314module_init(init);
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
new file mode 100644
index 000000000000..a4e9278db4ed
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -0,0 +1,1579 @@
1/* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005 by Pablo Neira Ayuso <pablo@eurodev.net>
8 *
9 * I've reworked this stuff to use attributes instead of conntrack
10 * structures. 5.44 am. I need more tea. --pablo 05/07/11.
11 *
12 * Initial connection tracking via netlink development funded and
13 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
14 *
15 * Further development of this code funded by Astaro AG (http://www.astaro.com)
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/timer.h>
26#include <linux/skbuff.h>
27#include <linux/errno.h>
28#include <linux/netlink.h>
29#include <linux/spinlock.h>
30#include <linux/notifier.h>
31#include <linux/rtnetlink.h>
32
33#include <linux/netfilter.h>
34#include <linux/netfilter_ipv4.h>
35#include <linux/netfilter_ipv4/ip_tables.h>
36#include <linux/netfilter_ipv4/ip_conntrack.h>
37#include <linux/netfilter_ipv4/ip_conntrack_core.h>
38#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
39#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
40#include <linux/netfilter_ipv4/ip_nat_protocol.h>
41
42#include <linux/netfilter/nfnetlink.h>
43#include <linux/netfilter/nfnetlink_conntrack.h>
44
45MODULE_LICENSE("GPL");
46
47static char __initdata version[] = "0.90";
48
49#if 0
50#define DEBUGP printk
51#else
52#define DEBUGP(format, args...)
53#endif
54
55
56static inline int
57ctnetlink_dump_tuples_proto(struct sk_buff *skb,
58 const struct ip_conntrack_tuple *tuple)
59{
60 struct ip_conntrack_protocol *proto;
61
62 NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum);
63
64 proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
65 if (proto && proto->tuple_to_nfattr)
66 return proto->tuple_to_nfattr(skb, tuple);
67
68 return 0;
69
70nfattr_failure:
71 return -1;
72}
73
74static inline int
75ctnetlink_dump_tuples(struct sk_buff *skb,
76 const struct ip_conntrack_tuple *tuple)
77{
78 struct nfattr *nest_parms;
79
80 nest_parms = NFA_NEST(skb, CTA_TUPLE_IP);
81 NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(u_int32_t), &tuple->src.ip);
82 NFA_PUT(skb, CTA_IP_V4_DST, sizeof(u_int32_t), &tuple->dst.ip);
83 NFA_NEST_END(skb, nest_parms);
84
85 nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO);
86 ctnetlink_dump_tuples_proto(skb, tuple);
87 NFA_NEST_END(skb, nest_parms);
88
89 return 0;
90
91nfattr_failure:
92 return -1;
93}
94
95static inline int
96ctnetlink_dump_status(struct sk_buff *skb, const struct ip_conntrack *ct)
97{
98 u_int32_t status = htonl((u_int32_t) ct->status);
99 NFA_PUT(skb, CTA_STATUS, sizeof(status), &status);
100 return 0;
101
102nfattr_failure:
103 return -1;
104}
105
106static inline int
107ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct)
108{
109 long timeout_l = ct->timeout.expires - jiffies;
110 u_int32_t timeout;
111
112 if (timeout_l < 0)
113 timeout = 0;
114 else
115 timeout = htonl(timeout_l / HZ);
116
117 NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout);
118 return 0;
119
120nfattr_failure:
121 return -1;
122}
123
124static inline int
125ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct)
126{
127 struct ip_conntrack_protocol *proto = ip_conntrack_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
128
129 struct nfattr *nest_proto;
130 int ret;
131
132 if (!proto || !proto->to_nfattr)
133 return 0;
134
135 nest_proto = NFA_NEST(skb, CTA_PROTOINFO);
136
137 ret = proto->to_nfattr(skb, nest_proto, ct);
138
139 ip_conntrack_proto_put(proto);
140
141 NFA_NEST_END(skb, nest_proto);
142
143 return ret;
144
145nfattr_failure:
146 return -1;
147}
148
149static inline int
150ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct)
151{
152 struct nfattr *nest_helper;
153
154 if (!ct->helper)
155 return 0;
156
157 nest_helper = NFA_NEST(skb, CTA_HELP);
158 NFA_PUT(skb, CTA_HELP_NAME, CTA_HELP_MAXNAMESIZE, &ct->helper->name);
159
160 if (ct->helper->to_nfattr)
161 ct->helper->to_nfattr(skb, ct);
162
163 NFA_NEST_END(skb, nest_helper);
164
165 return 0;
166
167nfattr_failure:
168 return -1;
169}
170
171#ifdef CONFIG_IP_NF_CT_ACCT
172static inline int
173ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
174 enum ip_conntrack_dir dir)
175{
176 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
177 struct nfattr *nest_count = NFA_NEST(skb, type);
178 u_int64_t tmp;
179
180 tmp = cpu_to_be64(ct->counters[dir].packets);
181 NFA_PUT(skb, CTA_COUNTERS_PACKETS, sizeof(u_int64_t), &tmp);
182
183 tmp = cpu_to_be64(ct->counters[dir].bytes);
184 NFA_PUT(skb, CTA_COUNTERS_BYTES, sizeof(u_int64_t), &tmp);
185
186 NFA_NEST_END(skb, nest_count);
187
188 return 0;
189
190nfattr_failure:
191 return -1;
192}
193#else
194#define ctnetlink_dump_counters(a, b, c) (0)
195#endif
196
197#ifdef CONFIG_IP_NF_CONNTRACK_MARK
198static inline int
199ctnetlink_dump_mark(struct sk_buff *skb, const struct ip_conntrack *ct)
200{
201 u_int32_t mark = htonl(ct->mark);
202
203 NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark);
204 return 0;
205
206nfattr_failure:
207 return -1;
208}
209#else
210#define ctnetlink_dump_mark(a, b) (0)
211#endif
212
213static inline int
214ctnetlink_dump_id(struct sk_buff *skb, const struct ip_conntrack *ct)
215{
216 u_int32_t id = htonl(ct->id);
217 NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id);
218 return 0;
219
220nfattr_failure:
221 return -1;
222}
223
224static inline int
225ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct)
226{
227 unsigned int use = htonl(atomic_read(&ct->ct_general.use));
228
229 NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use);
230 return 0;
231
232nfattr_failure:
233 return -1;
234}
235
236#define tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
237
238static int
239ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
240 int event, int nowait,
241 const struct ip_conntrack *ct)
242{
243 struct nlmsghdr *nlh;
244 struct nfgenmsg *nfmsg;
245 struct nfattr *nest_parms;
246 unsigned char *b;
247
248 b = skb->tail;
249
250 event |= NFNL_SUBSYS_CTNETLINK << 8;
251 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
252 nfmsg = NLMSG_DATA(nlh);
253
254 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
255 nfmsg->nfgen_family = AF_INET;
256 nfmsg->version = NFNETLINK_V0;
257 nfmsg->res_id = 0;
258
259 nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG);
260 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
261 goto nfattr_failure;
262 NFA_NEST_END(skb, nest_parms);
263
264 nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
265 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
266 goto nfattr_failure;
267 NFA_NEST_END(skb, nest_parms);
268
269 if (ctnetlink_dump_status(skb, ct) < 0 ||
270 ctnetlink_dump_timeout(skb, ct) < 0 ||
271 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
272 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
273 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
274 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
275 ctnetlink_dump_mark(skb, ct) < 0 ||
276 ctnetlink_dump_id(skb, ct) < 0 ||
277 ctnetlink_dump_use(skb, ct) < 0)
278 goto nfattr_failure;
279
280 nlh->nlmsg_len = skb->tail - b;
281 return skb->len;
282
283nlmsg_failure:
284nfattr_failure:
285 skb_trim(skb, b - skb->data);
286 return -1;
287}
288
289#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
290static int ctnetlink_conntrack_event(struct notifier_block *this,
291 unsigned long events, void *ptr)
292{
293 struct nlmsghdr *nlh;
294 struct nfgenmsg *nfmsg;
295 struct nfattr *nest_parms;
296 struct ip_conntrack *ct = (struct ip_conntrack *)ptr;
297 struct sk_buff *skb;
298 unsigned int type;
299 unsigned char *b;
300 unsigned int flags = 0, group;
301
302 /* ignore our fake conntrack entry */
303 if (ct == &ip_conntrack_untracked)
304 return NOTIFY_DONE;
305
306 if (events & IPCT_DESTROY) {
307 type = IPCTNL_MSG_CT_DELETE;
308 group = NFNLGRP_CONNTRACK_DESTROY;
309 goto alloc_skb;
310 }
311 if (events & (IPCT_NEW | IPCT_RELATED)) {
312 type = IPCTNL_MSG_CT_NEW;
313 flags = NLM_F_CREATE|NLM_F_EXCL;
314 /* dump everything */
315 events = ~0UL;
316 group = NFNLGRP_CONNTRACK_NEW;
317 goto alloc_skb;
318 }
319 if (events & (IPCT_STATUS |
320 IPCT_PROTOINFO |
321 IPCT_HELPER |
322 IPCT_HELPINFO |
323 IPCT_NATINFO)) {
324 type = IPCTNL_MSG_CT_NEW;
325 group = NFNLGRP_CONNTRACK_UPDATE;
326 goto alloc_skb;
327 }
328
329 return NOTIFY_DONE;
330
331alloc_skb:
332	/* FIXME: check whether anyone is listening before building the skb, to avoid hurting performance */
333
334 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
335 if (!skb)
336 return NOTIFY_DONE;
337
338 b = skb->tail;
339
340 type |= NFNL_SUBSYS_CTNETLINK << 8;
341 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
342 nfmsg = NLMSG_DATA(nlh);
343
344 nlh->nlmsg_flags = flags;
345 nfmsg->nfgen_family = AF_INET;
346 nfmsg->version = NFNETLINK_V0;
347 nfmsg->res_id = 0;
348
349 nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG);
350 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
351 goto nfattr_failure;
352 NFA_NEST_END(skb, nest_parms);
353
354 nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY);
355 if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
356 goto nfattr_failure;
357 NFA_NEST_END(skb, nest_parms);
358
359 /* NAT stuff is now a status flag */
360 if ((events & IPCT_STATUS || events & IPCT_NATINFO)
361 && ctnetlink_dump_status(skb, ct) < 0)
362 goto nfattr_failure;
363 if (events & IPCT_REFRESH
364 && ctnetlink_dump_timeout(skb, ct) < 0)
365 goto nfattr_failure;
366 if (events & IPCT_PROTOINFO
367 && ctnetlink_dump_protoinfo(skb, ct) < 0)
368 goto nfattr_failure;
369 if (events & IPCT_HELPINFO
370 && ctnetlink_dump_helpinfo(skb, ct) < 0)
371 goto nfattr_failure;
372
373 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
374 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
375 goto nfattr_failure;
376
377 nlh->nlmsg_len = skb->tail - b;
378 nfnetlink_send(skb, 0, group, 0);
379 return NOTIFY_DONE;
380
381nlmsg_failure:
382nfattr_failure:
383 kfree_skb(skb);
384 return NOTIFY_DONE;
385}
386#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
387
388static int ctnetlink_done(struct netlink_callback *cb)
389{
390 DEBUGP("entered %s\n", __FUNCTION__);
391 return 0;
392}
393
394static int
395ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
396{
397 struct ip_conntrack *ct = NULL;
398 struct ip_conntrack_tuple_hash *h;
399 struct list_head *i;
400 u_int32_t *id = (u_int32_t *) &cb->args[1];
401
402 DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__,
403 cb->args[0], *id);
404
405 read_lock_bh(&ip_conntrack_lock);
406 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++, *id = 0) {
407 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
408 h = (struct ip_conntrack_tuple_hash *) i;
409 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
410 continue;
411 ct = tuplehash_to_ctrack(h);
412 if (ct->id <= *id)
413 continue;
414 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
415 cb->nlh->nlmsg_seq,
416 IPCTNL_MSG_CT_NEW,
417 1, ct) < 0)
418 goto out;
419 *id = ct->id;
420 }
421 }
422out:
423 read_unlock_bh(&ip_conntrack_lock);
424
425 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
426
427 return skb->len;
428}
429
430#ifdef CONFIG_IP_NF_CT_ACCT
431static int
432ctnetlink_dump_table_w(struct sk_buff *skb, struct netlink_callback *cb)
433{
434 struct ip_conntrack *ct = NULL;
435 struct ip_conntrack_tuple_hash *h;
436 struct list_head *i;
437 u_int32_t *id = (u_int32_t *) &cb->args[1];
438
439	DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__,
440 cb->args[0], *id);
441
442 write_lock_bh(&ip_conntrack_lock);
443 for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++, *id = 0) {
444 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
445 h = (struct ip_conntrack_tuple_hash *) i;
446 if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
447 continue;
448 ct = tuplehash_to_ctrack(h);
449 if (ct->id <= *id)
450 continue;
451 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
452 cb->nlh->nlmsg_seq,
453 IPCTNL_MSG_CT_NEW,
454 1, ct) < 0)
455 goto out;
456 *id = ct->id;
457
458 memset(&ct->counters, 0, sizeof(ct->counters));
459 }
460 }
461out:
462 write_unlock_bh(&ip_conntrack_lock);
463
464 DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
465
466 return skb->len;
467}
468#endif /* CONFIG_IP_NF_CT_ACCT */
469
470static const int cta_min_ip[CTA_IP_MAX] = {
471 [CTA_IP_V4_SRC-1] = sizeof(u_int32_t),
472 [CTA_IP_V4_DST-1] = sizeof(u_int32_t),
473};
474
475static inline int
476ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
477{
478 struct nfattr *tb[CTA_IP_MAX];
479
480 DEBUGP("entered %s\n", __FUNCTION__);
481
482
483 if (nfattr_parse_nested(tb, CTA_IP_MAX, attr) < 0)
484 goto nfattr_failure;
485
486 if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
487 return -EINVAL;
488
489 if (!tb[CTA_IP_V4_SRC-1])
490 return -EINVAL;
491 tuple->src.ip = *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_SRC-1]);
492
493 if (!tb[CTA_IP_V4_DST-1])
494 return -EINVAL;
495 tuple->dst.ip = *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
496
497 DEBUGP("leaving\n");
498
499 return 0;
500
501nfattr_failure:
502 return -1;
503}
504
505static const int cta_min_proto[CTA_PROTO_MAX] = {
506 [CTA_PROTO_NUM-1] = sizeof(u_int16_t),
507 [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t),
508 [CTA_PROTO_DST_PORT-1] = sizeof(u_int16_t),
509 [CTA_PROTO_ICMP_TYPE-1] = sizeof(u_int8_t),
510 [CTA_PROTO_ICMP_CODE-1] = sizeof(u_int8_t),
511 [CTA_PROTO_ICMP_ID-1] = sizeof(u_int16_t),
512};
513
514static inline int
515ctnetlink_parse_tuple_proto(struct nfattr *attr,
516 struct ip_conntrack_tuple *tuple)
517{
518 struct nfattr *tb[CTA_PROTO_MAX];
519 struct ip_conntrack_protocol *proto;
520 int ret = 0;
521
522 DEBUGP("entered %s\n", __FUNCTION__);
523
524 if (nfattr_parse_nested(tb, CTA_PROTO_MAX, attr) < 0)
525 goto nfattr_failure;
526
527 if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
528 return -EINVAL;
529
530 if (!tb[CTA_PROTO_NUM-1])
531 return -EINVAL;
532 tuple->dst.protonum = *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]);
533
534 proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
535
536 if (likely(proto && proto->nfattr_to_tuple)) {
537 ret = proto->nfattr_to_tuple(tb, tuple);
538 ip_conntrack_proto_put(proto);
539 }
540
541 return ret;
542
543nfattr_failure:
544 return -1;
545}
546
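/*
 * A tuple arrives as two levels of nested attributes, roughly:
 *
 *   CTA_TUPLE_ORIG / CTA_TUPLE_REPLY
 *     CTA_TUPLE_IP
 *       CTA_IP_V4_SRC, CTA_IP_V4_DST
 *     CTA_TUPLE_PROTO
 *       CTA_PROTO_NUM, plus per-protocol attributes (ports for
 *       TCP/UDP, type/code/id for ICMP)
 *
 * ctnetlink_parse_tuple() unpacks the outer nest and hands each level
 * to the helpers above.
 */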
547static inline int
548ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
549 enum ctattr_tuple type)
550{
551 struct nfattr *tb[CTA_TUPLE_MAX];
552 int err;
553
554 DEBUGP("entered %s\n", __FUNCTION__);
555
556 memset(tuple, 0, sizeof(*tuple));
557
558 if (nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]) < 0)
559 goto nfattr_failure;
560
561 if (!tb[CTA_TUPLE_IP-1])
562 return -EINVAL;
563
564 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP-1], tuple);
565 if (err < 0)
566 return err;
567
568 if (!tb[CTA_TUPLE_PROTO-1])
569 return -EINVAL;
570
571 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO-1], tuple);
572 if (err < 0)
573 return err;
574
575 /* orig and expect tuples get DIR_ORIGINAL */
576 if (type == CTA_TUPLE_REPLY)
577 tuple->dst.dir = IP_CT_DIR_REPLY;
578 else
579 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
580
581 DUMP_TUPLE(tuple);
582
583 DEBUGP("leaving\n");
584
585 return 0;
586
587nfattr_failure:
588 return -1;
589}
590
591#ifdef CONFIG_IP_NF_NAT_NEEDED
592static const int cta_min_protonat[CTA_PROTONAT_MAX] = {
593 [CTA_PROTONAT_PORT_MIN-1] = sizeof(u_int16_t),
594 [CTA_PROTONAT_PORT_MAX-1] = sizeof(u_int16_t),
595};
596
597static int ctnetlink_parse_nat_proto(struct nfattr *attr,
598 const struct ip_conntrack *ct,
599 struct ip_nat_range *range)
600{
601 struct nfattr *tb[CTA_PROTONAT_MAX];
602 struct ip_nat_protocol *npt;
603
604 DEBUGP("entered %s\n", __FUNCTION__);
605
606 if (nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr) < 0)
607 goto nfattr_failure;
608
609 if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
610 goto nfattr_failure;
611
612 npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
613 if (!npt)
614 return 0;
615
616 if (!npt->nfattr_to_range) {
617 ip_nat_proto_put(npt);
618 return 0;
619 }
620
621 /* nfattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
622 if (npt->nfattr_to_range(tb, range) > 0)
623 range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
624
625 ip_nat_proto_put(npt);
626
627 DEBUGP("leaving\n");
628 return 0;
629
630nfattr_failure:
631 return -1;
632}
633
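/*
 * CTA_NAT carries an ip_nat_range: CTA_NAT_MINIP/CTA_NAT_MAXIP bound
 * the address range (a missing MAXIP means max_ip = min_ip, i.e. a
 * single address), and an optional CTA_NAT_PROTO nest narrows the
 * per-protocol part, e.g. a port range, via the NAT protocol module.
 */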
634static inline int
635ctnetlink_parse_nat(struct nfattr *cda[],
636 const struct ip_conntrack *ct, struct ip_nat_range *range)
637{
638 struct nfattr *tb[CTA_NAT_MAX];
639 int err;
640
641 DEBUGP("entered %s\n", __FUNCTION__);
642
643 memset(range, 0, sizeof(*range));
644
645 if (nfattr_parse_nested(tb, CTA_NAT_MAX, cda[CTA_NAT-1]) < 0)
646 goto nfattr_failure;
647
648 if (tb[CTA_NAT_MINIP-1])
649 range->min_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
650
651 if (!tb[CTA_NAT_MAXIP-1])
652 range->max_ip = range->min_ip;
653 else
654 range->max_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MAXIP-1]);
655
656 if (range->min_ip)
657 range->flags |= IP_NAT_RANGE_MAP_IPS;
658
659 if (!tb[CTA_NAT_PROTO-1])
660 return 0;
661
662 err = ctnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range);
663 if (err < 0)
664 return err;
665
666 DEBUGP("leaving\n");
667 return 0;
668
669nfattr_failure:
670 return -1;
671}
672#endif /* CONFIG_IP_NF_NAT_NEEDED */
673
674static inline int
675ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
676{
677 struct nfattr *tb[CTA_HELP_MAX];
678
679 DEBUGP("entered %s\n", __FUNCTION__);
680
681 if (nfattr_parse_nested(tb, CTA_HELP_MAX, attr) < 0)
682 goto nfattr_failure;
683
684 if (!tb[CTA_HELP_NAME-1])
685 return -EINVAL;
686
687 *helper_name = NFA_DATA(tb[CTA_HELP_NAME-1]);
688
689 return 0;
690
691nfattr_failure:
692 return -1;
693}
694
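/*
 * Deletion reuses the normal expiry path: if del_timer() succeeds we
 * own the timeout, and invoking ct->timeout.function() by hand tears
 * the entry down exactly as an expired timer would.  A request that
 * carries no tuple attribute at all flushes the whole table instead.
 */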
695static int
696ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
697 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
698{
699 struct ip_conntrack_tuple_hash *h;
700 struct ip_conntrack_tuple tuple;
701 struct ip_conntrack *ct;
702 int err = 0;
703
704 DEBUGP("entered %s\n", __FUNCTION__);
705
706 if (cda[CTA_TUPLE_ORIG-1])
707 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG);
708 else if (cda[CTA_TUPLE_REPLY-1])
709 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY);
710 else {
711 /* Flush the whole table */
712 ip_conntrack_flush();
713 return 0;
714 }
715
716 if (err < 0)
717 return err;
718
719 h = ip_conntrack_find_get(&tuple, NULL);
720 if (!h) {
721 DEBUGP("tuple not found in conntrack hash\n");
722 return -ENOENT;
723 }
724
725 ct = tuplehash_to_ctrack(h);
726
727 if (cda[CTA_ID-1]) {
728 u_int32_t id = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_ID-1]));
729 if (ct->id != id) {
730 ip_conntrack_put(ct);
731 return -ENOENT;
732 }
733 }
734 if (del_timer(&ct->timeout)) {
735 ip_conntrack_put(ct);
736 ct->timeout.function((unsigned long)ct);
737 return 0;
738 }
739 ip_conntrack_put(ct);
740 DEBUGP("leaving\n");
741
742 return 0;
743}
744
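/*
 * GET comes in two flavours.  With NLM_F_DUMP the request is handed to
 * netlink_dump_start(), which drives the table-dump callbacks above
 * and streams a multipart reply; the skb_pull() merely trims the
 * already-handled request off the queue.  Without NLM_F_DUMP a single
 * entry is looked up by tuple and unicast back to the sender.
 */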
745static int
746ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
747 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
748{
749 struct ip_conntrack_tuple_hash *h;
750 struct ip_conntrack_tuple tuple;
751 struct ip_conntrack *ct;
752 struct sk_buff *skb2 = NULL;
753 int err = 0;
754
755 DEBUGP("entered %s\n", __FUNCTION__);
756
757 if (nlh->nlmsg_flags & NLM_F_DUMP) {
758 struct nfgenmsg *msg = NLMSG_DATA(nlh);
759 u32 rlen;
760
761 if (msg->nfgen_family != AF_INET)
762 return -EAFNOSUPPORT;
763
764 if (NFNL_MSG_TYPE(nlh->nlmsg_type) ==
765 IPCTNL_MSG_CT_GET_CTRZERO) {
766#ifdef CONFIG_IP_NF_CT_ACCT
767 if ((*errp = netlink_dump_start(ctnl, skb, nlh,
768 ctnetlink_dump_table_w,
769 ctnetlink_done)) != 0)
770 return -EINVAL;
771#else
772 return -ENOTSUPP;
773#endif
774 } else {
775 if ((*errp = netlink_dump_start(ctnl, skb, nlh,
776 ctnetlink_dump_table,
777 ctnetlink_done)) != 0)
778 return -EINVAL;
779 }
780
781 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
782 if (rlen > skb->len)
783 rlen = skb->len;
784 skb_pull(skb, rlen);
785 return 0;
786 }
787
788 if (cda[CTA_TUPLE_ORIG-1])
789 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG);
790 else if (cda[CTA_TUPLE_REPLY-1])
791 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY);
792 else
793 return -EINVAL;
794
795 if (err < 0)
796 return err;
797
798 h = ip_conntrack_find_get(&tuple, NULL);
799 if (!h) {
800		DEBUGP("tuple not found in conntrack hash\n");
801 return -ENOENT;
802 }
803 DEBUGP("tuple found\n");
804 ct = tuplehash_to_ctrack(h);
805
806 err = -ENOMEM;
807 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
808 if (!skb2) {
809 ip_conntrack_put(ct);
810 return -ENOMEM;
811 }
812 NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
813
814 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
815 IPCTNL_MSG_CT_NEW, 1, ct);
816 ip_conntrack_put(ct);
817 if (err <= 0)
818 goto out;
819
820	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
821	if (err < 0)
822		return err;	/* netlink_unicast() consumes skb2, even on error */
823
824 DEBUGP("leaving\n");
825 return 0;
826
827out:
828 if (skb2)
829 kfree_skb(skb2);
830 return -1;
831}
832
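/*
 * Status changes are deliberately one-way: EXPECTED, CONFIRMED and
 * DYING cannot be touched at all, and SEEN_REPLY/ASSURED may be set
 * but never cleared again.  'd' below is the xor of the current and
 * the requested bitmask, i.e. the set of bits the caller wants to
 * flip.
 */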
833static inline int
834ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
835{
836 unsigned long d, status = *(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]);
837 d = ct->status ^ status;
838
839 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
840 /* unchangeable */
841 return -EINVAL;
842
843 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
844 /* SEEN_REPLY bit can only be set */
845 return -EINVAL;
846
847
848 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
849 /* ASSURED bit can only be set */
850 return -EINVAL;
851
852 if (cda[CTA_NAT-1]) {
853#ifndef CONFIG_IP_NF_NAT_NEEDED
854 return -EINVAL;
855#else
856 unsigned int hooknum;
857 struct ip_nat_range range;
858
859 if (ctnetlink_parse_nat(cda, ct, &range) < 0)
860 return -EINVAL;
861
862 DEBUGP("NAT: %u.%u.%u.%u-%u.%u.%u.%u:%u-%u\n",
863 NIPQUAD(range.min_ip), NIPQUAD(range.max_ip),
864 htons(range.min.all), htons(range.max.all));
865
866 /* This is tricky but it works. ip_nat_setup_info needs the
867 * hook number as parameter, so let's do the correct
868 * conversion and run away */
869 if (status & IPS_SRC_NAT_DONE)
870 hooknum = NF_IP_POST_ROUTING; /* IP_NAT_MANIP_SRC */
871 else if (status & IPS_DST_NAT_DONE)
872 hooknum = NF_IP_PRE_ROUTING; /* IP_NAT_MANIP_DST */
873 else
874 return -EINVAL; /* Missing NAT flags */
875
876 DEBUGP("NAT status: %lu\n",
877 status & (IPS_NAT_MASK | IPS_NAT_DONE_MASK));
878
879 if (ip_nat_initialized(ct, hooknum))
880 return -EEXIST;
881 ip_nat_setup_info(ct, &range, hooknum);
882
883 DEBUGP("NAT status after setup_info: %lu\n",
884 ct->status & (IPS_NAT_MASK | IPS_NAT_DONE_MASK));
885#endif
886 }
887
888 /* Be careful here, modifying NAT bits can screw up things,
889 * so don't let users modify them directly if they don't pass
890 * ip_nat_range. */
891 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
892 return 0;
893}
894
895
896static inline int
897ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
898{
899 struct ip_conntrack_helper *helper;
900 char *helpname;
901 int err;
902
903 DEBUGP("entered %s\n", __FUNCTION__);
904
905 /* don't change helper of sibling connections */
906 if (ct->master)
907 return -EINVAL;
908
909 err = ctnetlink_parse_help(cda[CTA_HELP-1], &helpname);
910 if (err < 0)
911 return err;
912
913 helper = __ip_conntrack_helper_find_byname(helpname);
914 if (!helper) {
915 if (!strcmp(helpname, ""))
916 helper = NULL;
917 else
918 return -EINVAL;
919 }
920
921 if (ct->helper) {
922 if (!helper) {
923 /* we had a helper before ... */
924 ip_ct_remove_expectations(ct);
925 ct->helper = NULL;
926 } else {
927 /* need to zero data of old helper */
928 memset(&ct->help, 0, sizeof(ct->help));
929 }
930 }
931
932 ct->helper = helper;
933
934 return 0;
935}
936
937static inline int
938ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
939{
940 u_int32_t timeout = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1]));
941
942 if (!del_timer(&ct->timeout))
943 return -ETIME;
944
945 ct->timeout.expires = jiffies + timeout * HZ;
946 add_timer(&ct->timeout);
947
948 return 0;
949}
950
951static int
952ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
953{
954 int err;
955
956 DEBUGP("entered %s\n", __FUNCTION__);
957
958 if (cda[CTA_HELP-1]) {
959 err = ctnetlink_change_helper(ct, cda);
960 if (err < 0)
961 return err;
962 }
963
964 if (cda[CTA_TIMEOUT-1]) {
965 err = ctnetlink_change_timeout(ct, cda);
966 if (err < 0)
967 return err;
968 }
969
970 if (cda[CTA_STATUS-1]) {
971 err = ctnetlink_change_status(ct, cda);
972 if (err < 0)
973 return err;
974 }
975
976 DEBUGP("all done\n");
977 return 0;
978}
979
980static int
981ctnetlink_create_conntrack(struct nfattr *cda[],
982 struct ip_conntrack_tuple *otuple,
983 struct ip_conntrack_tuple *rtuple)
984{
985 struct ip_conntrack *ct;
986 int err = -EINVAL;
987
988 DEBUGP("entered %s\n", __FUNCTION__);
989
990 ct = ip_conntrack_alloc(otuple, rtuple);
991 if (ct == NULL || IS_ERR(ct))
992 return -ENOMEM;
993
994 if (!cda[CTA_TIMEOUT-1])
995 goto err;
996 ct->timeout.expires = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1]));
997
998 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
999 ct->status |= IPS_CONFIRMED;
1000
1001 err = ctnetlink_change_status(ct, cda);
1002 if (err < 0)
1003 goto err;
1004
1005 ct->helper = ip_conntrack_helper_find_get(rtuple);
1006
1007 add_timer(&ct->timeout);
1008 ip_conntrack_hash_insert(ct);
1009
1010 if (ct->helper)
1011 ip_conntrack_helper_put(ct->helper);
1012
1013 DEBUGP("conntrack with id %u inserted\n", ct->id);
1014 return 0;
1015
1016err:
1017 ip_conntrack_free(ct);
1018 return err;
1019}
1020
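/*
 * NEW follows the usual netlink create/replace convention, much like
 * open(2) with O_CREAT/O_EXCL: no matching entry plus NLM_F_CREATE
 * creates one, an existing entry without NLM_F_EXCL is changed in
 * place, and an existing entry with NLM_F_EXCL yields -EEXIST.
 */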
1021static int
1022ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1023 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
1024{
1025 struct ip_conntrack_tuple otuple, rtuple;
1026 struct ip_conntrack_tuple_hash *h = NULL;
1027 int err = 0;
1028
1029 DEBUGP("entered %s\n", __FUNCTION__);
1030
1031 if (cda[CTA_TUPLE_ORIG-1]) {
1032 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG);
1033 if (err < 0)
1034 return err;
1035 }
1036
1037 if (cda[CTA_TUPLE_REPLY-1]) {
1038 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY);
1039 if (err < 0)
1040 return err;
1041 }
1042
1043 write_lock_bh(&ip_conntrack_lock);
1044 if (cda[CTA_TUPLE_ORIG-1])
1045 h = __ip_conntrack_find(&otuple, NULL);
1046 else if (cda[CTA_TUPLE_REPLY-1])
1047 h = __ip_conntrack_find(&rtuple, NULL);
1048
1049 if (h == NULL) {
1050 write_unlock_bh(&ip_conntrack_lock);
1051 DEBUGP("no such conntrack, create new\n");
1052 err = -ENOENT;
1053 if (nlh->nlmsg_flags & NLM_F_CREATE)
1054 err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
1055 return err;
1056 }
1057 /* implicit 'else' */
1058
1059 /* we only allow nat config for new conntracks */
1060 if (cda[CTA_NAT-1]) {
1061 err = -EINVAL;
1062 goto out_unlock;
1063 }
1064
1065 /* We manipulate the conntrack inside the global conntrack table lock,
1066 * so there's no need to increase the refcount */
1067 DEBUGP("conntrack found\n");
1068 err = -EEXIST;
1069 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1070 err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda);
1071
1072out_unlock:
1073 write_unlock_bh(&ip_conntrack_lock);
1074 return err;
1075}
1076
1077/***********************************************************************
1078 * EXPECT
1079 ***********************************************************************/
1080
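/*
 * An expectation is serialized as three tuples plus metadata:
 * CTA_EXPECT_TUPLE (what we expect to see), CTA_EXPECT_MASK (which
 * parts of it have to match), CTA_EXPECT_MASTER (the original tuple
 * of the conntrack that set the expectation up), and the
 * CTA_EXPECT_TIMEOUT/CTA_EXPECT_ID metadata.
 */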
1081static inline int
1082ctnetlink_exp_dump_tuple(struct sk_buff *skb,
1083 const struct ip_conntrack_tuple *tuple,
1084 enum ctattr_expect type)
1085{
1086 struct nfattr *nest_parms = NFA_NEST(skb, type);
1087
1088 if (ctnetlink_dump_tuples(skb, tuple) < 0)
1089 goto nfattr_failure;
1090
1091 NFA_NEST_END(skb, nest_parms);
1092
1093 return 0;
1094
1095nfattr_failure:
1096 return -1;
1097}
1098
1099static inline int
1100ctnetlink_exp_dump_expect(struct sk_buff *skb,
1101 const struct ip_conntrack_expect *exp)
1102{
1103 struct ip_conntrack *master = exp->master;
1104 u_int32_t timeout = htonl((exp->timeout.expires - jiffies) / HZ);
1105 u_int32_t id = htonl(exp->id);
1106
1107 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
1108 goto nfattr_failure;
1109 if (ctnetlink_exp_dump_tuple(skb, &exp->mask, CTA_EXPECT_MASK) < 0)
1110 goto nfattr_failure;
1111 if (ctnetlink_exp_dump_tuple(skb,
1112 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1113 CTA_EXPECT_MASTER) < 0)
1114 goto nfattr_failure;
1115
1116 NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout);
1117 NFA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id);
1118
1119 return 0;
1120
1121nfattr_failure:
1122 return -1;
1123}
1124
1125static int
1126ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1127 int event,
1128 int nowait,
1129 const struct ip_conntrack_expect *exp)
1130{
1131 struct nlmsghdr *nlh;
1132 struct nfgenmsg *nfmsg;
1133 unsigned char *b;
1134
1135 b = skb->tail;
1136
1137 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1138 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg));
1139 nfmsg = NLMSG_DATA(nlh);
1140
1141 nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
1142 nfmsg->nfgen_family = AF_INET;
1143 nfmsg->version = NFNETLINK_V0;
1144 nfmsg->res_id = 0;
1145
1146 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1147 goto nfattr_failure;
1148
1149 nlh->nlmsg_len = skb->tail - b;
1150 return skb->len;
1151
1152nlmsg_failure:
1153nfattr_failure:
1154 skb_trim(skb, b - skb->data);
1155 return -1;
1156}
1157
1158#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1159static int ctnetlink_expect_event(struct notifier_block *this,
1160 unsigned long events, void *ptr)
1161{
1162 struct nlmsghdr *nlh;
1163 struct nfgenmsg *nfmsg;
1164 struct ip_conntrack_expect *exp = (struct ip_conntrack_expect *)ptr;
1165 struct sk_buff *skb;
1166 unsigned int type;
1167 unsigned char *b;
1168 int flags = 0;
1169 u16 proto;
1170
1171 if (events & IPEXP_NEW) {
1172 type = IPCTNL_MSG_EXP_NEW;
1173 flags = NLM_F_CREATE|NLM_F_EXCL;
1174 } else
1175 return NOTIFY_DONE;
1176
1177 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
1178 if (!skb)
1179 return NOTIFY_DONE;
1180
1181 b = skb->tail;
1182
1183 type |= NFNL_SUBSYS_CTNETLINK << 8;
1184 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1185 nfmsg = NLMSG_DATA(nlh);
1186
1187 nlh->nlmsg_flags = flags;
1188 nfmsg->nfgen_family = AF_INET;
1189 nfmsg->version = NFNETLINK_V0;
1190 nfmsg->res_id = 0;
1191
1192 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1193 goto nfattr_failure;
1194
1195 nlh->nlmsg_len = skb->tail - b;
1196 proto = exp->tuple.dst.protonum;
1197 nfnetlink_send(skb, 0, NFNLGRP_CONNTRACK_EXP_NEW, 0);
1198 return NOTIFY_DONE;
1199
1200nlmsg_failure:
1201nfattr_failure:
1202 kfree_skb(skb);
1203 return NOTIFY_DONE;
1204}
1205#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
1206
1207static int
1208ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1209{
1210 struct ip_conntrack_expect *exp = NULL;
1211 struct list_head *i;
1212 u_int32_t *id = (u_int32_t *) &cb->args[0];
1213
1214	DEBUGP("entered %s, last id=%u\n", __FUNCTION__, *id);
1215
1216 read_lock_bh(&ip_conntrack_lock);
1217 list_for_each_prev(i, &ip_conntrack_expect_list) {
1218 exp = (struct ip_conntrack_expect *) i;
1219 if (exp->id <= *id)
1220 continue;
1221 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid,
1222 cb->nlh->nlmsg_seq,
1223 IPCTNL_MSG_EXP_NEW,
1224 1, exp) < 0)
1225 goto out;
1226 *id = exp->id;
1227 }
1228out:
1229 read_unlock_bh(&ip_conntrack_lock);
1230
1231	DEBUGP("leaving, last id=%u\n", *id);
1232
1233 return skb->len;
1234}
1235
1236static int
1237ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1238 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
1239{
1240 struct ip_conntrack_tuple tuple;
1241 struct ip_conntrack_expect *exp;
1242 struct sk_buff *skb2;
1243 int err = 0;
1244
1245 DEBUGP("entered %s\n", __FUNCTION__);
1246
1247 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1248 struct nfgenmsg *msg = NLMSG_DATA(nlh);
1249 u32 rlen;
1250
1251 if (msg->nfgen_family != AF_INET)
1252 return -EAFNOSUPPORT;
1253
1254 if ((*errp = netlink_dump_start(ctnl, skb, nlh,
1255 ctnetlink_exp_dump_table,
1256 ctnetlink_done)) != 0)
1257 return -EINVAL;
1258 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
1259 if (rlen > skb->len)
1260 rlen = skb->len;
1261 skb_pull(skb, rlen);
1262 return 0;
1263 }
1264
1265 if (cda[CTA_EXPECT_MASTER-1])
1266 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER);
1267 else
1268 return -EINVAL;
1269
1270 if (err < 0)
1271 return err;
1272
1273 exp = ip_conntrack_expect_find_get(&tuple);
1274 if (!exp)
1275 return -ENOENT;
1276
1277 err = -ENOMEM;
1278 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1279 if (!skb2)
1280 goto out;
1281 NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
1282
1283 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
1284 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
1285 1, exp);
1286 if (err <= 0)
1287 goto out;
1288
1289 ip_conntrack_expect_put(exp);
1290
1291	/*
1292	 * netlink_unicast() consumes skb2 even on error, so the error
1293	 * case must not fall through to the kfree_skb() below.
1294	 */
1295	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1296	return err;
1297
1298out:
1299	ip_conntrack_expect_put(exp);
1300	if (skb2)
1301		kfree_skb(skb2);
1302	return err;
1303}
1304
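/*
 * DELETE supports three granularities: one expectation selected by
 * CTA_EXPECT_TUPLE (optionally cross-checked against CTA_EXPECT_ID),
 * all expectations registered by one helper (CTA_EXPECT_HELP_NAME),
 * or, with no attribute at all, a full flush.  Only expectations
 * whose timer can still be deleted are destroyed, which keeps the
 * removal from racing with a concurrent expiry.
 */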
1305static int
1306ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1307 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
1308{
1309 struct ip_conntrack_expect *exp, *tmp;
1310 struct ip_conntrack_tuple tuple;
1311 struct ip_conntrack_helper *h;
1312 int err;
1313
1314 if (cda[CTA_EXPECT_TUPLE-1]) {
1315 /* delete a single expect by tuple */
1316 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
1317 if (err < 0)
1318 return err;
1319
1320 /* bump usage count to 2 */
1321 exp = ip_conntrack_expect_find_get(&tuple);
1322 if (!exp)
1323 return -ENOENT;
1324
1325 if (cda[CTA_EXPECT_ID-1]) {
1326 u_int32_t id =
1327 *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
1328 if (exp->id != ntohl(id)) {
1329 ip_conntrack_expect_put(exp);
1330 return -ENOENT;
1331 }
1332 }
1333
1334 /* after list removal, usage count == 1 */
1335 ip_conntrack_unexpect_related(exp);
1336 /* have to put what we 'get' above.
1337 * after this line usage count == 0 */
1338 ip_conntrack_expect_put(exp);
1339 } else if (cda[CTA_EXPECT_HELP_NAME-1]) {
1340 char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]);
1341
1342 /* delete all expectations for this helper */
1343 write_lock_bh(&ip_conntrack_lock);
1344 h = __ip_conntrack_helper_find_byname(name);
1345 if (!h) {
1346 write_unlock_bh(&ip_conntrack_lock);
1347 return -EINVAL;
1348 }
1349 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
1350 list) {
1351 if (exp->master->helper == h
1352 && del_timer(&exp->timeout))
1353 __ip_ct_expect_unlink_destroy(exp);
1354 }
1355	write_unlock_bh(&ip_conntrack_lock);
1356 } else {
1357		/* This basically means we have to flush everything */
1358 write_lock_bh(&ip_conntrack_lock);
1359 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
1360 list) {
1361 if (del_timer(&exp->timeout))
1362 __ip_ct_expect_unlink_destroy(exp);
1363 }
1364 write_unlock_bh(&ip_conntrack_lock);
1365 }
1366
1367 return 0;
1368}
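/*
 * Changing an existing expectation in place is not implemented;
 * userspace gets -EOPNOTSUPP and has to delete and re-create instead.
 */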
1369static int
1370ctnetlink_change_expect(struct ip_conntrack_expect *x, struct nfattr *cda[])
1371{
1372 return -EOPNOTSUPP;
1373}
1374
1375static int
1376ctnetlink_create_expect(struct nfattr *cda[])
1377{
1378 struct ip_conntrack_tuple tuple, mask, master_tuple;
1379 struct ip_conntrack_tuple_hash *h = NULL;
1380 struct ip_conntrack_expect *exp;
1381 struct ip_conntrack *ct;
1382 int err = 0;
1383
1384 DEBUGP("entered %s\n", __FUNCTION__);
1385
1386 /* caller guarantees that those three CTA_EXPECT_* exist */
1387 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
1388 if (err < 0)
1389 return err;
1390 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK);
1391 if (err < 0)
1392 return err;
1393 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER);
1394 if (err < 0)
1395 return err;
1396
1397 /* Look for master conntrack of this expectation */
1398 h = ip_conntrack_find_get(&master_tuple, NULL);
1399 if (!h)
1400 return -ENOENT;
1401 ct = tuplehash_to_ctrack(h);
1402
1403 if (!ct->helper) {
1404 /* such conntrack hasn't got any helper, abort */
1405 err = -EINVAL;
1406 goto out;
1407 }
1408
1409 exp = ip_conntrack_expect_alloc(ct);
1410 if (!exp) {
1411 err = -ENOMEM;
1412 goto out;
1413 }
1414
1415 exp->expectfn = NULL;
1416 exp->master = ct;
1417 memcpy(&exp->tuple, &tuple, sizeof(struct ip_conntrack_tuple));
1418 memcpy(&exp->mask, &mask, sizeof(struct ip_conntrack_tuple));
1419
1420 err = ip_conntrack_expect_related(exp);
1421 ip_conntrack_expect_put(exp);
1422
1423out:
1424 ip_conntrack_put(tuplehash_to_ctrack(h));
1425 return err;
1426}
1427
1428static int
1429ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1430 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
1431{
1432 struct ip_conntrack_tuple tuple;
1433 struct ip_conntrack_expect *exp;
1434 int err = 0;
1435
1436 DEBUGP("entered %s\n", __FUNCTION__);
1437
1438 if (!cda[CTA_EXPECT_TUPLE-1]
1439 || !cda[CTA_EXPECT_MASK-1]
1440 || !cda[CTA_EXPECT_MASTER-1])
1441 return -EINVAL;
1442
1443 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
1444 if (err < 0)
1445 return err;
1446
1447 write_lock_bh(&ip_conntrack_lock);
1448 exp = __ip_conntrack_expect_find(&tuple);
1449
1450 if (!exp) {
1451 write_unlock_bh(&ip_conntrack_lock);
1452 err = -ENOENT;
1453 if (nlh->nlmsg_flags & NLM_F_CREATE)
1454 err = ctnetlink_create_expect(cda);
1455 return err;
1456 }
1457
1458 err = -EEXIST;
1459 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1460 err = ctnetlink_change_expect(exp, cda);
1461 write_unlock_bh(&ip_conntrack_lock);
1462
1463 DEBUGP("leaving\n");
1464
1465 return err;
1466}
1467
1468#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1469static struct notifier_block ctnl_notifier = {
1470 .notifier_call = ctnetlink_conntrack_event,
1471};
1472
1473static struct notifier_block ctnl_notifier_exp = {
1474 .notifier_call = ctnetlink_expect_event,
1475};
1476#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
1477
1478static struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
1479 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
1480 .attr_count = CTA_MAX,
1481 .cap_required = CAP_NET_ADMIN },
1482 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
1483 .attr_count = CTA_MAX,
1484 .cap_required = CAP_NET_ADMIN },
1485 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
1486 .attr_count = CTA_MAX,
1487 .cap_required = CAP_NET_ADMIN },
1488 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
1489 .attr_count = CTA_MAX,
1490 .cap_required = CAP_NET_ADMIN },
1491};
1492
1493static struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
1494 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
1495 .attr_count = CTA_EXPECT_MAX,
1496 .cap_required = CAP_NET_ADMIN },
1497 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
1498 .attr_count = CTA_EXPECT_MAX,
1499 .cap_required = CAP_NET_ADMIN },
1500 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
1501 .attr_count = CTA_EXPECT_MAX,
1502 .cap_required = CAP_NET_ADMIN },
1503};
1504
1505static struct nfnetlink_subsystem ctnl_subsys = {
1506 .name = "conntrack",
1507 .subsys_id = NFNL_SUBSYS_CTNETLINK,
1508 .cb_count = IPCTNL_MSG_MAX,
1509 .cb = ctnl_cb,
1510};
1511
1512static struct nfnetlink_subsystem ctnl_exp_subsys = {
1513 .name = "conntrack_expect",
1514 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
1515 .cb_count = IPCTNL_MSG_EXP_MAX,
1516 .cb = ctnl_exp_cb,
1517};
1518
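/*
 * Userspace usage sketch (not part of this file; assumes the standard
 * AF_NETLINK socket API and the nfnetlink header constants used
 * above).  A full table dump is requested by sending IPCTNL_MSG_CT_GET
 * with NLM_F_DUMP to a NETLINK_NETFILTER socket, roughly:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct nfgenmsg nfmsg;
 *	} req = {
 *		.nlh = {
 *			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct nfgenmsg)),
 *			.nlmsg_type  = (NFNL_SUBSYS_CTNETLINK << 8) |
 *				       IPCTNL_MSG_CT_GET,
 *			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		},
 *		.nfmsg = { .nfgen_family = AF_INET },
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *	... then recv() the NLMSG_DONE-terminated multipart reply ...
 *
 * The callback tables below route such requests to the handlers in
 * this file.
 */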
1519static int __init ctnetlink_init(void)
1520{
1521 int ret;
1522
1523 printk("ctnetlink v%s: registering with nfnetlink.\n", version);
1524 ret = nfnetlink_subsys_register(&ctnl_subsys);
1525 if (ret < 0) {
1526 printk("ctnetlink_init: cannot register with nfnetlink.\n");
1527 goto err_out;
1528 }
1529
1530 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
1531 if (ret < 0) {
1532 printk("ctnetlink_init: cannot register exp with nfnetlink.\n");
1533 goto err_unreg_subsys;
1534 }
1535
1536#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1537 ret = ip_conntrack_register_notifier(&ctnl_notifier);
1538 if (ret < 0) {
1539 printk("ctnetlink_init: cannot register notifier.\n");
1540 goto err_unreg_exp_subsys;
1541 }
1542
1543 ret = ip_conntrack_expect_register_notifier(&ctnl_notifier_exp);
1544 if (ret < 0) {
1545		printk("ctnetlink_init: cannot register expect notifier.\n");
1546 goto err_unreg_notifier;
1547 }
1548#endif
1549
1550 return 0;
1551
1552#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1553err_unreg_notifier:
1554 ip_conntrack_unregister_notifier(&ctnl_notifier);
1555err_unreg_exp_subsys:
1556 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
1557#endif
1558err_unreg_subsys:
1559 nfnetlink_subsys_unregister(&ctnl_subsys);
1560err_out:
1561 return ret;
1562}
1563
1564static void __exit ctnetlink_exit(void)
1565{
1566 printk("ctnetlink: unregistering from nfnetlink.\n");
1567
1568#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1569 ip_conntrack_unregister_notifier(&ctnl_notifier_exp);
1570 ip_conntrack_unregister_notifier(&ctnl_notifier);
1571#endif
1572
1573 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
1574 nfnetlink_subsys_unregister(&ctnl_subsys);
1575 return;
1576}
1577
1578module_init(ctnetlink_init);
1579module_exit(ctnetlink_exit);
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 602c74db3252..838d1d69b36e 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -102,22 +102,24 @@ static int icmp_packet(struct ip_conntrack *ct,
102 ct->timeout.function((unsigned long)ct); 102 ct->timeout.function((unsigned long)ct);
103 } else { 103 } else {
104 atomic_inc(&ct->proto.icmp.count); 104 atomic_inc(&ct->proto.icmp.count);
105 ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
105 ip_ct_refresh_acct(ct, ctinfo, skb, ip_ct_icmp_timeout); 106 ip_ct_refresh_acct(ct, ctinfo, skb, ip_ct_icmp_timeout);
106 } 107 }
107 108
108 return NF_ACCEPT; 109 return NF_ACCEPT;
109} 110}
110 111
112static u_int8_t valid_new[] = {
113 [ICMP_ECHO] = 1,
114 [ICMP_TIMESTAMP] = 1,
115 [ICMP_INFO_REQUEST] = 1,
116 [ICMP_ADDRESS] = 1
117};
118
111/* Called when a new connection for this protocol found. */ 119/* Called when a new connection for this protocol found. */
112static int icmp_new(struct ip_conntrack *conntrack, 120static int icmp_new(struct ip_conntrack *conntrack,
113 const struct sk_buff *skb) 121 const struct sk_buff *skb)
114{ 122{
115 static u_int8_t valid_new[]
116 = { [ICMP_ECHO] = 1,
117 [ICMP_TIMESTAMP] = 1,
118 [ICMP_INFO_REQUEST] = 1,
119 [ICMP_ADDRESS] = 1 };
120
121 if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) 123 if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
122 || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { 124 || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) {
123 /* Can't create a new ICMP `conn' with this. */ 125 /* Can't create a new ICMP `conn' with this. */
@@ -158,11 +160,12 @@ icmp_error_message(struct sk_buff *skb,
158 return NF_ACCEPT; 160 return NF_ACCEPT;
159 } 161 }
160 162
161 innerproto = ip_ct_find_proto(inside->ip.protocol); 163 innerproto = ip_conntrack_proto_find_get(inside->ip.protocol);
162 dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp) + inside->ip.ihl*4; 164 dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp) + inside->ip.ihl*4;
163 /* Are they talking about one of our connections? */ 165 /* Are they talking about one of our connections? */
164 if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) { 166 if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) {
165 DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol); 167 DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol);
168 ip_conntrack_proto_put(innerproto);
166 return NF_ACCEPT; 169 return NF_ACCEPT;
167 } 170 }
168 171
@@ -170,8 +173,10 @@ icmp_error_message(struct sk_buff *skb,
170 been preserved inside the ICMP. */ 173 been preserved inside the ICMP. */
171 if (!ip_ct_invert_tuple(&innertuple, &origtuple, innerproto)) { 174 if (!ip_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
172 DEBUGP("icmp_error_track: Can't invert tuple\n"); 175 DEBUGP("icmp_error_track: Can't invert tuple\n");
176 ip_conntrack_proto_put(innerproto);
173 return NF_ACCEPT; 177 return NF_ACCEPT;
174 } 178 }
179 ip_conntrack_proto_put(innerproto);
175 180
176 *ctinfo = IP_CT_RELATED; 181 *ctinfo = IP_CT_RELATED;
177 182
@@ -212,7 +217,7 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
212 icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih); 217 icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
213 if (icmph == NULL) { 218 if (icmph == NULL) {
214 if (LOG_INVALID(IPPROTO_ICMP)) 219 if (LOG_INVALID(IPPROTO_ICMP))
215 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 220 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
216 "ip_ct_icmp: short packet "); 221 "ip_ct_icmp: short packet ");
217 return -NF_ACCEPT; 222 return -NF_ACCEPT;
218 } 223 }
@@ -226,13 +231,13 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
226 if (!(u16)csum_fold(skb->csum)) 231 if (!(u16)csum_fold(skb->csum))
227 break; 232 break;
228 if (LOG_INVALID(IPPROTO_ICMP)) 233 if (LOG_INVALID(IPPROTO_ICMP))
229 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 234 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
230 "ip_ct_icmp: bad HW ICMP checksum "); 235 "ip_ct_icmp: bad HW ICMP checksum ");
231 return -NF_ACCEPT; 236 return -NF_ACCEPT;
232 case CHECKSUM_NONE: 237 case CHECKSUM_NONE:
233 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) { 238 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
234 if (LOG_INVALID(IPPROTO_ICMP)) 239 if (LOG_INVALID(IPPROTO_ICMP))
235 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 240 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
236 "ip_ct_icmp: bad ICMP checksum "); 241 "ip_ct_icmp: bad ICMP checksum ");
237 return -NF_ACCEPT; 242 return -NF_ACCEPT;
238 } 243 }
@@ -249,7 +254,7 @@ checksum_skipped:
249 */ 254 */
250 if (icmph->type > NR_ICMP_TYPES) { 255 if (icmph->type > NR_ICMP_TYPES) {
251 if (LOG_INVALID(IPPROTO_ICMP)) 256 if (LOG_INVALID(IPPROTO_ICMP))
252 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 257 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
253 "ip_ct_icmp: invalid ICMP type "); 258 "ip_ct_icmp: invalid ICMP type ");
254 return -NF_ACCEPT; 259 return -NF_ACCEPT;
255 } 260 }
@@ -265,6 +270,47 @@ checksum_skipped:
265 return icmp_error_message(skb, ctinfo, hooknum); 270 return icmp_error_message(skb, ctinfo, hooknum);
266} 271}
267 272
273#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
274 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
275static int icmp_tuple_to_nfattr(struct sk_buff *skb,
276 const struct ip_conntrack_tuple *t)
277{
278 NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(u_int16_t),
279 &t->src.u.icmp.id);
280 NFA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t),
281 &t->dst.u.icmp.type);
282 NFA_PUT(skb, CTA_PROTO_ICMP_CODE, sizeof(u_int8_t),
283 &t->dst.u.icmp.code);
284
285 if (t->dst.u.icmp.type >= sizeof(valid_new)
286 || !valid_new[t->dst.u.icmp.type])
287 return -EINVAL;
288
289 return 0;
290
291nfattr_failure:
292 return -1;
293}
294
295static int icmp_nfattr_to_tuple(struct nfattr *tb[],
296 struct ip_conntrack_tuple *tuple)
297{
298 if (!tb[CTA_PROTO_ICMP_TYPE-1]
299 || !tb[CTA_PROTO_ICMP_CODE-1]
300 || !tb[CTA_PROTO_ICMP_ID-1])
301 return -1;
302
303 tuple->dst.u.icmp.type =
304 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]);
305 tuple->dst.u.icmp.code =
306 *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
307 tuple->src.u.icmp.id =
308		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
309
310 return 0;
311}
312#endif
313
268struct ip_conntrack_protocol ip_conntrack_protocol_icmp = 314struct ip_conntrack_protocol ip_conntrack_protocol_icmp =
269{ 315{
270 .proto = IPPROTO_ICMP, 316 .proto = IPPROTO_ICMP,
@@ -276,4 +322,9 @@ struct ip_conntrack_protocol ip_conntrack_protocol_icmp =
276 .packet = icmp_packet, 322 .packet = icmp_packet,
277 .new = icmp_new, 323 .new = icmp_new,
278 .error = icmp_error, 324 .error = icmp_error,
325#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
326 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
327 .tuple_to_nfattr = icmp_tuple_to_nfattr,
328 .nfattr_to_tuple = icmp_nfattr_to_tuple,
329#endif
279}; 330};
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index 31d75390bf12..a875f35e576d 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -404,6 +404,8 @@ static int sctp_packet(struct ip_conntrack *conntrack,
404 } 404 }
405 405
406 conntrack->proto.sctp.state = newconntrack; 406 conntrack->proto.sctp.state = newconntrack;
407 if (oldsctpstate != newconntrack)
408 ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
407 write_unlock_bh(&sctp_lock); 409 write_unlock_bh(&sctp_lock);
408 } 410 }
409 411
@@ -503,7 +505,12 @@ static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = {
503 .packet = sctp_packet, 505 .packet = sctp_packet,
504 .new = sctp_new, 506 .new = sctp_new,
505 .destroy = NULL, 507 .destroy = NULL,
506 .me = THIS_MODULE 508 .me = THIS_MODULE,
509#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
510 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
511 .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
512 .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
513#endif
507}; 514};
508 515
509#ifdef CONFIG_SYSCTL 516#ifdef CONFIG_SYSCTL
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 809dfed766d4..f23ef1f88c46 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -336,6 +336,23 @@ static int tcp_print_conntrack(struct seq_file *s,
336 return seq_printf(s, "%s ", tcp_conntrack_names[state]); 336 return seq_printf(s, "%s ", tcp_conntrack_names[state]);
337} 337}
338 338
339#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
340 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
341static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
342 const struct ip_conntrack *ct)
343{
344 read_lock_bh(&tcp_lock);
345 NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
346 &ct->proto.tcp.state);
347 read_unlock_bh(&tcp_lock);
348
349 return 0;
350
351nfattr_failure:
352 return -1;
353}
354#endif
355
339static unsigned int get_conntrack_index(const struct tcphdr *tcph) 356static unsigned int get_conntrack_index(const struct tcphdr *tcph)
340{ 357{
341 if (tcph->rst) return TCP_RST_SET; 358 if (tcph->rst) return TCP_RST_SET;
@@ -699,7 +716,7 @@ static int tcp_in_window(struct ip_ct_tcp *state,
699 res = 1; 716 res = 1;
700 } else { 717 } else {
701 if (LOG_INVALID(IPPROTO_TCP)) 718 if (LOG_INVALID(IPPROTO_TCP))
702 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 719 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
703 "ip_ct_tcp: %s ", 720 "ip_ct_tcp: %s ",
704 before(seq, sender->td_maxend + 1) ? 721 before(seq, sender->td_maxend + 1) ?
705 after(end, sender->td_end - receiver->td_maxwin - 1) ? 722 after(end, sender->td_end - receiver->td_maxwin - 1) ?
@@ -798,7 +815,7 @@ static int tcp_error(struct sk_buff *skb,
798 sizeof(_tcph), &_tcph); 815 sizeof(_tcph), &_tcph);
799 if (th == NULL) { 816 if (th == NULL) {
800 if (LOG_INVALID(IPPROTO_TCP)) 817 if (LOG_INVALID(IPPROTO_TCP))
801 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 818 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
802 "ip_ct_tcp: short packet "); 819 "ip_ct_tcp: short packet ");
803 return -NF_ACCEPT; 820 return -NF_ACCEPT;
804 } 821 }
@@ -806,7 +823,7 @@ static int tcp_error(struct sk_buff *skb,
806 /* Not whole TCP header or malformed packet */ 823 /* Not whole TCP header or malformed packet */
807 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { 824 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
808 if (LOG_INVALID(IPPROTO_TCP)) 825 if (LOG_INVALID(IPPROTO_TCP))
809 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 826 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
810 "ip_ct_tcp: truncated/malformed packet "); 827 "ip_ct_tcp: truncated/malformed packet ");
811 return -NF_ACCEPT; 828 return -NF_ACCEPT;
812 } 829 }
@@ -823,7 +840,7 @@ static int tcp_error(struct sk_buff *skb,
823 skb->ip_summed == CHECKSUM_HW ? skb->csum 840 skb->ip_summed == CHECKSUM_HW ? skb->csum
824 : skb_checksum(skb, iph->ihl*4, tcplen, 0))) { 841 : skb_checksum(skb, iph->ihl*4, tcplen, 0))) {
825 if (LOG_INVALID(IPPROTO_TCP)) 842 if (LOG_INVALID(IPPROTO_TCP))
826 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 843 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
827 "ip_ct_tcp: bad TCP checksum "); 844 "ip_ct_tcp: bad TCP checksum ");
828 return -NF_ACCEPT; 845 return -NF_ACCEPT;
829 } 846 }
@@ -832,7 +849,7 @@ static int tcp_error(struct sk_buff *skb,
832 tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR)); 849 tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR));
833 if (!tcp_valid_flags[tcpflags]) { 850 if (!tcp_valid_flags[tcpflags]) {
834 if (LOG_INVALID(IPPROTO_TCP)) 851 if (LOG_INVALID(IPPROTO_TCP))
835 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 852 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
836 "ip_ct_tcp: invalid TCP flag combination "); 853 "ip_ct_tcp: invalid TCP flag combination ");
837 return -NF_ACCEPT; 854 return -NF_ACCEPT;
838 } 855 }
@@ -880,8 +897,9 @@ static int tcp_packet(struct ip_conntrack *conntrack,
880 */ 897 */
881 write_unlock_bh(&tcp_lock); 898 write_unlock_bh(&tcp_lock);
882 if (LOG_INVALID(IPPROTO_TCP)) 899 if (LOG_INVALID(IPPROTO_TCP))
883 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 900 nf_log_packet(PF_INET, 0, skb, NULL, NULL,
884 "ip_ct_tcp: killing out of sync session "); 901 NULL, "ip_ct_tcp: "
902 "killing out of sync session ");
885 if (del_timer(&conntrack->timeout)) 903 if (del_timer(&conntrack->timeout))
886 conntrack->timeout.function((unsigned long) 904 conntrack->timeout.function((unsigned long)
887 conntrack); 905 conntrack);
@@ -895,7 +913,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
895 913
896 write_unlock_bh(&tcp_lock); 914 write_unlock_bh(&tcp_lock);
897 if (LOG_INVALID(IPPROTO_TCP)) 915 if (LOG_INVALID(IPPROTO_TCP))
898 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 916 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
899 "ip_ct_tcp: invalid packet ignored "); 917 "ip_ct_tcp: invalid packet ignored ");
900 return NF_ACCEPT; 918 return NF_ACCEPT;
901 case TCP_CONNTRACK_MAX: 919 case TCP_CONNTRACK_MAX:
@@ -905,7 +923,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
905 old_state); 923 old_state);
906 write_unlock_bh(&tcp_lock); 924 write_unlock_bh(&tcp_lock);
907 if (LOG_INVALID(IPPROTO_TCP)) 925 if (LOG_INVALID(IPPROTO_TCP))
908 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 926 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
909 "ip_ct_tcp: invalid state "); 927 "ip_ct_tcp: invalid state ");
910 return -NF_ACCEPT; 928 return -NF_ACCEPT;
911 case TCP_CONNTRACK_SYN_SENT: 929 case TCP_CONNTRACK_SYN_SENT:
@@ -926,7 +944,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
926 write_unlock_bh(&tcp_lock); 944 write_unlock_bh(&tcp_lock);
927 if (LOG_INVALID(IPPROTO_TCP)) 945 if (LOG_INVALID(IPPROTO_TCP))
928 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 946 nf_log_packet(PF_INET, 0, skb, NULL, NULL,
929 "ip_ct_tcp: invalid SYN"); 947 NULL, "ip_ct_tcp: invalid SYN");
930 return -NF_ACCEPT; 948 return -NF_ACCEPT;
931 } 949 }
932 case TCP_CONNTRACK_CLOSE: 950 case TCP_CONNTRACK_CLOSE:
@@ -973,6 +991,10 @@ static int tcp_packet(struct ip_conntrack *conntrack,
973 ? ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state]; 991 ? ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state];
974 write_unlock_bh(&tcp_lock); 992 write_unlock_bh(&tcp_lock);
975 993
994 ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
995 if (new_state != old_state)
996 ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
997
976 if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { 998 if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
977 /* If only reply is a RST, we can consider ourselves not to 999 /* If only reply is a RST, we can consider ourselves not to
978 have an established connection: this is a fairly common 1000 have an established connection: this is a fairly common
@@ -1096,4 +1118,10 @@ struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
1096 .packet = tcp_packet, 1118 .packet = tcp_packet,
1097 .new = tcp_new, 1119 .new = tcp_new,
1098 .error = tcp_error, 1120 .error = tcp_error,
1121#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
1122 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
1123 .to_nfattr = tcp_to_nfattr,
1124 .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
1125 .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
1126#endif
1099}; 1127};
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
index 8c1eaba098d4..f2dcac7c7660 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
@@ -73,7 +73,8 @@ static int udp_packet(struct ip_conntrack *conntrack,
73 ip_ct_refresh_acct(conntrack, ctinfo, skb, 73 ip_ct_refresh_acct(conntrack, ctinfo, skb,
74 ip_ct_udp_timeout_stream); 74 ip_ct_udp_timeout_stream);
75 /* Also, more likely to be important, and not a probe */ 75 /* Also, more likely to be important, and not a probe */
76 set_bit(IPS_ASSURED_BIT, &conntrack->status); 76 if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
77 ip_conntrack_event_cache(IPCT_STATUS, skb);
77 } else 78 } else
78 ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_udp_timeout); 79 ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_udp_timeout);
79 80
@@ -97,7 +98,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
97 hdr = skb_header_pointer(skb, iph->ihl*4, sizeof(_hdr), &_hdr); 98 hdr = skb_header_pointer(skb, iph->ihl*4, sizeof(_hdr), &_hdr);
98 if (hdr == NULL) { 99 if (hdr == NULL) {
99 if (LOG_INVALID(IPPROTO_UDP)) 100 if (LOG_INVALID(IPPROTO_UDP))
100 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 101 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
101 "ip_ct_udp: short packet "); 102 "ip_ct_udp: short packet ");
102 return -NF_ACCEPT; 103 return -NF_ACCEPT;
103 } 104 }
@@ -105,7 +106,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
105 /* Truncated/malformed packets */ 106 /* Truncated/malformed packets */
106 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { 107 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
107 if (LOG_INVALID(IPPROTO_UDP)) 108 if (LOG_INVALID(IPPROTO_UDP))
108 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 109 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
109 "ip_ct_udp: truncated/malformed packet "); 110 "ip_ct_udp: truncated/malformed packet ");
110 return -NF_ACCEPT; 111 return -NF_ACCEPT;
111 } 112 }
@@ -125,7 +126,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
125 skb->ip_summed == CHECKSUM_HW ? skb->csum 126 skb->ip_summed == CHECKSUM_HW ? skb->csum
126 : skb_checksum(skb, iph->ihl*4, udplen, 0))) { 127 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
127 if (LOG_INVALID(IPPROTO_UDP)) 128 if (LOG_INVALID(IPPROTO_UDP))
128 nf_log_packet(PF_INET, 0, skb, NULL, NULL, 129 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
129 "ip_ct_udp: bad UDP checksum "); 130 "ip_ct_udp: bad UDP checksum ");
130 return -NF_ACCEPT; 131 return -NF_ACCEPT;
131 } 132 }
@@ -144,4 +145,9 @@ struct ip_conntrack_protocol ip_conntrack_protocol_udp =
144 .packet = udp_packet, 145 .packet = udp_packet,
145 .new = udp_new, 146 .new = udp_new,
146 .error = udp_error, 147 .error = udp_error,
148#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
149 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
150 .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
151 .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
152#endif
147}; 153};
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 61798c46e91d..ee5895afd0c3 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -5,7 +5,7 @@
5*/ 5*/
6 6
7/* (C) 1999-2001 Paul `Rusty' Russell 7/* (C) 1999-2001 Paul `Rusty' Russell
8 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> 8 * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -147,8 +147,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
147 if (DIRECTION(hash)) 147 if (DIRECTION(hash))
148 return 0; 148 return 0;
149 149
150 proto = ip_ct_find_proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL] 150 proto = __ip_conntrack_proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
151 .tuple.dst.protonum);
152 IP_NF_ASSERT(proto); 151 IP_NF_ASSERT(proto);
153 152
154 if (seq_printf(s, "%-8s %u %ld ", 153 if (seq_printf(s, "%-8s %u %ld ",
@@ -185,7 +184,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
185 return -ENOSPC; 184 return -ENOSPC;
186 185
187#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 186#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
188 if (seq_printf(s, "mark=%lu ", conntrack->mark)) 187 if (seq_printf(s, "mark=%u ", conntrack->mark))
189 return -ENOSPC; 188 return -ENOSPC;
190#endif 189#endif
191 190
@@ -283,7 +282,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
283 seq_printf(s, "proto=%u ", expect->tuple.dst.protonum); 282 seq_printf(s, "proto=%u ", expect->tuple.dst.protonum);
284 283
285 print_tuple(s, &expect->tuple, 284 print_tuple(s, &expect->tuple,
286 ip_ct_find_proto(expect->tuple.dst.protonum)); 285 __ip_conntrack_proto_find(expect->tuple.dst.protonum));
287 return seq_putc(s, '\n'); 286 return seq_putc(s, '\n');
288} 287}
289 288
@@ -889,6 +888,7 @@ static int init_or_cleanup(int init)
889 return ret; 888 return ret;
890 889
891 cleanup: 890 cleanup:
891 synchronize_net();
892#ifdef CONFIG_SYSCTL 892#ifdef CONFIG_SYSCTL
893 unregister_sysctl_table(ip_ct_sysctl_header); 893 unregister_sysctl_table(ip_ct_sysctl_header);
894 cleanup_localinops: 894 cleanup_localinops:
@@ -971,6 +971,14 @@ void need_ip_conntrack(void)
971{ 971{
972} 972}
973 973
974#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
975EXPORT_SYMBOL_GPL(ip_conntrack_chain);
976EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
977EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
978EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
979EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init);
980EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
981#endif
974EXPORT_SYMBOL(ip_conntrack_protocol_register); 982EXPORT_SYMBOL(ip_conntrack_protocol_register);
975EXPORT_SYMBOL(ip_conntrack_protocol_unregister); 983EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
976EXPORT_SYMBOL(ip_ct_get_tuple); 984EXPORT_SYMBOL(ip_ct_get_tuple);
@@ -982,12 +990,16 @@ EXPORT_SYMBOL(ip_conntrack_helper_register);
982EXPORT_SYMBOL(ip_conntrack_helper_unregister); 990EXPORT_SYMBOL(ip_conntrack_helper_unregister);
983EXPORT_SYMBOL(ip_ct_iterate_cleanup); 991EXPORT_SYMBOL(ip_ct_iterate_cleanup);
984EXPORT_SYMBOL(ip_ct_refresh_acct); 992EXPORT_SYMBOL(ip_ct_refresh_acct);
985EXPORT_SYMBOL(ip_ct_protos); 993
986EXPORT_SYMBOL(ip_ct_find_proto);
987EXPORT_SYMBOL(ip_conntrack_expect_alloc); 994EXPORT_SYMBOL(ip_conntrack_expect_alloc);
988EXPORT_SYMBOL(ip_conntrack_expect_put); 995EXPORT_SYMBOL(ip_conntrack_expect_put);
996EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
989EXPORT_SYMBOL(ip_conntrack_expect_related); 997EXPORT_SYMBOL(ip_conntrack_expect_related);
990EXPORT_SYMBOL(ip_conntrack_unexpect_related); 998EXPORT_SYMBOL(ip_conntrack_unexpect_related);
999EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
1000EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
1001EXPORT_SYMBOL_GPL(__ip_ct_expect_unlink_destroy);
1002
991EXPORT_SYMBOL(ip_conntrack_tuple_taken); 1003EXPORT_SYMBOL(ip_conntrack_tuple_taken);
992EXPORT_SYMBOL(ip_ct_gather_frags); 1004EXPORT_SYMBOL(ip_ct_gather_frags);
993EXPORT_SYMBOL(ip_conntrack_htable_size); 1005EXPORT_SYMBOL(ip_conntrack_htable_size);
@@ -995,7 +1007,28 @@ EXPORT_SYMBOL(ip_conntrack_lock);
995EXPORT_SYMBOL(ip_conntrack_hash); 1007EXPORT_SYMBOL(ip_conntrack_hash);
996EXPORT_SYMBOL(ip_conntrack_untracked); 1008EXPORT_SYMBOL(ip_conntrack_untracked);
997EXPORT_SYMBOL_GPL(ip_conntrack_find_get); 1009EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
998EXPORT_SYMBOL_GPL(ip_conntrack_put);
999#ifdef CONFIG_IP_NF_NAT_NEEDED 1010#ifdef CONFIG_IP_NF_NAT_NEEDED
1000EXPORT_SYMBOL(ip_conntrack_tcp_update); 1011EXPORT_SYMBOL(ip_conntrack_tcp_update);
1001#endif 1012#endif
1013
1014EXPORT_SYMBOL_GPL(ip_conntrack_flush);
1015EXPORT_SYMBOL_GPL(__ip_conntrack_find);
1016
1017EXPORT_SYMBOL_GPL(ip_conntrack_alloc);
1018EXPORT_SYMBOL_GPL(ip_conntrack_free);
1019EXPORT_SYMBOL_GPL(ip_conntrack_hash_insert);
1020
1021EXPORT_SYMBOL_GPL(ip_ct_remove_expectations);
1022
1023EXPORT_SYMBOL_GPL(ip_conntrack_helper_find_get);
1024EXPORT_SYMBOL_GPL(ip_conntrack_helper_put);
1025EXPORT_SYMBOL_GPL(__ip_conntrack_helper_find_byname);
1026
1027EXPORT_SYMBOL_GPL(ip_conntrack_proto_find_get);
1028EXPORT_SYMBOL_GPL(ip_conntrack_proto_put);
1029EXPORT_SYMBOL_GPL(__ip_conntrack_proto_find);
1030#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
1031 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
1032EXPORT_SYMBOL_GPL(ip_ct_port_tuple_to_nfattr);
1033EXPORT_SYMBOL_GPL(ip_ct_port_nfattr_to_tuple);
1034#endif
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 739b6dde1c82..1adedb743f60 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -47,8 +47,39 @@ DEFINE_RWLOCK(ip_nat_lock);
47static unsigned int ip_nat_htable_size; 47static unsigned int ip_nat_htable_size;
48 48
49static struct list_head *bysource; 49static struct list_head *bysource;
50
51#define MAX_IP_NAT_PROTO 256
50struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO]; 52struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO];
51 53
54static inline struct ip_nat_protocol *
55__ip_nat_proto_find(u_int8_t protonum)
56{
57 return ip_nat_protos[protonum];
58}
59
60struct ip_nat_protocol *
61ip_nat_proto_find_get(u_int8_t protonum)
62{
63 struct ip_nat_protocol *p;
64
65 /* we need to disable preemption to make sure 'p' doesn't get
66 * removed until we've grabbed the reference */
67 preempt_disable();
68 p = __ip_nat_proto_find(protonum);
69 if (p) {
70 if (!try_module_get(p->me))
71 p = &ip_nat_unknown_protocol;
72 }
73 preempt_enable();
74
75 return p;
76}
77
78void
79ip_nat_proto_put(struct ip_nat_protocol *p)
80{
81 module_put(p->me);
82}
52 83
53/* We keep an extra hash for each conntrack, for fast searching. */ 84/* We keep an extra hash for each conntrack, for fast searching. */
54static inline unsigned int 85static inline unsigned int
@@ -103,7 +134,8 @@ static int
103in_range(const struct ip_conntrack_tuple *tuple, 134in_range(const struct ip_conntrack_tuple *tuple,
104 const struct ip_nat_range *range) 135 const struct ip_nat_range *range)
105{ 136{
106 struct ip_nat_protocol *proto = ip_nat_find_proto(tuple->dst.protonum); 137 struct ip_nat_protocol *proto =
138 __ip_nat_proto_find(tuple->dst.protonum);
107 139
108 /* If we are supposed to map IPs, then we must be in the 140 /* If we are supposed to map IPs, then we must be in the
109 range specified, otherwise let this drag us onto a new src IP. */ 141 range specified, otherwise let this drag us onto a new src IP. */
@@ -216,8 +248,7 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
216 struct ip_conntrack *conntrack, 248 struct ip_conntrack *conntrack,
217 enum ip_nat_manip_type maniptype) 249 enum ip_nat_manip_type maniptype)
218{ 250{
219 struct ip_nat_protocol *proto 251 struct ip_nat_protocol *proto;
220 = ip_nat_find_proto(orig_tuple->dst.protonum);
221 252
222 /* 1) If this srcip/proto/src-proto-part is currently mapped, 253 /* 1) If this srcip/proto/src-proto-part is currently mapped,
223 and that same mapping gives a unique tuple within the given 254 and that same mapping gives a unique tuple within the given
@@ -242,14 +273,20 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
242 /* 3) The per-protocol part of the manip is made to map into 273 /* 3) The per-protocol part of the manip is made to map into
243 the range to make a unique tuple. */ 274 the range to make a unique tuple. */
244 275
276 proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
277
245 /* Only bother mapping if it's not already in range and unique */ 278 /* Only bother mapping if it's not already in range and unique */
246 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) 279 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
247 || proto->in_range(tuple, maniptype, &range->min, &range->max)) 280 || proto->in_range(tuple, maniptype, &range->min, &range->max))
248 && !ip_nat_used_tuple(tuple, conntrack)) 281 && !ip_nat_used_tuple(tuple, conntrack)) {
282 ip_nat_proto_put(proto);
249 return; 283 return;
284 }
250 285
251 /* Last chance: get protocol to try to obtain unique tuple. */ 286 /* Last chance: get protocol to try to obtain unique tuple. */
252 proto->unique_tuple(tuple, range, maniptype, conntrack); 287 proto->unique_tuple(tuple, range, maniptype, conntrack);
288
289 ip_nat_proto_put(proto);
253} 290}
254 291
255unsigned int 292unsigned int
@@ -320,17 +357,20 @@ manip_pkt(u_int16_t proto,
320 enum ip_nat_manip_type maniptype) 357 enum ip_nat_manip_type maniptype)
321{ 358{
322 struct iphdr *iph; 359 struct iphdr *iph;
360 struct ip_nat_protocol *p;
323 361
324 (*pskb)->nfcache |= NFC_ALTERED; 362 if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
325 if (!skb_ip_make_writable(pskb, iphdroff + sizeof(*iph)))
326 return 0; 363 return 0;
327 364
328 iph = (void *)(*pskb)->data + iphdroff; 365 iph = (void *)(*pskb)->data + iphdroff;
329 366
330 /* Manipulate protocol part. */ 367 /* Manipulate protocol part. */
331 if (!ip_nat_find_proto(proto)->manip_pkt(pskb, iphdroff, 368 p = ip_nat_proto_find_get(proto);
332 target, maniptype)) 369 if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
370 ip_nat_proto_put(p);
333 return 0; 371 return 0;
372 }
373 ip_nat_proto_put(p);
334 374
335 iph = (void *)(*pskb)->data + iphdroff; 375 iph = (void *)(*pskb)->data + iphdroff;
336 376
@@ -391,7 +431,7 @@ int icmp_reply_translation(struct sk_buff **pskb,
391 struct ip_conntrack_tuple inner, target; 431 struct ip_conntrack_tuple inner, target;
392 int hdrlen = (*pskb)->nh.iph->ihl * 4; 432 int hdrlen = (*pskb)->nh.iph->ihl * 4;
393 433
394 if (!skb_ip_make_writable(pskb, hdrlen + sizeof(*inside))) 434 if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
395 return 0; 435 return 0;
396 436
397 inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; 437 inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
@@ -426,7 +466,8 @@ int icmp_reply_translation(struct sk_buff **pskb,
426 466
427 if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 + 467 if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
428 sizeof(struct icmphdr) + inside->ip.ihl*4, 468 sizeof(struct icmphdr) + inside->ip.ihl*4,
429 &inner, ip_ct_find_proto(inside->ip.protocol))) 469 &inner,
470 __ip_conntrack_proto_find(inside->ip.protocol)))
430 return 0; 471 return 0;
431 472
432 /* Change inner back to look like incoming packet. We do the 473 /* Change inner back to look like incoming packet. We do the
@@ -496,6 +537,49 @@ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
496 synchronize_net(); 537 synchronize_net();
497} 538}
498 539
540#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
541 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
542int
543ip_nat_port_range_to_nfattr(struct sk_buff *skb,
544 const struct ip_nat_range *range)
545{
546 NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(u_int16_t),
547 &range->min.tcp.port);
548 NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(u_int16_t),
549 &range->max.tcp.port);
550
551 return 0;
552
553nfattr_failure:
554 return -1;
555}
556
557int
558ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
559{
560 int ret = 0;
561
562 /* we have to return whether we actually parsed something or not */
563
564 if (tb[CTA_PROTONAT_PORT_MIN-1]) {
565 ret = 1;
566 range->min.tcp.port =
567 *(u_int16_t *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
568 }
569
570 if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
571 if (ret)
572 range->max.tcp.port = range->min.tcp.port;
573 } else {
574 ret = 1;
575 range->max.tcp.port =
576 *(u_int16_t *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
577 }
578
579 return ret;
580}
581#endif
582
499int __init ip_nat_init(void) 583int __init ip_nat_init(void)
500{ 584{
501 size_t i; 585 size_t i;
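
Taken together, the ip_nat_core.c changes replace the unguarded ip_nat_find_proto() lookup with a reference-counted get/put pair, so a protocol module can no longer vanish while one of its callbacks is running. A minimal caller sketch, mirroring the manip_pkt() hunk above (the surrounding locals are assumed to be in scope):

	struct ip_nat_protocol *p;

	/* never NULL: falls back to ip_nat_unknown_protocol when
	 * try_module_get() on the real handler fails */
	p = ip_nat_proto_find_get(IPPROTO_TCP);

	if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
		ip_nat_proto_put(p);	/* drop the ref on the error path too */
		return 0;
	}

	ip_nat_proto_put(p);	/* release the module reference */
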
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index 158f34f32c04..d2dd5d313556 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -168,7 +168,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
168 struct tcphdr *tcph; 168 struct tcphdr *tcph;
169 int datalen; 169 int datalen;
170 170
171 if (!skb_ip_make_writable(pskb, (*pskb)->len)) 171 if (!skb_make_writable(pskb, (*pskb)->len))
172 return 0; 172 return 0;
173 173
174 if (rep_len > match_len 174 if (rep_len > match_len
@@ -228,7 +228,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
228 match_offset + match_len) 228 match_offset + match_len)
229 return 0; 229 return 0;
230 230
231 if (!skb_ip_make_writable(pskb, (*pskb)->len)) 231 if (!skb_make_writable(pskb, (*pskb)->len))
232 return 0; 232 return 0;
233 233
234 if (rep_len > match_len 234 if (rep_len > match_len
@@ -315,7 +315,7 @@ ip_nat_sack_adjust(struct sk_buff **pskb,
315 optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr); 315 optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
316 optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4; 316 optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
317 317
318 if (!skb_ip_make_writable(pskb, optend)) 318 if (!skb_make_writable(pskb, optend))
319 return 0; 319 return 0;
320 320
321 dir = CTINFO2DIR(ctinfo); 321 dir = CTINFO2DIR(ctinfo);
@@ -363,7 +363,7 @@ ip_nat_seq_adjust(struct sk_buff **pskb,
363 this_way = &ct->nat.info.seq[dir]; 363 this_way = &ct->nat.info.seq[dir];
364 other_way = &ct->nat.info.seq[!dir]; 364 other_way = &ct->nat.info.seq[!dir];
365 365
366 if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) 366 if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
367 return 0; 367 return 0;
368 368
369 tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; 369 tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
index 6596c9ee1655..938719043999 100644
--- a/net/ipv4/netfilter/ip_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c
@@ -62,7 +62,7 @@ icmp_manip_pkt(struct sk_buff **pskb,
62 struct icmphdr *hdr; 62 struct icmphdr *hdr;
63 unsigned int hdroff = iphdroff + iph->ihl*4; 63 unsigned int hdroff = iphdroff + iph->ihl*4;
64 64
65 if (!skb_ip_make_writable(pskb, hdroff + sizeof(*hdr))) 65 if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
66 return 0; 66 return 0;
67 67
68 hdr = (struct icmphdr *)((*pskb)->data + hdroff); 68 hdr = (struct icmphdr *)((*pskb)->data + hdroff);
@@ -106,11 +106,18 @@ icmp_print_range(char *buffer, const struct ip_nat_range *range)
106 else return 0; 106 else return 0;
107} 107}
108 108
109struct ip_nat_protocol ip_nat_protocol_icmp 109struct ip_nat_protocol ip_nat_protocol_icmp = {
110= { "ICMP", IPPROTO_ICMP, 110 .name = "ICMP",
111 icmp_manip_pkt, 111 .protonum = IPPROTO_ICMP,
112 icmp_in_range, 112 .me = THIS_MODULE,
113 icmp_unique_tuple, 113 .manip_pkt = icmp_manip_pkt,
114 icmp_print, 114 .in_range = icmp_in_range,
115 icmp_print_range 115 .unique_tuple = icmp_unique_tuple,
116 .print = icmp_print,
117 .print_range = icmp_print_range,
118#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
119 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
120 .range_to_nfattr = ip_nat_port_range_to_nfattr,
121 .nfattr_to_range = ip_nat_port_nfattr_to_range,
122#endif
116}; 123};
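
The ip_nat_protocol tables here and in the three files below are also converted from positional to C99 designated initializers. That is what makes the newly added fields safe to introduce: .me and the two conditional nfattr callbacks are set only where they matter, while any member left unnamed (as in the unknown-protocol table, which carries no .protonum) is simply zero-filled.
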
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index a98e36d2b3c6..1d381bf68574 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -12,6 +12,7 @@
12#include <linux/ip.h> 12#include <linux/ip.h>
13#include <linux/tcp.h> 13#include <linux/tcp.h>
14#include <linux/if.h> 14#include <linux/if.h>
15#include <linux/netfilter/nfnetlink_conntrack.h>
15#include <linux/netfilter_ipv4/ip_nat.h> 16#include <linux/netfilter_ipv4/ip_nat.h>
16#include <linux/netfilter_ipv4/ip_nat_rule.h> 17#include <linux/netfilter_ipv4/ip_nat_rule.h>
17#include <linux/netfilter_ipv4/ip_nat_protocol.h> 18#include <linux/netfilter_ipv4/ip_nat_protocol.h>
@@ -102,7 +103,7 @@ tcp_manip_pkt(struct sk_buff **pskb,
102 if ((*pskb)->len >= hdroff + sizeof(struct tcphdr)) 103 if ((*pskb)->len >= hdroff + sizeof(struct tcphdr))
103 hdrsize = sizeof(struct tcphdr); 104 hdrsize = sizeof(struct tcphdr);
104 105
105 if (!skb_ip_make_writable(pskb, hdroff + hdrsize)) 106 if (!skb_make_writable(pskb, hdroff + hdrsize))
106 return 0; 107 return 0;
107 108
108 iph = (struct iphdr *)((*pskb)->data + iphdroff); 109 iph = (struct iphdr *)((*pskb)->data + iphdroff);
@@ -169,11 +170,18 @@ tcp_print_range(char *buffer, const struct ip_nat_range *range)
169 else return 0; 170 else return 0;
170} 171}
171 172
172struct ip_nat_protocol ip_nat_protocol_tcp 173struct ip_nat_protocol ip_nat_protocol_tcp = {
173= { "TCP", IPPROTO_TCP, 174 .name = "TCP",
174 tcp_manip_pkt, 175 .protonum = IPPROTO_TCP,
175 tcp_in_range, 176 .me = THIS_MODULE,
176 tcp_unique_tuple, 177 .manip_pkt = tcp_manip_pkt,
177 tcp_print, 178 .in_range = tcp_in_range,
178 tcp_print_range 179 .unique_tuple = tcp_unique_tuple,
180 .print = tcp_print,
181 .print_range = tcp_print_range,
182#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
183 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
184 .range_to_nfattr = ip_nat_port_range_to_nfattr,
185 .nfattr_to_range = ip_nat_port_nfattr_to_range,
186#endif
179}; 187};
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 9f66e5625664..c4906e1aa24a 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -94,7 +94,7 @@ udp_manip_pkt(struct sk_buff **pskb,
94 u32 oldip, newip; 94 u32 oldip, newip;
95 u16 *portptr, newport; 95 u16 *portptr, newport;
96 96
97 if (!skb_ip_make_writable(pskb, hdroff + sizeof(*hdr))) 97 if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
98 return 0; 98 return 0;
99 99
100 iph = (struct iphdr *)((*pskb)->data + iphdroff); 100 iph = (struct iphdr *)((*pskb)->data + iphdroff);
@@ -156,11 +156,18 @@ udp_print_range(char *buffer, const struct ip_nat_range *range)
156 else return 0; 156 else return 0;
157} 157}
158 158
159struct ip_nat_protocol ip_nat_protocol_udp 159struct ip_nat_protocol ip_nat_protocol_udp = {
160= { "UDP", IPPROTO_UDP, 160 .name = "UDP",
161 udp_manip_pkt, 161 .protonum = IPPROTO_UDP,
162 udp_in_range, 162 .me = THIS_MODULE,
163 udp_unique_tuple, 163 .manip_pkt = udp_manip_pkt,
164 udp_print, 164 .in_range = udp_in_range,
165 udp_print_range 165 .unique_tuple = udp_unique_tuple,
166 .print = udp_print,
167 .print_range = udp_print_range,
168#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
169 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
170 .range_to_nfattr = ip_nat_port_range_to_nfattr,
171 .nfattr_to_range = ip_nat_port_nfattr_to_range,
172#endif
166}; 173};
diff --git a/net/ipv4/netfilter/ip_nat_proto_unknown.c b/net/ipv4/netfilter/ip_nat_proto_unknown.c
index f5525bd58d16..99bbef56f84e 100644
--- a/net/ipv4/netfilter/ip_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/ip_nat_proto_unknown.c
@@ -61,10 +61,11 @@ unknown_print_range(char *buffer, const struct ip_nat_range *range)
61} 61}
62 62
63struct ip_nat_protocol ip_nat_unknown_protocol = { 63struct ip_nat_protocol ip_nat_unknown_protocol = {
64 "unknown", 0, 64 .name = "unknown",
65 unknown_manip_pkt, 65 .me = THIS_MODULE,
66 unknown_in_range, 66 .manip_pkt = unknown_manip_pkt,
67 unknown_unique_tuple, 67 .in_range = unknown_in_range,
68 unknown_print, 68 .unique_tuple = unknown_unique_tuple,
69 unknown_print_range 69 .print = unknown_print,
70 .print_range = unknown_print_range
70}; 71};
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 2a48b6e635ae..93b2c5111bb2 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1275,7 +1275,7 @@ static int help(struct sk_buff **pskb,
1275 return NF_DROP; 1275 return NF_DROP;
1276 } 1276 }
1277 1277
1278 if (!skb_ip_make_writable(pskb, (*pskb)->len)) 1278 if (!skb_make_writable(pskb, (*pskb)->len))
1279 return NF_DROP; 1279 return NF_DROP;
1280 1280
1281 spin_lock_bh(&snmp_lock); 1281 spin_lock_bh(&snmp_lock);
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 91d5ea1dbbc9..89db052add81 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -73,8 +73,6 @@ ip_nat_fn(unsigned int hooknum,
73 IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off 73 IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off
74 & htons(IP_MF|IP_OFFSET))); 74 & htons(IP_MF|IP_OFFSET)));
75 75
76 (*pskb)->nfcache |= NFC_UNKNOWN;
77
78 /* If we had a hardware checksum before, it's now invalid */ 76 /* If we had a hardware checksum before, it's now invalid */
79 if ((*pskb)->ip_summed == CHECKSUM_HW) 77 if ((*pskb)->ip_summed == CHECKSUM_HW)
80 if (skb_checksum_help(*pskb, (out == NULL))) 78 if (skb_checksum_help(*pskb, (out == NULL)))
@@ -396,6 +394,8 @@ module_exit(fini);
396EXPORT_SYMBOL(ip_nat_setup_info); 394EXPORT_SYMBOL(ip_nat_setup_info);
397EXPORT_SYMBOL(ip_nat_protocol_register); 395EXPORT_SYMBOL(ip_nat_protocol_register);
398EXPORT_SYMBOL(ip_nat_protocol_unregister); 396EXPORT_SYMBOL(ip_nat_protocol_unregister);
397EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
398EXPORT_SYMBOL_GPL(ip_nat_proto_put);
399EXPORT_SYMBOL(ip_nat_cheat_check); 399EXPORT_SYMBOL(ip_nat_cheat_check);
400EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); 400EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
401EXPORT_SYMBOL(ip_nat_mangle_udp_packet); 401EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index c6baa8174389..d54f14d926f6 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -43,17 +43,10 @@
43#define NET_IPQ_QMAX 2088 43#define NET_IPQ_QMAX 2088
44#define NET_IPQ_QMAX_NAME "ip_queue_maxlen" 44#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
45 45
46struct ipq_rt_info {
47 __u8 tos;
48 __u32 daddr;
49 __u32 saddr;
50};
51
52struct ipq_queue_entry { 46struct ipq_queue_entry {
53 struct list_head list; 47 struct list_head list;
54 struct nf_info *info; 48 struct nf_info *info;
55 struct sk_buff *skb; 49 struct sk_buff *skb;
56 struct ipq_rt_info rt_info;
57}; 50};
58 51
59typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long); 52typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
@@ -247,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
247 240
248 pmsg->packet_id = (unsigned long )entry; 241 pmsg->packet_id = (unsigned long )entry;
249 pmsg->data_len = data_len; 242 pmsg->data_len = data_len;
250 pmsg->timestamp_sec = entry->skb->stamp.tv_sec; 243 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec;
251 pmsg->timestamp_usec = entry->skb->stamp.tv_usec; 244 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec;
252 pmsg->mark = entry->skb->nfmark; 245 pmsg->mark = entry->skb->nfmark;
253 pmsg->hook = entry->info->hook; 246 pmsg->hook = entry->info->hook;
254 pmsg->hw_protocol = entry->skb->protocol; 247 pmsg->hw_protocol = entry->skb->protocol;
@@ -287,7 +280,8 @@ nlmsg_failure:
287} 280}
288 281
289static int 282static int
290ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data) 283ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
284 unsigned int queuenum, void *data)
291{ 285{
292 int status = -EINVAL; 286 int status = -EINVAL;
293 struct sk_buff *nskb; 287 struct sk_buff *nskb;
@@ -305,14 +299,6 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
305 entry->info = info; 299 entry->info = info;
306 entry->skb = skb; 300 entry->skb = skb;
307 301
308 if (entry->info->hook == NF_IP_LOCAL_OUT) {
309 struct iphdr *iph = skb->nh.iph;
310
311 entry->rt_info.tos = iph->tos;
312 entry->rt_info.daddr = iph->daddr;
313 entry->rt_info.saddr = iph->saddr;
314 }
315
316 nskb = ipq_build_packet_message(entry, &status); 302 nskb = ipq_build_packet_message(entry, &status);
317 if (nskb == NULL) 303 if (nskb == NULL)
318 goto err_out_free; 304 goto err_out_free;
@@ -388,24 +374,11 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
388 } 374 }
389 skb_put(e->skb, diff); 375 skb_put(e->skb, diff);
390 } 376 }
391 if (!skb_ip_make_writable(&e->skb, v->data_len)) 377 if (!skb_make_writable(&e->skb, v->data_len))
392 return -ENOMEM; 378 return -ENOMEM;
393 memcpy(e->skb->data, v->payload, v->data_len); 379 memcpy(e->skb->data, v->payload, v->data_len);
394 e->skb->ip_summed = CHECKSUM_NONE; 380 e->skb->ip_summed = CHECKSUM_NONE;
395 e->skb->nfcache |= NFC_ALTERED; 381
396
397 /*
398 * Extra routing may be needed on local out, as the QUEUE target never
399 * returns control to the table.
400 */
401 if (e->info->hook == NF_IP_LOCAL_OUT) {
402 struct iphdr *iph = e->skb->nh.iph;
403
404 if (!(iph->tos == e->rt_info.tos
405 && iph->daddr == e->rt_info.daddr
406 && iph->saddr == e->rt_info.saddr))
407 return ip_route_me_harder(&e->skb);
408 }
409 return 0; 382 return 0;
410} 383}
411 384
@@ -683,6 +656,11 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
683} 656}
684#endif /* CONFIG_PROC_FS */ 657#endif /* CONFIG_PROC_FS */
685 658
659static struct nf_queue_handler nfqh = {
660 .name = "ip_queue",
661 .outfn = &ipq_enqueue_packet,
662};
663
686static int 664static int
687init_or_cleanup(int init) 665init_or_cleanup(int init)
688{ 666{
@@ -693,7 +671,8 @@ init_or_cleanup(int init)
693 goto cleanup; 671 goto cleanup;
694 672
695 netlink_register_notifier(&ipq_nl_notifier); 673 netlink_register_notifier(&ipq_nl_notifier);
696 ipqnl = netlink_kernel_create(NETLINK_FIREWALL, ipq_rcv_sk); 674 ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
675 THIS_MODULE);
697 if (ipqnl == NULL) { 676 if (ipqnl == NULL) {
698 printk(KERN_ERR "ip_queue: failed to create netlink socket\n"); 677 printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
699 goto cleanup_netlink_notifier; 678 goto cleanup_netlink_notifier;
@@ -710,7 +689,7 @@ init_or_cleanup(int init)
710 register_netdevice_notifier(&ipq_dev_notifier); 689 register_netdevice_notifier(&ipq_dev_notifier);
711 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0); 690 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0);
712 691
713 status = nf_register_queue_handler(PF_INET, ipq_enqueue_packet, NULL); 692 status = nf_register_queue_handler(PF_INET, &nfqh);
714 if (status < 0) { 693 if (status < 0) {
715 printk(KERN_ERR "ip_queue: failed to register queue handler\n"); 694 printk(KERN_ERR "ip_queue: failed to register queue handler\n");
716 goto cleanup_sysctl; 695 goto cleanup_sysctl;
@@ -718,7 +697,7 @@ init_or_cleanup(int init)
718 return status; 697 return status;
719 698
720cleanup: 699cleanup:
721 nf_unregister_queue_handler(PF_INET); 700 nf_unregister_queue_handlers(&nfqh);
722 synchronize_net(); 701 synchronize_net();
723 ipq_flush(NF_DROP); 702 ipq_flush(NF_DROP);
724 703
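
ip_queue moves from the old single-callback queue API to a descriptor-based one: the handler is described by a struct nf_queue_handler, and the enqueue callback now receives the target queue number. A hypothetical handler in the new shape (all names invented for illustration):

	static int my_enqueue(struct sk_buff *skb, struct nf_info *info,
			      unsigned int queuenum, void *data)
	{
		/* hand skb off to some userspace transport, keyed by
		 * queuenum; return 0 on success, negative on error */
		return 0;
	}

	static struct nf_queue_handler my_handler = {
		.name	= "my_queue",
		.outfn	= &my_enqueue,
	};

	/* paired with nf_unregister_queue_handlers(&my_handler) on teardown */
	status = nf_register_queue_handler(PF_INET, &my_handler);

The ipq_rt_info bookkeeping for re-routing locally generated packets disappears from this file at the same time; rerouting is presumably handled centrally by the reworked queue core rather than by each handler.
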
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c88dfcd38c56..eef99a1b5de6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -312,7 +312,6 @@ ipt_do_table(struct sk_buff **pskb,
312 do { 312 do {
313 IP_NF_ASSERT(e); 313 IP_NF_ASSERT(e);
314 IP_NF_ASSERT(back); 314 IP_NF_ASSERT(back);
315 (*pskb)->nfcache |= e->nfcache;
316 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) { 315 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
317 struct ipt_entry_target *t; 316 struct ipt_entry_target *t;
318 317
@@ -341,8 +340,8 @@ ipt_do_table(struct sk_buff **pskb,
341 back->comefrom); 340 back->comefrom);
342 continue; 341 continue;
343 } 342 }
344 if (table_base + v 343 if (table_base + v != (void *)e + e->next_offset
345 != (void *)e + e->next_offset) { 344 && !(e->ip.flags & IPT_F_GOTO)) {
346 /* Save old back ptr in next entry */ 345 /* Save old back ptr in next entry */
347 struct ipt_entry *next 346 struct ipt_entry *next
348 = (void *)e + e->next_offset; 347 = (void *)e + e->next_offset;
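
The second ip_tables.c hunk implements goto semantics for chain jumps: an ordinary jump saves a back pointer in the next entry so traversal resumes there when the called chain returns, but with IPT_F_GOTO set the back pointer is left untouched and control never comes back, matching what iptables exposes as -g/--goto in contrast to -j/--jump.
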
diff --git a/net/ipv4/netfilter/ipt_CLASSIFY.c b/net/ipv4/netfilter/ipt_CLASSIFY.c
index 9842e6e23184..dab78d8bd494 100644
--- a/net/ipv4/netfilter/ipt_CLASSIFY.c
+++ b/net/ipv4/netfilter/ipt_CLASSIFY.c
@@ -32,10 +32,8 @@ target(struct sk_buff **pskb,
32{ 32{
33 const struct ipt_classify_target_info *clinfo = targinfo; 33 const struct ipt_classify_target_info *clinfo = targinfo;
34 34
35 if((*pskb)->priority != clinfo->priority) { 35 if((*pskb)->priority != clinfo->priority)
36 (*pskb)->priority = clinfo->priority; 36 (*pskb)->priority = clinfo->priority;
37 (*pskb)->nfcache |= NFC_ALTERED;
38 }
39 37
40 return IPT_CONTINUE; 38 return IPT_CONTINUE;
41} 39}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 6706d3a1bc4f..2d05cafec221 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -367,7 +367,7 @@ target(struct sk_buff **pskb,
367#ifdef DEBUG_CLUSTERP 367#ifdef DEBUG_CLUSTERP
368 DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 368 DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
369#endif 369#endif
370 DEBUGP("hash=%u ct_hash=%lu ", hash, ct->mark); 370 DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark);
371 if (!clusterip_responsible(cipinfo->config, hash)) { 371 if (!clusterip_responsible(cipinfo->config, hash)) {
372 DEBUGP("not responsible\n"); 372 DEBUGP("not responsible\n");
373 return NF_DROP; 373 return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/ipv4/netfilter/ipt_CONNMARK.c
index 30ddd3e18eb7..134638021339 100644
--- a/net/ipv4/netfilter/ipt_CONNMARK.c
+++ b/net/ipv4/netfilter/ipt_CONNMARK.c
@@ -40,9 +40,9 @@ target(struct sk_buff **pskb,
40 void *userinfo) 40 void *userinfo)
41{ 41{
42 const struct ipt_connmark_target_info *markinfo = targinfo; 42 const struct ipt_connmark_target_info *markinfo = targinfo;
43 unsigned long diff; 43 u_int32_t diff;
44 unsigned long nfmark; 44 u_int32_t nfmark;
45 unsigned long newmark; 45 u_int32_t newmark;
46 46
47 enum ip_conntrack_info ctinfo; 47 enum ip_conntrack_info ctinfo;
48 struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo); 48 struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo);
@@ -61,10 +61,8 @@ target(struct sk_buff **pskb,
61 case IPT_CONNMARK_RESTORE: 61 case IPT_CONNMARK_RESTORE:
62 nfmark = (*pskb)->nfmark; 62 nfmark = (*pskb)->nfmark;
63 diff = (ct->mark ^ nfmark) & markinfo->mask; 63 diff = (ct->mark ^ nfmark) & markinfo->mask;
64 if (diff != 0) { 64 if (diff != 0)
65 (*pskb)->nfmark = nfmark ^ diff; 65 (*pskb)->nfmark = nfmark ^ diff;
66 (*pskb)->nfcache |= NFC_ALTERED;
67 }
68 break; 66 break;
69 } 67 }
70 } 68 }
@@ -94,6 +92,11 @@ checkentry(const char *tablename,
94 } 92 }
95 } 93 }
96 94
95 if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) {
96 printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
97 return 0;
98 }
99
97 return 1; 100 return 1;
98} 101}
99 102
diff --git a/net/ipv4/netfilter/ipt_DSCP.c b/net/ipv4/netfilter/ipt_DSCP.c
index 3ea4509099f9..6e319570a28c 100644
--- a/net/ipv4/netfilter/ipt_DSCP.c
+++ b/net/ipv4/netfilter/ipt_DSCP.c
@@ -39,7 +39,7 @@ target(struct sk_buff **pskb,
39 if (((*pskb)->nh.iph->tos & IPT_DSCP_MASK) != sh_dscp) { 39 if (((*pskb)->nh.iph->tos & IPT_DSCP_MASK) != sh_dscp) {
40 u_int16_t diffs[2]; 40 u_int16_t diffs[2];
41 41
42 if (!skb_ip_make_writable(pskb, sizeof(struct iphdr))) 42 if (!skb_make_writable(pskb, sizeof(struct iphdr)))
43 return NF_DROP; 43 return NF_DROP;
44 44
45 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF; 45 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF;
@@ -51,7 +51,6 @@ target(struct sk_buff **pskb,
51 sizeof(diffs), 51 sizeof(diffs),
52 (*pskb)->nh.iph->check 52 (*pskb)->nh.iph->check
53 ^ 0xFFFF)); 53 ^ 0xFFFF));
54 (*pskb)->nfcache |= NFC_ALTERED;
55 } 54 }
56 return IPT_CONTINUE; 55 return IPT_CONTINUE;
57} 56}
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 94a0ce1c1c9d..a1319693f648 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -31,7 +31,7 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
31 != (einfo->ip_ect & IPT_ECN_IP_MASK)) { 31 != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
32 u_int16_t diffs[2]; 32 u_int16_t diffs[2];
33 33
34 if (!skb_ip_make_writable(pskb, sizeof(struct iphdr))) 34 if (!skb_make_writable(pskb, sizeof(struct iphdr)))
35 return 0; 35 return 0;
36 36
37 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF; 37 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF;
@@ -43,7 +43,6 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
43 sizeof(diffs), 43 sizeof(diffs),
44 (*pskb)->nh.iph->check 44 (*pskb)->nh.iph->check
45 ^0xFFFF)); 45 ^0xFFFF));
46 (*pskb)->nfcache |= NFC_ALTERED;
47 } 46 }
48 return 1; 47 return 1;
49} 48}
@@ -67,7 +66,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
67 tcph->cwr == einfo->proto.tcp.cwr))) 66 tcph->cwr == einfo->proto.tcp.cwr)))
68 return 1; 67 return 1;
69 68
70 if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) 69 if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
71 return 0; 70 return 0;
72 tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4; 71 tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4;
73 72
@@ -87,7 +86,6 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo, int inward)
87 tcph->check = csum_fold(csum_partial((char *)diffs, 86 tcph->check = csum_fold(csum_partial((char *)diffs,
88 sizeof(diffs), 87 sizeof(diffs),
89 tcph->check^0xFFFF)); 88 tcph->check^0xFFFF));
90 (*pskb)->nfcache |= NFC_ALTERED;
91 return 1; 89 return 1;
92} 90}
93 91
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index ef08733d26da..92ed050fac69 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -27,10 +27,6 @@ MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
28MODULE_DESCRIPTION("iptables syslog logging module"); 28MODULE_DESCRIPTION("iptables syslog logging module");
29 29
30static unsigned int nflog = 1;
31module_param(nflog, int, 0400);
32MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
33
34#if 0 30#if 0
35#define DEBUGP printk 31#define DEBUGP printk
36#else 32#else
@@ -41,11 +37,17 @@ MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
41static DEFINE_SPINLOCK(log_lock); 37static DEFINE_SPINLOCK(log_lock);
42 38
43/* One level of recursion won't kill us */ 39/* One level of recursion won't kill us */
44static void dump_packet(const struct ipt_log_info *info, 40static void dump_packet(const struct nf_loginfo *info,
45 const struct sk_buff *skb, 41 const struct sk_buff *skb,
46 unsigned int iphoff) 42 unsigned int iphoff)
47{ 43{
48 struct iphdr _iph, *ih; 44 struct iphdr _iph, *ih;
45 unsigned int logflags;
46
47 if (info->type == NF_LOG_TYPE_LOG)
48 logflags = info->u.log.logflags;
49 else
50 logflags = NF_LOG_MASK;
49 51
50 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph); 52 ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
51 if (ih == NULL) { 53 if (ih == NULL) {
@@ -76,7 +78,7 @@ static void dump_packet(const struct ipt_log_info *info,
76 if (ntohs(ih->frag_off) & IP_OFFSET) 78 if (ntohs(ih->frag_off) & IP_OFFSET)
77 printk("FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET); 79 printk("FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
78 80
79 if ((info->logflags & IPT_LOG_IPOPT) 81 if ((logflags & IPT_LOG_IPOPT)
80 && ih->ihl * 4 > sizeof(struct iphdr)) { 82 && ih->ihl * 4 > sizeof(struct iphdr)) {
81 unsigned char _opt[4 * 15 - sizeof(struct iphdr)], *op; 83 unsigned char _opt[4 * 15 - sizeof(struct iphdr)], *op;
82 unsigned int i, optsize; 84 unsigned int i, optsize;
@@ -119,7 +121,7 @@ static void dump_packet(const struct ipt_log_info *info,
119 printk("SPT=%u DPT=%u ", 121 printk("SPT=%u DPT=%u ",
120 ntohs(th->source), ntohs(th->dest)); 122 ntohs(th->source), ntohs(th->dest));
121 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ 123 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
122 if (info->logflags & IPT_LOG_TCPSEQ) 124 if (logflags & IPT_LOG_TCPSEQ)
123 printk("SEQ=%u ACK=%u ", 125 printk("SEQ=%u ACK=%u ",
124 ntohl(th->seq), ntohl(th->ack_seq)); 126 ntohl(th->seq), ntohl(th->ack_seq));
125 /* Max length: 13 "WINDOW=65535 " */ 127 /* Max length: 13 "WINDOW=65535 " */
@@ -146,7 +148,7 @@ static void dump_packet(const struct ipt_log_info *info,
146 /* Max length: 11 "URGP=65535 " */ 148 /* Max length: 11 "URGP=65535 " */
147 printk("URGP=%u ", ntohs(th->urg_ptr)); 149 printk("URGP=%u ", ntohs(th->urg_ptr));
148 150
149 if ((info->logflags & IPT_LOG_TCPOPT) 151 if ((logflags & IPT_LOG_TCPOPT)
150 && th->doff * 4 > sizeof(struct tcphdr)) { 152 && th->doff * 4 > sizeof(struct tcphdr)) {
151 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)]; 153 unsigned char _opt[4 * 15 - sizeof(struct tcphdr)];
152 unsigned char *op; 154 unsigned char *op;
@@ -328,7 +330,7 @@ static void dump_packet(const struct ipt_log_info *info,
328 } 330 }
329 331
330 /* Max length: 15 "UID=4294967295 " */ 332 /* Max length: 15 "UID=4294967295 " */
331 if ((info->logflags & IPT_LOG_UID) && !iphoff && skb->sk) { 333 if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
332 read_lock_bh(&skb->sk->sk_callback_lock); 334 read_lock_bh(&skb->sk->sk_callback_lock);
333 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 335 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
334 printk("UID=%u ", skb->sk->sk_socket->file->f_uid); 336 printk("UID=%u ", skb->sk->sk_socket->file->f_uid);
@@ -349,19 +351,31 @@ static void dump_packet(const struct ipt_log_info *info,
349 /* maxlen = 230+ 91 + 230 + 252 = 803 */ 351 /* maxlen = 230+ 91 + 230 + 252 = 803 */
350} 352}
351 353
354struct nf_loginfo default_loginfo = {
355 .type = NF_LOG_TYPE_LOG,
356 .u = {
357 .log = {
358 .level = 0,
359 .logflags = NF_LOG_MASK,
360 },
361 },
362};
363
352static void 364static void
353ipt_log_packet(unsigned int hooknum, 365ipt_log_packet(unsigned int pf,
366 unsigned int hooknum,
354 const struct sk_buff *skb, 367 const struct sk_buff *skb,
355 const struct net_device *in, 368 const struct net_device *in,
356 const struct net_device *out, 369 const struct net_device *out,
357 const struct ipt_log_info *loginfo, 370 const struct nf_loginfo *loginfo,
358 const char *level_string,
359 const char *prefix) 371 const char *prefix)
360{ 372{
373 if (!loginfo)
374 loginfo = &default_loginfo;
375
361 spin_lock_bh(&log_lock); 376 spin_lock_bh(&log_lock);
362 printk(level_string); 377 printk("<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
363 printk("%sIN=%s OUT=%s ", 378 prefix,
364 prefix == NULL ? loginfo->prefix : prefix,
365 in ? in->name : "", 379 in ? in->name : "",
366 out ? out->name : ""); 380 out ? out->name : "");
367#ifdef CONFIG_BRIDGE_NETFILTER 381#ifdef CONFIG_BRIDGE_NETFILTER
@@ -405,28 +419,15 @@ ipt_log_target(struct sk_buff **pskb,
405 void *userinfo) 419 void *userinfo)
406{ 420{
407 const struct ipt_log_info *loginfo = targinfo; 421 const struct ipt_log_info *loginfo = targinfo;
408 char level_string[4] = "< >"; 422 struct nf_loginfo li;
409 423
410 level_string[1] = '0' + (loginfo->level % 8); 424 li.type = NF_LOG_TYPE_LOG;
411 ipt_log_packet(hooknum, *pskb, in, out, loginfo, level_string, NULL); 425 li.u.log.level = loginfo->level;
426 li.u.log.logflags = loginfo->logflags;
412 427
413 return IPT_CONTINUE; 428 nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, loginfo->prefix);
414}
415 429
416static void 430 return IPT_CONTINUE;
417ipt_logfn(unsigned int hooknum,
418 const struct sk_buff *skb,
419 const struct net_device *in,
420 const struct net_device *out,
421 const char *prefix)
422{
423 struct ipt_log_info loginfo = {
424 .level = 0,
425 .logflags = IPT_LOG_MASK,
426 .prefix = ""
427 };
428
429 ipt_log_packet(hooknum, skb, in, out, &loginfo, KERN_WARNING, prefix);
430} 431}
431 432
432static int ipt_log_checkentry(const char *tablename, 433static int ipt_log_checkentry(const char *tablename,
@@ -464,20 +465,29 @@ static struct ipt_target ipt_log_reg = {
464 .me = THIS_MODULE, 465 .me = THIS_MODULE,
465}; 466};
466 467
468static struct nf_logger ipt_log_logger ={
469 .name = "ipt_LOG",
470 .logfn = &ipt_log_packet,
471 .me = THIS_MODULE,
472};
473
467static int __init init(void) 474static int __init init(void)
468{ 475{
469 if (ipt_register_target(&ipt_log_reg)) 476 if (ipt_register_target(&ipt_log_reg))
470 return -EINVAL; 477 return -EINVAL;
471 if (nflog) 478 if (nf_log_register(PF_INET, &ipt_log_logger) < 0) {
472 nf_log_register(PF_INET, &ipt_logfn); 479 printk(KERN_WARNING "ipt_LOG: not logging via system console "
480 "since somebody else already registered for PF_INET\n");
481 /* we cannot make module load fail here, since otherwise
482 * iptables userspace would abort */
483 }
473 484
474 return 0; 485 return 0;
475} 486}
476 487
477static void __exit fini(void) 488static void __exit fini(void)
478{ 489{
479 if (nflog) 490 nf_log_unregister_logger(&ipt_log_logger);
480 nf_log_unregister(PF_INET, &ipt_logfn);
481 ipt_unregister_target(&ipt_log_reg); 491 ipt_unregister_target(&ipt_log_reg);
482} 492}
483 493
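
The logging side follows the same pattern as queueing: a target no longer formats console output itself but fills in a struct nf_loginfo and calls nf_log_packet(), which dispatches to whatever nf_logger is registered for the family. A minimal caller, mirroring ipt_log_target() above (level 4 chosen arbitrarily):

	struct nf_loginfo li;

	li.type           = NF_LOG_TYPE_LOG;
	li.u.log.level    = 4;			/* syslog-style level 0-7 */
	li.u.log.logflags = NF_LOG_MASK;	/* dump everything */

	/* routed to the registered PF_INET logger, e.g. ipt_log_logger */
	nf_log_packet(PF_INET, hooknum, skb, in, out, &li, "my-prefix: ");
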
diff --git a/net/ipv4/netfilter/ipt_MARK.c b/net/ipv4/netfilter/ipt_MARK.c
index 33c6f9b63b8d..52b4f2c296bf 100644
--- a/net/ipv4/netfilter/ipt_MARK.c
+++ b/net/ipv4/netfilter/ipt_MARK.c
@@ -29,10 +29,9 @@ target_v0(struct sk_buff **pskb,
29{ 29{
30 const struct ipt_mark_target_info *markinfo = targinfo; 30 const struct ipt_mark_target_info *markinfo = targinfo;
31 31
32 if((*pskb)->nfmark != markinfo->mark) { 32 if((*pskb)->nfmark != markinfo->mark)
33 (*pskb)->nfmark = markinfo->mark; 33 (*pskb)->nfmark = markinfo->mark;
34 (*pskb)->nfcache |= NFC_ALTERED; 34
35 }
36 return IPT_CONTINUE; 35 return IPT_CONTINUE;
37} 36}
38 37
@@ -61,10 +60,9 @@ target_v1(struct sk_buff **pskb,
61 break; 60 break;
62 } 61 }
63 62
64 if((*pskb)->nfmark != mark) { 63 if((*pskb)->nfmark != mark)
65 (*pskb)->nfmark = mark; 64 (*pskb)->nfmark = mark;
66 (*pskb)->nfcache |= NFC_ALTERED; 65
67 }
68 return IPT_CONTINUE; 66 return IPT_CONTINUE;
69} 67}
70 68
@@ -76,6 +74,8 @@ checkentry_v0(const char *tablename,
76 unsigned int targinfosize, 74 unsigned int targinfosize,
77 unsigned int hook_mask) 75 unsigned int hook_mask)
78{ 76{
77 struct ipt_mark_target_info *markinfo = targinfo;
78
79 if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) { 79 if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) {
80 printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n", 80 printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
81 targinfosize, 81 targinfosize,
@@ -88,6 +88,11 @@ checkentry_v0(const char *tablename,
88 return 0; 88 return 0;
89 } 89 }
90 90
91 if (markinfo->mark > 0xffffffff) {
92 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
93 return 0;
94 }
95
91 return 1; 96 return 1;
92} 97}
93 98
@@ -120,6 +125,11 @@ checkentry_v1(const char *tablename,
120 return 0; 125 return 0;
121 } 126 }
122 127
128 if (markinfo->mark > 0xffffffff) {
129 printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
130 return 0;
131 }
132
123 return 1; 133 return 1;
124} 134}
125 135
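
The new range checks in both MARK revisions only have teeth on 64-bit architectures: the mark field in the userspace-visible ipt_mark_target_info is an unsigned long, so on 32-bit kernels the > 0xffffffff comparison can never be true, while on 64-bit it rejects values that would otherwise be silently truncated when copied into the 32-bit skb->nfmark.
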
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 91e74502c3d3..2f3e181c8e97 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -86,11 +86,6 @@ masquerade_target(struct sk_buff **pskb,
86 86
87 IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING); 87 IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
88 88
89 /* FIXME: For the moment, don't do local packets, breaks
90 testsuite for 2.3.49 --RR */
91 if ((*pskb)->sk)
92 return NF_ACCEPT;
93
94 ct = ip_conntrack_get(*pskb, &ctinfo); 89 ct = ip_conntrack_get(*pskb, &ctinfo);
95 IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED 90 IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
96 || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 91 || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 06254b29d034..e6e7b6095363 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -46,7 +46,8 @@ check(const char *tablename,
46 DEBUGP(MODULENAME":check: size %u.\n", targinfosize); 46 DEBUGP(MODULENAME":check: size %u.\n", targinfosize);
47 return 0; 47 return 0;
48 } 48 }
49 if (hook_mask & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING))) { 49 if (hook_mask & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) |
50 (1 << NF_IP_LOCAL_OUT))) {
50 DEBUGP(MODULENAME":check: bad hooks %x.\n", hook_mask); 51 DEBUGP(MODULENAME":check: bad hooks %x.\n", hook_mask);
51 return 0; 52 return 0;
52 } 53 }
@@ -76,12 +77,13 @@ target(struct sk_buff **pskb,
76 struct ip_nat_range newrange; 77 struct ip_nat_range newrange;
77 78
78 IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING 79 IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
79 || hooknum == NF_IP_POST_ROUTING); 80 || hooknum == NF_IP_POST_ROUTING
81 || hooknum == NF_IP_LOCAL_OUT);
80 ct = ip_conntrack_get(*pskb, &ctinfo); 82 ct = ip_conntrack_get(*pskb, &ctinfo);
81 83
82 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); 84 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
83 85
84 if (hooknum == NF_IP_PRE_ROUTING) 86 if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT)
85 new_ip = (*pskb)->nh.iph->daddr & ~netmask; 87 new_ip = (*pskb)->nh.iph->daddr & ~netmask;
86 else 88 else
87 new_ip = (*pskb)->nh.iph->saddr & ~netmask; 89 new_ip = (*pskb)->nh.iph->saddr & ~netmask;
diff --git a/net/ipv4/netfilter/ipt_NFQUEUE.c b/net/ipv4/netfilter/ipt_NFQUEUE.c
new file mode 100644
index 000000000000..3cedc9be8807
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_NFQUEUE.c
@@ -0,0 +1,70 @@
1/* iptables module for using the new netfilter netlink queue
2 *
3 * (C) 2005 by Harald Welte <laforge@netfilter.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13
14#include <linux/netfilter.h>
15#include <linux/netfilter_ipv4/ip_tables.h>
16#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("iptables NFQUEUE target");
20MODULE_LICENSE("GPL");
21
22static unsigned int
23target(struct sk_buff **pskb,
24 const struct net_device *in,
25 const struct net_device *out,
26 unsigned int hooknum,
27 const void *targinfo,
28 void *userinfo)
29{
30 const struct ipt_NFQ_info *tinfo = targinfo;
31
32 return NF_QUEUE_NR(tinfo->queuenum);
33}
34
35static int
36checkentry(const char *tablename,
37 const struct ipt_entry *e,
38 void *targinfo,
39 unsigned int targinfosize,
40 unsigned int hook_mask)
41{
42 if (targinfosize != IPT_ALIGN(sizeof(struct ipt_NFQ_info))) {
43 printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
44 targinfosize,
45 IPT_ALIGN(sizeof(struct ipt_NFQ_info)));
46 return 0;
47 }
48
49 return 1;
50}
51
52static struct ipt_target ipt_NFQ_reg = {
53 .name = "NFQUEUE",
54 .target = target,
55 .checkentry = checkentry,
56 .me = THIS_MODULE,
57};
58
59static int __init init(void)
60{
61 return ipt_register_target(&ipt_NFQ_reg);
62}
63
64static void __exit fini(void)
65{
66 ipt_unregister_target(&ipt_NFQ_reg);
67}
68
69module_init(init);
70module_exit(fini);
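
The target body is deliberately trivial: NF_QUEUE_NR(tinfo->queuenum) encodes the queue number into the upper bits of an NF_QUEUE verdict, letting the queue core deliver the packet to the matching nfnetlink_queue instance. From userspace this is driven by the corresponding iptables extension, along the lines of iptables -A INPUT -p tcp --dport 80 -j NFQUEUE --queue-num 0 (option spelling per that extension, not part of this kernel patch).
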
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 915696446020..f115a84a4ac6 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -156,7 +156,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
156 156
157 /* This packet will not be the same as the other: clear nf fields */ 157 /* This packet will not be the same as the other: clear nf fields */
158 nf_reset(nskb); 158 nf_reset(nskb);
159 nskb->nfcache = 0;
160 nskb->nfmark = 0; 159 nskb->nfmark = 0;
161#ifdef CONFIG_BRIDGE_NETFILTER 160#ifdef CONFIG_BRIDGE_NETFILTER
162 nf_bridge_put(nskb->nf_bridge); 161 nf_bridge_put(nskb->nf_bridge);
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 7b84a254440e..8db70d6908c3 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -58,7 +58,7 @@ ipt_tcpmss_target(struct sk_buff **pskb,
58 unsigned int i; 58 unsigned int i;
59 u_int8_t *opt; 59 u_int8_t *opt;
60 60
61 if (!skb_ip_make_writable(pskb, (*pskb)->len)) 61 if (!skb_make_writable(pskb, (*pskb)->len))
62 return NF_DROP; 62 return NF_DROP;
63 63
64 if ((*pskb)->ip_summed == CHECKSUM_HW && 64 if ((*pskb)->ip_summed == CHECKSUM_HW &&
@@ -190,7 +190,6 @@ ipt_tcpmss_target(struct sk_buff **pskb,
190 newmss); 190 newmss);
191 191
192 retmodified: 192 retmodified:
193 (*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED;
194 return IPT_CONTINUE; 193 return IPT_CONTINUE;
195} 194}
196 195
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 85c70d240f8b..deadb36d4428 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -33,7 +33,7 @@ target(struct sk_buff **pskb,
33 if (((*pskb)->nh.iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { 33 if (((*pskb)->nh.iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) {
34 u_int16_t diffs[2]; 34 u_int16_t diffs[2];
35 35
36 if (!skb_ip_make_writable(pskb, sizeof(struct iphdr))) 36 if (!skb_make_writable(pskb, sizeof(struct iphdr)))
37 return NF_DROP; 37 return NF_DROP;
38 38
39 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF; 39 diffs[0] = htons((*pskb)->nh.iph->tos) ^ 0xFFFF;
@@ -46,7 +46,6 @@ target(struct sk_buff **pskb,
46 sizeof(diffs), 46 sizeof(diffs),
47 (*pskb)->nh.iph->check 47 (*pskb)->nh.iph->check
48 ^0xFFFF)); 48 ^0xFFFF));
49 (*pskb)->nfcache |= NFC_ALTERED;
50 } 49 }
51 return IPT_CONTINUE; 50 return IPT_CONTINUE;
52} 51}
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
new file mode 100644
index 000000000000..b9ae6a9382f3
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -0,0 +1,119 @@
1/* TTL modification target for IP tables
2 * (C) 2000,2005 by Harald Welte <laforge@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/skbuff.h>
12#include <linux/ip.h>
13#include <net/checksum.h>
14
15#include <linux/netfilter_ipv4/ip_tables.h>
16#include <linux/netfilter_ipv4/ipt_TTL.h>
17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("IP tables TTL modification module");
20MODULE_LICENSE("GPL");
21
22static unsigned int
23ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in,
24 const struct net_device *out, unsigned int hooknum,
25 const void *targinfo, void *userinfo)
26{
27 struct iphdr *iph;
28 const struct ipt_TTL_info *info = targinfo;
29 u_int16_t diffs[2];
30 int new_ttl;
31
32 if (!skb_make_writable(pskb, (*pskb)->len))
33 return NF_DROP;
34
35 iph = (*pskb)->nh.iph;
36
37 switch (info->mode) {
38 case IPT_TTL_SET:
39 new_ttl = info->ttl;
40 break;
41 case IPT_TTL_INC:
42 new_ttl = iph->ttl + info->ttl;
43 if (new_ttl > 255)
44 new_ttl = 255;
45 break;
46 case IPT_TTL_DEC:
47 new_ttl = iph->ttl - info->ttl;
48 if (new_ttl < 0)
49 new_ttl = 0;
50 break;
51 default:
52 new_ttl = iph->ttl;
53 break;
54 }
55
56 if (new_ttl != iph->ttl) {
57 diffs[0] = htons(((unsigned)iph->ttl) << 8) ^ 0xFFFF;
58 iph->ttl = new_ttl;
59 diffs[1] = htons(((unsigned)iph->ttl) << 8);
60 iph->check = csum_fold(csum_partial((char *)diffs,
61 sizeof(diffs),
62 iph->check^0xFFFF));
63 }
64
65 return IPT_CONTINUE;
66}
67
68static int ipt_ttl_checkentry(const char *tablename,
69 const struct ipt_entry *e,
70 void *targinfo,
71 unsigned int targinfosize,
72 unsigned int hook_mask)
73{
74 struct ipt_TTL_info *info = targinfo;
75
76 if (targinfosize != IPT_ALIGN(sizeof(struct ipt_TTL_info))) {
77 printk(KERN_WARNING "ipt_TTL: targinfosize %u != %Zu\n",
78 targinfosize,
79 IPT_ALIGN(sizeof(struct ipt_TTL_info)));
80 return 0;
81 }
82
83 if (strcmp(tablename, "mangle")) {
84 printk(KERN_WARNING "ipt_TTL: can only be called from "
85 "\"mangle\" table, not \"%s\"\n", tablename);
86 return 0;
87 }
88
89 if (info->mode > IPT_TTL_MAXMODE) {
90 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
91 info->mode);
92 return 0;
93 }
94
95 if ((info->mode != IPT_TTL_SET) && (info->ttl == 0))
96 return 0;
97
98 return 1;
99}
100
101static struct ipt_target ipt_TTL = {
102 .name = "TTL",
103 .target = ipt_ttl_target,
104 .checkentry = ipt_ttl_checkentry,
105 .me = THIS_MODULE,
106};
107
108static int __init init(void)
109{
110 return ipt_register_target(&ipt_TTL);
111}
112
113static void __exit fini(void)
114{
115 ipt_unregister_target(&ipt_TTL);
116}
117
118module_init(init);
119module_exit(fini);
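
The checksum fixup in ipt_ttl_target() is the standard incremental update of RFC 1624: when a 16-bit header word m is replaced by m', the new checksum is HC' = ~(~HC + ~m + m'). diffs[0] holds ~m (the old TTL byte-pair, inverted), diffs[1] holds m', and csum_fold(csum_partial(...)) folds them into iph->check without re-summing the whole header. From userspace the target lives in the mangle table, e.g. iptables -t mangle -A PREROUTING -j TTL --ttl-set 64 (option spelling per the matching iptables extension).
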
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 52a0076302a7..e2c14f3cb2fc 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -62,6 +62,7 @@
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); 63MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
64MODULE_DESCRIPTION("iptables userspace logging module"); 64MODULE_DESCRIPTION("iptables userspace logging module");
65MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
65 66
66#define ULOG_NL_EVENT 111 /* Harald's favorite number */ 67#define ULOG_NL_EVENT 111 /* Harald's favorite number */
67#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */ 68#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */
@@ -115,10 +116,10 @@ static void ulog_send(unsigned int nlgroupnum)
115 if (ub->qlen > 1) 116 if (ub->qlen > 1)
116 ub->lastnlh->nlmsg_type = NLMSG_DONE; 117 ub->lastnlh->nlmsg_type = NLMSG_DONE;
117 118
118 NETLINK_CB(ub->skb).dst_groups = (1 << nlgroupnum); 119 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
119 DEBUGP("ipt_ULOG: throwing %d packets to netlink mask %u\n", 120 DEBUGP("ipt_ULOG: throwing %d packets to netlink group %u\n",
120 ub->qlen, nlgroupnum); 121 ub->qlen, nlgroupnum + 1);
121 netlink_broadcast(nflognl, ub->skb, 0, (1 << nlgroupnum), GFP_ATOMIC); 122 netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
122 123
123 ub->qlen = 0; 124 ub->qlen = 0;
124 ub->skb = NULL; 125 ub->skb = NULL;
@@ -219,13 +220,13 @@ static void ipt_ulog_packet(unsigned int hooknum,
219 pm = NLMSG_DATA(nlh); 220 pm = NLMSG_DATA(nlh);
220 221
221 /* We might not have a timestamp, get one */ 222 /* We might not have a timestamp, get one */
222 if (skb->stamp.tv_sec == 0) 223 if (skb->tstamp.off_sec == 0)
223 do_gettimeofday((struct timeval *)&skb->stamp); 224 __net_timestamp((struct sk_buff *)skb);
224 225
225 /* copy hook, prefix, timestamp, payload, etc. */ 226 /* copy hook, prefix, timestamp, payload, etc. */
226 pm->data_len = copy_len; 227 pm->data_len = copy_len;
227 pm->timestamp_sec = skb->stamp.tv_sec; 228 pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec;
228 pm->timestamp_usec = skb->stamp.tv_usec; 229 pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec;
229 pm->mark = skb->nfmark; 230 pm->mark = skb->nfmark;
230 pm->hook = hooknum; 231 pm->hook = hooknum;
231 if (prefix != NULL) 232 if (prefix != NULL)
@@ -303,18 +304,27 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
303 return IPT_CONTINUE; 304 return IPT_CONTINUE;
304} 305}
305 306
306static void ipt_logfn(unsigned int hooknum, 307static void ipt_logfn(unsigned int pf,
308 unsigned int hooknum,
307 const struct sk_buff *skb, 309 const struct sk_buff *skb,
308 const struct net_device *in, 310 const struct net_device *in,
309 const struct net_device *out, 311 const struct net_device *out,
312 const struct nf_loginfo *li,
310 const char *prefix) 313 const char *prefix)
311{ 314{
312 struct ipt_ulog_info loginfo = { 315 struct ipt_ulog_info loginfo;
313 .nl_group = ULOG_DEFAULT_NLGROUP, 316
314 .copy_range = 0, 317 if (!li || li->type != NF_LOG_TYPE_ULOG) {
315 .qthreshold = ULOG_DEFAULT_QTHRESHOLD, 318 loginfo.nl_group = ULOG_DEFAULT_NLGROUP;
316 .prefix = "" 319 loginfo.copy_range = 0;
317 }; 320 loginfo.qthreshold = ULOG_DEFAULT_QTHRESHOLD;
321 loginfo.prefix[0] = '\0';
322 } else {
323 loginfo.nl_group = li->u.ulog.group;
324 loginfo.copy_range = li->u.ulog.copy_len;
325 loginfo.qthreshold = li->u.ulog.qthreshold;
326 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
327 }
318 328
319 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); 329 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
320} 330}
@@ -354,6 +364,12 @@ static struct ipt_target ipt_ulog_reg = {
354 .me = THIS_MODULE, 364 .me = THIS_MODULE,
355}; 365};
356 366
367static struct nf_logger ipt_ulog_logger = {
368 .name = "ipt_ULOG",
369 .logfn = &ipt_logfn,
370 .me = THIS_MODULE,
371};
372
357static int __init init(void) 373static int __init init(void)
358{ 374{
359 int i; 375 int i;
@@ -372,7 +388,8 @@ static int __init init(void)
372 ulog_buffers[i].timer.data = i; 388 ulog_buffers[i].timer.data = i;
373 } 389 }
374 390
375 nflognl = netlink_kernel_create(NETLINK_NFLOG, NULL); 391 nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
392 THIS_MODULE);
376 if (!nflognl) 393 if (!nflognl)
377 return -ENOMEM; 394 return -ENOMEM;
378 395
@@ -381,7 +398,7 @@ static int __init init(void)
381 return -EINVAL; 398 return -EINVAL;
382 } 399 }
383 if (nflog) 400 if (nflog)
384 nf_log_register(PF_INET, &ipt_logfn); 401 nf_log_register(PF_INET, &ipt_ulog_logger);
385 402
386 return 0; 403 return 0;
387} 404}
@@ -394,7 +411,7 @@ static void __exit fini(void)
394 DEBUGP("ipt_ULOG: cleanup_module\n"); 411 DEBUGP("ipt_ULOG: cleanup_module\n");
395 412
396 if (nflog) 413 if (nflog)
397 nf_log_unregister(PF_INET, &ipt_logfn); 414 nf_log_unregister_logger(&ipt_ulog_logger);
398 ipt_unregister_target(&ipt_ulog_reg); 415 ipt_unregister_target(&ipt_ulog_reg);
399 sock_release(nflognl->sk_socket); 416 sock_release(nflognl->sk_socket);
400 417
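
Two independent API shifts meet in this file. First, netlink groups are now addressed by ordinal rather than bitmask: dst_group carries nlgroupnum + 1, netlink_broadcast() takes the group number directly, and netlink_kernel_create() is told up front how many groups the socket uses (ULOG_MAXNLGROUPS). Second, log registration goes through a struct nf_logger rather than a bare function pointer, and the callback gains the protocol family plus an nf_loginfo describing what to log, with sane defaults when none is supplied.
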
diff --git a/net/ipv4/netfilter/ipt_connbytes.c b/net/ipv4/netfilter/ipt_connbytes.c
new file mode 100644
index 000000000000..df4a42c6da22
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_connbytes.c
@@ -0,0 +1,162 @@
1/* Kernel module to match connection tracking byte counter.
2 * GPL (C) 2002 Martin Devera (devik@cdi.cz).
3 *
4 * 2004-07-20 Harald Welte <laforge@netfilter.org>
5 * - reimplemented to use per-connection accounting counters
6 * - add functionality to match number of packets
7 * - add functionality to match average packet size
 8 * - add support to match directions separately
9 *
10 */
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/netfilter_ipv4/ip_conntrack.h>
14#include <linux/netfilter_ipv4/ip_tables.h>
15#include <linux/netfilter_ipv4/ipt_connbytes.h>
16
17#include <asm/div64.h>
18#include <asm/bitops.h>
19
20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
22MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
23
24/* 64bit divisor, dividend and result. dynamic precision */
25static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
26{
27 u_int32_t d = divisor;
28
29 if (divisor > 0xffffffffULL) {
30 unsigned int shift = fls(divisor >> 32);
31
32 d = divisor >> shift;
33 dividend >>= shift;
34 }
35
36 do_div(dividend, d);
37 return dividend;
38}
39
40static int
41match(const struct sk_buff *skb,
42 const struct net_device *in,
43 const struct net_device *out,
44 const void *matchinfo,
45 int offset,
46 int *hotdrop)
47{
48 const struct ipt_connbytes_info *sinfo = matchinfo;
49 enum ip_conntrack_info ctinfo;
50 struct ip_conntrack *ct;
51 u_int64_t what = 0; /* initialize to make gcc happy */
52
53 if (!(ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)))
54 return 0; /* no match */
55
56 switch (sinfo->what) {
57 case IPT_CONNBYTES_PKTS:
58 switch (sinfo->direction) {
59 case IPT_CONNBYTES_DIR_ORIGINAL:
60 what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
61 break;
62 case IPT_CONNBYTES_DIR_REPLY:
63 what = ct->counters[IP_CT_DIR_REPLY].packets;
64 break;
65 case IPT_CONNBYTES_DIR_BOTH:
66 what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
67 what += ct->counters[IP_CT_DIR_REPLY].packets;
68 break;
69 }
70 break;
71 case IPT_CONNBYTES_BYTES:
72 switch (sinfo->direction) {
73 case IPT_CONNBYTES_DIR_ORIGINAL:
74 what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
75 break;
76 case IPT_CONNBYTES_DIR_REPLY:
77 what = ct->counters[IP_CT_DIR_REPLY].bytes;
78 break;
79 case IPT_CONNBYTES_DIR_BOTH:
80 what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
81 what += ct->counters[IP_CT_DIR_REPLY].bytes;
82 break;
83 }
84 break;
85 case IPT_CONNBYTES_AVGPKT:
86 switch (sinfo->direction) {
87 case IPT_CONNBYTES_DIR_ORIGINAL:
88 what = div64_64(ct->counters[IP_CT_DIR_ORIGINAL].bytes,
89 ct->counters[IP_CT_DIR_ORIGINAL].packets);
90 break;
91 case IPT_CONNBYTES_DIR_REPLY:
92 what = div64_64(ct->counters[IP_CT_DIR_REPLY].bytes,
93 ct->counters[IP_CT_DIR_REPLY].packets);
94 break;
95 case IPT_CONNBYTES_DIR_BOTH:
96 {
97 u_int64_t bytes;
98 u_int64_t pkts;
99 bytes = ct->counters[IP_CT_DIR_ORIGINAL].bytes +
100 ct->counters[IP_CT_DIR_REPLY].bytes;
101 pkts = ct->counters[IP_CT_DIR_ORIGINAL].packets+
102 ct->counters[IP_CT_DIR_REPLY].packets;
103
104 /* FIXME_THEORETICAL: what to do if sum
105 * overflows ? */
106
107 what = div64_64(bytes, pkts);
108 }
109 break;
110 }
111 break;
112 }
113
114 if (sinfo->count.to)
115 return (what <= sinfo->count.to && what >= sinfo->count.from);
116 else
117 return (what >= sinfo->count.from);
118}
119
120static int check(const char *tablename,
121 const struct ipt_ip *ip,
122 void *matchinfo,
123 unsigned int matchsize,
124 unsigned int hook_mask)
125{
126 const struct ipt_connbytes_info *sinfo = matchinfo;
127
128 if (matchsize != IPT_ALIGN(sizeof(struct ipt_connbytes_info)))
129 return 0;
130
131 if (sinfo->what != IPT_CONNBYTES_PKTS &&
132 sinfo->what != IPT_CONNBYTES_BYTES &&
133 sinfo->what != IPT_CONNBYTES_AVGPKT)
134 return 0;
135
136 if (sinfo->direction != IPT_CONNBYTES_DIR_ORIGINAL &&
137 sinfo->direction != IPT_CONNBYTES_DIR_REPLY &&
138 sinfo->direction != IPT_CONNBYTES_DIR_BOTH)
139 return 0;
140
141 return 1;
142}
143
144static struct ipt_match state_match = {
145 .name = "connbytes",
146 .match = &match,
147 .checkentry = &check,
148 .me = THIS_MODULE
149};
150
151static int __init init(void)
152{
153 return ipt_register_match(&state_match);
154}
155
156static void __exit fini(void)
157{
158 ipt_unregister_match(&state_match);
159}
160
161module_init(init);
162module_exit(fini);
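
div64_64() works around the lack of a full 64-by-64 divide on 32-bit architectures, where the kernel only provides do_div() (64-by-32). When the divisor exceeds 32 bits, both operands are shifted right by fls(divisor >> 32) bits, dropping the same number of low-order bits from each side so the quotient stays close to exact. For example, dividend 2^40 and divisor 2^33 give shift = fls(2) = 2, and do_div() then computes 2^38 / 2^31 = 2^7, which here equals the true quotient; for the average-packet-size mode that precision is ample. A representative rule for the match would be iptables -A FORWARD -m connbytes --connbytes 100000: --connbytes-dir both --connbytes-mode bytes (flag spellings per the contemporary iptables extension).
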
diff --git a/net/ipv4/netfilter/ipt_connmark.c b/net/ipv4/netfilter/ipt_connmark.c
index 2706f96cea55..bf8de47ce004 100644
--- a/net/ipv4/netfilter/ipt_connmark.c
+++ b/net/ipv4/netfilter/ipt_connmark.c
@@ -54,9 +54,16 @@ checkentry(const char *tablename,
54 unsigned int matchsize, 54 unsigned int matchsize,
55 unsigned int hook_mask) 55 unsigned int hook_mask)
56{ 56{
57 struct ipt_connmark_info *cm =
58 (struct ipt_connmark_info *)matchinfo;
57 if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info))) 59 if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info)))
58 return 0; 60 return 0;
59 61
62 if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
63 printk(KERN_WARNING "connmark: only support 32bit mark\n");
64 return 0;
65 }
66
60 return 1; 67 return 1;
61} 68}
62 69
diff --git a/net/ipv4/netfilter/ipt_dccp.c b/net/ipv4/netfilter/ipt_dccp.c
new file mode 100644
index 000000000000..ad3278bba6c1
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_dccp.c
@@ -0,0 +1,176 @@
1/*
2 * iptables module for DCCP protocol header matching
3 *
4 * (C) 2005 by Harald Welte <laforge@netfilter.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/spinlock.h>
14#include <net/ip.h>
15#include <linux/dccp.h>
16
17#include <linux/netfilter_ipv4/ip_tables.h>
18#include <linux/netfilter_ipv4/ipt_dccp.h>
19
20#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
21 || (!!((invflag) & (option)) ^ (cond)))
22
23static unsigned char *dccp_optbuf;
24static DEFINE_SPINLOCK(dccp_buflock);
25
26static inline int
27dccp_find_option(u_int8_t option,
28 const struct sk_buff *skb,
29 const struct dccp_hdr *dh,
30 int *hotdrop)
31{
 32 /* dccph_doff is 8 bits, ie. max 255 * 4 bytes of header */
33 unsigned char *op;
34 unsigned int optoff = __dccp_hdr_len(dh);
35 unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh);
36 unsigned int i;
37
38 if (dh->dccph_doff * 4 < __dccp_hdr_len(dh)) {
39 *hotdrop = 1;
40 return 0;
41 }
42
43 if (!optlen)
44 return 0;
45
46 spin_lock_bh(&dccp_buflock);
47 op = skb_header_pointer(skb,
48 skb->nh.iph->ihl*4 + optoff,
49 optlen, dccp_optbuf);
50 if (op == NULL) {
51 /* If we don't have the whole header, drop packet. */
52 spin_unlock_bh(&dccp_buflock);
53 *hotdrop = 1;
54 return 0;
55 }
56
57 for (i = 0; i < optlen; ) {
58 if (op[i] == option) {
59 spin_unlock_bh(&dccp_buflock);
60 return 1;
61 }
62
63 if (op[i] < 2)
64 i++;
65 else
66 i += op[i+1]?:1;
67 }
68
69 spin_unlock_bh(&dccp_buflock);
70 return 0;
71}
72
73
74static inline int
75match_types(const struct dccp_hdr *dh, u_int16_t typemask)
76{
77 return (typemask & (1 << dh->dccph_type));
78}
79
80static inline int
81match_option(u_int8_t option, const struct sk_buff *skb,
82 const struct dccp_hdr *dh, int *hotdrop)
83{
84 return dccp_find_option(option, skb, dh, hotdrop);
85}
86
87static int
88match(const struct sk_buff *skb,
89 const struct net_device *in,
90 const struct net_device *out,
91 const void *matchinfo,
92 int offset,
93 int *hotdrop)
94{
95 const struct ipt_dccp_info *info =
96 (const struct ipt_dccp_info *)matchinfo;
97 struct dccp_hdr _dh, *dh;
98
99 if (offset)
100 return 0;
101
102 dh = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_dh), &_dh);
103 if (dh == NULL) {
104 *hotdrop = 1;
105 return 0;
106 }
107
108 return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0])
109 && (ntohs(dh->dccph_sport) <= info->spts[1])),
110 IPT_DCCP_SRC_PORTS, info->flags, info->invflags)
111 && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0])
112 && (ntohs(dh->dccph_dport) <= info->dpts[1])),
113 IPT_DCCP_DEST_PORTS, info->flags, info->invflags)
114 && DCCHECK(match_types(dh, info->typemask),
115 IPT_DCCP_TYPE, info->flags, info->invflags)
116 && DCCHECK(match_option(info->option, skb, dh, hotdrop),
117 IPT_DCCP_OPTION, info->flags, info->invflags);
118}
119
120static int
121checkentry(const char *tablename,
122 const struct ipt_ip *ip,
123 void *matchinfo,
124 unsigned int matchsize,
125 unsigned int hook_mask)
126{
127 const struct ipt_dccp_info *info;
128
129 info = (const struct ipt_dccp_info *)matchinfo;
130
131 return ip->proto == IPPROTO_DCCP
132 && !(ip->invflags & IPT_INV_PROTO)
133 && matchsize == IPT_ALIGN(sizeof(struct ipt_dccp_info))
134 && !(info->flags & ~IPT_DCCP_VALID_FLAGS)
135 && !(info->invflags & ~IPT_DCCP_VALID_FLAGS)
136 && !(info->invflags & ~info->flags);
137}
138
139static struct ipt_match dccp_match =
140{
141 .name = "dccp",
142 .match = &match,
143 .checkentry = &checkentry,
144 .me = THIS_MODULE,
145};
146
147static int __init init(void)
148{
149 int ret;
150
151 /* doff is 8 bits, so the maximum option size is (4*256). Don't put
152 * this in BSS since DaveM is worried about locked TLB's for kernel
153 * BSS. */
154 dccp_optbuf = kmalloc(256 * 4, GFP_KERNEL);
155 if (!dccp_optbuf)
156 return -ENOMEM;
157 ret = ipt_register_match(&dccp_match);
158 if (ret)
159 kfree(dccp_optbuf);
160
161 return ret;
162}
163
164static void __exit fini(void)
165{
166 ipt_unregister_match(&dccp_match);
167 kfree(dccp_optbuf);
168}
169
170module_init(init);
171module_exit(fini);
172
173MODULE_LICENSE("GPL");
174MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
175MODULE_DESCRIPTION("Match for DCCP protocol packets");
176
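
Two pieces of the new match are worth unpacking. DCCHECK(cond, option, flag, invflag) evaluates true when the option was not requested at all, and otherwise to cond XOR-ed with its inversion bit. The option walk in dccp_find_option() follows the DCCP TLV layout: types 0 and 1 are single-byte options, every other type is followed by a length byte covering type, length and data, and a bogus zero length is bumped to 1 (the GNU "?:" in the code) so the loop cannot stall. A userspace sketch of the walk, with a made-up option buffer:

/* Userspace sketch of the option scan in dccp_find_option(); mirrors
 * the kernel loop and, like it, assumes the buffer is not truncated
 * mid-option. */
#include <stdio.h>

static int find_option(const unsigned char *op, unsigned int optlen,
		       unsigned char option)
{
	unsigned int i;

	for (i = 0; i < optlen; ) {
		if (op[i] == option)
			return 1;
		if (op[i] < 2)
			i++;				/* single-byte option */
		else
			i += op[i + 1] ? op[i + 1] : 1;	/* length incl. header */
	}
	return 0;
}

int main(void)
{
	/* padding(0), mandatory(1), then an ack-vector option 38, length 4 */
	const unsigned char opts[] = { 0, 1, 38, 4, 0xaa, 0xbb };

	printf("option 38: %d\n", find_option(opts, sizeof(opts), 38));
	printf("option 44: %d\n", find_option(opts, sizeof(opts), 44));
	return 0;
}
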
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 564b49bfebcf..2dd1cccbdab9 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -94,7 +94,7 @@ struct ipt_hashlimit_htable {
94static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 94static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
95static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */ 95static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */
96static HLIST_HEAD(hashlimit_htables); 96static HLIST_HEAD(hashlimit_htables);
97static kmem_cache_t *hashlimit_cachep; 97static kmem_cache_t *hashlimit_cachep __read_mostly;
98 98
99static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) 99static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
100{ 100{
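
__read_mostly moves a rarely-written object such as this slab-cache pointer into its own data section, keeping it off cache lines that hot, frequently-written data would otherwise dirty. A compilable sketch mirroring the common definition of the era (the actual section name is per-architecture, so treat it as an assumption):

/* Sketch: __read_mostly only changes the object's placement; reads and
 * writes are unaffected.  Compiles and runs as ordinary userspace C on
 * ELF toolchains. */
#include <stdio.h>

#define __read_mostly __attribute__((__section__(".data.read_mostly")))

static int table_size __read_mostly = 1024;

int main(void)
{
	printf("table_size=%d\n", table_size);
	return 0;
}
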
diff --git a/net/ipv4/netfilter/ipt_mark.c b/net/ipv4/netfilter/ipt_mark.c
index 8955728127b9..00bef6cdd3f8 100644
--- a/net/ipv4/netfilter/ipt_mark.c
+++ b/net/ipv4/netfilter/ipt_mark.c
@@ -37,9 +37,16 @@ checkentry(const char *tablename,
37 unsigned int matchsize, 37 unsigned int matchsize,
38 unsigned int hook_mask) 38 unsigned int hook_mask)
39{ 39{
40 struct ipt_mark_info *minfo = (struct ipt_mark_info *) matchinfo;
41
40 if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info))) 42 if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info)))
41 return 0; 43 return 0;
42 44
45 if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
46 printk(KERN_WARNING "mark: only supports 32bit mark\n");
47 return 0;
48 }
49
43 return 1; 50 return 1;
44} 51}
45 52
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 3b9065e06381..c1889f88262b 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -21,106 +21,6 @@ MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
21MODULE_DESCRIPTION("iptables owner match"); 21MODULE_DESCRIPTION("iptables owner match");
22 22
23static int 23static int
24match_comm(const struct sk_buff *skb, const char *comm)
25{
26 struct task_struct *g, *p;
27 struct files_struct *files;
28 int i;
29
30 read_lock(&tasklist_lock);
31 do_each_thread(g, p) {
32 if(strncmp(p->comm, comm, sizeof(p->comm)))
33 continue;
34
35 task_lock(p);
36 files = p->files;
37 if(files) {
38 spin_lock(&files->file_lock);
39 for (i=0; i < files->max_fds; i++) {
40 if (fcheck_files(files, i) ==
41 skb->sk->sk_socket->file) {
42 spin_unlock(&files->file_lock);
43 task_unlock(p);
44 read_unlock(&tasklist_lock);
45 return 1;
46 }
47 }
48 spin_unlock(&files->file_lock);
49 }
50 task_unlock(p);
51 } while_each_thread(g, p);
52 read_unlock(&tasklist_lock);
53 return 0;
54}
55
56static int
57match_pid(const struct sk_buff *skb, pid_t pid)
58{
59 struct task_struct *p;
60 struct files_struct *files;
61 int i;
62
63 read_lock(&tasklist_lock);
64 p = find_task_by_pid(pid);
65 if (!p)
66 goto out;
67 task_lock(p);
68 files = p->files;
69 if(files) {
70 spin_lock(&files->file_lock);
71 for (i=0; i < files->max_fds; i++) {
72 if (fcheck_files(files, i) ==
73 skb->sk->sk_socket->file) {
74 spin_unlock(&files->file_lock);
75 task_unlock(p);
76 read_unlock(&tasklist_lock);
77 return 1;
78 }
79 }
80 spin_unlock(&files->file_lock);
81 }
82 task_unlock(p);
83out:
84 read_unlock(&tasklist_lock);
85 return 0;
86}
87
88static int
89match_sid(const struct sk_buff *skb, pid_t sid)
90{
91 struct task_struct *g, *p;
92 struct file *file = skb->sk->sk_socket->file;
93 int i, found=0;
94
95 read_lock(&tasklist_lock);
96 do_each_thread(g, p) {
97 struct files_struct *files;
98 if (p->signal->session != sid)
99 continue;
100
101 task_lock(p);
102 files = p->files;
103 if (files) {
104 spin_lock(&files->file_lock);
105 for (i=0; i < files->max_fds; i++) {
106 if (fcheck_files(files, i) == file) {
107 found = 1;
108 break;
109 }
110 }
111 spin_unlock(&files->file_lock);
112 }
113 task_unlock(p);
114 if (found)
115 goto out;
116 } while_each_thread(g, p);
117out:
118 read_unlock(&tasklist_lock);
119
120 return found;
121}
122
123static int
124match(const struct sk_buff *skb, 24match(const struct sk_buff *skb,
125 const struct net_device *in, 25 const struct net_device *in,
126 const struct net_device *out, 26 const struct net_device *out,
@@ -145,24 +45,6 @@ match(const struct sk_buff *skb,
145 return 0; 45 return 0;
146 } 46 }
147 47
148 if(info->match & IPT_OWNER_PID) {
149 if (!match_pid(skb, info->pid) ^
150 !!(info->invert & IPT_OWNER_PID))
151 return 0;
152 }
153
154 if(info->match & IPT_OWNER_SID) {
155 if (!match_sid(skb, info->sid) ^
156 !!(info->invert & IPT_OWNER_SID))
157 return 0;
158 }
159
160 if(info->match & IPT_OWNER_COMM) {
161 if (!match_comm(skb, info->comm) ^
162 !!(info->invert & IPT_OWNER_COMM))
163 return 0;
164 }
165
166 return 1; 48 return 1;
167} 49}
168 50
@@ -173,6 +55,8 @@ checkentry(const char *tablename,
173 unsigned int matchsize, 55 unsigned int matchsize,
174 unsigned int hook_mask) 56 unsigned int hook_mask)
175{ 57{
58 const struct ipt_owner_info *info = matchinfo;
59
176 if (hook_mask 60 if (hook_mask
177 & ~((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING))) { 61 & ~((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING))) {
178 printk("ipt_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); 62 printk("ipt_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
@@ -184,15 +68,13 @@ checkentry(const char *tablename,
184 IPT_ALIGN(sizeof(struct ipt_owner_info))); 68 IPT_ALIGN(sizeof(struct ipt_owner_info)));
185 return 0; 69 return 0;
186 } 70 }
187#ifdef CONFIG_SMP 71
188 /* files->file_lock can not be used in a BH */ 72 if (info->match & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) {
189 if (((struct ipt_owner_info *)matchinfo)->match 73 printk("ipt_owner: pid, sid and command matching "
190 & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) { 74 "not supported anymore\n");
191 printk("ipt_owner: pid, sid and command matching is broken "
192 "on SMP.\n");
193 return 0; 75 return 0;
194 } 76 }
195#endif 77
196 return 1; 78 return 1;
197} 79}
198 80
diff --git a/net/ipv4/netfilter/ipt_string.c b/net/ipv4/netfilter/ipt_string.c
new file mode 100644
index 000000000000..b5def204d798
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_string.c
@@ -0,0 +1,91 @@
1/* String matching match for iptables
2 *
3 * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/skbuff.h>
14#include <linux/netfilter_ipv4/ip_tables.h>
15#include <linux/netfilter_ipv4/ipt_string.h>
16#include <linux/textsearch.h>
17
18MODULE_AUTHOR("Pablo Neira Ayuso <pablo@eurodev.net>");
19MODULE_DESCRIPTION("IP tables string match module");
20MODULE_LICENSE("GPL");
21
22static int match(const struct sk_buff *skb,
23 const struct net_device *in,
24 const struct net_device *out,
25 const void *matchinfo,
26 int offset,
27 int *hotdrop)
28{
29 struct ts_state state;
30 struct ipt_string_info *conf = (struct ipt_string_info *) matchinfo;
31
32 memset(&state, 0, sizeof(struct ts_state));
33
34 return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
35 conf->to_offset, conf->config, &state)
36 != UINT_MAX) && !conf->invert;
37}
38
39#define STRING_TEXT_PRIV(m) ((struct ipt_string_info *) m)
40
41static int checkentry(const char *tablename,
42 const struct ipt_ip *ip,
43 void *matchinfo,
44 unsigned int matchsize,
45 unsigned int hook_mask)
46{
47 struct ipt_string_info *conf = matchinfo;
48 struct ts_config *ts_conf;
49
50 if (matchsize != IPT_ALIGN(sizeof(struct ipt_string_info)))
51 return 0;
52
53 /* Damn, can't handle this case properly with iptables... */
54 if (conf->from_offset > conf->to_offset)
55 return 0;
56
57 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
58 GFP_KERNEL, TS_AUTOLOAD);
59 if (IS_ERR(ts_conf))
60 return 0;
61
62 conf->config = ts_conf;
63
64 return 1;
65}
66
67static void destroy(void *matchinfo, unsigned int matchsize)
68{
69 textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config);
70}
71
72static struct ipt_match string_match = {
73 .name = "string",
74 .match = match,
75 .checkentry = checkentry,
76 .destroy = destroy,
77 .me = THIS_MODULE
78};
79
80static int __init init(void)
81{
82 return ipt_register_match(&string_match);
83}
84
85static void __exit fini(void)
86{
87 ipt_unregister_match(&string_match);
88}
89
90module_init(init);
91module_exit(fini);
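
checkentry() compiles the pattern once via textsearch_prepare() and stows the resulting ts_config in the match data, so the per-packet path is just skb_find_text() over an offset window. A module-init sketch of the same lib/textsearch API on a flat buffer; the "kmp" algorithm and the pattern are arbitrary choices for the demo, and the flat-buffer cousin of skb_find_text() is textsearch_find_continuous():

/* Kernel-module sketch of the textsearch API the string match builds on.
 * Returns -ENODEV from init on purpose: demo only, never stays loaded. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/textsearch.h>
#include <linux/err.h>

static int __init ts_demo_init(void)
{
	static const char haystack[] = "GET /index.html HTTP/1.0";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* Compile the pattern once, exactly as checkentry() does. */
	conf = textsearch_prepare("kmp", "index", 5, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* UINT_MAX means "not found", as in the match() above. */
	pos = textsearch_find_continuous(conf, &state, haystack,
					 strlen(haystack));
	if (pos != UINT_MAX)
		printk(KERN_INFO "ts_demo: \"index\" at offset %u\n", pos);

	textsearch_destroy(conf);
	return -ENODEV;
}

module_init(ts_demo_init);
MODULE_LICENSE("GPL");
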
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 912bbcc7f415..f7943ba1f43c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,10 @@ static int fold_prot_inuse(struct proto *proto)
59 */ 59 */
60static int sockstat_seq_show(struct seq_file *seq, void *v) 60static int sockstat_seq_show(struct seq_file *seq, void *v)
61{ 61{
62 /* From net/socket.c */
63 extern void socket_seq_show(struct seq_file *seq);
64
65 socket_seq_show(seq); 62 socket_seq_show(seq);
66 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 63 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
67 fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count), 64 fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
68 tcp_tw_count, atomic_read(&tcp_sockets_allocated), 65 tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
69 atomic_read(&tcp_memory_allocated)); 66 atomic_read(&tcp_memory_allocated));
70 seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); 67 seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot));
71 seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); 68 seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot));
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 0db405a869f2..291831e792af 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -40,7 +40,6 @@
40#include <linux/timer.h> 40#include <linux/timer.h>
41#include <net/ip.h> 41#include <net/ip.h>
42#include <net/protocol.h> 42#include <net/protocol.h>
43#include <net/tcp.h>
44#include <linux/skbuff.h> 43#include <linux/skbuff.h>
45#include <net/sock.h> 44#include <net/sock.h>
46#include <net/icmp.h> 45#include <net/icmp.h>
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index d1835b1bc8c4..304bb0a1d4f0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -59,7 +59,6 @@
59#include <linux/netdevice.h> 59#include <linux/netdevice.h>
60#include <linux/in_route.h> 60#include <linux/in_route.h>
61#include <linux/route.h> 61#include <linux/route.h>
62#include <linux/tcp.h>
63#include <linux/skbuff.h> 62#include <linux/skbuff.h>
64#include <net/dst.h> 63#include <net/dst.h>
65#include <net/sock.h> 64#include <net/sock.h>
@@ -71,6 +70,7 @@
71#include <net/udp.h> 70#include <net/udp.h>
72#include <net/raw.h> 71#include <net/raw.h>
73#include <net/snmp.h> 72#include <net/snmp.h>
73#include <net/tcp_states.h>
74#include <net/inet_common.h> 74#include <net/inet_common.h>
75#include <net/checksum.h> 75#include <net/checksum.h>
76#include <net/xfrm.h> 76#include <net/xfrm.h>
@@ -150,10 +150,11 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
150 * RFC 1122: SHOULD pass TOS value up to the transport layer. 150 * RFC 1122: SHOULD pass TOS value up to the transport layer.
151 * -> It does. And not only TOS, but all IP header. 151 * -> It does. And not only TOS, but all IP header.
152 */ 152 */
153void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash) 153int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
154{ 154{
155 struct sock *sk; 155 struct sock *sk;
156 struct hlist_head *head; 156 struct hlist_head *head;
157 int delivered = 0;
157 158
158 read_lock(&raw_v4_lock); 159 read_lock(&raw_v4_lock);
159 head = &raw_v4_htable[hash]; 160 head = &raw_v4_htable[hash];
@@ -164,6 +165,7 @@ void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
164 skb->dev->ifindex); 165 skb->dev->ifindex);
165 166
166 while (sk) { 167 while (sk) {
168 delivered = 1;
167 if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) { 169 if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
168 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); 170 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
169 171
@@ -177,6 +179,7 @@ void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
177 } 179 }
178out: 180out:
179 read_unlock(&raw_v4_lock); 181 read_unlock(&raw_v4_lock);
182 return delivered;
180} 183}
181 184
182void raw_err (struct sock *sk, struct sk_buff *skb, u32 info) 185void raw_err (struct sock *sk, struct sk_buff *skb, u32 info)
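
raw_v4_input() now reports whether any raw socket matched, which lets its caller decide when a packet is truly undeliverable. A hedged sketch of that caller-side shape; the real logic lives in ip_local_deliver_finish(), and try_protocol_handler() below is a hypothetical stand-in for the inet_protos[] dispatch:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/icmp.h>
#include <net/raw.h>

/* Hypothetical stand-in: nonzero if a registered L4 handler took it. */
static int try_protocol_handler(struct sk_buff *skb)
{
	return 0;
}

static void deliver_one(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	int raw = raw_v4_input(skb, iph, hash);	/* clones to raw sockets */

	if (!try_protocol_handler(skb)) {
		if (!raw)	/* nobody wanted it: signal the sender */
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_PROT_UNREACH, 0);
		kfree_skb(skb);
	}
}
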
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d675ff80b04d..8c0b14e3beec 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -240,7 +240,9 @@ static unsigned rt_hash_mask;
240static int rt_hash_log; 240static int rt_hash_log;
241static unsigned int rt_hash_rnd; 241static unsigned int rt_hash_rnd;
242 242
243struct rt_cache_stat *rt_cache_stat; 243static struct rt_cache_stat *rt_cache_stat;
244#define RT_CACHE_STAT_INC(field) \
245 (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
244 246
245static int rt_intern_hash(unsigned hash, struct rtable *rth, 247static int rt_intern_hash(unsigned hash, struct rtable *rth,
246 struct rtable **res); 248 struct rtable **res);
@@ -2600,6 +2602,8 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2600 return ip_route_output_slow(rp, flp); 2602 return ip_route_output_slow(rp, flp);
2601} 2603}
2602 2604
2605EXPORT_SYMBOL_GPL(__ip_route_output_key);
2606
2603int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags) 2607int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2604{ 2608{
2605 int err; 2609 int err;
@@ -2618,6 +2622,8 @@ int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk,
2618 return 0; 2622 return 0;
2619} 2623}
2620 2624
2625EXPORT_SYMBOL_GPL(ip_route_output_flow);
2626
2621int ip_route_output_key(struct rtable **rp, struct flowi *flp) 2627int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2622{ 2628{
2623 return ip_route_output_flow(rp, flp, NULL, 0); 2629 return ip_route_output_flow(rp, flp, NULL, 0);
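
Making rt_cache_stat static works because every increment now funnels through RT_CACHE_STAT_INC, the usual per-CPU statistics pattern: one counter struct per CPU, bumped without atomics on the local copy and only summed when /proc is read. A sketch of the pattern with illustrative struct and field names:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct demo_stat {
	unsigned int in_hit;
	unsigned int in_slow_tot;
};

static struct demo_stat *demo_stat;

/* Cheap: touches only this CPU's copy, no locking or atomics needed. */
#define DEMO_STAT_INC(field) \
	(per_cpu_ptr(demo_stat, raw_smp_processor_id())->field++)

static int demo_stat_init(void)
{
	demo_stat = alloc_percpu(struct demo_stat);
	if (!demo_stat)
		return -ENOMEM;
	DEMO_STAT_INC(in_hit);
	return 0;
}
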
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 72d014442185..a34e60ea48a1 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -169,8 +169,6 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
169 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; 169 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
170} 170}
171 171
172extern struct request_sock_ops tcp_request_sock_ops;
173
174static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, 172static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
175 struct request_sock *req, 173 struct request_sock *req,
176 struct dst_entry *dst) 174 struct dst_entry *dst)
@@ -180,7 +178,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
180 178
181 child = tp->af_specific->syn_recv_sock(sk, skb, req, dst); 179 child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
182 if (child) 180 if (child)
183 tcp_acceptq_queue(sk, req, child); 181 inet_csk_reqsk_queue_add(sk, req, child);
184 else 182 else
185 reqsk_free(req); 183 reqsk_free(req);
186 184
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e32894532416..652685623519 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -11,7 +11,9 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/sysctl.h> 12#include <linux/sysctl.h>
13#include <linux/config.h> 13#include <linux/config.h>
14#include <linux/igmp.h>
14#include <net/snmp.h> 15#include <net/snmp.h>
16#include <net/icmp.h>
15#include <net/ip.h> 17#include <net/ip.h>
16#include <net/route.h> 18#include <net/route.h>
17#include <net/tcp.h> 19#include <net/tcp.h>
@@ -19,36 +21,6 @@
19/* From af_inet.c */ 21/* From af_inet.c */
20extern int sysctl_ip_nonlocal_bind; 22extern int sysctl_ip_nonlocal_bind;
21 23
22/* From icmp.c */
23extern int sysctl_icmp_echo_ignore_all;
24extern int sysctl_icmp_echo_ignore_broadcasts;
25extern int sysctl_icmp_ignore_bogus_error_responses;
26extern int sysctl_icmp_errors_use_inbound_ifaddr;
27
28/* From ip_fragment.c */
29extern int sysctl_ipfrag_low_thresh;
30extern int sysctl_ipfrag_high_thresh;
31extern int sysctl_ipfrag_time;
32extern int sysctl_ipfrag_secret_interval;
33
34/* From ip_output.c */
35extern int sysctl_ip_dynaddr;
36
37/* From icmp.c */
38extern int sysctl_icmp_ratelimit;
39extern int sysctl_icmp_ratemask;
40
41/* From igmp.c */
42extern int sysctl_igmp_max_memberships;
43extern int sysctl_igmp_max_msf;
44
45/* From inetpeer.c */
46extern int inet_peer_threshold;
47extern int inet_peer_minttl;
48extern int inet_peer_maxttl;
49extern int inet_peer_gc_mintime;
50extern int inet_peer_gc_maxtime;
51
52#ifdef CONFIG_SYSCTL 24#ifdef CONFIG_SYSCTL
53static int tcp_retr1_max = 255; 25static int tcp_retr1_max = 255;
54static int ip_local_port_range_min[] = { 1, 1 }; 26static int ip_local_port_range_min[] = { 1, 1 };
@@ -57,8 +29,6 @@ static int ip_local_port_range_max[] = { 65535, 65535 };
57 29
58struct ipv4_config ipv4_config; 30struct ipv4_config ipv4_config;
59 31
60extern ctl_table ipv4_route_table[];
61
62#ifdef CONFIG_SYSCTL 32#ifdef CONFIG_SYSCTL
63 33
64static 34static
@@ -136,10 +106,11 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file *
136 return ret; 106 return ret;
137} 107}
138 108
139int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, int nlen, 109static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
140 void __user *oldval, size_t __user *oldlenp, 110 int nlen, void __user *oldval,
141 void __user *newval, size_t newlen, 111 size_t __user *oldlenp,
142 void **context) 112 void __user *newval, size_t newlen,
113 void **context)
143{ 114{
144 char val[TCP_CA_NAME_MAX]; 115 char val[TCP_CA_NAME_MAX];
145 ctl_table tbl = { 116 ctl_table tbl = {
@@ -259,7 +230,7 @@ ctl_table ipv4_table[] = {
259 { 230 {
260 .ctl_name = NET_TCP_MAX_TW_BUCKETS, 231 .ctl_name = NET_TCP_MAX_TW_BUCKETS,
261 .procname = "tcp_max_tw_buckets", 232 .procname = "tcp_max_tw_buckets",
262 .data = &sysctl_tcp_max_tw_buckets, 233 .data = &tcp_death_row.sysctl_max_tw_buckets,
263 .maxlen = sizeof(int), 234 .maxlen = sizeof(int),
264 .mode = 0644, 235 .mode = 0644,
265 .proc_handler = &proc_dointvec 236 .proc_handler = &proc_dointvec
@@ -363,7 +334,7 @@ ctl_table ipv4_table[] = {
363 { 334 {
364 .ctl_name = NET_TCP_TW_RECYCLE, 335 .ctl_name = NET_TCP_TW_RECYCLE,
365 .procname = "tcp_tw_recycle", 336 .procname = "tcp_tw_recycle",
366 .data = &sysctl_tcp_tw_recycle, 337 .data = &tcp_death_row.sysctl_tw_recycle,
367 .maxlen = sizeof(int), 338 .maxlen = sizeof(int),
368 .mode = 0644, 339 .mode = 0644,
369 .proc_handler = &proc_dointvec 340 .proc_handler = &proc_dointvec
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 69b1fcf70077..02fdda68718d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -269,13 +269,12 @@
269 269
270int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; 270int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
271 271
272DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); 272DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
273
274kmem_cache_t *tcp_bucket_cachep;
275kmem_cache_t *tcp_timewait_cachep;
276 273
277atomic_t tcp_orphan_count = ATOMIC_INIT(0); 274atomic_t tcp_orphan_count = ATOMIC_INIT(0);
278 275
276EXPORT_SYMBOL_GPL(tcp_orphan_count);
277
279int sysctl_tcp_mem[3]; 278int sysctl_tcp_mem[3];
280int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 279int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
281int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 280int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
@@ -311,15 +310,6 @@ void tcp_enter_memory_pressure(void)
311EXPORT_SYMBOL(tcp_enter_memory_pressure); 310EXPORT_SYMBOL(tcp_enter_memory_pressure);
312 311
313/* 312/*
314 * LISTEN is a special case for poll..
315 */
316static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
317 poll_table *wait)
318{
319 return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
320}
321
322/*
323 * Wait for a TCP event. 313 * Wait for a TCP event.
324 * 314 *
325 * Note that we don't need to lock the socket, as the upper poll layers 315 * Note that we don't need to lock the socket, as the upper poll layers
@@ -334,7 +324,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
334 324
335 poll_wait(file, sk->sk_sleep, wait); 325 poll_wait(file, sk->sk_sleep, wait);
336 if (sk->sk_state == TCP_LISTEN) 326 if (sk->sk_state == TCP_LISTEN)
337 return tcp_listen_poll(sk, wait); 327 return inet_csk_listen_poll(sk);
338 328
339 /* Socket is not locked. We are protected from async events 329 /* Socket is not locked. We are protected from async events
340 by poll logic and correct handling of state changes 330 by poll logic and correct handling of state changes
@@ -457,109 +447,6 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
457 return put_user(answ, (int __user *)arg); 447 return put_user(answ, (int __user *)arg);
458} 448}
459 449
460
461int tcp_listen_start(struct sock *sk)
462{
463 struct inet_sock *inet = inet_sk(sk);
464 struct tcp_sock *tp = tcp_sk(sk);
465 int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
466
467 if (rc != 0)
468 return rc;
469
470 sk->sk_max_ack_backlog = 0;
471 sk->sk_ack_backlog = 0;
472 tcp_delack_init(tp);
473
474 /* There is race window here: we announce ourselves listening,
475 * but this transition is still not validated by get_port().
476 * It is OK, because this socket enters to hash table only
477 * after validation is complete.
478 */
479 sk->sk_state = TCP_LISTEN;
480 if (!sk->sk_prot->get_port(sk, inet->num)) {
481 inet->sport = htons(inet->num);
482
483 sk_dst_reset(sk);
484 sk->sk_prot->hash(sk);
485
486 return 0;
487 }
488
489 sk->sk_state = TCP_CLOSE;
490 reqsk_queue_destroy(&tp->accept_queue);
491 return -EADDRINUSE;
492}
493
494/*
495 * This routine closes sockets which have been at least partially
496 * opened, but not yet accepted.
497 */
498
499static void tcp_listen_stop (struct sock *sk)
500{
501 struct tcp_sock *tp = tcp_sk(sk);
502 struct listen_sock *lopt;
503 struct request_sock *acc_req;
504 struct request_sock *req;
505 int i;
506
507 tcp_delete_keepalive_timer(sk);
508
509 /* make all the listen_opt local to us */
510 lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
511 acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
512
513 if (lopt->qlen) {
514 for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
515 while ((req = lopt->syn_table[i]) != NULL) {
516 lopt->syn_table[i] = req->dl_next;
517 lopt->qlen--;
518 reqsk_free(req);
519
520 /* Following specs, it would be better either to send FIN
521 * (and enter FIN-WAIT-1, it is normal close)
522 * or to send active reset (abort).
523 * Certainly, it is pretty dangerous while synflood, but it is
524 * bad justification for our negligence 8)
525 * To be honest, we are not able to make either
526 * of the variants now. --ANK
527 */
528 }
529 }
530 }
531 BUG_TRAP(!lopt->qlen);
532
533 kfree(lopt);
534
535 while ((req = acc_req) != NULL) {
536 struct sock *child = req->sk;
537
538 acc_req = req->dl_next;
539
540 local_bh_disable();
541 bh_lock_sock(child);
542 BUG_TRAP(!sock_owned_by_user(child));
543 sock_hold(child);
544
545 tcp_disconnect(child, O_NONBLOCK);
546
547 sock_orphan(child);
548
549 atomic_inc(&tcp_orphan_count);
550
551 tcp_destroy_sock(child);
552
553 bh_unlock_sock(child);
554 local_bh_enable();
555 sock_put(child);
556
557 sk_acceptq_removed(sk);
558 __reqsk_free(req);
559 }
560 BUG_TRAP(!sk->sk_ack_backlog);
561}
562
563static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 450static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
564{ 451{
565 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 452 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
@@ -975,7 +862,7 @@ do_fault:
975 if (!skb->len) { 862 if (!skb->len) {
976 if (sk->sk_send_head == skb) 863 if (sk->sk_send_head == skb)
977 sk->sk_send_head = NULL; 864 sk->sk_send_head = NULL;
978 __skb_unlink(skb, skb->list); 865 __skb_unlink(skb, &sk->sk_write_queue);
979 sk_stream_free_skb(sk, skb); 866 sk_stream_free_skb(sk, skb);
980 } 867 }
981 868
@@ -1057,20 +944,21 @@ static void cleanup_rbuf(struct sock *sk, int copied)
1057 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 944 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1058#endif 945#endif
1059 946
1060 if (tcp_ack_scheduled(tp)) { 947 if (inet_csk_ack_scheduled(sk)) {
948 const struct inet_connection_sock *icsk = inet_csk(sk);
1061 /* Delayed ACKs frequently hit locked sockets during bulk 949 /* Delayed ACKs frequently hit locked sockets during bulk
1062 * receive. */ 950 * receive. */
1063 if (tp->ack.blocked || 951 if (icsk->icsk_ack.blocked ||
1064 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 952 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1065 tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss || 953 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1066 /* 954 /*
1067 * If this read emptied read buffer, we send ACK, if 955 * If this read emptied read buffer, we send ACK, if
1068 * connection is not bidirectional, user drained 956 * connection is not bidirectional, user drained
1069 * receive buffer and there was a small segment 957 * receive buffer and there was a small segment
1070 * in queue. 958 * in queue.
1071 */ 959 */
1072 (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) && 960 (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1073 !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) 961 !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1074 time_to_ack = 1; 962 time_to_ack = 1;
1075 } 963 }
1076 964
@@ -1572,40 +1460,6 @@ void tcp_shutdown(struct sock *sk, int how)
1572 } 1460 }
1573} 1461}
1574 1462
1575/*
1576 * At this point, there should be no process reference to this
1577 * socket, and thus no user references at all. Therefore we
1578 * can assume the socket waitqueue is inactive and nobody will
1579 * try to jump onto it.
1580 */
1581void tcp_destroy_sock(struct sock *sk)
1582{
1583 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1584 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1585
1586 /* It cannot be in hash table! */
1587 BUG_TRAP(sk_unhashed(sk));
1588
1589 /* If it has not 0 inet_sk(sk)->num, it must be bound */
1590 BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
1591
1592 sk->sk_prot->destroy(sk);
1593
1594 sk_stream_kill_queues(sk);
1595
1596 xfrm_sk_free_policy(sk);
1597
1598#ifdef INET_REFCNT_DEBUG
1599 if (atomic_read(&sk->sk_refcnt) != 1) {
1600 printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
1601 sk, atomic_read(&sk->sk_refcnt));
1602 }
1603#endif
1604
1605 atomic_dec(&tcp_orphan_count);
1606 sock_put(sk);
1607}
1608
1609void tcp_close(struct sock *sk, long timeout) 1463void tcp_close(struct sock *sk, long timeout)
1610{ 1464{
1611 struct sk_buff *skb; 1465 struct sk_buff *skb;
@@ -1618,7 +1472,7 @@ void tcp_close(struct sock *sk, long timeout)
1618 tcp_set_state(sk, TCP_CLOSE); 1472 tcp_set_state(sk, TCP_CLOSE);
1619 1473
1620 /* Special case. */ 1474 /* Special case. */
1621 tcp_listen_stop(sk); 1475 inet_csk_listen_stop(sk);
1622 1476
1623 goto adjudge_to_death; 1477 goto adjudge_to_death;
1624 } 1478 }
@@ -1721,12 +1575,12 @@ adjudge_to_death:
1721 tcp_send_active_reset(sk, GFP_ATOMIC); 1575 tcp_send_active_reset(sk, GFP_ATOMIC);
1722 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); 1576 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1723 } else { 1577 } else {
1724 int tmo = tcp_fin_time(tp); 1578 const int tmo = tcp_fin_time(sk);
1725 1579
1726 if (tmo > TCP_TIMEWAIT_LEN) { 1580 if (tmo > TCP_TIMEWAIT_LEN) {
1727 tcp_reset_keepalive_timer(sk, tcp_fin_time(tp)); 1581 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1728 } else { 1582 } else {
1729 atomic_inc(&tcp_orphan_count); 1583 atomic_inc(sk->sk_prot->orphan_count);
1730 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1584 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1731 goto out; 1585 goto out;
1732 } 1586 }
@@ -1734,7 +1588,7 @@ adjudge_to_death:
1734 } 1588 }
1735 if (sk->sk_state != TCP_CLOSE) { 1589 if (sk->sk_state != TCP_CLOSE) {
1736 sk_stream_mem_reclaim(sk); 1590 sk_stream_mem_reclaim(sk);
1737 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans || 1591 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1738 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 1592 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1739 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { 1593 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1740 if (net_ratelimit()) 1594 if (net_ratelimit())
@@ -1745,10 +1599,10 @@ adjudge_to_death:
1745 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 1599 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1746 } 1600 }
1747 } 1601 }
1748 atomic_inc(&tcp_orphan_count); 1602 atomic_inc(sk->sk_prot->orphan_count);
1749 1603
1750 if (sk->sk_state == TCP_CLOSE) 1604 if (sk->sk_state == TCP_CLOSE)
1751 tcp_destroy_sock(sk); 1605 inet_csk_destroy_sock(sk);
1752 /* Otherwise, socket is reprieved until protocol close. */ 1606 /* Otherwise, socket is reprieved until protocol close. */
1753 1607
1754out: 1608out:
@@ -1769,6 +1623,7 @@ static inline int tcp_need_reset(int state)
1769int tcp_disconnect(struct sock *sk, int flags) 1623int tcp_disconnect(struct sock *sk, int flags)
1770{ 1624{
1771 struct inet_sock *inet = inet_sk(sk); 1625 struct inet_sock *inet = inet_sk(sk);
1626 struct inet_connection_sock *icsk = inet_csk(sk);
1772 struct tcp_sock *tp = tcp_sk(sk); 1627 struct tcp_sock *tp = tcp_sk(sk);
1773 int err = 0; 1628 int err = 0;
1774 int old_state = sk->sk_state; 1629 int old_state = sk->sk_state;
@@ -1778,7 +1633,7 @@ int tcp_disconnect(struct sock *sk, int flags)
1778 1633
1779 /* ABORT function of RFC793 */ 1634 /* ABORT function of RFC793 */
1780 if (old_state == TCP_LISTEN) { 1635 if (old_state == TCP_LISTEN) {
1781 tcp_listen_stop(sk); 1636 inet_csk_listen_stop(sk);
1782 } else if (tcp_need_reset(old_state) || 1637 } else if (tcp_need_reset(old_state) ||
1783 (tp->snd_nxt != tp->write_seq && 1638 (tp->snd_nxt != tp->write_seq &&
1784 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 1639 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -1805,125 +1660,34 @@ int tcp_disconnect(struct sock *sk, int flags)
1805 tp->srtt = 0; 1660 tp->srtt = 0;
1806 if ((tp->write_seq += tp->max_window + 2) == 0) 1661 if ((tp->write_seq += tp->max_window + 2) == 0)
1807 tp->write_seq = 1; 1662 tp->write_seq = 1;
1808 tp->backoff = 0; 1663 icsk->icsk_backoff = 0;
1809 tp->snd_cwnd = 2; 1664 tp->snd_cwnd = 2;
1810 tp->probes_out = 0; 1665 icsk->icsk_probes_out = 0;
1811 tp->packets_out = 0; 1666 tp->packets_out = 0;
1812 tp->snd_ssthresh = 0x7fffffff; 1667 tp->snd_ssthresh = 0x7fffffff;
1813 tp->snd_cwnd_cnt = 0; 1668 tp->snd_cwnd_cnt = 0;
1814 tcp_set_ca_state(tp, TCP_CA_Open); 1669 tcp_set_ca_state(sk, TCP_CA_Open);
1815 tcp_clear_retrans(tp); 1670 tcp_clear_retrans(tp);
1816 tcp_delack_init(tp); 1671 inet_csk_delack_init(sk);
1817 sk->sk_send_head = NULL; 1672 sk->sk_send_head = NULL;
1818 tp->rx_opt.saw_tstamp = 0; 1673 tp->rx_opt.saw_tstamp = 0;
1819 tcp_sack_reset(&tp->rx_opt); 1674 tcp_sack_reset(&tp->rx_opt);
1820 __sk_dst_reset(sk); 1675 __sk_dst_reset(sk);
1821 1676
1822 BUG_TRAP(!inet->num || tp->bind_hash); 1677 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1823 1678
1824 sk->sk_error_report(sk); 1679 sk->sk_error_report(sk);
1825 return err; 1680 return err;
1826} 1681}
1827 1682
1828/* 1683/*
1829 * Wait for an incoming connection, avoid race
1830 * conditions. This must be called with the socket locked.
1831 */
1832static int wait_for_connect(struct sock *sk, long timeo)
1833{
1834 struct tcp_sock *tp = tcp_sk(sk);
1835 DEFINE_WAIT(wait);
1836 int err;
1837
1838 /*
1839 * True wake-one mechanism for incoming connections: only
1840 * one process gets woken up, not the 'whole herd'.
1841 * Since we do not 'race & poll' for established sockets
1842 * anymore, the common case will execute the loop only once.
1843 *
1844 * Subtle issue: "add_wait_queue_exclusive()" will be added
1845 * after any current non-exclusive waiters, and we know that
1846 * it will always _stay_ after any new non-exclusive waiters
1847 * because all non-exclusive waiters are added at the
1848 * beginning of the wait-queue. As such, it's ok to "drop"
1849 * our exclusiveness temporarily when we get woken up without
1850 * having to remove and re-insert us on the wait queue.
1851 */
1852 for (;;) {
1853 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
1854 TASK_INTERRUPTIBLE);
1855 release_sock(sk);
1856 if (reqsk_queue_empty(&tp->accept_queue))
1857 timeo = schedule_timeout(timeo);
1858 lock_sock(sk);
1859 err = 0;
1860 if (!reqsk_queue_empty(&tp->accept_queue))
1861 break;
1862 err = -EINVAL;
1863 if (sk->sk_state != TCP_LISTEN)
1864 break;
1865 err = sock_intr_errno(timeo);
1866 if (signal_pending(current))
1867 break;
1868 err = -EAGAIN;
1869 if (!timeo)
1870 break;
1871 }
1872 finish_wait(sk->sk_sleep, &wait);
1873 return err;
1874}
1875
1876/*
1877 * This will accept the next outstanding connection.
1878 */
1879
1880struct sock *tcp_accept(struct sock *sk, int flags, int *err)
1881{
1882 struct tcp_sock *tp = tcp_sk(sk);
1883 struct sock *newsk;
1884 int error;
1885
1886 lock_sock(sk);
1887
1888 /* We need to make sure that this socket is listening,
1889 * and that it has something pending.
1890 */
1891 error = -EINVAL;
1892 if (sk->sk_state != TCP_LISTEN)
1893 goto out_err;
1894
1895 /* Find already established connection */
1896 if (reqsk_queue_empty(&tp->accept_queue)) {
1897 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1898
1899 /* If this is a non blocking socket don't sleep */
1900 error = -EAGAIN;
1901 if (!timeo)
1902 goto out_err;
1903
1904 error = wait_for_connect(sk, timeo);
1905 if (error)
1906 goto out_err;
1907 }
1908
1909 newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
1910 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
1911out:
1912 release_sock(sk);
1913 return newsk;
1914out_err:
1915 newsk = NULL;
1916 *err = error;
1917 goto out;
1918}
1919
1920/*
1921 * Socket option code for TCP. 1684 * Socket option code for TCP.
1922 */ 1685 */
1923int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 1686int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1924 int optlen) 1687 int optlen)
1925{ 1688{
1926 struct tcp_sock *tp = tcp_sk(sk); 1689 struct tcp_sock *tp = tcp_sk(sk);
1690 struct inet_connection_sock *icsk = inet_csk(sk);
1927 int val; 1691 int val;
1928 int err = 0; 1692 int err = 0;
1929 1693
@@ -1945,7 +1709,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1945 name[val] = 0; 1709 name[val] = 0;
1946 1710
1947 lock_sock(sk); 1711 lock_sock(sk);
1948 err = tcp_set_congestion_control(tp, name); 1712 err = tcp_set_congestion_control(sk, name);
1949 release_sock(sk); 1713 release_sock(sk);
1950 return err; 1714 return err;
1951 } 1715 }
@@ -2022,7 +1786,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2022 elapsed = tp->keepalive_time - elapsed; 1786 elapsed = tp->keepalive_time - elapsed;
2023 else 1787 else
2024 elapsed = 0; 1788 elapsed = 0;
2025 tcp_reset_keepalive_timer(sk, elapsed); 1789 inet_csk_reset_keepalive_timer(sk, elapsed);
2026 } 1790 }
2027 } 1791 }
2028 break; 1792 break;
@@ -2042,7 +1806,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2042 if (val < 1 || val > MAX_TCP_SYNCNT) 1806 if (val < 1 || val > MAX_TCP_SYNCNT)
2043 err = -EINVAL; 1807 err = -EINVAL;
2044 else 1808 else
2045 tp->syn_retries = val; 1809 icsk->icsk_syn_retries = val;
2046 break; 1810 break;
2047 1811
2048 case TCP_LINGER2: 1812 case TCP_LINGER2:
@@ -2055,15 +1819,15 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2055 break; 1819 break;
2056 1820
2057 case TCP_DEFER_ACCEPT: 1821 case TCP_DEFER_ACCEPT:
2058 tp->defer_accept = 0; 1822 icsk->icsk_accept_queue.rskq_defer_accept = 0;
2059 if (val > 0) { 1823 if (val > 0) {
2060 /* Translate value in seconds to number of 1824 /* Translate value in seconds to number of
2061 * retransmits */ 1825 * retransmits */
2062 while (tp->defer_accept < 32 && 1826 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2063 val > ((TCP_TIMEOUT_INIT / HZ) << 1827 val > ((TCP_TIMEOUT_INIT / HZ) <<
2064 tp->defer_accept)) 1828 icsk->icsk_accept_queue.rskq_defer_accept))
2065 tp->defer_accept++; 1829 icsk->icsk_accept_queue.rskq_defer_accept++;
2066 tp->defer_accept++; 1830 icsk->icsk_accept_queue.rskq_defer_accept++;
2067 } 1831 }
2068 break; 1832 break;
2069 1833
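
The TCP_DEFER_ACCEPT translation above turns a timeout in seconds into the number of SYN-ACK retransmissions whose exponential backoff covers it; getsockopt() later maps the count back to a rounded-up time. A runnable sketch, assuming the era's TCP_TIMEOUT_INIT of 3 seconds:

#include <stdio.h>

#define TIMEOUT_INIT_SECS 3	/* TCP_TIMEOUT_INIT / HZ at the time */

int main(void)
{
	int val = 30;		/* setsockopt(TCP_DEFER_ACCEPT, 30 seconds) */
	int defer = 0;

	/* Same loop as the setsockopt path above. */
	while (defer < 32 && val > (TIMEOUT_INIT_SECS << defer))
		defer++;
	defer++;		/* the kernel adds one more, as above */

	/* Same mapping as the getsockopt path. */
	printf("30s -> %d retransmits -> reported as %ds\n",
	       defer, TIMEOUT_INIT_SECS << (defer - 1));
	return 0;
}
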
@@ -2081,16 +1845,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2081 1845
2082 case TCP_QUICKACK: 1846 case TCP_QUICKACK:
2083 if (!val) { 1847 if (!val) {
2084 tp->ack.pingpong = 1; 1848 icsk->icsk_ack.pingpong = 1;
2085 } else { 1849 } else {
2086 tp->ack.pingpong = 0; 1850 icsk->icsk_ack.pingpong = 0;
2087 if ((1 << sk->sk_state) & 1851 if ((1 << sk->sk_state) &
2088 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 1852 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2089 tcp_ack_scheduled(tp)) { 1853 inet_csk_ack_scheduled(sk)) {
2090 tp->ack.pending |= TCP_ACK_PUSHED; 1854 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2091 cleanup_rbuf(sk, 1); 1855 cleanup_rbuf(sk, 1);
2092 if (!(val & 1)) 1856 if (!(val & 1))
2093 tp->ack.pingpong = 1; 1857 icsk->icsk_ack.pingpong = 1;
2094 } 1858 }
2095 } 1859 }
2096 break; 1860 break;
@@ -2107,15 +1871,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2107void tcp_get_info(struct sock *sk, struct tcp_info *info) 1871void tcp_get_info(struct sock *sk, struct tcp_info *info)
2108{ 1872{
2109 struct tcp_sock *tp = tcp_sk(sk); 1873 struct tcp_sock *tp = tcp_sk(sk);
1874 const struct inet_connection_sock *icsk = inet_csk(sk);
2110 u32 now = tcp_time_stamp; 1875 u32 now = tcp_time_stamp;
2111 1876
2112 memset(info, 0, sizeof(*info)); 1877 memset(info, 0, sizeof(*info));
2113 1878
2114 info->tcpi_state = sk->sk_state; 1879 info->tcpi_state = sk->sk_state;
2115 info->tcpi_ca_state = tp->ca_state; 1880 info->tcpi_ca_state = icsk->icsk_ca_state;
2116 info->tcpi_retransmits = tp->retransmits; 1881 info->tcpi_retransmits = icsk->icsk_retransmits;
2117 info->tcpi_probes = tp->probes_out; 1882 info->tcpi_probes = icsk->icsk_probes_out;
2118 info->tcpi_backoff = tp->backoff; 1883 info->tcpi_backoff = icsk->icsk_backoff;
2119 1884
2120 if (tp->rx_opt.tstamp_ok) 1885 if (tp->rx_opt.tstamp_ok)
2121 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 1886 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
@@ -2130,10 +1895,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2130 if (tp->ecn_flags&TCP_ECN_OK) 1895 if (tp->ecn_flags&TCP_ECN_OK)
2131 info->tcpi_options |= TCPI_OPT_ECN; 1896 info->tcpi_options |= TCPI_OPT_ECN;
2132 1897
2133 info->tcpi_rto = jiffies_to_usecs(tp->rto); 1898 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2134 info->tcpi_ato = jiffies_to_usecs(tp->ack.ato); 1899 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2135 info->tcpi_snd_mss = tp->mss_cache; 1900 info->tcpi_snd_mss = tp->mss_cache;
2136 info->tcpi_rcv_mss = tp->ack.rcv_mss; 1901 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2137 1902
2138 info->tcpi_unacked = tp->packets_out; 1903 info->tcpi_unacked = tp->packets_out;
2139 info->tcpi_sacked = tp->sacked_out; 1904 info->tcpi_sacked = tp->sacked_out;
@@ -2142,7 +1907,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2142 info->tcpi_fackets = tp->fackets_out; 1907 info->tcpi_fackets = tp->fackets_out;
2143 1908
2144 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 1909 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2145 info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime); 1910 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2146 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 1911 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2147 1912
2148 info->tcpi_pmtu = tp->pmtu_cookie; 1913 info->tcpi_pmtu = tp->pmtu_cookie;
@@ -2165,6 +1930,7 @@ EXPORT_SYMBOL_GPL(tcp_get_info);
2165int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 1930int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2166 int __user *optlen) 1931 int __user *optlen)
2167{ 1932{
1933 struct inet_connection_sock *icsk = inet_csk(sk);
2168 struct tcp_sock *tp = tcp_sk(sk); 1934 struct tcp_sock *tp = tcp_sk(sk);
2169 int val, len; 1935 int val, len;
2170 1936
@@ -2202,7 +1968,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2202 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; 1968 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2203 break; 1969 break;
2204 case TCP_SYNCNT: 1970 case TCP_SYNCNT:
2205 val = tp->syn_retries ? : sysctl_tcp_syn_retries; 1971 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2206 break; 1972 break;
2207 case TCP_LINGER2: 1973 case TCP_LINGER2:
2208 val = tp->linger2; 1974 val = tp->linger2;
@@ -2210,8 +1976,8 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2210 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 1976 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2211 break; 1977 break;
2212 case TCP_DEFER_ACCEPT: 1978 case TCP_DEFER_ACCEPT:
2213 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) << 1979 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2214 (tp->defer_accept - 1)); 1980 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2215 break; 1981 break;
2216 case TCP_WINDOW_CLAMP: 1982 case TCP_WINDOW_CLAMP:
2217 val = tp->window_clamp; 1983 val = tp->window_clamp;
@@ -2232,7 +1998,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2232 return 0; 1998 return 0;
2233 } 1999 }
2234 case TCP_QUICKACK: 2000 case TCP_QUICKACK:
2235 val = !tp->ack.pingpong; 2001 val = !icsk->icsk_ack.pingpong;
2236 break; 2002 break;
2237 2003
2238 case TCP_CONGESTION: 2004 case TCP_CONGESTION:
@@ -2241,7 +2007,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2241 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 2007 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2242 if (put_user(len, optlen)) 2008 if (put_user(len, optlen))
2243 return -EFAULT; 2009 return -EFAULT;
2244 if (copy_to_user(optval, tp->ca_ops->name, len)) 2010 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2245 return -EFAULT; 2011 return -EFAULT;
2246 return 0; 2012 return 0;
2247 default: 2013 default:
@@ -2278,79 +2044,72 @@ void __init tcp_init(void)
2278 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb), 2044 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2279 sizeof(skb->cb)); 2045 sizeof(skb->cb));
2280 2046
2281 tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket", 2047 tcp_hashinfo.bind_bucket_cachep =
2282 sizeof(struct tcp_bind_bucket), 2048 kmem_cache_create("tcp_bind_bucket",
2283 0, SLAB_HWCACHE_ALIGN, 2049 sizeof(struct inet_bind_bucket), 0,
2284 NULL, NULL); 2050 SLAB_HWCACHE_ALIGN, NULL, NULL);
2285 if (!tcp_bucket_cachep) 2051 if (!tcp_hashinfo.bind_bucket_cachep)
2286 panic("tcp_init: Cannot alloc tcp_bind_bucket cache."); 2052 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2287 2053
2288 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
2289 sizeof(struct tcp_tw_bucket),
2290 0, SLAB_HWCACHE_ALIGN,
2291 NULL, NULL);
2292 if (!tcp_timewait_cachep)
2293 panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
2294
2295 /* Size and allocate the main established and bind bucket 2054 /* Size and allocate the main established and bind bucket
2296 * hash tables. 2055 * hash tables.
2297 * 2056 *
2298 * The methodology is similar to that of the buffer cache. 2057 * The methodology is similar to that of the buffer cache.
2299 */ 2058 */
2300 tcp_ehash = (struct tcp_ehash_bucket *) 2059 tcp_hashinfo.ehash =
2301 alloc_large_system_hash("TCP established", 2060 alloc_large_system_hash("TCP established",
2302 sizeof(struct tcp_ehash_bucket), 2061 sizeof(struct inet_ehash_bucket),
2303 thash_entries, 2062 thash_entries,
2304 (num_physpages >= 128 * 1024) ? 2063 (num_physpages >= 128 * 1024) ?
2305 (25 - PAGE_SHIFT) : 2064 (25 - PAGE_SHIFT) :
2306 (27 - PAGE_SHIFT), 2065 (27 - PAGE_SHIFT),
2307 HASH_HIGHMEM, 2066 HASH_HIGHMEM,
2308 &tcp_ehash_size, 2067 &tcp_hashinfo.ehash_size,
2309 NULL, 2068 NULL,
2310 0); 2069 0);
2311 tcp_ehash_size = (1 << tcp_ehash_size) >> 1; 2070 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2312 for (i = 0; i < (tcp_ehash_size << 1); i++) { 2071 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2313 rwlock_init(&tcp_ehash[i].lock); 2072 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2314 INIT_HLIST_HEAD(&tcp_ehash[i].chain); 2073 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2315 } 2074 }
2316 2075
2317 tcp_bhash = (struct tcp_bind_hashbucket *) 2076 tcp_hashinfo.bhash =
2318 alloc_large_system_hash("TCP bind", 2077 alloc_large_system_hash("TCP bind",
2319 sizeof(struct tcp_bind_hashbucket), 2078 sizeof(struct inet_bind_hashbucket),
2320 tcp_ehash_size, 2079 tcp_hashinfo.ehash_size,
2321 (num_physpages >= 128 * 1024) ? 2080 (num_physpages >= 128 * 1024) ?
2322 (25 - PAGE_SHIFT) : 2081 (25 - PAGE_SHIFT) :
2323 (27 - PAGE_SHIFT), 2082 (27 - PAGE_SHIFT),
2324 HASH_HIGHMEM, 2083 HASH_HIGHMEM,
2325 &tcp_bhash_size, 2084 &tcp_hashinfo.bhash_size,
2326 NULL, 2085 NULL,
2327 64 * 1024); 2086 64 * 1024);
2328 tcp_bhash_size = 1 << tcp_bhash_size; 2087 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2329 for (i = 0; i < tcp_bhash_size; i++) { 2088 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2330 spin_lock_init(&tcp_bhash[i].lock); 2089 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2331 INIT_HLIST_HEAD(&tcp_bhash[i].chain); 2090 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2332 } 2091 }
2333 2092
2334 /* Try to be a bit smarter and adjust defaults depending 2093 /* Try to be a bit smarter and adjust defaults depending
2335 * on available memory. 2094 * on available memory.
2336 */ 2095 */
2337 for (order = 0; ((1 << order) << PAGE_SHIFT) < 2096 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2338 (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket)); 2097 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2339 order++) 2098 order++)
2340 ; 2099 ;
2341 if (order >= 4) { 2100 if (order >= 4) {
2342 sysctl_local_port_range[0] = 32768; 2101 sysctl_local_port_range[0] = 32768;
2343 sysctl_local_port_range[1] = 61000; 2102 sysctl_local_port_range[1] = 61000;
2344 sysctl_tcp_max_tw_buckets = 180000; 2103 tcp_death_row.sysctl_max_tw_buckets = 180000;
2345 sysctl_tcp_max_orphans = 4096 << (order - 4); 2104 sysctl_tcp_max_orphans = 4096 << (order - 4);
2346 sysctl_max_syn_backlog = 1024; 2105 sysctl_max_syn_backlog = 1024;
2347 } else if (order < 3) { 2106 } else if (order < 3) {
2348 sysctl_local_port_range[0] = 1024 * (3 - order); 2107 sysctl_local_port_range[0] = 1024 * (3 - order);
2349 sysctl_tcp_max_tw_buckets >>= (3 - order); 2108 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2350 sysctl_tcp_max_orphans >>= (3 - order); 2109 sysctl_tcp_max_orphans >>= (3 - order);
2351 sysctl_max_syn_backlog = 128; 2110 sysctl_max_syn_backlog = 128;
2352 } 2111 }
2353 tcp_port_rover = sysctl_local_port_range[0] - 1; 2112 tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
2354 2113
2355 sysctl_tcp_mem[0] = 768 << order; 2114 sysctl_tcp_mem[0] = 768 << order;
2356 sysctl_tcp_mem[1] = 1024 << order; 2115 sysctl_tcp_mem[1] = 1024 << order;
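
The empty for-loop above finds the smallest page order whose allocation covers the bind-hash table, and that order then scales the port-range, time-wait and orphan defaults to the machine's memory. The same computation in runnable userspace form, with illustrative sizes:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4K pages */

int main(void)
{
	unsigned long bhash_bytes = 65536 * 16;	/* buckets * sizeof(bucket) */
	int order;

	/* Smallest order with (1 << order) pages covering the table. */
	for (order = 0; (1UL << order << PAGE_SHIFT) < bhash_bytes; order++)
		;
	printf("order=%d -> %s defaults\n", order,
	       order >= 4 ? "large-memory" : "small-memory");
	return 0;
}
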
@@ -2365,14 +2124,12 @@ void __init tcp_init(void)
2365 2124
2366 printk(KERN_INFO "TCP: Hash tables configured " 2125 printk(KERN_INFO "TCP: Hash tables configured "
2367 "(established %d bind %d)\n", 2126 "(established %d bind %d)\n",
2368 tcp_ehash_size << 1, tcp_bhash_size); 2127 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2369 2128
2370 tcp_register_congestion_control(&tcp_reno); 2129 tcp_register_congestion_control(&tcp_reno);
2371} 2130}
2372 2131
2373EXPORT_SYMBOL(tcp_accept);
2374EXPORT_SYMBOL(tcp_close); 2132EXPORT_SYMBOL(tcp_close);
2375EXPORT_SYMBOL(tcp_destroy_sock);
2376EXPORT_SYMBOL(tcp_disconnect); 2133EXPORT_SYMBOL(tcp_disconnect);
2377EXPORT_SYMBOL(tcp_getsockopt); 2134EXPORT_SYMBOL(tcp_getsockopt);
2378EXPORT_SYMBOL(tcp_ioctl); 2135EXPORT_SYMBOL(tcp_ioctl);
@@ -2384,4 +2141,3 @@ EXPORT_SYMBOL(tcp_sendpage);
2384EXPORT_SYMBOL(tcp_setsockopt); 2141EXPORT_SYMBOL(tcp_setsockopt);
2385EXPORT_SYMBOL(tcp_shutdown); 2142EXPORT_SYMBOL(tcp_shutdown);
2386EXPORT_SYMBOL(tcp_statistics); 2143EXPORT_SYMBOL(tcp_statistics);
2387EXPORT_SYMBOL(tcp_timewait_cachep);
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index ec38d45d6649..b940346de4e7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -86,11 +86,11 @@ static inline void bictcp_reset(struct bictcp *ca)
86 ca->delayed_ack = 2 << ACK_RATIO_SHIFT; 86 ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
87} 87}
88 88
89static void bictcp_init(struct tcp_sock *tp) 89static void bictcp_init(struct sock *sk)
90{ 90{
91 bictcp_reset(tcp_ca(tp)); 91 bictcp_reset(inet_csk_ca(sk));
92 if (initial_ssthresh) 92 if (initial_ssthresh)
93 tp->snd_ssthresh = initial_ssthresh; 93 tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
94} 94}
95 95
96/* 96/*
@@ -156,9 +156,10 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
156 156
157 157
158/* Detect low utilization in congestion avoidance */ 158/* Detect low utilization in congestion avoidance */
159static inline void bictcp_low_utilization(struct tcp_sock *tp, int flag) 159static inline void bictcp_low_utilization(struct sock *sk, int flag)
160{ 160{
161 struct bictcp *ca = tcp_ca(tp); 161 const struct tcp_sock *tp = tcp_sk(sk);
162 struct bictcp *ca = inet_csk_ca(sk);
162 u32 dist, delay; 163 u32 dist, delay;
163 164
164 /* No time stamp */ 165 /* No time stamp */
@@ -208,12 +209,13 @@ static inline void bictcp_low_utilization(struct tcp_sock *tp, int flag)
208 209
209} 210}
210 211
211static void bictcp_cong_avoid(struct tcp_sock *tp, u32 ack, 212static void bictcp_cong_avoid(struct sock *sk, u32 ack,
212 u32 seq_rtt, u32 in_flight, int data_acked) 213 u32 seq_rtt, u32 in_flight, int data_acked)
213{ 214{
214 struct bictcp *ca = tcp_ca(tp); 215 struct tcp_sock *tp = tcp_sk(sk);
216 struct bictcp *ca = inet_csk_ca(sk);
215 217
216 bictcp_low_utilization(tp, data_acked); 218 bictcp_low_utilization(sk, data_acked);
217 219
218 if (in_flight < tp->snd_cwnd) 220 if (in_flight < tp->snd_cwnd)
219 return; 221 return;
@@ -242,9 +244,10 @@ static void bictcp_cong_avoid(struct tcp_sock *tp, u32 ack,
242 * behave like Reno until low_window is reached, 244 * behave like Reno until low_window is reached,
243 * then increase congestion window slowly 245 * then increase congestion window slowly
244 */ 246 */
245static u32 bictcp_recalc_ssthresh(struct tcp_sock *tp) 247static u32 bictcp_recalc_ssthresh(struct sock *sk)
246{ 248{
247 struct bictcp *ca = tcp_ca(tp); 249 const struct tcp_sock *tp = tcp_sk(sk);
250 struct bictcp *ca = inet_csk_ca(sk);
248 251
249 ca->epoch_start = 0; /* end of epoch */ 252 ca->epoch_start = 0; /* end of epoch */
250 253
@@ -269,31 +272,34 @@ static u32 bictcp_recalc_ssthresh(struct tcp_sock *tp)
269 return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); 272 return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
270} 273}
271 274
272static u32 bictcp_undo_cwnd(struct tcp_sock *tp) 275static u32 bictcp_undo_cwnd(struct sock *sk)
273{ 276{
274 struct bictcp *ca = tcp_ca(tp); 277 const struct tcp_sock *tp = tcp_sk(sk);
275 278 const struct bictcp *ca = inet_csk_ca(sk);
276 return max(tp->snd_cwnd, ca->last_max_cwnd); 279 return max(tp->snd_cwnd, ca->last_max_cwnd);
277} 280}
278 281
279static u32 bictcp_min_cwnd(struct tcp_sock *tp) 282static u32 bictcp_min_cwnd(struct sock *sk)
280{ 283{
284 const struct tcp_sock *tp = tcp_sk(sk);
281 return tp->snd_ssthresh; 285 return tp->snd_ssthresh;
282} 286}
283 287
284static void bictcp_state(struct tcp_sock *tp, u8 new_state) 288static void bictcp_state(struct sock *sk, u8 new_state)
285{ 289{
286 if (new_state == TCP_CA_Loss) 290 if (new_state == TCP_CA_Loss)
287 bictcp_reset(tcp_ca(tp)); 291 bictcp_reset(inet_csk_ca(sk));
288} 292}
289 293
290/* Track delayed acknowledgement ratio using sliding window 294/* Track delayed acknowledgement ratio using sliding window
291 * ratio = (15*ratio + sample) / 16 295 * ratio = (15*ratio + sample) / 16
292 */ 296 */
293static void bictcp_acked(struct tcp_sock *tp, u32 cnt) 297static void bictcp_acked(struct sock *sk, u32 cnt)
294{ 298{
295 if (cnt > 0 && tp->ca_state == TCP_CA_Open) { 299 const struct inet_connection_sock *icsk = inet_csk(sk);
296 struct bictcp *ca = tcp_ca(tp); 300
301 if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
302 struct bictcp *ca = inet_csk_ca(sk);
297 cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; 303 cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
298 ca->delayed_ack += cnt; 304 ca->delayed_ack += cnt;
299 } 305 }
@@ -314,7 +320,7 @@ static struct tcp_congestion_ops bictcp = {
314 320
315static int __init bictcp_register(void) 321static int __init bictcp_register(void)
316{ 322{
317 BUG_ON(sizeof(struct bictcp) > TCP_CA_PRIV_SIZE); 323 BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
318 return tcp_register_congestion_control(&bictcp); 324 return tcp_register_congestion_control(&bictcp);
319} 325}
320 326
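The tcp_bic.c hunks above show the whole conversion in miniature: every congestion-control hook now takes a struct sock * instead of a struct tcp_sock *, per-socket private state moves from tcp_ca(tp) to the inet_csk_ca(sk) scratch area, and the size check at registration becomes ICSK_CA_PRIV_SIZE. A minimal sketch of a module written against the post-patch API — the "mycc" name and its single field are hypothetical, for illustration only:

	#include <linux/module.h>
	#include <net/tcp.h>

	struct mycc {
		u32 epoch_start;		/* illustrative private state */
	};

	static void mycc_init(struct sock *sk)
	{
		struct mycc *ca = inet_csk_ca(sk);	/* icsk scratch area, not tcp_ca(tp) */

		ca->epoch_start = 0;
	}

	static u32 mycc_ssthresh(struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);	/* tp is now derived from sk */

		return max(tp->snd_cwnd >> 1U, 2U);
	}

	static struct tcp_congestion_ops mycc_ops = {
		.init		= mycc_init,
		.ssthresh	= mycc_ssthresh,
		.cong_avoid	= tcp_reno_cong_avoid,
		.min_cwnd	= tcp_reno_min_cwnd,
		.owner		= THIS_MODULE,
		.name		= "mycc",
	};

	static int __init mycc_register(void)
	{
		BUG_ON(sizeof(struct mycc) > ICSK_CA_PRIV_SIZE);	/* state must fit the scratch area */
		return tcp_register_congestion_control(&mycc_ops);
	}

	static void __exit mycc_unregister(void)
	{
		tcp_unregister_congestion_control(&mycc_ops);
	}
	module_init(mycc_register);
	module_exit(mycc_unregister);
	MODULE_LICENSE("GPL");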
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4970d10a7785..bbf2d6624e89 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -73,33 +73,36 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
73EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control); 73EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
74 74
75/* Assign choice of congestion control. */ 75/* Assign choice of congestion control. */
76void tcp_init_congestion_control(struct tcp_sock *tp) 76void tcp_init_congestion_control(struct sock *sk)
77{ 77{
78 struct inet_connection_sock *icsk = inet_csk(sk);
78 struct tcp_congestion_ops *ca; 79 struct tcp_congestion_ops *ca;
79 80
80 if (tp->ca_ops != &tcp_init_congestion_ops) 81 if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
81 return; 82 return;
82 83
83 rcu_read_lock(); 84 rcu_read_lock();
84 list_for_each_entry_rcu(ca, &tcp_cong_list, list) { 85 list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
85 if (try_module_get(ca->owner)) { 86 if (try_module_get(ca->owner)) {
86 tp->ca_ops = ca; 87 icsk->icsk_ca_ops = ca;
87 break; 88 break;
88 } 89 }
89 90
90 } 91 }
91 rcu_read_unlock(); 92 rcu_read_unlock();
92 93
93 if (tp->ca_ops->init) 94 if (icsk->icsk_ca_ops->init)
94 tp->ca_ops->init(tp); 95 icsk->icsk_ca_ops->init(sk);
95} 96}
96 97
97/* Manage refcounts on socket close. */ 98/* Manage refcounts on socket close. */
98void tcp_cleanup_congestion_control(struct tcp_sock *tp) 99void tcp_cleanup_congestion_control(struct sock *sk)
99{ 100{
100 if (tp->ca_ops->release) 101 struct inet_connection_sock *icsk = inet_csk(sk);
101 tp->ca_ops->release(tp); 102
102 module_put(tp->ca_ops->owner); 103 if (icsk->icsk_ca_ops->release)
104 icsk->icsk_ca_ops->release(sk);
105 module_put(icsk->icsk_ca_ops->owner);
103} 106}
104 107
105/* Used by sysctl to change default congestion control */ 108/* Used by sysctl to change default congestion control */
@@ -143,14 +146,15 @@ void tcp_get_default_congestion_control(char *name)
143} 146}
144 147
145/* Change congestion control for socket */ 148/* Change congestion control for socket */
146int tcp_set_congestion_control(struct tcp_sock *tp, const char *name) 149int tcp_set_congestion_control(struct sock *sk, const char *name)
147{ 150{
151 struct inet_connection_sock *icsk = inet_csk(sk);
148 struct tcp_congestion_ops *ca; 152 struct tcp_congestion_ops *ca;
149 int err = 0; 153 int err = 0;
150 154
151 rcu_read_lock(); 155 rcu_read_lock();
152 ca = tcp_ca_find(name); 156 ca = tcp_ca_find(name);
153 if (ca == tp->ca_ops) 157 if (ca == icsk->icsk_ca_ops)
154 goto out; 158 goto out;
155 159
156 if (!ca) 160 if (!ca)
@@ -160,10 +164,10 @@ int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
160 err = -EBUSY; 164 err = -EBUSY;
161 165
162 else { 166 else {
163 tcp_cleanup_congestion_control(tp); 167 tcp_cleanup_congestion_control(sk);
164 tp->ca_ops = ca; 168 icsk->icsk_ca_ops = ca;
165 if (tp->ca_ops->init) 169 if (icsk->icsk_ca_ops->init)
166 tp->ca_ops->init(tp); 170 icsk->icsk_ca_ops->init(sk);
167 } 171 }
168 out: 172 out:
169 rcu_read_unlock(); 173 rcu_read_unlock();
@@ -177,9 +181,11 @@ int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
177/* This is Jacobson's slow start and congestion avoidance. 181/* This is Jacobson's slow start and congestion avoidance.
178 * SIGCOMM '88, p. 328. 182 * SIGCOMM '88, p. 328.
179 */ 183 */
180void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight, 184void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
181 int flag) 185 int flag)
182{ 186{
187 struct tcp_sock *tp = tcp_sk(sk);
188
183 if (in_flight < tp->snd_cwnd) 189 if (in_flight < tp->snd_cwnd)
184 return; 190 return;
185 191
@@ -202,15 +208,17 @@ void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight,
202EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 208EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
203 209
204/* Slow start threshold is half the congestion window (min 2) */ 210/* Slow start threshold is half the congestion window (min 2) */
205u32 tcp_reno_ssthresh(struct tcp_sock *tp) 211u32 tcp_reno_ssthresh(struct sock *sk)
206{ 212{
213 const struct tcp_sock *tp = tcp_sk(sk);
207 return max(tp->snd_cwnd >> 1U, 2U); 214 return max(tp->snd_cwnd >> 1U, 2U);
208} 215}
209EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); 216EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
210 217
211/* Lower bound on congestion window. */ 218/* Lower bound on congestion window. */
212u32 tcp_reno_min_cwnd(struct tcp_sock *tp) 219u32 tcp_reno_min_cwnd(struct sock *sk)
213{ 220{
221 const struct tcp_sock *tp = tcp_sk(sk);
214 return tp->snd_ssthresh/2; 222 return tp->snd_ssthresh/2;
215} 223}
216EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); 224EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
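tcp_set_congestion_control() above is what the TCP_CONGESTION socket option resolves to, so switching the algorithm per socket from user space is unchanged by this patch. A hedged sketch (TCP_CONGESTION is 13 on Linux; it is defined here in case the libc headers of the day lack it):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>

	#ifndef TCP_CONGESTION
	#define TCP_CONGESTION 13	/* per-socket congestion control (Linux) */
	#endif

	/* The kernel side walks tcp_cong_list under rcu_read_lock(), drops the
	 * old ops' module reference via tcp_cleanup_congestion_control(sk),
	 * installs the new ops on icsk->icsk_ca_ops and calls its ->init(sk). */
	static int pick_cc(int fd, const char *name)
	{
		return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
				  name, strlen(name));
	}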
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index f66945cb158f..c148c1081880 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * tcp_diag.c Module for monitoring TCP sockets. 2 * tcp_diag.c Module for monitoring TCP transport protocols sockets.
3 * 3 *
4 * Version: $Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $ 4 * Version: $Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 * 5 *
@@ -12,779 +12,43 @@
12 */ 12 */
13 13
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/fcntl.h>
18#include <linux/random.h>
19#include <linux/cache.h>
20#include <linux/init.h>
21#include <linux/time.h>
22
23#include <net/icmp.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/inet_common.h>
27
28#include <linux/inet.h>
29#include <linux/stddef.h>
30
31#include <linux/tcp_diag.h>
32 15
33struct tcpdiag_entry 16#include <linux/module.h>
34{ 17#include <linux/inet_diag.h>
35 u32 *saddr;
36 u32 *daddr;
37 u16 sport;
38 u16 dport;
39 u16 family;
40 u16 userlocks;
41};
42 18
43static struct sock *tcpnl; 19#include <linux/tcp.h>
44 20
45#define TCPDIAG_PUT(skb, attrtype, attrlen) \ 21#include <net/tcp.h>
46 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
47 22
48static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, 23static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
49 int ext, u32 pid, u32 seq, u16 nlmsg_flags) 24 void *_info)
50{ 25{
51 struct inet_sock *inet = inet_sk(sk); 26 const struct tcp_sock *tp = tcp_sk(sk);
52 struct tcp_sock *tp = tcp_sk(sk); 27 struct tcp_info *info = _info;
53 struct tcpdiagmsg *r;
54 struct nlmsghdr *nlh;
55 struct tcp_info *info = NULL;
56 struct tcpdiag_meminfo *minfo = NULL;
57 unsigned char *b = skb->tail;
58
59 nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r));
60 nlh->nlmsg_flags = nlmsg_flags;
61 r = NLMSG_DATA(nlh);
62 if (sk->sk_state != TCP_TIME_WAIT) {
63 if (ext & (1<<(TCPDIAG_MEMINFO-1)))
64 minfo = TCPDIAG_PUT(skb, TCPDIAG_MEMINFO, sizeof(*minfo));
65 if (ext & (1<<(TCPDIAG_INFO-1)))
66 info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));
67
68 if (ext & (1<<(TCPDIAG_CONG-1))) {
69 size_t len = strlen(tp->ca_ops->name);
70 strcpy(TCPDIAG_PUT(skb, TCPDIAG_CONG, len+1),
71 tp->ca_ops->name);
72 }
73 }
74 r->tcpdiag_family = sk->sk_family;
75 r->tcpdiag_state = sk->sk_state;
76 r->tcpdiag_timer = 0;
77 r->tcpdiag_retrans = 0;
78
79 r->id.tcpdiag_if = sk->sk_bound_dev_if;
80 r->id.tcpdiag_cookie[0] = (u32)(unsigned long)sk;
81 r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
82
83 if (r->tcpdiag_state == TCP_TIME_WAIT) {
84 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket*)sk;
85 long tmo = tw->tw_ttd - jiffies;
86 if (tmo < 0)
87 tmo = 0;
88
89 r->id.tcpdiag_sport = tw->tw_sport;
90 r->id.tcpdiag_dport = tw->tw_dport;
91 r->id.tcpdiag_src[0] = tw->tw_rcv_saddr;
92 r->id.tcpdiag_dst[0] = tw->tw_daddr;
93 r->tcpdiag_state = tw->tw_substate;
94 r->tcpdiag_timer = 3;
95 r->tcpdiag_expires = (tmo*1000+HZ-1)/HZ;
96 r->tcpdiag_rqueue = 0;
97 r->tcpdiag_wqueue = 0;
98 r->tcpdiag_uid = 0;
99 r->tcpdiag_inode = 0;
100#ifdef CONFIG_IP_TCPDIAG_IPV6
101 if (r->tcpdiag_family == AF_INET6) {
102 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
103 &tw->tw_v6_rcv_saddr);
104 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
105 &tw->tw_v6_daddr);
106 }
107#endif
108 nlh->nlmsg_len = skb->tail - b;
109 return skb->len;
110 }
111
112 r->id.tcpdiag_sport = inet->sport;
113 r->id.tcpdiag_dport = inet->dport;
114 r->id.tcpdiag_src[0] = inet->rcv_saddr;
115 r->id.tcpdiag_dst[0] = inet->daddr;
116
117#ifdef CONFIG_IP_TCPDIAG_IPV6
118 if (r->tcpdiag_family == AF_INET6) {
119 struct ipv6_pinfo *np = inet6_sk(sk);
120
121 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
122 &np->rcv_saddr);
123 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
124 &np->daddr);
125 }
126#endif
127
128#define EXPIRES_IN_MS(tmo) ((tmo-jiffies)*1000+HZ-1)/HZ
129
130 if (tp->pending == TCP_TIME_RETRANS) {
131 r->tcpdiag_timer = 1;
132 r->tcpdiag_retrans = tp->retransmits;
133 r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
134 } else if (tp->pending == TCP_TIME_PROBE0) {
135 r->tcpdiag_timer = 4;
136 r->tcpdiag_retrans = tp->probes_out;
137 r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
138 } else if (timer_pending(&sk->sk_timer)) {
139 r->tcpdiag_timer = 2;
140 r->tcpdiag_retrans = tp->probes_out;
141 r->tcpdiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
142 } else {
143 r->tcpdiag_timer = 0;
144 r->tcpdiag_expires = 0;
145 }
146#undef EXPIRES_IN_MS
147 28
148 r->tcpdiag_rqueue = tp->rcv_nxt - tp->copied_seq; 29 r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq;
149 r->tcpdiag_wqueue = tp->write_seq - tp->snd_una; 30 r->idiag_wqueue = tp->write_seq - tp->snd_una;
150 r->tcpdiag_uid = sock_i_uid(sk); 31 if (info != NULL)
151 r->tcpdiag_inode = sock_i_ino(sk);
152
153 if (minfo) {
154 minfo->tcpdiag_rmem = atomic_read(&sk->sk_rmem_alloc);
155 minfo->tcpdiag_wmem = sk->sk_wmem_queued;
156 minfo->tcpdiag_fmem = sk->sk_forward_alloc;
157 minfo->tcpdiag_tmem = atomic_read(&sk->sk_wmem_alloc);
158 }
159
160 if (info)
161 tcp_get_info(sk, info); 32 tcp_get_info(sk, info);
162
163 if (sk->sk_state < TCP_TIME_WAIT && tp->ca_ops->get_info)
164 tp->ca_ops->get_info(tp, ext, skb);
165
166 nlh->nlmsg_len = skb->tail - b;
167 return skb->len;
168
169rtattr_failure:
170nlmsg_failure:
171 skb_trim(skb, b - skb->data);
172 return -1;
173}
174
175extern struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport,
176 int dif);
177#ifdef CONFIG_IP_TCPDIAG_IPV6
178extern struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
179 struct in6_addr *daddr, u16 dport,
180 int dif);
181#else
182static inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
183 struct in6_addr *daddr, u16 dport,
184 int dif)
185{
186 return NULL;
187}
188#endif
189
190static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
191{
192 int err;
193 struct sock *sk;
194 struct tcpdiagreq *req = NLMSG_DATA(nlh);
195 struct sk_buff *rep;
196
197 if (req->tcpdiag_family == AF_INET) {
198 sk = tcp_v4_lookup(req->id.tcpdiag_dst[0], req->id.tcpdiag_dport,
199 req->id.tcpdiag_src[0], req->id.tcpdiag_sport,
200 req->id.tcpdiag_if);
201 }
202#ifdef CONFIG_IP_TCPDIAG_IPV6
203 else if (req->tcpdiag_family == AF_INET6) {
204 sk = tcp_v6_lookup((struct in6_addr*)req->id.tcpdiag_dst, req->id.tcpdiag_dport,
205 (struct in6_addr*)req->id.tcpdiag_src, req->id.tcpdiag_sport,
206 req->id.tcpdiag_if);
207 }
208#endif
209 else {
210 return -EINVAL;
211 }
212
213 if (sk == NULL)
214 return -ENOENT;
215
216 err = -ESTALE;
217 if ((req->id.tcpdiag_cookie[0] != TCPDIAG_NOCOOKIE ||
218 req->id.tcpdiag_cookie[1] != TCPDIAG_NOCOOKIE) &&
219 ((u32)(unsigned long)sk != req->id.tcpdiag_cookie[0] ||
220 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.tcpdiag_cookie[1]))
221 goto out;
222
223 err = -ENOMEM;
224 rep = alloc_skb(NLMSG_SPACE(sizeof(struct tcpdiagmsg)+
225 sizeof(struct tcpdiag_meminfo)+
226 sizeof(struct tcp_info)+64), GFP_KERNEL);
227 if (!rep)
228 goto out;
229
230 if (tcpdiag_fill(rep, sk, req->tcpdiag_ext,
231 NETLINK_CB(in_skb).pid,
232 nlh->nlmsg_seq, 0) <= 0)
233 BUG();
234
235 err = netlink_unicast(tcpnl, rep, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
236 if (err > 0)
237 err = 0;
238
239out:
240 if (sk) {
241 if (sk->sk_state == TCP_TIME_WAIT)
242 tcp_tw_put((struct tcp_tw_bucket*)sk);
243 else
244 sock_put(sk);
245 }
246 return err;
247}
248
249static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
250{
251 int words = bits >> 5;
252
253 bits &= 0x1f;
254
255 if (words) {
256 if (memcmp(a1, a2, words << 2))
257 return 0;
258 }
259 if (bits) {
260 __u32 w1, w2;
261 __u32 mask;
262
263 w1 = a1[words];
264 w2 = a2[words];
265
266 mask = htonl((0xffffffff) << (32 - bits));
267
268 if ((w1 ^ w2) & mask)
269 return 0;
270 }
271
272 return 1;
273}
274
275
276static int tcpdiag_bc_run(const void *bc, int len,
277 const struct tcpdiag_entry *entry)
278{
279 while (len > 0) {
280 int yes = 1;
281 const struct tcpdiag_bc_op *op = bc;
282
283 switch (op->code) {
284 case TCPDIAG_BC_NOP:
285 break;
286 case TCPDIAG_BC_JMP:
287 yes = 0;
288 break;
289 case TCPDIAG_BC_S_GE:
290 yes = entry->sport >= op[1].no;
291 break;
292 case TCPDIAG_BC_S_LE:
293 yes = entry->dport <= op[1].no;
294 break;
295 case TCPDIAG_BC_D_GE:
296 yes = entry->dport >= op[1].no;
297 break;
298 case TCPDIAG_BC_D_LE:
299 yes = entry->dport <= op[1].no;
300 break;
301 case TCPDIAG_BC_AUTO:
302 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
303 break;
304 case TCPDIAG_BC_S_COND:
305 case TCPDIAG_BC_D_COND:
306 {
307 struct tcpdiag_hostcond *cond = (struct tcpdiag_hostcond*)(op+1);
308 u32 *addr;
309
310 if (cond->port != -1 &&
311 cond->port != (op->code == TCPDIAG_BC_S_COND ?
312 entry->sport : entry->dport)) {
313 yes = 0;
314 break;
315 }
316
317 if (cond->prefix_len == 0)
318 break;
319
320 if (op->code == TCPDIAG_BC_S_COND)
321 addr = entry->saddr;
322 else
323 addr = entry->daddr;
324
325 if (bitstring_match(addr, cond->addr, cond->prefix_len))
326 break;
327 if (entry->family == AF_INET6 &&
328 cond->family == AF_INET) {
329 if (addr[0] == 0 && addr[1] == 0 &&
330 addr[2] == htonl(0xffff) &&
331 bitstring_match(addr+3, cond->addr, cond->prefix_len))
332 break;
333 }
334 yes = 0;
335 break;
336 }
337 }
338
339 if (yes) {
340 len -= op->yes;
341 bc += op->yes;
342 } else {
343 len -= op->no;
344 bc += op->no;
345 }
346 }
347 return (len == 0);
348}
349
350static int valid_cc(const void *bc, int len, int cc)
351{
352 while (len >= 0) {
353 const struct tcpdiag_bc_op *op = bc;
354
355 if (cc > len)
356 return 0;
357 if (cc == len)
358 return 1;
359 if (op->yes < 4)
360 return 0;
361 len -= op->yes;
362 bc += op->yes;
363 }
364 return 0;
365}
366
367static int tcpdiag_bc_audit(const void *bytecode, int bytecode_len)
368{
369 const unsigned char *bc = bytecode;
370 int len = bytecode_len;
371
372 while (len > 0) {
373 struct tcpdiag_bc_op *op = (struct tcpdiag_bc_op*)bc;
374
375//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
376 switch (op->code) {
377 case TCPDIAG_BC_AUTO:
378 case TCPDIAG_BC_S_COND:
379 case TCPDIAG_BC_D_COND:
380 case TCPDIAG_BC_S_GE:
381 case TCPDIAG_BC_S_LE:
382 case TCPDIAG_BC_D_GE:
383 case TCPDIAG_BC_D_LE:
384 if (op->yes < 4 || op->yes > len+4)
385 return -EINVAL;
386 case TCPDIAG_BC_JMP:
387 if (op->no < 4 || op->no > len+4)
388 return -EINVAL;
389 if (op->no < len &&
390 !valid_cc(bytecode, bytecode_len, len-op->no))
391 return -EINVAL;
392 break;
393 case TCPDIAG_BC_NOP:
394 if (op->yes < 4 || op->yes > len+4)
395 return -EINVAL;
396 break;
397 default:
398 return -EINVAL;
399 }
400 bc += op->yes;
401 len -= op->yes;
402 }
403 return len == 0 ? 0 : -EINVAL;
404}
405
406static int tcpdiag_dump_sock(struct sk_buff *skb, struct sock *sk,
407 struct netlink_callback *cb)
408{
409 struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
410
411 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
412 struct tcpdiag_entry entry;
413 struct rtattr *bc = (struct rtattr *)(r + 1);
414 struct inet_sock *inet = inet_sk(sk);
415
416 entry.family = sk->sk_family;
417#ifdef CONFIG_IP_TCPDIAG_IPV6
418 if (entry.family == AF_INET6) {
419 struct ipv6_pinfo *np = inet6_sk(sk);
420
421 entry.saddr = np->rcv_saddr.s6_addr32;
422 entry.daddr = np->daddr.s6_addr32;
423 } else
424#endif
425 {
426 entry.saddr = &inet->rcv_saddr;
427 entry.daddr = &inet->daddr;
428 }
429 entry.sport = inet->num;
430 entry.dport = ntohs(inet->dport);
431 entry.userlocks = sk->sk_userlocks;
432
433 if (!tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
434 return 0;
435 }
436
437 return tcpdiag_fill(skb, sk, r->tcpdiag_ext, NETLINK_CB(cb->skb).pid,
438 cb->nlh->nlmsg_seq, NLM_F_MULTI);
439} 33}
440 34
441static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk, 35static struct inet_diag_handler tcp_diag_handler = {
442 struct request_sock *req, 36 .idiag_hashinfo = &tcp_hashinfo,
443 u32 pid, u32 seq) 37 .idiag_get_info = tcp_diag_get_info,
444{ 38 .idiag_type = TCPDIAG_GETSOCK,
445 const struct inet_request_sock *ireq = inet_rsk(req); 39 .idiag_info_size = sizeof(struct tcp_info),
446 struct inet_sock *inet = inet_sk(sk); 40};
447 unsigned char *b = skb->tail;
448 struct tcpdiagmsg *r;
449 struct nlmsghdr *nlh;
450 long tmo;
451
452 nlh = NLMSG_PUT(skb, pid, seq, TCPDIAG_GETSOCK, sizeof(*r));
453 nlh->nlmsg_flags = NLM_F_MULTI;
454 r = NLMSG_DATA(nlh);
455
456 r->tcpdiag_family = sk->sk_family;
457 r->tcpdiag_state = TCP_SYN_RECV;
458 r->tcpdiag_timer = 1;
459 r->tcpdiag_retrans = req->retrans;
460
461 r->id.tcpdiag_if = sk->sk_bound_dev_if;
462 r->id.tcpdiag_cookie[0] = (u32)(unsigned long)req;
463 r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
464
465 tmo = req->expires - jiffies;
466 if (tmo < 0)
467 tmo = 0;
468
469 r->id.tcpdiag_sport = inet->sport;
470 r->id.tcpdiag_dport = ireq->rmt_port;
471 r->id.tcpdiag_src[0] = ireq->loc_addr;
472 r->id.tcpdiag_dst[0] = ireq->rmt_addr;
473 r->tcpdiag_expires = jiffies_to_msecs(tmo),
474 r->tcpdiag_rqueue = 0;
475 r->tcpdiag_wqueue = 0;
476 r->tcpdiag_uid = sock_i_uid(sk);
477 r->tcpdiag_inode = 0;
478#ifdef CONFIG_IP_TCPDIAG_IPV6
479 if (r->tcpdiag_family == AF_INET6) {
480 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
481 &tcp6_rsk(req)->loc_addr);
482 ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
483 &tcp6_rsk(req)->rmt_addr);
484 }
485#endif
486 nlh->nlmsg_len = skb->tail - b;
487
488 return skb->len;
489
490nlmsg_failure:
491 skb_trim(skb, b - skb->data);
492 return -1;
493}
494
495static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
496 struct netlink_callback *cb)
497{
498 struct tcpdiag_entry entry;
499 struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
500 struct tcp_sock *tp = tcp_sk(sk);
501 struct listen_sock *lopt;
502 struct rtattr *bc = NULL;
503 struct inet_sock *inet = inet_sk(sk);
504 int j, s_j;
505 int reqnum, s_reqnum;
506 int err = 0;
507
508 s_j = cb->args[3];
509 s_reqnum = cb->args[4];
510
511 if (s_j > 0)
512 s_j--;
513
514 entry.family = sk->sk_family;
515
516 read_lock_bh(&tp->accept_queue.syn_wait_lock);
517
518 lopt = tp->accept_queue.listen_opt;
519 if (!lopt || !lopt->qlen)
520 goto out;
521
522 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
523 bc = (struct rtattr *)(r + 1);
524 entry.sport = inet->num;
525 entry.userlocks = sk->sk_userlocks;
526 }
527
528 for (j = s_j; j < TCP_SYNQ_HSIZE; j++) {
529 struct request_sock *req, *head = lopt->syn_table[j];
530
531 reqnum = 0;
532 for (req = head; req; reqnum++, req = req->dl_next) {
533 struct inet_request_sock *ireq = inet_rsk(req);
534
535 if (reqnum < s_reqnum)
536 continue;
537 if (r->id.tcpdiag_dport != ireq->rmt_port &&
538 r->id.tcpdiag_dport)
539 continue;
540
541 if (bc) {
542 entry.saddr =
543#ifdef CONFIG_IP_TCPDIAG_IPV6
544 (entry.family == AF_INET6) ?
545 tcp6_rsk(req)->loc_addr.s6_addr32 :
546#endif
547 &ireq->loc_addr;
548 entry.daddr =
549#ifdef CONFIG_IP_TCPDIAG_IPV6
550 (entry.family == AF_INET6) ?
551 tcp6_rsk(req)->rmt_addr.s6_addr32 :
552#endif
553 &ireq->rmt_addr;
554 entry.dport = ntohs(ireq->rmt_port);
555
556 if (!tcpdiag_bc_run(RTA_DATA(bc),
557 RTA_PAYLOAD(bc), &entry))
558 continue;
559 }
560
561 err = tcpdiag_fill_req(skb, sk, req,
562 NETLINK_CB(cb->skb).pid,
563 cb->nlh->nlmsg_seq);
564 if (err < 0) {
565 cb->args[3] = j + 1;
566 cb->args[4] = reqnum;
567 goto out;
568 }
569 }
570
571 s_reqnum = 0;
572 }
573
574out:
575 read_unlock_bh(&tp->accept_queue.syn_wait_lock);
576
577 return err;
578}
579
580static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
581{
582 int i, num;
583 int s_i, s_num;
584 struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
585
586 s_i = cb->args[1];
587 s_num = num = cb->args[2];
588
589 if (cb->args[0] == 0) {
590 if (!(r->tcpdiag_states&(TCPF_LISTEN|TCPF_SYN_RECV)))
591 goto skip_listen_ht;
592 tcp_listen_lock();
593 for (i = s_i; i < TCP_LHTABLE_SIZE; i++) {
594 struct sock *sk;
595 struct hlist_node *node;
596
597 num = 0;
598 sk_for_each(sk, node, &tcp_listening_hash[i]) {
599 struct inet_sock *inet = inet_sk(sk);
600
601 if (num < s_num) {
602 num++;
603 continue;
604 }
605
606 if (r->id.tcpdiag_sport != inet->sport &&
607 r->id.tcpdiag_sport)
608 goto next_listen;
609
610 if (!(r->tcpdiag_states&TCPF_LISTEN) ||
611 r->id.tcpdiag_dport ||
612 cb->args[3] > 0)
613 goto syn_recv;
614
615 if (tcpdiag_dump_sock(skb, sk, cb) < 0) {
616 tcp_listen_unlock();
617 goto done;
618 }
619
620syn_recv:
621 if (!(r->tcpdiag_states&TCPF_SYN_RECV))
622 goto next_listen;
623
624 if (tcpdiag_dump_reqs(skb, sk, cb) < 0) {
625 tcp_listen_unlock();
626 goto done;
627 }
628
629next_listen:
630 cb->args[3] = 0;
631 cb->args[4] = 0;
632 ++num;
633 }
634
635 s_num = 0;
636 cb->args[3] = 0;
637 cb->args[4] = 0;
638 }
639 tcp_listen_unlock();
640skip_listen_ht:
641 cb->args[0] = 1;
642 s_i = num = s_num = 0;
643 }
644
645 if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
646 return skb->len;
647
648 for (i = s_i; i < tcp_ehash_size; i++) {
649 struct tcp_ehash_bucket *head = &tcp_ehash[i];
650 struct sock *sk;
651 struct hlist_node *node;
652
653 if (i > s_i)
654 s_num = 0;
655
656 read_lock_bh(&head->lock);
657
658 num = 0;
659 sk_for_each(sk, node, &head->chain) {
660 struct inet_sock *inet = inet_sk(sk);
661
662 if (num < s_num)
663 goto next_normal;
664 if (!(r->tcpdiag_states & (1 << sk->sk_state)))
665 goto next_normal;
666 if (r->id.tcpdiag_sport != inet->sport &&
667 r->id.tcpdiag_sport)
668 goto next_normal;
669 if (r->id.tcpdiag_dport != inet->dport && r->id.tcpdiag_dport)
670 goto next_normal;
671 if (tcpdiag_dump_sock(skb, sk, cb) < 0) {
672 read_unlock_bh(&head->lock);
673 goto done;
674 }
675next_normal:
676 ++num;
677 }
678
679 if (r->tcpdiag_states&TCPF_TIME_WAIT) {
680 sk_for_each(sk, node,
681 &tcp_ehash[i + tcp_ehash_size].chain) {
682 struct inet_sock *inet = inet_sk(sk);
683
684 if (num < s_num)
685 goto next_dying;
686 if (r->id.tcpdiag_sport != inet->sport &&
687 r->id.tcpdiag_sport)
688 goto next_dying;
689 if (r->id.tcpdiag_dport != inet->dport &&
690 r->id.tcpdiag_dport)
691 goto next_dying;
692 if (tcpdiag_dump_sock(skb, sk, cb) < 0) {
693 read_unlock_bh(&head->lock);
694 goto done;
695 }
696next_dying:
697 ++num;
698 }
699 }
700 read_unlock_bh(&head->lock);
701 }
702
703done:
704 cb->args[1] = i;
705 cb->args[2] = num;
706 return skb->len;
707}
708
709static int tcpdiag_dump_done(struct netlink_callback *cb)
710{
711 return 0;
712}
713
714
715static __inline__ int
716tcpdiag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
717{
718 if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
719 return 0;
720
721 if (nlh->nlmsg_type != TCPDIAG_GETSOCK)
722 goto err_inval;
723
724 if (NLMSG_LENGTH(sizeof(struct tcpdiagreq)) > skb->len)
725 goto err_inval;
726
727 if (nlh->nlmsg_flags&NLM_F_DUMP) {
728 if (nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(struct tcpdiagreq))) {
729 struct rtattr *rta = (struct rtattr*)(NLMSG_DATA(nlh) + sizeof(struct tcpdiagreq));
730 if (rta->rta_type != TCPDIAG_REQ_BYTECODE ||
731 rta->rta_len < 8 ||
732 rta->rta_len > nlh->nlmsg_len - NLMSG_SPACE(sizeof(struct tcpdiagreq)))
733 goto err_inval;
734 if (tcpdiag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
735 goto err_inval;
736 }
737 return netlink_dump_start(tcpnl, skb, nlh,
738 tcpdiag_dump,
739 tcpdiag_dump_done);
740 } else {
741 return tcpdiag_get_exact(skb, nlh);
742 }
743
744err_inval:
745 return -EINVAL;
746}
747
748
749static inline void tcpdiag_rcv_skb(struct sk_buff *skb)
750{
751 int err;
752 struct nlmsghdr * nlh;
753
754 if (skb->len >= NLMSG_SPACE(0)) {
755 nlh = (struct nlmsghdr *)skb->data;
756 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
757 return;
758 err = tcpdiag_rcv_msg(skb, nlh);
759 if (err || nlh->nlmsg_flags & NLM_F_ACK)
760 netlink_ack(skb, nlh, err);
761 }
762}
763
764static void tcpdiag_rcv(struct sock *sk, int len)
765{
766 struct sk_buff *skb;
767 unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
768
769 while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
770 tcpdiag_rcv_skb(skb);
771 kfree_skb(skb);
772 }
773}
774 41
775static int __init tcpdiag_init(void) 42static int __init tcp_diag_init(void)
776{ 43{
777 tcpnl = netlink_kernel_create(NETLINK_TCPDIAG, tcpdiag_rcv); 44 return inet_diag_register(&tcp_diag_handler);
778 if (tcpnl == NULL)
779 return -ENOMEM;
780 return 0;
781} 45}
782 46
783static void __exit tcpdiag_exit(void) 47static void __exit tcp_diag_exit(void)
784{ 48{
785 sock_release(tcpnl->sk_socket); 49 inet_diag_unregister(&tcp_diag_handler);
786} 50}
787 51
788module_init(tcpdiag_init); 52module_init(tcp_diag_init);
789module_exit(tcpdiag_exit); 53module_exit(tcp_diag_exit);
790MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
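The tcp_diag.c rewrite above deletes all of the protocol-specific netlink plumbing — socket dumps, bytecode filters, request-sock walking — in favor of the generic inet_diag core, shrinking the module to a handler registration. A speculative sketch of what a second protocol's handler could look like; every foo_* name is invented for illustration and the hash table is left as an uninitialized stand-in:

	#include <linux/inet_diag.h>
	#include <linux/module.h>
	#include <net/inet_hashtables.h>

	static struct inet_hashinfo foo_hashinfo;	/* stand-in; a real protocol
							 * would pass its own table */

	static void foo_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
				      void *_info)
	{
		/* fill r->idiag_rqueue / r->idiag_wqueue and, when _info is
		 * non-NULL, the protocol's info block, as tcp_diag_get_info()
		 * does above */
		r->idiag_rqueue = 0;
		r->idiag_wqueue = 0;
	}

	static struct inet_diag_handler foo_diag_handler = {
		.idiag_hashinfo	 = &foo_hashinfo,
		.idiag_get_info	 = foo_diag_get_info,
		.idiag_type	 = TCPDIAG_GETSOCK,	/* the handler's netlink type */
		.idiag_info_size = 0,			/* no extra info block */
	};

	static int __init foo_diag_init(void)
	{
		return inet_diag_register(&foo_diag_handler);
	}

	static void __exit foo_diag_exit(void)
	{
		inet_diag_unregister(&foo_diag_handler);
	}
	module_init(foo_diag_init);
	module_exit(foo_diag_exit);
	MODULE_LICENSE("GPL");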
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 36c51f8136bf..6acc04bde080 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -98,9 +98,10 @@ struct hstcp {
98 u32 ai; 98 u32 ai;
99}; 99};
100 100
101static void hstcp_init(struct tcp_sock *tp) 101static void hstcp_init(struct sock *sk)
102{ 102{
103 struct hstcp *ca = tcp_ca(tp); 103 struct tcp_sock *tp = tcp_sk(sk);
104 struct hstcp *ca = inet_csk_ca(sk);
104 105
105 ca->ai = 0; 106 ca->ai = 0;
106 107
@@ -109,10 +110,11 @@ static void hstcp_init(struct tcp_sock *tp)
109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); 110 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
110} 111}
111 112
112static void hstcp_cong_avoid(struct tcp_sock *tp, u32 adk, u32 rtt, 113static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
113 u32 in_flight, int good) 114 u32 in_flight, int good)
114{ 115{
115 struct hstcp *ca = tcp_ca(tp); 116 struct tcp_sock *tp = tcp_sk(sk);
117 struct hstcp *ca = inet_csk_ca(sk);
116 118
117 if (in_flight < tp->snd_cwnd) 119 if (in_flight < tp->snd_cwnd)
118 return; 120 return;
@@ -143,9 +145,10 @@ static void hstcp_cong_avoid(struct tcp_sock *tp, u32 adk, u32 rtt,
143 } 145 }
144} 146}
145 147
146static u32 hstcp_ssthresh(struct tcp_sock *tp) 148static u32 hstcp_ssthresh(struct sock *sk)
147{ 149{
148 struct hstcp *ca = tcp_ca(tp); 150 const struct tcp_sock *tp = tcp_sk(sk);
151 const struct hstcp *ca = inet_csk_ca(sk);
149 152
150 /* Do multiplicative decrease */ 153 /* Do multiplicative decrease */
151 return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); 154 return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
@@ -164,7 +167,7 @@ static struct tcp_congestion_ops tcp_highspeed = {
164 167
165static int __init hstcp_register(void) 168static int __init hstcp_register(void)
166{ 169{
167 BUG_ON(sizeof(struct hstcp) > TCP_CA_PRIV_SIZE); 170 BUG_ON(sizeof(struct hstcp) > ICSK_CA_PRIV_SIZE);
168 return tcp_register_congestion_control(&tcp_highspeed); 171 return tcp_register_congestion_control(&tcp_highspeed);
169} 172}
170 173
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 40168275acf9..e47b37984e95 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -55,18 +55,21 @@ static inline void htcp_reset(struct htcp *ca)
55 ca->snd_cwnd_cnt2 = 0; 55 ca->snd_cwnd_cnt2 = 0;
56} 56}
57 57
58static u32 htcp_cwnd_undo(struct tcp_sock *tp) 58static u32 htcp_cwnd_undo(struct sock *sk)
59{ 59{
60 struct htcp *ca = tcp_ca(tp); 60 const struct tcp_sock *tp = tcp_sk(sk);
61 struct htcp *ca = inet_csk_ca(sk);
61 ca->ccount = ca->undo_ccount; 62 ca->ccount = ca->undo_ccount;
62 ca->maxRTT = ca->undo_maxRTT; 63 ca->maxRTT = ca->undo_maxRTT;
63 ca->old_maxB = ca->undo_old_maxB; 64 ca->old_maxB = ca->undo_old_maxB;
64 return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta); 65 return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
65} 66}
66 67
67static inline void measure_rtt(struct tcp_sock *tp) 68static inline void measure_rtt(struct sock *sk)
68{ 69{
69 struct htcp *ca = tcp_ca(tp); 70 const struct inet_connection_sock *icsk = inet_csk(sk);
71 const struct tcp_sock *tp = tcp_sk(sk);
72 struct htcp *ca = inet_csk_ca(sk);
70 u32 srtt = tp->srtt>>3; 73 u32 srtt = tp->srtt>>3;
71 74
72 /* keep track of minimum RTT seen so far, minRTT is zero at first */ 75 /* keep track of minimum RTT seen so far, minRTT is zero at first */
@@ -74,7 +77,7 @@ static inline void measure_rtt(struct tcp_sock *tp)
74 ca->minRTT = srtt; 77 ca->minRTT = srtt;
75 78
76 /* max RTT */ 79 /* max RTT */
77 if (tp->ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) { 80 if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) {
78 if (ca->maxRTT < ca->minRTT) 81 if (ca->maxRTT < ca->minRTT)
79 ca->maxRTT = ca->minRTT; 82 ca->maxRTT = ca->minRTT;
80 if (ca->maxRTT < srtt && srtt <= ca->maxRTT+HZ/50) 83 if (ca->maxRTT < srtt && srtt <= ca->maxRTT+HZ/50)
@@ -82,13 +85,16 @@ static inline void measure_rtt(struct tcp_sock *tp)
82 } 85 }
83} 86}
84 87
85static void measure_achieved_throughput(struct tcp_sock *tp, u32 pkts_acked) 88static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
86{ 89{
87 struct htcp *ca = tcp_ca(tp); 90 const struct inet_connection_sock *icsk = inet_csk(sk);
91 const struct tcp_sock *tp = tcp_sk(sk);
92 struct htcp *ca = inet_csk_ca(sk);
88 u32 now = tcp_time_stamp; 93 u32 now = tcp_time_stamp;
89 94
90 /* achieved throughput calculations */ 95 /* achieved throughput calculations */
91 if (tp->ca_state != TCP_CA_Open && tp->ca_state != TCP_CA_Disorder) { 96 if (icsk->icsk_ca_state != TCP_CA_Open &&
97 icsk->icsk_ca_state != TCP_CA_Disorder) {
92 ca->packetcount = 0; 98 ca->packetcount = 0;
93 ca->lasttime = now; 99 ca->lasttime = now;
94 return; 100 return;
@@ -173,9 +179,9 @@ static inline void htcp_alpha_update(struct htcp *ca)
173 * that point do we really have a real sense of maxRTT (the queues en route 179 * that point do we really have a real sense of maxRTT (the queues en route
174 * were getting just too full now). 180 * were getting just too full now).
175 */ 181 */
176static void htcp_param_update(struct tcp_sock *tp) 182static void htcp_param_update(struct sock *sk)
177{ 183{
178 struct htcp *ca = tcp_ca(tp); 184 struct htcp *ca = inet_csk_ca(sk);
179 u32 minRTT = ca->minRTT; 185 u32 minRTT = ca->minRTT;
180 u32 maxRTT = ca->maxRTT; 186 u32 maxRTT = ca->maxRTT;
181 187
@@ -187,17 +193,19 @@ static void htcp_param_update(struct tcp_sock *tp)
187 ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100; 193 ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
188} 194}
189 195
190static u32 htcp_recalc_ssthresh(struct tcp_sock *tp) 196static u32 htcp_recalc_ssthresh(struct sock *sk)
191{ 197{
192 struct htcp *ca = tcp_ca(tp); 198 const struct tcp_sock *tp = tcp_sk(sk);
193 htcp_param_update(tp); 199 const struct htcp *ca = inet_csk_ca(sk);
200 htcp_param_update(sk);
194 return max((tp->snd_cwnd * ca->beta) >> 7, 2U); 201 return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
195} 202}
196 203
197static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 204static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
198 u32 in_flight, int data_acked) 205 u32 in_flight, int data_acked)
199{ 206{
200 struct htcp *ca = tcp_ca(tp); 207 struct tcp_sock *tp = tcp_sk(sk);
208 struct htcp *ca = inet_csk_ca(sk);
201 209
202 if (in_flight < tp->snd_cwnd) 210 if (in_flight < tp->snd_cwnd)
203 return; 211 return;
@@ -207,7 +215,7 @@ static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
207 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 215 if (tp->snd_cwnd < tp->snd_cwnd_clamp)
208 tp->snd_cwnd++; 216 tp->snd_cwnd++;
209 } else { 217 } else {
210 measure_rtt(tp); 218 measure_rtt(sk);
211 219
212 /* keep track of number of round-trip times since last backoff event */ 220 /* keep track of number of round-trip times since last backoff event */
213 if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) { 221 if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
@@ -229,28 +237,29 @@ static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
229} 237}
230 238
231/* Lower bound on congestion window. */ 239/* Lower bound on congestion window. */
232static u32 htcp_min_cwnd(struct tcp_sock *tp) 240static u32 htcp_min_cwnd(struct sock *sk)
233{ 241{
242 const struct tcp_sock *tp = tcp_sk(sk);
234 return tp->snd_ssthresh; 243 return tp->snd_ssthresh;
235} 244}
236 245
237 246
238static void htcp_init(struct tcp_sock *tp) 247static void htcp_init(struct sock *sk)
239{ 248{
240 struct htcp *ca = tcp_ca(tp); 249 struct htcp *ca = inet_csk_ca(sk);
241 250
242 memset(ca, 0, sizeof(struct htcp)); 251 memset(ca, 0, sizeof(struct htcp));
243 ca->alpha = ALPHA_BASE; 252 ca->alpha = ALPHA_BASE;
244 ca->beta = BETA_MIN; 253 ca->beta = BETA_MIN;
245} 254}
246 255
247static void htcp_state(struct tcp_sock *tp, u8 new_state) 256static void htcp_state(struct sock *sk, u8 new_state)
248{ 257{
249 switch (new_state) { 258 switch (new_state) {
250 case TCP_CA_CWR: 259 case TCP_CA_CWR:
251 case TCP_CA_Recovery: 260 case TCP_CA_Recovery:
252 case TCP_CA_Loss: 261 case TCP_CA_Loss:
253 htcp_reset(tcp_ca(tp)); 262 htcp_reset(inet_csk_ca(sk));
254 break; 263 break;
255 } 264 }
256} 265}
@@ -269,7 +278,7 @@ static struct tcp_congestion_ops htcp = {
269 278
270static int __init htcp_register(void) 279static int __init htcp_register(void)
271{ 280{
272 BUG_ON(sizeof(struct htcp) > TCP_CA_PRIV_SIZE); 281 BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
273 BUILD_BUG_ON(BETA_MIN >= BETA_MAX); 282 BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
274 if (!use_bandwidth_switch) 283 if (!use_bandwidth_switch)
275 htcp.pkts_acked = NULL; 284 htcp.pkts_acked = NULL;
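One detail worth noting in the htcp hunks: beta is 7-bit fixed point (128 represents 1.0), which is why the recalculated ssthresh above is (snd_cwnd * beta) >> 7 with a floor of two segments. The same arithmetic as a standalone sketch in plain C, with u32 reduced to a typedef:

	typedef unsigned int u32;

	static u32 htcp_ssthresh_sketch(u32 snd_cwnd, u32 beta_7bit)
	{
		u32 ssthresh = (snd_cwnd * beta_7bit) >> 7;	/* cwnd * beta / 128 */

		return ssthresh > 2 ? ssthresh : 2;	/* never below two segments */
	}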
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 13a66342c304..77add63623df 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -33,19 +33,20 @@ MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
33 33
34 34
35/* This is called to refresh values for hybla parameters */ 35/* This is called to refresh values for hybla parameters */
36static inline void hybla_recalc_param (struct tcp_sock *tp) 36static inline void hybla_recalc_param (struct sock *sk)
37{ 37{
38 struct hybla *ca = tcp_ca(tp); 38 struct hybla *ca = inet_csk_ca(sk);
39 39
40 ca->rho_3ls = max_t(u32, tp->srtt / msecs_to_jiffies(rtt0), 8); 40 ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
41 ca->rho = ca->rho_3ls >> 3; 41 ca->rho = ca->rho_3ls >> 3;
42 ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; 42 ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
43 ca->rho2 = ca->rho2_7ls >>7; 43 ca->rho2 = ca->rho2_7ls >>7;
44} 44}
45 45
46static void hybla_init(struct tcp_sock *tp) 46static void hybla_init(struct sock *sk)
47{ 47{
48 struct hybla *ca = tcp_ca(tp); 48 struct tcp_sock *tp = tcp_sk(sk);
49 struct hybla *ca = inet_csk_ca(sk);
49 50
50 ca->rho = 0; 51 ca->rho = 0;
51 ca->rho2 = 0; 52 ca->rho2 = 0;
@@ -57,17 +58,16 @@ static void hybla_init(struct tcp_sock *tp)
57 tp->snd_cwnd_clamp = 65535; 58 tp->snd_cwnd_clamp = 65535;
58 59
59 /* 1st Rho measurement based on initial srtt */ 60 /* 1st Rho measurement based on initial srtt */
60 hybla_recalc_param(tp); 61 hybla_recalc_param(sk);
61 62
62 /* set minimum rtt as this is the 1st ever seen */ 63 /* set minimum rtt as this is the 1st ever seen */
63 ca->minrtt = tp->srtt; 64 ca->minrtt = tp->srtt;
64 tp->snd_cwnd = ca->rho; 65 tp->snd_cwnd = ca->rho;
65} 66}
66 67
67static void hybla_state(struct tcp_sock *tp, u8 ca_state) 68static void hybla_state(struct sock *sk, u8 ca_state)
68{ 69{
69 struct hybla *ca = tcp_ca(tp); 70 struct hybla *ca = inet_csk_ca(sk);
70
71 ca->hybla_en = (ca_state == TCP_CA_Open); 71 ca->hybla_en = (ca_state == TCP_CA_Open);
72} 72}
73 73
@@ -86,27 +86,28 @@ static inline u32 hybla_fraction(u32 odds)
86 * o Give cwnd a new value based on the model proposed 86 * o Give cwnd a new value based on the model proposed
87 * o remember increments <1 87 * o remember increments <1
88 */ 88 */
89static void hybla_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 89static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
90 u32 in_flight, int flag) 90 u32 in_flight, int flag)
91{ 91{
92 struct hybla *ca = tcp_ca(tp); 92 struct tcp_sock *tp = tcp_sk(sk);
93 struct hybla *ca = inet_csk_ca(sk);
93 u32 increment, odd, rho_fractions; 94 u32 increment, odd, rho_fractions;
94 int is_slowstart = 0; 95 int is_slowstart = 0;
95 96
96 /* Recalculate rho only if this srtt is the lowest */ 97 /* Recalculate rho only if this srtt is the lowest */
97 if (tp->srtt < ca->minrtt){ 98 if (tp->srtt < ca->minrtt){
98 hybla_recalc_param(tp); 99 hybla_recalc_param(sk);
99 ca->minrtt = tp->srtt; 100 ca->minrtt = tp->srtt;
100 } 101 }
101 102
102 if (!ca->hybla_en) 103 if (!ca->hybla_en)
103 return tcp_reno_cong_avoid(tp, ack, rtt, in_flight, flag); 104 return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
104 105
105 if (in_flight < tp->snd_cwnd) 106 if (in_flight < tp->snd_cwnd)
106 return; 107 return;
107 108
108 if (ca->rho == 0) 109 if (ca->rho == 0)
109 hybla_recalc_param(tp); 110 hybla_recalc_param(sk);
110 111
111 rho_fractions = ca->rho_3ls - (ca->rho << 3); 112 rho_fractions = ca->rho_3ls - (ca->rho << 3);
112 113
@@ -170,7 +171,7 @@ static struct tcp_congestion_ops tcp_hybla = {
170 171
171static int __init hybla_register(void) 172static int __init hybla_register(void)
172{ 173{
173 BUG_ON(sizeof(struct hybla) > TCP_CA_PRIV_SIZE); 174 BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
174 return tcp_register_congestion_control(&tcp_hybla); 175 return tcp_register_congestion_control(&tcp_hybla);
175} 176}
176 177
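hybla_recalc_param() above leans on tp->srtt being stored with three fractional bits: dividing it by msecs_to_jiffies(rtt0) already yields rho in <<3 fixed point, the clamp to 8 keeps rho >= 1.0, and squaring two 3-bit-fraction quantities then shifting left once gives rho^2 with seven fractional bits. A plain-C restatement of that fixed-point bookkeeping:

	typedef unsigned int u32;

	static void hybla_rho_sketch(u32 srtt_3ls, u32 rtt0_jiffies,
				     u32 *rho_3ls, u32 *rho2_7ls)
	{
		u32 r = srtt_3ls / rtt0_jiffies;	/* srtt keeps 3 fractional bits */

		*rho_3ls  = r < 8 ? 8 : r;		/* clamp rho to >= 1.0 (8/8) */
		*rho2_7ls = (*rho_3ls * *rho_3ls) << 1;	/* 3+3+1 = 7 fractional bits */
	}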
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..1afb080bdf0c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -114,20 +114,21 @@ int sysctl_tcp_moderate_rcvbuf = 1;
114/* Adapt the MSS value used to make delayed ack decision to the 114/* Adapt the MSS value used to make delayed ack decision to the
115 * real world. 115 * real world.
116 */ 116 */
117static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, 117static inline void tcp_measure_rcv_mss(struct sock *sk,
118 struct sk_buff *skb) 118 const struct sk_buff *skb)
119{ 119{
120 unsigned int len, lss; 120 struct inet_connection_sock *icsk = inet_csk(sk);
121 const unsigned int lss = icsk->icsk_ack.last_seg_size;
122 unsigned int len;
121 123
122 lss = tp->ack.last_seg_size; 124 icsk->icsk_ack.last_seg_size = 0;
123 tp->ack.last_seg_size = 0;
124 125
125 /* skb->len may jitter because of SACKs, even if peer 126 /* skb->len may jitter because of SACKs, even if peer
126 * sends good full-sized frames. 127 * sends good full-sized frames.
127 */ 128 */
128 len = skb->len; 129 len = skb->len;
129 if (len >= tp->ack.rcv_mss) { 130 if (len >= icsk->icsk_ack.rcv_mss) {
130 tp->ack.rcv_mss = len; 131 icsk->icsk_ack.rcv_mss = len;
131 } else { 132 } else {
132 /* Otherwise, we make more careful check taking into account, 133 /* Otherwise, we make more careful check taking into account,
133 * that SACKs block is variable. 134 * that SACKs block is variable.
@@ -147,41 +148,44 @@ static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
147 * tcp header plus fixed timestamp option length. 148 * tcp header plus fixed timestamp option length.
148 * Resulting "len" is MSS free of SACK jitter. 149 * Resulting "len" is MSS free of SACK jitter.
149 */ 150 */
150 len -= tp->tcp_header_len; 151 len -= tcp_sk(sk)->tcp_header_len;
151 tp->ack.last_seg_size = len; 152 icsk->icsk_ack.last_seg_size = len;
152 if (len == lss) { 153 if (len == lss) {
153 tp->ack.rcv_mss = len; 154 icsk->icsk_ack.rcv_mss = len;
154 return; 155 return;
155 } 156 }
156 } 157 }
157 tp->ack.pending |= TCP_ACK_PUSHED; 158 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
158 } 159 }
159} 160}
160 161
161static void tcp_incr_quickack(struct tcp_sock *tp) 162static void tcp_incr_quickack(struct sock *sk)
162{ 163{
163 unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); 164 struct inet_connection_sock *icsk = inet_csk(sk);
165 unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
164 166
165 if (quickacks==0) 167 if (quickacks==0)
166 quickacks=2; 168 quickacks=2;
167 if (quickacks > tp->ack.quick) 169 if (quickacks > icsk->icsk_ack.quick)
168 tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 170 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
169} 171}
170 172
171void tcp_enter_quickack_mode(struct tcp_sock *tp) 173void tcp_enter_quickack_mode(struct sock *sk)
172{ 174{
173 tcp_incr_quickack(tp); 175 struct inet_connection_sock *icsk = inet_csk(sk);
174 tp->ack.pingpong = 0; 176 tcp_incr_quickack(sk);
175 tp->ack.ato = TCP_ATO_MIN; 177 icsk->icsk_ack.pingpong = 0;
178 icsk->icsk_ack.ato = TCP_ATO_MIN;
176} 179}
177 180
178/* Send ACKs quickly, if "quick" count is not exhausted 181/* Send ACKs quickly, if "quick" count is not exhausted
179 * and the session is not interactive. 182 * and the session is not interactive.
180 */ 183 */
181 184
182static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) 185static inline int tcp_in_quickack_mode(const struct sock *sk)
183{ 186{
184 return (tp->ack.quick && !tp->ack.pingpong); 187 const struct inet_connection_sock *icsk = inet_csk(sk);
188 return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
185} 189}
186 190
187/* Buffer size and advertised window tuning. 191/* Buffer size and advertised window tuning.
@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk)
224 */ 228 */
225 229
226/* Slow part of check#2. */ 230/* Slow part of check#2. */
227static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, 231static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
228 struct sk_buff *skb) 232 const struct sk_buff *skb)
229{ 233{
230 /* Optimize this! */ 234 /* Optimize this! */
231 int truesize = tcp_win_from_space(skb->truesize)/2; 235 int truesize = tcp_win_from_space(skb->truesize)/2;
@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
233 237
234 while (tp->rcv_ssthresh <= window) { 238 while (tp->rcv_ssthresh <= window) {
235 if (truesize <= skb->len) 239 if (truesize <= skb->len)
236 return 2*tp->ack.rcv_mss; 240 return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
237 241
238 truesize >>= 1; 242 truesize >>= 1;
239 window >>= 1; 243 window >>= 1;
@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
260 264
261 if (incr) { 265 if (incr) {
262 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); 266 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
263 tp->ack.quick |= 1; 267 inet_csk(sk)->icsk_ack.quick |= 1;
264 } 268 }
265 } 269 }
266} 270}
@@ -321,11 +325,12 @@ static void tcp_init_buffer_space(struct sock *sk)
321/* 5. Recalculate window clamp after socket hit its memory bounds. */ 325/* 5. Recalculate window clamp after socket hit its memory bounds. */
322static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) 326static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
323{ 327{
328 struct inet_connection_sock *icsk = inet_csk(sk);
324 struct sk_buff *skb; 329 struct sk_buff *skb;
325 unsigned int app_win = tp->rcv_nxt - tp->copied_seq; 330 unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
326 int ofo_win = 0; 331 int ofo_win = 0;
327 332
328 tp->ack.quick = 0; 333 icsk->icsk_ack.quick = 0;
329 334
330 skb_queue_walk(&tp->out_of_order_queue, skb) { 335 skb_queue_walk(&tp->out_of_order_queue, skb) {
331 ofo_win += skb->len; 336 ofo_win += skb->len;
@@ -346,8 +351,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
346 app_win += ofo_win; 351 app_win += ofo_win;
347 if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) 352 if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
348 app_win >>= 1; 353 app_win >>= 1;
349 if (app_win > tp->ack.rcv_mss) 354 if (app_win > icsk->icsk_ack.rcv_mss)
350 app_win -= tp->ack.rcv_mss; 355 app_win -= icsk->icsk_ack.rcv_mss;
351 app_win = max(app_win, 2U*tp->advmss); 356 app_win = max(app_win, 2U*tp->advmss);
352 357
353 if (!ofo_win) 358 if (!ofo_win)
@@ -415,11 +420,12 @@ new_measure:
415 tp->rcv_rtt_est.time = tcp_time_stamp; 420 tp->rcv_rtt_est.time = tcp_time_stamp;
416} 421}
417 422
418static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) 423static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
419{ 424{
425 struct tcp_sock *tp = tcp_sk(sk);
420 if (tp->rx_opt.rcv_tsecr && 426 if (tp->rx_opt.rcv_tsecr &&
421 (TCP_SKB_CB(skb)->end_seq - 427 (TCP_SKB_CB(skb)->end_seq -
422 TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) 428 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
423 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); 429 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
424} 430}
425 431
@@ -492,41 +498,42 @@ new_measure:
492 */ 498 */
493static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) 499static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
494{ 500{
501 struct inet_connection_sock *icsk = inet_csk(sk);
495 u32 now; 502 u32 now;
496 503
497 tcp_schedule_ack(tp); 504 inet_csk_schedule_ack(sk);
498 505
499 tcp_measure_rcv_mss(tp, skb); 506 tcp_measure_rcv_mss(sk, skb);
500 507
501 tcp_rcv_rtt_measure(tp); 508 tcp_rcv_rtt_measure(tp);
502 509
503 now = tcp_time_stamp; 510 now = tcp_time_stamp;
504 511
505 if (!tp->ack.ato) { 512 if (!icsk->icsk_ack.ato) {
506 /* The _first_ data packet received, initialize 513 /* The _first_ data packet received, initialize
507 * delayed ACK engine. 514 * delayed ACK engine.
508 */ 515 */
509 tcp_incr_quickack(tp); 516 tcp_incr_quickack(sk);
510 tp->ack.ato = TCP_ATO_MIN; 517 icsk->icsk_ack.ato = TCP_ATO_MIN;
511 } else { 518 } else {
512 int m = now - tp->ack.lrcvtime; 519 int m = now - icsk->icsk_ack.lrcvtime;
513 520
514 if (m <= TCP_ATO_MIN/2) { 521 if (m <= TCP_ATO_MIN/2) {
515 /* The fastest case is the first. */ 522 /* The fastest case is the first. */
516 tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2; 523 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
517 } else if (m < tp->ack.ato) { 524 } else if (m < icsk->icsk_ack.ato) {
518 tp->ack.ato = (tp->ack.ato>>1) + m; 525 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
519 if (tp->ack.ato > tp->rto) 526 if (icsk->icsk_ack.ato > icsk->icsk_rto)
520 tp->ack.ato = tp->rto; 527 icsk->icsk_ack.ato = icsk->icsk_rto;
521 } else if (m > tp->rto) { 528 } else if (m > icsk->icsk_rto) {
522 /* Too long gap. Apparently sender falled to 529 /* Too long gap. Apparently sender falled to
523 * restart window, so that we send ACKs quickly. 530 * restart window, so that we send ACKs quickly.
524 */ 531 */
525 tcp_incr_quickack(tp); 532 tcp_incr_quickack(sk);
526 sk_stream_mem_reclaim(sk); 533 sk_stream_mem_reclaim(sk);
527 } 534 }
528 } 535 }
529 tp->ack.lrcvtime = now; 536 icsk->icsk_ack.lrcvtime = now;
530 537
531 TCP_ECN_check_ce(tp, skb); 538 TCP_ECN_check_ce(tp, skb);
532 539
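The delayed-ACK timeout update above moves verbatim onto icsk->icsk_ack: a fast inter-arrival (m <= TCP_ATO_MIN/2) decays ato toward TCP_ATO_MIN, a moderate one folds m into an EWMA clamped by the RTO, and a gap larger than the RTO re-enters quickack mode instead. A hedged sketch of just that branch structure, with HZ and TCP_ATO_MIN (HZ/25 in this era) assumed:

	typedef unsigned int u32;

	#define HZ 1000				/* assumed for the sketch */
	#define TCP_ATO_MIN ((unsigned)(HZ / 25))	/* assumed era value */

	static u32 ack_ato_update(u32 ato, long m, u32 rto)
	{
		if (m <= TCP_ATO_MIN / 2) {
			ato = (ato >> 1) + TCP_ATO_MIN / 2;	/* fastest case */
		} else if ((u32)m < ato) {
			ato = (ato >> 1) + m;			/* EWMA toward m */
			if (ato > rto)
				ato = rto;			/* never beyond RTO */
		}
		/* m > rto: the caller re-enters quickack mode (see above) */
		return ato;
	}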
@@ -543,8 +550,10 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
543 * To save cycles in the RFC 1323 implementation it was better to break 550 * To save cycles in the RFC 1323 implementation it was better to break
544 * it up into three procedures. -- erics 551 * it up into three procedures. -- erics
545 */ 552 */
546static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) 553static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
547{ 554{
555 struct tcp_sock *tp = tcp_sk(sk);
556 const struct inet_connection_sock *icsk = inet_csk(sk);
548 long m = mrtt; /* RTT */ 557 long m = mrtt; /* RTT */
549 558
550 /* The following amusing code comes from Jacobson's 559 /* The following amusing code comes from Jacobson's
@@ -604,15 +613,16 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
604 tp->rtt_seq = tp->snd_nxt; 613 tp->rtt_seq = tp->snd_nxt;
605 } 614 }
606 615
607 if (tp->ca_ops->rtt_sample) 616 if (icsk->icsk_ca_ops->rtt_sample)
608 tp->ca_ops->rtt_sample(tp, *usrtt); 617 icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
609} 618}
610 619
611/* Calculate rto without backoff. This is the second half of Van Jacobson's 620/* Calculate rto without backoff. This is the second half of Van Jacobson's
612 * routine referred to above. 621 * routine referred to above.
613 */ 622 */
614static inline void tcp_set_rto(struct tcp_sock *tp) 623static inline void tcp_set_rto(struct sock *sk)
615{ 624{
625 const struct tcp_sock *tp = tcp_sk(sk);
616 /* Old crap is replaced with new one. 8) 626 /* Old crap is replaced with new one. 8)
617 * 627 *
618 * More seriously: 628 * More seriously:
@@ -623,7 +633,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
623 * is invisible. Actually, Linux-2.4 also generates erratic 633 * is invisible. Actually, Linux-2.4 also generates erratic
624 * ACKs in some curcumstances. 634 * ACKs in some curcumstances.
625 */ 635 */
626 tp->rto = (tp->srtt >> 3) + tp->rttvar; 636 inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
627 637
628 /* 2. Fixups made earlier cannot be right. 638 /* 2. Fixups made earlier cannot be right.
629 * If we do not estimate RTO correctly without them, 639 * If we do not estimate RTO correctly without them,
@@ -635,10 +645,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
635/* NOTE: clamping at TCP_RTO_MIN is not required, current algo 645/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
636 * guarantees that rto is higher. 646 * guarantees that rto is higher.
637 */ 647 */
638static inline void tcp_bound_rto(struct tcp_sock *tp) 648static inline void tcp_bound_rto(struct sock *sk)
639{ 649{
640 if (tp->rto > TCP_RTO_MAX) 650 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
641 tp->rto = TCP_RTO_MAX; 651 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
642} 652}
643 653
644/* Save metrics learned by this TCP session. 654/* Save metrics learned by this TCP session.
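tcp_set_rto() and tcp_bound_rto() above keep Van Jacobson's rule intact and only change where the result lives (icsk->icsk_rto): rto = srtt/8 + rttvar, with srtt carrying three fractional bits, then clamped to TCP_RTO_MAX. As one standalone expression, with the era's 120*HZ ceiling assumed:

	typedef unsigned int u32;

	#define HZ 1000				/* assumed for the sketch */
	#define TCP_RTO_MAX ((unsigned)(120 * HZ))	/* assumed era value */

	static u32 rto_sketch(u32 srtt_3ls, u32 rttvar)
	{
		u32 rto = (srtt_3ls >> 3) + rttvar;	/* Jacobson: srtt/8 + rttvar */

		return rto > TCP_RTO_MAX ? TCP_RTO_MAX : rto;
	}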
@@ -656,9 +666,10 @@ void tcp_update_metrics(struct sock *sk)
656 dst_confirm(dst); 666 dst_confirm(dst);
657 667
658 if (dst && (dst->flags&DST_HOST)) { 668 if (dst && (dst->flags&DST_HOST)) {
669 const struct inet_connection_sock *icsk = inet_csk(sk);
659 int m; 670 int m;
660 671
661 if (tp->backoff || !tp->srtt) { 672 if (icsk->icsk_backoff || !tp->srtt) {
662 /* This session failed to estimate rtt. Why? 673 /* This session failed to estimate rtt. Why?
663 * Probably, no packets returned in time. 674 * Probably, no packets returned in time.
664 * Reset our results. 675 * Reset our results.
@@ -707,7 +718,7 @@ void tcp_update_metrics(struct sock *sk)
707 tp->snd_cwnd > dst_metric(dst, RTAX_CWND)) 718 tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
708 dst->metrics[RTAX_CWND-1] = tp->snd_cwnd; 719 dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
709 } else if (tp->snd_cwnd > tp->snd_ssthresh && 720 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
710 tp->ca_state == TCP_CA_Open) { 721 icsk->icsk_ca_state == TCP_CA_Open) {
711 /* Cong. avoidance phase, cwnd is reliable. */ 722 /* Cong. avoidance phase, cwnd is reliable. */
712 if (!dst_metric_locked(dst, RTAX_SSTHRESH)) 723 if (!dst_metric_locked(dst, RTAX_SSTHRESH))
713 dst->metrics[RTAX_SSTHRESH-1] = 724 dst->metrics[RTAX_SSTHRESH-1] =
@@ -801,9 +812,9 @@ static void tcp_init_metrics(struct sock *sk)
801 tp->mdev = dst_metric(dst, RTAX_RTTVAR); 812 tp->mdev = dst_metric(dst, RTAX_RTTVAR);
802 tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); 813 tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
803 } 814 }
804 tcp_set_rto(tp); 815 tcp_set_rto(sk);
805 tcp_bound_rto(tp); 816 tcp_bound_rto(sk);
806 if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 817 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
807 goto reset; 818 goto reset;
808 tp->snd_cwnd = tcp_init_cwnd(tp, dst); 819 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
809 tp->snd_cwnd_stamp = tcp_time_stamp; 820 tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -817,12 +828,14 @@ reset:
817 if (!tp->rx_opt.saw_tstamp && tp->srtt) { 828 if (!tp->rx_opt.saw_tstamp && tp->srtt) {
818 tp->srtt = 0; 829 tp->srtt = 0;
819 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; 830 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
820 tp->rto = TCP_TIMEOUT_INIT; 831 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
821 } 832 }
822} 833}
823 834
824static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts) 835static void tcp_update_reordering(struct sock *sk, const int metric,
836 const int ts)
825{ 837{
838 struct tcp_sock *tp = tcp_sk(sk);
826 if (metric > tp->reordering) { 839 if (metric > tp->reordering) {
827 tp->reordering = min(TCP_MAX_REORDERING, metric); 840 tp->reordering = min(TCP_MAX_REORDERING, metric);
828 841
@@ -837,7 +850,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
837 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); 850 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
838#if FASTRETRANS_DEBUG > 1 851#if FASTRETRANS_DEBUG > 1
839 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", 852 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
840 tp->rx_opt.sack_ok, tp->ca_state, 853 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
841 tp->reordering, 854 tp->reordering,
842 tp->fackets_out, 855 tp->fackets_out,
843 tp->sacked_out, 856 tp->sacked_out,
@@ -899,6 +912,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
899static int 912static int
900tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una) 913tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
901{ 914{
915 const struct inet_connection_sock *icsk = inet_csk(sk);
902 struct tcp_sock *tp = tcp_sk(sk); 916 struct tcp_sock *tp = tcp_sk(sk);
903 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; 917 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
904 struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2); 918 struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
@@ -1064,7 +1078,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1064 * we have to account for reordering! Ugly, 1078 * we have to account for reordering! Ugly,
1065 * but should help. 1079 * but should help.
1066 */ 1080 */
1067 if (lost_retrans && tp->ca_state == TCP_CA_Recovery) { 1081 if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
1068 struct sk_buff *skb; 1082 struct sk_buff *skb;
1069 1083
1070 sk_stream_for_retrans_queue(skb, sk) { 1084 sk_stream_for_retrans_queue(skb, sk) {
@@ -1093,8 +1107,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1093 1107
1094 tp->left_out = tp->sacked_out + tp->lost_out; 1108 tp->left_out = tp->sacked_out + tp->lost_out;
1095 1109
1096 if ((reord < tp->fackets_out) && tp->ca_state != TCP_CA_Loss) 1110 if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
1097 tcp_update_reordering(tp, ((tp->fackets_out + 1) - reord), 0); 1111 tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
1098 1112
1099#if FASTRETRANS_DEBUG > 0 1113#if FASTRETRANS_DEBUG > 0
1100 BUG_TRAP((int)tp->sacked_out >= 0); 1114 BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1111,17 +1125,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1111 */ 1125 */
1112void tcp_enter_frto(struct sock *sk) 1126void tcp_enter_frto(struct sock *sk)
1113{ 1127{
1128 const struct inet_connection_sock *icsk = inet_csk(sk);
1114 struct tcp_sock *tp = tcp_sk(sk); 1129 struct tcp_sock *tp = tcp_sk(sk);
1115 struct sk_buff *skb; 1130 struct sk_buff *skb;
1116 1131
1117 tp->frto_counter = 1; 1132 tp->frto_counter = 1;
1118 1133
1119 if (tp->ca_state <= TCP_CA_Disorder || 1134 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1120 tp->snd_una == tp->high_seq || 1135 tp->snd_una == tp->high_seq ||
1121 (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1136 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1122 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1137 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1123 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1138 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1124 tcp_ca_event(tp, CA_EVENT_FRTO); 1139 tcp_ca_event(sk, CA_EVENT_FRTO);
1125 } 1140 }
1126 1141
1127 /* Have to clear retransmission markers here to keep the bookkeeping 1142 /* Have to clear retransmission markers here to keep the bookkeeping
@@ -1138,7 +1153,7 @@ void tcp_enter_frto(struct sock *sk)
1138 } 1153 }
1139 tcp_sync_left_out(tp); 1154 tcp_sync_left_out(tp);
1140 1155
1141 tcp_set_ca_state(tp, TCP_CA_Open); 1156 tcp_set_ca_state(sk, TCP_CA_Open);
1142 tp->frto_highmark = tp->snd_nxt; 1157 tp->frto_highmark = tp->snd_nxt;
1143} 1158}
1144 1159
@@ -1184,7 +1199,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
1184 1199
1185 tp->reordering = min_t(unsigned int, tp->reordering, 1200 tp->reordering = min_t(unsigned int, tp->reordering,
1186 sysctl_tcp_reordering); 1201 sysctl_tcp_reordering);
1187 tcp_set_ca_state(tp, TCP_CA_Loss); 1202 tcp_set_ca_state(sk, TCP_CA_Loss);
1188 tp->high_seq = tp->frto_highmark; 1203 tp->high_seq = tp->frto_highmark;
1189 TCP_ECN_queue_cwr(tp); 1204 TCP_ECN_queue_cwr(tp);
1190} 1205}
@@ -1208,16 +1223,17 @@ void tcp_clear_retrans(struct tcp_sock *tp)
1208 */ 1223 */
1209void tcp_enter_loss(struct sock *sk, int how) 1224void tcp_enter_loss(struct sock *sk, int how)
1210{ 1225{
1226 const struct inet_connection_sock *icsk = inet_csk(sk);
1211 struct tcp_sock *tp = tcp_sk(sk); 1227 struct tcp_sock *tp = tcp_sk(sk);
1212 struct sk_buff *skb; 1228 struct sk_buff *skb;
1213 int cnt = 0; 1229 int cnt = 0;
1214 1230
1215 /* Reduce ssthresh if it has not yet been made inside this window. */ 1231 /* Reduce ssthresh if it has not yet been made inside this window. */
1216 if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 1232 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
1217 (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1233 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1218 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1234 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1219 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1235 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1220 tcp_ca_event(tp, CA_EVENT_LOSS); 1236 tcp_ca_event(sk, CA_EVENT_LOSS);
1221 } 1237 }
1222 tp->snd_cwnd = 1; 1238 tp->snd_cwnd = 1;
1223 tp->snd_cwnd_cnt = 0; 1239 tp->snd_cwnd_cnt = 0;
@@ -1248,12 +1264,12 @@ void tcp_enter_loss(struct sock *sk, int how)
1248 1264
1249 tp->reordering = min_t(unsigned int, tp->reordering, 1265 tp->reordering = min_t(unsigned int, tp->reordering,
1250 sysctl_tcp_reordering); 1266 sysctl_tcp_reordering);
1251 tcp_set_ca_state(tp, TCP_CA_Loss); 1267 tcp_set_ca_state(sk, TCP_CA_Loss);
1252 tp->high_seq = tp->snd_nxt; 1268 tp->high_seq = tp->snd_nxt;
1253 TCP_ECN_queue_cwr(tp); 1269 TCP_ECN_queue_cwr(tp);
1254} 1270}
1255 1271
1256static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) 1272static int tcp_check_sack_reneging(struct sock *sk)
1257{ 1273{
1258 struct sk_buff *skb; 1274 struct sk_buff *skb;
1259 1275
@@ -1265,12 +1281,14 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
1265 */ 1281 */
1266 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL && 1282 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
1267 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1283 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1284 struct inet_connection_sock *icsk = inet_csk(sk);
1268 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); 1285 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
1269 1286
1270 tcp_enter_loss(sk, 1); 1287 tcp_enter_loss(sk, 1);
1271 tp->retransmits++; 1288 icsk->icsk_retransmits++;
1272 tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); 1289 tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
1273 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1290 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1291 icsk->icsk_rto, TCP_RTO_MAX);
1274 return 1; 1292 return 1;
1275 } 1293 }
1276 return 0; 1294 return 0;
@@ -1281,15 +1299,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
1281 return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; 1299 return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
1282} 1300}
1283 1301
1284static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) 1302static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
1285{ 1303{
1286 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); 1304 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
1287} 1305}
1288 1306
1289static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) 1307static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
1290{ 1308{
1291 return tp->packets_out && 1309 return tp->packets_out &&
1292 tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); 1310 tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
1293} 1311}
1294 1312
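
With the RTO now read through inet_csk(sk), tcp_skb_timedout() keeps its meaning: a segment whose transmit timestamp is more than one RTO old is presumed lost, and tcp_head_timedout() applies that test to the head of the write queue so the scoreboard code further down can mark losses without waiting for the retransmit timer itself to fire.
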
1295/* Linux NewReno/SACK/FACK/ECN state machine. 1313/* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1423,8 +1441,9 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
1423 * in assumption of absent reordering, interpret this as reordering. 1441 * in assumption of absent reordering, interpret this as reordering.
1424 * The only other reason could be a bug in the receiver TCP. 1442 * The only other reason could be a bug in the receiver TCP.

1425 */ 1443 */
1426static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend) 1444static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1427{ 1445{
1446 struct tcp_sock *tp = tcp_sk(sk);
1428 u32 holes; 1447 u32 holes;
1429 1448
1430 holes = max(tp->lost_out, 1U); 1449 holes = max(tp->lost_out, 1U);
@@ -1432,16 +1451,17 @@ static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
1432 1451
1433 if ((tp->sacked_out + holes) > tp->packets_out) { 1452 if ((tp->sacked_out + holes) > tp->packets_out) {
1434 tp->sacked_out = tp->packets_out - holes; 1453 tp->sacked_out = tp->packets_out - holes;
1435 tcp_update_reordering(tp, tp->packets_out+addend, 0); 1454 tcp_update_reordering(sk, tp->packets_out + addend, 0);
1436 } 1455 }
1437} 1456}
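
To make the clamp above concrete (hypothetical numbers): with packets_out = 10 and lost_out = 2, holes = 2; if the peer has reported sacked_out = 9, then 9 + 2 > 10, which is impossible without reordering, so sacked_out is clamped to 10 - 2 = 8 and the reordering metric is raised to packets_out + addend.
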
1438 1457
1439/* Emulate SACKs for SACKless connection: account for a new dupack. */ 1458/* Emulate SACKs for SACKless connection: account for a new dupack. */
1440 1459
1441static void tcp_add_reno_sack(struct tcp_sock *tp) 1460static void tcp_add_reno_sack(struct sock *sk)
1442{ 1461{
1462 struct tcp_sock *tp = tcp_sk(sk);
1443 tp->sacked_out++; 1463 tp->sacked_out++;
1444 tcp_check_reno_reordering(tp, 0); 1464 tcp_check_reno_reordering(sk, 0);
1445 tcp_sync_left_out(tp); 1465 tcp_sync_left_out(tp);
1446} 1466}
1447 1467
@@ -1456,7 +1476,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acke
1456 else 1476 else
1457 tp->sacked_out -= acked-1; 1477 tp->sacked_out -= acked-1;
1458 } 1478 }
1459 tcp_check_reno_reordering(tp, acked); 1479 tcp_check_reno_reordering(sk, acked);
1460 tcp_sync_left_out(tp); 1480 tcp_sync_left_out(tp);
1461} 1481}
1462 1482
@@ -1509,7 +1529,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1509 struct sk_buff *skb; 1529 struct sk_buff *skb;
1510 1530
1511 sk_stream_for_retrans_queue(skb, sk) { 1531 sk_stream_for_retrans_queue(skb, sk) {
1512 if (tcp_skb_timedout(tp, skb) && 1532 if (tcp_skb_timedout(sk, skb) &&
1513 !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 1533 !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1514 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1534 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1515 tp->lost_out += tcp_skb_pcount(skb); 1535 tp->lost_out += tcp_skb_pcount(skb);
@@ -1530,14 +1550,16 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
1530} 1550}
1531 1551
1532/* Decrease cwnd each second ack. */ 1552/* Decrease cwnd each second ack. */
1533static void tcp_cwnd_down(struct tcp_sock *tp) 1553static void tcp_cwnd_down(struct sock *sk)
1534{ 1554{
1555 const struct inet_connection_sock *icsk = inet_csk(sk);
1556 struct tcp_sock *tp = tcp_sk(sk);
1535 int decr = tp->snd_cwnd_cnt + 1; 1557 int decr = tp->snd_cwnd_cnt + 1;
1536 1558
1537 tp->snd_cwnd_cnt = decr&1; 1559 tp->snd_cwnd_cnt = decr&1;
1538 decr >>= 1; 1560 decr >>= 1;
1539 1561
1540 if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp)) 1562 if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
1541 tp->snd_cwnd -= decr; 1563 tp->snd_cwnd -= decr;
1542 1564
1543 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); 1565 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
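
The three lines manipulating decr implement "each second ack" with one stored bit: snd_cwnd_cnt carries the low bit across calls, so decr is non-zero only on every other ACK. A standalone sketch of just that arithmetic, with illustrative constants in place of min_cwnd() and with the packets-in-flight clamp omitted:

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 10, snd_cwnd_cnt = 0;
	int ack;

	for (ack = 0; ack < 6; ack++) {
		unsigned int decr = snd_cwnd_cnt + 1;

		snd_cwnd_cnt = decr & 1;	/* carry one bit to the next call */
		decr >>= 1;			/* non-zero on every 2nd call    */
		if (decr && snd_cwnd > 2)	/* 2 stands in for min_cwnd()    */
			snd_cwnd -= decr;
		printf("ack %d: cwnd %u\n", ack, snd_cwnd);
	}
	return 0;	/* prints 10, 9, 9, 8, 8, 7 */
}
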
@@ -1571,11 +1593,15 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
1571#define DBGUNDO(x...) do { } while (0) 1593#define DBGUNDO(x...) do { } while (0)
1572#endif 1594#endif
1573 1595
1574static void tcp_undo_cwr(struct tcp_sock *tp, int undo) 1596static void tcp_undo_cwr(struct sock *sk, const int undo)
1575{ 1597{
1598 struct tcp_sock *tp = tcp_sk(sk);
1599
1576 if (tp->prior_ssthresh) { 1600 if (tp->prior_ssthresh) {
1577 if (tp->ca_ops->undo_cwnd) 1601 const struct inet_connection_sock *icsk = inet_csk(sk);
1578 tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp); 1602
1603 if (icsk->icsk_ca_ops->undo_cwnd)
1604 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
1579 else 1605 else
1580 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); 1606 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
1581 1607
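
The fallback in the else branch leans on the Reno-style convention that the ssthresh() operation returned cwnd/2 at the loss event, so snd_ssthresh << 1 approximately restores the pre-loss congestion window; congestion-control modules with their own notion of the old window supply an undo_cwnd hook instead.
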
@@ -1603,9 +1629,9 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1603 /* Happy end! We did not retransmit anything 1629 /* Happy end! We did not retransmit anything
1604 * or our original transmission succeeded. 1630 * or our original transmission succeeded.
1605 */ 1631 */
1606 DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans"); 1632 DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
1607 tcp_undo_cwr(tp, 1); 1633 tcp_undo_cwr(sk, 1);
1608 if (tp->ca_state == TCP_CA_Loss) 1634 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
1609 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1635 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1610 else 1636 else
1611 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); 1637 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
@@ -1618,7 +1644,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1618 tcp_moderate_cwnd(tp); 1644 tcp_moderate_cwnd(tp);
1619 return 1; 1645 return 1;
1620 } 1646 }
1621 tcp_set_ca_state(tp, TCP_CA_Open); 1647 tcp_set_ca_state(sk, TCP_CA_Open);
1622 return 0; 1648 return 0;
1623} 1649}
1624 1650
@@ -1627,7 +1653,7 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
1627{ 1653{
1628 if (tp->undo_marker && !tp->undo_retrans) { 1654 if (tp->undo_marker && !tp->undo_retrans) {
1629 DBGUNDO(sk, tp, "D-SACK"); 1655 DBGUNDO(sk, tp, "D-SACK");
1630 tcp_undo_cwr(tp, 1); 1656 tcp_undo_cwr(sk, 1);
1631 tp->undo_marker = 0; 1657 tp->undo_marker = 0;
1632 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); 1658 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
1633 } 1659 }
@@ -1648,10 +1674,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
1648 if (tp->retrans_out == 0) 1674 if (tp->retrans_out == 0)
1649 tp->retrans_stamp = 0; 1675 tp->retrans_stamp = 0;
1650 1676
1651 tcp_update_reordering(tp, tcp_fackets_out(tp)+acked, 1); 1677 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
1652 1678
1653 DBGUNDO(sk, tp, "Hoe"); 1679 DBGUNDO(sk, tp, "Hoe");
1654 tcp_undo_cwr(tp, 0); 1680 tcp_undo_cwr(sk, 0);
1655 NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); 1681 NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
1656 1682
1657 /* So... Do not make Hoe's retransmit yet. 1683 /* So... Do not make Hoe's retransmit yet.
@@ -1674,22 +1700,23 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
1674 DBGUNDO(sk, tp, "partial loss"); 1700 DBGUNDO(sk, tp, "partial loss");
1675 tp->lost_out = 0; 1701 tp->lost_out = 0;
1676 tp->left_out = tp->sacked_out; 1702 tp->left_out = tp->sacked_out;
1677 tcp_undo_cwr(tp, 1); 1703 tcp_undo_cwr(sk, 1);
1678 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1704 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1679 tp->retransmits = 0; 1705 inet_csk(sk)->icsk_retransmits = 0;
1680 tp->undo_marker = 0; 1706 tp->undo_marker = 0;
1681 if (!IsReno(tp)) 1707 if (!IsReno(tp))
1682 tcp_set_ca_state(tp, TCP_CA_Open); 1708 tcp_set_ca_state(sk, TCP_CA_Open);
1683 return 1; 1709 return 1;
1684 } 1710 }
1685 return 0; 1711 return 0;
1686} 1712}
1687 1713
1688static inline void tcp_complete_cwr(struct tcp_sock *tp) 1714static inline void tcp_complete_cwr(struct sock *sk)
1689{ 1715{
1716 struct tcp_sock *tp = tcp_sk(sk);
1690 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 1717 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
1691 tp->snd_cwnd_stamp = tcp_time_stamp; 1718 tp->snd_cwnd_stamp = tcp_time_stamp;
1692 tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR); 1719 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
1693} 1720}
1694 1721
1695static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) 1722static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
@@ -1700,21 +1727,21 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
1700 tp->retrans_stamp = 0; 1727 tp->retrans_stamp = 0;
1701 1728
1702 if (flag&FLAG_ECE) 1729 if (flag&FLAG_ECE)
1703 tcp_enter_cwr(tp); 1730 tcp_enter_cwr(sk);
1704 1731
1705 if (tp->ca_state != TCP_CA_CWR) { 1732 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
1706 int state = TCP_CA_Open; 1733 int state = TCP_CA_Open;
1707 1734
1708 if (tp->left_out || tp->retrans_out || tp->undo_marker) 1735 if (tp->left_out || tp->retrans_out || tp->undo_marker)
1709 state = TCP_CA_Disorder; 1736 state = TCP_CA_Disorder;
1710 1737
1711 if (tp->ca_state != state) { 1738 if (inet_csk(sk)->icsk_ca_state != state) {
1712 tcp_set_ca_state(tp, state); 1739 tcp_set_ca_state(sk, state);
1713 tp->high_seq = tp->snd_nxt; 1740 tp->high_seq = tp->snd_nxt;
1714 } 1741 }
1715 tcp_moderate_cwnd(tp); 1742 tcp_moderate_cwnd(tp);
1716 } else { 1743 } else {
1717 tcp_cwnd_down(tp); 1744 tcp_cwnd_down(sk);
1718 } 1745 }
1719} 1746}
1720 1747
@@ -1733,6 +1760,7 @@ static void
1733tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, 1760tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1734 int prior_packets, int flag) 1761 int prior_packets, int flag)
1735{ 1762{
1763 struct inet_connection_sock *icsk = inet_csk(sk);
1736 struct tcp_sock *tp = tcp_sk(sk); 1764 struct tcp_sock *tp = tcp_sk(sk);
1737 int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP)); 1765 int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
1738 1766
@@ -1750,13 +1778,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1750 tp->prior_ssthresh = 0; 1778 tp->prior_ssthresh = 0;
1751 1779
1752 /* B. In all the states check for reneging SACKs. */ 1780 /* B. In all the states check for reneging SACKs. */
1753 if (tp->sacked_out && tcp_check_sack_reneging(sk, tp)) 1781 if (tp->sacked_out && tcp_check_sack_reneging(sk))
1754 return; 1782 return;
1755 1783
1756 /* C. Process data loss notification, provided it is valid. */ 1784 /* C. Process data loss notification, provided it is valid. */
1757 if ((flag&FLAG_DATA_LOST) && 1785 if ((flag&FLAG_DATA_LOST) &&
1758 before(tp->snd_una, tp->high_seq) && 1786 before(tp->snd_una, tp->high_seq) &&
1759 tp->ca_state != TCP_CA_Open && 1787 icsk->icsk_ca_state != TCP_CA_Open &&
1760 tp->fackets_out > tp->reordering) { 1788 tp->fackets_out > tp->reordering) {
1761 tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq); 1789 tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
1762 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); 1790 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
@@ -1767,14 +1795,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1767 1795
1768 /* E. Check state exit conditions. State can be terminated 1796 /* E. Check state exit conditions. State can be terminated
1769 * when high_seq is ACKed. */ 1797 * when high_seq is ACKed. */
1770 if (tp->ca_state == TCP_CA_Open) { 1798 if (icsk->icsk_ca_state == TCP_CA_Open) {
1771 if (!sysctl_tcp_frto) 1799 if (!sysctl_tcp_frto)
1772 BUG_TRAP(tp->retrans_out == 0); 1800 BUG_TRAP(tp->retrans_out == 0);
1773 tp->retrans_stamp = 0; 1801 tp->retrans_stamp = 0;
1774 } else if (!before(tp->snd_una, tp->high_seq)) { 1802 } else if (!before(tp->snd_una, tp->high_seq)) {
1775 switch (tp->ca_state) { 1803 switch (icsk->icsk_ca_state) {
1776 case TCP_CA_Loss: 1804 case TCP_CA_Loss:
1777 tp->retransmits = 0; 1805 icsk->icsk_retransmits = 0;
1778 if (tcp_try_undo_recovery(sk, tp)) 1806 if (tcp_try_undo_recovery(sk, tp))
1779 return; 1807 return;
1780 break; 1808 break;
@@ -1783,8 +1811,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1783 /* CWR is to be held until something *above* high_seq 1811 /* CWR is to be held until something *above* high_seq
1784 * is ACKed, for the CWR bit to reach the receiver. */ 1812 * is ACKed, for the CWR bit to reach the receiver. */
1785 if (tp->snd_una != tp->high_seq) { 1813 if (tp->snd_una != tp->high_seq) {
1786 tcp_complete_cwr(tp); 1814 tcp_complete_cwr(sk);
1787 tcp_set_ca_state(tp, TCP_CA_Open); 1815 tcp_set_ca_state(sk, TCP_CA_Open);
1788 } 1816 }
1789 break; 1817 break;
1790 1818
@@ -1795,7 +1823,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1795 * catching for all duplicate ACKs. */ 1823 * catching for all duplicate ACKs. */
1796 IsReno(tp) || tp->snd_una != tp->high_seq) { 1824 IsReno(tp) || tp->snd_una != tp->high_seq) {
1797 tp->undo_marker = 0; 1825 tp->undo_marker = 0;
1798 tcp_set_ca_state(tp, TCP_CA_Open); 1826 tcp_set_ca_state(sk, TCP_CA_Open);
1799 } 1827 }
1800 break; 1828 break;
1801 1829
@@ -1804,17 +1832,17 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1804 tcp_reset_reno_sack(tp); 1832 tcp_reset_reno_sack(tp);
1805 if (tcp_try_undo_recovery(sk, tp)) 1833 if (tcp_try_undo_recovery(sk, tp))
1806 return; 1834 return;
1807 tcp_complete_cwr(tp); 1835 tcp_complete_cwr(sk);
1808 break; 1836 break;
1809 } 1837 }
1810 } 1838 }
1811 1839
1812 /* F. Process state. */ 1840 /* F. Process state. */
1813 switch (tp->ca_state) { 1841 switch (icsk->icsk_ca_state) {
1814 case TCP_CA_Recovery: 1842 case TCP_CA_Recovery:
1815 if (prior_snd_una == tp->snd_una) { 1843 if (prior_snd_una == tp->snd_una) {
1816 if (IsReno(tp) && is_dupack) 1844 if (IsReno(tp) && is_dupack)
1817 tcp_add_reno_sack(tp); 1845 tcp_add_reno_sack(sk);
1818 } else { 1846 } else {
1819 int acked = prior_packets - tp->packets_out; 1847 int acked = prior_packets - tp->packets_out;
1820 if (IsReno(tp)) 1848 if (IsReno(tp))
@@ -1824,13 +1852,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1824 break; 1852 break;
1825 case TCP_CA_Loss: 1853 case TCP_CA_Loss:
1826 if (flag&FLAG_DATA_ACKED) 1854 if (flag&FLAG_DATA_ACKED)
1827 tp->retransmits = 0; 1855 icsk->icsk_retransmits = 0;
1828 if (!tcp_try_undo_loss(sk, tp)) { 1856 if (!tcp_try_undo_loss(sk, tp)) {
1829 tcp_moderate_cwnd(tp); 1857 tcp_moderate_cwnd(tp);
1830 tcp_xmit_retransmit_queue(sk); 1858 tcp_xmit_retransmit_queue(sk);
1831 return; 1859 return;
1832 } 1860 }
1833 if (tp->ca_state != TCP_CA_Open) 1861 if (icsk->icsk_ca_state != TCP_CA_Open)
1834 return; 1862 return;
1835 /* Loss is undone; fall through to processing in Open state. */ 1863 /* Loss is undone; fall through to processing in Open state. */
1836 default: 1864 default:
@@ -1838,10 +1866,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1838 if (tp->snd_una != prior_snd_una) 1866 if (tp->snd_una != prior_snd_una)
1839 tcp_reset_reno_sack(tp); 1867 tcp_reset_reno_sack(tp);
1840 if (is_dupack) 1868 if (is_dupack)
1841 tcp_add_reno_sack(tp); 1869 tcp_add_reno_sack(sk);
1842 } 1870 }
1843 1871
1844 if (tp->ca_state == TCP_CA_Disorder) 1872 if (icsk->icsk_ca_state == TCP_CA_Disorder)
1845 tcp_try_undo_dsack(sk, tp); 1873 tcp_try_undo_dsack(sk, tp);
1846 1874
1847 if (!tcp_time_to_recover(sk, tp)) { 1875 if (!tcp_time_to_recover(sk, tp)) {
@@ -1861,30 +1889,28 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1861 tp->undo_marker = tp->snd_una; 1889 tp->undo_marker = tp->snd_una;
1862 tp->undo_retrans = tp->retrans_out; 1890 tp->undo_retrans = tp->retrans_out;
1863 1891
1864 if (tp->ca_state < TCP_CA_CWR) { 1892 if (icsk->icsk_ca_state < TCP_CA_CWR) {
1865 if (!(flag&FLAG_ECE)) 1893 if (!(flag&FLAG_ECE))
1866 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1894 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1867 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1895 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1868 TCP_ECN_queue_cwr(tp); 1896 TCP_ECN_queue_cwr(tp);
1869 } 1897 }
1870 1898
1871 tp->snd_cwnd_cnt = 0; 1899 tp->snd_cwnd_cnt = 0;
1872 tcp_set_ca_state(tp, TCP_CA_Recovery); 1900 tcp_set_ca_state(sk, TCP_CA_Recovery);
1873 } 1901 }
1874 1902
1875 if (is_dupack || tcp_head_timedout(sk, tp)) 1903 if (is_dupack || tcp_head_timedout(sk, tp))
1876 tcp_update_scoreboard(sk, tp); 1904 tcp_update_scoreboard(sk, tp);
1877 tcp_cwnd_down(tp); 1905 tcp_cwnd_down(sk);
1878 tcp_xmit_retransmit_queue(sk); 1906 tcp_xmit_retransmit_queue(sk);
1879} 1907}
1880 1908
1881/* Read draft-ietf-tcplw-high-performance before mucking 1909/* Read draft-ietf-tcplw-high-performance before mucking
1882 * with this code. (Supersedes RFC1323) 1910 * with this code. (Supersedes RFC1323)
1883 */ 1911 */
1884static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) 1912static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
1885{ 1913{
1886 __u32 seq_rtt;
1887
1888 /* RTTM Rule: A TSecr value received in a segment is used to 1914 /* RTTM Rule: A TSecr value received in a segment is used to
1889 * update the averaged RTT measurement only if the segment 1915 * update the averaged RTT measurement only if the segment
1890 * acknowledges some new data, i.e., only if it advances the 1916 * acknowledges some new data, i.e., only if it advances the
@@ -1900,14 +1926,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
1900 * answer arrives rto becomes 120 seconds! If at least one of segments 1926 * answer arrives rto becomes 120 seconds! If at least one of segments
1901 * in window is lost... Voila. --ANK (010210) 1927 * in window is lost... Voila. --ANK (010210)
1902 */ 1928 */
1903 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 1929 struct tcp_sock *tp = tcp_sk(sk);
1904 tcp_rtt_estimator(tp, seq_rtt, usrtt); 1930 const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
1905 tcp_set_rto(tp); 1931 tcp_rtt_estimator(sk, seq_rtt, usrtt);
1906 tp->backoff = 0; 1932 tcp_set_rto(sk);
1907 tcp_bound_rto(tp); 1933 inet_csk(sk)->icsk_backoff = 0;
1934 tcp_bound_rto(sk);
1908} 1935}
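
The timestamp path needs no per-skb bookkeeping: seq_rtt = tcp_time_stamp - rcv_tsecr is the round trip measured in jiffies (e.g. with HZ = 1000, an echo of a timestamp taken 42 ticks earlier yields a 42 ms sample), and it feeds straight into tcp_rtt_estimator() and the RTO recomputation.
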
1909 1936
1910static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) 1937static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
1911{ 1938{
1912 /* We don't have a timestamp. Can only use 1939 /* We don't have a timestamp. Can only use
1913 * packets that are not retransmitted to determine 1940 * packets that are not retransmitted to determine
@@ -1921,27 +1948,29 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int
1921 if (flag & FLAG_RETRANS_DATA_ACKED) 1948 if (flag & FLAG_RETRANS_DATA_ACKED)
1922 return; 1949 return;
1923 1950
1924 tcp_rtt_estimator(tp, seq_rtt, usrtt); 1951 tcp_rtt_estimator(sk, seq_rtt, usrtt);
1925 tcp_set_rto(tp); 1952 tcp_set_rto(sk);
1926 tp->backoff = 0; 1953 inet_csk(sk)->icsk_backoff = 0;
1927 tcp_bound_rto(tp); 1954 tcp_bound_rto(sk);
1928} 1955}
1929 1956
1930static inline void tcp_ack_update_rtt(struct tcp_sock *tp, 1957static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
1931 int flag, s32 seq_rtt, u32 *usrtt) 1958 const s32 seq_rtt, u32 *usrtt)
1932{ 1959{
1960 const struct tcp_sock *tp = tcp_sk(sk);
1933 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 1961 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
1934 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 1962 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
1935 tcp_ack_saw_tstamp(tp, usrtt, flag); 1963 tcp_ack_saw_tstamp(sk, usrtt, flag);
1936 else if (seq_rtt >= 0) 1964 else if (seq_rtt >= 0)
1937 tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); 1965 tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
1938} 1966}
1939 1967
1940static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 1968static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
1941 u32 in_flight, int good) 1969 u32 in_flight, int good)
1942{ 1970{
1943 tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good); 1971 const struct inet_connection_sock *icsk = inet_csk(sk);
1944 tp->snd_cwnd_stamp = tcp_time_stamp; 1972 icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
1973 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
1945} 1974}
1946 1975
1947/* Restart timer after forward progress on connection. 1976/* Restart timer after forward progress on connection.
@@ -1951,9 +1980,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
1951static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) 1980static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
1952{ 1981{
1953 if (!tp->packets_out) { 1982 if (!tp->packets_out) {
1954 tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); 1983 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
1955 } else { 1984 } else {
1956 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1985 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
1957 } 1986 }
1958} 1987}
1959 1988
@@ -2068,9 +2097,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2068 seq_rtt = -1; 2097 seq_rtt = -1;
2069 } else if (seq_rtt < 0) 2098 } else if (seq_rtt < 0)
2070 seq_rtt = now - scb->when; 2099 seq_rtt = now - scb->when;
2071 if (seq_usrtt) 2100 if (seq_usrtt) {
2072 *seq_usrtt = (usnow.tv_sec - skb->stamp.tv_sec) * 1000000 2101 struct timeval tv;
2073 + (usnow.tv_usec - skb->stamp.tv_usec); 2102
2103 skb_get_timestamp(skb, &tv);
2104 *seq_usrtt = (usnow.tv_sec - tv.tv_sec) * 1000000
2105 + (usnow.tv_usec - tv.tv_usec);
2106 }
2074 2107
2075 if (sacked & TCPCB_SACKED_ACKED) 2108 if (sacked & TCPCB_SACKED_ACKED)
2076 tp->sacked_out -= tcp_skb_pcount(skb); 2109 tp->sacked_out -= tcp_skb_pcount(skb);
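
The replacement computes the microsecond RTT sample from two struct timeval readings, with skb_get_timestamp() as the new accessor for the per-skb stamp. A self-contained sketch of the subtraction, assuming the two samples are close enough that the multiply cannot overflow:

#include <sys/time.h>

/*
 * Delta between two struct timeval samples in microseconds; b is the
 * later sample.  A negative usec difference is absorbed by the seconds
 * term (e.g. 2.000100 - 1.999900 = 200 usec).
 */
static long tv_delta_usec(const struct timeval *a, const struct timeval *b)
{
	return (b->tv_sec - a->tv_sec) * 1000000L +
	       (b->tv_usec - a->tv_usec);
}
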
@@ -2085,16 +2118,17 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2085 seq_rtt = now - scb->when; 2118 seq_rtt = now - scb->when;
2086 tcp_dec_pcount_approx(&tp->fackets_out, skb); 2119 tcp_dec_pcount_approx(&tp->fackets_out, skb);
2087 tcp_packets_out_dec(tp, skb); 2120 tcp_packets_out_dec(tp, skb);
2088 __skb_unlink(skb, skb->list); 2121 __skb_unlink(skb, &sk->sk_write_queue);
2089 sk_stream_free_skb(sk, skb); 2122 sk_stream_free_skb(sk, skb);
2090 } 2123 }
2091 2124
2092 if (acked&FLAG_ACKED) { 2125 if (acked&FLAG_ACKED) {
2093 tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); 2126 const struct inet_connection_sock *icsk = inet_csk(sk);
2127 tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
2094 tcp_ack_packets_out(sk, tp); 2128 tcp_ack_packets_out(sk, tp);
2095 2129
2096 if (tp->ca_ops->pkts_acked) 2130 if (icsk->icsk_ca_ops->pkts_acked)
2097 tp->ca_ops->pkts_acked(tp, pkts_acked); 2131 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
2098 } 2132 }
2099 2133
2100#if FASTRETRANS_DEBUG > 0 2134#if FASTRETRANS_DEBUG > 0
@@ -2102,19 +2136,20 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2102 BUG_TRAP((int)tp->lost_out >= 0); 2136 BUG_TRAP((int)tp->lost_out >= 0);
2103 BUG_TRAP((int)tp->retrans_out >= 0); 2137 BUG_TRAP((int)tp->retrans_out >= 0);
2104 if (!tp->packets_out && tp->rx_opt.sack_ok) { 2138 if (!tp->packets_out && tp->rx_opt.sack_ok) {
2139 const struct inet_connection_sock *icsk = inet_csk(sk);
2105 if (tp->lost_out) { 2140 if (tp->lost_out) {
2106 printk(KERN_DEBUG "Leak l=%u %d\n", 2141 printk(KERN_DEBUG "Leak l=%u %d\n",
2107 tp->lost_out, tp->ca_state); 2142 tp->lost_out, icsk->icsk_ca_state);
2108 tp->lost_out = 0; 2143 tp->lost_out = 0;
2109 } 2144 }
2110 if (tp->sacked_out) { 2145 if (tp->sacked_out) {
2111 printk(KERN_DEBUG "Leak s=%u %d\n", 2146 printk(KERN_DEBUG "Leak s=%u %d\n",
2112 tp->sacked_out, tp->ca_state); 2147 tp->sacked_out, icsk->icsk_ca_state);
2113 tp->sacked_out = 0; 2148 tp->sacked_out = 0;
2114 } 2149 }
2115 if (tp->retrans_out) { 2150 if (tp->retrans_out) {
2116 printk(KERN_DEBUG "Leak r=%u %d\n", 2151 printk(KERN_DEBUG "Leak r=%u %d\n",
2117 tp->retrans_out, tp->ca_state); 2152 tp->retrans_out, icsk->icsk_ca_state);
2118 tp->retrans_out = 0; 2153 tp->retrans_out = 0;
2119 } 2154 }
2120 } 2155 }
@@ -2125,40 +2160,43 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2125 2160
2126static void tcp_ack_probe(struct sock *sk) 2161static void tcp_ack_probe(struct sock *sk)
2127{ 2162{
2128 struct tcp_sock *tp = tcp_sk(sk); 2163 const struct tcp_sock *tp = tcp_sk(sk);
2164 struct inet_connection_sock *icsk = inet_csk(sk);
2129 2165
2130 /* Was it a usable window open? */ 2166 /* Was it a usable window open? */
2131 2167
2132 if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, 2168 if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
2133 tp->snd_una + tp->snd_wnd)) { 2169 tp->snd_una + tp->snd_wnd)) {
2134 tp->backoff = 0; 2170 icsk->icsk_backoff = 0;
2135 tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0); 2171 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
2136 /* Socket must be woken up by a subsequent tcp_data_snd_check(). 2172 /* Socket must be woken up by a subsequent tcp_data_snd_check().
2137 * This function is not for general use! 2173 * This function is not for general use!
2138 */ 2174 */
2139 } else { 2175 } else {
2140 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, 2176 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2141 min(tp->rto << tp->backoff, TCP_RTO_MAX)); 2177 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2178 TCP_RTO_MAX);
2142 } 2179 }
2143} 2180}
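
A worked example of the rearm above, assuming HZ = 1000 and icsk_rto = 200 jiffies (200 ms): after three unanswered zero-window probes icsk_backoff = 3 and the timer is set to min(200 << 3, TCP_RTO_MAX) = 1600 ms; the shift keeps doubling the interval until the TCP_RTO_MAX ceiling (120 s) takes over.
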
2144 2181
2145static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag) 2182static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
2146{ 2183{
2147 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 2184 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
2148 tp->ca_state != TCP_CA_Open); 2185 inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
2149} 2186}
2150 2187
2151static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) 2188static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
2152{ 2189{
2190 const struct tcp_sock *tp = tcp_sk(sk);
2153 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 2191 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
2154 !((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR)); 2192 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
2155} 2193}
2156 2194
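
The mask test in tcp_may_raise_cwnd() avoids two equality checks by relying on the convention that each TCPF_CA_* flag equals 1 shifted by the matching TCP_CA_* state. A minimal sketch of the idiom with stand-in names:

/* Stand-in names; the kernel's enum/flag pairs follow the same shape. */
enum ca_state { CA_Open, CA_Disorder, CA_CWR, CA_Recovery, CA_Loss };
#define CAF(s)	(1 << (s))

/* One AND tests membership in a set of states. */
static int in_cwnd_reduction(enum ca_state state)
{
	return (CAF(state) & (CAF(CA_Recovery) | CAF(CA_CWR))) != 0;
}
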
2157/* Check that window update is acceptable. 2195/* Check that window update is acceptable.
2158 * The function assumes that snd_una<=ack<=snd_next. 2196 * The function assumes that snd_una<=ack<=snd_next.
2159 */ 2197 */
2160static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, 2198static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
2161 u32 ack_seq, u32 nwin) 2199 const u32 ack_seq, const u32 nwin)
2162{ 2200{
2163 return (after(ack, tp->snd_una) || 2201 return (after(ack, tp->snd_una) ||
2164 after(ack_seq, tp->snd_wl1) || 2202 after(ack_seq, tp->snd_wl1) ||
@@ -2241,6 +2279,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2241/* This routine deals with incoming acks, but not outgoing ones. */ 2279/* This routine deals with incoming acks, but not outgoing ones. */
2242static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 2280static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2243{ 2281{
2282 struct inet_connection_sock *icsk = inet_csk(sk);
2244 struct tcp_sock *tp = tcp_sk(sk); 2283 struct tcp_sock *tp = tcp_sk(sk);
2245 u32 prior_snd_una = tp->snd_una; 2284 u32 prior_snd_una = tp->snd_una;
2246 u32 ack_seq = TCP_SKB_CB(skb)->seq; 2285 u32 ack_seq = TCP_SKB_CB(skb)->seq;
@@ -2268,7 +2307,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2268 tp->snd_una = ack; 2307 tp->snd_una = ack;
2269 flag |= FLAG_WIN_UPDATE; 2308 flag |= FLAG_WIN_UPDATE;
2270 2309
2271 tcp_ca_event(tp, CA_EVENT_FAST_ACK); 2310 tcp_ca_event(sk, CA_EVENT_FAST_ACK);
2272 2311
2273 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); 2312 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
2274 } else { 2313 } else {
@@ -2285,7 +2324,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2285 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) 2324 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2286 flag |= FLAG_ECE; 2325 flag |= FLAG_ECE;
2287 2326
2288 tcp_ca_event(tp, CA_EVENT_SLOW_ACK); 2327 tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
2289 } 2328 }
2290 2329
2291 /* We passed data and got it acked, remove any soft error 2330 /* We passed data and got it acked, remove any soft error
@@ -2301,19 +2340,19 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2301 2340
2302 /* See if we can take anything off of the retransmit queue. */ 2341 /* See if we can take anything off of the retransmit queue. */
2303 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, 2342 flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
2304 tp->ca_ops->rtt_sample ? &seq_usrtt : NULL); 2343 icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
2305 2344
2306 if (tp->frto_counter) 2345 if (tp->frto_counter)
2307 tcp_process_frto(sk, prior_snd_una); 2346 tcp_process_frto(sk, prior_snd_una);
2308 2347
2309 if (tcp_ack_is_dubious(tp, flag)) { 2348 if (tcp_ack_is_dubious(sk, flag)) {
2310 /* Advance CWND, if state allows this. */ 2349 /* Advance CWND, if state allows this. */
2311 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag)) 2350 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2312 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0); 2351 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
2313 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); 2352 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2314 } else { 2353 } else {
2315 if ((flag & FLAG_DATA_ACKED)) 2354 if ((flag & FLAG_DATA_ACKED))
2316 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1); 2355 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
2317 } 2356 }
2318 2357
2319 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) 2358 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -2322,7 +2361,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2322 return 1; 2361 return 1;
2323 2362
2324no_queue: 2363no_queue:
2325 tp->probes_out = 0; 2364 icsk->icsk_probes_out = 0;
2326 2365
2327 /* If this ack opens up a zero window, clear backoff. It was 2366 /* If this ack opens up a zero window, clear backoff. It was
2328 * being used to time the probes, and is probably far higher than 2367 * being used to time the probes, and is probably far higher than
@@ -2500,8 +2539,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
2500 * up to bandwidth of 18Gigabit/sec. 8) ] 2539 * up to bandwidth of 18Gigabit/sec. 8) ]
2501 */ 2540 */
2502 2541
2503static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) 2542static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
2504{ 2543{
2544 struct tcp_sock *tp = tcp_sk(sk);
2505 struct tcphdr *th = skb->h.th; 2545 struct tcphdr *th = skb->h.th;
2506 u32 seq = TCP_SKB_CB(skb)->seq; 2546 u32 seq = TCP_SKB_CB(skb)->seq;
2507 u32 ack = TCP_SKB_CB(skb)->ack_seq; 2547 u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -2516,14 +2556,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
2516 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 2556 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
2517 2557
2518 /* 4. ... and sits in replay window. */ 2558 /* 4. ... and sits in replay window. */
2519 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ); 2559 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
2520} 2560}
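
The final clause scales the RTO from jiffies into rough timestamp-clock units: with HZ = 1000, (icsk_rto * 1024) / HZ is about the RTO in milliseconds (1024 standing in for 1000 keeps the scaling cheap), which matches the common assumption that peers run their RFC 1323 timestamp clock at 1 kHz or slower. An ACK whose tsval lags ts_recent by no more than roughly one RTO is thus treated as benign reordering rather than a PAWS violation.
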
2521 2561
2522static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) 2562static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
2523{ 2563{
2564 const struct tcp_sock *tp = tcp_sk(sk);
2524 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 2565 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
2525 xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 2566 xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
2526 !tcp_disordered_ack(tp, skb)); 2567 !tcp_disordered_ack(sk, skb));
2527} 2568}
2528 2569
2529/* Check segment sequence number for validity. 2570/* Check segment sequence number for validity.
@@ -2586,7 +2627,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2586{ 2627{
2587 struct tcp_sock *tp = tcp_sk(sk); 2628 struct tcp_sock *tp = tcp_sk(sk);
2588 2629
2589 tcp_schedule_ack(tp); 2630 inet_csk_schedule_ack(sk);
2590 2631
2591 sk->sk_shutdown |= RCV_SHUTDOWN; 2632 sk->sk_shutdown |= RCV_SHUTDOWN;
2592 sock_set_flag(sk, SOCK_DONE); 2633 sock_set_flag(sk, SOCK_DONE);
@@ -2596,7 +2637,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2596 case TCP_ESTABLISHED: 2637 case TCP_ESTABLISHED:
2597 /* Move to CLOSE_WAIT */ 2638 /* Move to CLOSE_WAIT */
2598 tcp_set_state(sk, TCP_CLOSE_WAIT); 2639 tcp_set_state(sk, TCP_CLOSE_WAIT);
2599 tp->ack.pingpong = 1; 2640 inet_csk(sk)->icsk_ack.pingpong = 1;
2600 break; 2641 break;
2601 2642
2602 case TCP_CLOSE_WAIT: 2643 case TCP_CLOSE_WAIT:
@@ -2694,7 +2735,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
2694 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 2735 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
2695 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 2736 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2696 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 2737 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
2697 tcp_enter_quickack_mode(tp); 2738 tcp_enter_quickack_mode(sk);
2698 2739
2699 if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { 2740 if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
2700 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 2741 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -2853,7 +2894,7 @@ static void tcp_ofo_queue(struct sock *sk)
2853 2894
2854 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 2895 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
2855 SOCK_DEBUG(sk, "ofo packet was already received \n"); 2896 SOCK_DEBUG(sk, "ofo packet was already received \n");
2856 __skb_unlink(skb, skb->list); 2897 __skb_unlink(skb, &tp->out_of_order_queue);
2857 __kfree_skb(skb); 2898 __kfree_skb(skb);
2858 continue; 2899 continue;
2859 } 2900 }
@@ -2861,7 +2902,7 @@ static void tcp_ofo_queue(struct sock *sk)
2861 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 2902 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
2862 TCP_SKB_CB(skb)->end_seq); 2903 TCP_SKB_CB(skb)->end_seq);
2863 2904
2864 __skb_unlink(skb, skb->list); 2905 __skb_unlink(skb, &tp->out_of_order_queue);
2865 __skb_queue_tail(&sk->sk_receive_queue, skb); 2906 __skb_queue_tail(&sk->sk_receive_queue, skb);
2866 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 2907 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
2867 if(skb->h.th->fin) 2908 if(skb->h.th->fin)
@@ -2942,7 +2983,7 @@ queue_and_out:
2942 * gap in queue is filled. 2983 * gap in queue is filled.
2943 */ 2984 */
2944 if (skb_queue_empty(&tp->out_of_order_queue)) 2985 if (skb_queue_empty(&tp->out_of_order_queue))
2945 tp->ack.pingpong = 0; 2986 inet_csk(sk)->icsk_ack.pingpong = 0;
2946 } 2987 }
2947 2988
2948 if (tp->rx_opt.num_sacks) 2989 if (tp->rx_opt.num_sacks)
@@ -2963,8 +3004,8 @@ queue_and_out:
2963 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 3004 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
2964 3005
2965out_of_window: 3006out_of_window:
2966 tcp_enter_quickack_mode(tp); 3007 tcp_enter_quickack_mode(sk);
2967 tcp_schedule_ack(tp); 3008 inet_csk_schedule_ack(sk);
2968drop: 3009drop:
2969 __kfree_skb(skb); 3010 __kfree_skb(skb);
2970 return; 3011 return;
@@ -2974,7 +3015,7 @@ drop:
2974 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 3015 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
2975 goto out_of_window; 3016 goto out_of_window;
2976 3017
2977 tcp_enter_quickack_mode(tp); 3018 tcp_enter_quickack_mode(sk);
2978 3019
2979 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 3020 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2980 /* Partial packet, seq < rcv_next < end_seq */ 3021 /* Partial packet, seq < rcv_next < end_seq */
@@ -3003,7 +3044,7 @@ drop:
3003 3044
3004 /* Disable header prediction. */ 3045 /* Disable header prediction. */
3005 tp->pred_flags = 0; 3046 tp->pred_flags = 0;
3006 tcp_schedule_ack(tp); 3047 inet_csk_schedule_ack(sk);
3007 3048
3008 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 3049 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
3009 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 3050 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -3027,7 +3068,7 @@ drop:
3027 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3068 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3028 3069
3029 if (seq == TCP_SKB_CB(skb1)->end_seq) { 3070 if (seq == TCP_SKB_CB(skb1)->end_seq) {
3030 __skb_append(skb1, skb); 3071 __skb_append(skb1, skb, &tp->out_of_order_queue);
3031 3072
3032 if (!tp->rx_opt.num_sacks || 3073 if (!tp->rx_opt.num_sacks ||
3033 tp->selective_acks[0].end_seq != seq) 3074 tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3112,7 @@ drop:
3071 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq); 3112 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
3072 break; 3113 break;
3073 } 3114 }
3074 __skb_unlink(skb1, skb1->list); 3115 __skb_unlink(skb1, &tp->out_of_order_queue);
3075 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); 3116 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
3076 __kfree_skb(skb1); 3117 __kfree_skb(skb1);
3077 } 3118 }
@@ -3088,8 +3129,9 @@ add_sack:
3088 * simplifies code) 3129 * simplifies code)
3089 */ 3130 */
3090static void 3131static void
3091tcp_collapse(struct sock *sk, struct sk_buff *head, 3132tcp_collapse(struct sock *sk, struct sk_buff_head *list,
3092 struct sk_buff *tail, u32 start, u32 end) 3133 struct sk_buff *head, struct sk_buff *tail,
3134 u32 start, u32 end)
3093{ 3135{
3094 struct sk_buff *skb; 3136 struct sk_buff *skb;
3095 3137
@@ -3099,7 +3141,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3099 /* No new bits? It is possible on ofo queue. */ 3141 /* No new bits? It is possible on ofo queue. */
3100 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3142 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3101 struct sk_buff *next = skb->next; 3143 struct sk_buff *next = skb->next;
3102 __skb_unlink(skb, skb->list); 3144 __skb_unlink(skb, list);
3103 __kfree_skb(skb); 3145 __kfree_skb(skb);
3104 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3146 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3105 skb = next; 3147 skb = next;
@@ -3145,7 +3187,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3145 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head); 3187 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
3146 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 3188 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
3147 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 3189 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
3148 __skb_insert(nskb, skb->prev, skb, skb->list); 3190 __skb_insert(nskb, skb->prev, skb, list);
3149 sk_stream_set_owner_r(nskb, sk); 3191 sk_stream_set_owner_r(nskb, sk);
3150 3192
3151 /* Copy data, releasing collapsed skbs. */ 3193 /* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3206,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3164 } 3206 }
3165 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3207 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3166 struct sk_buff *next = skb->next; 3208 struct sk_buff *next = skb->next;
3167 __skb_unlink(skb, skb->list); 3209 __skb_unlink(skb, list);
3168 __kfree_skb(skb); 3210 __kfree_skb(skb);
3169 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3211 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3170 skb = next; 3212 skb = next;
@@ -3200,7 +3242,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
3200 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 3242 if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
3201 after(TCP_SKB_CB(skb)->seq, end) || 3243 after(TCP_SKB_CB(skb)->seq, end) ||
3202 before(TCP_SKB_CB(skb)->end_seq, start)) { 3244 before(TCP_SKB_CB(skb)->end_seq, start)) {
3203 tcp_collapse(sk, head, skb, start, end); 3245 tcp_collapse(sk, &tp->out_of_order_queue,
3246 head, skb, start, end);
3204 head = skb; 3247 head = skb;
3205 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 3248 if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3206 break; 3249 break;
@@ -3237,7 +3280,8 @@ static int tcp_prune_queue(struct sock *sk)
3237 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 3280 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3238 3281
3239 tcp_collapse_ofo_queue(sk); 3282 tcp_collapse_ofo_queue(sk);
3240 tcp_collapse(sk, sk->sk_receive_queue.next, 3283 tcp_collapse(sk, &sk->sk_receive_queue,
3284 sk->sk_receive_queue.next,
3241 (struct sk_buff*)&sk->sk_receive_queue, 3285 (struct sk_buff*)&sk->sk_receive_queue,
3242 tp->copied_seq, tp->rcv_nxt); 3286 tp->copied_seq, tp->rcv_nxt);
3243 sk_stream_mem_reclaim(sk); 3287 sk_stream_mem_reclaim(sk);
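
Interleaved with the icsk work, these queue hunks track the removal of the skb->list backpointer: __skb_unlink(), __skb_append(), __skb_insert() and tcp_collapse() now take the owning sk_buff_head explicitly. A small sketch of the resulting calling convention (drop_purged() is a hypothetical helper, not a kernel function):

#include <linux/skbuff.h>

/*
 * Post-change convention: the caller names the queue, since an skb no
 * longer records which list it sits on.
 */
static void drop_purged(struct sk_buff_head *list, struct sk_buff *skb)
{
	__skb_unlink(skb, list);
	__kfree_skb(skb);
}
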
@@ -3286,12 +3330,12 @@ void tcp_cwnd_application_limited(struct sock *sk)
3286{ 3330{
3287 struct tcp_sock *tp = tcp_sk(sk); 3331 struct tcp_sock *tp = tcp_sk(sk);
3288 3332
3289 if (tp->ca_state == TCP_CA_Open && 3333 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
3290 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 3334 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
3291 /* Limited by application or receiver window. */ 3335 /* Limited by application or receiver window. */
3292 u32 win_used = max(tp->snd_cwnd_used, 2U); 3336 u32 win_used = max(tp->snd_cwnd_used, 2U);
3293 if (win_used < tp->snd_cwnd) { 3337 if (win_used < tp->snd_cwnd) {
3294 tp->snd_ssthresh = tcp_current_ssthresh(tp); 3338 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3295 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 3339 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
3296 } 3340 }
3297 tp->snd_cwnd_used = 0; 3341 tp->snd_cwnd_used = 0;
@@ -3370,13 +3414,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3370 struct tcp_sock *tp = tcp_sk(sk); 3414 struct tcp_sock *tp = tcp_sk(sk);
3371 3415
3372 /* More than one full frame received... */ 3416 /* More than one full frame received... */
3373 if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss 3417 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
3374 /* ... and right edge of window advances far enough. 3418 /* ... and right edge of window advances far enough.
3375 * (tcp_recvmsg() will send ACK otherwise). Or... 3419 * (tcp_recvmsg() will send ACK otherwise). Or...
3376 */ 3420 */
3377 && __tcp_select_window(sk) >= tp->rcv_wnd) || 3421 && __tcp_select_window(sk) >= tp->rcv_wnd) ||
3378 /* We ACK each frame or... */ 3422 /* We ACK each frame or... */
3379 tcp_in_quickack_mode(tp) || 3423 tcp_in_quickack_mode(sk) ||
3380 /* We have out of order data. */ 3424 /* We have out of order data. */
3381 (ofo_possible && 3425 (ofo_possible &&
3382 skb_peek(&tp->out_of_order_queue))) { 3426 skb_peek(&tp->out_of_order_queue))) {
@@ -3390,8 +3434,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3390 3434
3391static __inline__ void tcp_ack_snd_check(struct sock *sk) 3435static __inline__ void tcp_ack_snd_check(struct sock *sk)
3392{ 3436{
3393 struct tcp_sock *tp = tcp_sk(sk); 3437 if (!inet_csk_ack_scheduled(sk)) {
3394 if (!tcp_ack_scheduled(tp)) {
3395 /* We sent a data segment already. */ 3438 /* We sent a data segment already. */
3396 return; 3439 return;
3397 } 3440 }
@@ -3462,7 +3505,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3462 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 3505 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
3463 tp->copied_seq++; 3506 tp->copied_seq++;
3464 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 3507 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3465 __skb_unlink(skb, skb->list); 3508 __skb_unlink(skb, &sk->sk_receive_queue);
3466 __kfree_skb(skb); 3509 __kfree_skb(skb);
3467 } 3510 }
3468 } 3511 }
@@ -3645,7 +3688,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3645 tp->rcv_nxt == tp->rcv_wup) 3688 tp->rcv_nxt == tp->rcv_wup)
3646 tcp_store_ts_recent(tp); 3689 tcp_store_ts_recent(tp);
3647 3690
3648 tcp_rcv_rtt_measure_ts(tp, skb); 3691 tcp_rcv_rtt_measure_ts(sk, skb);
3649 3692
3650 /* We know that such packets are checksummed 3693 /* We know that such packets are checksummed
3651 * on entry. 3694 * on entry.
@@ -3678,7 +3721,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3678 tp->rcv_nxt == tp->rcv_wup) 3721 tp->rcv_nxt == tp->rcv_wup)
3679 tcp_store_ts_recent(tp); 3722 tcp_store_ts_recent(tp);
3680 3723
3681 tcp_rcv_rtt_measure_ts(tp, skb); 3724 tcp_rcv_rtt_measure_ts(sk, skb);
3682 3725
3683 __skb_pull(skb, tcp_header_len); 3726 __skb_pull(skb, tcp_header_len);
3684 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 3727 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3699,7 +3742,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3699 tp->rcv_nxt == tp->rcv_wup) 3742 tp->rcv_nxt == tp->rcv_wup)
3700 tcp_store_ts_recent(tp); 3743 tcp_store_ts_recent(tp);
3701 3744
3702 tcp_rcv_rtt_measure_ts(tp, skb); 3745 tcp_rcv_rtt_measure_ts(sk, skb);
3703 3746
3704 if ((int)skb->truesize > sk->sk_forward_alloc) 3747 if ((int)skb->truesize > sk->sk_forward_alloc)
3705 goto step5; 3748 goto step5;
@@ -3719,7 +3762,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3719 /* Well, only one small jumplet in fast path... */ 3762 /* Well, only one small jumplet in fast path... */
3720 tcp_ack(sk, skb, FLAG_DATA); 3763 tcp_ack(sk, skb, FLAG_DATA);
3721 tcp_data_snd_check(sk, tp); 3764 tcp_data_snd_check(sk, tp);
3722 if (!tcp_ack_scheduled(tp)) 3765 if (!inet_csk_ack_scheduled(sk))
3723 goto no_ack; 3766 goto no_ack;
3724 } 3767 }
3725 3768
@@ -3741,7 +3784,7 @@ slow_path:
3741 * RFC1323: H1. Apply PAWS check first. 3784 * RFC1323: H1. Apply PAWS check first.
3742 */ 3785 */
3743 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 3786 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
3744 tcp_paws_discard(tp, skb)) { 3787 tcp_paws_discard(sk, skb)) {
3745 if (!th->rst) { 3788 if (!th->rst) {
3746 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 3789 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
3747 tcp_send_dupack(sk, skb); 3790 tcp_send_dupack(sk, skb);
@@ -3788,7 +3831,7 @@ step5:
3788 if(th->ack) 3831 if(th->ack)
3789 tcp_ack(sk, skb, FLAG_SLOWPATH); 3832 tcp_ack(sk, skb, FLAG_SLOWPATH);
3790 3833
3791 tcp_rcv_rtt_measure_ts(tp, skb); 3834 tcp_rcv_rtt_measure_ts(sk, skb);
3792 3835
3793 /* Process urgent data. */ 3836 /* Process urgent data. */
3794 tcp_urg(sk, skb, th); 3837 tcp_urg(sk, skb, th);
@@ -3817,6 +3860,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3817 tcp_parse_options(skb, &tp->rx_opt, 0); 3860 tcp_parse_options(skb, &tp->rx_opt, 0);
3818 3861
3819 if (th->ack) { 3862 if (th->ack) {
3863 struct inet_connection_sock *icsk;
3820 /* rfc793: 3864 /* rfc793:
3821 * "If the state is SYN-SENT then 3865 * "If the state is SYN-SENT then
3822 * first check the ACK bit 3866 * first check the ACK bit
@@ -3920,7 +3964,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3920 3964
3921 tcp_init_metrics(sk); 3965 tcp_init_metrics(sk);
3922 3966
3923 tcp_init_congestion_control(tp); 3967 tcp_init_congestion_control(sk);
3924 3968
3925 /* Prevent spurious tcp_cwnd_restart() on first data 3969 /* Prevent spurious tcp_cwnd_restart() on first data
3926 * packet. 3970 * packet.
@@ -3930,7 +3974,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3930 tcp_init_buffer_space(sk); 3974 tcp_init_buffer_space(sk);
3931 3975
3932 if (sock_flag(sk, SOCK_KEEPOPEN)) 3976 if (sock_flag(sk, SOCK_KEEPOPEN))
3933 tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); 3977 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
3934 3978
3935 if (!tp->rx_opt.snd_wscale) 3979 if (!tp->rx_opt.snd_wscale)
3936 __tcp_fast_path_on(tp, tp->snd_wnd); 3980 __tcp_fast_path_on(tp, tp->snd_wnd);
@@ -3942,7 +3986,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3942 sk_wake_async(sk, 0, POLL_OUT); 3986 sk_wake_async(sk, 0, POLL_OUT);
3943 } 3987 }
3944 3988
3945 if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) { 3989 icsk = inet_csk(sk);
3990
3991 if (sk->sk_write_pending ||
3992 icsk->icsk_accept_queue.rskq_defer_accept ||
3993 icsk->icsk_ack.pingpong) {
3946 /* Save one ACK. Data will be ready after 3994 /* Save one ACK. Data will be ready after
3947 * several ticks, if write_pending is set. 3995 * several ticks, if write_pending is set.
3948 * 3996 *
@@ -3950,12 +3998,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3950 * look so _wonderfully_ clever, that I was not able 3998 * look so _wonderfully_ clever, that I was not able
3951 * to stand against the temptation 8) --ANK 3999 * to stand against the temptation 8) --ANK
3952 */ 4000 */
3953 tcp_schedule_ack(tp); 4001 inet_csk_schedule_ack(sk);
3954 tp->ack.lrcvtime = tcp_time_stamp; 4002 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
3955 tp->ack.ato = TCP_ATO_MIN; 4003 icsk->icsk_ack.ato = TCP_ATO_MIN;
3956 tcp_incr_quickack(tp); 4004 tcp_incr_quickack(sk);
3957 tcp_enter_quickack_mode(tp); 4005 tcp_enter_quickack_mode(sk);
3958 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); 4006 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
4007 TCP_DELACK_MAX, TCP_RTO_MAX);
3959 4008
3960discard: 4009discard:
3961 __kfree_skb(skb); 4010 __kfree_skb(skb);
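
The sequence above schedules an ACK rather than sending one: the delayed-ACK clock is seeded by hand (lrcvtime, ato), quickack credit is granted, and the delack timer is armed at TCP_DELACK_MAX as a backstop, so if the pending write produces data within a few ticks the ACK piggybacks on it; otherwise the timer sends it, saving one bare ACK in the common case.
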
@@ -4111,7 +4160,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4111 } 4160 }
4112 4161
4113 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4162 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4114 tcp_paws_discard(tp, skb)) { 4163 tcp_paws_discard(sk, skb)) {
4115 if (!th->rst) { 4164 if (!th->rst) {
4116 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 4165 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
4117 tcp_send_dupack(sk, skb); 4166 tcp_send_dupack(sk, skb);
@@ -4180,7 +4229,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4180 */ 4229 */
4181 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 4230 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4182 !tp->srtt) 4231 !tp->srtt)
4183 tcp_ack_saw_tstamp(tp, 0, 0); 4232 tcp_ack_saw_tstamp(sk, NULL, 0);
4184 4233
4185 if (tp->rx_opt.tstamp_ok) 4234 if (tp->rx_opt.tstamp_ok)
4186 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 4235 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -4192,7 +4241,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4192 4241
4193 tcp_init_metrics(sk); 4242 tcp_init_metrics(sk);
4194 4243
4195 tcp_init_congestion_control(tp); 4244 tcp_init_congestion_control(sk);
4196 4245
4197 /* Prevent spurious tcp_cwnd_restart() on 4246 /* Prevent spurious tcp_cwnd_restart() on
4198 * first data packet. 4247 * first data packet.
@@ -4227,9 +4276,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4227 return 1; 4276 return 1;
4228 } 4277 }
4229 4278
4230 tmo = tcp_fin_time(tp); 4279 tmo = tcp_fin_time(sk);
4231 if (tmo > TCP_TIMEWAIT_LEN) { 4280 if (tmo > TCP_TIMEWAIT_LEN) {
4232 tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 4281 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
4233 } else if (th->fin || sock_owned_by_user(sk)) { 4282 } else if (th->fin || sock_owned_by_user(sk)) {
4234 /* Bad case. We could lose such a FIN otherwise. 4283 /* Bad case. We could lose such a FIN otherwise.
4235 * It is not a big problem, but it looks confusing 4284 * It is not a big problem, but it looks confusing
@@ -4237,7 +4286,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4237 * if it spins in bh_lock_sock(), but it is really 4286 * if it spins in bh_lock_sock(), but it is really
4238 * a marginal case. 4287 * a marginal case.
4239 */ 4288 */
4240 tcp_reset_keepalive_timer(sk, tmo); 4289 inet_csk_reset_keepalive_timer(sk, tmo);
4241 } else { 4290 } else {
4242 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 4291 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
4243 goto discard; 4292 goto discard;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 67c670886c1f..13dfb391cdf1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -64,7 +64,9 @@
64#include <linux/times.h> 64#include <linux/times.h>
65 65
66#include <net/icmp.h> 66#include <net/icmp.h>
67#include <net/inet_hashtables.h>
67#include <net/tcp.h> 68#include <net/tcp.h>
69#include <net/transp_v6.h>
68#include <net/ipv6.h> 70#include <net/ipv6.h>
69#include <net/inet_common.h> 71#include <net/inet_common.h>
70#include <net/xfrm.h> 72#include <net/xfrm.h>
@@ -75,7 +77,6 @@
75#include <linux/proc_fs.h> 77#include <linux/proc_fs.h>
76#include <linux/seq_file.h> 78#include <linux/seq_file.h>
77 79
78extern int sysctl_ip_dynaddr;
79int sysctl_tcp_tw_reuse; 80int sysctl_tcp_tw_reuse;
80int sysctl_tcp_low_latency; 81int sysctl_tcp_low_latency;
81 82
@@ -88,463 +89,29 @@ static struct socket *tcp_socket;
88void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, 89void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
89 struct sk_buff *skb); 90 struct sk_buff *skb);
90 91
91struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { 92struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
92 .__tcp_lhash_lock = RW_LOCK_UNLOCKED, 93 .lhash_lock = RW_LOCK_UNLOCKED,
93 .__tcp_lhash_users = ATOMIC_INIT(0), 94 .lhash_users = ATOMIC_INIT(0),
94 .__tcp_lhash_wait 95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
95 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait), 96 .portalloc_lock = SPIN_LOCK_UNLOCKED,
96 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED 97 .port_rover = 1024 - 1,
97}; 98};
98 99
99/*
100 * This array holds the first and last local port number.
101 * For high-usage systems, use sysctl to change this to
102 * 32768-61000
103 */
104int sysctl_local_port_range[2] = { 1024, 4999 };
105int tcp_port_rover = 1024 - 1;
106
107static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
108 __u32 faddr, __u16 fport)
109{
110 int h = (laddr ^ lport) ^ (faddr ^ fport);
111 h ^= h >> 16;
112 h ^= h >> 8;
113 return h & (tcp_ehash_size - 1);
114}
115
116static __inline__ int tcp_sk_hashfn(struct sock *sk)
117{
118 struct inet_sock *inet = inet_sk(sk);
119 __u32 laddr = inet->rcv_saddr;
120 __u16 lport = inet->num;
121 __u32 faddr = inet->daddr;
122 __u16 fport = inet->dport;
123
124 return tcp_hashfn(laddr, lport, faddr, fport);
125}
126
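The two helpers being deleted above mix the connection 4-tuple by XOR, fold the high bits down, and mask with the power-of-two table size. A runnable copy of the same fold, with 512 as an assumed stand-in for tcp_ehash_size:

#include <stdio.h>
#include <stdint.h>

/* Same xor-and-fold as the deleted tcp_hashfn(); 512 is an assumed
 * power-of-two stand-in for tcp_ehash_size. */
static int demo_hashfn(uint32_t laddr, uint16_t lport,
		       uint32_t faddr, uint16_t fport)
{
	uint32_t h = (laddr ^ lport) ^ (faddr ^ fport);

	h ^= h >> 16;	/* fold the top halfword in */
	h ^= h >> 8;	/* then the next byte       */
	return h & (512 - 1);
}

int main(void)
{
	/* 10.0.0.1:33000 -> 10.0.0.2:80, addresses as host-order words */
	printf("bucket %d\n",
	       demo_hashfn(0x0a000001, 33000, 0x0a000002, 80));
	return 0;
}

The replacement, inet_ehashfn(), keeps this scheme but takes the table size as an argument (visible in the __tcp_v4_check_established hunk below) instead of reading a TCP-private global.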
127/* Allocate and initialize a new TCP local port bind bucket.
128 * The bindhash mutex for snum's hash chain must be held here.
129 */
130struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
131 unsigned short snum)
132{
133 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
134 SLAB_ATOMIC);
135 if (tb) {
136 tb->port = snum;
137 tb->fastreuse = 0;
138 INIT_HLIST_HEAD(&tb->owners);
139 hlist_add_head(&tb->node, &head->chain);
140 }
141 return tb;
142}
143
144/* Caller must hold hashbucket lock for this tb with local BH disabled */
145void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
146{
147 if (hlist_empty(&tb->owners)) {
148 __hlist_del(&tb->node);
149 kmem_cache_free(tcp_bucket_cachep, tb);
150 }
151}
152
153/* Caller must disable local BH processing. */
154static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
155{
156 struct tcp_bind_hashbucket *head =
157 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
158 struct tcp_bind_bucket *tb;
159
160 spin_lock(&head->lock);
161 tb = tcp_sk(sk)->bind_hash;
162 sk_add_bind_node(child, &tb->owners);
163 tcp_sk(child)->bind_hash = tb;
164 spin_unlock(&head->lock);
165}
166
167inline void tcp_inherit_port(struct sock *sk, struct sock *child)
168{
169 local_bh_disable();
170 __tcp_inherit_port(sk, child);
171 local_bh_enable();
172}
173
174void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
175 unsigned short snum)
176{
177 inet_sk(sk)->num = snum;
178 sk_add_bind_node(sk, &tb->owners);
179 tcp_sk(sk)->bind_hash = tb;
180}
181
182static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
183{
184 const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
185 struct sock *sk2;
186 struct hlist_node *node;
187 int reuse = sk->sk_reuse;
188
189 sk_for_each_bound(sk2, node, &tb->owners) {
190 if (sk != sk2 &&
191 !tcp_v6_ipv6only(sk2) &&
192 (!sk->sk_bound_dev_if ||
193 !sk2->sk_bound_dev_if ||
194 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
195 if (!reuse || !sk2->sk_reuse ||
196 sk2->sk_state == TCP_LISTEN) {
197 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
198 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
199 sk2_rcv_saddr == sk_rcv_saddr)
200 break;
201 }
202 }
203 }
204 return node != NULL;
205}
206
207/* Obtain a reference to a local port for the given sock,
208 * if snum is zero it means select any available local port.
209 */
210static int tcp_v4_get_port(struct sock *sk, unsigned short snum) 100static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
211{ 101{
212 struct tcp_bind_hashbucket *head; 102 return inet_csk_get_port(&tcp_hashinfo, sk, snum);
213 struct hlist_node *node;
214 struct tcp_bind_bucket *tb;
215 int ret;
216
217 local_bh_disable();
218 if (!snum) {
219 int low = sysctl_local_port_range[0];
220 int high = sysctl_local_port_range[1];
221 int remaining = (high - low) + 1;
222 int rover;
223
224 spin_lock(&tcp_portalloc_lock);
225 if (tcp_port_rover < low)
226 rover = low;
227 else
228 rover = tcp_port_rover;
229 do {
230 rover++;
231 if (rover > high)
232 rover = low;
233 head = &tcp_bhash[tcp_bhashfn(rover)];
234 spin_lock(&head->lock);
235 tb_for_each(tb, node, &head->chain)
236 if (tb->port == rover)
237 goto next;
238 break;
239 next:
240 spin_unlock(&head->lock);
241 } while (--remaining > 0);
242 tcp_port_rover = rover;
243 spin_unlock(&tcp_portalloc_lock);
244
245 /* Exhausted local port range during search? It is not
246 * possible for us to be holding one of the bind hash
247 * locks if this test triggers, because if 'remaining'
248 * drops to zero, we broke out of the do/while loop at
249 * the top level, not from the 'break;' statement.
250 */
251 ret = 1;
252 if (unlikely(remaining <= 0))
253 goto fail;
254
255 /* OK, here is the one we will use. HEAD is
256 * non-NULL and we hold its mutex.
257 */
258 snum = rover;
259 } else {
260 head = &tcp_bhash[tcp_bhashfn(snum)];
261 spin_lock(&head->lock);
262 tb_for_each(tb, node, &head->chain)
263 if (tb->port == snum)
264 goto tb_found;
265 }
266 tb = NULL;
267 goto tb_not_found;
268tb_found:
269 if (!hlist_empty(&tb->owners)) {
270 if (sk->sk_reuse > 1)
271 goto success;
272 if (tb->fastreuse > 0 &&
273 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
274 goto success;
275 } else {
276 ret = 1;
277 if (tcp_bind_conflict(sk, tb))
278 goto fail_unlock;
279 }
280 }
281tb_not_found:
282 ret = 1;
283 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
284 goto fail_unlock;
285 if (hlist_empty(&tb->owners)) {
286 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
287 tb->fastreuse = 1;
288 else
289 tb->fastreuse = 0;
290 } else if (tb->fastreuse &&
291 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
292 tb->fastreuse = 0;
293success:
294 if (!tcp_sk(sk)->bind_hash)
295 tcp_bind_hash(sk, tb, snum);
296 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
297 ret = 0;
298
299fail_unlock:
300 spin_unlock(&head->lock);
301fail:
302 local_bh_enable();
303 return ret;
304}
305
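The search loop just deleted is a rover scan: resume one past where the previous search stopped, wrap at the top of the range, and give up after a single full pass. A userspace model of that loop, assuming a plain in_use[] array in place of the bind-hash chains and the 1024-4999 defaults from sysctl_local_port_range above:

#include <stdio.h>
#include <stdbool.h>

static bool in_use[65536];	  /* stands in for the bind-hash chains */
static int port_rover = 1024 - 1; /* persists across calls, like tcp_port_rover */

/* Model of the rover walk in the deleted tcp_v4_get_port(): returns a
 * free port in [low, high], or 0 once the range is exhausted. */
static int pick_local_port(int low, int high)
{
	int remaining = high - low + 1;
	int rover = port_rover < low ? low : port_rover;

	do {
		if (++rover > high)
			rover = low;
		if (!in_use[rover]) {
			port_rover = rover;
			in_use[rover] = true;
			return rover;
		}
	} while (--remaining > 0);
	return 0;
}

int main(void)
{
	printf("%d %d\n", pick_local_port(1024, 4999),
	       pick_local_port(1024, 4999));	/* prints: 1025 1026 */
	return 0;
}

inet_csk_get_port() keeps essentially this algorithm, reading the rover and buckets from the inet_hashinfo it is handed rather than from TCP-private globals.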
306/* Get rid of any references to a local port held by the
307 * given sock.
308 */
309static void __tcp_put_port(struct sock *sk)
310{
311 struct inet_sock *inet = inet_sk(sk);
312 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
313 struct tcp_bind_bucket *tb;
314
315 spin_lock(&head->lock);
316 tb = tcp_sk(sk)->bind_hash;
317 __sk_del_bind_node(sk);
318 tcp_sk(sk)->bind_hash = NULL;
319 inet->num = 0;
320 tcp_bucket_destroy(tb);
321 spin_unlock(&head->lock);
322}
323
324void tcp_put_port(struct sock *sk)
325{
326 local_bh_disable();
327 __tcp_put_port(sk);
328 local_bh_enable();
329}
330
331/* This lock without WQ_FLAG_EXCLUSIVE is fine on UP but can be very bad on SMP:
332 * when several writers sleep and a reader wakes them up, all but one
333 * immediately hit the write lock and grab all the CPUs. Exclusive sleep solves
334 * this, _but_ remember that it adds useless work on UP machines (a wakeup on
335 * each exclusive lock release). It should really be ifdefed.
336 */
337
338void tcp_listen_wlock(void)
339{
340 write_lock(&tcp_lhash_lock);
341
342 if (atomic_read(&tcp_lhash_users)) {
343 DEFINE_WAIT(wait);
344
345 for (;;) {
346 prepare_to_wait_exclusive(&tcp_lhash_wait,
347 &wait, TASK_UNINTERRUPTIBLE);
348 if (!atomic_read(&tcp_lhash_users))
349 break;
350 write_unlock_bh(&tcp_lhash_lock);
351 schedule();
352 write_lock_bh(&tcp_lhash_lock);
353 }
354
355 finish_wait(&tcp_lhash_wait, &wait);
356 }
357}
358
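What the comment above asks for is exactly what prepare_to_wait_exclusive() delivers in tcp_listen_wlock(): wake one sleeping writer per wakeup instead of all of them. A rough pthreads analogue of the wake-one idea (names invented; pthread_cond_signal() plays the exclusive-wakeup role, while a broadcast would recreate the stampede):

#include <pthread.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  can_write = PTHREAD_COND_INITIALIZER;
static int users;	/* stands in for tcp_lhash_users */

/* Reader side: the last one out wakes ONE writer. A broadcast here
 * would have every sleeping writer rush the lock at once. */
void demo_reader_done(void)
{
	pthread_mutex_lock(&gate);
	if (--users == 0)
		pthread_cond_signal(&can_write);
	pthread_mutex_unlock(&gate);
}

/* Writer side: sleep until no readers remain, as tcp_listen_wlock()
 * does with its exclusive wait on tcp_lhash_wait. */
void demo_writer_lock(void)
{
	pthread_mutex_lock(&gate);
	while (users > 0)
		pthread_cond_wait(&can_write, &gate);
	/* ... modify the listening hash with 'gate' held ... */
	pthread_mutex_unlock(&gate);
}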
359static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
360{
361 struct hlist_head *list;
362 rwlock_t *lock;
363
364 BUG_TRAP(sk_unhashed(sk));
365 if (listen_possible && sk->sk_state == TCP_LISTEN) {
366 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
367 lock = &tcp_lhash_lock;
368 tcp_listen_wlock();
369 } else {
370 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
371 lock = &tcp_ehash[sk->sk_hashent].lock;
372 write_lock(lock);
373 }
374 __sk_add_node(sk, list);
375 sock_prot_inc_use(sk->sk_prot);
376 write_unlock(lock);
377 if (listen_possible && sk->sk_state == TCP_LISTEN)
378 wake_up(&tcp_lhash_wait);
379} 103}
380 104
381static void tcp_v4_hash(struct sock *sk) 105static void tcp_v4_hash(struct sock *sk)
382{ 106{
383 if (sk->sk_state != TCP_CLOSE) { 107 inet_hash(&tcp_hashinfo, sk);
384 local_bh_disable();
385 __tcp_v4_hash(sk, 1);
386 local_bh_enable();
387 }
388} 108}
389 109
390void tcp_unhash(struct sock *sk) 110void tcp_unhash(struct sock *sk)
391{ 111{
392 rwlock_t *lock; 112 inet_unhash(&tcp_hashinfo, sk);
393
394 if (sk_unhashed(sk))
395 goto ende;
396
397 if (sk->sk_state == TCP_LISTEN) {
398 local_bh_disable();
399 tcp_listen_wlock();
400 lock = &tcp_lhash_lock;
401 } else {
402 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
403 lock = &head->lock;
404 write_lock_bh(&head->lock);
405 }
406
407 if (__sk_del_node_init(sk))
408 sock_prot_dec_use(sk->sk_prot);
409 write_unlock_bh(lock);
410
411 ende:
412 if (sk->sk_state == TCP_LISTEN)
413 wake_up(&tcp_lhash_wait);
414}
415
416/* Don't inline this cruft. There are some nice properties to
417 * exploit here. The BSD API does not allow a listening TCP
418 * to specify the remote port or the remote address for the
419 * connection, so always assume both are wildcarded
420 * during the search, since they can never be otherwise.
421 */
422static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
423 unsigned short hnum, int dif)
424{
425 struct sock *result = NULL, *sk;
426 struct hlist_node *node;
427 int score, hiscore;
428
429 hiscore = -1;
430 sk_for_each(sk, node, head) {
431 struct inet_sock *inet = inet_sk(sk);
432
433 if (inet->num == hnum && !ipv6_only_sock(sk)) {
434 __u32 rcv_saddr = inet->rcv_saddr;
435
436 score = (sk->sk_family == PF_INET ? 1 : 0);
437 if (rcv_saddr) {
438 if (rcv_saddr != daddr)
439 continue;
440 score += 2;
441 }
442 if (sk->sk_bound_dev_if) {
443 if (sk->sk_bound_dev_if != dif)
444 continue;
445 score += 2;
446 }
447 if (score == 5)
448 return sk;
449 if (score > hiscore) {
450 hiscore = score;
451 result = sk;
452 }
453 }
454 }
455 return result;
456}
457
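Since a listener can be matched only on family, local address, and bound device, the lookup above scores candidates rather than demanding an exact match. A standalone model of that scoring walk, with a toy array in place of the hash chain (the +1/+2/+2 weights and the early exit at the perfect score of 5 mirror __tcp_v4_lookup_listener() above):

#include <stdio.h>
#include <stdint.h>

struct listener {
	uint32_t rcv_saddr;	/* 0 = wildcard (INADDR_ANY)   */
	int	 bound_dev;	/* 0 = not bound to a device   */
	int	 is_inet;	/* 1 = PF_INET, 0 = mapped IPv6 */
};

/* Mirror of the scoring in __tcp_v4_lookup_listener() above: exact
 * address +2, exact device +2, native IPv4 family +1; a perfect 5
 * wins immediately, otherwise the highest score seen wins. */
static int best_listener(const struct listener *ls, int n,
			 uint32_t daddr, int dif)
{
	int best = -1, hiscore = -1;

	for (int i = 0; i < n; i++) {
		int score = ls[i].is_inet ? 1 : 0;

		if (ls[i].rcv_saddr) {
			if (ls[i].rcv_saddr != daddr)
				continue;
			score += 2;
		}
		if (ls[i].bound_dev) {
			if (ls[i].bound_dev != dif)
				continue;
			score += 2;
		}
		if (score == 5)
			return i;
		if (score > hiscore) {
			hiscore = score;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct listener ls[] = {
		{ 0,          0, 1 },	/* wildcard bind     */
		{ 0x0a000001, 0, 1 },	/* bound to 10.0.0.1 */
	};
	/* The exact-address listener (index 1) beats the wildcard. */
	printf("%d\n", best_listener(ls, 2, 0x0a000001, 0));
	return 0;
}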
458/* Optimize the common listener case. */
459static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
460 unsigned short hnum, int dif)
461{
462 struct sock *sk = NULL;
463 struct hlist_head *head;
464
465 read_lock(&tcp_lhash_lock);
466 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
467 if (!hlist_empty(head)) {
468 struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
469
470 if (inet->num == hnum && !sk->sk_node.next &&
471 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
472 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
473 !sk->sk_bound_dev_if)
474 goto sherry_cache;
475 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
476 }
477 if (sk) {
478sherry_cache:
479 sock_hold(sk);
480 }
481 read_unlock(&tcp_lhash_lock);
482 return sk;
483} 113}
484 114
485/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
486 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
487 *
488 * Local BH must be disabled here.
489 */
490
491static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
492 u32 daddr, u16 hnum,
493 int dif)
494{
495 struct tcp_ehash_bucket *head;
496 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
497 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
498 struct sock *sk;
499 struct hlist_node *node;
500 /* Optimize here for a direct hit; only listening connections can
501 * have wildcards anyway.
502 */
503 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
504 head = &tcp_ehash[hash];
505 read_lock(&head->lock);
506 sk_for_each(sk, node, &head->chain) {
507 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
508 goto hit; /* You sunk my battleship! */
509 }
510
511 /* Must check for a TIME_WAIT'er before going to listener hash. */
512 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
513 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
514 goto hit;
515 }
516 sk = NULL;
517out:
518 read_unlock(&head->lock);
519 return sk;
520hit:
521 sock_hold(sk);
522 goto out;
523}
524
525static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
526 u32 daddr, u16 hnum, int dif)
527{
528 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
529 daddr, hnum, dif);
530
531 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
532}
533
534inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
535 u16 dport, int dif)
536{
537 struct sock *sk;
538
539 local_bh_disable();
540 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
541 local_bh_enable();
542
543 return sk;
544}
545
546EXPORT_SYMBOL_GPL(tcp_v4_lookup);
547
548static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) 115static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
549{ 116{
550 return secure_tcp_sequence_number(skb->nh.iph->daddr, 117 return secure_tcp_sequence_number(skb->nh.iph->daddr,
@@ -555,27 +122,28 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
555 122
556/* called with local bh disabled */ 123/* called with local bh disabled */
557static int __tcp_v4_check_established(struct sock *sk, __u16 lport, 124static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
558 struct tcp_tw_bucket **twp) 125 struct inet_timewait_sock **twp)
559{ 126{
560 struct inet_sock *inet = inet_sk(sk); 127 struct inet_sock *inet = inet_sk(sk);
561 u32 daddr = inet->rcv_saddr; 128 u32 daddr = inet->rcv_saddr;
562 u32 saddr = inet->daddr; 129 u32 saddr = inet->daddr;
563 int dif = sk->sk_bound_dev_if; 130 int dif = sk->sk_bound_dev_if;
564 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) 131 INET_ADDR_COOKIE(acookie, saddr, daddr)
565 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); 132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
566 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport); 133 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
567 struct tcp_ehash_bucket *head = &tcp_ehash[hash]; 134 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
568 struct sock *sk2; 135 struct sock *sk2;
569 struct hlist_node *node; 136 const struct hlist_node *node;
570 struct tcp_tw_bucket *tw; 137 struct inet_timewait_sock *tw;
571 138
572 write_lock(&head->lock); 139 write_lock(&head->lock);
573 140
574 /* Check TIME-WAIT sockets first. */ 141 /* Check TIME-WAIT sockets first. */
575 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) { 142 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
576 tw = (struct tcp_tw_bucket *)sk2; 143 tw = inet_twsk(sk2);
577 144
578 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { 145 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
146 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
579 struct tcp_sock *tp = tcp_sk(sk); 147 struct tcp_sock *tp = tcp_sk(sk);
580 148
581 /* With PAWS, it is safe from the viewpoint 149 /* With PAWS, it is safe from the viewpoint
@@ -592,15 +160,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
592 fall back to VJ's scheme and use initial 160 fall back to VJ's scheme and use initial
593 timestamp retrieved from peer table. 161 timestamp retrieved from peer table.
594 */ 162 */
595 if (tw->tw_ts_recent_stamp && 163 if (tcptw->tw_ts_recent_stamp &&
596 (!twp || (sysctl_tcp_tw_reuse && 164 (!twp || (sysctl_tcp_tw_reuse &&
597 xtime.tv_sec - 165 xtime.tv_sec -
598 tw->tw_ts_recent_stamp > 1))) { 166 tcptw->tw_ts_recent_stamp > 1))) {
599 if ((tp->write_seq = 167 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
600 tw->tw_snd_nxt + 65535 + 2) == 0) 168 if (tp->write_seq == 0)
601 tp->write_seq = 1; 169 tp->write_seq = 1;
602 tp->rx_opt.ts_recent = tw->tw_ts_recent; 170 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
603 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 171 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
604 sock_hold(sk2); 172 sock_hold(sk2);
605 goto unique; 173 goto unique;
606 } else 174 } else
@@ -611,7 +179,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
611 179
612 /* And established part... */ 180 /* And established part... */
613 sk_for_each(sk2, node, &head->chain) { 181 sk_for_each(sk2, node, &head->chain) {
614 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 182 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
615 goto not_unique; 183 goto not_unique;
616 } 184 }
617 185
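The timestamp test in the hunk above is the core of tcp_tw_reuse: a TIME-WAIT bucket may be taken over for an identical 4-tuple once the peer's last recorded timestamp is strictly more than one second old, because PAWS will then reject any stray old segments. A condensed model of the predicate (field names shortened, xtime.tv_sec replaced by time()):

#include <time.h>

/* Model of the reuse condition above: the TIME-WAIT peer must have
 * been sending timestamps, and either the caller forces reuse
 * (twp == NULL in the original) or tcp_tw_reuse is on and the last
 * timestamp is more than one second old. */
static int tw_reusable(long tw_ts_recent_stamp, int tw_reuse_on, int forced)
{
	return tw_ts_recent_stamp &&
	       (forced || (tw_reuse_on &&
			   time(NULL) - tw_ts_recent_stamp > 1));
}

When the test passes, write_seq resumes 65535 + 2 past tw_snd_nxt, so even a segment filling the old connection's maximal window cannot land inside the new sequence space.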
@@ -631,10 +199,10 @@ unique:
631 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 199 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
632 } else if (tw) { 200 } else if (tw) {
633 /* Silly. Should hash-dance instead... */ 201 /* Silly. Should hash-dance instead... */
634 tcp_tw_deschedule(tw); 202 inet_twsk_deschedule(tw, &tcp_death_row);
635 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 203 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
636 204
637 tcp_tw_put(tw); 205 inet_twsk_put(tw);
638 } 206 }
639 207
640 return 0; 208 return 0;
@@ -657,9 +225,9 @@ static inline u32 connect_port_offset(const struct sock *sk)
657 */ 225 */
658static inline int tcp_v4_hash_connect(struct sock *sk) 226static inline int tcp_v4_hash_connect(struct sock *sk)
659{ 227{
660 unsigned short snum = inet_sk(sk)->num; 228 const unsigned short snum = inet_sk(sk)->num;
661 struct tcp_bind_hashbucket *head; 229 struct inet_bind_hashbucket *head;
662 struct tcp_bind_bucket *tb; 230 struct inet_bind_bucket *tb;
663 int ret; 231 int ret;
664 232
665 if (!snum) { 233 if (!snum) {
@@ -671,19 +239,19 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
671 static u32 hint; 239 static u32 hint;
672 u32 offset = hint + connect_port_offset(sk); 240 u32 offset = hint + connect_port_offset(sk);
673 struct hlist_node *node; 241 struct hlist_node *node;
674 struct tcp_tw_bucket *tw = NULL; 242 struct inet_timewait_sock *tw = NULL;
675 243
676 local_bh_disable(); 244 local_bh_disable();
677 for (i = 1; i <= range; i++) { 245 for (i = 1; i <= range; i++) {
678 port = low + (i + offset) % range; 246 port = low + (i + offset) % range;
679 head = &tcp_bhash[tcp_bhashfn(port)]; 247 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
680 spin_lock(&head->lock); 248 spin_lock(&head->lock);
681 249
682 /* Does not bother with rcv_saddr checks, 250 /* Does not bother with rcv_saddr checks,
683 * because the established check is already 251 * because the established check is already
684 * unique enough. 252 * unique enough.
685 */ 253 */
686 tb_for_each(tb, node, &head->chain) { 254 inet_bind_bucket_for_each(tb, node, &head->chain) {
687 if (tb->port == port) { 255 if (tb->port == port) {
688 BUG_TRAP(!hlist_empty(&tb->owners)); 256 BUG_TRAP(!hlist_empty(&tb->owners));
689 if (tb->fastreuse >= 0) 257 if (tb->fastreuse >= 0)
@@ -696,7 +264,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
696 } 264 }
697 } 265 }
698 266
699 tb = tcp_bucket_create(head, port); 267 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
700 if (!tb) { 268 if (!tb) {
701 spin_unlock(&head->lock); 269 spin_unlock(&head->lock);
702 break; 270 break;
@@ -715,27 +283,27 @@ ok:
715 hint += i; 283 hint += i;
716 284
717 /* Head lock still held and bh's disabled */ 285 /* Head lock still held and bh's disabled */
718 tcp_bind_hash(sk, tb, port); 286 inet_bind_hash(sk, tb, port);
719 if (sk_unhashed(sk)) { 287 if (sk_unhashed(sk)) {
720 inet_sk(sk)->sport = htons(port); 288 inet_sk(sk)->sport = htons(port);
721 __tcp_v4_hash(sk, 0); 289 __inet_hash(&tcp_hashinfo, sk, 0);
722 } 290 }
723 spin_unlock(&head->lock); 291 spin_unlock(&head->lock);
724 292
725 if (tw) { 293 if (tw) {
726 tcp_tw_deschedule(tw); 294 inet_twsk_deschedule(tw, &tcp_death_row);
727 tcp_tw_put(tw); 295 inet_twsk_put(tw);
728 } 296 }
729 297
730 ret = 0; 298 ret = 0;
731 goto out; 299 goto out;
732 } 300 }
733 301
734 head = &tcp_bhash[tcp_bhashfn(snum)]; 302 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
735 tb = tcp_sk(sk)->bind_hash; 303 tb = inet_csk(sk)->icsk_bind_hash;
736 spin_lock_bh(&head->lock); 304 spin_lock_bh(&head->lock);
737 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 305 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
738 __tcp_v4_hash(sk, 0); 306 __inet_hash(&tcp_hashinfo, sk, 0);
739 spin_unlock_bh(&head->lock); 307 spin_unlock_bh(&head->lock);
740 return 0; 308 return 0;
741 } else { 309 } else {
@@ -798,7 +366,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
798 tp->write_seq = 0; 366 tp->write_seq = 0;
799 } 367 }
800 368
801 if (sysctl_tcp_tw_recycle && 369 if (tcp_death_row.sysctl_tw_recycle &&
802 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { 370 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
803 struct inet_peer *peer = rt_get_peer(rt); 371 struct inet_peer *peer = rt_get_peer(rt);
804 372
@@ -837,8 +405,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
837 goto failure; 405 goto failure;
838 406
839 /* OK, now commit destination to socket. */ 407 /* OK, now commit destination to socket. */
840 __sk_dst_set(sk, &rt->u.dst); 408 sk_setup_caps(sk, &rt->u.dst);
841 tcp_v4_setup_caps(sk, &rt->u.dst);
842 409
843 if (!tp->write_seq) 410 if (!tp->write_seq)
844 tp->write_seq = secure_tcp_sequence_number(inet->saddr, 411 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
@@ -864,53 +431,6 @@ failure:
864 return err; 431 return err;
865} 432}
866 433
867static __inline__ int tcp_v4_iif(struct sk_buff *skb)
868{
869 return ((struct rtable *)skb->dst)->rt_iif;
870}
871
872static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
873{
874 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
875}
876
877static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
878 struct request_sock ***prevp,
879 __u16 rport,
880 __u32 raddr, __u32 laddr)
881{
882 struct listen_sock *lopt = tp->accept_queue.listen_opt;
883 struct request_sock *req, **prev;
884
885 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
886 (req = *prev) != NULL;
887 prev = &req->dl_next) {
888 const struct inet_request_sock *ireq = inet_rsk(req);
889
890 if (ireq->rmt_port == rport &&
891 ireq->rmt_addr == raddr &&
892 ireq->loc_addr == laddr &&
893 TCP_INET_FAMILY(req->rsk_ops->family)) {
894 BUG_TRAP(!req->sk);
895 *prevp = prev;
896 break;
897 }
898 }
899
900 return req;
901}
902
903static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
904{
905 struct tcp_sock *tp = tcp_sk(sk);
906 struct listen_sock *lopt = tp->accept_queue.listen_opt;
907 u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
908
909 reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
910 tcp_synq_added(sk);
911}
912
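The SYN-queue hash being deleted above keys the bucket choice with lopt->hash_rnd, a per-listener secret picked at listen() time, so a SYN flooder cannot aim every request at one chain. A sketch of the idea, with a placeholder mixer standing in for the kernel's jhash_2words():

#include <stdio.h>
#include <stdint.h>

#define SYNQ_HSIZE 512	/* a power of two, like TCP_SYNQ_HSIZE */

/* Placeholder mixer standing in for jhash_2words(); the point is only
 * that rnd, unknown to the sender, keys the bucket choice. */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t rnd)
{
	uint32_t h = a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ rnd;

	h ^= h >> 16;
	return h;
}

static unsigned int synq_hash(uint32_t raddr, uint16_t rport, uint32_t rnd)
{
	return mix(raddr, rport, rnd) & (SYNQ_HSIZE - 1);
}

int main(void)
{
	/* Same peer, two different per-listener secrets: different buckets. */
	printf("%u %u\n", synq_hash(0x0a000001, 4242, 0x1234abcd),
	       synq_hash(0x0a000001, 4242, 0xcafef00d));
	return 0;
}

The generic inet_csk_reqsk_queue_hash_add() keeps the same randomized-bucket approach.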
913
914/* 434/*
915 * This routine does path mtu discovery as defined in RFC1191. 435 * This routine does path mtu discovery as defined in RFC1191.
916 */ 436 */
@@ -993,14 +513,14 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
993 return; 513 return;
994 } 514 }
995 515
996 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, 516 sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
997 th->source, tcp_v4_iif(skb)); 517 th->source, inet_iif(skb));
998 if (!sk) { 518 if (!sk) {
999 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 519 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1000 return; 520 return;
1001 } 521 }
1002 if (sk->sk_state == TCP_TIME_WAIT) { 522 if (sk->sk_state == TCP_TIME_WAIT) {
1003 tcp_tw_put((struct tcp_tw_bucket *)sk); 523 inet_twsk_put((struct inet_timewait_sock *)sk);
1004 return; 524 return;
1005 } 525 }
1006 526
@@ -1054,8 +574,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
1054 if (sock_owned_by_user(sk)) 574 if (sock_owned_by_user(sk))
1055 goto out; 575 goto out;
1056 576
1057 req = tcp_v4_search_req(tp, &prev, th->dest, 577 req = inet_csk_search_req(sk, &prev, th->dest,
1058 iph->daddr, iph->saddr); 578 iph->daddr, iph->saddr);
1059 if (!req) 579 if (!req)
1060 goto out; 580 goto out;
1061 581
@@ -1075,7 +595,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
1075 * created socket, and POSIX does not want network 595 * created socket, and POSIX does not want network
1076 * errors returned from accept(). 596 * errors returned from accept().
1077 */ 597 */
1078 tcp_synq_drop(sk, req, prev); 598 inet_csk_reqsk_queue_drop(sk, req, prev);
1079 goto out; 599 goto out;
1080 600
1081 case TCP_SYN_SENT: 601 case TCP_SYN_SENT:
@@ -1245,12 +765,13 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1245 765
1246static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) 766static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1247{ 767{
1248 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; 768 struct inet_timewait_sock *tw = inet_twsk(sk);
769 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1249 770
1250 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt, 771 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1251 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent); 772 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
1252 773
1253 tcp_tw_put(tw); 774 inet_twsk_put(tw);
1254} 775}
1255 776
1256static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 777static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1259,36 +780,6 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1259 req->ts_recent); 780 req->ts_recent);
1260} 781}
1261 782
1262static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1263 struct request_sock *req)
1264{
1265 struct rtable *rt;
1266 const struct inet_request_sock *ireq = inet_rsk(req);
1267 struct ip_options *opt = inet_rsk(req)->opt;
1268 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1269 .nl_u = { .ip4_u =
1270 { .daddr = ((opt && opt->srr) ?
1271 opt->faddr :
1272 ireq->rmt_addr),
1273 .saddr = ireq->loc_addr,
1274 .tos = RT_CONN_FLAGS(sk) } },
1275 .proto = IPPROTO_TCP,
1276 .uli_u = { .ports =
1277 { .sport = inet_sk(sk)->sport,
1278 .dport = ireq->rmt_port } } };
1279
1280 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1281 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1282 return NULL;
1283 }
1284 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1285 ip_rt_put(rt);
1286 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1287 return NULL;
1288 }
1289 return &rt->u.dst;
1290}
1291
1292/* 783/*
1293 * Send a SYN-ACK after having received an ACK. 784 * Send a SYN-ACK after having received an ACK.
1294 * This still operates on a request_sock only, not on a big 785 * This still operates on a request_sock only, not on a big
@@ -1302,7 +793,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
1302 struct sk_buff * skb; 793 struct sk_buff * skb;
1303 794
1304 /* First, grab a route. */ 795 /* First, grab a route. */
1305 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) 796 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1306 goto out; 797 goto out;
1307 798
1308 skb = tcp_make_synack(sk, dst, req); 799 skb = tcp_make_synack(sk, dst, req);
@@ -1404,7 +895,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1404 * limitations, they conserve resources and peer is 895 * limitations, they conserve resources and peer is
1405 * evidently real one. 896 * evidently real one.
1406 */ 897 */
1407 if (tcp_synq_is_full(sk) && !isn) { 898 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1408#ifdef CONFIG_SYN_COOKIES 899#ifdef CONFIG_SYN_COOKIES
1409 if (sysctl_tcp_syncookies) { 900 if (sysctl_tcp_syncookies) {
1410 want_cookie = 1; 901 want_cookie = 1;
@@ -1418,7 +909,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1418 * clogging syn queue with openreqs with exponentially increasing 909 * clogging syn queue with openreqs with exponentially increasing
1419 * timeout. 910 * timeout.
1420 */ 911 */
1421 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) 912 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1422 goto drop; 913 goto drop;
1423 914
1424 req = reqsk_alloc(&tcp_request_sock_ops); 915 req = reqsk_alloc(&tcp_request_sock_ops);
@@ -1474,8 +965,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1474 * are made in the function processing timewait state. 965 * are made in the function processing timewait state.
1475 */ 966 */
1476 if (tmp_opt.saw_tstamp && 967 if (tmp_opt.saw_tstamp &&
1477 sysctl_tcp_tw_recycle && 968 tcp_death_row.sysctl_tw_recycle &&
1478 (dst = tcp_v4_route_req(sk, req)) != NULL && 969 (dst = inet_csk_route_req(sk, req)) != NULL &&
1479 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 970 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1480 peer->v4daddr == saddr) { 971 peer->v4daddr == saddr) {
1481 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && 972 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
@@ -1488,7 +979,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1488 } 979 }
1489 /* Kill the following clause, if you dislike this way. */ 980 /* Kill the following clause, if you dislike this way. */
1490 else if (!sysctl_tcp_syncookies && 981 else if (!sysctl_tcp_syncookies &&
1491 (sysctl_max_syn_backlog - tcp_synq_len(sk) < 982 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1492 (sysctl_max_syn_backlog >> 2)) && 983 (sysctl_max_syn_backlog >> 2)) &&
1493 (!peer || !peer->tcp_ts_stamp) && 984 (!peer || !peer->tcp_ts_stamp) &&
1494 (!dst || !dst_metric(dst, RTAX_RTT))) { 985 (!dst || !dst_metric(dst, RTAX_RTT))) {
@@ -1499,11 +990,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1499 * to destinations, already remembered 990 * to destinations, already remembered
1500 * to the moment of synflood. 991 * to the moment of synflood.
1501 */ 992 */
1502 LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open " 993 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
1503 "request from %u.%u." 994 "request from %u.%u.%u.%u/%u\n",
1504 "%u.%u/%u\n", 995 NIPQUAD(saddr),
1505 NIPQUAD(saddr), 996 ntohs(skb->h.th->source));
1506 ntohs(skb->h.th->source)));
1507 dst_release(dst); 997 dst_release(dst);
1508 goto drop_and_free; 998 goto drop_and_free;
1509 } 999 }
@@ -1518,7 +1008,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1518 if (want_cookie) { 1008 if (want_cookie) {
1519 reqsk_free(req); 1009 reqsk_free(req);
1520 } else { 1010 } else {
1521 tcp_v4_synq_add(sk, req); 1011 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1522 } 1012 }
1523 return 0; 1013 return 0;
1524 1014
@@ -1546,15 +1036,14 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1546 if (sk_acceptq_is_full(sk)) 1036 if (sk_acceptq_is_full(sk))
1547 goto exit_overflow; 1037 goto exit_overflow;
1548 1038
1549 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) 1039 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1550 goto exit; 1040 goto exit;
1551 1041
1552 newsk = tcp_create_openreq_child(sk, req, skb); 1042 newsk = tcp_create_openreq_child(sk, req, skb);
1553 if (!newsk) 1043 if (!newsk)
1554 goto exit; 1044 goto exit;
1555 1045
1556 newsk->sk_dst_cache = dst; 1046 sk_setup_caps(newsk, dst);
1557 tcp_v4_setup_caps(newsk, dst);
1558 1047
1559 newtp = tcp_sk(newsk); 1048 newtp = tcp_sk(newsk);
1560 newinet = inet_sk(newsk); 1049 newinet = inet_sk(newsk);
@@ -1564,7 +1053,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1564 newinet->saddr = ireq->loc_addr; 1053 newinet->saddr = ireq->loc_addr;
1565 newinet->opt = ireq->opt; 1054 newinet->opt = ireq->opt;
1566 ireq->opt = NULL; 1055 ireq->opt = NULL;
1567 newinet->mc_index = tcp_v4_iif(skb); 1056 newinet->mc_index = inet_iif(skb);
1568 newinet->mc_ttl = skb->nh.iph->ttl; 1057 newinet->mc_ttl = skb->nh.iph->ttl;
1569 newtp->ext_header_len = 0; 1058 newtp->ext_header_len = 0;
1570 if (newinet->opt) 1059 if (newinet->opt)
@@ -1575,8 +1064,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1575 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1064 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1576 tcp_initialize_rcv_mss(newsk); 1065 tcp_initialize_rcv_mss(newsk);
1577 1066
1578 __tcp_v4_hash(newsk, 0); 1067 __inet_hash(&tcp_hashinfo, newsk, 0);
1579 __tcp_inherit_port(sk, newsk); 1068 __inet_inherit_port(&tcp_hashinfo, sk, newsk);
1580 1069
1581 return newsk; 1070 return newsk;
1582 1071
@@ -1592,27 +1081,24 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1592{ 1081{
1593 struct tcphdr *th = skb->h.th; 1082 struct tcphdr *th = skb->h.th;
1594 struct iphdr *iph = skb->nh.iph; 1083 struct iphdr *iph = skb->nh.iph;
1595 struct tcp_sock *tp = tcp_sk(sk);
1596 struct sock *nsk; 1084 struct sock *nsk;
1597 struct request_sock **prev; 1085 struct request_sock **prev;
1598 /* Find possible connection requests. */ 1086 /* Find possible connection requests. */
1599 struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source, 1087 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1600 iph->saddr, iph->daddr); 1088 iph->saddr, iph->daddr);
1601 if (req) 1089 if (req)
1602 return tcp_check_req(sk, skb, req, prev); 1090 return tcp_check_req(sk, skb, req, prev);
1603 1091
1604 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr, 1092 nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
1605 th->source, 1093 th->source, skb->nh.iph->daddr,
1606 skb->nh.iph->daddr, 1094 ntohs(th->dest), inet_iif(skb));
1607 ntohs(th->dest),
1608 tcp_v4_iif(skb));
1609 1095
1610 if (nsk) { 1096 if (nsk) {
1611 if (nsk->sk_state != TCP_TIME_WAIT) { 1097 if (nsk->sk_state != TCP_TIME_WAIT) {
1612 bh_lock_sock(nsk); 1098 bh_lock_sock(nsk);
1613 return nsk; 1099 return nsk;
1614 } 1100 }
1615 tcp_tw_put((struct tcp_tw_bucket *)nsk); 1101 inet_twsk_put((struct inet_timewait_sock *)nsk);
1616 return NULL; 1102 return NULL;
1617 } 1103 }
1618 1104
@@ -1631,7 +1117,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
1631 skb->nh.iph->daddr, skb->csum)) 1117 skb->nh.iph->daddr, skb->csum))
1632 return 0; 1118 return 0;
1633 1119
1634 LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n")); 1120 LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v4 csum failed\n");
1635 skb->ip_summed = CHECKSUM_NONE; 1121 skb->ip_summed = CHECKSUM_NONE;
1636 } 1122 }
1637 if (skb->len <= 76) { 1123 if (skb->len <= 76) {
@@ -1747,9 +1233,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
1747 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos; 1233 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1748 TCP_SKB_CB(skb)->sacked = 0; 1234 TCP_SKB_CB(skb)->sacked = 0;
1749 1235
1750 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source, 1236 sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
1751 skb->nh.iph->daddr, ntohs(th->dest), 1237 skb->nh.iph->daddr, ntohs(th->dest),
1752 tcp_v4_iif(skb)); 1238 inet_iif(skb));
1753 1239
1754 if (!sk) 1240 if (!sk)
1755 goto no_tcp_socket; 1241 goto no_tcp_socket;
@@ -1801,24 +1287,26 @@ discard_and_relse:
1801 1287
1802do_time_wait: 1288do_time_wait:
1803 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1289 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1804 tcp_tw_put((struct tcp_tw_bucket *) sk); 1290 inet_twsk_put((struct inet_timewait_sock *) sk);
1805 goto discard_it; 1291 goto discard_it;
1806 } 1292 }
1807 1293
1808 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1294 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1809 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1295 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1810 tcp_tw_put((struct tcp_tw_bucket *) sk); 1296 inet_twsk_put((struct inet_timewait_sock *) sk);
1811 goto discard_it; 1297 goto discard_it;
1812 } 1298 }
1813 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk, 1299 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1814 skb, th, skb->len)) { 1300 skb, th)) {
1815 case TCP_TW_SYN: { 1301 case TCP_TW_SYN: {
1816 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr, 1302 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1817 ntohs(th->dest), 1303 skb->nh.iph->daddr,
1818 tcp_v4_iif(skb)); 1304 ntohs(th->dest),
1305 inet_iif(skb));
1819 if (sk2) { 1306 if (sk2) {
1820 tcp_tw_deschedule((struct tcp_tw_bucket *)sk); 1307 inet_twsk_deschedule((struct inet_timewait_sock *)sk,
1821 tcp_tw_put((struct tcp_tw_bucket *)sk); 1308 &tcp_death_row);
1309 inet_twsk_put((struct inet_timewait_sock *)sk);
1822 sk = sk2; 1310 sk = sk2;
1823 goto process; 1311 goto process;
1824 } 1312 }
@@ -1834,112 +1322,6 @@ do_time_wait:
1834 goto discard_it; 1322 goto discard_it;
1835} 1323}
1836 1324
1837/* With per-bucket locks this operation is not-atomic, so that
1838 * this version is not worse.
1839 */
1840static void __tcp_v4_rehash(struct sock *sk)
1841{
1842 sk->sk_prot->unhash(sk);
1843 sk->sk_prot->hash(sk);
1844}
1845
1846static int tcp_v4_reselect_saddr(struct sock *sk)
1847{
1848 struct inet_sock *inet = inet_sk(sk);
1849 int err;
1850 struct rtable *rt;
1851 __u32 old_saddr = inet->saddr;
1852 __u32 new_saddr;
1853 __u32 daddr = inet->daddr;
1854
1855 if (inet->opt && inet->opt->srr)
1856 daddr = inet->opt->faddr;
1857
1858 /* Query new route. */
1859 err = ip_route_connect(&rt, daddr, 0,
1860 RT_CONN_FLAGS(sk),
1861 sk->sk_bound_dev_if,
1862 IPPROTO_TCP,
1863 inet->sport, inet->dport, sk);
1864 if (err)
1865 return err;
1866
1867 __sk_dst_set(sk, &rt->u.dst);
1868 tcp_v4_setup_caps(sk, &rt->u.dst);
1869
1870 new_saddr = rt->rt_src;
1871
1872 if (new_saddr == old_saddr)
1873 return 0;
1874
1875 if (sysctl_ip_dynaddr > 1) {
1876 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1877 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1878 NIPQUAD(old_saddr),
1879 NIPQUAD(new_saddr));
1880 }
1881
1882 inet->saddr = new_saddr;
1883 inet->rcv_saddr = new_saddr;
1884
1885 /* XXX The one ugly spot where we need to
1886 * XXX really change the socket's identity after
1887 * XXX it has entered the hashes. -DaveM
1888 *
1889 * Besides that, it does not check for connection
1890 * uniqueness. Expect trouble.
1891 */
1892 __tcp_v4_rehash(sk);
1893 return 0;
1894}
1895
1896int tcp_v4_rebuild_header(struct sock *sk)
1897{
1898 struct inet_sock *inet = inet_sk(sk);
1899 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1900 u32 daddr;
1901 int err;
1902
1903 /* Route is OK, nothing to do. */
1904 if (rt)
1905 return 0;
1906
1907 /* Reroute. */
1908 daddr = inet->daddr;
1909 if (inet->opt && inet->opt->srr)
1910 daddr = inet->opt->faddr;
1911
1912 {
1913 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1914 .nl_u = { .ip4_u =
1915 { .daddr = daddr,
1916 .saddr = inet->saddr,
1917 .tos = RT_CONN_FLAGS(sk) } },
1918 .proto = IPPROTO_TCP,
1919 .uli_u = { .ports =
1920 { .sport = inet->sport,
1921 .dport = inet->dport } } };
1922
1923 err = ip_route_output_flow(&rt, &fl, sk, 0);
1924 }
1925 if (!err) {
1926 __sk_dst_set(sk, &rt->u.dst);
1927 tcp_v4_setup_caps(sk, &rt->u.dst);
1928 return 0;
1929 }
1930
1931 /* Routing failed... */
1932 sk->sk_route_caps = 0;
1933
1934 if (!sysctl_ip_dynaddr ||
1935 sk->sk_state != TCP_SYN_SENT ||
1936 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1937 (err = tcp_v4_reselect_saddr(sk)) != 0)
1938 sk->sk_err_soft = -err;
1939
1940 return err;
1941}
1942
1943static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) 1325static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1944{ 1326{
1945 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr; 1327 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
@@ -1988,18 +1370,18 @@ int tcp_v4_remember_stamp(struct sock *sk)
1988 return 0; 1370 return 0;
1989} 1371}
1990 1372
1991int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw) 1373int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1992{ 1374{
1993 struct inet_peer *peer = NULL; 1375 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1994
1995 peer = inet_getpeer(tw->tw_daddr, 1);
1996 1376
1997 if (peer) { 1377 if (peer) {
1998 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 || 1378 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1379
1380 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1999 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && 1381 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2000 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) { 1382 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
2001 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp; 1383 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
2002 peer->tcp_ts = tw->tw_ts_recent; 1384 peer->tcp_ts = tcptw->tw_ts_recent;
2003 } 1385 }
2004 inet_putpeer(peer); 1386 inet_putpeer(peer);
2005 return 1; 1387 return 1;
@@ -2011,7 +1393,7 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2011struct tcp_func ipv4_specific = { 1393struct tcp_func ipv4_specific = {
2012 .queue_xmit = ip_queue_xmit, 1394 .queue_xmit = ip_queue_xmit,
2013 .send_check = tcp_v4_send_check, 1395 .send_check = tcp_v4_send_check,
2014 .rebuild_header = tcp_v4_rebuild_header, 1396 .rebuild_header = inet_sk_rebuild_header,
2015 .conn_request = tcp_v4_conn_request, 1397 .conn_request = tcp_v4_conn_request,
2016 .syn_recv_sock = tcp_v4_syn_recv_sock, 1398 .syn_recv_sock = tcp_v4_syn_recv_sock,
2017 .remember_stamp = tcp_v4_remember_stamp, 1399 .remember_stamp = tcp_v4_remember_stamp,
@@ -2027,13 +1409,14 @@ struct tcp_func ipv4_specific = {
2027 */ 1409 */
2028static int tcp_v4_init_sock(struct sock *sk) 1410static int tcp_v4_init_sock(struct sock *sk)
2029{ 1411{
1412 struct inet_connection_sock *icsk = inet_csk(sk);
2030 struct tcp_sock *tp = tcp_sk(sk); 1413 struct tcp_sock *tp = tcp_sk(sk);
2031 1414
2032 skb_queue_head_init(&tp->out_of_order_queue); 1415 skb_queue_head_init(&tp->out_of_order_queue);
2033 tcp_init_xmit_timers(sk); 1416 tcp_init_xmit_timers(sk);
2034 tcp_prequeue_init(tp); 1417 tcp_prequeue_init(tp);
2035 1418
2036 tp->rto = TCP_TIMEOUT_INIT; 1419 icsk->icsk_rto = TCP_TIMEOUT_INIT;
2037 tp->mdev = TCP_TIMEOUT_INIT; 1420 tp->mdev = TCP_TIMEOUT_INIT;
2038 1421
2039 /* So many TCP implementations out there (incorrectly) count the 1422 /* So many TCP implementations out there (incorrectly) count the
@@ -2051,7 +1434,7 @@ static int tcp_v4_init_sock(struct sock *sk)
2051 tp->mss_cache = 536; 1434 tp->mss_cache = 536;
2052 1435
2053 tp->reordering = sysctl_tcp_reordering; 1436 tp->reordering = sysctl_tcp_reordering;
2054 tp->ca_ops = &tcp_init_congestion_ops; 1437 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
2055 1438
2056 sk->sk_state = TCP_CLOSE; 1439 sk->sk_state = TCP_CLOSE;
2057 1440
@@ -2074,7 +1457,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
2074 1457
2075 tcp_clear_xmit_timers(sk); 1458 tcp_clear_xmit_timers(sk);
2076 1459
2077 tcp_cleanup_congestion_control(tp); 1460 tcp_cleanup_congestion_control(sk);
2078 1461
2079 /* Clean up the write buffer. */ 1462 /* Clean up the write buffer. */
2080 sk_stream_writequeue_purge(sk); 1463 sk_stream_writequeue_purge(sk);
@@ -2086,8 +1469,8 @@ int tcp_v4_destroy_sock(struct sock *sk)
2086 __skb_queue_purge(&tp->ucopy.prequeue); 1469 __skb_queue_purge(&tp->ucopy.prequeue);
2087 1470
2088 /* Clean up a referenced TCP bind bucket. */ 1471 /* Clean up a referenced TCP bind bucket. */
2089 if (tp->bind_hash) 1472 if (inet_csk(sk)->icsk_bind_hash)
2090 tcp_put_port(sk); 1473 inet_put_port(&tcp_hashinfo, sk);
2091 1474
2092 /* 1475 /*
2093 * If sendmsg cached page exists, toss it. 1476 * If sendmsg cached page exists, toss it.
@@ -2107,13 +1490,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
2107#ifdef CONFIG_PROC_FS 1490#ifdef CONFIG_PROC_FS
2108/* Proc filesystem TCP sock list dumping. */ 1491/* Proc filesystem TCP sock list dumping. */
2109 1492
2110static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head) 1493static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
2111{ 1494{
2112 return hlist_empty(head) ? NULL : 1495 return hlist_empty(head) ? NULL :
2113 list_entry(head->first, struct tcp_tw_bucket, tw_node); 1496 list_entry(head->first, struct inet_timewait_sock, tw_node);
2114} 1497}
2115 1498
2116static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw) 1499static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2117{ 1500{
2118 return tw->tw_node.next ? 1501 return tw->tw_node.next ?
2119 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; 1502 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
@@ -2121,14 +1504,14 @@ static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2121 1504
2122static void *listening_get_next(struct seq_file *seq, void *cur) 1505static void *listening_get_next(struct seq_file *seq, void *cur)
2123{ 1506{
2124 struct tcp_sock *tp; 1507 struct inet_connection_sock *icsk;
2125 struct hlist_node *node; 1508 struct hlist_node *node;
2126 struct sock *sk = cur; 1509 struct sock *sk = cur;
2127 struct tcp_iter_state* st = seq->private; 1510 struct tcp_iter_state* st = seq->private;
2128 1511
2129 if (!sk) { 1512 if (!sk) {
2130 st->bucket = 0; 1513 st->bucket = 0;
2131 sk = sk_head(&tcp_listening_hash[0]); 1514 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
2132 goto get_sk; 1515 goto get_sk;
2133 } 1516 }
2134 1517
@@ -2137,7 +1520,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
2137 if (st->state == TCP_SEQ_STATE_OPENREQ) { 1520 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2138 struct request_sock *req = cur; 1521 struct request_sock *req = cur;
2139 1522
2140 tp = tcp_sk(st->syn_wait_sk); 1523 icsk = inet_csk(st->syn_wait_sk);
2141 req = req->dl_next; 1524 req = req->dl_next;
2142 while (1) { 1525 while (1) {
2143 while (req) { 1526 while (req) {
@@ -2150,17 +1533,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
2150 if (++st->sbucket >= TCP_SYNQ_HSIZE) 1533 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2151 break; 1534 break;
2152get_req: 1535get_req:
2153 req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; 1536 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2154 } 1537 }
2155 sk = sk_next(st->syn_wait_sk); 1538 sk = sk_next(st->syn_wait_sk);
2156 st->state = TCP_SEQ_STATE_LISTENING; 1539 st->state = TCP_SEQ_STATE_LISTENING;
2157 read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1540 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2158 } else { 1541 } else {
2159 tp = tcp_sk(sk); 1542 icsk = inet_csk(sk);
2160 read_lock_bh(&tp->accept_queue.syn_wait_lock); 1543 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2161 if (reqsk_queue_len(&tp->accept_queue)) 1544 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2162 goto start_req; 1545 goto start_req;
2163 read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1546 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2164 sk = sk_next(sk); 1547 sk = sk_next(sk);
2165 } 1548 }
2166get_sk: 1549get_sk:
@@ -2169,9 +1552,9 @@ get_sk:
2169 cur = sk; 1552 cur = sk;
2170 goto out; 1553 goto out;
2171 } 1554 }
2172 tp = tcp_sk(sk); 1555 icsk = inet_csk(sk);
2173 read_lock_bh(&tp->accept_queue.syn_wait_lock); 1556 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2174 if (reqsk_queue_len(&tp->accept_queue)) { 1557 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2175start_req: 1558start_req:
2176 st->uid = sock_i_uid(sk); 1559 st->uid = sock_i_uid(sk);
2177 st->syn_wait_sk = sk; 1560 st->syn_wait_sk = sk;
@@ -2179,10 +1562,10 @@ start_req:
2179 st->sbucket = 0; 1562 st->sbucket = 0;
2180 goto get_req; 1563 goto get_req;
2181 } 1564 }
2182 read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1565 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2183 } 1566 }
2184 if (++st->bucket < TCP_LHTABLE_SIZE) { 1567 if (++st->bucket < INET_LHTABLE_SIZE) {
2185 sk = sk_head(&tcp_listening_hash[st->bucket]); 1568 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
2186 goto get_sk; 1569 goto get_sk;
2187 } 1570 }
2188 cur = NULL; 1571 cur = NULL;
@@ -2206,16 +1589,16 @@ static void *established_get_first(struct seq_file *seq)
2206 struct tcp_iter_state* st = seq->private; 1589 struct tcp_iter_state* st = seq->private;
2207 void *rc = NULL; 1590 void *rc = NULL;
2208 1591
2209 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) { 1592 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
2210 struct sock *sk; 1593 struct sock *sk;
2211 struct hlist_node *node; 1594 struct hlist_node *node;
2212 struct tcp_tw_bucket *tw; 1595 struct inet_timewait_sock *tw;
2213 1596
2214 /* We can reschedule _before_ having picked the target: */ 1597 /* We can reschedule _before_ having picked the target: */
2215 cond_resched_softirq(); 1598 cond_resched_softirq();
2216 1599
2217 read_lock(&tcp_ehash[st->bucket].lock); 1600 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
2218 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) { 1601 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2219 if (sk->sk_family != st->family) { 1602 if (sk->sk_family != st->family) {
2220 continue; 1603 continue;
2221 } 1604 }
@@ -2223,15 +1606,15 @@ static void *established_get_first(struct seq_file *seq)
2223 goto out; 1606 goto out;
2224 } 1607 }
2225 st->state = TCP_SEQ_STATE_TIME_WAIT; 1608 st->state = TCP_SEQ_STATE_TIME_WAIT;
2226 tw_for_each(tw, node, 1609 inet_twsk_for_each(tw, node,
2227 &tcp_ehash[st->bucket + tcp_ehash_size].chain) { 1610 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
2228 if (tw->tw_family != st->family) { 1611 if (tw->tw_family != st->family) {
2229 continue; 1612 continue;
2230 } 1613 }
2231 rc = tw; 1614 rc = tw;
2232 goto out; 1615 goto out;
2233 } 1616 }
2234 read_unlock(&tcp_ehash[st->bucket].lock); 1617 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2235 st->state = TCP_SEQ_STATE_ESTABLISHED; 1618 st->state = TCP_SEQ_STATE_ESTABLISHED;
2236 } 1619 }
2237out: 1620out:
@@ -2241,7 +1624,7 @@ out:
2241static void *established_get_next(struct seq_file *seq, void *cur) 1624static void *established_get_next(struct seq_file *seq, void *cur)
2242{ 1625{
2243 struct sock *sk = cur; 1626 struct sock *sk = cur;
2244 struct tcp_tw_bucket *tw; 1627 struct inet_timewait_sock *tw;
2245 struct hlist_node *node; 1628 struct hlist_node *node;
2246 struct tcp_iter_state* st = seq->private; 1629 struct tcp_iter_state* st = seq->private;
2247 1630
@@ -2258,15 +1641,15 @@ get_tw:
2258 cur = tw; 1641 cur = tw;
2259 goto out; 1642 goto out;
2260 } 1643 }
2261 read_unlock(&tcp_ehash[st->bucket].lock); 1644 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2262 st->state = TCP_SEQ_STATE_ESTABLISHED; 1645 st->state = TCP_SEQ_STATE_ESTABLISHED;
2263 1646
2264 /* We can reschedule between buckets: */ 1647 /* We can reschedule between buckets: */
2265 cond_resched_softirq(); 1648 cond_resched_softirq();
2266 1649
2267 if (++st->bucket < tcp_ehash_size) { 1650 if (++st->bucket < tcp_hashinfo.ehash_size) {
2268 read_lock(&tcp_ehash[st->bucket].lock); 1651 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
2269 sk = sk_head(&tcp_ehash[st->bucket].chain); 1652 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2270 } else { 1653 } else {
2271 cur = NULL; 1654 cur = NULL;
2272 goto out; 1655 goto out;
@@ -2280,7 +1663,7 @@ get_tw:
2280 } 1663 }
2281 1664
2282 st->state = TCP_SEQ_STATE_TIME_WAIT; 1665 st->state = TCP_SEQ_STATE_TIME_WAIT;
2283 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain); 1666 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
2284 goto get_tw; 1667 goto get_tw;
2285found: 1668found:
2286 cur = sk; 1669 cur = sk;
@@ -2304,12 +1687,12 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2304 void *rc; 1687 void *rc;
2305 struct tcp_iter_state* st = seq->private; 1688 struct tcp_iter_state* st = seq->private;
2306 1689
2307 tcp_listen_lock(); 1690 inet_listen_lock(&tcp_hashinfo);
2308 st->state = TCP_SEQ_STATE_LISTENING; 1691 st->state = TCP_SEQ_STATE_LISTENING;
2309 rc = listening_get_idx(seq, &pos); 1692 rc = listening_get_idx(seq, &pos);
2310 1693
2311 if (!rc) { 1694 if (!rc) {
2312 tcp_listen_unlock(); 1695 inet_listen_unlock(&tcp_hashinfo);
2313 local_bh_disable(); 1696 local_bh_disable();
2314 st->state = TCP_SEQ_STATE_ESTABLISHED; 1697 st->state = TCP_SEQ_STATE_ESTABLISHED;
2315 rc = established_get_idx(seq, pos); 1698 rc = established_get_idx(seq, pos);
@@ -2342,7 +1725,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2342 case TCP_SEQ_STATE_LISTENING: 1725 case TCP_SEQ_STATE_LISTENING:
2343 rc = listening_get_next(seq, v); 1726 rc = listening_get_next(seq, v);
2344 if (!rc) { 1727 if (!rc) {
2345 tcp_listen_unlock(); 1728 inet_listen_unlock(&tcp_hashinfo);
2346 local_bh_disable(); 1729 local_bh_disable();
2347 st->state = TCP_SEQ_STATE_ESTABLISHED; 1730 st->state = TCP_SEQ_STATE_ESTABLISHED;
2348 rc = established_get_first(seq); 1731 rc = established_get_first(seq);
@@ -2365,17 +1748,17 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2365 switch (st->state) { 1748 switch (st->state) {
2366 case TCP_SEQ_STATE_OPENREQ: 1749 case TCP_SEQ_STATE_OPENREQ:
2367 if (v) { 1750 if (v) {
2368 struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); 1751 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2369 read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1752 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2370 } 1753 }
2371 case TCP_SEQ_STATE_LISTENING: 1754 case TCP_SEQ_STATE_LISTENING:
2372 if (v != SEQ_START_TOKEN) 1755 if (v != SEQ_START_TOKEN)
2373 tcp_listen_unlock(); 1756 inet_listen_unlock(&tcp_hashinfo);
2374 break; 1757 break;
2375 case TCP_SEQ_STATE_TIME_WAIT: 1758 case TCP_SEQ_STATE_TIME_WAIT:
2376 case TCP_SEQ_STATE_ESTABLISHED: 1759 case TCP_SEQ_STATE_ESTABLISHED:
2377 if (v) 1760 if (v)
2378 read_unlock(&tcp_ehash[st->bucket].lock); 1761 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2379 local_bh_enable(); 1762 local_bh_enable();
2380 break; 1763 break;
2381 } 1764 }
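
The mechanical change running through these /proc iterator hunks: the per-protocol globals tcp_ehash and tcp_ehash_size, and the tcp_listen_{lock,unlock} helpers, become the ehash/ehash_size members of the shared struct inet_hashinfo instance tcp_hashinfo (with inet_listen_{lock,unlock} taking the hashinfo as an argument). TIME_WAIT sockets live in the upper half of the same established-hash table, which is what the st->bucket + tcp_hashinfo.ehash_size indexing walks. A toy userspace model of that split-table layout (the names mirror the kernel's; everything else is illustrative):

#include <stdio.h>

/* ehash has 2 * ehash_size buckets: ESTABLISHED chains in the lower
 * half, TIME_WAIT chains in the upper half at the same bucket offset. */
#define EHASH_SIZE 8u

int main(void)
{
        unsigned int bucket = 3;

        printf("established chain: ehash[%u]\n", bucket);
        printf("time-wait chain:   ehash[%u]\n", bucket + EHASH_SIZE);
        return 0;
}
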
@@ -2472,18 +1855,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2472 int timer_active; 1855 int timer_active;
2473 unsigned long timer_expires; 1856 unsigned long timer_expires;
2474 struct tcp_sock *tp = tcp_sk(sp); 1857 struct tcp_sock *tp = tcp_sk(sp);
1858 const struct inet_connection_sock *icsk = inet_csk(sp);
2475 struct inet_sock *inet = inet_sk(sp); 1859 struct inet_sock *inet = inet_sk(sp);
2476 unsigned int dest = inet->daddr; 1860 unsigned int dest = inet->daddr;
2477 unsigned int src = inet->rcv_saddr; 1861 unsigned int src = inet->rcv_saddr;
2478 __u16 destp = ntohs(inet->dport); 1862 __u16 destp = ntohs(inet->dport);
2479 __u16 srcp = ntohs(inet->sport); 1863 __u16 srcp = ntohs(inet->sport);
2480 1864
2481 if (tp->pending == TCP_TIME_RETRANS) { 1865 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2482 timer_active = 1; 1866 timer_active = 1;
2483 timer_expires = tp->timeout; 1867 timer_expires = icsk->icsk_timeout;
2484 } else if (tp->pending == TCP_TIME_PROBE0) { 1868 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2485 timer_active = 4; 1869 timer_active = 4;
2486 timer_expires = tp->timeout; 1870 timer_expires = icsk->icsk_timeout;
2487 } else if (timer_pending(&sp->sk_timer)) { 1871 } else if (timer_pending(&sp->sk_timer)) {
2488 timer_active = 2; 1872 timer_active = 2;
2489 timer_expires = sp->sk_timer.expires; 1873 timer_expires = sp->sk_timer.expires;
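
The timer_active codes assigned here are what appear in the timer column of /proc/net/tcp. Going by this function, plus the 3 that get_timewait4_sock() prints and the 0 fallback for "no timer pending" (both outside the quoted hunks), a small userspace decoding aid for reading the proc output:

#include <stdio.h>

static const char *tcp_timer_name(int timer_active)
{
        switch (timer_active) {
        case 0: return "none";
        case 1: return "retransmit";        /* icsk_pending == ICSK_TIME_RETRANS */
        case 2: return "keepalive";         /* sk->sk_timer pending */
        case 3: return "TIME_WAIT";         /* printed for timewait sockets */
        case 4: return "zero window probe"; /* icsk_pending == ICSK_TIME_PROBE0 */
        default: return "unknown";
        }
}

int main(void)
{
        printf("timer code 4 = %s\n", tcp_timer_name(4));
        return 0;
}
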
@@ -2498,17 +1882,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2498 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, 1882 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2499 timer_active, 1883 timer_active,
2500 jiffies_to_clock_t(timer_expires - jiffies), 1884 jiffies_to_clock_t(timer_expires - jiffies),
2501 tp->retransmits, 1885 icsk->icsk_retransmits,
2502 sock_i_uid(sp), 1886 sock_i_uid(sp),
2503 tp->probes_out, 1887 icsk->icsk_probes_out,
2504 sock_i_ino(sp), 1888 sock_i_ino(sp),
2505 atomic_read(&sp->sk_refcnt), sp, 1889 atomic_read(&sp->sk_refcnt), sp,
2506 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong, 1890 icsk->icsk_rto,
1891 icsk->icsk_ack.ato,
1892 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2507 tp->snd_cwnd, 1893 tp->snd_cwnd,
2508 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); 1894 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2509} 1895}
2510 1896
2511static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i) 1897static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
2512{ 1898{
2513 unsigned int dest, src; 1899 unsigned int dest, src;
2514 __u16 destp, srcp; 1900 __u16 destp, srcp;
@@ -2588,7 +1974,7 @@ struct proto tcp_prot = {
2588 .close = tcp_close, 1974 .close = tcp_close,
2589 .connect = tcp_v4_connect, 1975 .connect = tcp_v4_connect,
2590 .disconnect = tcp_disconnect, 1976 .disconnect = tcp_disconnect,
2591 .accept = tcp_accept, 1977 .accept = inet_csk_accept,
2592 .ioctl = tcp_ioctl, 1978 .ioctl = tcp_ioctl,
2593 .init = tcp_v4_init_sock, 1979 .init = tcp_v4_init_sock,
2594 .destroy = tcp_v4_destroy_sock, 1980 .destroy = tcp_v4_destroy_sock,
@@ -2603,6 +1989,7 @@ struct proto tcp_prot = {
2603 .get_port = tcp_v4_get_port, 1989 .get_port = tcp_v4_get_port,
2604 .enter_memory_pressure = tcp_enter_memory_pressure, 1990 .enter_memory_pressure = tcp_enter_memory_pressure,
2605 .sockets_allocated = &tcp_sockets_allocated, 1991 .sockets_allocated = &tcp_sockets_allocated,
1992 .orphan_count = &tcp_orphan_count,
2606 .memory_allocated = &tcp_memory_allocated, 1993 .memory_allocated = &tcp_memory_allocated,
2607 .memory_pressure = &tcp_memory_pressure, 1994 .memory_pressure = &tcp_memory_pressure,
2608 .sysctl_mem = sysctl_tcp_mem, 1995 .sysctl_mem = sysctl_tcp_mem,
@@ -2610,6 +1997,7 @@ struct proto tcp_prot = {
2610 .sysctl_rmem = sysctl_tcp_rmem, 1997 .sysctl_rmem = sysctl_tcp_rmem,
2611 .max_header = MAX_TCP_HEADER, 1998 .max_header = MAX_TCP_HEADER,
2612 .obj_size = sizeof(struct tcp_sock), 1999 .obj_size = sizeof(struct tcp_sock),
2000 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2613 .rsk_prot = &tcp_request_sock_ops, 2001 .rsk_prot = &tcp_request_sock_ops,
2614}; 2002};
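
The new .twsk_obj_size member tells protocol-independent code how much memory a TIME_WAIT socket needs, so inet_twsk_alloc() can create a tcp_timewait_sock without knowing the concrete type. That works because the generic inet_timewait_sock is embedded first and tcp_twsk() is just a cast. A standalone sketch of the layering (the field layout below is illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct inet_timewait_sock { int tw_substate; };

struct tcp_timewait_sock {
        struct inet_timewait_sock tw_sk;        /* must be first */
        unsigned int tw_rcv_nxt, tw_snd_nxt;
};

static struct tcp_timewait_sock *tcp_twsk(struct inet_timewait_sock *tw)
{
        return (struct tcp_timewait_sock *)tw;  /* valid: tw_sk is first */
}

int main(void)
{
        /* generic code allocates using the size the protocol declared */
        size_t twsk_obj_size = sizeof(struct tcp_timewait_sock);
        struct inet_timewait_sock *tw = calloc(1, twsk_obj_size);

        tcp_twsk(tw)->tw_rcv_nxt = 42;
        printf("%u\n", tcp_twsk(tw)->tw_rcv_nxt);
        free(tw);
        return 0;
}
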
2615 2003
@@ -2631,19 +2019,13 @@ void __init tcp_v4_init(struct net_proto_family *ops)
2631} 2019}
2632 2020
2633EXPORT_SYMBOL(ipv4_specific); 2021EXPORT_SYMBOL(ipv4_specific);
2634EXPORT_SYMBOL(tcp_bind_hash); 2022EXPORT_SYMBOL(inet_bind_bucket_create);
2635EXPORT_SYMBOL(tcp_bucket_create);
2636EXPORT_SYMBOL(tcp_hashinfo); 2023EXPORT_SYMBOL(tcp_hashinfo);
2637EXPORT_SYMBOL(tcp_inherit_port);
2638EXPORT_SYMBOL(tcp_listen_wlock);
2639EXPORT_SYMBOL(tcp_port_rover);
2640EXPORT_SYMBOL(tcp_prot); 2024EXPORT_SYMBOL(tcp_prot);
2641EXPORT_SYMBOL(tcp_put_port);
2642EXPORT_SYMBOL(tcp_unhash); 2025EXPORT_SYMBOL(tcp_unhash);
2643EXPORT_SYMBOL(tcp_v4_conn_request); 2026EXPORT_SYMBOL(tcp_v4_conn_request);
2644EXPORT_SYMBOL(tcp_v4_connect); 2027EXPORT_SYMBOL(tcp_v4_connect);
2645EXPORT_SYMBOL(tcp_v4_do_rcv); 2028EXPORT_SYMBOL(tcp_v4_do_rcv);
2646EXPORT_SYMBOL(tcp_v4_rebuild_header);
2647EXPORT_SYMBOL(tcp_v4_remember_stamp); 2029EXPORT_SYMBOL(tcp_v4_remember_stamp);
2648EXPORT_SYMBOL(tcp_v4_send_check); 2030EXPORT_SYMBOL(tcp_v4_send_check);
2649EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 2031EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f42a284164b7..a88db28b0af7 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -35,13 +35,27 @@
35#define SYNC_INIT 1 35#define SYNC_INIT 1
36#endif 36#endif
37 37
38int sysctl_tcp_tw_recycle;
39int sysctl_tcp_max_tw_buckets = NR_FILE*2;
40
41int sysctl_tcp_syncookies = SYNC_INIT; 38int sysctl_tcp_syncookies = SYNC_INIT;
42int sysctl_tcp_abort_on_overflow; 39int sysctl_tcp_abort_on_overflow;
43 40
44static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo); 41struct inet_timewait_death_row tcp_death_row = {
42 .sysctl_max_tw_buckets = NR_FILE * 2,
43 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
44 .death_lock = SPIN_LOCK_UNLOCKED,
45 .hashinfo = &tcp_hashinfo,
46 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
47 (unsigned long)&tcp_death_row),
48 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
49 inet_twdr_twkill_work,
50 &tcp_death_row),
51/* Short-time timewait calendar */
52
53 .twcal_hand = -1,
54 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
55 (unsigned long)&tcp_death_row),
56};
57
58EXPORT_SYMBOL_GPL(tcp_death_row);
45 59
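
struct inet_timewait_death_row gathers what used to be file-scope TCP state -- the max-buckets sysctl, the slow twkill wheel with its timer and work struct, and the short-time recycle calendar -- and records which hash table it serves, so the reaper code moved to inet_timewait_sock.c can run for any transport. One number worth checking against the removed TCP_TWKILL_* macros below: assuming TCP_TIMEWAIT_LEN is 60 seconds and INET_TWDR_TWKILL_SLOTS is 8 (the same values the old macros were built from), each slot of the slow wheel covers:

#include <stdio.h>

int main(void)
{
        const double timewait_len = 60.0;  /* TCP_TIMEWAIT_LEN, in seconds */
        const int twkill_slots = 8;        /* INET_TWDR_TWKILL_SLOTS       */

        /* the .period field in the initializer above */
        printf("slot period: %.2f s\n", timewait_len / twkill_slots);
        return 0;
}
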
46static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 60static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
47{ 61{
@@ -52,47 +66,6 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
52 return (seq == e_win && seq == end_seq); 66 return (seq == e_win && seq == end_seq);
53} 67}
54 68
55/* New-style handling of TIME_WAIT sockets. */
56
57int tcp_tw_count;
58
59
60/* Must be called with locally disabled BHs. */
61static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
62{
63 struct tcp_ehash_bucket *ehead;
64 struct tcp_bind_hashbucket *bhead;
65 struct tcp_bind_bucket *tb;
66
67 /* Unlink from established hashes. */
68 ehead = &tcp_ehash[tw->tw_hashent];
69 write_lock(&ehead->lock);
70 if (hlist_unhashed(&tw->tw_node)) {
71 write_unlock(&ehead->lock);
72 return;
73 }
74 __hlist_del(&tw->tw_node);
75 sk_node_init(&tw->tw_node);
76 write_unlock(&ehead->lock);
77
78 /* Disassociate with bind bucket. */
79 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
80 spin_lock(&bhead->lock);
81 tb = tw->tw_tb;
82 __hlist_del(&tw->tw_bind_node);
83 tw->tw_tb = NULL;
84 tcp_bucket_destroy(tb);
85 spin_unlock(&bhead->lock);
86
87#ifdef INET_REFCNT_DEBUG
88 if (atomic_read(&tw->tw_refcnt) != 1) {
89 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
90 atomic_read(&tw->tw_refcnt));
91 }
92#endif
93 tcp_tw_put(tw);
94}
95
96/* 69/*
97 * * Main purpose of TIME-WAIT state is to close connection gracefully, 70 * * Main purpose of TIME-WAIT state is to close connection gracefully,
98 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN 71 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -122,19 +95,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
122 * to avoid misread sequence numbers, states etc. --ANK 95 * to avoid misread sequence numbers, states etc. --ANK
123 */ 96 */
124enum tcp_tw_status 97enum tcp_tw_status
125tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, 98tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
126 struct tcphdr *th, unsigned len) 99 const struct tcphdr *th)
127{ 100{
101 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
128 struct tcp_options_received tmp_opt; 102 struct tcp_options_received tmp_opt;
129 int paws_reject = 0; 103 int paws_reject = 0;
130 104
131 tmp_opt.saw_tstamp = 0; 105 tmp_opt.saw_tstamp = 0;
132 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { 106 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
133 tcp_parse_options(skb, &tmp_opt, 0); 107 tcp_parse_options(skb, &tmp_opt, 0);
134 108
135 if (tmp_opt.saw_tstamp) { 109 if (tmp_opt.saw_tstamp) {
136 tmp_opt.ts_recent = tw->tw_ts_recent; 110 tmp_opt.ts_recent = tcptw->tw_ts_recent;
137 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 111 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 paws_reject = tcp_paws_check(&tmp_opt, th->rst); 112 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
139 } 113 }
140 } 114 }
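
The tw_ts_recent/tw_ts_recent_stamp pair carried over here is what makes PAWS work against a TIME_WAIT socket: the peer's last timestamp is compared with the incoming one using the (s32)(a - b) idiom, which stays correct when the 32-bit timestamp clock wraps. A self-contained illustration of that comparison (the same form applied to tw_ts_recent further down in this function):

#include <stdio.h>
#include <stdint.h>

/* "a is older than b", safe across a 2^32 wrap of the timestamp clock */
static int ts_older(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        uint32_t ts_recent = 0xFFFFFFF0u;  /* just before the wrap */
        uint32_t rcv_tsval = 0x00000010u;  /* just after the wrap  */

        printf("%d\n", ts_older(ts_recent, rcv_tsval));  /* 1: order preserved */
        return 0;
}
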
@@ -145,20 +119,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
145 /* Out of window, send ACK */ 119 /* Out of window, send ACK */
146 if (paws_reject || 120 if (paws_reject ||
147 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 121 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
148 tw->tw_rcv_nxt, 122 tcptw->tw_rcv_nxt,
149 tw->tw_rcv_nxt + tw->tw_rcv_wnd)) 123 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
150 return TCP_TW_ACK; 124 return TCP_TW_ACK;
151 125
152 if (th->rst) 126 if (th->rst)
153 goto kill; 127 goto kill;
154 128
155 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt)) 129 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
156 goto kill_with_rst; 130 goto kill_with_rst;
157 131
158 /* Dup ACK? */ 132 /* Dup ACK? */
159 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) || 133 if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
160 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { 134 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
161 tcp_tw_put(tw); 135 inet_twsk_put(tw);
162 return TCP_TW_SUCCESS; 136 return TCP_TW_SUCCESS;
163 } 137 }
164 138
@@ -166,19 +140,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
166 * reset. 140 * reset.
167 */ 141 */
168 if (!th->fin || 142 if (!th->fin ||
169 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) { 143 TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
170kill_with_rst: 144kill_with_rst:
171 tcp_tw_deschedule(tw); 145 inet_twsk_deschedule(tw, &tcp_death_row);
172 tcp_tw_put(tw); 146 inet_twsk_put(tw);
173 return TCP_TW_RST; 147 return TCP_TW_RST;
174 } 148 }
175 149
176 /* FIN arrived, enter true time-wait state. */ 150 /* FIN arrived, enter true time-wait state. */
177 tw->tw_substate = TCP_TIME_WAIT; 151 tw->tw_substate = TCP_TIME_WAIT;
178 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; 152 tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
179 if (tmp_opt.saw_tstamp) { 153 if (tmp_opt.saw_tstamp) {
180 tw->tw_ts_recent_stamp = xtime.tv_sec; 154 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
181 tw->tw_ts_recent = tmp_opt.rcv_tsval; 155 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
182 } 156 }
183 157
184 /* I am shamed, but failed to make it more elegant. 158 /* I am shamed, but failed to make it more elegant.
@@ -187,11 +161,13 @@ kill_with_rst:
187 * do not understand recycling in any case, it is not 161 * do not understand recycling in any case, it is not
188 * a big problem in practice. --ANK */ 162 * a big problem in practice. --ANK */
189 if (tw->tw_family == AF_INET && 163 if (tw->tw_family == AF_INET &&
190 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp && 164 tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
191 tcp_v4_tw_remember_stamp(tw)) 165 tcp_v4_tw_remember_stamp(tw))
192 tcp_tw_schedule(tw, tw->tw_timeout); 166 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
167 TCP_TIMEWAIT_LEN);
193 else 168 else
194 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 169 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
170 TCP_TIMEWAIT_LEN);
195 return TCP_TW_ACK; 171 return TCP_TW_ACK;
196 } 172 }
197 173
@@ -213,7 +189,7 @@ kill_with_rst:
213 */ 189 */
214 190
215 if (!paws_reject && 191 if (!paws_reject &&
216 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt && 192 (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
217 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { 193 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
218 /* In window segment, it may be only reset or bare ack. */ 194 /* In window segment, it may be only reset or bare ack. */
219 195
@@ -224,19 +200,20 @@ kill_with_rst:
224 */ 200 */
225 if (sysctl_tcp_rfc1337 == 0) { 201 if (sysctl_tcp_rfc1337 == 0) {
226kill: 202kill:
227 tcp_tw_deschedule(tw); 203 inet_twsk_deschedule(tw, &tcp_death_row);
228 tcp_tw_put(tw); 204 inet_twsk_put(tw);
229 return TCP_TW_SUCCESS; 205 return TCP_TW_SUCCESS;
230 } 206 }
231 } 207 }
232 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 208 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
209 TCP_TIMEWAIT_LEN);
233 210
234 if (tmp_opt.saw_tstamp) { 211 if (tmp_opt.saw_tstamp) {
235 tw->tw_ts_recent = tmp_opt.rcv_tsval; 212 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
236 tw->tw_ts_recent_stamp = xtime.tv_sec; 213 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
237 } 214 }
238 215
239 tcp_tw_put(tw); 216 inet_twsk_put(tw);
240 return TCP_TW_SUCCESS; 217 return TCP_TW_SUCCESS;
241 } 218 }
242 219
@@ -258,9 +235,10 @@ kill:
258 */ 235 */
259 236
260 if (th->syn && !th->rst && !th->ack && !paws_reject && 237 if (th->syn && !th->rst && !th->ack && !paws_reject &&
261 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || 238 (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
262 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) { 239 (tmp_opt.saw_tstamp &&
263 u32 isn = tw->tw_snd_nxt + 65535 + 2; 240 (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
241 u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
264 if (isn == 0) 242 if (isn == 0)
265 isn++; 243 isn++;
266 TCP_SKB_CB(skb)->when = isn; 244 TCP_SKB_CB(skb)->when = isn;
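
A note on the isn arithmetic just above: a SYN that legitimately reopens a TIME_WAIT pair gets an initial sequence number a full 64K window beyond anything the dead connection sent (tw_snd_nxt + 65535 + 2), and 0 is skipped -- the value is stashed in TCP_SKB_CB(skb)->when, where, as far as I can tell, 0 means "no ISN chosen". The wraparound case, as a standalone check:

#include <stdio.h>

int main(void)
{
        unsigned int tw_snd_nxt = 0xFFFEFFFFu;
        unsigned int isn = tw_snd_nxt + 65535 + 2;  /* wraps to 0 */

        if (isn == 0)   /* 0 is reserved, bump past it */
                isn++;
        printf("new isn: %#x\n", isn);  /* 0x1 */
        return 0;
}
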
@@ -278,107 +256,57 @@ kill:
278 * Do not reschedule in the last case. 256 * Do not reschedule in the last case.
279 */ 257 */
280 if (paws_reject || th->ack) 258 if (paws_reject || th->ack)
281 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 259 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
260 TCP_TIMEWAIT_LEN);
282 261
283 /* Send ACK. Note, we do not put the bucket, 262 /* Send ACK. Note, we do not put the bucket,
284 * it will be released by caller. 263 * it will be released by caller.
285 */ 264 */
286 return TCP_TW_ACK; 265 return TCP_TW_ACK;
287 } 266 }
288 tcp_tw_put(tw); 267 inet_twsk_put(tw);
289 return TCP_TW_SUCCESS; 268 return TCP_TW_SUCCESS;
290} 269}
291 270
292/* Enter the time wait state. This is called with locally disabled BH.
293 * Essentially we whip up a timewait bucket, copy the
294 * relevant info into it from the SK, and mess with hash chains
295 * and list linkage.
296 */
297static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
298{
299 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
300 struct tcp_bind_hashbucket *bhead;
301
302 /* Step 1: Put TW into bind hash. Original socket stays there too.
303 Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
304 binding cache, even if it is closed.
305 */
306 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
307 spin_lock(&bhead->lock);
308 tw->tw_tb = tcp_sk(sk)->bind_hash;
309 BUG_TRAP(tcp_sk(sk)->bind_hash);
310 tw_add_bind_node(tw, &tw->tw_tb->owners);
311 spin_unlock(&bhead->lock);
312
313 write_lock(&ehead->lock);
314
315 /* Step 2: Remove SK from established hash. */
316 if (__sk_del_node_init(sk))
317 sock_prot_dec_use(sk->sk_prot);
318
319 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
320 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
321 atomic_inc(&tw->tw_refcnt);
322
323 write_unlock(&ehead->lock);
324}
325
326/* 271/*
327 * Move a socket to time-wait or dead fin-wait-2 state. 272 * Move a socket to time-wait or dead fin-wait-2 state.
328 */ 273 */
329void tcp_time_wait(struct sock *sk, int state, int timeo) 274void tcp_time_wait(struct sock *sk, int state, int timeo)
330{ 275{
331 struct tcp_tw_bucket *tw = NULL; 276 struct inet_timewait_sock *tw = NULL;
332 struct tcp_sock *tp = tcp_sk(sk); 277 const struct tcp_sock *tp = tcp_sk(sk);
333 int recycle_ok = 0; 278 int recycle_ok = 0;
334 279
335 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp) 280 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
336 recycle_ok = tp->af_specific->remember_stamp(sk); 281 recycle_ok = tp->af_specific->remember_stamp(sk);
337 282
338 if (tcp_tw_count < sysctl_tcp_max_tw_buckets) 283 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
339 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC); 284 tw = inet_twsk_alloc(sk, state);
340
341 if(tw != NULL) {
342 struct inet_sock *inet = inet_sk(sk);
343 int rto = (tp->rto<<2) - (tp->rto>>1);
344
345 /* Give us an identity. */
346 tw->tw_daddr = inet->daddr;
347 tw->tw_rcv_saddr = inet->rcv_saddr;
348 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
349 tw->tw_num = inet->num;
350 tw->tw_state = TCP_TIME_WAIT;
351 tw->tw_substate = state;
352 tw->tw_sport = inet->sport;
353 tw->tw_dport = inet->dport;
354 tw->tw_family = sk->sk_family;
355 tw->tw_reuse = sk->sk_reuse;
356 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
357 atomic_set(&tw->tw_refcnt, 1);
358 285
359 tw->tw_hashent = sk->sk_hashent; 286 if (tw != NULL) {
360 tw->tw_rcv_nxt = tp->rcv_nxt; 287 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
361 tw->tw_snd_nxt = tp->snd_nxt; 288 const struct inet_connection_sock *icsk = inet_csk(sk);
362 tw->tw_rcv_wnd = tcp_receive_window(tp); 289 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
363 tw->tw_ts_recent = tp->rx_opt.ts_recent; 290
364 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 291 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
365 tw_dead_node_init(tw); 292 tcptw->tw_rcv_nxt = tp->rcv_nxt;
293 tcptw->tw_snd_nxt = tp->snd_nxt;
294 tcptw->tw_rcv_wnd = tcp_receive_window(tp);
295 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
296 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
366 297
367#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 298#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
368 if (tw->tw_family == PF_INET6) { 299 if (tw->tw_family == PF_INET6) {
369 struct ipv6_pinfo *np = inet6_sk(sk); 300 struct ipv6_pinfo *np = inet6_sk(sk);
301 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
370 302
371 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr); 303 ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
372 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr); 304 ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
373 tw->tw_v6_ipv6only = np->ipv6only; 305 tw->tw_ipv6only = np->ipv6only;
374 } else {
375 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
376 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
377 tw->tw_v6_ipv6only = 0;
378 } 306 }
379#endif 307#endif
380 /* Linkage updates. */ 308 /* Linkage updates. */
381 __tcp_tw_hashdance(sk, tw); 309 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
382 310
383 /* Get the TIME_WAIT timeout firing. */ 311 /* Get the TIME_WAIT timeout firing. */
384 if (timeo < rto) 312 if (timeo < rto)
@@ -392,8 +320,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
392 timeo = TCP_TIMEWAIT_LEN; 320 timeo = TCP_TIMEWAIT_LEN;
393 } 321 }
394 322
395 tcp_tw_schedule(tw, timeo); 323 inet_twsk_schedule(tw, &tcp_death_row, timeo,
396 tcp_tw_put(tw); 324 TCP_TIMEWAIT_LEN);
325 inet_twsk_put(tw);
397 } else { 326 } else {
398 /* Sorry, if we're out of memory, just CLOSE this 327 /* Sorry, if we're out of memory, just CLOSE this
399 * socket up. We've got bigger problems than 328 * socket up. We've got bigger problems than
@@ -407,277 +336,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
407 tcp_done(sk); 336 tcp_done(sk);
408} 337}
409 338
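
The rto local in tcp_time_wait() above is (icsk_rto << 2) - (icsk_rto >> 1), i.e. 4*RTO - RTO/2 = 3.5*RTO -- the "RTO * 3.5" that the scheduling comment removed below justifies as time for one lost ACK plus two retransmitted FINs. Checking the shift arithmetic with a concrete value:

#include <stdio.h>

int main(void)
{
        int rto = 200;                        /* example RTO in ms  */
        int timeo = (rto << 2) - (rto >> 1);  /* 4*rto - rto/2      */

        printf("3.5 * %d = %d ms\n", rto, timeo);  /* 700 ms */
        return 0;
}
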
410/* Kill off TIME_WAIT sockets once their lifetime has expired. */
411static int tcp_tw_death_row_slot;
412
413static void tcp_twkill(unsigned long);
414
415/* TIME_WAIT reaping mechanism. */
416#define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
417#define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
418
419#define TCP_TWKILL_QUOTA 100
420
421static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
422static DEFINE_SPINLOCK(tw_death_lock);
423static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
424static void twkill_work(void *);
425static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
426static u32 twkill_thread_slots;
427
428/* Returns non-zero if quota exceeded. */
429static int tcp_do_twkill_work(int slot, unsigned int quota)
430{
431 struct tcp_tw_bucket *tw;
432 struct hlist_node *node;
433 unsigned int killed;
434 int ret;
435
436 /* NOTE: compare this to previous version where lock
437 * was released after detaching chain. It was racy,
438 * because tw buckets are scheduled in a non-serialized context
439 * in 2.3 (with netfilter), and with softnet it is common, because
440 * soft irqs are not sequenced.
441 */
442 killed = 0;
443 ret = 0;
444rescan:
445 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
446 __tw_del_dead_node(tw);
447 spin_unlock(&tw_death_lock);
448 tcp_timewait_kill(tw);
449 tcp_tw_put(tw);
450 killed++;
451 spin_lock(&tw_death_lock);
452 if (killed > quota) {
453 ret = 1;
454 break;
455 }
456
457 /* While we dropped tw_death_lock, another cpu may have
458 * killed off the next TW bucket in the list, therefore
459 * do a fresh re-read of the hlist head node with the
460 * lock reacquired. We still use the hlist traversal
461 * macro in order to get the prefetches.
462 */
463 goto rescan;
464 }
465
466 tcp_tw_count -= killed;
467 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
468
469 return ret;
470}
471
472static void tcp_twkill(unsigned long dummy)
473{
474 int need_timer, ret;
475
476 spin_lock(&tw_death_lock);
477
478 if (tcp_tw_count == 0)
479 goto out;
480
481 need_timer = 0;
482 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
483 if (ret) {
484 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
485 mb();
486 schedule_work(&tcp_twkill_work);
487 need_timer = 1;
488 } else {
489 /* We purged the entire slot, anything left? */
490 if (tcp_tw_count)
491 need_timer = 1;
492 }
493 tcp_tw_death_row_slot =
494 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
495 if (need_timer)
496 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
497out:
498 spin_unlock(&tw_death_lock);
499}
500
501extern void twkill_slots_invalid(void);
502
503static void twkill_work(void *dummy)
504{
505 int i;
506
507 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
508 twkill_slots_invalid();
509
510 while (twkill_thread_slots) {
511 spin_lock_bh(&tw_death_lock);
512 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
513 if (!(twkill_thread_slots & (1 << i)))
514 continue;
515
516 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
517 if (need_resched()) {
518 spin_unlock_bh(&tw_death_lock);
519 schedule();
520 spin_lock_bh(&tw_death_lock);
521 }
522 }
523
524 twkill_thread_slots &= ~(1 << i);
525 }
526 spin_unlock_bh(&tw_death_lock);
527 }
528}
529
530/* These are always called from BH context. See callers in
531 * tcp_input.c to verify this.
532 */
533
534/* This is for handling early-kills of TIME_WAIT sockets. */
535void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
536{
537 spin_lock(&tw_death_lock);
538 if (tw_del_dead_node(tw)) {
539 tcp_tw_put(tw);
540 if (--tcp_tw_count == 0)
541 del_timer(&tcp_tw_timer);
542 }
543 spin_unlock(&tw_death_lock);
544 tcp_timewait_kill(tw);
545}
546
547/* Short-time timewait calendar */
548
549static int tcp_twcal_hand = -1;
550static int tcp_twcal_jiffie;
551static void tcp_twcal_tick(unsigned long);
552static struct timer_list tcp_twcal_timer =
553 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
554static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
555
556static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
557{
558 struct hlist_head *list;
559 int slot;
560
561 /* timeout := RTO * 3.5
562 *
563 * 3.5 = 1+2+0.5 to wait for two retransmits.
564 *
565 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
566 * our ACK acking that FIN can be lost. If N subsequent retransmitted
567 * FINs (or previous segments) are lost (the probability of such an event
568 * is p^(N+1), where p is the probability of losing a single packet and
569 * the time to detect the loss is about RTO*(2^N - 1) with exponential
570 * backoff). Normal timewait length is calculated so that we
571 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
572 * [ BTW Linux, following BSD, violates this requirement, waiting
573 * only for 60sec; we should wait at least for 240 secs.
574 * Well, 240 consumes too many resources 8)
575 * ]
576 * This interval is not reduced to catch old duplicates and
577 * responses to our wandering segments living for two MSLs.
578 * However, if we use PAWS to detect
579 * old duplicates, we can reduce the interval to bounds required
580 * by RTO, rather than MSL. So, if peer understands PAWS, we
581 * kill tw bucket after 3.5*RTO (it is important that this number
582 * is greater than TS tick!) and detect old duplicates with help
583 * of PAWS.
584 */
585 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
586
587 spin_lock(&tw_death_lock);
588
589 /* Unlink it, if it was scheduled */
590 if (tw_del_dead_node(tw))
591 tcp_tw_count--;
592 else
593 atomic_inc(&tw->tw_refcnt);
594
595 if (slot >= TCP_TW_RECYCLE_SLOTS) {
596 /* Schedule to slow timer */
597 if (timeo >= TCP_TIMEWAIT_LEN) {
598 slot = TCP_TWKILL_SLOTS-1;
599 } else {
600 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
601 if (slot >= TCP_TWKILL_SLOTS)
602 slot = TCP_TWKILL_SLOTS-1;
603 }
604 tw->tw_ttd = jiffies + timeo;
605 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
606 list = &tcp_tw_death_row[slot];
607 } else {
608 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
609
610 if (tcp_twcal_hand < 0) {
611 tcp_twcal_hand = 0;
612 tcp_twcal_jiffie = jiffies;
613 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
614 add_timer(&tcp_twcal_timer);
615 } else {
616 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
617 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
618 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
619 }
620 list = &tcp_twcal_row[slot];
621 }
622
623 hlist_add_head(&tw->tw_death_node, list);
624
625 if (tcp_tw_count++ == 0)
626 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
627 spin_unlock(&tw_death_lock);
628}
629
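
The slot computation in the function above -- (timeo + (1 << TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK -- is ceiling division by a power of two: the timeout is rounded up to the next recycle-calendar tick so a bucket is never reaped early. The inet_twsk_schedule() replacement keeps the same formula. With an illustrative tick of 2^5 jiffies (the real TCP_TW_RECYCLE_TICK depends on HZ):

#include <stdio.h>

#define TICK 5  /* illustrative; stands in for TCP_TW_RECYCLE_TICK */

int main(void)
{
        int timeo = 100;  /* jiffies */
        int slot = (timeo + (1 << TICK) - 1) >> TICK;

        printf("ceil(%d / %d) = %d\n", timeo, 1 << TICK, slot);  /* ceil(100/32) = 4 */
        return 0;
}
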
630void tcp_twcal_tick(unsigned long dummy)
631{
632 int n, slot;
633 unsigned long j;
634 unsigned long now = jiffies;
635 int killed = 0;
636 int adv = 0;
637
638 spin_lock(&tw_death_lock);
639 if (tcp_twcal_hand < 0)
640 goto out;
641
642 slot = tcp_twcal_hand;
643 j = tcp_twcal_jiffie;
644
645 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
646 if (time_before_eq(j, now)) {
647 struct hlist_node *node, *safe;
648 struct tcp_tw_bucket *tw;
649
650 tw_for_each_inmate_safe(tw, node, safe,
651 &tcp_twcal_row[slot]) {
652 __tw_del_dead_node(tw);
653 tcp_timewait_kill(tw);
654 tcp_tw_put(tw);
655 killed++;
656 }
657 } else {
658 if (!adv) {
659 adv = 1;
660 tcp_twcal_jiffie = j;
661 tcp_twcal_hand = slot;
662 }
663
664 if (!hlist_empty(&tcp_twcal_row[slot])) {
665 mod_timer(&tcp_twcal_timer, j);
666 goto out;
667 }
668 }
669 j += (1<<TCP_TW_RECYCLE_TICK);
670 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
671 }
672 tcp_twcal_hand = -1;
673
674out:
675 if ((tcp_tw_count -= killed) == 0)
676 del_timer(&tcp_tw_timer);
677 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
678 spin_unlock(&tw_death_lock);
679}
680
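
Both reapers deleted here (and their inet_twdr_* replacements) advance their hand with (slot + 1) & (SLOTS - 1), which is why the old comment insists TCP_TWKILL_SLOTS "keep this a power of 2": masking with SLOTS - 1 is only equivalent to taking the value modulo SLOTS when SLOTS is a power of two. A minimal demonstration of the wrap:

#include <stdio.h>

#define SLOTS 8  /* must be a power of 2 for the mask trick */

int main(void)
{
        for (int slot = 6, i = 0; i < 4; i++) {
                printf("slot %d\n", slot);
                slot = (slot + 1) & (SLOTS - 1);  /* 6, 7, 0, 1 */
        }
        return 0;
}
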
681/* This is not only more efficient than what we used to do, it eliminates 339/* This is not only more efficient than what we used to do, it eliminates
682 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM 340 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
683 * 341 *
@@ -686,75 +344,27 @@ out:
686 */ 344 */
687struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) 345struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
688{ 346{
689 /* allocate the newsk from the same slab of the master sock, 347 struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
690 * if not, at sk_free time we'll try to free it from the wrong
691 * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
692 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
693 348
694 if(newsk != NULL) { 349 if (newsk != NULL) {
695 struct inet_request_sock *ireq = inet_rsk(req); 350 const struct inet_request_sock *ireq = inet_rsk(req);
696 struct tcp_request_sock *treq = tcp_rsk(req); 351 struct tcp_request_sock *treq = tcp_rsk(req);
352 struct inet_connection_sock *newicsk = inet_csk(sk);
697 struct tcp_sock *newtp; 353 struct tcp_sock *newtp;
698 struct sk_filter *filter;
699
700 memcpy(newsk, sk, sizeof(struct tcp_sock));
701 newsk->sk_state = TCP_SYN_RECV;
702
703 /* SANITY */
704 sk_node_init(&newsk->sk_node);
705 tcp_sk(newsk)->bind_hash = NULL;
706
707 /* Clone the TCP header template */
708 inet_sk(newsk)->dport = ireq->rmt_port;
709
710 sock_lock_init(newsk);
711 bh_lock_sock(newsk);
712
713 rwlock_init(&newsk->sk_dst_lock);
714 atomic_set(&newsk->sk_rmem_alloc, 0);
715 skb_queue_head_init(&newsk->sk_receive_queue);
716 atomic_set(&newsk->sk_wmem_alloc, 0);
717 skb_queue_head_init(&newsk->sk_write_queue);
718 atomic_set(&newsk->sk_omem_alloc, 0);
719 newsk->sk_wmem_queued = 0;
720 newsk->sk_forward_alloc = 0;
721
722 sock_reset_flag(newsk, SOCK_DONE);
723 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
724 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
725 newsk->sk_send_head = NULL;
726 rwlock_init(&newsk->sk_callback_lock);
727 skb_queue_head_init(&newsk->sk_error_queue);
728 newsk->sk_write_space = sk_stream_write_space;
729
730 if ((filter = newsk->sk_filter) != NULL)
731 sk_filter_charge(newsk, filter);
732
733 if (unlikely(xfrm_sk_clone_policy(newsk))) {
734 /* It is still raw copy of parent, so invalidate
735 * destructor and make plain sk_free() */
736 newsk->sk_destruct = NULL;
737 sk_free(newsk);
738 return NULL;
739 }
740 354
741 /* Now setup tcp_sock */ 355 /* Now setup tcp_sock */
742 newtp = tcp_sk(newsk); 356 newtp = tcp_sk(newsk);
743 newtp->pred_flags = 0; 357 newtp->pred_flags = 0;
744 newtp->rcv_nxt = treq->rcv_isn + 1; 358 newtp->rcv_nxt = treq->rcv_isn + 1;
745 newtp->snd_nxt = treq->snt_isn + 1; 359 newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
746 newtp->snd_una = treq->snt_isn + 1;
747 newtp->snd_sml = treq->snt_isn + 1;
748 360
749 tcp_prequeue_init(newtp); 361 tcp_prequeue_init(newtp);
750 362
751 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); 363 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
752 364
753 newtp->retransmits = 0;
754 newtp->backoff = 0;
755 newtp->srtt = 0; 365 newtp->srtt = 0;
756 newtp->mdev = TCP_TIMEOUT_INIT; 366 newtp->mdev = TCP_TIMEOUT_INIT;
757 newtp->rto = TCP_TIMEOUT_INIT; 367 newicsk->icsk_rto = TCP_TIMEOUT_INIT;
758 368
759 newtp->packets_out = 0; 369 newtp->packets_out = 0;
760 newtp->left_out = 0; 370 newtp->left_out = 0;
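
The fifty-odd deleted lines at the top of tcp_create_openreq_child() -- the raw copy of the parent socket, lock re-initialization, filter charging, xfrm policy cloning, and the "allocate from the parent's slab" comment -- now live behind inet_csk_clone(), where any connection-oriented protocol can share them. In rough userspace terms the pattern is clone-then-reinit; the types and fields below are made up purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct conn { int state; void *bind_hash; unsigned short dport; };

static struct conn *conn_clone(const struct conn *parent,
                               unsigned short rmt_port)
{
        struct conn *child = malloc(sizeof(*child));

        if (child != NULL) {
                *child = *parent;        /* start as a raw copy of the parent  */
                child->state = 2;        /* e.g. SYN_RECV, never LISTEN        */
                child->bind_hash = NULL; /* must not share the bind bucket     */
                child->dport = rmt_port; /* identity comes from the request    */
        }
        return child;
}

int main(void)
{
        struct conn listener = { 1, (void *)&listener, 80 };
        struct conn *child = conn_clone(&listener, 54321);

        printf("child state %d dport %hu\n", child->state, child->dport);
        free(child);
        return 0;
}
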
@@ -774,9 +384,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
774 newtp->frto_counter = 0; 384 newtp->frto_counter = 0;
775 newtp->frto_highmark = 0; 385 newtp->frto_highmark = 0;
776 386
777 newtp->ca_ops = &tcp_reno; 387 newicsk->icsk_ca_ops = &tcp_reno;
778 388
779 tcp_set_ca_state(newtp, TCP_CA_Open); 389 tcp_set_ca_state(newsk, TCP_CA_Open);
780 tcp_init_xmit_timers(newsk); 390 tcp_init_xmit_timers(newsk);
781 skb_queue_head_init(&newtp->out_of_order_queue); 391 skb_queue_head_init(&newtp->out_of_order_queue);
782 newtp->rcv_wup = treq->rcv_isn + 1; 392 newtp->rcv_wup = treq->rcv_isn + 1;
@@ -789,26 +399,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
789 newtp->rx_opt.dsack = 0; 399 newtp->rx_opt.dsack = 0;
790 newtp->rx_opt.eff_sacks = 0; 400 newtp->rx_opt.eff_sacks = 0;
791 401
792 newtp->probes_out = 0;
793 newtp->rx_opt.num_sacks = 0; 402 newtp->rx_opt.num_sacks = 0;
794 newtp->urg_data = 0; 403 newtp->urg_data = 0;
795 /* Deinitialize accept_queue to trap illegal accesses. */
796 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
797
798 /* Back to base struct sock members. */
799 newsk->sk_err = 0;
800 newsk->sk_priority = 0;
801 atomic_set(&newsk->sk_refcnt, 2);
802#ifdef INET_REFCNT_DEBUG
803 atomic_inc(&inet_sock_nr);
804#endif
805 atomic_inc(&tcp_sockets_allocated);
806 404
807 if (sock_flag(newsk, SOCK_KEEPOPEN)) 405 if (sock_flag(newsk, SOCK_KEEPOPEN))
808 tcp_reset_keepalive_timer(newsk, 406 inet_csk_reset_keepalive_timer(newsk,
809 keepalive_time_when(newtp)); 407 keepalive_time_when(newtp));
810 newsk->sk_socket = NULL;
811 newsk->sk_sleep = NULL;
812 408
813 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; 409 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
814 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { 410 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
@@ -838,7 +434,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
838 newtp->tcp_header_len = sizeof(struct tcphdr); 434 newtp->tcp_header_len = sizeof(struct tcphdr);
839 } 435 }
840 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) 436 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
841 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; 437 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
842 newtp->rx_opt.mss_clamp = req->mss; 438 newtp->rx_opt.mss_clamp = req->mss;
843 TCP_ECN_openreq_child(newtp, req); 439 TCP_ECN_openreq_child(newtp, req);
844 if (newtp->ecn_flags&TCP_ECN_OK) 440 if (newtp->ecn_flags&TCP_ECN_OK)
@@ -934,9 +530,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
934 does sequence test, SYN is truncated, and thus we consider 530 does sequence test, SYN is truncated, and thus we consider
935 it a bare ACK. 531 it a bare ACK.
936 532
937 If tp->defer_accept, we silently drop this bare ACK. Otherwise, 533 If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
938 we create an established connection. Both ends (listening sockets) 534 bare ACK. Otherwise, we create an established connection. Both
939 accept the new incoming connection and try to talk to each other. 8-) 535 ends (listening sockets) accept the new incoming connection and try
536 to talk to each other. 8-)
940 537
941 Note: This case is both harmless, and rare. Possibility is about the 538 Note: This case is both harmless, and rare. Possibility is about the
942 same as us discovering intelligent life on another planet tomorrow. 539
@@ -1003,7 +600,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1003 return NULL; 600 return NULL;
1004 601
1005 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ 602 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1006 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 603 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
604 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
1007 inet_rsk(req)->acked = 1; 605 inet_rsk(req)->acked = 1;
1008 return NULL; 606 return NULL;
1009 } 607 }
@@ -1018,10 +616,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1018 if (child == NULL) 616 if (child == NULL)
1019 goto listen_overflow; 617 goto listen_overflow;
1020 618
1021 tcp_synq_unlink(tp, req, prev); 619 inet_csk_reqsk_queue_unlink(sk, req, prev);
1022 tcp_synq_removed(sk, req); 620 inet_csk_reqsk_queue_removed(sk, req);
1023 621
1024 tcp_acceptq_queue(sk, req, child); 622 inet_csk_reqsk_queue_add(sk, req, child);
1025 return child; 623 return child;
1026 624
1027 listen_overflow: 625 listen_overflow:
@@ -1035,7 +633,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1035 if (!(flg & TCP_FLAG_RST)) 633 if (!(flg & TCP_FLAG_RST))
1036 req->rsk_ops->send_reset(skb); 634 req->rsk_ops->send_reset(skb);
1037 635
1038 tcp_synq_drop(sk, req, prev); 636 inet_csk_reqsk_queue_drop(sk, req, prev);
1039 return NULL; 637 return NULL;
1040} 638}
1041 639
@@ -1074,4 +672,3 @@ EXPORT_SYMBOL(tcp_check_req);
1074EXPORT_SYMBOL(tcp_child_process); 672EXPORT_SYMBOL(tcp_child_process);
1075EXPORT_SYMBOL(tcp_create_openreq_child); 673EXPORT_SYMBOL(tcp_create_openreq_child);
1076EXPORT_SYMBOL(tcp_timewait_state_process); 674EXPORT_SYMBOL(tcp_timewait_state_process);
1077EXPORT_SYMBOL(tcp_tw_deschedule);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74..75b68116682a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -105,18 +105,19 @@ static __u16 tcp_advertise_mss(struct sock *sk)
105 105
106/* RFC2861. Reset CWND after idle period longer than RTO to "restart window". 106/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
107 * This is the first part of cwnd validation mechanism. */ 107 * This is the first part of cwnd validation mechanism. */
108static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) 108static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
109{ 109{
110 struct tcp_sock *tp = tcp_sk(sk);
110 s32 delta = tcp_time_stamp - tp->lsndtime; 111 s32 delta = tcp_time_stamp - tp->lsndtime;
111 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 112 u32 restart_cwnd = tcp_init_cwnd(tp, dst);
112 u32 cwnd = tp->snd_cwnd; 113 u32 cwnd = tp->snd_cwnd;
113 114
114 tcp_ca_event(tp, CA_EVENT_CWND_RESTART); 115 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
115 116
116 tp->snd_ssthresh = tcp_current_ssthresh(tp); 117 tp->snd_ssthresh = tcp_current_ssthresh(sk);
117 restart_cwnd = min(restart_cwnd, cwnd); 118 restart_cwnd = min(restart_cwnd, cwnd);
118 119
119 while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd) 120 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
120 cwnd >>= 1; 121 cwnd >>= 1;
121 tp->snd_cwnd = max(cwnd, restart_cwnd); 122 tp->snd_cwnd = max(cwnd, restart_cwnd);
122 tp->snd_cwnd_stamp = tcp_time_stamp; 123 tp->snd_cwnd_stamp = tcp_time_stamp;
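
A worked example of the restart loop above: the connection loses half its congestion window for every full RTO it sat idle, but never ends up below the restart window. With made-up numbers:

#include <stdio.h>

int main(void)
{
        int cwnd = 32, restart_cwnd = 10, rto = 200;
        int delta = 700;  /* ms of idle time since lsndtime */

        while ((delta -= rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;           /* 32 -> 16 -> 8 */
        if (cwnd < restart_cwnd)
                cwnd = restart_cwnd;  /* max(cwnd, restart_cwnd) */

        printf("cwnd after idle: %d\n", cwnd);  /* 10 */
        return 0;
}
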
@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
126static inline void tcp_event_data_sent(struct tcp_sock *tp, 127static inline void tcp_event_data_sent(struct tcp_sock *tp,
127 struct sk_buff *skb, struct sock *sk) 128 struct sk_buff *skb, struct sock *sk)
128{ 129{
129 u32 now = tcp_time_stamp; 130 struct inet_connection_sock *icsk = inet_csk(sk);
131 const u32 now = tcp_time_stamp;
130 132
131 if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto) 133 if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
132 tcp_cwnd_restart(tp, __sk_dst_get(sk)); 134 tcp_cwnd_restart(sk, __sk_dst_get(sk));
133 135
134 tp->lsndtime = now; 136 tp->lsndtime = now;
135 137
136 /* If it is a reply for ato after last received 138 /* If it is a reply for ato after last received
137 * packet, enter pingpong mode. 139 * packet, enter pingpong mode.
138 */ 140 */
139 if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato) 141 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
140 tp->ack.pingpong = 1; 142 icsk->icsk_ack.pingpong = 1;
141} 143}
142 144
143static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 145static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
144{ 146{
145 struct tcp_sock *tp = tcp_sk(sk); 147 tcp_dec_quickack_mode(sk, pkts);
146 148 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
147 tcp_dec_quickack_mode(tp, pkts);
148 tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
149} 149}
150 150
151/* Determine a window scaling and initial window to offer. 151/* Determine a window scaling and initial window to offer.
@@ -265,6 +265,7 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
265static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) 265static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
266{ 266{
267 if (skb != NULL) { 267 if (skb != NULL) {
268 const struct inet_connection_sock *icsk = inet_csk(sk);
268 struct inet_sock *inet = inet_sk(sk); 269 struct inet_sock *inet = inet_sk(sk);
269 struct tcp_sock *tp = tcp_sk(sk); 270 struct tcp_sock *tp = tcp_sk(sk);
270 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 271 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -280,8 +281,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
280#define SYSCTL_FLAG_SACK 0x4 281#define SYSCTL_FLAG_SACK 0x4
281 282
282 /* If congestion control is doing timestamping */ 283 /* If congestion control is doing timestamping */
283 if (tp->ca_ops->rtt_sample) 284 if (icsk->icsk_ca_ops->rtt_sample)
284 do_gettimeofday(&skb->stamp); 285 __net_timestamp(skb);
285 286
286 sysctl_flags = 0; 287 sysctl_flags = 0;
287 if (tcb->flags & TCPCB_FLAG_SYN) { 288 if (tcb->flags & TCPCB_FLAG_SYN) {
@@ -308,7 +309,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
308 } 309 }
309 310
310 if (tcp_packets_in_flight(tp) == 0) 311 if (tcp_packets_in_flight(tp) == 0)
311 tcp_ca_event(tp, CA_EVENT_TX_START); 312 tcp_ca_event(sk, CA_EVENT_TX_START);
312 313
313 th = (struct tcphdr *) skb_push(skb, tcp_header_size); 314 th = (struct tcphdr *) skb_push(skb, tcp_header_size);
314 skb->h.th = th; 315 skb->h.th = th;
@@ -366,7 +367,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
366 if (err <= 0) 367 if (err <= 0)
367 return err; 368 return err;
368 369
369 tcp_enter_cwr(tp); 370 tcp_enter_cwr(sk);
370 371
371 /* NET_XMIT_CN is special. It does not guarantee, 372 /* NET_XMIT_CN is special. It does not guarantee,
372 * that this packet is lost. It tells that device 373 * that this packet is lost. It tells that device
@@ -482,7 +483,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
482 * skbs, which it never sent before. --ANK 483 * skbs, which it never sent before. --ANK
483 */ 484 */
484 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 485 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
485 buff->stamp = skb->stamp; 486 buff->tstamp = skb->tstamp;
486 487
487 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { 488 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
488 tp->lost_out -= tcp_skb_pcount(skb); 489 tp->lost_out -= tcp_skb_pcount(skb);
@@ -505,7 +506,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
505 506
506 /* Link BUFF into the send queue. */ 507 /* Link BUFF into the send queue. */
507 skb_header_release(buff); 508 skb_header_release(buff);
508 __skb_append(skb, buff); 509 __skb_append(skb, buff, &sk->sk_write_queue);
509 510
510 return 0; 511 return 0;
511} 512}
@@ -696,7 +697,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
696 if (tp->packets_out > tp->snd_cwnd_used) 697 if (tp->packets_out > tp->snd_cwnd_used)
697 tp->snd_cwnd_used = tp->packets_out; 698 tp->snd_cwnd_used = tp->packets_out;
698 699
699 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto) 700 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
700 tcp_cwnd_application_limited(sk); 701 tcp_cwnd_application_limited(sk);
701 } 702 }
702} 703}
@@ -893,7 +894,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
893 894
894 /* Link BUFF into the send queue. */ 895 /* Link BUFF into the send queue. */
895 skb_header_release(buff); 896 skb_header_release(buff);
896 __skb_append(skb, buff); 897 __skb_append(skb, buff, &sk->sk_write_queue);
897 898
898 return 0; 899 return 0;
899} 900}
@@ -905,12 +906,13 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
905 */ 906 */
906static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) 907static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
907{ 908{
909 const struct inet_connection_sock *icsk = inet_csk(sk);
908 u32 send_win, cong_win, limit, in_flight; 910 u32 send_win, cong_win, limit, in_flight;
909 911
910 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 912 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
911 return 0; 913 return 0;
912 914
913 if (tp->ca_state != TCP_CA_Open) 915 if (icsk->icsk_ca_state != TCP_CA_Open)
914 return 0; 916 return 0;
915 917
916 in_flight = tcp_packets_in_flight(tp); 918 in_flight = tcp_packets_in_flight(tp);
@@ -1147,6 +1149,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
1147 */ 1149 */
1148u32 __tcp_select_window(struct sock *sk) 1150u32 __tcp_select_window(struct sock *sk)
1149{ 1151{
1152 struct inet_connection_sock *icsk = inet_csk(sk);
1150 struct tcp_sock *tp = tcp_sk(sk); 1153 struct tcp_sock *tp = tcp_sk(sk);
1151 /* MSS for the peer's data. Previous versions used mss_clamp 1154 /* MSS for the peer's data. Previous versions used mss_clamp
1152 * here. I don't know if the value based on our guesses 1155 * here. I don't know if the value based on our guesses
@@ -1154,7 +1157,7 @@ u32 __tcp_select_window(struct sock *sk)
1154 * but may be worse for the performance because of rcv_mss 1157 * but may be worse for the performance because of rcv_mss
1155 * fluctuations. --SAW 1998/11/1 1158 * fluctuations. --SAW 1998/11/1
1156 */ 1159 */
1157 int mss = tp->ack.rcv_mss; 1160 int mss = icsk->icsk_ack.rcv_mss;
1158 int free_space = tcp_space(sk); 1161 int free_space = tcp_space(sk);
1159 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1162 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1160 int window; 1163 int window;
@@ -1163,7 +1166,7 @@ u32 __tcp_select_window(struct sock *sk)
1163 mss = full_space; 1166 mss = full_space;
1164 1167
1165 if (free_space < full_space/2) { 1168 if (free_space < full_space/2) {
1166 tp->ack.quick = 0; 1169 icsk->icsk_ack.quick = 0;
1167 1170
1168 if (tcp_memory_pressure) 1171 if (tcp_memory_pressure)
1169 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); 1172 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
@@ -1238,7 +1241,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1238 tcp_skb_pcount(next_skb) != 1); 1241 tcp_skb_pcount(next_skb) != 1);
1239 1242
1240 /* Ok. We will be able to collapse the packet. */ 1243 /* Ok. We will be able to collapse the packet. */
1241 __skb_unlink(next_skb, next_skb->list); 1244 __skb_unlink(next_skb, &sk->sk_write_queue);
1242 1245
1243 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); 1246 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1244 1247
@@ -1286,6 +1289,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1286 */ 1289 */
1287void tcp_simple_retransmit(struct sock *sk) 1290void tcp_simple_retransmit(struct sock *sk)
1288{ 1291{
1292 const struct inet_connection_sock *icsk = inet_csk(sk);
1289 struct tcp_sock *tp = tcp_sk(sk); 1293 struct tcp_sock *tp = tcp_sk(sk);
1290 struct sk_buff *skb; 1294 struct sk_buff *skb;
1291 unsigned int mss = tcp_current_mss(sk, 0); 1295 unsigned int mss = tcp_current_mss(sk, 0);
@@ -1316,12 +1320,12 @@ void tcp_simple_retransmit(struct sock *sk)
1316 * in network, but units changed and effective 1320 * in network, but units changed and effective
1317 * cwnd/ssthresh really reduced now. 1321 * cwnd/ssthresh really reduced now.
1318 */ 1322 */
1319 if (tp->ca_state != TCP_CA_Loss) { 1323 if (icsk->icsk_ca_state != TCP_CA_Loss) {
1320 tp->high_seq = tp->snd_nxt; 1324 tp->high_seq = tp->snd_nxt;
1321 tp->snd_ssthresh = tcp_current_ssthresh(tp); 1325 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1322 tp->prior_ssthresh = 0; 1326 tp->prior_ssthresh = 0;
1323 tp->undo_marker = 0; 1327 tp->undo_marker = 0;
1324 tcp_set_ca_state(tp, TCP_CA_Loss); 1328 tcp_set_ca_state(sk, TCP_CA_Loss);
1325 } 1329 }
1326 tcp_xmit_retransmit_queue(sk); 1330 tcp_xmit_retransmit_queue(sk);
1327} 1331}
@@ -1461,6 +1465,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1461 */ 1465 */
1462void tcp_xmit_retransmit_queue(struct sock *sk) 1466void tcp_xmit_retransmit_queue(struct sock *sk)
1463{ 1467{
1468 const struct inet_connection_sock *icsk = inet_csk(sk);
1464 struct tcp_sock *tp = tcp_sk(sk); 1469 struct tcp_sock *tp = tcp_sk(sk);
1465 struct sk_buff *skb; 1470 struct sk_buff *skb;
1466 int packet_cnt = tp->lost_out; 1471 int packet_cnt = tp->lost_out;
@@ -1484,14 +1489,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1484 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 1489 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1485 if (tcp_retransmit_skb(sk, skb)) 1490 if (tcp_retransmit_skb(sk, skb))
1486 return; 1491 return;
1487 if (tp->ca_state != TCP_CA_Loss) 1492 if (icsk->icsk_ca_state != TCP_CA_Loss)
1488 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 1493 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1489 else 1494 else
1490 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 1495 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1491 1496
1492 if (skb == 1497 if (skb ==
1493 skb_peek(&sk->sk_write_queue)) 1498 skb_peek(&sk->sk_write_queue))
1494 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1499 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1500 inet_csk(sk)->icsk_rto,
1501 TCP_RTO_MAX);
1495 } 1502 }
1496 1503
1497 packet_cnt -= tcp_skb_pcount(skb); 1504 packet_cnt -= tcp_skb_pcount(skb);
@@ -1504,7 +1511,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1504 /* OK, demanded retransmission is finished. */ 1511 /* OK, demanded retransmission is finished. */
1505 1512
1506 /* Forward retransmissions are possible only during Recovery. */ 1513 /* Forward retransmissions are possible only during Recovery. */
1507 if (tp->ca_state != TCP_CA_Recovery) 1514 if (icsk->icsk_ca_state != TCP_CA_Recovery)
1508 return; 1515 return;
1509 1516
1510 /* No forward retransmissions in Reno are possible. */ 1517 /* No forward retransmissions in Reno are possible. */
@@ -1544,7 +1551,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1544 break; 1551 break;
1545 1552
1546 if (skb == skb_peek(&sk->sk_write_queue)) 1553 if (skb == skb_peek(&sk->sk_write_queue))
1547 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1554 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1555 inet_csk(sk)->icsk_rto,
1556 TCP_RTO_MAX);
1548 1557
1549 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 1558 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
1550 } 1559 }
@@ -1573,7 +1582,7 @@ void tcp_send_fin(struct sock *sk)
1573 } else { 1582 } else {
1574 /* Socket is locked, keep trying until memory is available. */ 1583 /* Socket is locked, keep trying until memory is available. */
1575 for (;;) { 1584 for (;;) {
1576 skb = alloc_skb(MAX_TCP_HEADER, GFP_KERNEL); 1585 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
1577 if (skb) 1586 if (skb)
1578 break; 1587 break;
1579 yield(); 1588 yield();
@@ -1780,8 +1789,8 @@ static inline void tcp_connect_init(struct sock *sk)
1780 tp->rcv_wup = 0; 1789 tp->rcv_wup = 0;
1781 tp->copied_seq = 0; 1790 tp->copied_seq = 0;
1782 1791
1783 tp->rto = TCP_TIMEOUT_INIT; 1792 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
1784 tp->retransmits = 0; 1793 inet_csk(sk)->icsk_retransmits = 0;
1785 tcp_clear_retrans(tp); 1794 tcp_clear_retrans(tp);
1786} 1795}
1787 1796
@@ -1795,7 +1804,7 @@ int tcp_connect(struct sock *sk)
1795 1804
1796 tcp_connect_init(sk); 1805 tcp_connect_init(sk);
1797 1806
1798 buff = alloc_skb(MAX_TCP_HEADER + 15, sk->sk_allocation); 1807 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
1799 if (unlikely(buff == NULL)) 1808 if (unlikely(buff == NULL))
1800 return -ENOBUFS; 1809 return -ENOBUFS;
1801 1810
@@ -1824,7 +1833,8 @@ int tcp_connect(struct sock *sk)
1824 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 1833 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
1825 1834
1826 /* Timer for repeating the SYN until an answer. */ 1835 /* Timer for repeating the SYN until an answer. */
1827 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1836 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1837 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
1828 return 0; 1838 return 0;
1829} 1839}
1830 1840
@@ -1834,20 +1844,21 @@ int tcp_connect(struct sock *sk)
1834 */ 1844 */
1835void tcp_send_delayed_ack(struct sock *sk) 1845void tcp_send_delayed_ack(struct sock *sk)
1836{ 1846{
1837 struct tcp_sock *tp = tcp_sk(sk); 1847 struct inet_connection_sock *icsk = inet_csk(sk);
1838 int ato = tp->ack.ato; 1848 int ato = icsk->icsk_ack.ato;
1839 unsigned long timeout; 1849 unsigned long timeout;
1840 1850
1841 if (ato > TCP_DELACK_MIN) { 1851 if (ato > TCP_DELACK_MIN) {
1852 const struct tcp_sock *tp = tcp_sk(sk);
1842 int max_ato = HZ/2; 1853 int max_ato = HZ/2;
1843 1854
1844 if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED)) 1855 if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
1845 max_ato = TCP_DELACK_MAX; 1856 max_ato = TCP_DELACK_MAX;
1846 1857
1847 /* Slow path, intersegment interval is "high". */ 1858 /* Slow path, intersegment interval is "high". */
1848 1859
1849 /* If some rtt estimate is known, use it to bound delayed ack. 1860 /* If some rtt estimate is known, use it to bound delayed ack.
1850 * Do not use tp->rto here, use results of rtt measurements 1861 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
1851 * directly. 1862 * directly.
1852 */ 1863 */
1853 if (tp->srtt) { 1864 if (tp->srtt) {
@@ -1864,21 +1875,22 @@ void tcp_send_delayed_ack(struct sock *sk)
1864 timeout = jiffies + ato; 1875 timeout = jiffies + ato;
1865 1876
1866 /* Use new timeout only if there wasn't an older one earlier. */ 1877 /* Use new timeout only if there wasn't an older one earlier. */
1867 if (tp->ack.pending&TCP_ACK_TIMER) { 1878 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
1868 /* If delack timer was blocked or is about to expire, 1879 /* If delack timer was blocked or is about to expire,
1869 * send ACK now. 1880 * send ACK now.
1870 */ 1881 */
1871 if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) { 1882 if (icsk->icsk_ack.blocked ||
1883 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
1872 tcp_send_ack(sk); 1884 tcp_send_ack(sk);
1873 return; 1885 return;
1874 } 1886 }
1875 1887
1876 if (!time_before(timeout, tp->ack.timeout)) 1888 if (!time_before(timeout, icsk->icsk_ack.timeout))
1877 timeout = tp->ack.timeout; 1889 timeout = icsk->icsk_ack.timeout;
1878 } 1890 }
1879 tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER; 1891 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
1880 tp->ack.timeout = timeout; 1892 icsk->icsk_ack.timeout = timeout;
1881 sk_reset_timer(sk, &tp->delack_timer, timeout); 1893 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
1882} 1894}
1883 1895
1884/* This routine sends an ack and also updates the window. */ 1896/* This routine sends an ack and also updates the window. */
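Two rules govern tcp_send_delayed_ack() above: a blocked or nearly expired timer forces an immediate ACK, and an already pending deadline may only move earlier, never later. A minimal sketch of the second rule, assuming a fake clock in place of jiffies:

#include <stdbool.h>
#include <stdio.h>

static unsigned long now = 1000;	/* stand-in for jiffies */

static bool before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;	/* wrap-safe comparison */
}

static unsigned long merge_timeout(bool pending, unsigned long old_deadline,
				   unsigned long ato)
{
	unsigned long deadline = now + ato;

	/* Keep the earlier of the two deadlines if one is pending. */
	if (pending && !before(deadline, old_deadline))
		deadline = old_deadline;
	return deadline;
}

int main(void)
{
	printf("%lu\n", merge_timeout(true, 1050, 200));	/* keeps 1050 */
	printf("%lu\n", merge_timeout(true, 1500, 200));	/* takes 1200 */
	return 0;
}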
@@ -1895,9 +1907,10 @@ void tcp_send_ack(struct sock *sk)
1895 */ 1907 */
1896 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 1908 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
1897 if (buff == NULL) { 1909 if (buff == NULL) {
1898 tcp_schedule_ack(tp); 1910 inet_csk_schedule_ack(sk);
1899 tp->ack.ato = TCP_ATO_MIN; 1911 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
1900 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); 1912 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1913 TCP_DELACK_MAX, TCP_RTO_MAX);
1901 return; 1914 return;
1902 } 1915 }
1903 1916
@@ -2011,6 +2024,7 @@ int tcp_write_wakeup(struct sock *sk)
2011 */ 2024 */
2012void tcp_send_probe0(struct sock *sk) 2025void tcp_send_probe0(struct sock *sk)
2013{ 2026{
2027 struct inet_connection_sock *icsk = inet_csk(sk);
2014 struct tcp_sock *tp = tcp_sk(sk); 2028 struct tcp_sock *tp = tcp_sk(sk);
2015 int err; 2029 int err;
2016 2030
@@ -2018,28 +2032,31 @@ void tcp_send_probe0(struct sock *sk)
2018 2032
2019 if (tp->packets_out || !sk->sk_send_head) { 2033 if (tp->packets_out || !sk->sk_send_head) {
2020 /* Cancel probe timer, if it is not required. */ 2034 /* Cancel probe timer, if it is not required. */
2021 tp->probes_out = 0; 2035 icsk->icsk_probes_out = 0;
2022 tp->backoff = 0; 2036 icsk->icsk_backoff = 0;
2023 return; 2037 return;
2024 } 2038 }
2025 2039
2026 if (err <= 0) { 2040 if (err <= 0) {
2027 if (tp->backoff < sysctl_tcp_retries2) 2041 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2028 tp->backoff++; 2042 icsk->icsk_backoff++;
2029 tp->probes_out++; 2043 icsk->icsk_probes_out++;
2030 tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 2044 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2031 min(tp->rto << tp->backoff, TCP_RTO_MAX)); 2045 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2046 TCP_RTO_MAX);
2032 } else { 2047 } else {
2033 /* If packet was not sent due to local congestion, 2048 /* If packet was not sent due to local congestion,
2034 * do not backoff and do not remember probes_out. 2049 * do not backoff and do not remember icsk_probes_out.
2035 * Let local senders fight for local resources. 2050 * Let local senders fight for local resources.
2036 * 2051 *
2037 * Still use the accumulated backoff, though. 2052 * Still use the accumulated backoff, though.
2038 */ 2053 */
2039 if (!tp->probes_out) 2054 if (!icsk->icsk_probes_out)
2040 tp->probes_out=1; 2055 icsk->icsk_probes_out = 1;
2041 tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 2056 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2042 min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL)); 2057 min(icsk->icsk_rto << icsk->icsk_backoff,
2058 TCP_RESOURCE_PROBE_INTERVAL),
2059 TCP_RTO_MAX);
2043 } 2060 }
2044} 2061}
2045 2062
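The two rearm paths in tcp_send_probe0() differ only in their ceiling: a sent (or refused) probe backs off toward TCP_RTO_MAX, while a failure due to local congestion keeps the accumulated backoff but retries within the shorter resource-probe interval. Compressed into a sketch with illustrative values:

#include <stdio.h>

#define RTO_MAX			120	/* illustrative, seconds */
#define RESOURCE_PROBE_INTERVAL	30	/* illustrative, seconds */

static unsigned int probe0_timeout(unsigned int rto, unsigned int backoff,
				   int local_congestion)
{
	unsigned int t = rto << backoff;
	unsigned int cap = local_congestion ? RESOURCE_PROBE_INTERVAL : RTO_MAX;

	return t < cap ? t : cap;	/* min(rto << backoff, cap) */
}

int main(void)
{
	printf("normal: %us, congested: %us\n",
	       probe0_timeout(3, 6, 0), probe0_timeout(3, 6, 1));
	return 0;
}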
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 70e108e15c71..327770bf5522 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -16,9 +16,10 @@
16#define TCP_SCALABLE_AI_CNT 50U 16#define TCP_SCALABLE_AI_CNT 50U
17#define TCP_SCALABLE_MD_SCALE 3 17#define TCP_SCALABLE_MD_SCALE 3
18 18
19static void tcp_scalable_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 19static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
20 u32 in_flight, int flag) 20 u32 in_flight, int flag)
21{ 21{
22 struct tcp_sock *tp = tcp_sk(sk);
22 if (in_flight < tp->snd_cwnd) 23 if (in_flight < tp->snd_cwnd)
23 return; 24 return;
24 25
@@ -35,8 +36,9 @@ static void tcp_scalable_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
35 tp->snd_cwnd_stamp = tcp_time_stamp; 36 tp->snd_cwnd_stamp = tcp_time_stamp;
36} 37}
37 38
38static u32 tcp_scalable_ssthresh(struct tcp_sock *tp) 39static u32 tcp_scalable_ssthresh(struct sock *sk)
39{ 40{
41 const struct tcp_sock *tp = tcp_sk(sk);
40 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); 42 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
41} 43}
42 44
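The signature change here, tcp_scalable_ssthresh() taking struct sock instead of struct tcp_sock, runs through every congestion module in this series: callbacks receive the generic socket and derive both TCP state and their private scratch area from it. A simplified stand-in using the same back-off-by-1/8, floor-of-2 rule; none of these types are the kernel's:

#include <stdio.h>
#include <string.h>

struct tcp_state { unsigned int snd_cwnd; };
struct sock {
	struct tcp_state tcp;
	char ca_priv[16];	/* models icsk_ca_priv / ICSK_CA_PRIV_SIZE */
};

static const struct tcp_state *tcp_state(const struct sock *sk)
{
	return &sk->tcp;
}

static void *ca_priv(struct sock *sk)	/* models inet_csk_ca() */
{
	return sk->ca_priv;
}

static unsigned int scalable_like_ssthresh(struct sock *sk)
{
	const struct tcp_state *tp = tcp_state(sk);
	unsigned int cw = tp->snd_cwnd - (tp->snd_cwnd >> 3);

	return cw > 2 ? cw : 2;		/* never below two packets */
}

int main(void)
{
	struct sock sk = { .tcp = { .snd_cwnd = 40 } };

	memset(ca_priv(&sk), 0, sizeof(sk.ca_priv));
	printf("ssthresh = %u\n", scalable_like_ssthresh(&sk));
	return 0;
}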
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0084227438c2..415ee47ac1c5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -36,49 +36,13 @@ static void tcp_write_timer(unsigned long);
36static void tcp_delack_timer(unsigned long); 36static void tcp_delack_timer(unsigned long);
37static void tcp_keepalive_timer (unsigned long data); 37static void tcp_keepalive_timer (unsigned long data);
38 38
39#ifdef TCP_DEBUG
40const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
41EXPORT_SYMBOL(tcp_timer_bug_msg);
42#endif
43
44/*
45 * Using different timers for retransmit, delayed acks and probes
46 * We may wish to use just one timer maintaining a list of expire jiffies
47 * to optimize.
48 */
49
50void tcp_init_xmit_timers(struct sock *sk) 39void tcp_init_xmit_timers(struct sock *sk)
51{ 40{
52 struct tcp_sock *tp = tcp_sk(sk); 41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
53 42 &tcp_keepalive_timer);
54 init_timer(&tp->retransmit_timer);
55 tp->retransmit_timer.function=&tcp_write_timer;
56 tp->retransmit_timer.data = (unsigned long) sk;
57 tp->pending = 0;
58
59 init_timer(&tp->delack_timer);
60 tp->delack_timer.function=&tcp_delack_timer;
61 tp->delack_timer.data = (unsigned long) sk;
62 tp->ack.pending = 0;
63
64 init_timer(&sk->sk_timer);
65 sk->sk_timer.function = &tcp_keepalive_timer;
66 sk->sk_timer.data = (unsigned long)sk;
67} 43}
68 44
69void tcp_clear_xmit_timers(struct sock *sk) 45EXPORT_SYMBOL(tcp_init_xmit_timers);
70{
71 struct tcp_sock *tp = tcp_sk(sk);
72
73 tp->pending = 0;
74 sk_stop_timer(sk, &tp->retransmit_timer);
75
76 tp->ack.pending = 0;
77 tp->ack.blocked = 0;
78 sk_stop_timer(sk, &tp->delack_timer);
79
80 sk_stop_timer(sk, &sk->sk_timer);
81}
82 46
83static void tcp_write_err(struct sock *sk) 47static void tcp_write_err(struct sock *sk)
84{ 48{
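The deleted block shows exactly what inet_csk_init_xmit_timers() absorbs: three open-coded init_timer() setups plus the matching teardown in tcp_clear_xmit_timers(). A plain-C sketch of the consolidation, with fake_* types invented in place of the kernel's timer and sock structures:

#include <stdio.h>

typedef void (*timer_fn)(unsigned long);

struct fake_timer { timer_fn function; unsigned long data; };
struct fake_sock {
	struct fake_timer retransmit_timer, delack_timer, keepalive_timer;
};

/* One shared initializer replaces the per-protocol triples. */
static void init_xmit_timers(struct fake_sock *sk, timer_fn retransmit,
			     timer_fn delack, timer_fn keepalive)
{
	sk->retransmit_timer = (struct fake_timer){ retransmit, (unsigned long)sk };
	sk->delack_timer     = (struct fake_timer){ delack, (unsigned long)sk };
	sk->keepalive_timer  = (struct fake_timer){ keepalive, (unsigned long)sk };
}

static void on_timer(unsigned long data)
{
	printf("timer fired for sock at %#lx\n", data);
}

int main(void)
{
	struct fake_sock sk;

	init_xmit_timers(&sk, on_timer, on_timer, on_timer);
	sk.delack_timer.function(sk.delack_timer.data);
	return 0;
}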
@@ -155,15 +119,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
155/* A write timeout has occurred. Process the after effects. */ 119/* A write timeout has occurred. Process the after effects. */
156static int tcp_write_timeout(struct sock *sk) 120static int tcp_write_timeout(struct sock *sk)
157{ 121{
158 struct tcp_sock *tp = tcp_sk(sk); 122 const struct inet_connection_sock *icsk = inet_csk(sk);
159 int retry_until; 123 int retry_until;
160 124
161 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 125 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
162 if (tp->retransmits) 126 if (icsk->icsk_retransmits)
163 dst_negative_advice(&sk->sk_dst_cache); 127 dst_negative_advice(&sk->sk_dst_cache);
164 retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries; 128 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
165 } else { 129 } else {
166 if (tp->retransmits >= sysctl_tcp_retries1) { 130 if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
167 /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black 131 /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
168 hole detection. :-( 132 hole detection. :-(
169 133
@@ -189,16 +153,16 @@ static int tcp_write_timeout(struct sock *sk)
189 153
190 retry_until = sysctl_tcp_retries2; 154 retry_until = sysctl_tcp_retries2;
191 if (sock_flag(sk, SOCK_DEAD)) { 155 if (sock_flag(sk, SOCK_DEAD)) {
192 int alive = (tp->rto < TCP_RTO_MAX); 156 const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
193 157
194 retry_until = tcp_orphan_retries(sk, alive); 158 retry_until = tcp_orphan_retries(sk, alive);
195 159
196 if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until)) 160 if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
197 return 1; 161 return 1;
198 } 162 }
199 } 163 }
200 164
201 if (tp->retransmits >= retry_until) { 165 if (icsk->icsk_retransmits >= retry_until) {
202 /* Has it gone just too far? */ 166 /* Has it gone just too far? */
203 tcp_write_err(sk); 167 tcp_write_err(sk);
204 return 1; 168 return 1;
@@ -210,26 +174,27 @@ static void tcp_delack_timer(unsigned long data)
210{ 174{
211 struct sock *sk = (struct sock*)data; 175 struct sock *sk = (struct sock*)data;
212 struct tcp_sock *tp = tcp_sk(sk); 176 struct tcp_sock *tp = tcp_sk(sk);
177 struct inet_connection_sock *icsk = inet_csk(sk);
213 178
214 bh_lock_sock(sk); 179 bh_lock_sock(sk);
215 if (sock_owned_by_user(sk)) { 180 if (sock_owned_by_user(sk)) {
216 /* Try again later. */ 181 /* Try again later. */
217 tp->ack.blocked = 1; 182 icsk->icsk_ack.blocked = 1;
218 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); 183 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
219 sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN); 184 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
220 goto out_unlock; 185 goto out_unlock;
221 } 186 }
222 187
223 sk_stream_mem_reclaim(sk); 188 sk_stream_mem_reclaim(sk);
224 189
225 if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER)) 190 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
226 goto out; 191 goto out;
227 192
228 if (time_after(tp->ack.timeout, jiffies)) { 193 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
229 sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); 194 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
230 goto out; 195 goto out;
231 } 196 }
232 tp->ack.pending &= ~TCP_ACK_TIMER; 197 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
233 198
234 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 199 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
235 struct sk_buff *skb; 200 struct sk_buff *skb;
@@ -242,16 +207,16 @@ static void tcp_delack_timer(unsigned long data)
242 tp->ucopy.memory = 0; 207 tp->ucopy.memory = 0;
243 } 208 }
244 209
245 if (tcp_ack_scheduled(tp)) { 210 if (inet_csk_ack_scheduled(sk)) {
246 if (!tp->ack.pingpong) { 211 if (!icsk->icsk_ack.pingpong) {
247 /* Delayed ACK missed: inflate ATO. */ 212 /* Delayed ACK missed: inflate ATO. */
248 tp->ack.ato = min(tp->ack.ato << 1, tp->rto); 213 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
249 } else { 214 } else {
250 /* Delayed ACK missed: leave pingpong mode and 215 /* Delayed ACK missed: leave pingpong mode and
251 * deflate ATO. 216 * deflate ATO.
252 */ 217 */
253 tp->ack.pingpong = 0; 218 icsk->icsk_ack.pingpong = 0;
254 tp->ack.ato = TCP_ATO_MIN; 219 icsk->icsk_ack.ato = TCP_ATO_MIN;
255 } 220 }
256 tcp_send_ack(sk); 221 tcp_send_ack(sk);
257 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); 222 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
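The inflate/deflate branches above adjust the ACK timeout estimate whenever a delayed ACK is missed: bulk mode doubles the estimate up to the RTO, interactive (pingpong) mode drops back to the floor and leaves pingpong mode. In sketch form, with made-up constants:

#include <stdio.h>

#define ATO_MIN	40	/* illustrative floor, ms */
#define RTO	200	/* illustrative ceiling, ms */

static unsigned int delack_missed(unsigned int ato, int *pingpong)
{
	if (!*pingpong) {
		unsigned int doubled = ato << 1;	/* inflate */

		return doubled < RTO ? doubled : RTO;
	}
	*pingpong = 0;		/* leave interactive mode */
	return ATO_MIN;		/* deflate to the floor */
}

int main(void)
{
	int pingpong = 1;
	unsigned int ato = delack_missed(80, &pingpong);

	printf("ato = %u, pingpong = %d\n", ato, pingpong);
	return 0;
}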
@@ -268,11 +233,12 @@ out_unlock:
268 233
269static void tcp_probe_timer(struct sock *sk) 234static void tcp_probe_timer(struct sock *sk)
270{ 235{
236 struct inet_connection_sock *icsk = inet_csk(sk);
271 struct tcp_sock *tp = tcp_sk(sk); 237 struct tcp_sock *tp = tcp_sk(sk);
272 int max_probes; 238 int max_probes;
273 239
274 if (tp->packets_out || !sk->sk_send_head) { 240 if (tp->packets_out || !sk->sk_send_head) {
275 tp->probes_out = 0; 241 icsk->icsk_probes_out = 0;
276 return; 242 return;
277 } 243 }
278 244
@@ -283,7 +249,7 @@ static void tcp_probe_timer(struct sock *sk)
283 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing 249 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
284 * this behaviour in Solaris down as a bug fix. [AC] 250 * this behaviour in Solaris down as a bug fix. [AC]
285 * 251 *
286 * Let me explain. probes_out is zeroed by incoming ACKs 252 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
287 * even if they advertise zero window. Hence, connection is killed only 253 * even if they advertise zero window. Hence, connection is killed only
288 * if we received no ACKs for normal connection timeout. It is not killed 254 * if we received no ACKs for normal connection timeout. It is not killed
289 * only because window stays zero for some time, window may be zero 255 * only because window stays zero for some time, window may be zero
@@ -294,15 +260,15 @@ static void tcp_probe_timer(struct sock *sk)
294 max_probes = sysctl_tcp_retries2; 260 max_probes = sysctl_tcp_retries2;
295 261
296 if (sock_flag(sk, SOCK_DEAD)) { 262 if (sock_flag(sk, SOCK_DEAD)) {
297 int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX); 263 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
298 264
299 max_probes = tcp_orphan_retries(sk, alive); 265 max_probes = tcp_orphan_retries(sk, alive);
300 266
301 if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes)) 267 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
302 return; 268 return;
303 } 269 }
304 270
305 if (tp->probes_out > max_probes) { 271 if (icsk->icsk_probes_out > max_probes) {
306 tcp_write_err(sk); 272 tcp_write_err(sk);
307 } else { 273 } else {
308 /* Only send another probe if we didn't close things up. */ 274 /* Only send another probe if we didn't close things up. */
@@ -317,6 +283,7 @@ static void tcp_probe_timer(struct sock *sk)
317static void tcp_retransmit_timer(struct sock *sk) 283static void tcp_retransmit_timer(struct sock *sk)
318{ 284{
319 struct tcp_sock *tp = tcp_sk(sk); 285 struct tcp_sock *tp = tcp_sk(sk);
286 struct inet_connection_sock *icsk = inet_csk(sk);
320 287
321 if (!tp->packets_out) 288 if (!tp->packets_out)
322 goto out; 289 goto out;
@@ -351,20 +318,21 @@ static void tcp_retransmit_timer(struct sock *sk)
351 if (tcp_write_timeout(sk)) 318 if (tcp_write_timeout(sk))
352 goto out; 319 goto out;
353 320
354 if (tp->retransmits == 0) { 321 if (icsk->icsk_retransmits == 0) {
355 if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { 322 if (icsk->icsk_ca_state == TCP_CA_Disorder ||
323 icsk->icsk_ca_state == TCP_CA_Recovery) {
356 if (tp->rx_opt.sack_ok) { 324 if (tp->rx_opt.sack_ok) {
357 if (tp->ca_state == TCP_CA_Recovery) 325 if (icsk->icsk_ca_state == TCP_CA_Recovery)
358 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 326 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
359 else 327 else
360 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 328 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
361 } else { 329 } else {
362 if (tp->ca_state == TCP_CA_Recovery) 330 if (icsk->icsk_ca_state == TCP_CA_Recovery)
363 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 331 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
364 else 332 else
365 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 333 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
366 } 334 }
367 } else if (tp->ca_state == TCP_CA_Loss) { 335 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
368 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 336 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
369 } else { 337 } else {
370 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 338 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
@@ -381,10 +349,11 @@ static void tcp_retransmit_timer(struct sock *sk)
381 /* Retransmission failed because of local congestion, 349 /* Retransmission failed because of local congestion,
382 * do not backoff. 350 * do not backoff.
383 */ 351 */
384 if (!tp->retransmits) 352 if (!icsk->icsk_retransmits)
385 tp->retransmits=1; 353 icsk->icsk_retransmits = 1;
386 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, 354 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
387 min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL)); 355 min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
356 TCP_RTO_MAX);
388 goto out; 357 goto out;
389 } 358 }
390 359
@@ -403,13 +372,13 @@ static void tcp_retransmit_timer(struct sock *sk)
403 * implemented ftp to mars will work nicely. We will have to fix 372 * implemented ftp to mars will work nicely. We will have to fix
404 * the 120 second clamps though! 373 * the 120 second clamps though!
405 */ 374 */
406 tp->backoff++; 375 icsk->icsk_backoff++;
407 tp->retransmits++; 376 icsk->icsk_retransmits++;
408 377
409out_reset_timer: 378out_reset_timer:
410 tp->rto = min(tp->rto << 1, TCP_RTO_MAX); 379 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
411 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 380 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
412 if (tp->retransmits > sysctl_tcp_retries1) 381 if (icsk->icsk_retransmits > sysctl_tcp_retries1)
413 __sk_dst_reset(sk); 382 __sk_dst_reset(sk);
414 383
415out:; 384out:;
@@ -418,32 +387,32 @@ out:;
418static void tcp_write_timer(unsigned long data) 387static void tcp_write_timer(unsigned long data)
419{ 388{
420 struct sock *sk = (struct sock*)data; 389 struct sock *sk = (struct sock*)data;
421 struct tcp_sock *tp = tcp_sk(sk); 390 struct inet_connection_sock *icsk = inet_csk(sk);
422 int event; 391 int event;
423 392
424 bh_lock_sock(sk); 393 bh_lock_sock(sk);
425 if (sock_owned_by_user(sk)) { 394 if (sock_owned_by_user(sk)) {
426 /* Try again later */ 395 /* Try again later */
427 sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20)); 396 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
428 goto out_unlock; 397 goto out_unlock;
429 } 398 }
430 399
431 if (sk->sk_state == TCP_CLOSE || !tp->pending) 400 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
432 goto out; 401 goto out;
433 402
434 if (time_after(tp->timeout, jiffies)) { 403 if (time_after(icsk->icsk_timeout, jiffies)) {
435 sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); 404 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
436 goto out; 405 goto out;
437 } 406 }
438 407
439 event = tp->pending; 408 event = icsk->icsk_pending;
440 tp->pending = 0; 409 icsk->icsk_pending = 0;
441 410
442 switch (event) { 411 switch (event) {
443 case TCP_TIME_RETRANS: 412 case ICSK_TIME_RETRANS:
444 tcp_retransmit_timer(sk); 413 tcp_retransmit_timer(sk);
445 break; 414 break;
446 case TCP_TIME_PROBE0: 415 case ICSK_TIME_PROBE0:
447 tcp_probe_timer(sk); 416 tcp_probe_timer(sk);
448 break; 417 break;
449 } 418 }
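tcp_write_timer() keeps its shape through the conversion: one timer serves both events, and the pending flag is consumed before dispatch so a handler is free to rearm it. Schematically, with invented enum values:

#include <stdio.h>

enum { TIME_RETRANS = 1, TIME_PROBE0 = 3 };	/* illustrative only */

static void retransmit(void) { puts("retransmit path"); }
static void probe0(void)     { puts("zero-window probe path"); }

static void write_timer(int *pending)
{
	int event = *pending;

	*pending = 0;		/* consume before dispatch */
	switch (event) {
	case TIME_RETRANS: retransmit(); break;
	case TIME_PROBE0:  probe0();     break;
	}
}

int main(void)
{
	int pending = TIME_RETRANS;

	write_timer(&pending);
	return 0;
}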
@@ -462,96 +431,8 @@ out_unlock:
462 431
463static void tcp_synack_timer(struct sock *sk) 432static void tcp_synack_timer(struct sock *sk)
464{ 433{
465 struct tcp_sock *tp = tcp_sk(sk); 434 inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
466 struct listen_sock *lopt = tp->accept_queue.listen_opt; 435 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
467 int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
468 int thresh = max_retries;
469 unsigned long now = jiffies;
470 struct request_sock **reqp, *req;
471 int i, budget;
472
473 if (lopt == NULL || lopt->qlen == 0)
474 return;
475
476 /* Normally all the openreqs are young and become mature
477 * (i.e. converted to established socket) for first timeout.
478 * If synack was not acknowledged for 3 seconds, it means
479 * one of the following things: synack was lost, ack was lost,
480 * rtt is high or nobody planned to ack (i.e. synflood).
481 * When server is a bit loaded, queue is populated with old
482 * open requests, reducing effective size of queue.
483 * When server is well loaded, queue size reduces to zero
484 * after several minutes of work. It is not synflood,
485 * it is normal operation. The solution is pruning
486 * too old entries overriding normal timeout, when
487 * situation becomes dangerous.
488 *
489 * Essentially, we reserve half of the room for young
490 * embryos; and abort old ones without pity, if old
491 * ones are about to clog our table.
492 */
493 if (lopt->qlen>>(lopt->max_qlen_log-1)) {
494 int young = (lopt->qlen_young<<1);
495
496 while (thresh > 2) {
497 if (lopt->qlen < young)
498 break;
499 thresh--;
500 young <<= 1;
501 }
502 }
503
504 if (tp->defer_accept)
505 max_retries = tp->defer_accept;
506
507 budget = 2*(TCP_SYNQ_HSIZE/(TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL));
508 i = lopt->clock_hand;
509
510 do {
511 reqp=&lopt->syn_table[i];
512 while ((req = *reqp) != NULL) {
513 if (time_after_eq(now, req->expires)) {
514 if ((req->retrans < thresh ||
515 (inet_rsk(req)->acked && req->retrans < max_retries))
516 && !req->rsk_ops->rtx_syn_ack(sk, req, NULL)) {
517 unsigned long timeo;
518
519 if (req->retrans++ == 0)
520 lopt->qlen_young--;
521 timeo = min((TCP_TIMEOUT_INIT << req->retrans),
522 TCP_RTO_MAX);
523 req->expires = now + timeo;
524 reqp = &req->dl_next;
525 continue;
526 }
527
528 /* Drop this request */
529 tcp_synq_unlink(tp, req, reqp);
530 reqsk_queue_removed(&tp->accept_queue, req);
531 reqsk_free(req);
532 continue;
533 }
534 reqp = &req->dl_next;
535 }
536
537 i = (i+1)&(TCP_SYNQ_HSIZE-1);
538
539 } while (--budget > 0);
540
541 lopt->clock_hand = i;
542
543 if (lopt->qlen)
544 tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
545}
546
547void tcp_delete_keepalive_timer (struct sock *sk)
548{
549 sk_stop_timer(sk, &sk->sk_timer);
550}
551
552void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
553{
554 sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
555} 436}
556 437
557void tcp_set_keepalive(struct sock *sk, int val) 438void tcp_set_keepalive(struct sock *sk, int val)
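The pruning policy deleted here, now living behind inet_csk_reqsk_queue_prune(), lowers the SYN-ACK retransmission threshold while old requests crowd the queue, reserving roughly half of the room for young entries. The threshold computation lifted into a standalone sketch:

#include <stdio.h>

static int prune_thresh(int max_retries, int qlen, int qlen_young,
			int max_qlen_log)
{
	int thresh = max_retries;

	if (qlen >> (max_qlen_log - 1)) {	/* queue over half full */
		int young = qlen_young << 1;

		while (thresh > 2 && qlen >= young) {
			thresh--;		/* abort old entries sooner */
			young <<= 1;
		}
	}
	return thresh;
}

int main(void)
{
	/* 512 queued, only 16 young, hash log 10: threshold collapses. */
	printf("thresh = %d\n", prune_thresh(5, 512, 16, 10));
	return 0;
}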
@@ -560,15 +441,16 @@ void tcp_set_keepalive(struct sock *sk, int val)
560 return; 441 return;
561 442
562 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) 443 if (val && !sock_flag(sk, SOCK_KEEPOPEN))
563 tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); 444 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
564 else if (!val) 445 else if (!val)
565 tcp_delete_keepalive_timer(sk); 446 inet_csk_delete_keepalive_timer(sk);
566} 447}
567 448
568 449
569static void tcp_keepalive_timer (unsigned long data) 450static void tcp_keepalive_timer (unsigned long data)
570{ 451{
571 struct sock *sk = (struct sock *) data; 452 struct sock *sk = (struct sock *) data;
453 struct inet_connection_sock *icsk = inet_csk(sk);
572 struct tcp_sock *tp = tcp_sk(sk); 454 struct tcp_sock *tp = tcp_sk(sk);
573 __u32 elapsed; 455 __u32 elapsed;
574 456
@@ -576,7 +458,7 @@ static void tcp_keepalive_timer (unsigned long data)
576 bh_lock_sock(sk); 458 bh_lock_sock(sk);
577 if (sock_owned_by_user(sk)) { 459 if (sock_owned_by_user(sk)) {
578 /* Try again later. */ 460 /* Try again later. */
579 tcp_reset_keepalive_timer (sk, HZ/20); 461 inet_csk_reset_keepalive_timer (sk, HZ/20);
580 goto out; 462 goto out;
581 } 463 }
582 464
@@ -587,7 +469,7 @@ static void tcp_keepalive_timer (unsigned long data)
587 469
588 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 470 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
589 if (tp->linger2 >= 0) { 471 if (tp->linger2 >= 0) {
590 int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN; 472 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
591 473
592 if (tmo > 0) { 474 if (tmo > 0) {
593 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 475 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
@@ -610,14 +492,14 @@ static void tcp_keepalive_timer (unsigned long data)
610 elapsed = tcp_time_stamp - tp->rcv_tstamp; 492 elapsed = tcp_time_stamp - tp->rcv_tstamp;
611 493
612 if (elapsed >= keepalive_time_when(tp)) { 494 if (elapsed >= keepalive_time_when(tp)) {
613 if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) || 495 if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) ||
614 (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) { 496 (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
615 tcp_send_active_reset(sk, GFP_ATOMIC); 497 tcp_send_active_reset(sk, GFP_ATOMIC);
616 tcp_write_err(sk); 498 tcp_write_err(sk);
617 goto out; 499 goto out;
618 } 500 }
619 if (tcp_write_wakeup(sk) <= 0) { 501 if (tcp_write_wakeup(sk) <= 0) {
620 tp->probes_out++; 502 icsk->icsk_probes_out++;
621 elapsed = keepalive_intvl_when(tp); 503 elapsed = keepalive_intvl_when(tp);
622 } else { 504 } else {
623 /* If keepalive was lost due to local congestion, 505 /* If keepalive was lost due to local congestion,
@@ -634,7 +516,7 @@ static void tcp_keepalive_timer (unsigned long data)
634 sk_stream_mem_reclaim(sk); 516 sk_stream_mem_reclaim(sk);
635 517
636resched: 518resched:
637 tcp_reset_keepalive_timer (sk, elapsed); 519 inet_csk_reset_keepalive_timer (sk, elapsed);
638 goto out; 520 goto out;
639 521
640death: 522death:
@@ -644,8 +526,3 @@ out:
644 bh_unlock_sock(sk); 526 bh_unlock_sock(sk);
645 sock_put(sk); 527 sock_put(sk);
646} 528}
647
648EXPORT_SYMBOL(tcp_clear_xmit_timers);
649EXPORT_SYMBOL(tcp_delete_keepalive_timer);
650EXPORT_SYMBOL(tcp_init_xmit_timers);
651EXPORT_SYMBOL(tcp_reset_keepalive_timer);
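Once the socket has been idle past the keepalive period, the timer body above reduces to a three-way decision. A toy restatement with example constants (the kernel reads these per-socket or from sysctls):

#include <stdio.h>

#define KEEPALIVE_TIME		7200	/* idle seconds before probing */
#define KEEPALIVE_PROBES	9	/* unanswered probes tolerated */

static const char *keepalive_step(unsigned int elapsed,
				  unsigned int *probes_out)
{
	if (elapsed < KEEPALIVE_TIME)
		return "rearm for the rest of the idle period";
	if (*probes_out >= KEEPALIVE_PROBES)
		return "reset the connection";
	(*probes_out)++;
	return "send a probe, rearm with the probe interval";
}

int main(void)
{
	unsigned int probes = 0;

	puts(keepalive_step(8000, &probes));
	puts(keepalive_step(100, &probes));
	return 0;
}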
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 9bd443db5193..93c5f92070f9 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -35,7 +35,7 @@
35#include <linux/mm.h> 35#include <linux/mm.h>
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/skbuff.h> 37#include <linux/skbuff.h>
38#include <linux/tcp_diag.h> 38#include <linux/inet_diag.h>
39 39
40#include <net/tcp.h> 40#include <net/tcp.h>
41 41
@@ -82,9 +82,10 @@ struct vegas {
82 * Instead we must wait until the completion of an RTT during 82 * Instead we must wait until the completion of an RTT during
83 * which we actually receive ACKs. 83 * which we actually receive ACKs.
84 */ 84 */
85static inline void vegas_enable(struct tcp_sock *tp) 85static inline void vegas_enable(struct sock *sk)
86{ 86{
87 struct vegas *vegas = tcp_ca(tp); 87 const struct tcp_sock *tp = tcp_sk(sk);
88 struct vegas *vegas = inet_csk_ca(sk);
88 89
89 /* Begin taking Vegas samples next time we send something. */ 90 /* Begin taking Vegas samples next time we send something. */
90 vegas->doing_vegas_now = 1; 91 vegas->doing_vegas_now = 1;
@@ -97,19 +98,19 @@ static inline void vegas_enable(struct tcp_sock *tp)
97} 98}
98 99
99/* Stop taking Vegas samples for now. */ 100/* Stop taking Vegas samples for now. */
100static inline void vegas_disable(struct tcp_sock *tp) 101static inline void vegas_disable(struct sock *sk)
101{ 102{
102 struct vegas *vegas = tcp_ca(tp); 103 struct vegas *vegas = inet_csk_ca(sk);
103 104
104 vegas->doing_vegas_now = 0; 105 vegas->doing_vegas_now = 0;
105} 106}
106 107
107static void tcp_vegas_init(struct tcp_sock *tp) 108static void tcp_vegas_init(struct sock *sk)
108{ 109{
109 struct vegas *vegas = tcp_ca(tp); 110 struct vegas *vegas = inet_csk_ca(sk);
110 111
111 vegas->baseRTT = 0x7fffffff; 112 vegas->baseRTT = 0x7fffffff;
112 vegas_enable(tp); 113 vegas_enable(sk);
113} 114}
114 115
115/* Do RTT sampling needed for Vegas. 116/* Do RTT sampling needed for Vegas.
@@ -120,9 +121,9 @@ static void tcp_vegas_init(struct tcp_sock *tp)
120 * o min-filter RTT samples from a much longer window (forever for now) 121 * o min-filter RTT samples from a much longer window (forever for now)
121 * to find the propagation delay (baseRTT) 122 * to find the propagation delay (baseRTT)
122 */ 123 */
123static void tcp_vegas_rtt_calc(struct tcp_sock *tp, u32 usrtt) 124static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt)
124{ 125{
125 struct vegas *vegas = tcp_ca(tp); 126 struct vegas *vegas = inet_csk_ca(sk);
126 u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ 127 u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
127 128
128 /* Filter to find propagation delay: */ 129 /* Filter to find propagation delay: */
@@ -136,13 +137,13 @@ static void tcp_vegas_rtt_calc(struct tcp_sock *tp, u32 usrtt)
136 vegas->cntRTT++; 137 vegas->cntRTT++;
137} 138}
138 139
139static void tcp_vegas_state(struct tcp_sock *tp, u8 ca_state) 140static void tcp_vegas_state(struct sock *sk, u8 ca_state)
140{ 141{
141 142
142 if (ca_state == TCP_CA_Open) 143 if (ca_state == TCP_CA_Open)
143 vegas_enable(tp); 144 vegas_enable(sk);
144 else 145 else
145 vegas_disable(tp); 146 vegas_disable(sk);
146} 147}
147 148
148/* 149/*
@@ -154,20 +155,21 @@ static void tcp_vegas_state(struct tcp_sock *tp, u8 ca_state)
154 * packets, _then_ we can make Vegas calculations 155 * packets, _then_ we can make Vegas calculations
155 * again. 156 * again.
156 */ 157 */
157static void tcp_vegas_cwnd_event(struct tcp_sock *tp, enum tcp_ca_event event) 158static void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
158{ 159{
159 if (event == CA_EVENT_CWND_RESTART || 160 if (event == CA_EVENT_CWND_RESTART ||
160 event == CA_EVENT_TX_START) 161 event == CA_EVENT_TX_START)
161 tcp_vegas_init(tp); 162 tcp_vegas_init(sk);
162} 163}
163 164
164static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack, 165static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
165 u32 seq_rtt, u32 in_flight, int flag) 166 u32 seq_rtt, u32 in_flight, int flag)
166{ 167{
167 struct vegas *vegas = tcp_ca(tp); 168 struct tcp_sock *tp = tcp_sk(sk);
169 struct vegas *vegas = inet_csk_ca(sk);
168 170
169 if (!vegas->doing_vegas_now) 171 if (!vegas->doing_vegas_now)
170 return tcp_reno_cong_avoid(tp, ack, seq_rtt, in_flight, flag); 172 return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
171 173
172 /* The key players are v_beg_snd_una and v_beg_snd_nxt. 174 /* The key players are v_beg_snd_una and v_beg_snd_nxt.
173 * 175 *
@@ -219,7 +221,7 @@ static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack,
219 * but that's not too awful, since we're taking the min, 221 * but that's not too awful, since we're taking the min,
220 * rather than averaging. 222 * rather than averaging.
221 */ 223 */
222 tcp_vegas_rtt_calc(tp, seq_rtt*1000); 224 tcp_vegas_rtt_calc(sk, seq_rtt * 1000);
223 225
224 /* We do the Vegas calculations only if we got enough RTT 226 /* We do the Vegas calculations only if we got enough RTT
225 * samples that we can be reasonably sure that we got 227 * samples that we can be reasonably sure that we got
@@ -359,14 +361,14 @@ static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack,
359} 361}
360 362
361/* Extract info for Tcp socket info provided via netlink. */ 363/* Extract info for Tcp socket info provided via netlink. */
362static void tcp_vegas_get_info(struct tcp_sock *tp, u32 ext, 364static void tcp_vegas_get_info(struct sock *sk, u32 ext,
363 struct sk_buff *skb) 365 struct sk_buff *skb)
364{ 366{
365 const struct vegas *ca = tcp_ca(tp); 367 const struct vegas *ca = inet_csk_ca(sk);
366 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { 368 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
367 struct tcpvegas_info *info; 369 struct tcpvegas_info *info;
368 370
369 info = RTA_DATA(__RTA_PUT(skb, TCPDIAG_VEGASINFO, 371 info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
370 sizeof(*info))); 372 sizeof(*info)));
371 373
372 info->tcpv_enabled = ca->doing_vegas_now; 374 info->tcpv_enabled = ca->doing_vegas_now;
@@ -393,7 +395,7 @@ static struct tcp_congestion_ops tcp_vegas = {
393 395
394static int __init tcp_vegas_register(void) 396static int __init tcp_vegas_register(void)
395{ 397{
396 BUG_ON(sizeof(struct vegas) > TCP_CA_PRIV_SIZE); 398 BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE);
397 tcp_register_congestion_control(&tcp_vegas); 399 tcp_register_congestion_control(&tcp_vegas);
398 return 0; 400 return 0;
399} 401}
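The Vegas conversion leaves the sampling math intact: every RTT sample min-filters a lifetime propagation-delay estimate (baseRTT) and a per-window minimum, and bumps a sample count that the cwnd logic later uses to judge confidence. Restated standalone; the driver code around the patch's field names is invented:

#include <stdio.h>

struct vegas_state {
	unsigned int baseRTT;	/* min over the connection lifetime */
	unsigned int minRTT;	/* min over the current RTT window */
	unsigned int cntRTT;	/* samples seen this window */
};

static void vegas_rtt_calc(struct vegas_state *v, unsigned int usrtt)
{
	unsigned int vrtt = usrtt + 1;	/* never allow a zero RTT */

	if (vrtt < v->baseRTT)
		v->baseRTT = vrtt;
	if (vrtt < v->minRTT)
		v->minRTT = vrtt;
	v->cntRTT++;
}

int main(void)
{
	struct vegas_state v = { 0x7fffffff, 0x7fffffff, 0 };

	vegas_rtt_calc(&v, 40000);
	vegas_rtt_calc(&v, 35000);
	printf("baseRTT=%u minRTT=%u cnt=%u\n", v.baseRTT, v.minRTT, v.cntRTT);
	return 0;
}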
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index ef827242c940..0c340c3756c2 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -8,7 +8,7 @@
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/tcp_diag.h> 11#include <linux/inet_diag.h>
12#include <net/tcp.h> 12#include <net/tcp.h>
13 13
14/* TCP Westwood structure */ 14/* TCP Westwood structure */
@@ -40,9 +40,9 @@ struct westwood {
40 * way as soon as possible. It will reasonably happen within the first 40 * way as soon as possible. It will reasonably happen within the first
41 * RTT period of the connection lifetime. 41 * RTT period of the connection lifetime.
42 */ 42 */
43static void tcp_westwood_init(struct tcp_sock *tp) 43static void tcp_westwood_init(struct sock *sk)
44{ 44{
45 struct westwood *w = tcp_ca(tp); 45 struct westwood *w = inet_csk_ca(sk);
46 46
47 w->bk = 0; 47 w->bk = 0;
48 w->bw_ns_est = 0; 48 w->bw_ns_est = 0;
@@ -51,7 +51,7 @@ static void tcp_westwood_init(struct tcp_sock *tp)
51 w->cumul_ack = 0; 51 w->cumul_ack = 0;
52 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; 52 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
53 w->rtt_win_sx = tcp_time_stamp; 53 w->rtt_win_sx = tcp_time_stamp;
54 w->snd_una = tp->snd_una; 54 w->snd_una = tcp_sk(sk)->snd_una;
55} 55}
56 56
57/* 57/*
@@ -74,11 +74,11 @@ static inline void westwood_filter(struct westwood *w, u32 delta)
74 * Called after processing a group of packets, 74 * Called after processing a group of packets,
75 * but all Westwood needs is the last sample of srtt. 75 * but all Westwood needs is the last sample of srtt.
76 */ 76 */
77static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt) 77static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
78{ 78{
79 struct westwood *w = tcp_ca(tp); 79 struct westwood *w = inet_csk_ca(sk);
80 if (cnt > 0) 80 if (cnt > 0)
81 w->rtt = tp->srtt >> 3; 81 w->rtt = tcp_sk(sk)->srtt >> 3;
82} 82}
83 83
84/* 84/*
@@ -86,9 +86,9 @@ static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt)
86 * It updates RTT evaluation window if it is the right moment to do 86 * It updates RTT evaluation window if it is the right moment to do
87 * it. If so it calls filter for evaluating bandwidth. 87 * it. If so it calls filter for evaluating bandwidth.
88 */ 88 */
89static void westwood_update_window(struct tcp_sock *tp) 89static void westwood_update_window(struct sock *sk)
90{ 90{
91 struct westwood *w = tcp_ca(tp); 91 struct westwood *w = inet_csk_ca(sk);
92 s32 delta = tcp_time_stamp - w->rtt_win_sx; 92 s32 delta = tcp_time_stamp - w->rtt_win_sx;
93 93
94 /* 94 /*
@@ -114,11 +114,12 @@ static void westwood_update_window(struct tcp_sock *tp)
114 * header prediction is successful. In that case the update is 114 * header prediction is successful. In that case the update is
115 * straightforward and doesn't need any particular care. 115 * straightforward and doesn't need any particular care.
116 */ 116 */
117static inline void westwood_fast_bw(struct tcp_sock *tp) 117static inline void westwood_fast_bw(struct sock *sk)
118{ 118{
119 struct westwood *w = tcp_ca(tp); 119 const struct tcp_sock *tp = tcp_sk(sk);
120 struct westwood *w = inet_csk_ca(sk);
120 121
121 westwood_update_window(tp); 122 westwood_update_window(sk);
122 123
123 w->bk += tp->snd_una - w->snd_una; 124 w->bk += tp->snd_una - w->snd_una;
124 w->snd_una = tp->snd_una; 125 w->snd_una = tp->snd_una;
@@ -130,9 +131,10 @@ static inline void westwood_fast_bw(struct tcp_sock *tp)
130 * This function evaluates cumul_ack for evaluating bk in case of 131 * This function evaluates cumul_ack for evaluating bk in case of
131 * delayed or partial acks. 132 * delayed or partial acks.
132 */ 133 */
133static inline u32 westwood_acked_count(struct tcp_sock *tp) 134static inline u32 westwood_acked_count(struct sock *sk)
134{ 135{
135 struct westwood *w = tcp_ca(tp); 136 const struct tcp_sock *tp = tcp_sk(sk);
137 struct westwood *w = inet_csk_ca(sk);
136 138
137 w->cumul_ack = tp->snd_una - w->snd_una; 139 w->cumul_ack = tp->snd_una - w->snd_una;
138 140
@@ -160,9 +162,10 @@ static inline u32 westwood_acked_count(struct tcp_sock *tp)
160 return w->cumul_ack; 162 return w->cumul_ack;
161} 163}
162 164
163static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp) 165static inline u32 westwood_bw_rttmin(const struct sock *sk)
164{ 166{
165 struct westwood *w = tcp_ca(tp); 167 const struct tcp_sock *tp = tcp_sk(sk);
168 const struct westwood *w = inet_csk_ca(sk);
166 return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); 169 return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
167} 170}
168 171
@@ -172,31 +175,32 @@ static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp)
172 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2 175 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
173 * so avoids ever returning 0. 176 * so avoids ever returning 0.
174 */ 177 */
175static u32 tcp_westwood_cwnd_min(struct tcp_sock *tp) 178static u32 tcp_westwood_cwnd_min(struct sock *sk)
176{ 179{
177 return westwood_bw_rttmin(tp); 180 return westwood_bw_rttmin(sk);
178} 181}
179 182
180static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event) 183static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
181{ 184{
182 struct westwood *w = tcp_ca(tp); 185 struct tcp_sock *tp = tcp_sk(sk);
186 struct westwood *w = inet_csk_ca(sk);
183 187
184 switch(event) { 188 switch(event) {
185 case CA_EVENT_FAST_ACK: 189 case CA_EVENT_FAST_ACK:
186 westwood_fast_bw(tp); 190 westwood_fast_bw(sk);
187 break; 191 break;
188 192
189 case CA_EVENT_COMPLETE_CWR: 193 case CA_EVENT_COMPLETE_CWR:
190 tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(tp); 194 tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
191 break; 195 break;
192 196
193 case CA_EVENT_FRTO: 197 case CA_EVENT_FRTO:
194 tp->snd_ssthresh = westwood_bw_rttmin(tp); 198 tp->snd_ssthresh = westwood_bw_rttmin(sk);
195 break; 199 break;
196 200
197 case CA_EVENT_SLOW_ACK: 201 case CA_EVENT_SLOW_ACK:
198 westwood_update_window(tp); 202 westwood_update_window(sk);
199 w->bk += westwood_acked_count(tp); 203 w->bk += westwood_acked_count(sk);
200 w->rtt_min = min(w->rtt, w->rtt_min); 204 w->rtt_min = min(w->rtt, w->rtt_min);
201 break; 205 break;
202 206
@@ -208,15 +212,15 @@ static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event)
208 212
209 213
210/* Extract info for Tcp socket info provided via netlink. */ 214/* Extract info for Tcp socket info provided via netlink. */
211static void tcp_westwood_info(struct tcp_sock *tp, u32 ext, 215static void tcp_westwood_info(struct sock *sk, u32 ext,
212 struct sk_buff *skb) 216 struct sk_buff *skb)
213{ 217{
214 const struct westwood *ca = tcp_ca(tp); 218 const struct westwood *ca = inet_csk_ca(sk);
215 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { 219 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
216 struct rtattr *rta; 220 struct rtattr *rta;
217 struct tcpvegas_info *info; 221 struct tcpvegas_info *info;
218 222
219 rta = __RTA_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*info)); 223 rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
220 info = RTA_DATA(rta); 224 info = RTA_DATA(rta);
221 info->tcpv_enabled = 1; 225 info->tcpv_enabled = 1;
222 info->tcpv_rttcnt = 0; 226 info->tcpv_rttcnt = 0;
@@ -242,7 +246,7 @@ static struct tcp_congestion_ops tcp_westwood = {
242 246
243static int __init tcp_westwood_register(void) 247static int __init tcp_westwood_register(void)
244{ 248{
245 BUG_ON(sizeof(struct westwood) > TCP_CA_PRIV_SIZE); 249 BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
246 return tcp_register_congestion_control(&tcp_westwood); 250 return tcp_register_congestion_control(&tcp_westwood);
247} 251}
248 252
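westwood_bw_rttmin(), invoked from the CWR and FRTO events above, sets the window floor to the estimated bandwidth-delay product in packets and never lets it drop below 2. In isolation, with made-up inputs:

#include <stdio.h>

static unsigned int bw_rttmin(unsigned int bw_est, unsigned int rtt_min,
			      unsigned int mss)
{
	unsigned int cwnd = (bw_est * rtt_min) / mss;	/* BDP in packets */

	return cwnd > 2 ? cwnd : 2;
}

int main(void)
{
	printf("cwnd floor = %u\n", bw_rttmin(180000, 20, 1448));
	return 0;
}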
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dc4d07357e3a..e5beca7de86c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -95,7 +95,8 @@
95#include <linux/ipv6.h> 95#include <linux/ipv6.h>
96#include <linux/netdevice.h> 96#include <linux/netdevice.h>
97#include <net/snmp.h> 97#include <net/snmp.h>
98#include <net/tcp.h> 98#include <net/ip.h>
99#include <net/tcp_states.h>
99#include <net/protocol.h> 100#include <net/protocol.h>
100#include <linux/skbuff.h> 101#include <linux/skbuff.h>
101#include <linux/proc_fs.h> 102#include <linux/proc_fs.h>
@@ -112,7 +113,7 @@
112 * Snmp MIB for the UDP layer 113 * Snmp MIB for the UDP layer
113 */ 114 */
114 115
115DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); 116DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
116 117
117struct hlist_head udp_hash[UDP_HTABLE_SIZE]; 118struct hlist_head udp_hash[UDP_HTABLE_SIZE];
118DEFINE_RWLOCK(udp_hash_lock); 119DEFINE_RWLOCK(udp_hash_lock);
@@ -628,7 +629,7 @@ back_from_confirm:
628 /* ... which is an evident application bug. --ANK */ 629 /* ... which is an evident application bug. --ANK */
629 release_sock(sk); 630 release_sock(sk);
630 631
631 LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 2\n")); 632 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
632 err = -EINVAL; 633 err = -EINVAL;
633 goto out; 634 goto out;
634 } 635 }
@@ -693,7 +694,7 @@ static int udp_sendpage(struct sock *sk, struct page *page, int offset,
693 if (unlikely(!up->pending)) { 694 if (unlikely(!up->pending)) {
694 release_sock(sk); 695 release_sock(sk);
695 696
696 LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 3\n")); 697 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
697 return -EINVAL; 698 return -EINVAL;
698 } 699 }
699 700
@@ -1102,7 +1103,7 @@ static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
1102 skb->ip_summed = CHECKSUM_UNNECESSARY; 1103 skb->ip_summed = CHECKSUM_UNNECESSARY;
1103 if (!udp_check(uh, ulen, saddr, daddr, skb->csum)) 1104 if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
1104 return 0; 1105 return 0;
1105 LIMIT_NETDEBUG(printk(KERN_DEBUG "udp v4 hw csum failure.\n")); 1106 LIMIT_NETDEBUG(KERN_DEBUG "udp v4 hw csum failure.\n");
1106 skb->ip_summed = CHECKSUM_NONE; 1107 skb->ip_summed = CHECKSUM_NONE;
1107 } 1108 }
1108 if (skb->ip_summed != CHECKSUM_UNNECESSARY) 1109 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -1181,13 +1182,13 @@ int udp_rcv(struct sk_buff *skb)
1181 return(0); 1182 return(0);
1182 1183
1183short_packet: 1184short_packet:
1184 LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", 1185 LIMIT_NETDEBUG(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
1185 NIPQUAD(saddr), 1186 NIPQUAD(saddr),
1186 ntohs(uh->source), 1187 ntohs(uh->source),
1187 ulen, 1188 ulen,
1188 len, 1189 len,
1189 NIPQUAD(daddr), 1190 NIPQUAD(daddr),
1190 ntohs(uh->dest))); 1191 ntohs(uh->dest));
1191no_header: 1192no_header:
1192 UDP_INC_STATS_BH(UDP_MIB_INERRORS); 1193 UDP_INC_STATS_BH(UDP_MIB_INERRORS);
1193 kfree_skb(skb); 1194 kfree_skb(skb);
@@ -1198,12 +1199,12 @@ csum_error:
1198 * RFC1122: OK. Discards the bad packet silently (as far as 1199 * RFC1122: OK. Discards the bad packet silently (as far as
1199 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 1200 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1200 */ 1201 */
1201 LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", 1202 LIMIT_NETDEBUG(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
1202 NIPQUAD(saddr), 1203 NIPQUAD(saddr),
1203 ntohs(uh->source), 1204 ntohs(uh->source),
1204 NIPQUAD(daddr), 1205 NIPQUAD(daddr),
1205 ntohs(uh->dest), 1206 ntohs(uh->dest),
1206 ulen)); 1207 ulen);
1207drop: 1208drop:
1208 UDP_INC_STATS_BH(UDP_MIB_INERRORS); 1209 UDP_INC_STATS_BH(UDP_MIB_INERRORS);
1209 kfree_skb(skb); 1210 kfree_skb(skb);
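Every LIMIT_NETDEBUG() call site in this file sheds its explicit printk() wrapper; the macro now owns both the rate limiting and the print. A userspace approximation; the once-per-N counter is invented for the sketch, whereas the kernel version gates on net_ratelimit():

#include <stdio.h>

#define LIMIT_NETDEBUG(fmt, ...)					\
	do {								\
		static unsigned int count;				\
		if (count++ % 16 == 0)	/* crude rate limiter */	\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	for (int i = 0; i < 32; i++)
		LIMIT_NETDEBUG("udp debug hit %d\n", i);
	return 0;
}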
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 050611d7a967..d23e07fc81fa 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -128,8 +128,10 @@ void __init xfrm4_state_init(void)
128 xfrm_state_register_afinfo(&xfrm4_state_afinfo); 128 xfrm_state_register_afinfo(&xfrm4_state_afinfo);
129} 129}
130 130
131#if 0
131void __exit xfrm4_state_fini(void) 132void __exit xfrm4_state_fini(void)
132{ 133{
133 xfrm_state_unregister_afinfo(&xfrm4_state_afinfo); 134 xfrm_state_unregister_afinfo(&xfrm4_state_afinfo);
134} 135}
136#endif /* 0 */
135 137
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index b39e04940590..6460eec834b7 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -8,7 +8,7 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ 8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ 10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
11 ip6_flowlabel.o ipv6_syms.o 11 ip6_flowlabel.o ipv6_syms.o netfilter.o
12 12
13ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ 13ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
14 xfrm6_output.o 14 xfrm6_output.o
@@ -23,3 +23,5 @@ obj-$(CONFIG_NETFILTER) += netfilter/
23obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 23obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
24 24
25obj-y += exthdrs_core.o 25obj-y += exthdrs_core.o
26
27obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 77004b9456c0..937ad32db77c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1041,9 +1041,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
1041 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 1041 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
1042 const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2); 1042 const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2);
1043 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; 1043 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
1044 u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); 1044 u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
1045 int sk_ipv6only = ipv6_only_sock(sk); 1045 int sk_ipv6only = ipv6_only_sock(sk);
1046 int sk2_ipv6only = tcp_v6_ipv6only(sk2); 1046 int sk2_ipv6only = inet_v6_ipv6only(sk2);
1047 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 1047 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
1048 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; 1048 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
1049 1049
@@ -1126,7 +1126,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
1126 __ipv6_dev_mc_dec(idev, &maddr); 1126 __ipv6_dev_mc_dec(idev, &maddr);
1127} 1127}
1128 1128
1129void addrconf_join_anycast(struct inet6_ifaddr *ifp) 1129static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1130{ 1130{
1131 struct in6_addr addr; 1131 struct in6_addr addr;
1132 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1132 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1135,7 +1135,7 @@ void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1135 ipv6_dev_ac_inc(ifp->idev->dev, &addr); 1135 ipv6_dev_ac_inc(ifp->idev->dev, &addr);
1136} 1136}
1137 1137
1138void addrconf_leave_anycast(struct inet6_ifaddr *ifp) 1138static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1139{ 1139{
1140 struct in6_addr addr; 1140 struct in6_addr addr;
1141 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1141 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -2858,16 +2858,16 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
2858 2858
2859 skb = alloc_skb(size, GFP_ATOMIC); 2859 skb = alloc_skb(size, GFP_ATOMIC);
2860 if (!skb) { 2860 if (!skb) {
2861 netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFADDR, ENOBUFS); 2861 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_IFADDR, ENOBUFS);
2862 return; 2862 return;
2863 } 2863 }
2864 if (inet6_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) { 2864 if (inet6_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
2865 kfree_skb(skb); 2865 kfree_skb(skb);
2866 netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFADDR, EINVAL); 2866 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_IFADDR, EINVAL);
2867 return; 2867 return;
2868 } 2868 }
2869 NETLINK_CB(skb).dst_groups = RTMGRP_IPV6_IFADDR; 2869 NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_IFADDR;
2870 netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_IFADDR, GFP_ATOMIC); 2870 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_IFADDR, GFP_ATOMIC);
2871} 2871}
2872 2872
2873static void inline ipv6_store_devconf(struct ipv6_devconf *cnf, 2873static void inline ipv6_store_devconf(struct ipv6_devconf *cnf,
@@ -2994,16 +2994,16 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
2994 2994
2995 skb = alloc_skb(size, GFP_ATOMIC); 2995 skb = alloc_skb(size, GFP_ATOMIC);
2996 if (!skb) { 2996 if (!skb) {
2997 netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFINFO, ENOBUFS); 2997 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_IFINFO, ENOBUFS);
2998 return; 2998 return;
2999 } 2999 }
3000 if (inet6_fill_ifinfo(skb, idev, current->pid, 0, event, 0) < 0) { 3000 if (inet6_fill_ifinfo(skb, idev, current->pid, 0, event, 0) < 0) {
3001 kfree_skb(skb); 3001 kfree_skb(skb);
3002 netlink_set_err(rtnl, 0, RTMGRP_IPV6_IFINFO, EINVAL); 3002 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_IFINFO, EINVAL);
3003 return; 3003 return;
3004 } 3004 }
3005 NETLINK_CB(skb).dst_groups = RTMGRP_IPV6_IFINFO; 3005 NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_IFINFO;
3006 netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_IFINFO, GFP_ATOMIC); 3006 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_IFINFO, GFP_ATOMIC);
3007} 3007}
3008 3008
3009static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, 3009static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
@@ -3054,16 +3054,16 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3054 3054
3055 skb = alloc_skb(size, GFP_ATOMIC); 3055 skb = alloc_skb(size, GFP_ATOMIC);
3056 if (!skb) { 3056 if (!skb) {
3057 netlink_set_err(rtnl, 0, RTMGRP_IPV6_PREFIX, ENOBUFS); 3057 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_PREFIX, ENOBUFS);
3058 return; 3058 return;
3059 } 3059 }
3060 if (inet6_fill_prefix(skb, idev, pinfo, current->pid, 0, event, 0) < 0) { 3060 if (inet6_fill_prefix(skb, idev, pinfo, current->pid, 0, event, 0) < 0) {
3061 kfree_skb(skb); 3061 kfree_skb(skb);
3062 netlink_set_err(rtnl, 0, RTMGRP_IPV6_PREFIX, EINVAL); 3062 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_PREFIX, EINVAL);
3063 return; 3063 return;
3064 } 3064 }
3065 NETLINK_CB(skb).dst_groups = RTMGRP_IPV6_PREFIX; 3065 NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_PREFIX;
3066 netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_PREFIX, GFP_ATOMIC); 3066 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_PREFIX, GFP_ATOMIC);
3067} 3067}
3068 3068
3069static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = { 3069static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
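The three notify paths touched above share one skeleton: allocate a message, fill it, and on either failure report the errno to the (renamed RTNLGRP_*) multicast group rather than broadcasting. With stand-in helpers and an invented group id:

#include <stdio.h>
#include <stdlib.h>

enum { GRP_IFADDR = 1 };	/* invented group id */

static void set_err(int grp, int err) { printf("err %d -> grp %d\n", err, grp); }
static void broadcast(int grp)        { printf("notify grp %d\n", grp); }

static int fill(char *buf, size_t len)
{
	if (len < 64)
		return -1;	/* message does not fit */
	buf[0] = 0;
	return 0;
}

static void ifaddr_notify(size_t size)
{
	char *msg = malloc(size);

	if (!msg) {
		set_err(GRP_IFADDR, 105 /* ENOBUFS */);
		return;
	}
	if (fill(msg, size) < 0) {
		free(msg);
		set_err(GRP_IFADDR, 22 /* EINVAL */);
		return;
	}
	broadcast(GRP_IFADDR);
	free(msg);
}

int main(void)
{
	ifaddr_notify(128);	/* fills and broadcasts */
	ifaddr_notify(16);	/* fill fails, error reported */
	return 0;
}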
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 28d9bcab0970..4f8795af2edb 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -44,6 +44,7 @@
44#include <linux/netdevice.h> 44#include <linux/netdevice.h>
45#include <linux/icmpv6.h> 45#include <linux/icmpv6.h>
46#include <linux/smp_lock.h> 46#include <linux/smp_lock.h>
47#include <linux/netfilter_ipv6.h>
47 48
48#include <net/ip.h> 49#include <net/ip.h>
49#include <net/ipv6.h> 50#include <net/ipv6.h>
@@ -66,45 +67,14 @@ MODULE_AUTHOR("Cast of dozens");
66MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 67MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
67MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
68 69
69/* IPv6 procfs goodies... */
70
71#ifdef CONFIG_PROC_FS
72extern int raw6_proc_init(void);
73extern void raw6_proc_exit(void);
74extern int tcp6_proc_init(void);
75extern void tcp6_proc_exit(void);
76extern int udp6_proc_init(void);
77extern void udp6_proc_exit(void);
78extern int ipv6_misc_proc_init(void);
79extern void ipv6_misc_proc_exit(void);
80extern int ac6_proc_init(void);
81extern void ac6_proc_exit(void);
82extern int if6_proc_init(void);
83extern void if6_proc_exit(void);
84#endif
85
86int sysctl_ipv6_bindv6only; 70int sysctl_ipv6_bindv6only;
87 71
88#ifdef INET_REFCNT_DEBUG
89atomic_t inet6_sock_nr;
90EXPORT_SYMBOL(inet6_sock_nr);
91#endif
92
93/* The inetsw table contains everything that inet_create needs to 72/* The inetsw table contains everything that inet_create needs to
94 * build a new socket. 73 * build a new socket.
95 */ 74 */
96static struct list_head inetsw6[SOCK_MAX]; 75static struct list_head inetsw6[SOCK_MAX];
97static DEFINE_SPINLOCK(inetsw6_lock); 76static DEFINE_SPINLOCK(inetsw6_lock);
98 77
99static void inet6_sock_destruct(struct sock *sk)
100{
101 inet_sock_destruct(sk);
102
103#ifdef INET_REFCNT_DEBUG
104 atomic_dec(&inet6_sock_nr);
105#endif
106}
107
108static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 78static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
109{ 79{
110 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); 80 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@@ -185,7 +155,7 @@ static int inet6_create(struct socket *sock, int protocol)
185 inet->hdrincl = 1; 155 inet->hdrincl = 1;
186 } 156 }
187 157
188 sk->sk_destruct = inet6_sock_destruct; 158 sk->sk_destruct = inet_sock_destruct;
189 sk->sk_family = PF_INET6; 159 sk->sk_family = PF_INET6;
190 sk->sk_protocol = protocol; 160 sk->sk_protocol = protocol;
191 161
@@ -212,12 +182,17 @@ static int inet6_create(struct socket *sock, int protocol)
212 inet->pmtudisc = IP_PMTUDISC_DONT; 182 inet->pmtudisc = IP_PMTUDISC_DONT;
213 else 183 else
214 inet->pmtudisc = IP_PMTUDISC_WANT; 184 inet->pmtudisc = IP_PMTUDISC_WANT;
185 /*
186 * Increment only the relevant sk_prot->socks debug field, this changes
187 * the previous behaviour of incrementing both the equivalent to
188 * answer->prot->socks (inet6_sock_nr) and inet_sock_nr.
189 *
190 * This allows better debug granularity as we'll know exactly how many
191 * UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6
192 * transport protocol socks. -acme
193 */
194 sk_refcnt_debug_inc(sk);
215 195
216
217#ifdef INET_REFCNT_DEBUG
218 atomic_inc(&inet6_sock_nr);
219 atomic_inc(&inet_sock_nr);
220#endif
221 if (inet->num) { 196 if (inet->num) {
222 /* It assumes that any protocol which allows 197 /* It assumes that any protocol which allows
223 * the user to assign a number at socket 198 * the user to assign a number at socket
@@ -513,11 +488,6 @@ static struct net_proto_family inet6_family_ops = {
513 .owner = THIS_MODULE, 488 .owner = THIS_MODULE,
514}; 489};
515 490
516#ifdef CONFIG_SYSCTL
517extern void ipv6_sysctl_register(void);
518extern void ipv6_sysctl_unregister(void);
519#endif
520
521/* Same as inet6_dgram_ops, sans udp_poll. */ 491/* Same as inet6_dgram_ops, sans udp_poll. */
522static struct proto_ops inet6_sockraw_ops = { 492static struct proto_ops inet6_sockraw_ops = {
523 .family = PF_INET6, 493 .family = PF_INET6,
@@ -684,8 +654,6 @@ static void cleanup_ipv6_mibs(void)
684 snmp6_mib_free((void **)udp_stats_in6); 654 snmp6_mib_free((void **)udp_stats_in6);
685} 655}
686 656
687extern int ipv6_misc_proc_init(void);
688
689static int __init inet6_init(void) 657static int __init inet6_init(void)
690{ 658{
691 struct sk_buff *dummy_skb; 659 struct sk_buff *dummy_skb;
@@ -757,6 +725,9 @@ static int __init inet6_init(void)
757 err = igmp6_init(&inet6_family_ops); 725 err = igmp6_init(&inet6_family_ops);
758 if (err) 726 if (err)
759 goto igmp_fail; 727 goto igmp_fail;
728 err = ipv6_netfilter_init();
729 if (err)
730 goto netfilter_fail;
760 /* Create /proc/foo6 entries. */ 731 /* Create /proc/foo6 entries. */
761#ifdef CONFIG_PROC_FS 732#ifdef CONFIG_PROC_FS
762 err = -ENOMEM; 733 err = -ENOMEM;
@@ -813,6 +784,8 @@ proc_tcp6_fail:
813 raw6_proc_exit(); 784 raw6_proc_exit();
814proc_raw6_fail: 785proc_raw6_fail:
815#endif 786#endif
787 ipv6_netfilter_fini();
788netfilter_fail:
816 igmp6_cleanup(); 789 igmp6_cleanup();
817igmp_fail: 790igmp_fail:
818 ndisc_cleanup(); 791 ndisc_cleanup();
@@ -852,6 +825,7 @@ static void __exit inet6_exit(void)
852 ip6_route_cleanup(); 825 ip6_route_cleanup();
853 ipv6_packet_cleanup(); 826 ipv6_packet_cleanup();
854 igmp6_cleanup(); 827 igmp6_cleanup();
828 ipv6_netfilter_fini();
855 ndisc_cleanup(); 829 ndisc_cleanup();
856 icmpv6_cleanup(); 830 icmpv6_cleanup();
857#ifdef CONFIG_SYSCTL 831#ifdef CONFIG_SYSCTL
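The long comment added in inet6_create() explains the accounting change; in miniature, per-protocol counters make a leaked socket attributable to TCPv6 or UDPv6 specifically. Types below are simplified stand-ins:

#include <stdio.h>

struct proto { const char *name; int socks; };

static void sk_refcnt_debug_inc(struct proto *prot) { prot->socks++; }
static void sk_refcnt_debug_dec(struct proto *prot) { prot->socks--; }

int main(void)
{
	struct proto tcpv6 = { "TCPv6", 0 }, udpv6 = { "UDPv6", 0 };

	sk_refcnt_debug_inc(&tcpv6);
	sk_refcnt_debug_inc(&udpv6);
	sk_refcnt_debug_dec(&udpv6);
	printf("%s=%d %s=%d\n", tcpv6.name, tcpv6.socks,
	       udpv6.name, udpv6.socks);
	return 0;
}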
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 986fdfdccbcd..0ebfad907a03 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -131,10 +131,10 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
131 case NEXTHDR_HOP: 131 case NEXTHDR_HOP:
132 case NEXTHDR_DEST: 132 case NEXTHDR_DEST:
133 if (!zero_out_mutable_opts(exthdr.opth)) { 133 if (!zero_out_mutable_opts(exthdr.opth)) {
134 LIMIT_NETDEBUG(printk( 134 LIMIT_NETDEBUG(
135 KERN_WARNING "overrun %sopts\n", 135 KERN_WARNING "overrun %sopts\n",
136 nexthdr == NEXTHDR_HOP ? 136 nexthdr == NEXTHDR_HOP ?
137 "hop" : "dest")); 137 "hop" : "dest");
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 break; 140 break;
@@ -293,8 +293,7 @@ static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
293 skb_push(skb, skb->data - skb->nh.raw); 293 skb_push(skb, skb->data - skb->nh.raw);
294 ahp->icv(ahp, skb, ah->auth_data); 294 ahp->icv(ahp, skb, ah->auth_data);
295 if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { 295 if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
296 LIMIT_NETDEBUG( 296 LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
297 printk(KERN_WARNING "ipsec ah authentication error\n"));
298 x->stats.integrity_failed++; 297 x->stats.integrity_failed++;
299 goto free_out; 298 goto free_out;
300 } 299 }
@@ -332,9 +331,9 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
332 if (!x) 331 if (!x)
333 return; 332 return;
334 333
335 NETDEBUG(printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/" 334 NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/"
336 "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", 335 "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
337 ntohl(ah->spi), NIP6(iph->daddr))); 336 ntohl(ah->spi), NIP6(iph->daddr));
338 337
339 xfrm_state_put(x); 338 xfrm_state_put(x);
340} 339}
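
The ah6.c hunks belong to a tree-wide cleanup: NETDEBUG() and LIMIT_NETDEBUG()
now take a printk-style format and arguments directly instead of wrapping a
whole printk() call, removing one level of parentheses at every call site. A
rough user-space sketch of the before/after shape; the macro bodies here are
assumptions for illustration, not the kernel's definitions (##__VA_ARGS__ is
a GNU extension):

	#include <stdio.h>

	/* old style: the caller supplies the entire print statement */
	#define NETDEBUG_OLD(stmt)      do { stmt; } while (0)
	/* new style: the macro owns the print call */
	#define NETDEBUG_NEW(fmt, ...)  printf(fmt, ##__VA_ARGS__)

	int main(void)
	{
		unsigned int spi = 0x1234;

		NETDEBUG_OLD(printf("old: pmtu discovery on SA AH/%08x\n", spi));
		NETDEBUG_NEW("new: pmtu discovery on SA AH/%08x\n", spi);
		return 0;
	}
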
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5229365cd8b4..01468fab3d3d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -29,6 +29,7 @@
29#include <net/addrconf.h> 29#include <net/addrconf.h>
30#include <net/transp_v6.h> 30#include <net/transp_v6.h>
31#include <net/ip6_route.h> 31#include <net/ip6_route.h>
32#include <net/tcp_states.h>
32 33
33#include <linux/errqueue.h> 34#include <linux/errqueue.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -588,8 +589,8 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
588 break; 589 break;
589 590
590 default: 591 default:
591 LIMIT_NETDEBUG( 592 LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n",
592 printk(KERN_DEBUG "invalid cmsg type: %d\n", cmsg->cmsg_type)); 593 cmsg->cmsg_type);
593 err = -EINVAL; 594 err = -EINVAL;
594 break; 595 break;
595 }; 596 };
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 324db62515a2..e8bff9d3d96c 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -212,8 +212,7 @@ static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, stru
212 212
213 padlen = nexthdr[0]; 213 padlen = nexthdr[0];
214 if (padlen+2 >= elen) { 214 if (padlen+2 >= elen) {
215 LIMIT_NETDEBUG( 215 LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
216 printk(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen));
217 ret = -EINVAL; 216 ret = -EINVAL;
218 goto out; 217 goto out;
219 } 218 }
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index e0839eafc3a9..5be6da2584ee 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -424,8 +424,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
424 IP6CB(skb)->ra = optoff; 424 IP6CB(skb)->ra = optoff;
425 return 1; 425 return 1;
426 } 426 }
427 LIMIT_NETDEBUG( 427 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
428 printk(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", skb->nh.raw[optoff+1])); 428 skb->nh.raw[optoff+1]);
429 kfree_skb(skb); 429 kfree_skb(skb);
430 return 0; 430 return 0;
431} 431}
@@ -437,8 +437,8 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
437 u32 pkt_len; 437 u32 pkt_len;
438 438
439 if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { 439 if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
440 LIMIT_NETDEBUG( 440 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
441 printk(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", skb->nh.raw[optoff+1])); 441 skb->nh.raw[optoff+1]);
442 IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 442 IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
443 goto drop; 443 goto drop;
444 } 444 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ff3ec9822e36..5176fc655ea9 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,7 +67,7 @@
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/system.h> 68#include <asm/system.h>
69 69
70DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); 70DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 71
72/* 72/*
73 * The ICMP socket(s). This is the most convenient way to flow control 73 * The ICMP socket(s). This is the most convenient way to flow control
@@ -332,8 +332,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
332 * for now we don't know that. 332 * for now we don't know that.
333 */ 333 */
334 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { 334 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
335 LIMIT_NETDEBUG( 335 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
336 printk(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"));
337 return; 336 return;
338 } 337 }
339 338
@@ -341,8 +340,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
341 * Never answer to a ICMP packet. 340 * Never answer to a ICMP packet.
342 */ 341 */
343 if (is_ineligible(skb)) { 342 if (is_ineligible(skb)) {
344 LIMIT_NETDEBUG( 343 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
345 printk(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"));
346 return; 344 return;
347 } 345 }
348 346
@@ -393,8 +391,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
393 len = skb->len - msg.offset; 391 len = skb->len - msg.offset;
394 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); 392 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
395 if (len < 0) { 393 if (len < 0) {
396 LIMIT_NETDEBUG( 394 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
397 printk(KERN_DEBUG "icmp: len problem\n"));
398 goto out_dst_release; 395 goto out_dst_release;
399 } 396 }
400 397
@@ -551,7 +548,8 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
551 548
552 read_lock(&raw_v6_lock); 549 read_lock(&raw_v6_lock);
553 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) { 550 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
554 while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr))) { 551 while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
552 skb->dev->ifindex))) {
555 rawv6_err(sk, skb, NULL, type, code, inner_offset, info); 553 rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
556 sk = sk_next(sk); 554 sk = sk_next(sk);
557 } 555 }
@@ -583,17 +581,15 @@ static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
583 skb->ip_summed = CHECKSUM_UNNECESSARY; 581 skb->ip_summed = CHECKSUM_UNNECESSARY;
584 if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 582 if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
585 skb->csum)) { 583 skb->csum)) {
586 LIMIT_NETDEBUG( 584 LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 hw checksum failed\n");
587 printk(KERN_DEBUG "ICMPv6 hw checksum failed\n"));
588 skb->ip_summed = CHECKSUM_NONE; 585 skb->ip_summed = CHECKSUM_NONE;
589 } 586 }
590 } 587 }
591 if (skb->ip_summed == CHECKSUM_NONE) { 588 if (skb->ip_summed == CHECKSUM_NONE) {
592 if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 589 if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
593 skb_checksum(skb, 0, skb->len, 0))) { 590 skb_checksum(skb, 0, skb->len, 0))) {
594 LIMIT_NETDEBUG( 591 LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
595 printk(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n", 592 NIP6(*saddr), NIP6(*daddr));
596 NIP6(*saddr), NIP6(*daddr)));
597 goto discard_it; 593 goto discard_it;
598 } 594 }
599 } 595 }
@@ -669,8 +665,7 @@ static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
669 break; 665 break;
670 666
671 default: 667 default:
672 LIMIT_NETDEBUG( 668 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
673 printk(KERN_DEBUG "icmpv6: msg of unknown type\n"));
674 669
675 /* informational */ 670 /* informational */
676 if (type & ICMPV6_INFOMSG_MASK) 671 if (type & ICMPV6_INFOMSG_MASK)
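
Both checksum branches above rely on csum_ipv6_magic(), which folds the IPv6
pseudo-header (source address, destination address, upper-layer length, next
header) into the one's-complement sum of the payload. A runnable user-space
restatement of that computation per RFC 2460 section 8.1; this is a sketch,
not the kernel's optimized implementation:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	static uint16_t ipv6_pseudo_csum(const uint8_t saddr[16],
					 const uint8_t daddr[16],
					 uint32_t len, uint8_t nexthdr,
					 const uint8_t *payload)
	{
		uint32_t sum = 0;
		uint32_t i;

		for (i = 0; i < 16; i += 2) {	/* both 128-bit addresses */
			sum += (saddr[i] << 8) | saddr[i + 1];
			sum += (daddr[i] << 8) | daddr[i + 1];
		}
		sum += len >> 16;		/* upper-layer packet length */
		sum += len & 0xffff;
		sum += nexthdr;			/* zero-padded next header */
		for (i = 0; i + 1 < len; i += 2)
			sum += (payload[i] << 8) | payload[i + 1];
		if (len & 1)			/* odd trailing byte */
			sum += payload[len - 1] << 8;
		return csum_fold(sum);
	}

	int main(void)
	{
		uint8_t s[16] = { 0xfe, 0x80 }, d[16] = { 0xfe, 0x80 };
		uint8_t icmp[8] = { 128, 0, 0, 0, 0, 1, 0, 1 };

		d[15] = 1;
		printf("csum=%04x\n",
		       ipv6_pseudo_csum(s, d, sizeof(icmp), 58, icmp));
		return 0;
	}
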
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
new file mode 100644
index 000000000000..01d5f46d4e40
--- /dev/null
+++ b/net/ipv6/inet6_hashtables.c
@@ -0,0 +1,81 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic INET6 transport hashtables
7 *
8 * Authors: Lotsa people, from code originally in tcp
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17
18#include <linux/module.h>
19
20#include <net/inet_connection_sock.h>
21#include <net/inet_hashtables.h>
22#include <net/inet6_hashtables.h>
23
24struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
25 const struct in6_addr *daddr,
26 const unsigned short hnum, const int dif)
27{
28 struct sock *sk;
29 const struct hlist_node *node;
30 struct sock *result = NULL;
31 int score, hiscore = 0;
32
33 read_lock(&hashinfo->lhash_lock);
34 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
35 if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
36 const struct ipv6_pinfo *np = inet6_sk(sk);
37
38 score = 1;
39 if (!ipv6_addr_any(&np->rcv_saddr)) {
40 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
41 continue;
42 score++;
43 }
44 if (sk->sk_bound_dev_if) {
45 if (sk->sk_bound_dev_if != dif)
46 continue;
47 score++;
48 }
49 if (score == 3) {
50 result = sk;
51 break;
52 }
53 if (score > hiscore) {
54 hiscore = score;
55 result = sk;
56 }
57 }
58 }
59 if (result)
60 sock_hold(result);
61 read_unlock(&hashinfo->lhash_lock);
62 return result;
63}
64
65EXPORT_SYMBOL_GPL(inet6_lookup_listener);
66
67struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
68 const struct in6_addr *saddr, const u16 sport,
69 const struct in6_addr *daddr, const u16 dport,
70 const int dif)
71{
72 struct sock *sk;
73
74 local_bh_disable();
75 sk = __inet6_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
76 local_bh_enable();
77
78 return sk;
79}
80
81EXPORT_SYMBOL_GPL(inet6_lookup);
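
inet6_lookup_listener() picks the best of several candidate listeners by
score: every PF_INET6 socket bound to the right port starts at 1, an exact
rcv_saddr match and an exact bound-device match add a point each, and a
perfect score of 3 ends the walk early. A compact runnable illustration of
the same selection rule, with illustrative names:

	#include <stdio.h>

	struct listener { const char *name; int addr_exact; int dev_bound; };

	static const struct listener *best_match(const struct listener *l, int n)
	{
		const struct listener *result = NULL;
		int i, score, hiscore = 0;

		for (i = 0; i < n; i++) {
			score = 1 + l[i].addr_exact + l[i].dev_bound;
			if (score == 3)
				return &l[i];	/* perfect match, stop */
			if (score > hiscore) {
				hiscore = score;
				result = &l[i];
			}
		}
		return result;
	}

	int main(void)
	{
		struct listener socks[] = {
			{ "wildcard", 0, 0 },
			{ "addr-bound", 1, 0 },
		};

		printf("chose %s\n", best_match(socks, 2)->name);
		return 0;
	}
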
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1b354aa97934..16af874c9e8f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -49,7 +49,7 @@
49 49
50struct rt6_statistics rt6_stats; 50struct rt6_statistics rt6_stats;
51 51
52static kmem_cache_t * fib6_node_kmem; 52static kmem_cache_t * fib6_node_kmem __read_mostly;
53 53
54enum fib_walk_state_t 54enum fib_walk_state_t
55{ 55{
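
The __read_mostly annotation added here (and to the SNMP statistics earlier
in this patch) groups rarely-written globals into their own section, so they
do not share cache lines with write-hot data. A sketch of what the attribute
amounts to; the section name matches kernels of this era but is recalled, not
quoted from this patch:

	#define __read_mostly __attribute__((__section__(".data.read_mostly")))

	static int fib_node_cache_size __read_mostly = 64;
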
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 10fbb50daea4..6e3480426939 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -56,7 +56,7 @@ static inline int ip6_rcv_finish( struct sk_buff *skb)
56 return dst_input(skb); 56 return dst_input(skb);
57} 57}
58 58
59int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 59int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
60{ 60{
61 struct ipv6hdr *hdr; 61 struct ipv6hdr *hdr;
62 u32 pkt_len; 62 u32 pkt_len;
@@ -166,8 +166,8 @@ resubmit:
166 nexthdr = skb->nh.raw[nhoff]; 166 nexthdr = skb->nh.raw[nhoff];
167 167
168 raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); 168 raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
169 if (raw_sk) 169 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
170 ipv6_raw_deliver(skb, nexthdr); 170 raw_sk = NULL;
171 171
172 hash = nexthdr & (MAX_INET_PROTOS - 1); 172 hash = nexthdr & (MAX_INET_PROTOS - 1);
173 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) { 173 if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ae652ca14bc9..01ef94f7c7f1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -153,51 +153,6 @@ int ip6_output(struct sk_buff *skb)
153 return ip6_output2(skb); 153 return ip6_output2(skb);
154} 154}
155 155
156#ifdef CONFIG_NETFILTER
157int ip6_route_me_harder(struct sk_buff *skb)
158{
159 struct ipv6hdr *iph = skb->nh.ipv6h;
160 struct dst_entry *dst;
161 struct flowi fl = {
162 .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
163 .nl_u =
164 { .ip6_u =
165 { .daddr = iph->daddr,
166 .saddr = iph->saddr, } },
167 .proto = iph->nexthdr,
168 };
169
170 dst = ip6_route_output(skb->sk, &fl);
171
172 if (dst->error) {
173 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
174 LIMIT_NETDEBUG(
175 printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n"));
176 dst_release(dst);
177 return -EINVAL;
178 }
179
180 /* Drop old route. */
181 dst_release(skb->dst);
182
183 skb->dst = dst;
184 return 0;
185}
186#endif
187
188static inline int ip6_maybe_reroute(struct sk_buff *skb)
189{
190#ifdef CONFIG_NETFILTER
191 if (skb->nfcache & NFC_ALTERED){
192 if (ip6_route_me_harder(skb) != 0){
193 kfree_skb(skb);
194 return -EINVAL;
195 }
196 }
197#endif /* CONFIG_NETFILTER */
198 return dst_output(skb);
199}
200
201/* 156/*
202 * xmit an sk_buff (used by TCP) 157 * xmit an sk_buff (used by TCP)
203 */ 158 */
@@ -266,7 +221,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
266 mtu = dst_mtu(dst); 221 mtu = dst_mtu(dst);
267 if ((skb->len <= mtu) || ipfragok) { 222 if ((skb->len <= mtu) || ipfragok) {
268 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 223 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
269 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute); 224 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
225 dst_output);
270 } 226 }
271 227
272 if (net_ratelimit()) 228 if (net_ratelimit())
@@ -321,7 +277,9 @@ static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
321 read_lock(&ip6_ra_lock); 277 read_lock(&ip6_ra_lock);
322 for (ra = ip6_ra_chain; ra; ra = ra->next) { 278 for (ra = ip6_ra_chain; ra; ra = ra->next) {
323 struct sock *sk = ra->sk; 279 struct sock *sk = ra->sk;
324 if (sk && ra->sel == sel) { 280 if (sk && ra->sel == sel &&
281 (!sk->sk_bound_dev_if ||
282 sk->sk_bound_dev_if == skb->dev->ifindex)) {
325 if (last) { 283 if (last) {
326 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 284 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
327 if (skb2) 285 if (skb2)
@@ -667,7 +625,7 @@ slow_path:
667 */ 625 */
668 626
669 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 627 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
670 NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n")); 628 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
671 IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); 629 IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
672 err = -ENOMEM; 630 err = -ENOMEM;
673 goto fail; 631 goto fail;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 3bc144a79fa5..76466af8331e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -55,7 +55,7 @@
55 55
56#include <asm/uaccess.h> 56#include <asm/uaccess.h>
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); 58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59 59
60static struct packet_type ipv6_packet_type = { 60static struct packet_type ipv6_packet_type = {
61 .type = __constant_htons(ETH_P_IPV6), 61 .type = __constant_htons(ETH_P_IPV6),
@@ -109,13 +109,6 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
109 return 0; 109 return 0;
110} 110}
111 111
112extern int ip6_mc_source(int add, int omode, struct sock *sk,
113 struct group_source_req *pgsr);
114extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
115extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
116 struct group_filter __user *optval, int __user *optlen);
117
118
119int ipv6_setsockopt(struct sock *sk, int level, int optname, 112int ipv6_setsockopt(struct sock *sk, int level, int optname,
120 char __user *optval, int optlen) 113 char __user *optval, int optlen)
121{ 114{
@@ -163,6 +156,13 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
163 fl6_free_socklist(sk); 156 fl6_free_socklist(sk);
164 ipv6_sock_mc_close(sk); 157 ipv6_sock_mc_close(sk);
165 158
159 /*
160 * Sock is moving from IPv6 to IPv4 (sk_prot), so
161 * remove it from the refcnt debug socks count in the
162 * original family...
163 */
164 sk_refcnt_debug_dec(sk);
165
166 if (sk->sk_protocol == IPPROTO_TCP) { 166 if (sk->sk_protocol == IPPROTO_TCP) {
167 struct tcp_sock *tp = tcp_sk(sk); 167 struct tcp_sock *tp = tcp_sk(sk);
168 168
@@ -192,9 +192,11 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
192 kfree_skb(pktopt); 192 kfree_skb(pktopt);
193 193
194 sk->sk_destruct = inet_sock_destruct; 194 sk->sk_destruct = inet_sock_destruct;
195#ifdef INET_REFCNT_DEBUG 195 /*
196 atomic_dec(&inet6_sock_nr); 196 * ... and add it to the refcnt debug socks count
197#endif 197 * in the new family. -acme
198 */
199 sk_refcnt_debug_inc(sk);
198 module_put(THIS_MODULE); 200 module_put(THIS_MODULE);
199 retv = 0; 201 retv = 0;
200 break; 202 break;
@@ -437,7 +439,6 @@ done:
437 } 439 }
438 case MCAST_MSFILTER: 440 case MCAST_MSFILTER:
439 { 441 {
440 extern int sysctl_optmem_max;
441 extern int sysctl_mld_max_msf; 442 extern int sysctl_mld_max_msf;
442 struct group_filter *gsf; 443 struct group_filter *gsf;
443 444
diff --git a/net/ipv6/ipv6_syms.c b/net/ipv6/ipv6_syms.c
index 5ade5a5d1990..37a4a99c9fe9 100644
--- a/net/ipv6/ipv6_syms.c
+++ b/net/ipv6/ipv6_syms.c
@@ -15,9 +15,6 @@ EXPORT_SYMBOL(ndisc_mc_map);
15EXPORT_SYMBOL(register_inet6addr_notifier); 15EXPORT_SYMBOL(register_inet6addr_notifier);
16EXPORT_SYMBOL(unregister_inet6addr_notifier); 16EXPORT_SYMBOL(unregister_inet6addr_notifier);
17EXPORT_SYMBOL(ip6_route_output); 17EXPORT_SYMBOL(ip6_route_output);
18#ifdef CONFIG_NETFILTER
19EXPORT_SYMBOL(ip6_route_me_harder);
20#endif
21EXPORT_SYMBOL(addrconf_lock); 18EXPORT_SYMBOL(addrconf_lock);
22EXPORT_SYMBOL(ipv6_setsockopt); 19EXPORT_SYMBOL(ipv6_setsockopt);
23EXPORT_SYMBOL(ipv6_getsockopt); 20EXPORT_SYMBOL(ipv6_getsockopt);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7ae72d4c9bd2..a7eae30f4554 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -812,7 +812,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
812 if (ipv6_chk_acast_addr(dev, &msg->target) || 812 if (ipv6_chk_acast_addr(dev, &msg->target) ||
813 (idev->cnf.forwarding && 813 (idev->cnf.forwarding &&
814 pneigh_lookup(&nd_tbl, &msg->target, dev, 0))) { 814 pneigh_lookup(&nd_tbl, &msg->target, dev, 0))) {
815 if (skb->stamp.tv_sec != LOCALLY_ENQUEUED && 815 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
816 skb->pkt_type != PACKET_HOST && 816 skb->pkt_type != PACKET_HOST &&
817 inc != 0 && 817 inc != 0 &&
818 idev->nd_parms->proxy_delay != 0) { 818 idev->nd_parms->proxy_delay != 0) {
@@ -1487,6 +1487,8 @@ int ndisc_rcv(struct sk_buff *skb)
1487 return 0; 1487 return 0;
1488 } 1488 }
1489 1489
1490 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
1491
1490 switch (msg->icmph.icmp6_type) { 1492 switch (msg->icmph.icmp6_type) {
1491 case NDISC_NEIGHBOUR_SOLICITATION: 1493 case NDISC_NEIGHBOUR_SOLICITATION:
1492 ndisc_recv_ns(skb); 1494 ndisc_recv_ns(skb);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
new file mode 100644
index 000000000000..f8626ebf90fd
--- /dev/null
+++ b/net/ipv6/netfilter.c
@@ -0,0 +1,104 @@
1#include <linux/config.h>
2#include <linux/init.h>
3
4#ifdef CONFIG_NETFILTER
5
6#include <linux/kernel.h>
7#include <linux/ipv6.h>
8#include <linux/netfilter.h>
9#include <linux/netfilter_ipv6.h>
10#include <net/dst.h>
11#include <net/ipv6.h>
12#include <net/ip6_route.h>
13
14int ip6_route_me_harder(struct sk_buff *skb)
15{
16 struct ipv6hdr *iph = skb->nh.ipv6h;
17 struct dst_entry *dst;
18 struct flowi fl = {
19 .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
20 .nl_u =
21 { .ip6_u =
22 { .daddr = iph->daddr,
23 .saddr = iph->saddr, } },
24 .proto = iph->nexthdr,
25 };
26
27 dst = ip6_route_output(skb->sk, &fl);
28
29 if (dst->error) {
30 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
31 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
32 dst_release(dst);
33 return -EINVAL;
34 }
35
36 /* Drop old route. */
37 dst_release(skb->dst);
38
39 skb->dst = dst;
40 return 0;
41}
42EXPORT_SYMBOL(ip6_route_me_harder);
43
44/*
 45 * Extra routing may be needed on local out, as the QUEUE target never
46 * returns control to the table.
47 */
48
49struct ip6_rt_info {
50 struct in6_addr daddr;
51 struct in6_addr saddr;
52};
53
54static void save(const struct sk_buff *skb, struct nf_info *info)
55{
56 struct ip6_rt_info *rt_info = nf_info_reroute(info);
57
58 if (info->hook == NF_IP6_LOCAL_OUT) {
59 struct ipv6hdr *iph = skb->nh.ipv6h;
60
61 rt_info->daddr = iph->daddr;
62 rt_info->saddr = iph->saddr;
63 }
64}
65
66static int reroute(struct sk_buff **pskb, const struct nf_info *info)
67{
68 struct ip6_rt_info *rt_info = nf_info_reroute(info);
69
70 if (info->hook == NF_IP6_LOCAL_OUT) {
71 struct ipv6hdr *iph = (*pskb)->nh.ipv6h;
72 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
73 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr))
74 return ip6_route_me_harder(*pskb);
75 }
76 return 0;
77}
78
79static struct nf_queue_rerouter ip6_reroute = {
80 .rer_size = sizeof(struct ip6_rt_info),
81 .save = &save,
82 .reroute = &reroute,
83};
84
85int __init ipv6_netfilter_init(void)
86{
87 return nf_register_queue_rerouter(PF_INET6, &ip6_reroute);
88}
89
90void ipv6_netfilter_fini(void)
91{
92 nf_unregister_queue_rerouter(PF_INET6);
93}
94
95#else /* CONFIG_NETFILTER */
96int __init ipv6_netfilter_init(void)
97{
98 return 0;
99}
100
101void ipv6_netfilter_fini(void)
102{
103}
104#endif /* CONFIG_NETFILTER */
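
The save()/reroute() pair above recreates, generically, what ip6_queue.c used
to do by hand (see its hunks below): snapshot the addresses before a packet
is queued to user space, then re-run the route lookup on reinject only if
user space rewrote either one. A runnable user-space sketch of that contract;
addr128, pkt, and rt_info here are stand-ins, not kernel types:

	#include <stdio.h>
	#include <string.h>

	struct addr128 { unsigned char b[16]; };
	struct pkt     { struct addr128 saddr, daddr; };
	struct rt_info { struct addr128 saddr, daddr; };

	static void save(const struct pkt *p, struct rt_info *ri)
	{
		ri->saddr = p->saddr;	/* snapshot before queueing */
		ri->daddr = p->daddr;
	}

	static int needs_reroute(const struct pkt *p, const struct rt_info *ri)
	{
		/* a rewritten address makes the cached route stale */
		return memcmp(&p->saddr, &ri->saddr, 16) ||
		       memcmp(&p->daddr, &ri->daddr, 16);
	}

	int main(void)
	{
		struct pkt p = { 0 };
		struct rt_info ri;

		save(&p, &ri);
		p.daddr.b[15] = 1;	/* user space mangled the destination */
		printf("reroute needed: %d\n", needs_reroute(&p, &ri));
		return 0;
	}
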
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 77ec704c9ee3..216fbe1ac65c 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -10,13 +10,16 @@ menu "IPv6: Netfilter Configuration (EXPERIMENTAL)"
10# dep_tristate ' FTP protocol support' CONFIG_IP6_NF_FTP $CONFIG_IP6_NF_CONNTRACK 10# dep_tristate ' FTP protocol support' CONFIG_IP6_NF_FTP $CONFIG_IP6_NF_CONNTRACK
11#fi 11#fi
12config IP6_NF_QUEUE 12config IP6_NF_QUEUE
13 tristate "Userspace queueing via NETLINK" 13 tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)"
14 ---help--- 14 ---help---
15 15
16 This option adds a queue handler to the kernel for IPv6 16 This option adds a queue handler to the kernel for IPv6
17 packets which lets us to receive the filtered packets 17 packets which enables users to receive the filtered packets
18 with QUEUE target using libiptc as we can do with 18 with QUEUE target using libipq.
19 the IPv4 now. 19
 20 This option enables the old IPv6-only "ip6_queue" implementation
21 which has been obsoleted by the new "nfnetlink_queue" code (see
22 CONFIG_NETFILTER_NETLINK_QUEUE).
20 23
21 (C) Fernando Anton 2001 24 (C) Fernando Anton 2001
22 IPv64 Project - Work based in IPv64 draft by Arturo Azcorra. 25 IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
@@ -196,6 +199,16 @@ config IP6_NF_TARGET_LOG
196 199
197 To compile it as a module, choose M here. If unsure, say N. 200 To compile it as a module, choose M here. If unsure, say N.
198 201
202config IP6_NF_TARGET_REJECT
203 tristate "REJECT target support"
204 depends on IP6_NF_FILTER
205 help
206 The REJECT target allows a filtering rule to specify that an ICMPv6
207 error should be issued in response to an incoming packet, rather
208 than silently being dropped.
209
210 To compile it as a module, choose M here. If unsure, say N.
211
199# if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then 212# if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then
200# dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER 213# dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER
201# if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then 214# if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
@@ -226,6 +239,22 @@ config IP6_NF_TARGET_MARK
226 239
227 To compile it as a module, choose M here. If unsure, say N. 240 To compile it as a module, choose M here. If unsure, say N.
228 241
242config IP6_NF_TARGET_HL
243 tristate 'HL (hoplimit) target support'
244 depends on IP6_NF_MANGLE
245 help
246 This option adds a `HL' target, which enables the user to decrement
247 the hoplimit value of the IPv6 header or set it to a given (lower)
248 value.
249
250 While it is safe to decrement the hoplimit value, this option also
251 enables functionality to increment and set the hoplimit value of the
252 IPv6 header to arbitrary values. This is EXTREMELY DANGEROUS since
253 you can easily create immortal packets that loop forever on the
254 network.
255
256 To compile it as a module, choose M here. If unsure, say N.
257
229#dep_tristate ' LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_IPTABLES 258#dep_tristate ' LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_IPTABLES
230config IP6_NF_RAW 259config IP6_NF_RAW
231 tristate 'raw table support (required for TRACE)' 260 tristate 'raw table support (required for TRACE)'
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2e51714953b6..bd9a16a5cbba 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -20,7 +20,10 @@ obj-$(CONFIG_IP6_NF_MATCH_PHYSDEV) += ip6t_physdev.o
20obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o 20obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
21obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 21obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
22obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o 22obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
23obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
23obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 24obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
24obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o 25obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
25obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 26obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
26obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o 27obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
28obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
29obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ip6t_NFQUEUE.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index a16df5b27c84..aa11cf366efa 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -47,16 +47,10 @@
47#define NET_IPQ_QMAX 2088 47#define NET_IPQ_QMAX 2088
48#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen" 48#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
49 49
50struct ipq_rt_info {
51 struct in6_addr daddr;
52 struct in6_addr saddr;
53};
54
55struct ipq_queue_entry { 50struct ipq_queue_entry {
56 struct list_head list; 51 struct list_head list;
57 struct nf_info *info; 52 struct nf_info *info;
58 struct sk_buff *skb; 53 struct sk_buff *skb;
59 struct ipq_rt_info rt_info;
60}; 54};
61 55
62typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long); 56typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
@@ -244,8 +238,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
244 238
245 pmsg->packet_id = (unsigned long )entry; 239 pmsg->packet_id = (unsigned long )entry;
246 pmsg->data_len = data_len; 240 pmsg->data_len = data_len;
247 pmsg->timestamp_sec = entry->skb->stamp.tv_sec; 241 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec;
248 pmsg->timestamp_usec = entry->skb->stamp.tv_usec; 242 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec;
249 pmsg->mark = entry->skb->nfmark; 243 pmsg->mark = entry->skb->nfmark;
250 pmsg->hook = entry->info->hook; 244 pmsg->hook = entry->info->hook;
251 pmsg->hw_protocol = entry->skb->protocol; 245 pmsg->hw_protocol = entry->skb->protocol;
@@ -284,7 +278,8 @@ nlmsg_failure:
284} 278}
285 279
286static int 280static int
287ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data) 281ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
282 unsigned int queuenum, void *data)
288{ 283{
289 int status = -EINVAL; 284 int status = -EINVAL;
290 struct sk_buff *nskb; 285 struct sk_buff *nskb;
@@ -302,13 +297,6 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
302 entry->info = info; 297 entry->info = info;
303 entry->skb = skb; 298 entry->skb = skb;
304 299
305 if (entry->info->hook == NF_IP_LOCAL_OUT) {
306 struct ipv6hdr *iph = skb->nh.ipv6h;
307
308 entry->rt_info.daddr = iph->daddr;
309 entry->rt_info.saddr = iph->saddr;
310 }
311
312 nskb = ipq_build_packet_message(entry, &status); 300 nskb = ipq_build_packet_message(entry, &status);
313 if (nskb == NULL) 301 if (nskb == NULL)
314 goto err_out_free; 302 goto err_out_free;
@@ -384,23 +372,11 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
384 } 372 }
385 skb_put(e->skb, diff); 373 skb_put(e->skb, diff);
386 } 374 }
387 if (!skb_ip_make_writable(&e->skb, v->data_len)) 375 if (!skb_make_writable(&e->skb, v->data_len))
388 return -ENOMEM; 376 return -ENOMEM;
389 memcpy(e->skb->data, v->payload, v->data_len); 377 memcpy(e->skb->data, v->payload, v->data_len);
390 e->skb->ip_summed = CHECKSUM_NONE; 378 e->skb->ip_summed = CHECKSUM_NONE;
391 e->skb->nfcache |= NFC_ALTERED; 379
392
393 /*
394 * Extra routing may needed on local out, as the QUEUE target never
395 * returns control to the table.
396 * Not a nice way to cmp, but works
397 */
398 if (e->info->hook == NF_IP_LOCAL_OUT) {
399 struct ipv6hdr *iph = e->skb->nh.ipv6h;
400 if (!ipv6_addr_equal(&iph->daddr, &e->rt_info.daddr) ||
401 !ipv6_addr_equal(&iph->saddr, &e->rt_info.saddr))
402 return ip6_route_me_harder(e->skb);
403 }
404 return 0; 380 return 0;
405} 381}
406 382
@@ -676,6 +652,11 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
676 return len; 652 return len;
677} 653}
678 654
655static struct nf_queue_handler nfqh = {
656 .name = "ip6_queue",
657 .outfn = &ipq_enqueue_packet,
658};
659
679static int 660static int
680init_or_cleanup(int init) 661init_or_cleanup(int init)
681{ 662{
@@ -686,7 +667,8 @@ init_or_cleanup(int init)
686 goto cleanup; 667 goto cleanup;
687 668
688 netlink_register_notifier(&ipq_nl_notifier); 669 netlink_register_notifier(&ipq_nl_notifier);
689 ipqnl = netlink_kernel_create(NETLINK_IP6_FW, ipq_rcv_sk); 670 ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk,
671 THIS_MODULE);
690 if (ipqnl == NULL) { 672 if (ipqnl == NULL) {
691 printk(KERN_ERR "ip6_queue: failed to create netlink socket\n"); 673 printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
692 goto cleanup_netlink_notifier; 674 goto cleanup_netlink_notifier;
@@ -703,7 +685,7 @@ init_or_cleanup(int init)
703 register_netdevice_notifier(&ipq_dev_notifier); 685 register_netdevice_notifier(&ipq_dev_notifier);
704 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0); 686 ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0);
705 687
706 status = nf_register_queue_handler(PF_INET6, ipq_enqueue_packet, NULL); 688 status = nf_register_queue_handler(PF_INET6, &nfqh);
707 if (status < 0) { 689 if (status < 0) {
708 printk(KERN_ERR "ip6_queue: failed to register queue handler\n"); 690 printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
709 goto cleanup_sysctl; 691 goto cleanup_sysctl;
@@ -711,7 +693,7 @@ init_or_cleanup(int init)
711 return status; 693 return status;
712 694
713cleanup: 695cleanup:
714 nf_unregister_queue_handler(PF_INET6); 696 nf_unregister_queue_handlers(&nfqh);
715 synchronize_net(); 697 synchronize_net();
716 ipq_flush(NF_DROP); 698 ipq_flush(NF_DROP);
717 699
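
Two structural changes run through this file: the per-entry rt_info snapshot
moves out (handled by the generic rerouter in net/ipv6/netfilter.c above),
and queue handlers are now registered as a named struct nf_queue_handler
rather than a bare function pointer, with unregistration keyed by handler
instead of by family. A hedged sketch of the shapes involved, inferred from
the hunks rather than quoted from the headers:

	/* declaration shape inferred from the diff; not a verbatim header */
	struct sk_buff;
	struct nf_info;

	struct nf_queue_handler {
		const char *name;
		int (*outfn)(struct sk_buff *skb, struct nf_info *info,
			     unsigned int queuenum, void *data);
	};

	/* registration is now symmetric on the handler object:
	 *   nf_register_queue_handler(PF_INET6, &nfqh);
	 *   ...
	 *   nf_unregister_queue_handlers(&nfqh);
	 */
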
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 73034511c8db..1cb8adb2787f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -401,7 +401,6 @@ ip6t_do_table(struct sk_buff **pskb,
401 do { 401 do {
402 IP_NF_ASSERT(e); 402 IP_NF_ASSERT(e);
403 IP_NF_ASSERT(back); 403 IP_NF_ASSERT(back);
404 (*pskb)->nfcache |= e->nfcache;
405 if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6, 404 if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,
406 &protoff, &offset)) { 405 &protoff, &offset)) {
407 struct ip6t_entry_target *t; 406 struct ip6t_entry_target *t;
@@ -434,8 +433,8 @@ ip6t_do_table(struct sk_buff **pskb,
434 back->comefrom); 433 back->comefrom);
435 continue; 434 continue;
436 } 435 }
437 if (table_base + v 436 if (table_base + v != (void *)e + e->next_offset
438 != (void *)e + e->next_offset) { 437 && !(e->ipv6.flags & IP6T_F_GOTO)) {
439 /* Save old back ptr in next entry */ 438 /* Save old back ptr in next entry */
440 struct ip6t_entry *next 439 struct ip6t_entry *next
441 = (void *)e + e->next_offset; 440 = (void *)e + e->next_offset;
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
new file mode 100644
index 000000000000..8f5549b72720
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -0,0 +1,118 @@
1/*
2 * Hop Limit modification target for ip6tables
3 * Maciej Soltysiak <solt@dns.toxicfilms.tv>
4 * Based on HW's TTL module
5 *
6 * This software is distributed under the terms of GNU GPL
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <linux/ip.h>
12
13#include <linux/netfilter_ipv6/ip6_tables.h>
14#include <linux/netfilter_ipv6/ip6t_HL.h>
15
16MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
17MODULE_DESCRIPTION("IP tables Hop Limit modification module");
18MODULE_LICENSE("GPL");
19
20static unsigned int ip6t_hl_target(struct sk_buff **pskb,
21 const struct net_device *in,
22 const struct net_device *out,
23 unsigned int hooknum,
24 const void *targinfo, void *userinfo)
25{
26 struct ipv6hdr *ip6h;
27 const struct ip6t_HL_info *info = targinfo;
28 u_int16_t diffs[2];
29 int new_hl;
30
31 if (!skb_make_writable(pskb, (*pskb)->len))
32 return NF_DROP;
33
34 ip6h = (*pskb)->nh.ipv6h;
35
36 switch (info->mode) {
37 case IP6T_HL_SET:
38 new_hl = info->hop_limit;
39 break;
40 case IP6T_HL_INC:
41 new_hl = ip6h->hop_limit + info->hop_limit;
42 if (new_hl > 255)
43 new_hl = 255;
44 break;
45 case IP6T_HL_DEC:
46 new_hl = ip6h->hop_limit - info->hop_limit;
47 if (new_hl < 0)
48 new_hl = 0;
49 break;
50 default:
51 new_hl = ip6h->hop_limit;
52 break;
53 }
54
55 if (new_hl != ip6h->hop_limit) {
56 diffs[0] = htons(((unsigned)ip6h->hop_limit) << 8) ^ 0xFFFF;
57 ip6h->hop_limit = new_hl;
58 diffs[1] = htons(((unsigned)ip6h->hop_limit) << 8);
59 }
60
61 return IP6T_CONTINUE;
62}
63
64static int ip6t_hl_checkentry(const char *tablename,
65 const struct ip6t_entry *e,
66 void *targinfo,
67 unsigned int targinfosize,
68 unsigned int hook_mask)
69{
70 struct ip6t_HL_info *info = targinfo;
71
72 if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_HL_info))) {
73 printk(KERN_WARNING "ip6t_HL: targinfosize %u != %Zu\n",
74 targinfosize,
75 IP6T_ALIGN(sizeof(struct ip6t_HL_info)));
76 return 0;
77 }
78
79 if (strcmp(tablename, "mangle")) {
80 printk(KERN_WARNING "ip6t_HL: can only be called from "
81 "\"mangle\" table, not \"%s\"\n", tablename);
82 return 0;
83 }
84
85 if (info->mode > IP6T_HL_MAXMODE) {
86 printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
87 info->mode);
88 return 0;
89 }
90
91 if ((info->mode != IP6T_HL_SET) && (info->hop_limit == 0)) {
92 printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
93 "make sense with value 0\n");
94 return 0;
95 }
96
97 return 1;
98}
99
100static struct ip6t_target ip6t_HL = {
101 .name = "HL",
102 .target = ip6t_hl_target,
103 .checkentry = ip6t_hl_checkentry,
104 .me = THIS_MODULE
105};
106
107static int __init init(void)
108{
109 return ip6t_register_target(&ip6t_HL);
110}
111
112static void __exit fini(void)
113{
114 ip6t_unregister_target(&ip6t_HL);
115}
116
117module_init(init);
118module_exit(fini);
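
ip6t_hl_target() reduces to a saturating clamp: SET overwrites the hop limit,
INC saturates at 255, DEC saturates at 0. (The diffs[] array it fills appears
vestigial, inherited from the IPv4 TTL module; IPv6 has no header checksum to
adjust.) A runnable restatement of the arithmetic:

	#include <stdio.h>

	enum { HL_SET, HL_INC, HL_DEC };

	static int new_hop_limit(int cur, int mode, int arg)
	{
		switch (mode) {
		case HL_SET: return arg;
		case HL_INC: return cur + arg > 255 ? 255 : cur + arg;
		case HL_DEC: return cur - arg < 0   ? 0   : cur - arg;
		default:     return cur;
		}
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       new_hop_limit(64, HL_SET, 1),	/* 1   */
		       new_hop_limit(250, HL_INC, 10),	/* 255 */
		       new_hop_limit(3, HL_DEC, 10));	/* 0   */
		return 0;
	}
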
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a692e26a4fa3..0cd1d1bd9033 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -26,10 +26,6 @@ MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
26MODULE_DESCRIPTION("IP6 tables LOG target module"); 26MODULE_DESCRIPTION("IP6 tables LOG target module");
27MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
28 28
29static unsigned int nflog = 1;
30module_param(nflog, int, 0400);
31MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
32
33struct in_device; 29struct in_device;
34#include <net/route.h> 30#include <net/route.h>
35#include <linux/netfilter_ipv6/ip6t_LOG.h> 31#include <linux/netfilter_ipv6/ip6t_LOG.h>
@@ -44,7 +40,7 @@ struct in_device;
44static DEFINE_SPINLOCK(log_lock); 40static DEFINE_SPINLOCK(log_lock);
45 41
46/* One level of recursion won't kill us */ 42/* One level of recursion won't kill us */
47static void dump_packet(const struct ip6t_log_info *info, 43static void dump_packet(const struct nf_loginfo *info,
48 const struct sk_buff *skb, unsigned int ip6hoff, 44 const struct sk_buff *skb, unsigned int ip6hoff,
49 int recurse) 45 int recurse)
50{ 46{
@@ -53,6 +49,12 @@ static void dump_packet(const struct ip6t_log_info *info,
53 struct ipv6hdr _ip6h, *ih; 49 struct ipv6hdr _ip6h, *ih;
54 unsigned int ptr; 50 unsigned int ptr;
55 unsigned int hdrlen = 0; 51 unsigned int hdrlen = 0;
52 unsigned int logflags;
53
54 if (info->type == NF_LOG_TYPE_LOG)
55 logflags = info->u.log.logflags;
56 else
57 logflags = NF_LOG_MASK;
56 58
57 ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h); 59 ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
58 if (ih == NULL) { 60 if (ih == NULL) {
@@ -84,7 +86,7 @@ static void dump_packet(const struct ip6t_log_info *info,
84 } 86 }
85 87
86 /* Max length: 48 "OPT (...) " */ 88 /* Max length: 48 "OPT (...) " */
87 if (info->logflags & IP6T_LOG_IPOPT) 89 if (logflags & IP6T_LOG_IPOPT)
88 printk("OPT ( "); 90 printk("OPT ( ");
89 91
90 switch (currenthdr) { 92 switch (currenthdr) {
@@ -119,7 +121,7 @@ static void dump_packet(const struct ip6t_log_info *info,
119 case IPPROTO_ROUTING: 121 case IPPROTO_ROUTING:
120 case IPPROTO_HOPOPTS: 122 case IPPROTO_HOPOPTS:
121 if (fragment) { 123 if (fragment) {
122 if (info->logflags & IP6T_LOG_IPOPT) 124 if (logflags & IP6T_LOG_IPOPT)
123 printk(")"); 125 printk(")");
124 return; 126 return;
125 } 127 }
@@ -127,7 +129,7 @@ static void dump_packet(const struct ip6t_log_info *info,
127 break; 129 break;
128 /* Max Length */ 130 /* Max Length */
129 case IPPROTO_AH: 131 case IPPROTO_AH:
130 if (info->logflags & IP6T_LOG_IPOPT) { 132 if (logflags & IP6T_LOG_IPOPT) {
131 struct ip_auth_hdr _ahdr, *ah; 133 struct ip_auth_hdr _ahdr, *ah;
132 134
133 /* Max length: 3 "AH " */ 135 /* Max length: 3 "AH " */
@@ -158,7 +160,7 @@ static void dump_packet(const struct ip6t_log_info *info,
158 hdrlen = (hp->hdrlen+2)<<2; 160 hdrlen = (hp->hdrlen+2)<<2;
159 break; 161 break;
160 case IPPROTO_ESP: 162 case IPPROTO_ESP:
161 if (info->logflags & IP6T_LOG_IPOPT) { 163 if (logflags & IP6T_LOG_IPOPT) {
162 struct ip_esp_hdr _esph, *eh; 164 struct ip_esp_hdr _esph, *eh;
163 165
164 /* Max length: 4 "ESP " */ 166 /* Max length: 4 "ESP " */
@@ -190,7 +192,7 @@ static void dump_packet(const struct ip6t_log_info *info,
190 printk("Unknown Ext Hdr %u", currenthdr); 192 printk("Unknown Ext Hdr %u", currenthdr);
191 return; 193 return;
192 } 194 }
193 if (info->logflags & IP6T_LOG_IPOPT) 195 if (logflags & IP6T_LOG_IPOPT)
194 printk(") "); 196 printk(") ");
195 197
196 currenthdr = hp->nexthdr; 198 currenthdr = hp->nexthdr;
@@ -218,7 +220,7 @@ static void dump_packet(const struct ip6t_log_info *info,
218 printk("SPT=%u DPT=%u ", 220 printk("SPT=%u DPT=%u ",
219 ntohs(th->source), ntohs(th->dest)); 221 ntohs(th->source), ntohs(th->dest));
220 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ 222 /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
221 if (info->logflags & IP6T_LOG_TCPSEQ) 223 if (logflags & IP6T_LOG_TCPSEQ)
222 printk("SEQ=%u ACK=%u ", 224 printk("SEQ=%u ACK=%u ",
223 ntohl(th->seq), ntohl(th->ack_seq)); 225 ntohl(th->seq), ntohl(th->ack_seq));
224 /* Max length: 13 "WINDOW=65535 " */ 226 /* Max length: 13 "WINDOW=65535 " */
@@ -245,7 +247,7 @@ static void dump_packet(const struct ip6t_log_info *info,
245 /* Max length: 11 "URGP=65535 " */ 247 /* Max length: 11 "URGP=65535 " */
246 printk("URGP=%u ", ntohs(th->urg_ptr)); 248 printk("URGP=%u ", ntohs(th->urg_ptr));
247 249
248 if ((info->logflags & IP6T_LOG_TCPOPT) 250 if ((logflags & IP6T_LOG_TCPOPT)
249 && th->doff * 4 > sizeof(struct tcphdr)) { 251 && th->doff * 4 > sizeof(struct tcphdr)) {
250 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op; 252 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
251 unsigned int i; 253 unsigned int i;
@@ -349,7 +351,7 @@ static void dump_packet(const struct ip6t_log_info *info,
349 } 351 }
350 352
351 /* Max length: 15 "UID=4294967295 " */ 353 /* Max length: 15 "UID=4294967295 " */
352 if ((info->logflags & IP6T_LOG_UID) && recurse && skb->sk) { 354 if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
353 read_lock_bh(&skb->sk->sk_callback_lock); 355 read_lock_bh(&skb->sk->sk_callback_lock);
354 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 356 if (skb->sk->sk_socket && skb->sk->sk_socket->file)
355 printk("UID=%u ", skb->sk->sk_socket->file->f_uid); 357 printk("UID=%u ", skb->sk->sk_socket->file->f_uid);
@@ -357,19 +359,31 @@ static void dump_packet(const struct ip6t_log_info *info,
357 } 359 }
358} 360}
359 361
362static struct nf_loginfo default_loginfo = {
363 .type = NF_LOG_TYPE_LOG,
364 .u = {
365 .log = {
366 .level = 0,
367 .logflags = NF_LOG_MASK,
368 },
369 },
370};
371
360static void 372static void
361ip6t_log_packet(unsigned int hooknum, 373ip6t_log_packet(unsigned int pf,
374 unsigned int hooknum,
362 const struct sk_buff *skb, 375 const struct sk_buff *skb,
363 const struct net_device *in, 376 const struct net_device *in,
364 const struct net_device *out, 377 const struct net_device *out,
365 const struct ip6t_log_info *loginfo, 378 const struct nf_loginfo *loginfo,
366 const char *level_string,
367 const char *prefix) 379 const char *prefix)
368{ 380{
381 if (!loginfo)
382 loginfo = &default_loginfo;
383
369 spin_lock_bh(&log_lock); 384 spin_lock_bh(&log_lock);
370 printk(level_string); 385 printk("<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
371 printk("%sIN=%s OUT=%s ", 386 prefix,
372 prefix == NULL ? loginfo->prefix : prefix,
373 in ? in->name : "", 387 in ? in->name : "",
374 out ? out->name : ""); 388 out ? out->name : "");
375 if (in && !out) { 389 if (in && !out) {
@@ -416,29 +430,17 @@ ip6t_log_target(struct sk_buff **pskb,
416 void *userinfo) 430 void *userinfo)
417{ 431{
418 const struct ip6t_log_info *loginfo = targinfo; 432 const struct ip6t_log_info *loginfo = targinfo;
419 char level_string[4] = "< >"; 433 struct nf_loginfo li;
434
435 li.type = NF_LOG_TYPE_LOG;
436 li.u.log.level = loginfo->level;
437 li.u.log.logflags = loginfo->logflags;
420 438
421 level_string[1] = '0' + (loginfo->level % 8); 439 nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, loginfo->prefix);
422 ip6t_log_packet(hooknum, *pskb, in, out, loginfo, level_string, NULL);
423 440
424 return IP6T_CONTINUE; 441 return IP6T_CONTINUE;
425} 442}
426 443
427static void
428ip6t_logfn(unsigned int hooknum,
429 const struct sk_buff *skb,
430 const struct net_device *in,
431 const struct net_device *out,
432 const char *prefix)
433{
434 struct ip6t_log_info loginfo = {
435 .level = 0,
436 .logflags = IP6T_LOG_MASK,
437 .prefix = ""
438 };
439
440 ip6t_log_packet(hooknum, skb, in, out, &loginfo, KERN_WARNING, prefix);
441}
442 444
443static int ip6t_log_checkentry(const char *tablename, 445static int ip6t_log_checkentry(const char *tablename,
444 const struct ip6t_entry *e, 446 const struct ip6t_entry *e,
@@ -475,20 +477,29 @@ static struct ip6t_target ip6t_log_reg = {
475 .me = THIS_MODULE, 477 .me = THIS_MODULE,
476}; 478};
477 479
480static struct nf_logger ip6t_logger = {
481 .name = "ip6t_LOG",
482 .logfn = &ip6t_log_packet,
483 .me = THIS_MODULE,
484};
485
478static int __init init(void) 486static int __init init(void)
479{ 487{
480 if (ip6t_register_target(&ip6t_log_reg)) 488 if (ip6t_register_target(&ip6t_log_reg))
481 return -EINVAL; 489 return -EINVAL;
482 if (nflog) 490 if (nf_log_register(PF_INET6, &ip6t_logger) < 0) {
483 nf_log_register(PF_INET6, &ip6t_logfn); 491 printk(KERN_WARNING "ip6t_LOG: not logging via system console "
492 "since somebody else already registered for PF_INET6\n");
493 /* we cannot make module load fail here, since otherwise
494 * ip6tables userspace would abort */
495 }
484 496
485 return 0; 497 return 0;
486} 498}
487 499
488static void __exit fini(void) 500static void __exit fini(void)
489{ 501{
490 if (nflog) 502 nf_log_unregister_logger(&ip6t_logger);
491 nf_log_unregister(PF_INET6, &ip6t_logfn);
492 ip6t_unregister_target(&ip6t_log_reg); 503 ip6t_unregister_target(&ip6t_log_reg);
493} 504}
494 505
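
ip6t_LOG now funnels through the generic nf_log_packet() path, carrying its
per-rule level and flags in a struct nf_loginfo, and a failed
nf_log_register() is deliberately non-fatal so loading ip6tables rules still
succeeds. One detail worth seeing side by side: the old code patched a "< >"
character buffer to build the printk level prefix, where the new code simply
formats it. A runnable comparison:

	#include <stdio.h>

	int main(void)
	{
		unsigned char level = 4;
		char old_prefix[4] = "< >";

		old_prefix[1] = '0' + (level % 8);  /* old: patch the buffer */
		printf("old=\"%s\" new=\"<%d>\"\n", old_prefix, level);
		return 0;
	}
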
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
index d09ceb05013a..81924fcc5857 100644
--- a/net/ipv6/netfilter/ip6t_MARK.c
+++ b/net/ipv6/netfilter/ip6t_MARK.c
@@ -28,10 +28,9 @@ target(struct sk_buff **pskb,
28{ 28{
29 const struct ip6t_mark_target_info *markinfo = targinfo; 29 const struct ip6t_mark_target_info *markinfo = targinfo;
30 30
31 if((*pskb)->nfmark != markinfo->mark) { 31 if((*pskb)->nfmark != markinfo->mark)
32 (*pskb)->nfmark = markinfo->mark; 32 (*pskb)->nfmark = markinfo->mark;
33 (*pskb)->nfcache |= NFC_ALTERED; 33
34 }
35 return IP6T_CONTINUE; 34 return IP6T_CONTINUE;
36} 35}
37 36
diff --git a/net/ipv6/netfilter/ip6t_NFQUEUE.c b/net/ipv6/netfilter/ip6t_NFQUEUE.c
new file mode 100644
index 000000000000..c6e3730e7409
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_NFQUEUE.c
@@ -0,0 +1,70 @@
1/* ip6tables module for using new netfilter netlink queue
2 *
3 * (C) 2005 by Harald Welte <laforge@netfilter.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13
14#include <linux/netfilter.h>
15#include <linux/netfilter_ipv6/ip6_tables.h>
16#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("ip6tables NFQUEUE target");
20MODULE_LICENSE("GPL");
21
22static unsigned int
23target(struct sk_buff **pskb,
24 const struct net_device *in,
25 const struct net_device *out,
26 unsigned int hooknum,
27 const void *targinfo,
28 void *userinfo)
29{
30 const struct ipt_NFQ_info *tinfo = targinfo;
31
32 return NF_QUEUE_NR(tinfo->queuenum);
33}
34
35static int
36checkentry(const char *tablename,
37 const struct ip6t_entry *e,
38 void *targinfo,
39 unsigned int targinfosize,
40 unsigned int hook_mask)
41{
42 if (targinfosize != IP6T_ALIGN(sizeof(struct ipt_NFQ_info))) {
43 printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
44 targinfosize,
45 IP6T_ALIGN(sizeof(struct ipt_NFQ_info)));
46 return 0;
47 }
48
49 return 1;
50}
51
52static struct ip6t_target ipt_NFQ_reg = {
53 .name = "NFQUEUE",
54 .target = target,
55 .checkentry = checkentry,
56 .me = THIS_MODULE,
57};
58
59static int __init init(void)
60{
61 return ip6t_register_target(&ipt_NFQ_reg);
62}
63
64static void __exit fini(void)
65{
66 ip6t_unregister_target(&ipt_NFQ_reg);
67}
68
69module_init(init);
70module_exit(fini);
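
The NFQUEUE target's entire effect is its return value: NF_QUEUE_NR() folds
the rule's 16-bit queue number into the verdict word so nfnetlink_queue knows
which queue receives the packet. A hedged sketch of that encoding; the shift
and mask values are recalled from headers of this era, not quoted from the
patch:

	#include <stdio.h>

	#define NF_QUEUE	3u		/* base verdict, assumed value */
	#define VERDICT_QBITS	16
	#define VERDICT_QMASK	0xffff0000u

	static unsigned int queue_nr(unsigned int verdict)
	{
		return (verdict & VERDICT_QMASK) >> VERDICT_QBITS;
	}

	int main(void)
	{
		/* what NF_QUEUE_NR(5) would produce under these assumptions */
		unsigned int v = (5u << VERDICT_QBITS) | NF_QUEUE;

		printf("verdict=%u queue=%u\n", v & 0xffffu, queue_nr(v));
		return 0;
	}
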
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
new file mode 100644
index 000000000000..14316c3ebde4
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -0,0 +1,284 @@
1/*
2 * IP6 tables REJECT target module
3 * Linux INET6 implementation
4 *
5 * Copyright (C)2003 USAGI/WIDE Project
6 *
7 * Authors:
8 * Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
9 *
10 * Based on net/ipv4/netfilter/ipt_REJECT.c
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/skbuff.h>
21#include <linux/icmpv6.h>
22#include <linux/netdevice.h>
23#include <net/ipv6.h>
24#include <net/tcp.h>
25#include <net/icmp.h>
26#include <net/ip6_checksum.h>
27#include <net/ip6_fib.h>
28#include <net/ip6_route.h>
29#include <net/flow.h>
30#include <linux/netfilter_ipv6/ip6_tables.h>
31#include <linux/netfilter_ipv6/ip6t_REJECT.h>
32
33MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
34MODULE_DESCRIPTION("IP6 tables REJECT target module");
35MODULE_LICENSE("GPL");
36
37#if 0
38#define DEBUGP printk
39#else
40#define DEBUGP(format, args...)
41#endif
42
43/* Send RST reply */
44static void send_reset(struct sk_buff *oldskb)
45{
46 struct sk_buff *nskb;
47 struct tcphdr otcph, *tcph;
48 unsigned int otcplen, hh_len;
49 int tcphoff, needs_ack;
50 struct ipv6hdr *oip6h = oldskb->nh.ipv6h, *ip6h;
51 struct dst_entry *dst = NULL;
52 u8 proto;
53 struct flowi fl;
54
55 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
56 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
57 DEBUGP("ip6t_REJECT: addr is not unicast.\n");
58 return;
59 }
60
61 proto = oip6h->nexthdr;
62 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
63
64 if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
65 DEBUGP("ip6t_REJECT: Can't get TCP header.\n");
66 return;
67 }
68
69 otcplen = oldskb->len - tcphoff;
70
71 /* IP header checks: fragment, too short. */
72 if ((proto != IPPROTO_TCP) || (otcplen < sizeof(struct tcphdr))) {
73 DEBUGP("ip6t_REJECT: proto(%d) != IPPROTO_TCP, or too short. otcplen = %d\n",
74 proto, otcplen);
75 return;
76 }
77
78 if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
79 BUG();
80
81 /* No RST for RST. */
82 if (otcph.rst) {
83 DEBUGP("ip6t_REJECT: RST is set\n");
84 return;
85 }
86
87 /* Check checksum. */
88 if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
89 skb_checksum(oldskb, tcphoff, otcplen, 0))) {
90 DEBUGP("ip6t_REJECT: TCP checksum is invalid\n");
91 return;
92 }
93
94 memset(&fl, 0, sizeof(fl));
95 fl.proto = IPPROTO_TCP;
96 ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
97 ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
98 fl.fl_ip_sport = otcph.dest;
99 fl.fl_ip_dport = otcph.source;
100 dst = ip6_route_output(NULL, &fl);
101 if (dst == NULL)
102 return;
103 if (dst->error ||
104 xfrm_lookup(&dst, &fl, NULL, 0)) {
105 dst_release(dst);
106 return;
107 }
108
109 hh_len = (dst->dev->hard_header_len + 15)&~15;
110 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
111 + sizeof(struct tcphdr) + dst->trailer_len,
112 GFP_ATOMIC);
113
114 if (!nskb) {
115 if (net_ratelimit())
116 printk("ip6t_REJECT: Can't alloc skb\n");
117 dst_release(dst);
118 return;
119 }
120
121 nskb->dst = dst;
122
123 skb_reserve(nskb, hh_len + dst->header_len);
124
125 ip6h = nskb->nh.ipv6h = (struct ipv6hdr *)
126 skb_put(nskb, sizeof(struct ipv6hdr));
127 ip6h->version = 6;
128 ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
129 ip6h->nexthdr = IPPROTO_TCP;
130 ip6h->payload_len = htons(sizeof(struct tcphdr));
131 ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
132 ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
133
134 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
135 /* Truncate to length (no data) */
136 tcph->doff = sizeof(struct tcphdr)/4;
137 tcph->source = otcph.dest;
138 tcph->dest = otcph.source;
139
140 if (otcph.ack) {
141 needs_ack = 0;
142 tcph->seq = otcph.ack_seq;
143 tcph->ack_seq = 0;
144 } else {
145 needs_ack = 1;
146 tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
147 + otcplen - (otcph.doff<<2));
148 tcph->seq = 0;
149 }
150
151 /* Reset flags */
152 ((u_int8_t *)tcph)[13] = 0;
153 tcph->rst = 1;
154 tcph->ack = needs_ack;
155 tcph->window = 0;
156 tcph->urg_ptr = 0;
157 tcph->check = 0;
158
159 /* Adjust TCP checksum */
160 tcph->check = csum_ipv6_magic(&nskb->nh.ipv6h->saddr,
161 &nskb->nh.ipv6h->daddr,
162 sizeof(struct tcphdr), IPPROTO_TCP,
163 csum_partial((char *)tcph,
164 sizeof(struct tcphdr), 0));
165
166 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
167 dst_output);
168}
169
170static inline void
171send_unreach(struct sk_buff *skb_in, unsigned char code, unsigned int hooknum)
172{
173 if (hooknum == NF_IP6_LOCAL_OUT && skb_in->dev == NULL)
174 skb_in->dev = &loopback_dev;
175
176 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL);
177}
178
179static unsigned int reject6_target(struct sk_buff **pskb,
180 const struct net_device *in,
181 const struct net_device *out,
182 unsigned int hooknum,
183 const void *targinfo,
184 void *userinfo)
185{
186 const struct ip6t_reject_info *reject = targinfo;
187
188 DEBUGP(KERN_DEBUG "%s: medium point\n", __FUNCTION__);
189 /* WARNING: This code causes reentry within ip6tables.
190 This means that the ip6tables jump stack is now crap. We
191 must return an absolute verdict. --RR */
192 switch (reject->with) {
193 case IP6T_ICMP6_NO_ROUTE:
194 send_unreach(*pskb, ICMPV6_NOROUTE, hooknum);
195 break;
196 case IP6T_ICMP6_ADM_PROHIBITED:
197 send_unreach(*pskb, ICMPV6_ADM_PROHIBITED, hooknum);
198 break;
199 case IP6T_ICMP6_NOT_NEIGHBOUR:
200 send_unreach(*pskb, ICMPV6_NOT_NEIGHBOUR, hooknum);
201 break;
202 case IP6T_ICMP6_ADDR_UNREACH:
203 send_unreach(*pskb, ICMPV6_ADDR_UNREACH, hooknum);
204 break;
205 case IP6T_ICMP6_PORT_UNREACH:
206 send_unreach(*pskb, ICMPV6_PORT_UNREACH, hooknum);
207 break;
208 case IP6T_ICMP6_ECHOREPLY:
209 /* Do nothing */
210 break;
211 case IP6T_TCP_RESET:
212 send_reset(*pskb);
213 break;
214 default:
215 if (net_ratelimit())
216 printk(KERN_WARNING "ip6t_REJECT: case %u not handled yet\n", reject->with);
217 break;
218 }
219
220 return NF_DROP;
221}
222
223static int check(const char *tablename,
224 const struct ip6t_entry *e,
225 void *targinfo,
226 unsigned int targinfosize,
227 unsigned int hook_mask)
228{
229 const struct ip6t_reject_info *rejinfo = targinfo;
230
231 if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
232 DEBUGP("ip6t_REJECT: targinfosize %u != 0\n", targinfosize);
233 return 0;
234 }
235
236 /* Only allow these for packet filtering. */
237 if (strcmp(tablename, "filter") != 0) {
238 DEBUGP("ip6t_REJECT: bad table `%s'.\n", tablename);
239 return 0;
240 }
241
242 if ((hook_mask & ~((1 << NF_IP6_LOCAL_IN)
243 | (1 << NF_IP6_FORWARD)
244 | (1 << NF_IP6_LOCAL_OUT))) != 0) {
245 DEBUGP("ip6t_REJECT: bad hook mask %X\n", hook_mask);
246 return 0;
247 }
248
249 if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
250 printk("ip6t_REJECT: ECHOREPLY is not supported.\n");
251 return 0;
252 } else if (rejinfo->with == IP6T_TCP_RESET) {
253 /* Must specify that it's a TCP packet */
254 if (e->ipv6.proto != IPPROTO_TCP
255 || (e->ipv6.invflags & IP6T_INV_PROTO)) {
256 DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
257 return 0;
258 }
259 }
260
261 return 1;
262}
263
264static struct ip6t_target ip6t_reject_reg = {
265 .name = "REJECT",
266 .target = reject6_target,
267 .checkentry = check,
268 .me = THIS_MODULE
269};
270
271static int __init init(void)
272{
273 if (ip6t_register_target(&ip6t_reject_reg))
274 return -EINVAL;
275 return 0;
276}
277
278static void __exit fini(void)
279{
280 ip6t_unregister_target(&ip6t_reject_reg);
281}
282
283module_init(init);
284module_exit(fini);
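
send_reset() derives the RST's sequence numbers the classic way: a segment
that carried an ACK is answered at exactly its ack_seq, while anything else
gets an ACK covering every octet it occupied in sequence space, SYN and FIN
each counting as one. A runnable user-space restatement, where datalen stands
for otcplen - (otcph.doff << 2):

	#include <stdint.h>
	#include <stdio.h>

	struct seg { uint32_t seq, ack_seq, datalen; int syn, fin, ack; };

	static void rst_fields(const struct seg *in, uint32_t *seq,
			       uint32_t *ack, int *need_ack)
	{
		if (in->ack) {
			*need_ack = 0;
			*seq = in->ack_seq;	/* resume where peer expected */
			*ack = 0;
		} else {
			*need_ack = 1;
			*seq = 0;
			*ack = in->seq + in->syn + in->fin + in->datalen;
		}
	}

	int main(void)
	{
		struct seg syn = { .seq = 1000, .syn = 1 };
		uint32_t seq, ack;
		int need_ack;

		rst_fields(&syn, &seq, &ack, &need_ack);
		printf("RST seq=%u ack=%u ack-flag=%d\n", seq, ack, need_ack);
		return 0;
	}
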
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index ab0e32d3de46..9b91decbfddb 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -20,71 +20,6 @@ MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
 MODULE_DESCRIPTION("IP6 tables owner matching module");
 MODULE_LICENSE("GPL");
 
-static int
-match_pid(const struct sk_buff *skb, pid_t pid)
-{
-	struct task_struct *p;
-	struct files_struct *files;
-	int i;
-
-	read_lock(&tasklist_lock);
-	p = find_task_by_pid(pid);
-	if (!p)
-		goto out;
-	task_lock(p);
-	files = p->files;
-	if(files) {
-		spin_lock(&files->file_lock);
-		for (i=0; i < files->max_fds; i++) {
-			if (fcheck_files(files, i) == skb->sk->sk_socket->file) {
-				spin_unlock(&files->file_lock);
-				task_unlock(p);
-				read_unlock(&tasklist_lock);
-				return 1;
-			}
-		}
-		spin_unlock(&files->file_lock);
-	}
-	task_unlock(p);
-out:
-	read_unlock(&tasklist_lock);
-	return 0;
-}
-
-static int
-match_sid(const struct sk_buff *skb, pid_t sid)
-{
-	struct task_struct *g, *p;
-	struct file *file = skb->sk->sk_socket->file;
-	int i, found=0;
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		struct files_struct *files;
-		if (p->signal->session != sid)
-			continue;
-
-		task_lock(p);
-		files = p->files;
-		if (files) {
-			spin_lock(&files->file_lock);
-			for (i=0; i < files->max_fds; i++) {
-				if (fcheck_files(files, i) == file) {
-					found = 1;
-					break;
-				}
-			}
-			spin_unlock(&files->file_lock);
-		}
-		task_unlock(p);
-		if (found)
-			goto out;
-	} while_each_thread(g, p);
-out:
-	read_unlock(&tasklist_lock);
-
-	return found;
-}
 
 static int
 match(const struct sk_buff *skb,
@@ -112,18 +47,6 @@ match(const struct sk_buff *skb,
 		return 0;
 	}
 
-	if(info->match & IP6T_OWNER_PID) {
-		if (!match_pid(skb, info->pid) ^
-		    !!(info->invert & IP6T_OWNER_PID))
-			return 0;
-	}
-
-	if(info->match & IP6T_OWNER_SID) {
-		if (!match_sid(skb, info->sid) ^
-		    !!(info->invert & IP6T_OWNER_SID))
-			return 0;
-	}
-
 	return 1;
 }
 
@@ -134,6 +57,8 @@ checkentry(const char *tablename,
 	   unsigned int matchsize,
 	   unsigned int hook_mask)
 {
+	const struct ip6t_owner_info *info = matchinfo;
+
 	if (hook_mask
 	    & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
 		printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
@@ -142,14 +67,13 @@ checkentry(const char *tablename,
 
 	if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info)))
 		return 0;
-#ifdef CONFIG_SMP
-	/* files->file_lock can not be used in a BH */
-	if (((struct ip6t_owner_info *)matchinfo)->match
-	    & (IP6T_OWNER_PID|IP6T_OWNER_SID)) {
-		printk("ip6t_owner: pid and sid matching is broken on SMP.\n");
+
+	if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) {
+		printk("ip6t_owner: pid and sid matching "
+		       "not supported anymore\n");
 		return 0;
 	}
-#endif
+
 	return 1;
 }
 
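
This hunk deletes the per-packet pid/sid matching, which had to take tasklist_lock and walk every task's file table from packet context, and rejects such rules at insertion time instead. What survives are the owner checks that read directly off the socket. A hedged sketch of the uid half, assuming the usual ip6t_owner_info bits and the 2.6-era f_uid field on struct file:

/* Sketch only: everything needed lives on the socket, no task-list walk. */
static int owner_uid_matches(const struct sk_buff *skb,
			     const struct ip6t_owner_info *info)
{
	if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
		return 0;	/* no local socket: cannot match */

	return (skb->sk->sk_socket->file->f_uid == info->uid)
		^ !!(info->invert & IP6T_OWNER_UID);
}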
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1d4d75b34d32..7a5863298f3f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -49,6 +49,7 @@
 #include <net/transp_v6.h>
 #include <net/udp.h>
 #include <net/inet_common.h>
+#include <net/tcp_states.h>
 
 #include <net/rawv6.h>
 #include <net/xfrm.h>
@@ -81,7 +82,8 @@ static void raw_v6_unhash(struct sock *sk)
 
 /* Grumble... icmp and ip_input want to get at this... */
 struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
-			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr)
+			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
+			     int dif)
 {
 	struct hlist_node *node;
 	int is_multicast = ipv6_addr_is_multicast(loc_addr);
@@ -94,6 +96,9 @@ struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
 		    !ipv6_addr_equal(&np->daddr, rmt_addr))
 			continue;
 
+		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
+			continue;
+
 		if (!ipv6_addr_any(&np->rcv_saddr)) {
 			if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
 				goto found;
@@ -137,11 +142,12 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
  *
  *	Caller owns SKB so we must make clones.
  */
-void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 {
 	struct in6_addr *saddr;
 	struct in6_addr *daddr;
 	struct sock *sk;
+	int delivered = 0;
 	__u8 hash;
 
 	saddr = &skb->nh.ipv6h->saddr;
@@ -160,9 +166,10 @@ void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 	if (sk == NULL)
 		goto out;
 
-	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr);
+	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, skb->dev->ifindex);
 
 	while (sk) {
+		delivered = 1;
 		if (nexthdr != IPPROTO_ICMPV6 || !icmpv6_filter(sk, skb)) {
 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 
@@ -170,10 +177,12 @@ void ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 			if (clone)
 				rawv6_rcv(sk, clone);
 		}
-		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr);
+		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
+				     skb->dev->ifindex);
 	}
 out:
 	read_unlock(&raw_v6_lock);
+	return delivered;
 }
 
 /* This cleans up af_inet6 a bit. -DaveM */
@@ -334,8 +343,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 		if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
 				    &skb->nh.ipv6h->daddr,
 				    skb->len, inet->num, skb->csum)) {
-			LIMIT_NETDEBUG(
-				printk(KERN_DEBUG "raw v6 hw csum failure.\n"));
+			LIMIT_NETDEBUG(KERN_DEBUG "raw v6 hw csum failure.\n");
 			skb->ip_summed = CHECKSUM_NONE;
 		}
 	}
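
The raw.c hunks above do two things: __raw_v6_lookup() now takes the inbound interface index and skips sockets bound to a different device, and ipv6_raw_deliver() reports whether any raw socket actually took the packet. The device filter is what SO_BINDTODEVICE sets from userspace; an illustrative sketch (requires CAP_NET_RAW, and CAP_NET_ADMIN for the bind):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* A raw ICMPv6 socket bound to eth0 should now only see packets that
 * arrived on eth0. Sketch only; error handling abbreviated. */
int bind_raw_to_eth0(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	const char ifname[] = "eth0";

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, sizeof(ifname)) < 0)
		return -1;
	return fd;
}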
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 59e7c6317872..9d9e04344c77 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -562,7 +562,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if (skb->dev)
 		fq->iif = skb->dev->ifindex;
 	skb->dev = NULL;
-	fq->stamp = skb->stamp;
+	skb_get_timestamp(skb, &fq->stamp);
 	fq->meat += skb->len;
 	atomic_add(skb->truesize, &ip6_frag_mem);
 
@@ -664,7 +664,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
 
 	head->next = NULL;
 	head->dev = dev;
-	head->stamp = fq->stamp;
+	skb_set_timestamp(head, &fq->stamp);
 	head->nh.ipv6h->payload_len = htons(payload_len);
 
 	*skb_in = head;
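
The reassembly queue no longer touches skb->stamp directly; it goes through the accessor pair introduced by the sk_buff timestamp rework in this tree (the af_ipx hunk further down shows the new skb->tstamp representation). A sketch of the pattern, assuming the 2.6.14-era prototypes skb_get_timestamp(const struct sk_buff *, struct timeval *) and skb_set_timestamp(struct sk_buff *, const struct timeval *):

struct frag_queue_like {
	struct timeval stamp;	/* timestamp of the last queued fragment */
};

static void save_stamp(struct frag_queue_like *fq, const struct sk_buff *skb)
{
	skb_get_timestamp(skb, &fq->stamp);	/* copy out of the skb */
}

static void restore_stamp(struct sk_buff *head, const struct frag_queue_like *fq)
{
	skb_set_timestamp(head, &fq->stamp);	/* copy back into the skb */
}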
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 878789b3122d..5d5bbb49ec78 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1372,7 +1372,7 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
  *	Drop the packet on the floor
  */
 
-int ip6_pkt_discard(struct sk_buff *skb)
+static int ip6_pkt_discard(struct sk_buff *skb)
 {
 	IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
@@ -1380,7 +1380,7 @@ int ip6_pkt_discard(struct sk_buff *skb)
 	return 0;
 }
 
-int ip6_pkt_discard_out(struct sk_buff *skb)
+static int ip6_pkt_discard_out(struct sk_buff *skb)
 {
 	skb->dev = skb->dst->dev;
 	return ip6_pkt_discard(skb);
@@ -1850,16 +1850,16 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nlmsghdr *nlh,
 
 	skb = alloc_skb(size, gfp_any());
 	if (!skb) {
-		netlink_set_err(rtnl, 0, RTMGRP_IPV6_ROUTE, ENOBUFS);
+		netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, ENOBUFS);
 		return;
 	}
 	if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0) < 0) {
 		kfree_skb(skb);
-		netlink_set_err(rtnl, 0, RTMGRP_IPV6_ROUTE, EINVAL);
+		netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, EINVAL);
 		return;
 	}
-	NETLINK_CB(skb).dst_groups = RTMGRP_IPV6_ROUTE;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_ROUTE, gfp_any());
+	NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_ROUTE;
+	netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_ROUTE, gfp_any());
 }
 
 /*
@@ -1960,8 +1960,6 @@ static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
 	return arg.len;
 }
 
-extern struct rt6_statistics rt6_stats;
-
 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
 {
 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
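
The route.c hunk above switches the kernel-side notifier from the RTMGRP_* bitmask constants to the new RTNLGRP_* group numbers; existing userspace subscribers are unaffected, since they still pass the legacy bitmask in nl_groups. An illustrative subscriber sketch:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Listen for IPv6 route add/delete notifications over rtnetlink. */
int open_route_monitor(void)
{
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_IPV6_ROUTE;	/* legacy bitmask API */
	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return -1;
	return fd;
}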
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e553e5b80d6e..c3123c9e1a8e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -770,7 +770,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
 	return 0;
 }
 
-int __init ipip6_fb_tunnel_init(struct net_device *dev)
+static int __init ipip6_fb_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = dev->priv;
 	struct iphdr *iph = &tunnel->parms.iph;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 3a18e0e6ffed..8eff9fa1e983 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -14,9 +14,6 @@
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 
-extern ctl_table ipv6_route_table[];
-extern ctl_table ipv6_icmp_table[];
-
 #ifdef CONFIG_SYSCTL
 
 static ctl_table ipv6_table[] = {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ef29cfd936d3..794734f1d230 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -47,6 +47,7 @@
 
 #include <net/tcp.h>
 #include <net/ndisc.h>
+#include <net/inet6_hashtables.h>
 #include <net/ipv6.h>
 #include <net/transp_v6.h>
 #include <net/addrconf.h>
@@ -75,34 +76,11 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
 static struct tcp_func ipv6_mapped;
 static struct tcp_func ipv6_specific;
 
-/* I have no idea if this is a good hash for v6 or not. -DaveM */
-static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
-				    struct in6_addr *faddr, u16 fport)
-{
-	int hashent = (lport ^ fport);
-
-	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
-	hashent ^= hashent>>16;
-	hashent ^= hashent>>8;
-	return (hashent & (tcp_ehash_size - 1));
-}
-
-static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *laddr = &np->rcv_saddr;
-	struct in6_addr *faddr = &np->daddr;
-	__u16 lport = inet->num;
-	__u16 fport = inet->dport;
-	return tcp_v6_hashfn(laddr, lport, faddr, fport);
-}
-
-static inline int tcp_v6_bind_conflict(struct sock *sk,
-				       struct tcp_bind_bucket *tb)
-{
-	struct sock *sk2;
-	struct hlist_node *node;
+static inline int tcp_v6_bind_conflict(const struct sock *sk,
+				       const struct inet_bind_bucket *tb)
+{
+	const struct sock *sk2;
+	const struct hlist_node *node;
 
 	/* We must walk the whole port owner list in this case. -DaveM */
 	sk_for_each_bound(sk2, node, &tb->owners) {
@@ -126,8 +104,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk,
 	 */
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
-	struct tcp_bind_hashbucket *head;
-	struct tcp_bind_bucket *tb;
+	struct inet_bind_hashbucket *head;
+	struct inet_bind_bucket *tb;
 	struct hlist_node *node;
 	int ret;
 
@@ -138,25 +116,25 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do { rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[tcp_bhashfn(rover)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
-			tb_for_each(tb, node, &head->chain)
+			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
 					goto next;
 			break;
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 		/* Exhausted local port range during search? It is not
 		 * possible for us to be holding one of the bind hash
@@ -171,9 +149,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		/* OK, here is the one we will use. */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[tcp_bhashfn(snum)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
-		tb_for_each(tb, node, &head->chain)
+		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
 				goto tb_found;
 	}
@@ -192,8 +170,11 @@ tb_found:
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
-		goto fail_unlock;
+	if (tb == NULL) {
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+		if (tb == NULL)
+			goto fail_unlock;
+	}
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 			tb->fastreuse = 1;
@@ -204,9 +185,9 @@ tb_not_found:
 		tb->fastreuse = 0;
 
 success:
-	if (!tcp_sk(sk)->bind_hash)
-		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	if (!inet_csk(sk)->icsk_bind_hash)
+		inet_bind_hash(sk, tb, snum);
+	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
 	ret = 0;
 
 fail_unlock:
@@ -224,13 +205,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	if (sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
-		tcp_listen_wlock();
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
+		inet_listen_wlock(&tcp_hashinfo);
 	} else {
-		sk->sk_hashent = tcp_v6_sk_hashfn(sk);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 
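
The conversions in the hunks above replace TCP-private globals (tcp_bhash, tcp_ehash, tcp_port_rover) with the shared tcp_hashinfo, a struct inet_hashinfo that IPv4 and IPv6 TCP now use in common. The bind-hash index itself is just a power-of-two mask; a sketch matching the 2.6.14-era inet_bhashfn():

/* bhash_size is a power of two, so this is lport % bhash_size. */
static inline int bhashfn_sketch(unsigned short lport, int bhash_size)
{
	return lport & (bhash_size - 1);
}
/* e.g. bhashfn_sketch(8080, 512) picks the bucket whose head->lock
 * serializes all binds to local port 8080. */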
@@ -255,131 +236,11 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
-{
-	struct sock *sk;
-	struct hlist_node *node;
-	struct sock *result = NULL;
-	int score, hiscore;
-
-	hiscore=0;
-	read_lock(&tcp_lhash_lock);
-	sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
-		if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
-			struct ipv6_pinfo *np = inet6_sk(sk);
-
-			score = 1;
-			if (!ipv6_addr_any(&np->rcv_saddr)) {
-				if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
-					continue;
-				score++;
-			}
-			if (sk->sk_bound_dev_if) {
-				if (sk->sk_bound_dev_if != dif)
-					continue;
-				score++;
-			}
-			if (score == 3) {
-				result = sk;
-				break;
-			}
-			if (score > hiscore) {
-				hiscore = score;
-				result = sk;
-			}
-		}
-	}
-	if (result)
-		sock_hold(result);
-	read_unlock(&tcp_lhash_lock);
-	return result;
-}
-
-/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
- * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- *
- * The sockhash lock must be held as a reader here.
- */
-
-static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
-						       struct in6_addr *daddr, u16 hnum,
-						       int dif)
-{
-	struct tcp_ehash_bucket *head;
-	struct sock *sk;
-	struct hlist_node *node;
-	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
-	int hash;
-
-	/* Optimize here for direct hit, only listening connections can
-	 * have wildcards anyways.
-	 */
-	hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
-	head = &tcp_ehash[hash];
-	read_lock(&head->lock);
-	sk_for_each(sk, node, &head->chain) {
-		/* For IPV6 do the cheaper port and family tests first. */
-		if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
-			goto hit; /* You sunk my battleship! */
-	}
-	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
-		/* FIXME: acme: check this... */
-		struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
-
-		if(*((__u32 *)&(tw->tw_dport)) == ports &&
-		   sk->sk_family == PF_INET6) {
-			if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-			   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
-			   (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
-				goto hit;
-		}
-	}
-	read_unlock(&head->lock);
-	return NULL;
-
-hit:
-	sock_hold(sk);
-	read_unlock(&head->lock);
-	return sk;
-}
-
-
-static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-					   struct in6_addr *daddr, u16 hnum,
-					   int dif)
-{
-	struct sock *sk;
-
-	sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);
-
-	if (sk)
-		return sk;
-
-	return tcp_v6_lookup_listener(daddr, hnum, dif);
-}
-
-inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
-				  struct in6_addr *daddr, u16 dport,
-				  int dif)
-{
-	struct sock *sk;
-
-	local_bh_disable();
-	sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
-	local_bh_enable();
-
-	return sk;
-}
-
-EXPORT_SYMBOL_GPL(tcp_v6_lookup);
-
-
 /*
  *	Open request hash tables.
  */
 
-static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
+static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
 {
 	u32 a, b, c;
 
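
The entire socket-lookup machinery removed here now lives in net/ipv6/inet6_hashtables.c as __inet6_lookup_established(), inet6_lookup_listener() and inet6_lookup(), which the later hunks call. The listener lookup keeps the same most-specific-wins scoring rule as the deleted tcp_v6_lookup_listener(): one point for the port match, one more for a bound address, one more for a bound device. A standalone sketch of that rule with simplified types:

struct listener_sketch {
	int has_rcv_saddr;	/* bound to a specific local address? */
	int bound_dev_if;	/* nonzero if bound to a device */
};

static int listener_score(const struct listener_sketch *l, int dif)
{
	int score = 1;			/* port already matched */

	if (l->has_rcv_saddr)
		score++;		/* exact address match required */
	if (l->bound_dev_if) {
		if (l->bound_dev_if != dif)
			return 0;	/* wrong device: not eligible */
		score++;
	}
	return score;			/* 3 is a perfect hit */
}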
@@ -399,14 +260,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
 	return c & (TCP_SYNQ_HSIZE - 1);
 }
 
-static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
+static struct request_sock *tcp_v6_search_req(const struct sock *sk,
 					      struct request_sock ***prevp,
 					      __u16 rport,
 					      struct in6_addr *raddr,
 					      struct in6_addr *laddr,
 					      int iif)
 {
-	struct listen_sock *lopt = tp->accept_queue.listen_opt;
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
 	struct request_sock *req, **prev;
 
 	for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
@@ -451,44 +313,48 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
-static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
-				      struct tcp_tw_bucket **twp)
+static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
+				      struct inet_timewait_sock **twp)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr = &np->rcv_saddr;
-	struct in6_addr *saddr = &np->daddr;
-	int dif = sk->sk_bound_dev_if;
-	u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-	int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
-	struct tcp_ehash_bucket *head = &tcp_ehash[hash];
+	const struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct in6_addr *daddr = &np->rcv_saddr;
+	const struct in6_addr *saddr = &np->daddr;
+	const int dif = sk->sk_bound_dev_if;
+	const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
+	const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport,
+				       tcp_hashinfo.ehash_size);
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
-	struct hlist_node *node;
-	struct tcp_tw_bucket *tw;
+	const struct hlist_node *node;
+	struct inet_timewait_sock *tw;
 
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
-		tw = (struct tcp_tw_bucket*)sk2;
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
+		const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);
+
+		tw = inet_twsk(sk2);
 
 		if(*((__u32 *)&(tw->tw_dport)) == ports &&
 		   sk2->sk_family == PF_INET6 &&
-		   ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
-		   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
+		   ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
+		   ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
 		   sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
+			const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
 			struct tcp_sock *tp = tcp_sk(sk);
 
-			if (tw->tw_ts_recent_stamp &&
-			    (!twp || (sysctl_tcp_tw_reuse &&
-				      xtime.tv_sec -
-				      tw->tw_ts_recent_stamp > 1))) {
+			if (tcptw->tw_ts_recent_stamp &&
+			    (!twp ||
+			     (sysctl_tcp_tw_reuse &&
+			      xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
 				/* See comment in tcp_ipv4.c */
-				tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
+				tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 				if (!tp->write_seq)
 					tp->write_seq = 1;
-				tp->rx_opt.ts_recent = tw->tw_ts_recent;
-				tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+				tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+				tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 				sock_hold(sk2);
 				goto unique;
 			} else
@@ -499,7 +365,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 
 	/* And established part... */
 	sk_for_each(sk2, node, &head->chain) {
-		if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif))
+		if (INET6_MATCH(sk2, saddr, daddr, ports, dif))
 			goto not_unique;
 	}
 
@@ -515,10 +381,10 @@ unique:
 		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		tcp_tw_deschedule(tw);
+		inet_twsk_deschedule(tw, &tcp_death_row);
 		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 	}
 	return 0;
 
@@ -540,8 +406,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk)
 static int tcp_v6_hash_connect(struct sock *sk)
 {
 	unsigned short snum = inet_sk(sk)->num;
-	struct tcp_bind_hashbucket *head;
-	struct tcp_bind_bucket *tb;
+	struct inet_bind_hashbucket *head;
+	struct inet_bind_bucket *tb;
 	int ret;
 
 	if (!snum) {
@@ -553,19 +419,19 @@ static int tcp_v6_hash_connect(struct sock *sk)
 		static u32 hint;
 		u32 offset = hint + tcpv6_port_offset(sk);
 		struct hlist_node *node;
-		struct tcp_tw_bucket *tw = NULL;
+		struct inet_timewait_sock *tw = NULL;
 
 		local_bh_disable();
 		for (i = 1; i <= range; i++) {
 			port = low + (i + offset) % range;
-			head = &tcp_bhash[tcp_bhashfn(port)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 
 			/* Does not bother with rcv_saddr checks,
 			 * because the established check is already
 			 * unique enough.
 			 */
-			tb_for_each(tb, node, &head->chain) {
+			inet_bind_bucket_for_each(tb, node, &head->chain) {
 				if (tb->port == port) {
 					BUG_TRAP(!hlist_empty(&tb->owners));
 					if (tb->fastreuse >= 0)
@@ -578,7 +444,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 			}
 		}
 
-		tb = tcp_bucket_create(head, port);
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 		if (!tb) {
 			spin_unlock(&head->lock);
 			break;
@@ -597,7 +463,7 @@ ok:
 		hint += i;
 
 		/* Head lock still held and bh's disabled */
-		tcp_bind_hash(sk, tb, port);
+		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
 			__tcp_v6_hash(sk);
@@ -605,16 +471,16 @@ ok:
 		spin_unlock(&head->lock);
 
 		if (tw) {
-			tcp_tw_deschedule(tw);
-			tcp_tw_put(tw);
+			inet_twsk_deschedule(tw, &tcp_death_row);
+			inet_twsk_put(tw);
 		}
 
 		ret = 0;
 		goto out;
 	}
 
-	head = &tcp_bhash[tcp_bhashfn(snum)];
-	tb = tcp_sk(sk)->bind_hash;
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+	tb = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -631,11 +497,6 @@ out:
 	}
 }
 
-static __inline__ int tcp_v6_iif(struct sk_buff *skb)
-{
-	return IP6CB(skb)->iif;
-}
-
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			  int addr_len)
 {
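
tcp_v6_iif() disappears here because an identical helper, inet6_iif(), now lives in a shared header; the remaining hunks in this file simply switch call sites over. As the deleted body shows, it only reads back the inbound interface index that IPv6 receive processing stashed in the skb control block:

/* Sketch of what the shared replacement helper amounts to. */
static inline int inet6_iif_sketch(const struct sk_buff *skb)
{
	return IP6CB(skb)->iif;
}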
@@ -827,14 +688,15 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		int type, int code, int offset, __u32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
-	struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 	struct ipv6_pinfo *np;
 	struct sock *sk;
 	int err;
 	struct tcp_sock *tp;
 	__u32 seq;
 
-	sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
+			  th->source, skb->dev->ifindex);
 
 	if (sk == NULL) {
 		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
@@ -842,7 +704,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 	if (sk->sk_state == TCP_TIME_WAIT) {
-		tcp_tw_put((struct tcp_tw_bucket*)sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		return;
 	}
 
@@ -920,8 +782,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (sock_owned_by_user(sk))
 		goto out;
 
-	req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
-				&hdr->saddr, tcp_v6_iif(skb));
+	req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
+				&hdr->saddr, inet6_iif(skb));
 	if (!req)
 		goto out;
 
@@ -935,7 +797,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		goto out;
 	}
 
-	tcp_synq_drop(sk, req, prev);
+	inet_csk_reqsk_queue_drop(sk, req, prev);
 	goto out;
 
 	case TCP_SYN_SENT:
@@ -1132,7 +994,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 			       buff->csum);
 
 	fl.proto = IPPROTO_TCP;
-	fl.oif = tcp_v6_iif(skb);
+	fl.oif = inet6_iif(skb);
 	fl.fl_ip_dport = t1->dest;
 	fl.fl_ip_sport = t1->source;
 
@@ -1201,7 +1063,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 			       buff->csum);
 
 	fl.proto = IPPROTO_TCP;
-	fl.oif = tcp_v6_iif(skb);
+	fl.oif = inet6_iif(skb);
 	fl.fl_ip_dport = t1->dest;
 	fl.fl_ip_sport = t1->source;
 
@@ -1220,12 +1082,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+	struct inet_timewait_sock *tw = inet_twsk(sk);
+	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
-			tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
+	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+			tcptw->tw_ts_recent);
 
-	tcp_tw_put(tw);
+	inet_twsk_put(tw);
 }
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1237,28 +1101,25 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
 	struct request_sock *req, **prev;
-	struct tcphdr *th = skb->h.th;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcphdr *th = skb->h.th;
 	struct sock *nsk;
 
 	/* Find possible connection requests. */
-	req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
-				&skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
+	req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
+				&skb->nh.ipv6h->daddr, inet6_iif(skb));
 	if (req)
 		return tcp_check_req(sk, skb, req, prev);
 
-	nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
-					  th->source,
-					  &skb->nh.ipv6h->daddr,
-					  ntohs(th->dest),
-					  tcp_v6_iif(skb));
+	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
+					 th->source, &skb->nh.ipv6h->daddr,
+					 ntohs(th->dest), inet6_iif(skb));
 
 	if (nsk) {
 		if (nsk->sk_state != TCP_TIME_WAIT) {
 			bh_lock_sock(nsk);
 			return nsk;
 		}
-		tcp_tw_put((struct tcp_tw_bucket*)nsk);
+		inet_twsk_put((struct inet_timewait_sock *)nsk);
 		return NULL;
 	}
 
@@ -1271,12 +1132,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 
 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct listen_sock *lopt = tp->accept_queue.listen_opt;
-	u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+	const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
-	reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-	tcp_synq_added(sk);
+	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
+	inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
 }
 
 
@@ -1301,13 +1162,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	/*
 	 *	There are no SYN attacks on IPv6, yet...
 	 */
-	if (tcp_synq_is_full(sk) && !isn) {
+	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 		if (net_ratelimit())
 			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
 		goto drop;
 	}
 
-	if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
 	req = reqsk_alloc(&tcp6_request_sock_ops);
@@ -1339,7 +1200,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		treq->iif = tcp_v6_iif(skb);
+		treq->iif = inet6_iif(skb);
 
 	if (isn == 0)
 		isn = tcp_v6_init_sequence(sk,skb);
@@ -1404,15 +1265,14 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
 		newnp->pktoptions = NULL;
 		newnp->opt = NULL;
-		newnp->mcast_oif = tcp_v6_iif(skb);
+		newnp->mcast_oif = inet6_iif(skb);
 		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
 
-		/* Charge newly allocated IPv6 socket. Though it is mapped,
-		 * it is IPv6 yet.
+		/*
+		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
+		 * here, tcp_create_openreq_child now does this for us, see the comment in
+		 * that function for the gory details. -acme
 		 */
-#ifdef INET_REFCNT_DEBUG
-		atomic_inc(&inet6_sock_nr);
-#endif
 
 		/* It is tricky place. Until this moment IPv4 tcp
 		   worked with IPv6 af_tcp.af_specific.
@@ -1467,10 +1327,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (newsk == NULL)
 		goto out;
 
-	/* Charge newly allocated IPv6 socket */
-#ifdef INET_REFCNT_DEBUG
-	atomic_inc(&inet6_sock_nr);
-#endif
+	/*
+	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
+	 * count here, tcp_create_openreq_child now does this for us, see the
+	 * comment in that function for the gory details. -acme
+	 */
 
 	ip6_dst_store(newsk, dst, NULL);
 	newsk->sk_route_caps = dst->dev->features &
@@ -1509,7 +1370,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		skb_set_owner_r(newnp->pktoptions, newsk);
 	}
 	newnp->opt = NULL;
-	newnp->mcast_oif = tcp_v6_iif(skb);
+	newnp->mcast_oif = inet6_iif(skb);
 	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
 
 	/* Clone native IPv6 options from listening socket (if any)
@@ -1536,7 +1397,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
 	__tcp_v6_hash(newsk);
-	tcp_inherit_port(sk, newsk);
+	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
 
@@ -1557,7 +1418,7 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
 		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
 				  &skb->nh.ipv6h->daddr,skb->csum))
 			return 0;
-		LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
+		LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
 	}
 	if (skb->len <= 76) {
 		if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
@@ -1684,7 +1545,7 @@ ipv6_pktoptions:
 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
 		if (np->rxopt.bits.rxinfo)
-			np->mcast_oif = tcp_v6_iif(opt_skb);
+			np->mcast_oif = inet6_iif(opt_skb);
 		if (np->rxopt.bits.rxhlim)
 			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
 		if (ipv6_opt_accepted(sk, opt_skb)) {
@@ -1739,8 +1600,9 @@ static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
 	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
-			     &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
+			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
+			    inet6_iif(skb));
 
 	if (!sk)
 		goto no_tcp_socket;
@@ -1795,26 +1657,29 @@ discard_and_relse:
 
 do_time_wait:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-		tcp_tw_put((struct tcp_tw_bucket *) sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		goto discard_it;
 	}
 
 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
-		tcp_tw_put((struct tcp_tw_bucket *) sk);
+		inet_twsk_put((struct inet_timewait_sock *)sk);
 		goto discard_it;
 	}
 
-	switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
-					  skb, th, skb->len)) {
+	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
+					   skb, th)) {
 	case TCP_TW_SYN:
 	{
 		struct sock *sk2;
 
-		sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
+		sk2 = inet6_lookup_listener(&tcp_hashinfo,
+					    &skb->nh.ipv6h->daddr,
+					    ntohs(th->dest), inet6_iif(skb));
 		if (sk2 != NULL) {
-			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
-			tcp_tw_put((struct tcp_tw_bucket *)sk);
+			struct inet_timewait_sock *tw = inet_twsk(sk);
+			inet_twsk_deschedule(tw, &tcp_death_row);
+			inet_twsk_put(tw);
 			sk = sk2;
 			goto process;
 		}
@@ -1983,7 +1848,7 @@ static struct tcp_func ipv6_specific = {
 static struct tcp_func ipv6_mapped = {
 	.queue_xmit	=	ip_queue_xmit,
 	.send_check	=	tcp_v4_send_check,
-	.rebuild_header	=	tcp_v4_rebuild_header,
+	.rebuild_header	=	inet_sk_rebuild_header,
 	.conn_request	=	tcp_v6_conn_request,
 	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
 	.remember_stamp	=	tcp_v4_remember_stamp,
@@ -2002,13 +1867,14 @@ static struct tcp_func ipv6_mapped = {
  */
 static int tcp_v6_init_sock(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	skb_queue_head_init(&tp->out_of_order_queue);
 	tcp_init_xmit_timers(sk);
 	tcp_prequeue_init(tp);
 
-	tp->rto = TCP_TIMEOUT_INIT;
+	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	tp->mdev = TCP_TIMEOUT_INIT;
 
 	/* So many TCP implementations out there (incorrectly) count the
@@ -2030,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_state = TCP_CLOSE;
 
 	tp->af_specific = &ipv6_specific;
-	tp->ca_ops = &tcp_init_congestion_ops;
+	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
@@ -2044,8 +1910,6 @@ static int tcp_v6_init_sock(struct sock *sk)
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
-	extern int tcp_v4_destroy_sock(struct sock *sk);
-
 	tcp_v4_destroy_sock(sk);
 	return inet6_destroy_sock(sk);
 }
@@ -2091,18 +1955,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	unsigned long timer_expires;
 	struct inet_sock *inet = inet_sk(sp);
 	struct tcp_sock *tp = tcp_sk(sp);
+	const struct inet_connection_sock *icsk = inet_csk(sp);
 	struct ipv6_pinfo *np = inet6_sk(sp);
 
 	dest  = &np->daddr;
 	src   = &np->rcv_saddr;
 	destp = ntohs(inet->dport);
 	srcp  = ntohs(inet->sport);
-	if (tp->pending == TCP_TIME_RETRANS) {
+
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 		timer_active	= 1;
-		timer_expires	= tp->timeout;
-	} else if (tp->pending == TCP_TIME_PROBE0) {
+		timer_expires	= icsk->icsk_timeout;
+	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
 		timer_active	= 4;
-		timer_expires	= tp->timeout;
+		timer_expires	= icsk->icsk_timeout;
 	} else if (timer_pending(&sp->sk_timer)) {
 		timer_active	= 2;
 		timer_expires	= sp->sk_timer.expires;
@@ -2123,28 +1989,31 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
 		   timer_active,
 		   jiffies_to_clock_t(timer_expires - jiffies),
-		   tp->retransmits,
+		   icsk->icsk_retransmits,
 		   sock_i_uid(sp),
-		   tp->probes_out,
+		   icsk->icsk_probes_out,
 		   sock_i_ino(sp),
 		   atomic_read(&sp->sk_refcnt), sp,
-		   tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
+		   icsk->icsk_rto,
+		   icsk->icsk_ack.ato,
+		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
 		   );
 }
 
 static void get_timewait6_sock(struct seq_file *seq,
-			       struct tcp_tw_bucket *tw, int i)
+			       struct inet_timewait_sock *tw, int i)
 {
 	struct in6_addr *dest, *src;
 	__u16 destp, srcp;
+	struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
 	int ttd = tw->tw_ttd - jiffies;
 
 	if (ttd < 0)
 		ttd = 0;
 
-	dest = &tw->tw_v6_daddr;
-	src  = &tw->tw_v6_rcv_saddr;
+	dest = &tcp6tw->tw_v6_daddr;
+	src  = &tcp6tw->tw_v6_rcv_saddr;
 	destp = ntohs(tw->tw_dport);
 	srcp  = ntohs(tw->tw_sport);
 
@@ -2219,7 +2088,7 @@ struct proto tcpv6_prot = {
 	.close			= tcp_close,
 	.connect		= tcp_v6_connect,
 	.disconnect		= tcp_disconnect,
-	.accept			= tcp_accept,
+	.accept			= inet_csk_accept,
 	.ioctl			= tcp_ioctl,
 	.init			= tcp_v6_init_sock,
 	.destroy		= tcp_v6_destroy_sock,
@@ -2236,11 +2105,13 @@ struct proto tcpv6_prot = {
 	.sockets_allocated	= &tcp_sockets_allocated,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
+	.orphan_count		= &tcp_orphan_count,
 	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp6_sock),
+	.twsk_obj_size		= sizeof(struct tcp6_timewait_sock),
 	.rsk_prot		= &tcp6_request_sock_ops,
 };
 
@@ -2250,8 +2121,6 @@ static struct inet6_protocol tcpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-extern struct proto_ops inet6_stream_ops;
-
 static struct inet_protosw tcpv6_protosw = {
 	.type		=	SOCK_STREAM,
 	.protocol	=	IPPROTO_TCP,
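
A recurring pattern through this file is the replacement of blanket (struct tcp_tw_bucket *) casts with the layered time-wait socket: inet_twsk(), tcp_twsk() and tcp6_twsk() select successively more specific views of the same object, which is also why the proto gains .twsk_obj_size = sizeof(struct tcp6_timewait_sock). A rough sketch of that layering (field names beyond those visible in this diff are assumptions):

struct inet_timewait_sock_sketch {	/* generic: ports, device, timer */
	unsigned short tw_dport, tw_sport;
	int tw_bound_dev_if;
};

struct tcp_timewait_sock_sketch {	/* adds TCP sequence/timestamp state */
	struct inet_timewait_sock_sketch tw_sk;
	unsigned int tw_snd_nxt, tw_rcv_nxt;
	long tw_ts_recent_stamp;
};

struct tcp6_timewait_sock_sketch {	/* adds the IPv6 addresses */
	struct tcp_timewait_sock_sketch tw_v6_sk;
	struct in6_addr tw_v6_daddr, tw_v6_rcv_saddr;
};
/* inet_twsk(sk), tcp_twsk(sk) and tcp6_twsk(sk) are then just casts to
 * the appropriate layer, replacing the old one-size-fits-all bucket. */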
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index eff050ac7049..390d750449ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -51,6 +51,7 @@
 #include <net/udp.h>
 #include <net/raw.h>
 #include <net/inet_common.h>
+#include <net/tcp_states.h>
 
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
@@ -58,7 +59,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
+DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
 
 /* Grrr, addr_type already calculated by caller, but I don't want
  * to add some silly "cookie" argument to this method just for that.
@@ -477,8 +478,7 @@ static int udpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
 		/* RFC 2460 section 8.1 says that we SHOULD log
 		   this error. Well, it is reasonable.
 		 */
-		LIMIT_NETDEBUG(
-			printk(KERN_INFO "IPv6: udp checksum is 0\n"));
+		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
 		goto discard;
 	}
 
@@ -493,7 +493,7 @@ static int udpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
 	if (skb->ip_summed==CHECKSUM_HW) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
-			LIMIT_NETDEBUG(printk(KERN_DEBUG "udp v6 hw csum failure.\n"));
+			LIMIT_NETDEBUG(KERN_DEBUG "udp v6 hw csum failure.\n");
 			skb->ip_summed = CHECKSUM_NONE;
 		}
 	}
@@ -825,7 +825,7 @@ back_from_confirm:
 		/* ... which is an evident application bug. --ANK */
 		release_sock(sk);
 
-		LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 2\n"));
+		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
 		err = -EINVAL;
 		goto out;
 	}
@@ -1054,8 +1054,6 @@ struct proto udpv6_prot = {
 	.obj_size	= sizeof(struct udp6_sock),
 };
 
-extern struct proto_ops inet6_dgram_ops;
-
 static struct inet_protosw udpv6_protosw = {
 	.type		= SOCK_DGRAM,
 	.protocol	= IPPROTO_UDP,
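
The LIMIT_NETDEBUG conversions above (and in raw.c and tcp_ipv6.c) reflect a change in the macro's calling convention: instead of wrapping a whole printk() statement, it now takes the printk arguments directly and does the rate-limited printk itself. A sketch of the new shape (the exact 2.6.14 definition may differ in detail):

/* old:  LIMIT_NETDEBUG(printk(KERN_DEBUG "udp v6 hw csum failure.\n"));
 * new:  LIMIT_NETDEBUG(KERN_DEBUG "udp v6 hw csum failure.\n");
 */
#define LIMIT_NETDEBUG_SKETCH(fmt, args...) \
	do { if (net_ratelimit()) printk(fmt, ##args); } while (0)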
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 60c26c87277e..fbef7826a74f 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -79,7 +79,7 @@ static u32 xfrm6_tunnel_spi;
 #define XFRM6_TUNNEL_SPI_MIN	1
 #define XFRM6_TUNNEL_SPI_MAX	0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem;
+static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
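
The __read_mostly annotations added here and in udp.c move globals that are written rarely but read in hot paths into a dedicated section, so they do not share cache lines with frequently written data. A sketch of what the macro typically expands to on architectures that support it (it falls back to nothing elsewhere):

#define READ_MOSTLY_SKETCH __attribute__((__section__(".data.read_mostly")))

static int mostly_static_tunable READ_MOSTLY_SKETCH = 256;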
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 5a27e5df5886..34b3bb868409 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -44,7 +44,6 @@
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/string.h>
-#include <linux/tcp.h>
 #include <linux/types.h>
 #include <linux/termios.h>
 
@@ -52,6 +51,7 @@
 #include <net/p8022.h>
 #include <net/psnap.h>
 #include <net/sock.h>
+#include <net/tcp_states.h>
 
 #include <asm/uaccess.h>
 
@@ -1627,7 +1627,7 @@ out:
 	return rc;
 }
 
-static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
+static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
 	/* NULL here for pt means the packet was looped back */
 	struct ipx_interface *intrfc;
@@ -1796,8 +1796,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 			 copied);
 	if (rc)
 		goto out_free;
-	if (skb->stamp.tv_sec)
-		sk->sk_stamp = skb->stamp;
+	if (skb->tstamp.off_sec)
+		skb_get_timestamp(skb, &sk->sk_stamp);
 
 	msg->msg_namelen = sizeof(*sipx);
 
@@ -1940,9 +1940,7 @@ static struct notifier_block ipx_dev_notifier = {
 };
 
 extern struct datalink_proto *make_EII_client(void);
-extern struct datalink_proto *make_8023_client(void);
 extern void destroy_EII_client(struct datalink_proto *);
-extern void destroy_8023_client(struct datalink_proto *);
 
 static unsigned char ipx_8022_type = 0xE0;
 static unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
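
The ipx_rcv() change above is part of a tree-wide update: every packet_type receive handler gains a fourth argument, the originating device, so a handler can distinguish the device the frame actually arrived on from a bond or VLAN master it was remapped to. irlap_driver_rcv() below is converted the same way. The updated handler shape, as a sketch:

static int proto_rcv_sketch(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *pt,
			    struct net_device *orig_dev)
{
	/* dev may be a master device; orig_dev is where the frame arrived */
	kfree_skb(skb);		/* placeholder body: drop the frame */
	return 0;
}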
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index b6761913445a..1f73d9ea434d 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -10,7 +10,7 @@
 #include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/seq_file.h>
-#include <linux/tcp.h>
+#include <net/tcp_states.h>
 #include <net/ipx.h>
 
 static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 92c6e8d4e731..6f92f9c62990 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -56,7 +56,7 @@
56#include <asm/uaccess.h> 56#include <asm/uaccess.h>
57 57
58#include <net/sock.h> 58#include <net/sock.h>
59#include <net/tcp.h> 59#include <net/tcp_states.h>
60 60
61#include <net/irda/af_irda.h> 61#include <net/irda/af_irda.h>
62 62
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 6dafbb43b529..3e9a06abbdd0 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
988 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 988 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
989 return; 989 return;
990 } 990 }
991 /* Unlink tx_skb from list */
992 tx_skb->next = tx_skb->prev = NULL;
993 tx_skb->list = NULL;
994 991
995 /* Clear old Nr field + poll bit */ 992 /* Clear old Nr field + poll bit */
996 tx_skb->data[1] &= 0x0f; 993 tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
1063 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 1060 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
1064 return; 1061 return;
1065 } 1062 }
1066 /* Unlink tx_skb from list */
1067 tx_skb->next = tx_skb->prev = NULL;
1068 tx_skb->list = NULL;
1069 1063
1070 /* Clear old Nr field + poll bit */ 1064 /* Clear old Nr field + poll bit */
1071 tx_skb->data[1] &= 0x0f; 1065 tx_skb->data[1] &= 0x0f;
@@ -1309,7 +1303,7 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
1309 * Jean II 1303 * Jean II
1310 */ 1304 */
1311int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, 1305int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
1312 struct packet_type *ptype) 1306 struct packet_type *ptype, struct net_device *orig_dev)
1313{ 1307{
1314 struct irlap_info info; 1308 struct irlap_info info;
1315 struct irlap_cb *self; 1309 struct irlap_cb *self;
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 7a4a4d7fbe66..c19e9ce05a3a 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -53,7 +53,6 @@ struct irlmp_cb *irlmp = NULL;
53/* These can be altered by the sysctl interface */ 53/* These can be altered by the sysctl interface */
54int sysctl_discovery = 0; 54int sysctl_discovery = 0;
55int sysctl_discovery_timeout = 3; /* 3 seconds by default */ 55int sysctl_discovery_timeout = 3; /* 3 seconds by default */
56EXPORT_SYMBOL(sysctl_discovery_timeout);
57int sysctl_discovery_slots = 6; /* 6 slots by default */ 56int sysctl_discovery_slots = 6; /* 6 slots by default */
58int sysctl_lap_keepalive_time = LM_IDLE_TIMEOUT * 1000 / HZ; 57int sysctl_lap_keepalive_time = LM_IDLE_TIMEOUT * 1000 / HZ;
59char sysctl_devname[65]; 58char sysctl_devname[65];
@@ -67,7 +66,6 @@ const char *irlmp_reasons[] = {
67 "LM_INIT_DISCONNECT", 66 "LM_INIT_DISCONNECT",
68 "ERROR, NOT USED", 67 "ERROR, NOT USED",
69}; 68};
70EXPORT_SYMBOL(irlmp_reasons);
71 69
72/* 70/*
73 * Function irlmp_init (void) 71 * Function irlmp_init (void)
@@ -675,7 +673,6 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
675 673
676 return new; 674 return new;
677} 675}
678EXPORT_SYMBOL(irlmp_dup);
679 676
680/* 677/*
681 * Function irlmp_disconnect_request (handle, userdata) 678 * Function irlmp_disconnect_request (handle, userdata)
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 6ffaed4544e9..634901dd156f 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -54,7 +54,7 @@ extern int irsock_init(void);
54extern void irsock_cleanup(void); 54extern void irsock_cleanup(void);
55/* irlap_frame.c */ 55/* irlap_frame.c */
56extern int irlap_driver_rcv(struct sk_buff *, struct net_device *, 56extern int irlap_driver_rcv(struct sk_buff *, struct net_device *,
57 struct packet_type *); 57 struct packet_type *, struct net_device *);
58 58
59/* 59/*
60 * Module parameters 60 * Module parameters
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 9004f7349a76..b391cb3893d4 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -517,9 +517,6 @@ extern int
517 irda_irnet_init(void); /* Initialise IrDA part of IrNET */ 517 irda_irnet_init(void); /* Initialise IrDA part of IrNET */
518extern void 518extern void
519 irda_irnet_cleanup(void); /* Teardown IrDA part of IrNET */ 519 irda_irnet_cleanup(void); /* Teardown IrDA part of IrNET */
520/* ---------------------------- MODULE ---------------------------- */
521extern int
522 irnet_init(void); /* Initialise IrNET module */
523 520
524/**************************** VARIABLES ****************************/ 521/**************************** VARIABLES ****************************/
525 522
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index f8f984bb9922..e53bf9e0053e 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -1107,7 +1107,7 @@ ppp_irnet_cleanup(void)
1107/* 1107/*
1108 * Module main entry point 1108 * Module main entry point
1109 */ 1109 */
1110int __init 1110static int __init
1111irnet_init(void) 1111irnet_init(void)
1112{ 1112{
1113 int err; 1113 int err;
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index b0dd3ea35999..1ba8c7106639 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -822,7 +822,6 @@ void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
822 822
823 return entry; 823 return entry;
824} 824}
825EXPORT_SYMBOL(hashbin_find_next);
826 825
827/* 826/*
828 * Function hashbin_get_first (hashbin) 827 * Function hashbin_get_first (hashbin)
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 5de05a0bc0ff..8b5eefd70f03 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
78 if (!skb_prev) 78 if (!skb_prev)
79 skb_queue_head(&lapb->write_queue, skb); 79 skb_queue_head(&lapb->write_queue, skb);
80 else 80 else
81 skb_append(skb_prev, skb); 81 skb_append(skb_prev, skb, &lapb->write_queue);
82 skb_prev = skb; 82 skb_prev = skb;
83 } 83 }
84} 84}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 20b4cfebd74c..66f55e514b56 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -23,13 +23,13 @@
23#include <linux/config.h> 23#include <linux/config.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/tcp.h>
27#include <linux/rtnetlink.h> 26#include <linux/rtnetlink.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <net/llc.h> 28#include <net/llc.h>
30#include <net/llc_sap.h> 29#include <net/llc_sap.h>
31#include <net/llc_pdu.h> 30#include <net/llc_pdu.h>
32#include <net/llc_conn.h> 31#include <net/llc_conn.h>
32#include <net/tcp_states.h>
33 33
34/* remember: uninitialized global data is zeroed because its in .bss */ 34/* remember: uninitialized global data is zeroed because its in .bss */
35static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START; 35static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
714 if (uaddr) 714 if (uaddr)
715 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 715 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
716 msg->msg_namelen = sizeof(*uaddr); 716 msg->msg_namelen = sizeof(*uaddr);
717 if (!skb->list) { 717 if (!skb->next) {
718dgram_free: 718dgram_free:
719 kfree_skb(skb); 719 kfree_skb(skb);
720 } 720 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index eba812a9c69c..4c644bc70eae 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -16,7 +16,7 @@
16#include <net/llc_sap.h> 16#include <net/llc_sap.h>
17#include <net/llc_conn.h> 17#include <net/llc_conn.h>
18#include <net/sock.h> 18#include <net/sock.h>
19#include <linux/tcp.h> 19#include <net/tcp_states.h>
20#include <net/llc_c_ev.h> 20#include <net/llc_c_ev.h>
21#include <net/llc_c_ac.h> 21#include <net/llc_c_ac.h>
22#include <net/llc_c_st.h> 22#include <net/llc_c_st.h>
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
71 71
72 if (!ev->ind_prim && !ev->cfm_prim) { 72 if (!ev->ind_prim && !ev->cfm_prim) {
73 /* indicate or confirm not required */ 73 /* indicate or confirm not required */
74 if (!skb->list) 74 /* XXX this is not very pretty, perhaps we should store
75 * XXX indicate/confirm-needed state in the llc_conn_state_ev
76 * XXX control block of the SKB instead? -DaveM
77 */
78 if (!skb->next)
75 goto out_kfree_skb; 79 goto out_kfree_skb;
76 goto out_skb_put; 80 goto out_skb_put;
77 } 81 }
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 5ff02c080a0b..9727455bf0e7 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -103,7 +103,8 @@ out:
103struct llc_sap *llc_sap_open(unsigned char lsap, 103struct llc_sap *llc_sap_open(unsigned char lsap,
104 int (*func)(struct sk_buff *skb, 104 int (*func)(struct sk_buff *skb,
105 struct net_device *dev, 105 struct net_device *dev,
106 struct packet_type *pt)) 106 struct packet_type *pt,
107 struct net_device *orig_dev))
107{ 108{
108 struct llc_sap *sap = llc_sap_find(lsap); 109 struct llc_sap *sap = llc_sap_find(lsap);
109 110
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 0f9fc48aeaf9..0f84f66018e4 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -15,7 +15,6 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/tcp.h>
19#include <asm/errno.h> 18#include <asm/errno.h>
20#include <net/llc_if.h> 19#include <net/llc_if.h>
21#include <net/llc_sap.h> 20#include <net/llc_sap.h>
@@ -25,6 +24,7 @@
25#include <net/llc_c_ev.h> 24#include <net/llc_c_ev.h>
26#include <net/llc_c_ac.h> 25#include <net/llc_c_ac.h>
27#include <net/llc_c_st.h> 26#include <net/llc_c_st.h>
27#include <net/tcp_states.h>
28 28
29u8 llc_mac_null_var[IFHWADDRLEN]; 29u8 llc_mac_null_var[IFHWADDRLEN];
30 30
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 4da6976efc9c..13b46240b7a1 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -132,7 +132,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
132 * data now), it queues this frame in the connection's backlog. 132 * data now), it queues this frame in the connection's backlog.
133 */ 133 */
134int llc_rcv(struct sk_buff *skb, struct net_device *dev, 134int llc_rcv(struct sk_buff *skb, struct net_device *dev,
135 struct packet_type *pt) 135 struct packet_type *pt, struct net_device *orig_dev)
136{ 136{
137 struct llc_sap *sap; 137 struct llc_sap *sap;
138 struct llc_pdu_sn *pdu; 138 struct llc_pdu_sn *pdu;
@@ -165,7 +165,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
165 * LLC functionality 165 * LLC functionality
166 */ 166 */
167 if (sap->rcv_func) { 167 if (sap->rcv_func) {
168 sap->rcv_func(skb, dev, pt); 168 sap->rcv_func(skb, dev, pt, orig_dev);
169 goto out; 169 goto out;
170 } 170 }
171 dest = llc_pdu_type(skb); 171 dest = llc_pdu_type(skb);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 965c94eb4bbc..34228ef14985 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -21,7 +21,7 @@
21#include <net/llc_s_ev.h> 21#include <net/llc_s_ev.h>
22#include <net/llc_s_st.h> 22#include <net/llc_s_st.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <linux/tcp.h> 24#include <net/tcp_states.h>
25#include <linux/llc.h> 25#include <linux/llc.h>
26 26
27/** 27/**
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
new file mode 100644
index 000000000000..8296b38bf270
--- /dev/null
+++ b/net/netfilter/Kconfig
@@ -0,0 +1,24 @@
1config NETFILTER_NETLINK
2 tristate "Netfilter netlink interface"
3 help
4 If this option is enabled, the kernel will include support
5 for the new netfilter netlink interface.
6
7config NETFILTER_NETLINK_QUEUE
8 tristate "Netfilter NFQUEUE over NFNETLINK interface"
9 depends on NETFILTER_NETLINK
10 help
11	  If this option is enabled, the kernel will include support
12 for queueing packets via NFNETLINK.
13
14config NETFILTER_NETLINK_LOG
15 tristate "Netfilter LOG over NFNETLINK interface"
16 depends on NETFILTER_NETLINK
17 help
18 If this option is enabled, the kernel will include support
19 for logging packets via NFNETLINK.
20
21	  This obsoletes the existing ipt_ULOG and ebt_ulog mechanisms,
22 and is also scheduled to replace the old syslog-based ipt_LOG
23 and ip6t_LOG modules.
24
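
For reference, the three options above are typically selected together; a minimal, hypothetical .config fragment (assuming modular builds, with option names taken from the Kconfig entries above) would look like:

    # Hypothetical .config fragment enabling the new nfnetlink infrastructure.
    # NETFILTER itself is a bool; the nfnetlink pieces can be built as modules.
    CONFIG_NETFILTER=y
    CONFIG_NETFILTER_NETLINK=m
    CONFIG_NETFILTER_NETLINK_QUEUE=m
    CONFIG_NETFILTER_NETLINK_LOG=m
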
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
new file mode 100644
index 000000000000..b3b44f8b415a
--- /dev/null
+++ b/net/netfilter/Makefile
@@ -0,0 +1,7 @@
1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
2
3obj-$(CONFIG_NETFILTER) = netfilter.o
4
5obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
6obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
7obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
new file mode 100644
index 000000000000..1ceb1a6c254b
--- /dev/null
+++ b/net/netfilter/core.c
@@ -0,0 +1,216 @@
1/* netfilter.c: look after the filters for various protocols.
2 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
3 *
4 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
5 * way.
6 *
7 * Rusty Russell (C)2000 -- This code is GPL.
8 *
9 * February 2000: Modified by James Morris to have 1 queue per protocol.
10 * 15-Mar-2000: Added NF_REPEAT --RR.
11 * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
12 */
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/netfilter.h>
16#include <net/protocol.h>
17#include <linux/init.h>
18#include <linux/skbuff.h>
19#include <linux/wait.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/if.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/proc_fs.h>
26#include <net/sock.h>
27
28#include "nf_internals.h"
29
30/* In this code, we can be waiting indefinitely for userspace to
31 * service a packet if a hook returns NF_QUEUE. We could keep a count
32 * of skbuffs queued for userspace, and not deregister a hook unless
33 * this is zero, but that sucks. Now, we simply check when the
34 * packets come back: if the hook is gone, the packet is discarded. */
35struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
36EXPORT_SYMBOL(nf_hooks);
37static DEFINE_SPINLOCK(nf_hook_lock);
38
39int nf_register_hook(struct nf_hook_ops *reg)
40{
41 struct list_head *i;
42
43 spin_lock_bh(&nf_hook_lock);
44 list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
45 if (reg->priority < ((struct nf_hook_ops *)i)->priority)
46 break;
47 }
48 list_add_rcu(&reg->list, i->prev);
49 spin_unlock_bh(&nf_hook_lock);
50
51 synchronize_net();
52 return 0;
53}
54EXPORT_SYMBOL(nf_register_hook);
55
56void nf_unregister_hook(struct nf_hook_ops *reg)
57{
58 spin_lock_bh(&nf_hook_lock);
59 list_del_rcu(&reg->list);
60 spin_unlock_bh(&nf_hook_lock);
61
62 synchronize_net();
63}
64EXPORT_SYMBOL(nf_unregister_hook);
65
66unsigned int nf_iterate(struct list_head *head,
67 struct sk_buff **skb,
68 int hook,
69 const struct net_device *indev,
70 const struct net_device *outdev,
71 struct list_head **i,
72 int (*okfn)(struct sk_buff *),
73 int hook_thresh)
74{
75 unsigned int verdict;
76
77 /*
78 * The caller must not block between calls to this
79 * function because of risk of continuing from deleted element.
80 */
81 list_for_each_continue_rcu(*i, head) {
82 struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
83
84 if (hook_thresh > elem->priority)
85 continue;
86
87 /* Optimization: we don't need to hold module
88 reference here, since function can't sleep. --RR */
89 verdict = elem->hook(hook, skb, indev, outdev, okfn);
90 if (verdict != NF_ACCEPT) {
91#ifdef CONFIG_NETFILTER_DEBUG
92 if (unlikely((verdict & NF_VERDICT_MASK)
93 > NF_MAX_VERDICT)) {
94 NFDEBUG("Evil return from %p(%u).\n",
95 elem->hook, hook);
96 continue;
97 }
98#endif
99 if (verdict != NF_REPEAT)
100 return verdict;
101 *i = (*i)->prev;
102 }
103 }
104 return NF_ACCEPT;
105}
106
107
108/* Returns 1 if okfn() needs to be executed by the caller,
109 * -EPERM for NF_DROP, 0 otherwise. */
110int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
111 struct net_device *indev,
112 struct net_device *outdev,
113 int (*okfn)(struct sk_buff *),
114 int hook_thresh)
115{
116 struct list_head *elem;
117 unsigned int verdict;
118 int ret = 0;
119
120 /* We may already have this, but read-locks nest anyway */
121 rcu_read_lock();
122
123 elem = &nf_hooks[pf][hook];
124next_hook:
125 verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
126 outdev, &elem, okfn, hook_thresh);
127 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
128 ret = 1;
129 goto unlock;
130 } else if (verdict == NF_DROP) {
131 kfree_skb(*pskb);
132 ret = -EPERM;
133 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
134 NFDEBUG("nf_hook: Verdict = QUEUE.\n");
135 if (!nf_queue(pskb, elem, pf, hook, indev, outdev, okfn,
136 verdict >> NF_VERDICT_BITS))
137 goto next_hook;
138 }
139unlock:
140 rcu_read_unlock();
141 return ret;
142}
143EXPORT_SYMBOL(nf_hook_slow);
144
145
146int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len)
147{
148 struct sk_buff *nskb;
149
150 if (writable_len > (*pskb)->len)
151 return 0;
152
153 /* Not exclusive use of packet? Must copy. */
154 if (skb_shared(*pskb) || skb_cloned(*pskb))
155 goto copy_skb;
156
157 return pskb_may_pull(*pskb, writable_len);
158
159copy_skb:
160 nskb = skb_copy(*pskb, GFP_ATOMIC);
161 if (!nskb)
162 return 0;
163 BUG_ON(skb_is_nonlinear(nskb));
164
165 /* Rest of kernel will get very unhappy if we pass it a
166 suddenly-orphaned skbuff */
167 if ((*pskb)->sk)
168 skb_set_owner_w(nskb, (*pskb)->sk);
169 kfree_skb(*pskb);
170 *pskb = nskb;
171 return 1;
172}
173EXPORT_SYMBOL(skb_make_writable);
174
175
176/* This does not belong here, but locally generated errors need it if connection
177 tracking in use: without this, connection may not be in hash table, and hence
178 manufactured ICMP or RST packets will not be associated with it. */
179void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
180EXPORT_SYMBOL(ip_ct_attach);
181
182void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
183{
184 void (*attach)(struct sk_buff *, struct sk_buff *);
185
186 if (skb->nfct && (attach = ip_ct_attach) != NULL) {
187 mb(); /* Just to be sure: must be read before executing this */
188 attach(new, skb);
189 }
190}
191EXPORT_SYMBOL(nf_ct_attach);
192
193#ifdef CONFIG_PROC_FS
194struct proc_dir_entry *proc_net_netfilter;
195EXPORT_SYMBOL(proc_net_netfilter);
196#endif
197
198void __init netfilter_init(void)
199{
200 int i, h;
201 for (i = 0; i < NPROTO; i++) {
202 for (h = 0; h < NF_MAX_HOOKS; h++)
203 INIT_LIST_HEAD(&nf_hooks[i][h]);
204 }
205
206#ifdef CONFIG_PROC_FS
207 proc_net_netfilter = proc_mkdir("netfilter", proc_net);
208 if (!proc_net_netfilter)
209 panic("cannot create netfilter proc entry");
210#endif
211
212 if (netfilter_queue_init() < 0)
213 panic("cannot initialize nf_queue");
214 if (netfilter_log_init() < 0)
215 panic("cannot initialize nf_log");
216}
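
To make the hook API above concrete, here is a minimal, hypothetical module sketch (not part of this patch): the hook signature mirrors the elem->hook(hook, skb, indev, outdev, okfn) call in nf_iterate(), and skb_make_writable() is used as intended before touching packet data. All my_* names are illustrative.

    /* Hypothetical example, not part of this patch: hook IPv4 PRE_ROUTING
     * and make the IP header writable before accepting the packet. */
    #include <linux/module.h>
    #include <linux/ip.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>

    static unsigned int my_hook(unsigned int hooknum,
                                struct sk_buff **pskb,
                                const struct net_device *in,
                                const struct net_device *out,
                                int (*okfn)(struct sk_buff *))
    {
            /* Ensure exclusive, linear access before any header writes;
             * skb_make_writable() returns 0 on failure. */
            if (!skb_make_writable(pskb, sizeof(struct iphdr)))
                    return NF_DROP;
            return NF_ACCEPT;       /* continue traversal at the next hook */
    }

    static struct nf_hook_ops my_ops = {
            .hook     = my_hook,
            .owner    = THIS_MODULE,
            .pf       = PF_INET,
            .hooknum  = NF_IP_PRE_ROUTING,
            .priority = NF_IP_PRI_FIRST,
    };

    static int __init my_init(void)
    {
            return nf_register_hook(&my_ops); /* sorted into nf_hooks[][] by priority */
    }

    static void __exit my_exit(void)
    {
            nf_unregister_hook(&my_ops);      /* list_del_rcu + synchronize_net */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
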
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
new file mode 100644
index 000000000000..6bdee2910617
--- /dev/null
+++ b/net/netfilter/nf_internals.h
@@ -0,0 +1,39 @@
1#ifndef _NF_INTERNALS_H
2#define _NF_INTERNALS_H
3
4#include <linux/config.h>
5#include <linux/list.h>
6#include <linux/skbuff.h>
7#include <linux/netdevice.h>
8
9#ifdef CONFIG_NETFILTER_DEBUG
10#define NFDEBUG(format, args...) printk(format , ## args)
11#else
12#define NFDEBUG(format, args...)
13#endif
14
15
16/* core.c */
17extern unsigned int nf_iterate(struct list_head *head,
18 struct sk_buff **skb,
19 int hook,
20 const struct net_device *indev,
21 const struct net_device *outdev,
22 struct list_head **i,
23 int (*okfn)(struct sk_buff *),
24 int hook_thresh);
25
26/* nf_queue.c */
27extern int nf_queue(struct sk_buff **skb,
28 struct list_head *elem,
29 int pf, unsigned int hook,
30 struct net_device *indev,
31 struct net_device *outdev,
32 int (*okfn)(struct sk_buff *),
33 unsigned int queuenum);
34extern int __init netfilter_queue_init(void);
35
36/* nf_log.c */
37extern int __init netfilter_log_init(void);
38
39#endif
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
new file mode 100644
index 000000000000..3e76bd0824a2
--- /dev/null
+++ b/net/netfilter/nf_log.c
@@ -0,0 +1,178 @@
1#include <linux/config.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/module.h>
5#include <linux/proc_fs.h>
6#include <linux/skbuff.h>
7#include <linux/netfilter.h>
8#include <linux/seq_file.h>
9#include <net/protocol.h>
10
11#include "nf_internals.h"
12
13/* Internal logging interface, which relies on the real
14 LOG target modules */
15
16#define NF_LOG_PREFIXLEN 128
17
18static struct nf_logger *nf_logging[NPROTO]; /* = NULL */
19static DEFINE_SPINLOCK(nf_log_lock);
20
21/* return EBUSY if somebody else is registered, EEXIST if the same logger
22 * is registered, 0 on success. */
23int nf_log_register(int pf, struct nf_logger *logger)
24{
25 int ret = -EBUSY;
26
27 if (pf >= NPROTO)
28 return -EINVAL;
29
30 /* Any setup of logging members must be done before
31 * substituting pointer. */
32 spin_lock(&nf_log_lock);
33 if (!nf_logging[pf]) {
34 rcu_assign_pointer(nf_logging[pf], logger);
35 ret = 0;
36 } else if (nf_logging[pf] == logger)
37 ret = -EEXIST;
38
39 spin_unlock(&nf_log_lock);
40 return ret;
41}
42EXPORT_SYMBOL(nf_log_register);
43
44int nf_log_unregister_pf(int pf)
45{
46 if (pf >= NPROTO)
47 return -EINVAL;
48
49 spin_lock(&nf_log_lock);
50 nf_logging[pf] = NULL;
51 spin_unlock(&nf_log_lock);
52
53 /* Give time to concurrent readers. */
54 synchronize_net();
55
56 return 0;
57}
58EXPORT_SYMBOL(nf_log_unregister_pf);
59
60void nf_log_unregister_logger(struct nf_logger *logger)
61{
62 int i;
63
64 spin_lock(&nf_log_lock);
65 for (i = 0; i < NPROTO; i++) {
66 if (nf_logging[i] == logger)
67 nf_logging[i] = NULL;
68 }
69 spin_unlock(&nf_log_lock);
70
71 synchronize_net();
72}
73EXPORT_SYMBOL(nf_log_unregister_logger);
74
75void nf_log_packet(int pf,
76 unsigned int hooknum,
77 const struct sk_buff *skb,
78 const struct net_device *in,
79 const struct net_device *out,
80 struct nf_loginfo *loginfo,
81 const char *fmt, ...)
82{
83 va_list args;
84 char prefix[NF_LOG_PREFIXLEN];
85 struct nf_logger *logger;
86
87 rcu_read_lock();
88 logger = rcu_dereference(nf_logging[pf]);
89 if (logger) {
90 va_start(args, fmt);
91 vsnprintf(prefix, sizeof(prefix), fmt, args);
92 va_end(args);
93 /* We must read logging before nf_logfn[pf] */
94 logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
95 } else if (net_ratelimit()) {
96 printk(KERN_WARNING "nf_log_packet: can\'t log since "
97 "no backend logging module loaded in! Please either "
98 "load one, or disable logging explicitly\n");
99 }
100 rcu_read_unlock();
101}
102EXPORT_SYMBOL(nf_log_packet);
103
104#ifdef CONFIG_PROC_FS
105static void *seq_start(struct seq_file *seq, loff_t *pos)
106{
107 rcu_read_lock();
108
109 if (*pos >= NPROTO)
110 return NULL;
111
112 return pos;
113}
114
115static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
116{
117 (*pos)++;
118
119 if (*pos >= NPROTO)
120 return NULL;
121
122 return pos;
123}
124
125static void seq_stop(struct seq_file *s, void *v)
126{
127 rcu_read_unlock();
128}
129
130static int seq_show(struct seq_file *s, void *v)
131{
132 loff_t *pos = v;
133 const struct nf_logger *logger;
134
135 logger = rcu_dereference(nf_logging[*pos]);
136
137 if (!logger)
138 return seq_printf(s, "%2lld NONE\n", *pos);
139
140 return seq_printf(s, "%2lld %s\n", *pos, logger->name);
141}
142
143static struct seq_operations nflog_seq_ops = {
144 .start = seq_start,
145 .next = seq_next,
146 .stop = seq_stop,
147 .show = seq_show,
148};
149
150static int nflog_open(struct inode *inode, struct file *file)
151{
152 return seq_open(file, &nflog_seq_ops);
153}
154
155static struct file_operations nflog_file_ops = {
156 .owner = THIS_MODULE,
157 .open = nflog_open,
158 .read = seq_read,
159 .llseek = seq_lseek,
160 .release = seq_release,
161};
162
163#endif /* PROC_FS */
164
165
166int __init netfilter_log_init(void)
167{
168#ifdef CONFIG_PROC_FS
169 struct proc_dir_entry *pde;
170
171 pde = create_proc_entry("nf_log", S_IRUGO, proc_net_netfilter);
172 if (!pde)
173 return -1;
174
175 pde->proc_fops = &nflog_file_ops;
176#endif
177 return 0;
178}
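
The registration semantics above (one logger per protocol family, dispatched through nf_log_packet) can be illustrated with a hypothetical backend. The logfn signature below is inferred from the logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix) call in nf_log_packet() rather than spelled out in this file, so treat it as a sketch.

    /* Hypothetical logging backend, for illustration only. */
    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/netfilter.h>

    static void my_logfn(int pf, unsigned int hooknum,
                         const struct sk_buff *skb,
                         const struct net_device *in,
                         const struct net_device *out,
                         struct nf_loginfo *li,
                         const char *prefix)
    {
            /* A real backend (e.g. nfnetlink_log) would serialize the packet;
             * here we only print the prefix that nf_log_packet() built. */
            printk(KERN_INFO "my_log: %s\n", prefix);
    }

    static struct nf_logger my_logger = {
            .name  = "my_log",
            .logfn = my_logfn,
            /* assumption: the struct may also carry a module owner field */
    };

    static int __init my_log_init(void)
    {
            /* -EBUSY if another logger owns PF_INET, -EEXIST if re-registered */
            return nf_log_register(PF_INET, &my_logger);
    }

    static void __exit my_log_exit(void)
    {
            nf_log_unregister_logger(&my_logger);
    }

    module_init(my_log_init);
    module_exit(my_log_exit);
    MODULE_LICENSE("GPL");
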
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
new file mode 100644
index 000000000000..d10d552d9c40
--- /dev/null
+++ b/net/netfilter/nf_queue.c
@@ -0,0 +1,343 @@
1#include <linux/config.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/module.h>
5#include <linux/proc_fs.h>
6#include <linux/skbuff.h>
7#include <linux/netfilter.h>
8#include <linux/seq_file.h>
9#include <net/protocol.h>
10
11#include "nf_internals.h"
12
13/*
14 * A queue handler may be registered for each protocol. Each is protected by
15 * a long-term mutex. The handler must provide an outfn() to accept packets
16 * for queueing and must reinject all packets it receives, no matter what.
17 */
18static struct nf_queue_handler *queue_handler[NPROTO];
19static struct nf_queue_rerouter *queue_rerouter;
20
21static DEFINE_RWLOCK(queue_handler_lock);
22
23/* return EBUSY when somebody else is registered, return EEXIST if the
24 * same handler is registered, return 0 in case of success. */
25int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
26{
27 int ret;
28
29 if (pf >= NPROTO)
30 return -EINVAL;
31
32 write_lock_bh(&queue_handler_lock);
33 if (queue_handler[pf] == qh)
34 ret = -EEXIST;
35 else if (queue_handler[pf])
36 ret = -EBUSY;
37 else {
38 queue_handler[pf] = qh;
39 ret = 0;
40 }
41 write_unlock_bh(&queue_handler_lock);
42
43 return ret;
44}
45EXPORT_SYMBOL(nf_register_queue_handler);
46
47/* The caller must flush their queue before this */
48int nf_unregister_queue_handler(int pf)
49{
50 if (pf >= NPROTO)
51 return -EINVAL;
52
53 write_lock_bh(&queue_handler_lock);
54 queue_handler[pf] = NULL;
55 write_unlock_bh(&queue_handler_lock);
56
57 return 0;
58}
59EXPORT_SYMBOL(nf_unregister_queue_handler);
60
61int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
62{
63 if (pf >= NPROTO)
64 return -EINVAL;
65
66 write_lock_bh(&queue_handler_lock);
67 memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
68 write_unlock_bh(&queue_handler_lock);
69
70 return 0;
71}
72EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
73
74int nf_unregister_queue_rerouter(int pf)
75{
76 if (pf >= NPROTO)
77 return -EINVAL;
78
79 write_lock_bh(&queue_handler_lock);
80 memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
81 write_unlock_bh(&queue_handler_lock);
82 return 0;
83}
84EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
85
86void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
87{
88 int pf;
89
90 write_lock_bh(&queue_handler_lock);
91 for (pf = 0; pf < NPROTO; pf++) {
92 if (queue_handler[pf] == qh)
93 queue_handler[pf] = NULL;
94 }
95 write_unlock_bh(&queue_handler_lock);
96}
97EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
98
99/*
100 * Any packet that leaves via this function must come back
101 * through nf_reinject().
102 */
103int nf_queue(struct sk_buff **skb,
104 struct list_head *elem,
105 int pf, unsigned int hook,
106 struct net_device *indev,
107 struct net_device *outdev,
108 int (*okfn)(struct sk_buff *),
109 unsigned int queuenum)
110{
111 int status;
112 struct nf_info *info;
113#ifdef CONFIG_BRIDGE_NETFILTER
114 struct net_device *physindev = NULL;
115 struct net_device *physoutdev = NULL;
116#endif
117
118	/* QUEUE == DROP if no one is waiting, to be safe. */
119 read_lock(&queue_handler_lock);
120 if (!queue_handler[pf]->outfn) {
121 read_unlock(&queue_handler_lock);
122 kfree_skb(*skb);
123 return 1;
124 }
125
126 info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
127 if (!info) {
128 if (net_ratelimit())
129 printk(KERN_ERR "OOM queueing packet %p\n",
130 *skb);
131 read_unlock(&queue_handler_lock);
132 kfree_skb(*skb);
133 return 1;
134 }
135
136 *info = (struct nf_info) {
137 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };
138
139 /* If it's going away, ignore hook. */
140 if (!try_module_get(info->elem->owner)) {
141 read_unlock(&queue_handler_lock);
142 kfree(info);
143 return 0;
144 }
145
146 /* Bump dev refs so they don't vanish while packet is out */
147 if (indev) dev_hold(indev);
148 if (outdev) dev_hold(outdev);
149
150#ifdef CONFIG_BRIDGE_NETFILTER
151 if ((*skb)->nf_bridge) {
152 physindev = (*skb)->nf_bridge->physindev;
153 if (physindev) dev_hold(physindev);
154 physoutdev = (*skb)->nf_bridge->physoutdev;
155 if (physoutdev) dev_hold(physoutdev);
156 }
157#endif
158 if (queue_rerouter[pf].save)
159 queue_rerouter[pf].save(*skb, info);
160
161 status = queue_handler[pf]->outfn(*skb, info, queuenum,
162 queue_handler[pf]->data);
163
164 if (status >= 0 && queue_rerouter[pf].reroute)
165 status = queue_rerouter[pf].reroute(skb, info);
166
167 read_unlock(&queue_handler_lock);
168
169 if (status < 0) {
170 /* James M doesn't say fuck enough. */
171 if (indev) dev_put(indev);
172 if (outdev) dev_put(outdev);
173#ifdef CONFIG_BRIDGE_NETFILTER
174 if (physindev) dev_put(physindev);
175 if (physoutdev) dev_put(physoutdev);
176#endif
177 module_put(info->elem->owner);
178 kfree(info);
179 kfree_skb(*skb);
180
181 return 1;
182 }
183
184 return 1;
185}
186
187void nf_reinject(struct sk_buff *skb, struct nf_info *info,
188 unsigned int verdict)
189{
190 struct list_head *elem = &info->elem->list;
191 struct list_head *i;
192
193 rcu_read_lock();
194
195 /* Release those devices we held, or Alexey will kill me. */
196 if (info->indev) dev_put(info->indev);
197 if (info->outdev) dev_put(info->outdev);
198#ifdef CONFIG_BRIDGE_NETFILTER
199 if (skb->nf_bridge) {
200 if (skb->nf_bridge->physindev)
201 dev_put(skb->nf_bridge->physindev);
202 if (skb->nf_bridge->physoutdev)
203 dev_put(skb->nf_bridge->physoutdev);
204 }
205#endif
206
207 /* Drop reference to owner of hook which queued us. */
208 module_put(info->elem->owner);
209
210 list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
211 if (i == elem)
212 break;
213 }
214
215 if (elem == &nf_hooks[info->pf][info->hook]) {
216 /* The module which sent it to userspace is gone. */
217 NFDEBUG("%s: module disappeared, dropping packet.\n",
218 __FUNCTION__);
219 verdict = NF_DROP;
220 }
221
222 /* Continue traversal iff userspace said ok... */
223 if (verdict == NF_REPEAT) {
224 elem = elem->prev;
225 verdict = NF_ACCEPT;
226 }
227
228 if (verdict == NF_ACCEPT) {
229 next_hook:
230 verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
231 &skb, info->hook,
232 info->indev, info->outdev, &elem,
233 info->okfn, INT_MIN);
234 }
235
236 switch (verdict & NF_VERDICT_MASK) {
237 case NF_ACCEPT:
238 info->okfn(skb);
239 break;
240
241 case NF_QUEUE:
242 if (!nf_queue(&skb, elem, info->pf, info->hook,
243 info->indev, info->outdev, info->okfn,
244 verdict >> NF_VERDICT_BITS))
245 goto next_hook;
246 break;
247 }
248 rcu_read_unlock();
249
250 if (verdict == NF_DROP)
251 kfree_skb(skb);
252
253 kfree(info);
254 return;
255}
256EXPORT_SYMBOL(nf_reinject);
257
258#ifdef CONFIG_PROC_FS
259static void *seq_start(struct seq_file *seq, loff_t *pos)
260{
261 if (*pos >= NPROTO)
262 return NULL;
263
264 return pos;
265}
266
267static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
268{
269 (*pos)++;
270
271 if (*pos >= NPROTO)
272 return NULL;
273
274 return pos;
275}
276
277static void seq_stop(struct seq_file *s, void *v)
278{
279
280}
281
282static int seq_show(struct seq_file *s, void *v)
283{
284 int ret;
285 loff_t *pos = v;
286 struct nf_queue_handler *qh;
287
288 read_lock_bh(&queue_handler_lock);
289 qh = queue_handler[*pos];
290 if (!qh)
291 ret = seq_printf(s, "%2lld NONE\n", *pos);
292 else
293 ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
294 read_unlock_bh(&queue_handler_lock);
295
296 return ret;
297}
298
299static struct seq_operations nfqueue_seq_ops = {
300 .start = seq_start,
301 .next = seq_next,
302 .stop = seq_stop,
303 .show = seq_show,
304};
305
306static int nfqueue_open(struct inode *inode, struct file *file)
307{
308 return seq_open(file, &nfqueue_seq_ops);
309}
310
311static struct file_operations nfqueue_file_ops = {
312 .owner = THIS_MODULE,
313 .open = nfqueue_open,
314 .read = seq_read,
315 .llseek = seq_lseek,
316 .release = seq_release,
317};
318#endif /* PROC_FS */
319
320
321int __init netfilter_queue_init(void)
322{
323#ifdef CONFIG_PROC_FS
324 struct proc_dir_entry *pde;
325#endif
326 queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
327 GFP_KERNEL);
328 if (!queue_rerouter)
329 return -ENOMEM;
330
331#ifdef CONFIG_PROC_FS
332 pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
333 if (!pde) {
334 kfree(queue_rerouter);
335 return -1;
336 }
337 pde->proc_fops = &nfqueue_file_ops;
338#endif
339 memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
340
341 return 0;
342}
343
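For completeness, a hypothetical queue handler against this interface; the outfn signature is inferred from the queue_handler[pf]->outfn(...) call in nf_queue(), and every packet accepted by outfn() must eventually come back through nf_reinject(), as the comment above nf_queue() requires.

    /* Hypothetical queue handler, for illustration only. */
    #include <linux/module.h>
    #include <linux/netfilter.h>

    static int my_outfn(struct sk_buff *skb, struct nf_info *info,
                        unsigned int queuenum, void *data)
    {
            /* A real handler (e.g. nfnetlink_queue) would hand the skb to
             * userspace and reinject later; here we reinject immediately
             * with an ACCEPT verdict. */
            nf_reinject(skb, info, NF_ACCEPT);
            return 0;       /* >= 0 means the packet was queued successfully */
    }

    static struct nf_queue_handler my_qh = {
            .outfn = my_outfn,
            .name  = "my_queue",
            /* .data is passed back as the last outfn() argument */
    };

    static int __init my_queue_init(void)
    {
            /* -EBUSY if another handler owns PF_INET, -EEXIST if re-registered */
            return nf_register_queue_handler(PF_INET, &my_qh);
    }

    static void __exit my_queue_exit(void)
    {
            nf_unregister_queue_handler(PF_INET);
    }

    module_init(my_queue_init);
    module_exit(my_queue_exit);
    MODULE_LICENSE("GPL");
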
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
new file mode 100644
index 000000000000..61a833a9caa6
--- /dev/null
+++ b/net/netfilter/nf_sockopt.c
@@ -0,0 +1,132 @@
1#include <linux/config.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/module.h>
5#include <linux/skbuff.h>
6#include <linux/netfilter.h>
7#include <net/sock.h>
8
9#include "nf_internals.h"
10
11/* Sockopts only registered and called from user context, so
12 net locking would be overkill. Also, [gs]etsockopt calls may
13 sleep. */
14static DECLARE_MUTEX(nf_sockopt_mutex);
15static LIST_HEAD(nf_sockopts);
16
17/* Do exclusive ranges overlap? */
18static inline int overlap(int min1, int max1, int min2, int max2)
19{
20 return max1 > min2 && min1 < max2;
21}
22
23/* Functions to register sockopt ranges (exclusive). */
24int nf_register_sockopt(struct nf_sockopt_ops *reg)
25{
26 struct list_head *i;
27 int ret = 0;
28
29 if (down_interruptible(&nf_sockopt_mutex) != 0)
30 return -EINTR;
31
32 list_for_each(i, &nf_sockopts) {
33 struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
34 if (ops->pf == reg->pf
35 && (overlap(ops->set_optmin, ops->set_optmax,
36 reg->set_optmin, reg->set_optmax)
37 || overlap(ops->get_optmin, ops->get_optmax,
38 reg->get_optmin, reg->get_optmax))) {
39 NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
40 ops->set_optmin, ops->set_optmax,
41 ops->get_optmin, ops->get_optmax,
42 reg->set_optmin, reg->set_optmax,
43 reg->get_optmin, reg->get_optmax);
44 ret = -EBUSY;
45 goto out;
46 }
47 }
48
49 list_add(&reg->list, &nf_sockopts);
50out:
51 up(&nf_sockopt_mutex);
52 return ret;
53}
54EXPORT_SYMBOL(nf_register_sockopt);
55
56void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
57{
58 /* No point being interruptible: we're probably in cleanup_module() */
59 restart:
60 down(&nf_sockopt_mutex);
61 if (reg->use != 0) {
62 /* To be woken by nf_sockopt call... */
63 /* FIXME: Stuart Young's name appears gratuitously. */
64 set_current_state(TASK_UNINTERRUPTIBLE);
65 reg->cleanup_task = current;
66 up(&nf_sockopt_mutex);
67 schedule();
68 goto restart;
69 }
70 list_del(&reg->list);
71 up(&nf_sockopt_mutex);
72}
73EXPORT_SYMBOL(nf_unregister_sockopt);
74
75/* Call get/setsockopt() */
76static int nf_sockopt(struct sock *sk, int pf, int val,
77 char __user *opt, int *len, int get)
78{
79 struct list_head *i;
80 struct nf_sockopt_ops *ops;
81 int ret;
82
83 if (down_interruptible(&nf_sockopt_mutex) != 0)
84 return -EINTR;
85
86 list_for_each(i, &nf_sockopts) {
87 ops = (struct nf_sockopt_ops *)i;
88 if (ops->pf == pf) {
89 if (get) {
90 if (val >= ops->get_optmin
91 && val < ops->get_optmax) {
92 ops->use++;
93 up(&nf_sockopt_mutex);
94 ret = ops->get(sk, val, opt, len);
95 goto out;
96 }
97 } else {
98 if (val >= ops->set_optmin
99 && val < ops->set_optmax) {
100 ops->use++;
101 up(&nf_sockopt_mutex);
102 ret = ops->set(sk, val, opt, *len);
103 goto out;
104 }
105 }
106 }
107 }
108 up(&nf_sockopt_mutex);
109 return -ENOPROTOOPT;
110
111 out:
112 down(&nf_sockopt_mutex);
113 ops->use--;
114 if (ops->cleanup_task)
115 wake_up_process(ops->cleanup_task);
116 up(&nf_sockopt_mutex);
117 return ret;
118}
119
120int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
121 int len)
122{
123 return nf_sockopt(sk, pf, val, opt, &len, 0);
124}
125EXPORT_SYMBOL(nf_setsockopt);
126
127int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
128{
129 return nf_sockopt(sk, pf, val, opt, len, 1);
130}
131EXPORT_SYMBOL(nf_getsockopt);
132
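A hypothetical sockopt range registration, to show how the exclusive-range overlap check above is exercised; the set()/get() signatures are inferred from the ops->set(sk, val, opt, *len) and ops->get(sk, val, opt, len) call sites in nf_sockopt(), and the option numbers are made up.

    /* Hypothetical sockopt ops, for illustration only. */
    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <net/sock.h>

    #define MY_SO_BASE      96              /* made-up option numbers */
    #define MY_SO_MAX       (MY_SO_BASE + 1)

    static int my_set(struct sock *sk, int val, char __user *user, int len)
    {
            return 0;       /* a real implementation would copy_from_user() here */
    }

    static int my_get(struct sock *sk, int val, char __user *user, int *len)
    {
            return 0;       /* a real implementation would copy_to_user() here */
    }

    static struct nf_sockopt_ops my_sockopts = {
            .pf         = PF_INET,
            .set_optmin = MY_SO_BASE,
            .set_optmax = MY_SO_MAX,        /* exclusive upper bound */
            .set        = my_set,
            .get_optmin = MY_SO_BASE,
            .get_optmax = MY_SO_MAX,        /* exclusive upper bound */
            .get        = my_get,
    };

    /* nf_register_sockopt(&my_sockopts) returns -EBUSY if the [optmin, optmax)
     * ranges overlap an existing registration for the same protocol family. */
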
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
new file mode 100644
index 000000000000..e089f17bb803
--- /dev/null
+++ b/net/netfilter/nfnetlink.c
@@ -0,0 +1,376 @@
1/* Netfilter messages via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
5 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2005 by Pablo Neira Ayuso <pablo@eurodev.net>
7 *
8 * Initial netfilter messages via netlink development funded and
9 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
10 *
11 * Further development of this code funded by Astaro AG (http://www.astaro.com)
12 *
13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference.
15 */
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/socket.h>
21#include <linux/kernel.h>
22#include <linux/major.h>
23#include <linux/sched.h>
24#include <linux/timer.h>
25#include <linux/string.h>
26#include <linux/sockios.h>
27#include <linux/net.h>
28#include <linux/fcntl.h>
29#include <linux/skbuff.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <net/sock.h>
33#include <linux/init.h>
34#include <linux/spinlock.h>
35
36#include <linux/netfilter.h>
37#include <linux/netlink.h>
38#include <linux/netfilter/nfnetlink.h>
39
40MODULE_LICENSE("GPL");
41MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
42MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
43
44static char __initdata nfversion[] = "0.30";
45
46#if 0
47#define DEBUGP(format, args...) \
48 printk(KERN_DEBUG "%s(%d):%s(): " format, __FILE__, \
49 __LINE__, __FUNCTION__, ## args)
50#else
51#define DEBUGP(format, args...)
52#endif
53
54static struct sock *nfnl = NULL;
55static struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
56DECLARE_MUTEX(nfnl_sem);
57
58void nfnl_lock(void)
59{
60 nfnl_shlock();
61}
62
63void nfnl_unlock(void)
64{
65 nfnl_shunlock();
66}
67
68int nfnetlink_subsys_register(struct nfnetlink_subsystem *n)
69{
70 DEBUGP("registering subsystem ID %u\n", n->subsys_id);
71
72 nfnl_lock();
73 if (subsys_table[n->subsys_id]) {
74 nfnl_unlock();
75 return -EBUSY;
76 }
77 subsys_table[n->subsys_id] = n;
78 nfnl_unlock();
79
80 return 0;
81}
82
83int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n)
84{
85 DEBUGP("unregistering subsystem ID %u\n", n->subsys_id);
86
87 nfnl_lock();
88 subsys_table[n->subsys_id] = NULL;
89 nfnl_unlock();
90
91 return 0;
92}
93
94static inline struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type)
95{
96 u_int8_t subsys_id = NFNL_SUBSYS_ID(type);
97
98 if (subsys_id >= NFNL_SUBSYS_COUNT
99 || subsys_table[subsys_id] == NULL)
100 return NULL;
101
102 return subsys_table[subsys_id];
103}
104
105static inline struct nfnl_callback *
106nfnetlink_find_client(u_int16_t type, struct nfnetlink_subsystem *ss)
107{
108 u_int8_t cb_id = NFNL_MSG_TYPE(type);
109
110 if (cb_id >= ss->cb_count) {
111 DEBUGP("msgtype %u >= %u, returning\n", type, ss->cb_count);
112 return NULL;
113 }
114
115 return &ss->cb[cb_id];
116}
117
118void __nfa_fill(struct sk_buff *skb, int attrtype, int attrlen,
119 const void *data)
120{
121 struct nfattr *nfa;
122 int size = NFA_LENGTH(attrlen);
123
124 nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
125 nfa->nfa_type = attrtype;
126 nfa->nfa_len = size;
127 memcpy(NFA_DATA(nfa), data, attrlen);
128 memset(NFA_DATA(nfa) + attrlen, 0, NFA_ALIGN(size) - size);
129}
130
131int nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
132{
133 memset(tb, 0, sizeof(struct nfattr *) * maxattr);
134
135 while (NFA_OK(nfa, len)) {
136 unsigned flavor = nfa->nfa_type;
137 if (flavor && flavor <= maxattr)
138 tb[flavor-1] = nfa;
139 nfa = NFA_NEXT(nfa, len);
140 }
141
142 return 0;
143}
144
145/**
146 * nfnetlink_check_attributes - check and parse nfnetlink attributes
147 *
148 * subsys: nfnl subsystem for which this message is to be parsed
149 * nlmsghdr: netlink message to be checked/parsed
150 * cda: array of pointers, needs to be at least subsys->attr_count big
151 *
152 */
153static int
154nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys,
155 struct nlmsghdr *nlh, struct nfattr *cda[])
156{
157 int min_len;
158 u_int16_t attr_count;
159 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
160
161 if (unlikely(cb_id >= subsys->cb_count)) {
162 DEBUGP("msgtype %u >= %u, returning\n",
163 cb_id, subsys->cb_count);
164 return -EINVAL;
165 }
166
167 min_len = NLMSG_ALIGN(sizeof(struct nfgenmsg));
168 if (unlikely(nlh->nlmsg_len < min_len))
169 return -EINVAL;
170
171 attr_count = subsys->cb[cb_id].attr_count;
172 memset(cda, 0, sizeof(struct nfattr *) * attr_count);
173
174 /* check attribute lengths. */
175 if (likely(nlh->nlmsg_len > min_len)) {
176 struct nfattr *attr = NFM_NFA(NLMSG_DATA(nlh));
177 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
178
179 while (NFA_OK(attr, attrlen)) {
180 unsigned flavor = attr->nfa_type;
181 if (flavor) {
182 if (flavor > attr_count)
183 return -EINVAL;
184 cda[flavor - 1] = attr;
185 }
186 attr = NFA_NEXT(attr, attrlen);
187 }
188 }
189
190 /* implicit: if nlmsg_len == min_len, we return 0, and an empty
191 * (zeroed) cda[] array. The message is valid, but empty. */
192
193 return 0;
194}
195
196int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
197{
198 int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
199 int err = 0;
200
201 NETLINK_CB(skb).dst_group = group;
202 if (echo)
203 atomic_inc(&skb->users);
204 netlink_broadcast(nfnl, skb, pid, group, allocation);
205 if (echo)
206 err = netlink_unicast(nfnl, skb, pid, MSG_DONTWAIT);
207
208 return err;
209}
210
211int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags)
212{
213 return netlink_unicast(nfnl, skb, pid, flags);
214}
215
216/* Process one complete nfnetlink message. */
217static inline int nfnetlink_rcv_msg(struct sk_buff *skb,
218 struct nlmsghdr *nlh, int *errp)
219{
220 struct nfnl_callback *nc;
221 struct nfnetlink_subsystem *ss;
222 int type, err = 0;
223
224 DEBUGP("entered; subsys=%u, msgtype=%u\n",
225 NFNL_SUBSYS_ID(nlh->nlmsg_type),
226 NFNL_MSG_TYPE(nlh->nlmsg_type));
227
228 /* Only requests are handled by kernel now. */
229 if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
230 DEBUGP("received non-request message\n");
231 return 0;
232 }
233
234 /* All the messages must at least contain nfgenmsg */
235 if (nlh->nlmsg_len <
236 NLMSG_LENGTH(NLMSG_ALIGN(sizeof(struct nfgenmsg)))) {
237 DEBUGP("received message was too short\n");
238 return 0;
239 }
240
241 type = nlh->nlmsg_type;
242 ss = nfnetlink_get_subsys(type);
243 if (!ss) {
244#ifdef CONFIG_KMOD
245 /* don't call nfnl_shunlock, since it would reenter
246 * with further packet processing */
247 up(&nfnl_sem);
248 request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
249 nfnl_shlock();
250 ss = nfnetlink_get_subsys(type);
251 if (!ss)
252#endif
253 goto err_inval;
254 }
255
256 nc = nfnetlink_find_client(type, ss);
257 if (!nc) {
258 DEBUGP("unable to find client for type %d\n", type);
259 goto err_inval;
260 }
261
262 if (nc->cap_required &&
263 !cap_raised(NETLINK_CB(skb).eff_cap, nc->cap_required)) {
264 DEBUGP("permission denied for type %d\n", type);
265 *errp = -EPERM;
266 return -1;
267 }
268
269 {
270 u_int16_t attr_count =
271 ss->cb[NFNL_MSG_TYPE(nlh->nlmsg_type)].attr_count;
272 struct nfattr *cda[attr_count];
273
274 memset(cda, 0, sizeof(struct nfattr *) * attr_count);
275
276 err = nfnetlink_check_attributes(ss, nlh, cda);
277 if (err < 0)
278 goto err_inval;
279
280 DEBUGP("calling handler\n");
281 err = nc->call(nfnl, skb, nlh, cda, errp);
282 *errp = err;
283 return err;
284 }
285
286err_inval:
287 DEBUGP("returning -EINVAL\n");
288 *errp = -EINVAL;
289 return -1;
290}
291
292/* Process one packet of messages. */
293static inline int nfnetlink_rcv_skb(struct sk_buff *skb)
294{
295 int err;
296 struct nlmsghdr *nlh;
297
298 while (skb->len >= NLMSG_SPACE(0)) {
299 u32 rlen;
300
301 nlh = (struct nlmsghdr *)skb->data;
302 if (nlh->nlmsg_len < sizeof(struct nlmsghdr)
303 || skb->len < nlh->nlmsg_len)
304 return 0;
305 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
306 if (rlen > skb->len)
307 rlen = skb->len;
308 if (nfnetlink_rcv_msg(skb, nlh, &err)) {
309 if (!err)
310 return -1;
311 netlink_ack(skb, nlh, err);
312 } else
313 if (nlh->nlmsg_flags & NLM_F_ACK)
314 netlink_ack(skb, nlh, 0);
315 skb_pull(skb, rlen);
316 }
317
318 return 0;
319}
320
321static void nfnetlink_rcv(struct sock *sk, int len)
322{
323 do {
324 struct sk_buff *skb;
325
326 if (nfnl_shlock_nowait())
327 return;
328
329 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
330 if (nfnetlink_rcv_skb(skb)) {
331 if (skb->len)
332 skb_queue_head(&sk->sk_receive_queue,
333 skb);
334 else
335 kfree_skb(skb);
336 break;
337 }
338 kfree_skb(skb);
339 }
340
341 /* don't call nfnl_shunlock, since it would reenter
342 * with further packet processing */
343 up(&nfnl_sem);
344 } while(nfnl && nfnl->sk_receive_queue.qlen);
345}
346
347void __exit nfnetlink_exit(void)
348{
349 printk("Removing netfilter NETLINK layer.\n");
350 sock_release(nfnl->sk_socket);
351 return;
352}
353
354int __init nfnetlink_init(void)
355{
356 printk("Netfilter messages via NETLINK v%s.\n", nfversion);
357
358 nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX,
359 nfnetlink_rcv, THIS_MODULE);
360 if (!nfnl) {
361 printk(KERN_ERR "cannot initialize nfnetlink!\n");
362 return -1;
363 }
364
365 return 0;
366}
367
368module_init(nfnetlink_init);
369module_exit(nfnetlink_exit);
370
371EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
372EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
373EXPORT_SYMBOL_GPL(nfnetlink_send);
374EXPORT_SYMBOL_GPL(nfnetlink_unicast);
375EXPORT_SYMBOL_GPL(nfattr_parse);
376EXPORT_SYMBOL_GPL(__nfa_fill);
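
To tie the pieces together, a hypothetical subsystem registration; the nfnl_callback layout (call, attr_count, cap_required) and the call() signature are inferred from their uses in nfnetlink_rcv_msg() above, and the subsys_id is a placeholder, so treat this as a sketch.

    /* Hypothetical nfnetlink subsystem, for illustration only. */
    #include <linux/module.h>
    #include <linux/capability.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter/nfnetlink.h>

    #define MY_MSG_TYPE     0       /* message 0 within this subsystem */
    #define MY_ATTR_COUNT   4       /* made-up attribute count */

    static int my_call(struct sock *nfnl, struct sk_buff *skb,
                       struct nlmsghdr *nlh, struct nfattr *cda[], int *errp)
    {
            /* cda[] has already been filled in by nfnetlink_check_attributes();
             * cda[n] is attribute type n+1, or NULL if absent. */
            return 0;
    }

    static struct nfnl_callback my_cb[] = {
            [MY_MSG_TYPE] = {
                    .call         = my_call,
                    .attr_count   = MY_ATTR_COUNT,
                    .cap_required = CAP_NET_ADMIN, /* checked against eff_cap */
            },
    };

    static struct nfnetlink_subsystem my_subsys = {
            .name      = "my_subsys",
            .subsys_id = NFNL_SUBSYS_COUNT - 1,     /* placeholder ID */
            .cb_count  = ARRAY_SIZE(my_cb),
            .cb        = my_cb,
    };

    /* nfnetlink_subsys_register(&my_subsys) returns -EBUSY if the slot is
     * taken; unregister with nfnetlink_subsys_unregister(&my_subsys). */
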
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
new file mode 100644
index 000000000000..ff5601ceedcb
--- /dev/null
+++ b/net/netfilter/nfnetlink_log.c
@@ -0,0 +1,1055 @@
1/*
2 * This is a module which is used for logging packets to userspace via
3 * nfnetlink.
4 *
5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
6 *
7 * Based on the old ipv4-only ipt_ULOG.c:
8 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/init.h>
18#include <linux/ip.h>
19#include <linux/ipv6.h>
20#include <linux/netdevice.h>
21#include <linux/netfilter.h>
22#include <linux/netlink.h>
23#include <linux/netfilter/nfnetlink.h>
24#include <linux/netfilter/nfnetlink_log.h>
25#include <linux/spinlock.h>
26#include <linux/sysctl.h>
27#include <linux/proc_fs.h>
28#include <linux/security.h>
29#include <linux/list.h>
30#include <linux/jhash.h>
31#include <linux/random.h>
32#include <net/sock.h>
33
34#include <asm/atomic.h>
35
36#ifdef CONFIG_BRIDGE_NETFILTER
37#include "../bridge/br_private.h"
38#endif
39
40#define NFULNL_NLBUFSIZ_DEFAULT 4096
41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
43
44#define PRINTR(x, args...) do { if (net_ratelimit()) \
45			printk(x, ## args); } while (0)
46
47#if 0
48#define UDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
49 __FILE__, __LINE__, __FUNCTION__, \
50 ## args)
51#else
52#define UDEBUG(x, ...)
53#endif
54
55struct nfulnl_instance {
56 struct hlist_node hlist; /* global list of instances */
57 spinlock_t lock;
58 atomic_t use; /* use count */
59
60 unsigned int qlen; /* number of nlmsgs in skb */
61	struct sk_buff *skb;		/* pre-allocated skb */
62 struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
63 struct timer_list timer;
64 int peer_pid; /* PID of the peer process */
65
66 /* configurable parameters */
67 unsigned int flushtimeout; /* timeout until queue flush */
68 unsigned int nlbufsiz; /* netlink buffer allocation size */
69 unsigned int qthreshold; /* threshold of the queue */
70 u_int32_t copy_range;
71 u_int16_t group_num; /* number of this queue */
72 u_int8_t copy_mode;
73};
74
75static DEFINE_RWLOCK(instances_lock);
76
77#define INSTANCE_BUCKETS 16
78static struct hlist_head instance_table[INSTANCE_BUCKETS];
79static unsigned int hash_init;
80
81static inline u_int8_t instance_hashfn(u_int16_t group_num)
82{
83 return ((group_num & 0xff) % INSTANCE_BUCKETS);
84}
85
86static struct nfulnl_instance *
87__instance_lookup(u_int16_t group_num)
88{
89 struct hlist_head *head;
90 struct hlist_node *pos;
91 struct nfulnl_instance *inst;
92
93 UDEBUG("entering (group_num=%u)\n", group_num);
94
95 head = &instance_table[instance_hashfn(group_num)];
96 hlist_for_each_entry(inst, pos, head, hlist) {
97 if (inst->group_num == group_num)
98 return inst;
99 }
100 return NULL;
101}
102
103static inline void
104instance_get(struct nfulnl_instance *inst)
105{
106 atomic_inc(&inst->use);
107}
108
109static struct nfulnl_instance *
110instance_lookup_get(u_int16_t group_num)
111{
112 struct nfulnl_instance *inst;
113
114 read_lock_bh(&instances_lock);
115 inst = __instance_lookup(group_num);
116 if (inst)
117 instance_get(inst);
118 read_unlock_bh(&instances_lock);
119
120 return inst;
121}
122
123static void
124instance_put(struct nfulnl_instance *inst)
125{
126 if (inst && atomic_dec_and_test(&inst->use)) {
127 UDEBUG("kfree(inst=%p)\n", inst);
128 kfree(inst);
129 }
130}
131
132static void nfulnl_timer(unsigned long data);
133
134static struct nfulnl_instance *
135instance_create(u_int16_t group_num, int pid)
136{
137 struct nfulnl_instance *inst;
138
139 UDEBUG("entering (group_num=%u, pid=%d)\n", group_num,
140 pid);
141
142 write_lock_bh(&instances_lock);
143 if (__instance_lookup(group_num)) {
144 inst = NULL;
145 UDEBUG("aborting, instance already exists\n");
146 goto out_unlock;
147 }
148
149 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
150 if (!inst)
151 goto out_unlock;
152
153 memset(inst, 0, sizeof(*inst));
154 INIT_HLIST_NODE(&inst->hlist);
155 inst->lock = SPIN_LOCK_UNLOCKED;
156 /* needs to be two, since we _put() after creation */
157 atomic_set(&inst->use, 2);
158
159 init_timer(&inst->timer);
160 inst->timer.function = nfulnl_timer;
161 inst->timer.data = (unsigned long)inst;
162 /* don't start timer yet. (re)start it with every packet */
163
164 inst->peer_pid = pid;
165 inst->group_num = group_num;
166
167 inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
168 inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
169 inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
170 inst->copy_mode = NFULNL_COPY_PACKET;
171 inst->copy_range = 0xffff;
172
173 if (!try_module_get(THIS_MODULE))
174 goto out_free;
175
176 hlist_add_head(&inst->hlist,
177 &instance_table[instance_hashfn(group_num)]);
178
179 UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
180 inst->hlist.next);
181
182 write_unlock_bh(&instances_lock);
183
184 return inst;
185
186out_free:
187 instance_put(inst);
188out_unlock:
189 write_unlock_bh(&instances_lock);
190 return NULL;
191}
192
193static int __nfulnl_send(struct nfulnl_instance *inst);
194
195static void
196_instance_destroy2(struct nfulnl_instance *inst, int lock)
197{
198 /* first pull it out of the global list */
199 if (lock)
200 write_lock_bh(&instances_lock);
201
202 UDEBUG("removing instance %p (queuenum=%u) from hash\n",
203 inst, inst->group_num);
204
205 hlist_del(&inst->hlist);
206
207 if (lock)
208 write_unlock_bh(&instances_lock);
209
210 /* then flush all pending packets from skb */
211
212 spin_lock_bh(&inst->lock);
213 if (inst->skb) {
214 if (inst->qlen)
215 __nfulnl_send(inst);
216 if (inst->skb) {
217 kfree_skb(inst->skb);
218 inst->skb = NULL;
219 }
220 }
221 spin_unlock_bh(&inst->lock);
222
223 /* and finally put the refcount */
224 instance_put(inst);
225
226 module_put(THIS_MODULE);
227}
228
229static inline void
230__instance_destroy(struct nfulnl_instance *inst)
231{
232 _instance_destroy2(inst, 0);
233}
234
235static inline void
236instance_destroy(struct nfulnl_instance *inst)
237{
238 _instance_destroy2(inst, 1);
239}
240
241static int
242nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
243 unsigned int range)
244{
245 int status = 0;
246
247 spin_lock_bh(&inst->lock);
248
249 switch (mode) {
250 case NFULNL_COPY_NONE:
251 case NFULNL_COPY_META:
252 inst->copy_mode = mode;
253 inst->copy_range = 0;
254 break;
255
256 case NFULNL_COPY_PACKET:
257 inst->copy_mode = mode;
258 /* we're using struct nfattr which has 16bit nfa_len */
259 if (range > 0xffff)
260 inst->copy_range = 0xffff;
261 else
262 inst->copy_range = range;
263 break;
264
265 default:
266 status = -EINVAL;
267 break;
268 }
269
270 spin_unlock_bh(&inst->lock);
271
272 return status;
273}
274
275static int
276nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
277{
278 int status;
279
280 spin_lock_bh(&inst->lock);
281 if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
282 status = -ERANGE;
283 else if (nlbufsiz > 131072)
284 status = -ERANGE;
285 else {
286 inst->nlbufsiz = nlbufsiz;
287 status = 0;
288 }
289 spin_unlock_bh(&inst->lock);
290
291 return status;
292}
293
294static int
295nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
296{
297 spin_lock_bh(&inst->lock);
298 inst->flushtimeout = timeout;
299 spin_unlock_bh(&inst->lock);
300
301 return 0;
302}
303
304static int
305nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
306{
307 spin_lock_bh(&inst->lock);
308 inst->qthreshold = qthresh;
309 spin_unlock_bh(&inst->lock);
310
311 return 0;
312}
313
314static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
315 unsigned int pkt_size)
316{
317 struct sk_buff *skb;
318
319 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
320
321 /* alloc skb which should be big enough for a whole multipart
322 * message. WARNING: has to be <= 128k due to slab restrictions */
323
324 skb = alloc_skb(inst_size, GFP_ATOMIC);
325 if (!skb) {
326 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
327 inst_size);
328
329 /* try to allocate only as much as we need for current
330 * packet */
331
332 skb = alloc_skb(pkt_size, GFP_ATOMIC);
333 if (!skb)
334 PRINTR("nfnetlink_log: can't even alloc %u bytes\n",
335 pkt_size);
336 }
337
338 return skb;
339}
340
341static int
342__nfulnl_send(struct nfulnl_instance *inst)
343{
344 int status;
345
346 if (timer_pending(&inst->timer))
347 del_timer(&inst->timer);
348
349 if (inst->qlen > 1)
350 inst->lastnlh->nlmsg_type = NLMSG_DONE;
351
352 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
353 if (status < 0) {
354 UDEBUG("netlink_unicast() failed\n");
355 /* FIXME: statistics */
356 }
357
358 inst->qlen = 0;
359 inst->skb = NULL;
360 inst->lastnlh = NULL;
361
362 return status;
363}
364
365static void nfulnl_timer(unsigned long data)
366{
367 struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
368
369 UDEBUG("timer function called, flushing buffer\n");
370
371 spin_lock_bh(&inst->lock);
372 __nfulnl_send(inst);
373 instance_put(inst);
374 spin_unlock_bh(&inst->lock);
375}
376
377static inline int
378__build_packet_message(struct nfulnl_instance *inst,
379 const struct sk_buff *skb,
380 unsigned int data_len,
381 unsigned int pf,
382 unsigned int hooknum,
383 const struct net_device *indev,
384 const struct net_device *outdev,
385 const struct nf_loginfo *li,
386 const char *prefix)
387{
388 unsigned char *old_tail;
389 struct nfulnl_msg_packet_hdr pmsg;
390 struct nlmsghdr *nlh;
391 struct nfgenmsg *nfmsg;
392 u_int32_t tmp_uint;
393
394 UDEBUG("entered\n");
395
396 old_tail = inst->skb->tail;
397 nlh = NLMSG_PUT(inst->skb, 0, 0,
398 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
399 sizeof(struct nfgenmsg));
400 nfmsg = NLMSG_DATA(nlh);
401 nfmsg->nfgen_family = pf;
402 nfmsg->version = NFNETLINK_V0;
403 nfmsg->res_id = htons(inst->group_num);
404
405 pmsg.hw_protocol = htons(skb->protocol);
406 pmsg.hook = hooknum;
407
408 NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
409
410 if (prefix) {
411 int slen = strlen(prefix);
412 if (slen > NFULNL_PREFIXLEN)
413 slen = NFULNL_PREFIXLEN;
414 NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
415 }
416
417 if (indev) {
418 tmp_uint = htonl(indev->ifindex);
419#ifndef CONFIG_BRIDGE_NETFILTER
420 NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
421 &tmp_uint);
422#else
423 if (pf == PF_BRIDGE) {
 424 /* Case 1: indev is physical input device, we need to
425 * look for bridge group (when called from
426 * netfilter_bridge) */
427 NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
428 sizeof(tmp_uint), &tmp_uint);
429 /* this is the bridge group "brX" */
430 tmp_uint = htonl(indev->br_port->br->dev->ifindex);
431 NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
432 sizeof(tmp_uint), &tmp_uint);
433 } else {
434 /* Case 2: indev is bridge group, we need to look for
435 * physical device (when called from ipv4) */
436 NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
437 sizeof(tmp_uint), &tmp_uint);
438 if (skb->nf_bridge && skb->nf_bridge->physindev) {
439 tmp_uint =
440 htonl(skb->nf_bridge->physindev->ifindex);
441 NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
442 sizeof(tmp_uint), &tmp_uint);
443 }
444 }
445#endif
446 }
447
448 if (outdev) {
449 tmp_uint = htonl(outdev->ifindex);
450#ifndef CONFIG_BRIDGE_NETFILTER
451 NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
452 &tmp_uint);
453#else
454 if (pf == PF_BRIDGE) {
455 /* Case 1: outdev is physical output device, we need to
456 * look for bridge group (when called from
457 * netfilter_bridge) */
458 NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
459 sizeof(tmp_uint), &tmp_uint);
460 /* this is the bridge group "brX" */
461 tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
462 NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
463 sizeof(tmp_uint), &tmp_uint);
464 } else {
 465 /* Case 2: outdev is a bridge group, we need to look
 466 * for the physical output device (when called from ipv4) */
467 NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
468 sizeof(tmp_uint), &tmp_uint);
 469 if (skb->nf_bridge && skb->nf_bridge->physoutdev) {
470 tmp_uint =
471 htonl(skb->nf_bridge->physoutdev->ifindex);
472 NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
473 sizeof(tmp_uint), &tmp_uint);
474 }
475 }
476#endif
477 }
478
479 if (skb->nfmark) {
480 tmp_uint = htonl(skb->nfmark);
481 NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
482 }
483
484 if (indev && skb->dev && skb->dev->hard_header_parse) {
485 struct nfulnl_msg_packet_hw phw;
486
487 phw.hw_addrlen =
488 skb->dev->hard_header_parse((struct sk_buff *)skb,
489 phw.hw_addr);
490 phw.hw_addrlen = htons(phw.hw_addrlen);
491 NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
492 }
493
494 if (skb->tstamp.off_sec) {
495 struct nfulnl_msg_packet_timestamp ts;
496
497 ts.sec = cpu_to_be64(skb_tv_base.tv_sec + skb->tstamp.off_sec);
498 ts.usec = cpu_to_be64(skb_tv_base.tv_usec + skb->tstamp.off_usec);
499
500 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
501 }
502
503 /* UID */
504 if (skb->sk) {
505 read_lock_bh(&skb->sk->sk_callback_lock);
506 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
507 u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
508 /* need to unlock here since NFA_PUT may goto */
509 read_unlock_bh(&skb->sk->sk_callback_lock);
510 NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
511 } else
512 read_unlock_bh(&skb->sk->sk_callback_lock);
513 }
514
515 if (data_len) {
516 struct nfattr *nfa;
517 int size = NFA_LENGTH(data_len);
518
519 if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
520 printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
521 goto nlmsg_failure;
522 }
523
524 nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
525 nfa->nfa_type = NFULA_PAYLOAD;
526 nfa->nfa_len = size;
527
528 if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
529 BUG();
530 }
531
532 nlh->nlmsg_len = inst->skb->tail - old_tail;
533 return 0;
534
535nlmsg_failure:
536 UDEBUG("nlmsg_failure\n");
537nfattr_failure:
538 PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
539 return -1;
540}
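/*
 * Editorial sketch: the message laid out above is an nfgenmsg followed
 * by a flat run of nfattr TLVs.  A consumer might walk them with the
 * NFA_* helpers from <linux/netfilter/nfnetlink.h>:
 *
 *	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
 *	struct nfattr *nfa = (struct nfattr *)(nfmsg + 1);
 *	int attrlen = nlh->nlmsg_len - NLMSG_SPACE(sizeof(*nfmsg));
 *
 *	while (NFA_OK(nfa, attrlen)) {
 *		if (nfa->nfa_type == NFULA_PREFIX)
 *			printf("%.*s\n", NFA_PAYLOAD(nfa),
 *			       (char *)NFA_DATA(nfa));
 *		nfa = NFA_NEXT(nfa, attrlen);
 *	}
 *
 * (NFULA_PREFIX is not NUL-terminated on the wire, hence the
 * length-bounded print.)
 */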
541
542#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
543
544static struct nf_loginfo default_loginfo = {
545 .type = NF_LOG_TYPE_ULOG,
546 .u = {
547 .ulog = {
548 .copy_len = 0xffff,
549 .group = 0,
550 .qthreshold = 1,
551 },
552 },
553};
554
555/* log handler for internal netfilter logging api */
556static void
557nfulnl_log_packet(unsigned int pf,
558 unsigned int hooknum,
559 const struct sk_buff *skb,
560 const struct net_device *in,
561 const struct net_device *out,
562 const struct nf_loginfo *li_user,
563 const char *prefix)
564{
565 unsigned int size, data_len;
566 struct nfulnl_instance *inst;
567 const struct nf_loginfo *li;
568 unsigned int qthreshold;
569 unsigned int nlbufsiz;
570
571 if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
572 li = li_user;
573 else
574 li = &default_loginfo;
575
576 inst = instance_lookup_get(li->u.ulog.group);
577 if (!inst)
578 inst = instance_lookup_get(0);
579 if (!inst) {
580 PRINTR("nfnetlink_log: trying to log packet, "
581 "but no instance for group %u\n", li->u.ulog.group);
582 return;
583 }
584
585 /* all macros expand to constant values at compile time */
586 /* FIXME: do we want to make the size calculation conditional based on
587 * what is actually present? way more branches and checks, but more
588 * memory efficient... */
589 size = NLMSG_SPACE(sizeof(struct nfgenmsg))
590 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
591 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
592 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
593#ifdef CONFIG_BRIDGE_NETFILTER
594 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
595 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
596#endif
597 + NFA_SPACE(sizeof(u_int32_t)) /* mark */
598 + NFA_SPACE(sizeof(u_int32_t)) /* uid */
599 + NFA_SPACE(NFULNL_PREFIXLEN) /* prefix */
600 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
601 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));
602
603 UDEBUG("initial size=%u\n", size);
604
605 spin_lock_bh(&inst->lock);
606
607 qthreshold = inst->qthreshold;
608 /* per-rule qthreshold overrides per-instance */
609 if (qthreshold > li->u.ulog.qthreshold)
610 qthreshold = li->u.ulog.qthreshold;
611
612 switch (inst->copy_mode) {
613 case NFULNL_COPY_META:
614 case NFULNL_COPY_NONE:
615 data_len = 0;
616 break;
617
618 case NFULNL_COPY_PACKET:
619 if (inst->copy_range == 0
620 || inst->copy_range > skb->len)
621 data_len = skb->len;
622 else
623 data_len = inst->copy_range;
624
625 size += NFA_SPACE(data_len);
626 UDEBUG("copy_packet, therefore size now %u\n", size);
627 break;
628
629 default:
630 spin_unlock_bh(&inst->lock);
631 instance_put(inst);
632 return;
633 }
634
635 if (size > inst->nlbufsiz)
636 nlbufsiz = size;
637 else
638 nlbufsiz = inst->nlbufsiz;
639
640 if (!inst->skb) {
641 if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
642 UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
643 inst->nlbufsiz, size);
644 goto alloc_failure;
645 }
646 } else if (inst->qlen >= qthreshold ||
647 size > skb_tailroom(inst->skb)) {
648 /* either the queue len is too high or we don't have
649 * enough room in the skb left. flush to userspace. */
650 UDEBUG("flushing old skb\n");
651
652 __nfulnl_send(inst);
653
654 if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
655 UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
656 inst->nlbufsiz, size);
657 goto alloc_failure;
658 }
659 }
660
661 UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
662 inst->qlen++;
663
664 __build_packet_message(inst, skb, data_len, pf,
665 hooknum, in, out, li, prefix);
666
667 /* timer_pending always called within inst->lock, so there
668 * is no chance of a race here */
669 if (!timer_pending(&inst->timer)) {
670 instance_get(inst);
671 inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
672 add_timer(&inst->timer);
673 }
674 spin_unlock_bh(&inst->lock);
675
676 return;
677
678alloc_failure:
679 spin_unlock_bh(&inst->lock);
680 instance_put(inst);
681 UDEBUG("error allocating skb\n");
682 /* FIXME: statistics */
683}
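/*
 * Editorial note: flushtimeout is in centiseconds - the timer above is
 * armed at jiffies + flushtimeout * HZ / 100.  So a configuration of
 *
 *	u_int32_t timeout = htonl(100);	// NFULA_CFG_TIMEOUT, 1 second
 *
 * guarantees a queued batch is pushed to userspace at most one second
 * after the first packet, even if qthreshold is never reached.
 */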
684
685static int
686nfulnl_rcv_nl_event(struct notifier_block *this,
687 unsigned long event, void *ptr)
688{
689 struct netlink_notify *n = ptr;
690
691 if (event == NETLINK_URELEASE &&
692 n->protocol == NETLINK_NETFILTER && n->pid) {
693 int i;
694
695 /* destroy all instances for this pid */
696 write_lock_bh(&instances_lock);
697 for (i = 0; i < INSTANCE_BUCKETS; i++) {
698 struct hlist_node *tmp, *t2;
699 struct nfulnl_instance *inst;
700 struct hlist_head *head = &instance_table[i];
701
702 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
703 UDEBUG("node = %p\n", inst);
704 if (n->pid == inst->peer_pid)
705 __instance_destroy(inst);
706 }
707 }
708 write_unlock_bh(&instances_lock);
709 }
710 return NOTIFY_DONE;
711}
712
713static struct notifier_block nfulnl_rtnl_notifier = {
714 .notifier_call = nfulnl_rcv_nl_event,
715};
716
717static int
718nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
719 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
720{
721 return -ENOTSUPP;
722}
723
724static struct nf_logger nfulnl_logger = {
725 .name = "nfnetlink_log",
726 .logfn = &nfulnl_log_packet,
727 .me = THIS_MODULE,
728};
729
730static const int nfula_min[NFULA_MAX] = {
731 [NFULA_PACKET_HDR-1] = sizeof(struct nfulnl_msg_packet_hdr),
732 [NFULA_MARK-1] = sizeof(u_int32_t),
733 [NFULA_TIMESTAMP-1] = sizeof(struct nfulnl_msg_packet_timestamp),
734 [NFULA_IFINDEX_INDEV-1] = sizeof(u_int32_t),
735 [NFULA_IFINDEX_OUTDEV-1]= sizeof(u_int32_t),
736 [NFULA_HWADDR-1] = sizeof(struct nfulnl_msg_packet_hw),
737 [NFULA_PAYLOAD-1] = 0,
738 [NFULA_PREFIX-1] = 0,
739 [NFULA_UID-1] = sizeof(u_int32_t),
740};
741
742static const int nfula_cfg_min[NFULA_CFG_MAX] = {
743 [NFULA_CFG_CMD-1] = sizeof(struct nfulnl_msg_config_cmd),
744 [NFULA_CFG_MODE-1] = sizeof(struct nfulnl_msg_config_mode),
745 [NFULA_CFG_TIMEOUT-1] = sizeof(u_int32_t),
746 [NFULA_CFG_QTHRESH-1] = sizeof(u_int32_t),
747 [NFULA_CFG_NLBUFSIZ-1] = sizeof(u_int32_t),
748};
749
750static int
751nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
752 struct nlmsghdr *nlh, struct nfattr *nfula[], int *errp)
753{
754 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
755 u_int16_t group_num = ntohs(nfmsg->res_id);
756 struct nfulnl_instance *inst;
757 int ret = 0;
758
759 UDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));
760
761 if (nfattr_bad_size(nfula, NFULA_CFG_MAX, nfula_cfg_min)) {
762 UDEBUG("bad attribute size\n");
763 return -EINVAL;
764 }
765
766 inst = instance_lookup_get(group_num);
767 if (nfula[NFULA_CFG_CMD-1]) {
768 u_int8_t pf = nfmsg->nfgen_family;
769 struct nfulnl_msg_config_cmd *cmd;
770 cmd = NFA_DATA(nfula[NFULA_CFG_CMD-1]);
771 UDEBUG("found CFG_CMD for\n");
772
773 switch (cmd->command) {
774 case NFULNL_CFG_CMD_BIND:
775 if (inst) {
776 ret = -EBUSY;
777 goto out_put;
778 }
779
780 inst = instance_create(group_num,
781 NETLINK_CB(skb).pid);
782 if (!inst) {
783 ret = -EINVAL;
784 goto out_put;
785 }
786 break;
787 case NFULNL_CFG_CMD_UNBIND:
788 if (!inst) {
789 ret = -ENODEV;
790 goto out_put;
791 }
792
793 if (inst->peer_pid != NETLINK_CB(skb).pid) {
794 ret = -EPERM;
795 goto out_put;
796 }
797
798 instance_destroy(inst);
799 break;
800 case NFULNL_CFG_CMD_PF_BIND:
801 UDEBUG("registering log handler for pf=%u\n", pf);
802 ret = nf_log_register(pf, &nfulnl_logger);
803 break;
804 case NFULNL_CFG_CMD_PF_UNBIND:
805 UDEBUG("unregistering log handler for pf=%u\n", pf);
806 /* This is a bug and a feature. We cannot unregister
 807 * other handlers, like nfnetlink_queue can */
808 nf_log_unregister_pf(pf);
809 break;
810 default:
811 ret = -EINVAL;
812 break;
813 }
814 } else {
815 if (!inst) {
816 UDEBUG("no config command, and no instance for "
817 "group=%u pid=%u =>ENOENT\n",
818 group_num, NETLINK_CB(skb).pid);
819 ret = -ENOENT;
820 goto out_put;
821 }
822
823 if (inst->peer_pid != NETLINK_CB(skb).pid) {
824 UDEBUG("no config command, and wrong pid\n");
825 ret = -EPERM;
826 goto out_put;
827 }
828 }
829
830 if (nfula[NFULA_CFG_MODE-1]) {
831 struct nfulnl_msg_config_mode *params;
832 params = NFA_DATA(nfula[NFULA_CFG_MODE-1]);
833
834 nfulnl_set_mode(inst, params->copy_mode,
835 ntohs(params->copy_range));
836 }
837
838 if (nfula[NFULA_CFG_TIMEOUT-1]) {
839 u_int32_t timeout =
840 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]);
841
842 nfulnl_set_timeout(inst, ntohl(timeout));
843 }
844
845 if (nfula[NFULA_CFG_NLBUFSIZ-1]) {
846 u_int32_t nlbufsiz =
847 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]);
848
849 nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
850 }
851
852 if (nfula[NFULA_CFG_QTHRESH-1]) {
853 u_int32_t qthresh =
 854 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_QTHRESH-1]);
855
856 nfulnl_set_qthresh(inst, ntohl(qthresh));
857 }
858
859out_put:
860 instance_put(inst);
861 return ret;
862}
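/*
 * Editorial sketch (hypothetical userspace, layout simplified, no
 * error handling): the minimal handshake this handler expects is a
 * PF_BIND to attach nfulnl_logger to a protocol family, then a BIND
 * to create an instance owned by the sender's netlink pid:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct nfgenmsg nfmsg;
 *		struct nfattr nfa;
 *		struct nfulnl_msg_config_cmd cmd;
 *	} req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nlh.nlmsg_len   = sizeof(req);
 *	req.nlh.nlmsg_type  = (NFNL_SUBSYS_ULOG << 8) | NFULNL_MSG_CONFIG;
 *	req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *	req.nfmsg.nfgen_family = AF_INET;
 *	req.nfmsg.res_id    = htons(group_num);	// 0 = default group
 *	req.nfa.nfa_len     = NFA_LENGTH(sizeof(req.cmd));
 *	req.nfa.nfa_type    = NFULA_CFG_CMD;
 *	req.cmd.command     = NFULNL_CFG_CMD_BIND;	// after a _PF_BIND
 *	send(nlfd, &req, sizeof(req), 0);
 */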
863
864static struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
865 [NFULNL_MSG_PACKET] = { .call = nfulnl_recv_unsupp,
866 .attr_count = NFULA_MAX,
867 .cap_required = CAP_NET_ADMIN, },
868 [NFULNL_MSG_CONFIG] = { .call = nfulnl_recv_config,
869 .attr_count = NFULA_CFG_MAX,
870 .cap_required = CAP_NET_ADMIN },
871};
872
873static struct nfnetlink_subsystem nfulnl_subsys = {
874 .name = "log",
875 .subsys_id = NFNL_SUBSYS_ULOG,
876 .cb_count = NFULNL_MSG_MAX,
877 .cb = nfulnl_cb,
878};
879
880#ifdef CONFIG_PROC_FS
881struct iter_state {
882 unsigned int bucket;
883};
884
885static struct hlist_node *get_first(struct seq_file *seq)
886{
887 struct iter_state *st = seq->private;
888
889 if (!st)
890 return NULL;
891
892 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
893 if (!hlist_empty(&instance_table[st->bucket]))
894 return instance_table[st->bucket].first;
895 }
896 return NULL;
897}
898
899static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
900{
901 struct iter_state *st = seq->private;
902
903 h = h->next;
904 while (!h) {
905 if (++st->bucket >= INSTANCE_BUCKETS)
906 return NULL;
907
908 h = instance_table[st->bucket].first;
909 }
910 return h;
911}
912
913static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
914{
915 struct hlist_node *head;
916 head = get_first(seq);
917
918 if (head)
919 while (pos && (head = get_next(seq, head)))
920 pos--;
921 return pos ? NULL : head;
922}
923
924static void *seq_start(struct seq_file *seq, loff_t *pos)
925{
926 read_lock_bh(&instances_lock);
927 return get_idx(seq, *pos);
928}
929
930static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
931{
932 (*pos)++;
933 return get_next(s, v);
934}
935
936static void seq_stop(struct seq_file *s, void *v)
937{
938 read_unlock_bh(&instances_lock);
939}
940
941static int seq_show(struct seq_file *s, void *v)
942{
943 const struct nfulnl_instance *inst = v;
944
945 return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
946 inst->group_num,
947 inst->peer_pid, inst->qlen,
948 inst->copy_mode, inst->copy_range,
949 inst->flushtimeout, atomic_read(&inst->use));
950}
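/*
 * Editorial example: each /proc/net/netfilter/nfnetlink_log line shows
 * group number, peer pid, queue length, copy mode, copy range, flush
 * timeout (centiseconds) and reference count, e.g.:
 *
 *	    1   4711     0 2 65535    100  1
 */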
951
952static struct seq_operations nful_seq_ops = {
953 .start = seq_start,
954 .next = seq_next,
955 .stop = seq_stop,
956 .show = seq_show,
957};
958
959static int nful_open(struct inode *inode, struct file *file)
960{
961 struct seq_file *seq;
962 struct iter_state *is;
963 int ret;
964
965 is = kmalloc(sizeof(*is), GFP_KERNEL);
966 if (!is)
967 return -ENOMEM;
968 memset(is, 0, sizeof(*is));
969 ret = seq_open(file, &nful_seq_ops);
970 if (ret < 0)
971 goto out_free;
972 seq = file->private_data;
973 seq->private = is;
974 return ret;
975out_free:
976 kfree(is);
977 return ret;
978}
979
980static struct file_operations nful_file_ops = {
981 .owner = THIS_MODULE,
982 .open = nful_open,
983 .read = seq_read,
984 .llseek = seq_lseek,
985 .release = seq_release_private,
986};
987
988#endif /* PROC_FS */
989
990static int
991init_or_cleanup(int init)
992{
993 int i, status = -ENOMEM;
994#ifdef CONFIG_PROC_FS
995 struct proc_dir_entry *proc_nful;
996#endif
997
998 if (!init)
999 goto cleanup;
1000
1001 for (i = 0; i < INSTANCE_BUCKETS; i++)
1002 INIT_HLIST_HEAD(&instance_table[i]);
1003
1004 /* it's not really all that important to have a random value, so
1005 * we can do this from the init function, even if there hasn't
1006 * been that much entropy yet */
1007 get_random_bytes(&hash_init, sizeof(hash_init));
1008
1009 netlink_register_notifier(&nfulnl_rtnl_notifier);
1010 status = nfnetlink_subsys_register(&nfulnl_subsys);
1011 if (status < 0) {
1012 printk(KERN_ERR "log: failed to create netlink socket\n");
1013 goto cleanup_netlink_notifier;
1014 }
1015
1016#ifdef CONFIG_PROC_FS
1017 proc_nful = create_proc_entry("nfnetlink_log", 0440,
1018 proc_net_netfilter);
1019 if (!proc_nful)
1020 goto cleanup_subsys;
1021 proc_nful->proc_fops = &nful_file_ops;
1022#endif
1023
1024 return status;
1025
1026cleanup:
1027 nf_log_unregister_logger(&nfulnl_logger);
1028#ifdef CONFIG_PROC_FS
1029 remove_proc_entry("nfnetlink_log", proc_net_netfilter);
1030cleanup_subsys:
1031#endif
1032 nfnetlink_subsys_unregister(&nfulnl_subsys);
1033cleanup_netlink_notifier:
1034 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
1035 return status;
1036}
1037
1038static int __init init(void)
1039{
1040
1041 return init_or_cleanup(1);
1042}
1043
1044static void __exit fini(void)
1045{
1046 init_or_cleanup(0);
1047}
1048
1049MODULE_DESCRIPTION("netfilter userspace logging");
1050MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1051MODULE_LICENSE("GPL");
1052MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
1053
1054module_init(init);
1055module_exit(fini);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
new file mode 100644
index 000000000000..e3a5285329af
--- /dev/null
+++ b/net/netfilter/nfnetlink_queue.c
@@ -0,0 +1,1132 @@
1/*
2 * This is a module which is used for queueing packets and communicating with
 3 * userspace via nfnetlink.
4 *
5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
6 *
7 * Based on the old ipv4-only ip_queue.c:
8 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
9 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 */
16#include <linux/module.h>
17#include <linux/skbuff.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/notifier.h>
21#include <linux/netdevice.h>
22#include <linux/netfilter.h>
23#include <linux/proc_fs.h>
24#include <linux/netfilter_ipv4.h>
25#include <linux/netfilter_ipv6.h>
26#include <linux/netfilter/nfnetlink.h>
27#include <linux/netfilter/nfnetlink_queue.h>
28#include <linux/list.h>
29#include <net/sock.h>
30
31#include <asm/atomic.h>
32
33#ifdef CONFIG_BRIDGE_NETFILTER
34#include "../bridge/br_private.h"
35#endif
36
37#define NFQNL_QMAX_DEFAULT 1024
38
39#if 0
40#define QDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
41 __FILE__, __LINE__, __FUNCTION__, \
42 ## args)
43#else
44#define QDEBUG(x, ...)
45#endif
46
47struct nfqnl_queue_entry {
48 struct list_head list;
49 struct nf_info *info;
50 struct sk_buff *skb;
51 unsigned int id;
52};
53
54struct nfqnl_instance {
55 struct hlist_node hlist; /* global list of queues */
56 atomic_t use;
57
58 int peer_pid;
59 unsigned int queue_maxlen;
60 unsigned int copy_range;
61 unsigned int queue_total;
62 unsigned int queue_dropped;
63 unsigned int queue_user_dropped;
64
65 atomic_t id_sequence; /* 'sequence' of pkt ids */
66
67 u_int16_t queue_num; /* number of this queue */
68 u_int8_t copy_mode;
69
70 spinlock_t lock;
71
72 struct list_head queue_list; /* packets in queue */
73};
74
75typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);
76
77static DEFINE_RWLOCK(instances_lock);
78
 79u_int64_t htonll(u_int64_t in)
 80{
 81 /* htonl() semantics extended to 64 bit: a no-op on big-endian
 82 * hosts, a byte swap on little-endian ones */
 83 return cpu_to_be64(in);
 84}
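/*
 * Editorial note: values converted with htonll() (the NFQA_TIMESTAMP
 * fields below) are fixed big-endian on the wire; a userspace reader
 * can decode them with any 64-bit swap helper, e.g. glibc's be64toh()
 * where available:
 *
 *	u_int64_t sec = be64toh(ts.sec);	// 'ts' is hypothetical here
 */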
89
90#define INSTANCE_BUCKETS 16
91static struct hlist_head instance_table[INSTANCE_BUCKETS];
92
93static inline u_int8_t instance_hashfn(u_int16_t queue_num)
94{
95 return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
96}
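/*
 * Editorial example: the fold above mixes both bytes of queue_num
 * before the modulo, e.g. queue_num 0x0102 hashes to
 * ((0x0102 >> 8) | 0x0102) % 16 == 0x0103 % 16 == 3.
 */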
97
98static struct nfqnl_instance *
99__instance_lookup(u_int16_t queue_num)
100{
101 struct hlist_head *head;
102 struct hlist_node *pos;
103 struct nfqnl_instance *inst;
104
105 head = &instance_table[instance_hashfn(queue_num)];
106 hlist_for_each_entry(inst, pos, head, hlist) {
107 if (inst->queue_num == queue_num)
108 return inst;
109 }
110 return NULL;
111}
112
113static struct nfqnl_instance *
114instance_lookup_get(u_int16_t queue_num)
115{
116 struct nfqnl_instance *inst;
117
118 read_lock_bh(&instances_lock);
119 inst = __instance_lookup(queue_num);
120 if (inst)
121 atomic_inc(&inst->use);
122 read_unlock_bh(&instances_lock);
123
124 return inst;
125}
126
127static void
128instance_put(struct nfqnl_instance *inst)
129{
130 if (inst && atomic_dec_and_test(&inst->use)) {
131 QDEBUG("kfree(inst=%p)\n", inst);
132 kfree(inst);
133 }
134}
135
136static struct nfqnl_instance *
137instance_create(u_int16_t queue_num, int pid)
138{
139 struct nfqnl_instance *inst;
140
141 QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);
142
143 write_lock_bh(&instances_lock);
144 if (__instance_lookup(queue_num)) {
145 inst = NULL;
146 QDEBUG("aborting, instance already exists\n");
147 goto out_unlock;
148 }
149
150 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
151 if (!inst)
152 goto out_unlock;
153
154 memset(inst, 0, sizeof(*inst));
155 inst->queue_num = queue_num;
156 inst->peer_pid = pid;
157 inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
 158 inst->copy_range = 0xffff;
159 inst->copy_mode = NFQNL_COPY_NONE;
160 atomic_set(&inst->id_sequence, 0);
161 /* needs to be two, since we _put() after creation */
162 atomic_set(&inst->use, 2);
 163 spin_lock_init(&inst->lock);
164 INIT_LIST_HEAD(&inst->queue_list);
165
166 if (!try_module_get(THIS_MODULE))
167 goto out_free;
168
169 hlist_add_head(&inst->hlist,
170 &instance_table[instance_hashfn(queue_num)]);
171
172 write_unlock_bh(&instances_lock);
173
174 QDEBUG("successfully created new instance\n");
175
176 return inst;
177
178out_free:
179 kfree(inst);
180out_unlock:
181 write_unlock_bh(&instances_lock);
182 return NULL;
183}
184
185static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);
186
187static void
188_instance_destroy2(struct nfqnl_instance *inst, int lock)
189{
190 /* first pull it out of the global list */
191 if (lock)
192 write_lock_bh(&instances_lock);
193
194 QDEBUG("removing instance %p (queuenum=%u) from hash\n",
195 inst, inst->queue_num);
196 hlist_del(&inst->hlist);
197
198 if (lock)
199 write_unlock_bh(&instances_lock);
200
201 /* then flush all pending skbs from the queue */
202 nfqnl_flush(inst, NF_DROP);
203
204 /* and finally put the refcount */
205 instance_put(inst);
206
207 module_put(THIS_MODULE);
208}
209
210static inline void
211__instance_destroy(struct nfqnl_instance *inst)
212{
213 _instance_destroy2(inst, 0);
214}
215
216static inline void
217instance_destroy(struct nfqnl_instance *inst)
218{
219 _instance_destroy2(inst, 1);
220}
221
222
223
224static void
225issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
226{
227 QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);
228
 229 /* The TCP input path (and probably other bits) assumes it is
 230 * called from softirq context, not from syscall context as
 231 * issue_verdict() is; e.g. it can deadlock on locks also taken
 232 * from timer softirq. We therefore emulate this with local_bh_disable() */
233
234 local_bh_disable();
235 nf_reinject(entry->skb, entry->info, verdict);
236 local_bh_enable();
237
238 kfree(entry);
239}
240
241static inline void
242__enqueue_entry(struct nfqnl_instance *queue,
243 struct nfqnl_queue_entry *entry)
244{
245 list_add(&entry->list, &queue->queue_list);
246 queue->queue_total++;
247}
248
249/*
250 * Find and return a queued entry matched by cmpfn, or return the last
251 * entry if cmpfn is NULL.
252 */
253static inline struct nfqnl_queue_entry *
254__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
255 unsigned long data)
256{
257 struct list_head *p;
258
259 list_for_each_prev(p, &queue->queue_list) {
260 struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p;
261
262 if (!cmpfn || cmpfn(entry, data))
263 return entry;
264 }
265 return NULL;
266}
267
268static inline void
269__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
270{
271 list_del(&entry->list);
272 q->queue_total--;
273}
274
275static inline struct nfqnl_queue_entry *
276__find_dequeue_entry(struct nfqnl_instance *queue,
277 nfqnl_cmpfn cmpfn, unsigned long data)
278{
279 struct nfqnl_queue_entry *entry;
280
281 entry = __find_entry(queue, cmpfn, data);
282 if (entry == NULL)
283 return NULL;
284
285 __dequeue_entry(queue, entry);
286 return entry;
287}
288
289
290static inline void
291__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
292{
293 struct nfqnl_queue_entry *entry;
294
295 while ((entry = __find_dequeue_entry(queue, NULL, 0)))
296 issue_verdict(entry, verdict);
297}
298
299static inline int
300__nfqnl_set_mode(struct nfqnl_instance *queue,
301 unsigned char mode, unsigned int range)
302{
303 int status = 0;
304
305 switch (mode) {
306 case NFQNL_COPY_NONE:
307 case NFQNL_COPY_META:
308 queue->copy_mode = mode;
309 queue->copy_range = 0;
310 break;
311
312 case NFQNL_COPY_PACKET:
313 queue->copy_mode = mode;
314 /* we're using struct nfattr which has 16bit nfa_len */
315 if (range > 0xffff)
316 queue->copy_range = 0xffff;
317 else
318 queue->copy_range = range;
319 break;
320
321 default:
322 status = -EINVAL;
323
324 }
325 return status;
326}
327
328static struct nfqnl_queue_entry *
329find_dequeue_entry(struct nfqnl_instance *queue,
330 nfqnl_cmpfn cmpfn, unsigned long data)
331{
332 struct nfqnl_queue_entry *entry;
333
334 spin_lock_bh(&queue->lock);
335 entry = __find_dequeue_entry(queue, cmpfn, data);
336 spin_unlock_bh(&queue->lock);
337
338 return entry;
339}
340
341static void
342nfqnl_flush(struct nfqnl_instance *queue, int verdict)
343{
344 spin_lock_bh(&queue->lock);
345 __nfqnl_flush(queue, verdict);
346 spin_unlock_bh(&queue->lock);
347}
348
349static struct sk_buff *
350nfqnl_build_packet_message(struct nfqnl_instance *queue,
351 struct nfqnl_queue_entry *entry, int *errp)
352{
353 unsigned char *old_tail;
354 size_t size;
355 size_t data_len = 0;
356 struct sk_buff *skb;
357 struct nfqnl_msg_packet_hdr pmsg;
358 struct nlmsghdr *nlh;
359 struct nfgenmsg *nfmsg;
360 unsigned int tmp_uint;
361
362 QDEBUG("entered\n");
363
364 /* all macros expand to constant values at compile time */
365 size = NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
366 + NLMSG_SPACE(sizeof(u_int32_t)) /* ifindex */
367 + NLMSG_SPACE(sizeof(u_int32_t)) /* ifindex */
368#ifdef CONFIG_BRIDGE_NETFILTER
369 + NLMSG_SPACE(sizeof(u_int32_t)) /* ifindex */
370 + NLMSG_SPACE(sizeof(u_int32_t)) /* ifindex */
371#endif
372 + NLMSG_SPACE(sizeof(u_int32_t)) /* mark */
373 + NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_hw))
374 + NLMSG_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));
375
376 spin_lock_bh(&queue->lock);
377
378 switch (queue->copy_mode) {
379 case NFQNL_COPY_META:
380 case NFQNL_COPY_NONE:
381 data_len = 0;
382 break;
383
384 case NFQNL_COPY_PACKET:
385 if (queue->copy_range == 0
386 || queue->copy_range > entry->skb->len)
387 data_len = entry->skb->len;
388 else
389 data_len = queue->copy_range;
390
391 size += NLMSG_SPACE(data_len);
392 break;
393
394 default:
395 *errp = -EINVAL;
396 spin_unlock_bh(&queue->lock);
397 return NULL;
398 }
399
400 spin_unlock_bh(&queue->lock);
401
402 skb = alloc_skb(size, GFP_ATOMIC);
403 if (!skb)
404 goto nlmsg_failure;
405
 406 old_tail = skb->tail;
407 nlh = NLMSG_PUT(skb, 0, 0,
408 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
409 sizeof(struct nfgenmsg));
410 nfmsg = NLMSG_DATA(nlh);
411 nfmsg->nfgen_family = entry->info->pf;
412 nfmsg->version = NFNETLINK_V0;
413 nfmsg->res_id = htons(queue->queue_num);
414
415 pmsg.packet_id = htonl(entry->id);
416 pmsg.hw_protocol = htons(entry->skb->protocol);
417 pmsg.hook = entry->info->hook;
418
419 NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
420
421 if (entry->info->indev) {
422 tmp_uint = htonl(entry->info->indev->ifindex);
423#ifndef CONFIG_BRIDGE_NETFILTER
424 NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
425#else
426 if (entry->info->pf == PF_BRIDGE) {
427 /* Case 1: indev is physical input device, we need to
428 * look for bridge group (when called from
429 * netfilter_bridge) */
430 NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
431 &tmp_uint);
432 /* this is the bridge group "brX" */
433 tmp_uint = htonl(entry->info->indev->br_port->br->dev->ifindex);
434 NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
435 &tmp_uint);
436 } else {
437 /* Case 2: indev is bridge group, we need to look for
438 * physical device (when called from ipv4) */
439 NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
440 &tmp_uint);
441 if (entry->skb->nf_bridge
442 && entry->skb->nf_bridge->physindev) {
443 tmp_uint = htonl(entry->skb->nf_bridge->physindev->ifindex);
444 NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
445 sizeof(tmp_uint), &tmp_uint);
446 }
447 }
448#endif
449 }
450
451 if (entry->info->outdev) {
452 tmp_uint = htonl(entry->info->outdev->ifindex);
453#ifndef CONFIG_BRIDGE_NETFILTER
454 NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
455#else
456 if (entry->info->pf == PF_BRIDGE) {
457 /* Case 1: outdev is physical output device, we need to
458 * look for bridge group (when called from
459 * netfilter_bridge) */
460 NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
461 &tmp_uint);
462 /* this is the bridge group "brX" */
463 tmp_uint = htonl(entry->info->outdev->br_port->br->dev->ifindex);
464 NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
465 &tmp_uint);
466 } else {
467 /* Case 2: outdev is bridge group, we need to look for
468 * physical output device (when called from ipv4) */
469 NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
470 &tmp_uint);
471 if (entry->skb->nf_bridge
472 && entry->skb->nf_bridge->physoutdev) {
473 tmp_uint = htonl(entry->skb->nf_bridge->physoutdev->ifindex);
474 NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
475 sizeof(tmp_uint), &tmp_uint);
476 }
477 }
478#endif
479 }
480
481 if (entry->skb->nfmark) {
482 tmp_uint = htonl(entry->skb->nfmark);
483 NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
484 }
485
486 if (entry->info->indev && entry->skb->dev
487 && entry->skb->dev->hard_header_parse) {
488 struct nfqnl_msg_packet_hw phw;
489
490 phw.hw_addrlen =
491 entry->skb->dev->hard_header_parse(entry->skb,
492 phw.hw_addr);
493 phw.hw_addrlen = htons(phw.hw_addrlen);
494 NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
495 }
496
497 if (entry->skb->tstamp.off_sec) {
498 struct nfqnl_msg_packet_timestamp ts;
499
500 ts.sec = htonll(skb_tv_base.tv_sec + entry->skb->tstamp.off_sec);
501 ts.usec = htonll(skb_tv_base.tv_usec + entry->skb->tstamp.off_usec);
502
503 NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
504 }
505
506 if (data_len) {
507 struct nfattr *nfa;
508 int size = NFA_LENGTH(data_len);
509
510 if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
511 printk(KERN_WARNING "nf_queue: no tailroom!\n");
512 goto nlmsg_failure;
513 }
514
515 nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
516 nfa->nfa_type = NFQA_PAYLOAD;
517 nfa->nfa_len = size;
518
519 if (skb_copy_bits(entry->skb, 0, NFA_DATA(nfa), data_len))
520 BUG();
521 }
522
523 nlh->nlmsg_len = skb->tail - old_tail;
524 return skb;
525
526nlmsg_failure:
527nfattr_failure:
528 if (skb)
529 kfree_skb(skb);
530 *errp = -EINVAL;
531 if (net_ratelimit())
532 printk(KERN_ERR "nf_queue: error creating packet message\n");
533 return NULL;
534}
535
536static int
537nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
538 unsigned int queuenum, void *data)
539{
540 int status = -EINVAL;
541 struct sk_buff *nskb;
542 struct nfqnl_instance *queue;
543 struct nfqnl_queue_entry *entry;
544
545 QDEBUG("entered\n");
546
547 queue = instance_lookup_get(queuenum);
548 if (!queue) {
549 QDEBUG("no queue instance matching\n");
550 return -EINVAL;
551 }
552
553 if (queue->copy_mode == NFQNL_COPY_NONE) {
554 QDEBUG("mode COPY_NONE, aborting\n");
555 status = -EAGAIN;
556 goto err_out_put;
557 }
558
559 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
560 if (entry == NULL) {
561 if (net_ratelimit())
562 printk(KERN_ERR
563 "nf_queue: OOM in nfqnl_enqueue_packet()\n");
564 status = -ENOMEM;
565 goto err_out_put;
566 }
567
568 entry->info = info;
569 entry->skb = skb;
570 entry->id = atomic_inc_return(&queue->id_sequence);
571
572 nskb = nfqnl_build_packet_message(queue, entry, &status);
573 if (nskb == NULL)
574 goto err_out_free;
575
576 spin_lock_bh(&queue->lock);
577
578 if (!queue->peer_pid)
579 goto err_out_free_nskb;
580
581 if (queue->queue_total >= queue->queue_maxlen) {
582 queue->queue_dropped++;
583 status = -ENOSPC;
584 if (net_ratelimit())
585 printk(KERN_WARNING "ip_queue: full at %d entries, "
586 "dropping packets(s). Dropped: %d\n",
587 queue->queue_total, queue->queue_dropped);
588 goto err_out_free_nskb;
589 }
590
591 /* nfnetlink_unicast will either free the nskb or add it to a socket */
592 status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
593 if (status < 0) {
594 queue->queue_user_dropped++;
595 goto err_out_unlock;
596 }
597
598 __enqueue_entry(queue, entry);
599
600 spin_unlock_bh(&queue->lock);
601 instance_put(queue);
602 return status;
603
604err_out_free_nskb:
605 kfree_skb(nskb);
606
607err_out_unlock:
608 spin_unlock_bh(&queue->lock);
609
610err_out_free:
611 kfree(entry);
612err_out_put:
613 instance_put(queue);
614 return status;
615}
616
617static int
618nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
619{
620 int diff;
621
622 diff = data_len - e->skb->len;
623 if (diff < 0)
624 skb_trim(e->skb, data_len);
625 else if (diff > 0) {
626 if (data_len > 0xFFFF)
627 return -EINVAL;
628 if (diff > skb_tailroom(e->skb)) {
629 struct sk_buff *newskb;
630
631 newskb = skb_copy_expand(e->skb,
632 skb_headroom(e->skb),
633 diff,
634 GFP_ATOMIC);
635 if (newskb == NULL) {
636 printk(KERN_WARNING "ip_queue: OOM "
637 "in mangle, dropping packet\n");
638 return -ENOMEM;
639 }
640 if (e->skb->sk)
641 skb_set_owner_w(newskb, e->skb->sk);
642 kfree_skb(e->skb);
643 e->skb = newskb;
644 }
645 skb_put(e->skb, diff);
646 }
647 if (!skb_make_writable(&e->skb, data_len))
648 return -ENOMEM;
649 memcpy(e->skb->data, data, data_len);
650
651 return 0;
652}
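/*
 * Editorial example: if the queued skb holds 60 bytes and userspace
 * returns an 80 byte NFQA_PAYLOAD, diff is +20; with insufficient
 * tailroom the data is first moved to a larger skb via
 * skb_copy_expand(), then skb_put(e->skb, 20) extends it and the
 * memcpy() overwrites all 80 bytes.  Shrinking is just skb_trim().
 */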
653
654static inline int
655id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
656{
657 return (id == e->id);
658}
659
660static int
661nfqnl_set_mode(struct nfqnl_instance *queue,
662 unsigned char mode, unsigned int range)
663{
664 int status;
665
666 spin_lock_bh(&queue->lock);
667 status = __nfqnl_set_mode(queue, mode, range);
668 spin_unlock_bh(&queue->lock);
669
670 return status;
671}
672
673static int
674dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
675{
676 if (entry->info->indev)
677 if (entry->info->indev->ifindex == ifindex)
678 return 1;
679
680 if (entry->info->outdev)
681 if (entry->info->outdev->ifindex == ifindex)
682 return 1;
683
684 return 0;
685}
686
687/* drop all packets with either indev or outdev == ifindex from all queue
688 * instances */
689static void
690nfqnl_dev_drop(int ifindex)
691{
692 int i;
693
694 QDEBUG("entering for ifindex %u\n", ifindex);
695
 696 /* it may look as if we hold the readlock for way too long, across
 697 * issue_verdict(), nf_reinject(), ... - but since we only ever
 698 * issue NF_DROP, nf_reinject() processes it directly */
699 read_lock_bh(&instances_lock);
700
701 for (i = 0; i < INSTANCE_BUCKETS; i++) {
702 struct hlist_node *tmp;
703 struct nfqnl_instance *inst;
704 struct hlist_head *head = &instance_table[i];
705
706 hlist_for_each_entry(inst, tmp, head, hlist) {
707 struct nfqnl_queue_entry *entry;
708 while ((entry = find_dequeue_entry(inst, dev_cmp,
709 ifindex)) != NULL)
710 issue_verdict(entry, NF_DROP);
711 }
712 }
713
714 read_unlock_bh(&instances_lock);
715}
716
717#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
718
719static int
720nfqnl_rcv_dev_event(struct notifier_block *this,
721 unsigned long event, void *ptr)
722{
723 struct net_device *dev = ptr;
724
725 /* Drop any packets associated with the downed device */
726 if (event == NETDEV_DOWN)
727 nfqnl_dev_drop(dev->ifindex);
728 return NOTIFY_DONE;
729}
730
731static struct notifier_block nfqnl_dev_notifier = {
732 .notifier_call = nfqnl_rcv_dev_event,
733};
734
735static int
736nfqnl_rcv_nl_event(struct notifier_block *this,
737 unsigned long event, void *ptr)
738{
739 struct netlink_notify *n = ptr;
740
741 if (event == NETLINK_URELEASE &&
742 n->protocol == NETLINK_NETFILTER && n->pid) {
743 int i;
744
745 /* destroy all instances for this pid */
746 write_lock_bh(&instances_lock);
747 for (i = 0; i < INSTANCE_BUCKETS; i++) {
748 struct hlist_node *tmp, *t2;
749 struct nfqnl_instance *inst;
750 struct hlist_head *head = &instance_table[i];
751
752 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
753 if (n->pid == inst->peer_pid)
754 __instance_destroy(inst);
755 }
756 }
757 write_unlock_bh(&instances_lock);
758 }
759 return NOTIFY_DONE;
760}
761
762static struct notifier_block nfqnl_rtnl_notifier = {
763 .notifier_call = nfqnl_rcv_nl_event,
764};
765
766static const int nfqa_verdict_min[NFQA_MAX] = {
767 [NFQA_VERDICT_HDR-1] = sizeof(struct nfqnl_msg_verdict_hdr),
768 [NFQA_MARK-1] = sizeof(u_int32_t),
769 [NFQA_PAYLOAD-1] = 0,
770};
771
772static int
773nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
774 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
775{
776 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
777 u_int16_t queue_num = ntohs(nfmsg->res_id);
778
779 struct nfqnl_msg_verdict_hdr *vhdr;
780 struct nfqnl_instance *queue;
781 unsigned int verdict;
782 struct nfqnl_queue_entry *entry;
783 int err;
784
785 if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) {
786 QDEBUG("bad attribute size\n");
787 return -EINVAL;
788 }
789
790 queue = instance_lookup_get(queue_num);
791 if (!queue)
792 return -ENODEV;
793
794 if (queue->peer_pid != NETLINK_CB(skb).pid) {
795 err = -EPERM;
796 goto err_out_put;
797 }
798
799 if (!nfqa[NFQA_VERDICT_HDR-1]) {
800 err = -EINVAL;
801 goto err_out_put;
802 }
803
804 vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
805 verdict = ntohl(vhdr->verdict);
806
807 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
808 err = -EINVAL;
809 goto err_out_put;
810 }
811
812 entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
813 if (entry == NULL) {
814 err = -ENOENT;
815 goto err_out_put;
816 }
817
818 if (nfqa[NFQA_PAYLOAD-1]) {
819 if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
820 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
821 verdict = NF_DROP;
822 }
823
824 if (nfqa[NFQA_MARK-1])
 825 entry->skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1]));
826
827 issue_verdict(entry, verdict);
828 instance_put(queue);
829 return 0;
830
831err_out_put:
832 instance_put(queue);
833 return err;
834}
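/*
 * Editorial sketch (hypothetical userspace, layout simplified): a
 * verdict message pairs the packet id from NFQA_PACKET_HDR with a
 * netfilter verdict, both in network byte order:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct nfgenmsg nfmsg;
 *		struct nfattr nfa;
 *		struct nfqnl_msg_verdict_hdr vh;
 *	} req;
 *
 *	req.nlh.nlmsg_len   = sizeof(req);
 *	req.nlh.nlmsg_type  = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT;
 *	req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *	req.nfmsg.res_id    = htons(queue_num);
 *	req.nfa.nfa_len     = NFA_LENGTH(sizeof(req.vh));
 *	req.nfa.nfa_type    = NFQA_VERDICT_HDR;
 *	req.vh.verdict      = htonl(NF_ACCEPT);
 *	req.vh.id           = htonl(packet_id);
 *	send(nlfd, &req, sizeof(req), 0);
 */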
835
836static int
837nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
838 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
839{
840 return -ENOTSUPP;
841}
842
843static const int nfqa_cfg_min[NFQA_CFG_MAX] = {
844 [NFQA_CFG_CMD-1] = sizeof(struct nfqnl_msg_config_cmd),
845 [NFQA_CFG_PARAMS-1] = sizeof(struct nfqnl_msg_config_params),
846};
847
848static struct nf_queue_handler nfqh = {
849 .name = "nf_queue",
850 .outfn = &nfqnl_enqueue_packet,
851};
852
853static int
854nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
855 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
856{
857 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
858 u_int16_t queue_num = ntohs(nfmsg->res_id);
859 struct nfqnl_instance *queue;
860 int ret = 0;
861
862 QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));
863
864 if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) {
865 QDEBUG("bad attribute size\n");
866 return -EINVAL;
867 }
868
869 queue = instance_lookup_get(queue_num);
870 if (nfqa[NFQA_CFG_CMD-1]) {
871 struct nfqnl_msg_config_cmd *cmd;
872 cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
873 QDEBUG("found CFG_CMD\n");
874
875 switch (cmd->command) {
876 case NFQNL_CFG_CMD_BIND:
877 if (queue)
878 return -EBUSY;
879
880 queue = instance_create(queue_num, NETLINK_CB(skb).pid);
881 if (!queue)
882 return -EINVAL;
883 break;
884 case NFQNL_CFG_CMD_UNBIND:
885 if (!queue)
886 return -ENODEV;
887
888 if (queue->peer_pid != NETLINK_CB(skb).pid) {
889 ret = -EPERM;
890 goto out_put;
891 }
892
893 instance_destroy(queue);
894 break;
895 case NFQNL_CFG_CMD_PF_BIND:
896 QDEBUG("registering queue handler for pf=%u\n",
897 ntohs(cmd->pf));
898 ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
899 break;
900 case NFQNL_CFG_CMD_PF_UNBIND:
901 QDEBUG("unregistering queue handler for pf=%u\n",
902 ntohs(cmd->pf));
903 /* This is a bug and a feature. We can unregister
904 * other handlers(!) */
905 ret = nf_unregister_queue_handler(ntohs(cmd->pf));
906 break;
907 default:
908 ret = -EINVAL;
909 break;
910 }
911 } else {
912 if (!queue) {
913 QDEBUG("no config command, and no instance ENOENT\n");
914 ret = -ENOENT;
915 goto out_put;
916 }
917
918 if (queue->peer_pid != NETLINK_CB(skb).pid) {
919 QDEBUG("no config command, and wrong pid\n");
920 ret = -EPERM;
921 goto out_put;
922 }
923 }
924
925 if (nfqa[NFQA_CFG_PARAMS-1]) {
926 struct nfqnl_msg_config_params *params;
927 params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
928
929 nfqnl_set_mode(queue, params->copy_mode,
930 ntohl(params->copy_range));
931 }
932
933out_put:
934 instance_put(queue);
935 return ret;
936}
937
938static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
939 [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp,
940 .attr_count = NFQA_MAX,
941 .cap_required = CAP_NET_ADMIN },
942 [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict,
943 .attr_count = NFQA_MAX,
944 .cap_required = CAP_NET_ADMIN },
945 [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config,
946 .attr_count = NFQA_CFG_MAX,
947 .cap_required = CAP_NET_ADMIN },
948};
949
950static struct nfnetlink_subsystem nfqnl_subsys = {
951 .name = "nf_queue",
952 .subsys_id = NFNL_SUBSYS_QUEUE,
953 .cb_count = NFQNL_MSG_MAX,
954 .cb = nfqnl_cb,
955};
956
957#ifdef CONFIG_PROC_FS
958struct iter_state {
959 unsigned int bucket;
960};
961
962static struct hlist_node *get_first(struct seq_file *seq)
963{
964 struct iter_state *st = seq->private;
965
966 if (!st)
967 return NULL;
968
969 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
970 if (!hlist_empty(&instance_table[st->bucket]))
971 return instance_table[st->bucket].first;
972 }
973 return NULL;
974}
975
976static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
977{
978 struct iter_state *st = seq->private;
979
980 h = h->next;
981 while (!h) {
982 if (++st->bucket >= INSTANCE_BUCKETS)
983 return NULL;
984
985 h = instance_table[st->bucket].first;
986 }
987 return h;
988}
989
990static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
991{
992 struct hlist_node *head;
993 head = get_first(seq);
994
995 if (head)
996 while (pos && (head = get_next(seq, head)))
997 pos--;
998 return pos ? NULL : head;
999}
1000
1001static void *seq_start(struct seq_file *seq, loff_t *pos)
1002{
1003 read_lock_bh(&instances_lock);
1004 return get_idx(seq, *pos);
1005}
1006
1007static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1008{
1009 (*pos)++;
1010 return get_next(s, v);
1011}
1012
1013static void seq_stop(struct seq_file *s, void *v)
1014{
1015 read_unlock_bh(&instances_lock);
1016}
1017
1018static int seq_show(struct seq_file *s, void *v)
1019{
1020 const struct nfqnl_instance *inst = v;
1021
1022 return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
1023 inst->queue_num,
1024 inst->peer_pid, inst->queue_total,
1025 inst->copy_mode, inst->copy_range,
1026 inst->queue_dropped, inst->queue_user_dropped,
1027 atomic_read(&inst->id_sequence),
1028 atomic_read(&inst->use));
1029}
1030
1031static struct seq_operations nfqnl_seq_ops = {
1032 .start = seq_start,
1033 .next = seq_next,
1034 .stop = seq_stop,
1035 .show = seq_show,
1036};
1037
1038static int nfqnl_open(struct inode *inode, struct file *file)
1039{
1040 struct seq_file *seq;
1041 struct iter_state *is;
1042 int ret;
1043
1044 is = kmalloc(sizeof(*is), GFP_KERNEL);
1045 if (!is)
1046 return -ENOMEM;
1047 memset(is, 0, sizeof(*is));
1048 ret = seq_open(file, &nfqnl_seq_ops);
1049 if (ret < 0)
1050 goto out_free;
1051 seq = file->private_data;
1052 seq->private = is;
1053 return ret;
1054out_free:
1055 kfree(is);
1056 return ret;
1057}
1058
1059static struct file_operations nfqnl_file_ops = {
1060 .owner = THIS_MODULE,
1061 .open = nfqnl_open,
1062 .read = seq_read,
1063 .llseek = seq_lseek,
1064 .release = seq_release_private,
1065};
1066
1067#endif /* PROC_FS */
1068
1069static int
1070init_or_cleanup(int init)
1071{
1072 int i, status = -ENOMEM;
1073#ifdef CONFIG_PROC_FS
1074 struct proc_dir_entry *proc_nfqueue;
1075#endif
1076
1077 if (!init)
1078 goto cleanup;
1079
1080 for (i = 0; i < INSTANCE_BUCKETS; i++)
1081 INIT_HLIST_HEAD(&instance_table[i]);
1082
1083 netlink_register_notifier(&nfqnl_rtnl_notifier);
1084 status = nfnetlink_subsys_register(&nfqnl_subsys);
1085 if (status < 0) {
1086 printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
1087 goto cleanup_netlink_notifier;
1088 }
1089
1090#ifdef CONFIG_PROC_FS
1091 proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
1092 proc_net_netfilter);
1093 if (!proc_nfqueue)
1094 goto cleanup_subsys;
1095 proc_nfqueue->proc_fops = &nfqnl_file_ops;
1096#endif
1097
1098 register_netdevice_notifier(&nfqnl_dev_notifier);
1099
1100 return status;
1101
1102cleanup:
1103 nf_unregister_queue_handlers(&nfqh);
1104 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1105#ifdef CONFIG_PROC_FS
1106 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
1107cleanup_subsys:
1108#endif
1109 nfnetlink_subsys_unregister(&nfqnl_subsys);
1110cleanup_netlink_notifier:
1111 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1112 return status;
1113}
1114
1115static int __init init(void)
1116{
1117
1118 return init_or_cleanup(1);
1119}
1120
1121static void __exit fini(void)
1122{
1123 init_or_cleanup(0);
1124}
1125
1126MODULE_DESCRIPTION("netfilter packet queue handler");
1127MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1128MODULE_LICENSE("GPL");
1129MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1130
1131module_init(init);
1132module_exit(fini);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ff774a06c89d..62435ffc6184 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -13,7 +13,12 @@
13 * added netlink_proto_exit 13 * added netlink_proto_exit
14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> 14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15 * use nlk_sk, as sk->protinfo is on a diet 8) 15 * use nlk_sk, as sk->protinfo is on a diet 8)
16 * 16 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17 * - inc module use count of module that owns
18 * the kernel socket in case userspace opens
19 * socket of same protocol
20 * - remove all module support, since netlink is
21 * mandatory if CONFIG_NET=y these days
17 */ 22 */
18 23
19#include <linux/config.h> 24#include <linux/config.h>
@@ -55,21 +60,29 @@
55#include <net/scm.h> 60#include <net/scm.h>
56 61
57#define Nprintk(a...) 62#define Nprintk(a...)
63#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
58 64
59struct netlink_sock { 65struct netlink_sock {
60 /* struct sock has to be the first member of netlink_sock */ 66 /* struct sock has to be the first member of netlink_sock */
61 struct sock sk; 67 struct sock sk;
62 u32 pid; 68 u32 pid;
63 unsigned int groups;
64 u32 dst_pid; 69 u32 dst_pid;
65 unsigned int dst_groups; 70 u32 dst_group;
71 u32 flags;
72 u32 subscriptions;
73 u32 ngroups;
74 unsigned long *groups;
66 unsigned long state; 75 unsigned long state;
67 wait_queue_head_t wait; 76 wait_queue_head_t wait;
68 struct netlink_callback *cb; 77 struct netlink_callback *cb;
69 spinlock_t cb_lock; 78 spinlock_t cb_lock;
70 void (*data_ready)(struct sock *sk, int bytes); 79 void (*data_ready)(struct sock *sk, int bytes);
80 struct module *module;
71}; 81};
72 82
83#define NETLINK_KERNEL_SOCKET 0x1
84#define NETLINK_RECV_PKTINFO 0x2
85
73static inline struct netlink_sock *nlk_sk(struct sock *sk) 86static inline struct netlink_sock *nlk_sk(struct sock *sk)
74{ 87{
75 return (struct netlink_sock *)sk; 88 return (struct netlink_sock *)sk;
@@ -92,6 +105,9 @@ struct netlink_table {
92 struct nl_pid_hash hash; 105 struct nl_pid_hash hash;
93 struct hlist_head mc_list; 106 struct hlist_head mc_list;
94 unsigned int nl_nonroot; 107 unsigned int nl_nonroot;
108 unsigned int groups;
109 struct module *module;
110 int registered;
95}; 111};
96 112
97static struct netlink_table *nl_table; 113static struct netlink_table *nl_table;
@@ -106,6 +122,11 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
106 122
107static struct notifier_block *netlink_chain; 123static struct notifier_block *netlink_chain;
108 124
125static u32 netlink_group_mask(u32 group)
126{
127 return group ? 1 << (group - 1) : 0;
128}
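/* Editorial note: netlink_group_mask() maps the 1-based group number
 * used by the new per-socket bitmap onto the legacy 32-bit nl_groups
 * mask: group 1 -> 0x1, group 5 -> 0x10, group 0 (no group) -> 0. */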
129
109static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) 130static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
110{ 131{
111 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; 132 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
@@ -122,6 +143,7 @@ static void netlink_sock_destruct(struct sock *sk)
122 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 143 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
123 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 144 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
124 BUG_TRAP(!nlk_sk(sk)->cb); 145 BUG_TRAP(!nlk_sk(sk)->cb);
146 BUG_TRAP(!nlk_sk(sk)->groups);
125} 147}
126 148
127/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP. 149/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
@@ -317,7 +339,7 @@ static void netlink_remove(struct sock *sk)
317 netlink_table_grab(); 339 netlink_table_grab();
318 if (sk_del_node_init(sk)) 340 if (sk_del_node_init(sk))
319 nl_table[sk->sk_protocol].hash.entries--; 341 nl_table[sk->sk_protocol].hash.entries--;
320 if (nlk_sk(sk)->groups) 342 if (nlk_sk(sk)->subscriptions)
321 __sk_del_bind_node(sk); 343 __sk_del_bind_node(sk);
322 netlink_table_ungrab(); 344 netlink_table_ungrab();
323} 345}
@@ -328,19 +350,11 @@ static struct proto netlink_proto = {
328 .obj_size = sizeof(struct netlink_sock), 350 .obj_size = sizeof(struct netlink_sock),
329}; 351};
330 352
331static int netlink_create(struct socket *sock, int protocol) 353static int __netlink_create(struct socket *sock, int protocol)
332{ 354{
333 struct sock *sk; 355 struct sock *sk;
334 struct netlink_sock *nlk; 356 struct netlink_sock *nlk;
335 357
336 sock->state = SS_UNCONNECTED;
337
338 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
339 return -ESOCKTNOSUPPORT;
340
341 if (protocol<0 || protocol >= MAX_LINKS)
342 return -EPROTONOSUPPORT;
343
344 sock->ops = &netlink_ops; 358 sock->ops = &netlink_ops;
345 359
346 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); 360 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
@@ -350,15 +364,67 @@ static int netlink_create(struct socket *sock, int protocol)
350 sock_init_data(sock, sk); 364 sock_init_data(sock, sk);
351 365
352 nlk = nlk_sk(sk); 366 nlk = nlk_sk(sk);
353
354 spin_lock_init(&nlk->cb_lock); 367 spin_lock_init(&nlk->cb_lock);
355 init_waitqueue_head(&nlk->wait); 368 init_waitqueue_head(&nlk->wait);
356 sk->sk_destruct = netlink_sock_destruct;
357 369
370 sk->sk_destruct = netlink_sock_destruct;
358 sk->sk_protocol = protocol; 371 sk->sk_protocol = protocol;
359 return 0; 372 return 0;
360} 373}
361 374
375static int netlink_create(struct socket *sock, int protocol)
376{
377 struct module *module = NULL;
378 struct netlink_sock *nlk;
379 unsigned int groups;
380 int err = 0;
381
382 sock->state = SS_UNCONNECTED;
383
384 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
385 return -ESOCKTNOSUPPORT;
386
387 if (protocol<0 || protocol >= MAX_LINKS)
388 return -EPROTONOSUPPORT;
389
390 netlink_lock_table();
391#ifdef CONFIG_KMOD
392 if (!nl_table[protocol].registered) {
393 netlink_unlock_table();
394 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
395 netlink_lock_table();
396 }
397#endif
398 if (nl_table[protocol].registered &&
399 try_module_get(nl_table[protocol].module))
400 module = nl_table[protocol].module;
401 else
402 err = -EPROTONOSUPPORT;
403 groups = nl_table[protocol].groups;
404 netlink_unlock_table();
405
 406 if (err || (err = __netlink_create(sock, protocol)) < 0)
407 goto out_module;
408
409 nlk = nlk_sk(sock->sk);
410
411 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL);
412 if (nlk->groups == NULL) {
413 err = -ENOMEM;
414 goto out_module;
415 }
416 memset(nlk->groups, 0, NLGRPSZ(groups));
417 nlk->ngroups = groups;
418
419 nlk->module = module;
420out:
421 return err;
422
423out_module:
424 module_put(module);
425 goto out;
426}
427
362static int netlink_release(struct socket *sock) 428static int netlink_release(struct socket *sock)
363{ 429{
364 struct sock *sk = sock->sk; 430 struct sock *sk = sock->sk;
@@ -387,14 +453,27 @@ static int netlink_release(struct socket *sock)
387 453
388 skb_queue_purge(&sk->sk_write_queue); 454 skb_queue_purge(&sk->sk_write_queue);
389 455
390 if (nlk->pid && !nlk->groups) { 456 if (nlk->pid && !nlk->subscriptions) {
391 struct netlink_notify n = { 457 struct netlink_notify n = {
392 .protocol = sk->sk_protocol, 458 .protocol = sk->sk_protocol,
393 .pid = nlk->pid, 459 .pid = nlk->pid,
394 }; 460 };
395 notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); 461 notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
396 } 462 }
397 463
464 if (nlk->module)
465 module_put(nlk->module);
466
467 if (nlk->flags & NETLINK_KERNEL_SOCKET) {
468 netlink_table_grab();
469 nl_table[sk->sk_protocol].module = NULL;
470 nl_table[sk->sk_protocol].registered = 0;
471 netlink_table_ungrab();
472 }
473
474 kfree(nlk->groups);
475 nlk->groups = NULL;
476
398 sock_put(sk); 477 sock_put(sk);
399 return 0; 478 return 0;
400} 479}
@@ -443,6 +522,18 @@ static inline int netlink_capable(struct socket *sock, unsigned int flag)
443 capable(CAP_NET_ADMIN); 522 capable(CAP_NET_ADMIN);
444} 523}
445 524
525static void
526netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
527{
528 struct netlink_sock *nlk = nlk_sk(sk);
529
530 if (nlk->subscriptions && !subscriptions)
531 __sk_del_bind_node(sk);
532 else if (!nlk->subscriptions && subscriptions)
533 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
534 nlk->subscriptions = subscriptions;
535}
536
446static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 537static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
447{ 538{
448 struct sock *sk = sock->sk; 539 struct sock *sk = sock->sk;
@@ -468,15 +559,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
468 return err; 559 return err;
469 } 560 }
470 561
471 if (!nladdr->nl_groups && !nlk->groups) 562 if (!nladdr->nl_groups && !(u32)nlk->groups[0])
472 return 0; 563 return 0;
473 564
474 netlink_table_grab(); 565 netlink_table_grab();
475 if (nlk->groups && !nladdr->nl_groups) 566 netlink_update_subscriptions(sk, nlk->subscriptions +
476 __sk_del_bind_node(sk); 567 hweight32(nladdr->nl_groups) -
477 else if (!nlk->groups && nladdr->nl_groups) 568 hweight32(nlk->groups[0]));
478 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); 569 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
479 nlk->groups = nladdr->nl_groups;
480 netlink_table_ungrab(); 570 netlink_table_ungrab();
481 571
482 return 0; 572 return 0;
@@ -493,7 +583,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
493 if (addr->sa_family == AF_UNSPEC) { 583 if (addr->sa_family == AF_UNSPEC) {
494 sk->sk_state = NETLINK_UNCONNECTED; 584 sk->sk_state = NETLINK_UNCONNECTED;
495 nlk->dst_pid = 0; 585 nlk->dst_pid = 0;
496 nlk->dst_groups = 0; 586 nlk->dst_group = 0;
497 return 0; 587 return 0;
498 } 588 }
499 if (addr->sa_family != AF_NETLINK) 589 if (addr->sa_family != AF_NETLINK)
@@ -509,7 +599,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
509 if (err == 0) { 599 if (err == 0) {
510 sk->sk_state = NETLINK_CONNECTED; 600 sk->sk_state = NETLINK_CONNECTED;
511 nlk->dst_pid = nladdr->nl_pid; 601 nlk->dst_pid = nladdr->nl_pid;
512 nlk->dst_groups = nladdr->nl_groups; 602 nlk->dst_group = ffs(nladdr->nl_groups);
513 } 603 }
514 604
515 return err; 605 return err;
@@ -527,10 +617,10 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr
527 617
528 if (peer) { 618 if (peer) {
529 nladdr->nl_pid = nlk->dst_pid; 619 nladdr->nl_pid = nlk->dst_pid;
530 nladdr->nl_groups = nlk->dst_groups; 620 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
531 } else { 621 } else {
532 nladdr->nl_pid = nlk->pid; 622 nladdr->nl_pid = nlk->pid;
533 nladdr->nl_groups = nlk->groups; 623 nladdr->nl_groups = nlk->groups[0];
534 } 624 }
535 return 0; 625 return 0;
536} 626}
@@ -731,7 +821,8 @@ static inline int do_one_broadcast(struct sock *sk,
731 if (p->exclude_sk == sk) 821 if (p->exclude_sk == sk)
732 goto out; 822 goto out;
733 823
734 if (nlk->pid == p->pid || !(nlk->groups & p->group)) 824 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
825 !test_bit(p->group - 1, nlk->groups))
735 goto out; 826 goto out;
736 827
737 if (p->failure) { 828 if (p->failure) {
@@ -770,7 +861,7 @@ out:
770} 861}
771 862
772int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 863int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
773 u32 group, int allocation) 864 u32 group, unsigned int __nocast allocation)
774{ 865{
775 struct netlink_broadcast_data info; 866 struct netlink_broadcast_data info;
776 struct hlist_node *node; 867 struct hlist_node *node;
@@ -827,7 +918,8 @@ static inline int do_one_set_err(struct sock *sk,
827 if (sk == p->exclude_sk) 918 if (sk == p->exclude_sk)
828 goto out; 919 goto out;
829 920
830 if (nlk->pid == p->pid || !(nlk->groups & p->group)) 921 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
922 !test_bit(p->group - 1, nlk->groups))
831 goto out; 923 goto out;
832 924
833 sk->sk_err = p->code; 925 sk->sk_err = p->code;
@@ -855,6 +947,94 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
855 read_unlock(&nl_table_lock); 947 read_unlock(&nl_table_lock);
856} 948}
857 949
950static int netlink_setsockopt(struct socket *sock, int level, int optname,
951 char __user *optval, int optlen)
952{
953 struct sock *sk = sock->sk;
954 struct netlink_sock *nlk = nlk_sk(sk);
955 int val = 0, err;
956
957 if (level != SOL_NETLINK)
958 return -ENOPROTOOPT;
959
960 if (optlen >= sizeof(int) &&
961 get_user(val, (int __user *)optval))
962 return -EFAULT;
963
964 switch (optname) {
965 case NETLINK_PKTINFO:
966 if (val)
967 nlk->flags |= NETLINK_RECV_PKTINFO;
968 else
969 nlk->flags &= ~NETLINK_RECV_PKTINFO;
970 err = 0;
971 break;
972 case NETLINK_ADD_MEMBERSHIP:
973 case NETLINK_DROP_MEMBERSHIP: {
974 unsigned int subscriptions;
975 int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;
976
977 if (!netlink_capable(sock, NL_NONROOT_RECV))
978 return -EPERM;
979 if (!val || val - 1 >= nlk->ngroups)
980 return -EINVAL;
981 netlink_table_grab();
982 old = test_bit(val - 1, nlk->groups);
983 subscriptions = nlk->subscriptions - old + new;
984 if (new)
985 __set_bit(val - 1, nlk->groups);
986 else
987 __clear_bit(val - 1, nlk->groups);
988 netlink_update_subscriptions(sk, subscriptions);
989 netlink_table_ungrab();
990 err = 0;
991 break;
992 }
993 default:
994 err = -ENOPROTOOPT;
995 }
996 return err;
997}
998
999static int netlink_getsockopt(struct socket *sock, int level, int optname,
1000 char __user *optval, int __user *optlen)
1001{
1002 struct sock *sk = sock->sk;
1003 struct netlink_sock *nlk = nlk_sk(sk);
1004 int len, val, err;
1005
1006 if (level != SOL_NETLINK)
1007 return -ENOPROTOOPT;
1008
1009 if (get_user(len, optlen))
1010 return -EFAULT;
1011 if (len < 0)
1012 return -EINVAL;
1013
1014 switch (optname) {
1015 case NETLINK_PKTINFO:
1016 if (len < sizeof(int))
1017 return -EINVAL;
1018 len = sizeof(int);
1019 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1020	if (put_user(len, optlen) || put_user(val, optval))
1021	return -EFAULT;
1022 err = 0;
1023 break;
1024 default:
1025 err = -ENOPROTOOPT;
1026 }
1027 return err;
1028}
1029
1030static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1031{
1032 struct nl_pktinfo info;
1033
1034 info.group = NETLINK_CB(skb).dst_group;
1035 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1036}
1037
858static inline void netlink_rcv_wake(struct sock *sk) 1038static inline void netlink_rcv_wake(struct sock *sk)
859{ 1039{
860 struct netlink_sock *nlk = nlk_sk(sk); 1040 struct netlink_sock *nlk = nlk_sk(sk);
@@ -873,7 +1053,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
873 struct netlink_sock *nlk = nlk_sk(sk); 1053 struct netlink_sock *nlk = nlk_sk(sk);
874 struct sockaddr_nl *addr=msg->msg_name; 1054 struct sockaddr_nl *addr=msg->msg_name;
875 u32 dst_pid; 1055 u32 dst_pid;
876 u32 dst_groups; 1056 u32 dst_group;
877 struct sk_buff *skb; 1057 struct sk_buff *skb;
878 int err; 1058 int err;
879 struct scm_cookie scm; 1059 struct scm_cookie scm;
@@ -891,12 +1071,12 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
891 if (addr->nl_family != AF_NETLINK) 1071 if (addr->nl_family != AF_NETLINK)
892 return -EINVAL; 1072 return -EINVAL;
893 dst_pid = addr->nl_pid; 1073 dst_pid = addr->nl_pid;
894 dst_groups = addr->nl_groups; 1074 dst_group = ffs(addr->nl_groups);
895 if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND)) 1075 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
896 return -EPERM; 1076 return -EPERM;
897 } else { 1077 } else {
898 dst_pid = nlk->dst_pid; 1078 dst_pid = nlk->dst_pid;
899 dst_groups = nlk->dst_groups; 1079 dst_group = nlk->dst_group;
900 } 1080 }
901 1081
902 if (!nlk->pid) { 1082 if (!nlk->pid) {
@@ -914,9 +1094,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
914 goto out; 1094 goto out;
915 1095
916 NETLINK_CB(skb).pid = nlk->pid; 1096 NETLINK_CB(skb).pid = nlk->pid;
917 NETLINK_CB(skb).groups = nlk->groups;
918 NETLINK_CB(skb).dst_pid = dst_pid; 1097 NETLINK_CB(skb).dst_pid = dst_pid;
919 NETLINK_CB(skb).dst_groups = dst_groups; 1098 NETLINK_CB(skb).dst_group = dst_group;
920 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); 1099 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
921 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1100 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
922 1101
@@ -938,9 +1117,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
938 goto out; 1117 goto out;
939 } 1118 }
940 1119
941 if (dst_groups) { 1120 if (dst_group) {
942 atomic_inc(&skb->users); 1121 atomic_inc(&skb->users);
943 netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL); 1122 netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
944 } 1123 }
945 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); 1124 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
946 1125
@@ -986,7 +1165,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
986 addr->nl_family = AF_NETLINK; 1165 addr->nl_family = AF_NETLINK;
987 addr->nl_pad = 0; 1166 addr->nl_pad = 0;
988 addr->nl_pid = NETLINK_CB(skb).pid; 1167 addr->nl_pid = NETLINK_CB(skb).pid;
989 addr->nl_groups = NETLINK_CB(skb).dst_groups; 1168 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
990 msg->msg_namelen = sizeof(*addr); 1169 msg->msg_namelen = sizeof(*addr);
991 } 1170 }
992 1171
@@ -1001,6 +1180,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1001 netlink_dump(sk); 1180 netlink_dump(sk);
1002 1181
1003 scm_recv(sock, msg, siocb->scm, flags); 1182 scm_recv(sock, msg, siocb->scm, flags);
1183 if (nlk->flags & NETLINK_RECV_PKTINFO)
1184 netlink_cmsg_recv_pktinfo(msg, skb);
1004 1185
1005out: 1186out:
1006 netlink_rcv_wake(sk); 1187 netlink_rcv_wake(sk);
@@ -1023,10 +1204,13 @@ static void netlink_data_ready(struct sock *sk, int len)
1023 */ 1204 */
1024 1205
1025struct sock * 1206struct sock *
1026netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len)) 1207netlink_kernel_create(int unit, unsigned int groups,
1208 void (*input)(struct sock *sk, int len),
1209 struct module *module)
1027{ 1210{
1028 struct socket *sock; 1211 struct socket *sock;
1029 struct sock *sk; 1212 struct sock *sk;
1213 struct netlink_sock *nlk;
1030 1214
1031 if (!nl_table) 1215 if (!nl_table)
1032 return NULL; 1216 return NULL;
@@ -1037,20 +1221,31 @@ netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
1037 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) 1221 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1038 return NULL; 1222 return NULL;
1039 1223
1040 if (netlink_create(sock, unit) < 0) { 1224 if (__netlink_create(sock, unit) < 0)
1041 sock_release(sock); 1225 goto out_sock_release;
1042 return NULL; 1226
1043 }
1044 sk = sock->sk; 1227 sk = sock->sk;
1045 sk->sk_data_ready = netlink_data_ready; 1228 sk->sk_data_ready = netlink_data_ready;
1046 if (input) 1229 if (input)
1047 nlk_sk(sk)->data_ready = input; 1230 nlk_sk(sk)->data_ready = input;
1048 1231
1049 if (netlink_insert(sk, 0)) { 1232 if (netlink_insert(sk, 0))
1050 sock_release(sock); 1233 goto out_sock_release;
1051 return NULL; 1234
1052 } 1235 nlk = nlk_sk(sk);
1236 nlk->flags |= NETLINK_KERNEL_SOCKET;
1237
1238 netlink_table_grab();
1239 nl_table[unit].groups = groups < 32 ? 32 : groups;
1240 nl_table[unit].module = module;
1241 nl_table[unit].registered = 1;
1242 netlink_table_ungrab();
1243
1053 return sk; 1244 return sk;
1245
1246out_sock_release:
1247 sock_release(sock);
1248 return NULL;
1054} 1249}
1055 1250
1056void netlink_set_nonroot(int protocol, unsigned int flags) 1251void netlink_set_nonroot(int protocol, unsigned int flags)
@@ -1288,7 +1483,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1288 s, 1483 s,
1289 s->sk_protocol, 1484 s->sk_protocol,
1290 nlk->pid, 1485 nlk->pid,
1291 nlk->groups, 1486 nlk->flags & NETLINK_KERNEL_SOCKET ?
1487 0 : (unsigned int)nlk->groups[0],
1292 atomic_read(&s->sk_rmem_alloc), 1488 atomic_read(&s->sk_rmem_alloc),
1293 atomic_read(&s->sk_wmem_alloc), 1489 atomic_read(&s->sk_wmem_alloc),
1294 nlk->cb, 1490 nlk->cb,
@@ -1362,8 +1558,8 @@ static struct proto_ops netlink_ops = {
1362 .ioctl = sock_no_ioctl, 1558 .ioctl = sock_no_ioctl,
1363 .listen = sock_no_listen, 1559 .listen = sock_no_listen,
1364 .shutdown = sock_no_shutdown, 1560 .shutdown = sock_no_shutdown,
1365 .setsockopt = sock_no_setsockopt, 1561 .setsockopt = netlink_setsockopt,
1366 .getsockopt = sock_no_getsockopt, 1562 .getsockopt = netlink_getsockopt,
1367 .sendmsg = netlink_sendmsg, 1563 .sendmsg = netlink_sendmsg,
1368 .recvmsg = netlink_recvmsg, 1564 .recvmsg = netlink_recvmsg,
1369 .mmap = sock_no_mmap, 1565 .mmap = sock_no_mmap,
@@ -1438,21 +1634,7 @@ out:
1438 return err; 1634 return err;
1439} 1635}
1440 1636
1441static void __exit netlink_proto_exit(void)
1442{
1443 sock_unregister(PF_NETLINK);
1444 proc_net_remove("netlink");
1445 kfree(nl_table);
1446 nl_table = NULL;
1447 proto_unregister(&netlink_proto);
1448}
1449
1450core_initcall(netlink_proto_init); 1637core_initcall(netlink_proto_init);
1451module_exit(netlink_proto_exit);
1452
1453MODULE_LICENSE("GPL");
1454
1455MODULE_ALIAS_NETPROTO(PF_NETLINK);
1456 1638
1457EXPORT_SYMBOL(netlink_ack); 1639EXPORT_SYMBOL(netlink_ack);
1458EXPORT_SYMBOL(netlink_broadcast); 1640EXPORT_SYMBOL(netlink_broadcast);
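Taken together, the netlink hunks above replace the single 32-bit nl_groups bitmask with a per-socket group bitmap sized by the owning protocol, managed through the new NETLINK_ADD_MEMBERSHIP and NETLINK_DROP_MEMBERSHIP socket options. A minimal userspace sketch of the new interface follows; the group number and the NETLINK_ROUTE protocol are arbitrary illustrations, not taken from this patch.

	/* Hedged sketch: join one netlink multicast group by number via
	 * the NETLINK_ADD_MEMBERSHIP option introduced above. */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	#ifndef SOL_NETLINK
	#define SOL_NETLINK 270			/* from <linux/socket.h> */
	#endif

	int main(void)
	{
		int group = 5;	/* hypothetical group id, illustration only */
		int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* Unlike sockaddr_nl.nl_groups, this can name groups above 32;
		 * repeat the call to join further groups. */
		if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			       &group, sizeof(group)) < 0)
			perror("setsockopt(NETLINK_ADD_MEMBERSHIP)");
		close(fd);
		return 0;
	}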
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 162a85fed150..4b53de982114 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -39,7 +39,7 @@
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/seq_file.h> 40#include <linux/seq_file.h>
41#include <net/ip.h> 41#include <net/ip.h>
42#include <net/tcp.h> 42#include <net/tcp_states.h>
43#include <net/arp.h> 43#include <net/arp.h>
44#include <linux/init.h> 44#include <linux/init.h>
45 45
@@ -858,17 +858,16 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
858 frametype = skb->data[19] & 0x0F; 858 frametype = skb->data[19] & 0x0F;
859 flags = skb->data[19] & 0xF0; 859 flags = skb->data[19] & 0xF0;
860 860
861#ifdef CONFIG_INET
862 /* 861 /*
863 * Check for an incoming IP over NET/ROM frame. 862 * Check for an incoming IP over NET/ROM frame.
864 */ 863 */
865 if (frametype == NR_PROTOEXT && circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { 864 if (frametype == NR_PROTOEXT &&
865 circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
866 skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); 866 skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
867 skb->h.raw = skb->data; 867 skb->h.raw = skb->data;
868 868
869 return nr_rx_ip(skb, dev); 869 return nr_rx_ip(skb, dev);
870 } 870 }
871#endif
872 871
873 /* 872 /*
874 * Find an existing socket connection, based on circuit ID, if it's 873 * Find an existing socket connection, based on circuit ID, if it's
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 220bf7494f71..263da4c26494 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -38,8 +38,6 @@
38#include <net/ax25.h> 38#include <net/ax25.h>
39#include <net/netrom.h> 39#include <net/netrom.h>
40 40
41#ifdef CONFIG_INET
42
43/* 41/*
44 * Only allow IP over NET/ROM frames through if the netrom device is up. 42 * Only allow IP over NET/ROM frames through if the netrom device is up.
45 */ 43 */
@@ -64,11 +62,12 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
64 skb->nh.raw = skb->data; 62 skb->nh.raw = skb->data;
65 skb->pkt_type = PACKET_HOST; 63 skb->pkt_type = PACKET_HOST;
66 64
67 ip_rcv(skb, skb->dev, NULL); 65 netif_rx(skb);
68 66
69 return 1; 67 return 1;
70} 68}
71 69
70#ifdef CONFIG_INET
72 71
73static int nr_rebuild_header(struct sk_buff *skb) 72static int nr_rebuild_header(struct sk_buff *skb)
74{ 73{
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 9c44b3794126..64b81a796907 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -22,8 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp.h> 25#include <net/tcp_states.h>
26#include <net/ip.h> /* For ip_rcv */
27#include <asm/uaccess.h> 26#include <asm/uaccess.h>
28#include <asm/system.h> 27#include <asm/system.h>
29#include <linux/fcntl.h> 28#include <linux/fcntl.h>
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 0627347b14b8..587bed2674bf 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -21,7 +21,7 @@
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp.h> 24#include <net/tcp_states.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/system.h> 26#include <asm/system.h>
27#include <linux/fcntl.h> 27#include <linux/fcntl.h>
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
77 if (skb_prev == NULL) 77 if (skb_prev == NULL)
78 skb_queue_head(&sk->sk_write_queue, skb); 78 skb_queue_head(&sk->sk_write_queue, skb);
79 else 79 else
80 skb_append(skb_prev, skb); 80 skb_append(skb_prev, skb, &sk->sk_write_queue);
81 skb_prev = skb; 81 skb_prev = skb;
82 } 82 }
83} 83}
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index faabda8088be..75b72d389ba9 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -22,7 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp.h> 25#include <net/tcp_states.h>
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include <asm/system.h> 27#include <asm/system.h>
28#include <linux/fcntl.h> 28#include <linux/fcntl.h>
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9d5980aa4de..ba997095f08f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -241,7 +241,7 @@ static struct proto_ops packet_ops;
241#ifdef CONFIG_SOCK_PACKET 241#ifdef CONFIG_SOCK_PACKET
242static struct proto_ops packet_ops_spkt; 242static struct proto_ops packet_ops_spkt;
243 243
244static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 244static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
245{ 245{
246 struct sock *sk; 246 struct sock *sk;
247 struct sockaddr_pkt *spkt; 247 struct sockaddr_pkt *spkt;
@@ -441,7 +441,7 @@ static inline unsigned run_filter(struct sk_buff *skb, struct sock *sk, unsigned
441 we will not harm anyone. 441 we will not harm anyone.
442 */ 442 */
443 443
444static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 444static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
445{ 445{
446 struct sock *sk; 446 struct sock *sk;
447 struct sockaddr_ll *sll; 447 struct sockaddr_ll *sll;
@@ -546,7 +546,7 @@ drop:
546} 546}
547 547
548#ifdef CONFIG_PACKET_MMAP 548#ifdef CONFIG_PACKET_MMAP
549static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) 549static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
550{ 550{
551 struct sock *sk; 551 struct sock *sk;
552 struct packet_sock *po; 552 struct packet_sock *po;
@@ -635,12 +635,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
635 h->tp_snaplen = snaplen; 635 h->tp_snaplen = snaplen;
636 h->tp_mac = macoff; 636 h->tp_mac = macoff;
637 h->tp_net = netoff; 637 h->tp_net = netoff;
638 if (skb->stamp.tv_sec == 0) { 638 if (skb->tstamp.off_sec == 0) {
639 do_gettimeofday(&skb->stamp); 639 __net_timestamp(skb);
640 sock_enable_timestamp(sk); 640 sock_enable_timestamp(sk);
641 } 641 }
642 h->tp_sec = skb->stamp.tv_sec; 642 h->tp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec;
643 h->tp_usec = skb->stamp.tv_usec; 643 h->tp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec;
644 644
645 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); 645 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
646 sll->sll_halen = 0; 646 sll->sll_halen = 0;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 5480caf8ccc2..c6e59f84c3ae 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -41,7 +41,7 @@
41#include <net/rose.h> 41#include <net/rose.h>
42#include <linux/proc_fs.h> 42#include <linux/proc_fs.h>
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <net/tcp.h> 44#include <net/tcp_states.h>
45#include <net/ip.h> 45#include <net/ip.h>
46#include <net/arp.h> 46#include <net/arp.h>
47 47
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index ef475a1bb1ba..8348d33f1efe 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -26,8 +26,7 @@
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <net/sock.h> 28#include <net/sock.h>
29#include <net/ip.h> /* For ip_rcv */ 29#include <net/tcp_states.h>
30#include <net/tcp.h>
31#include <asm/system.h> 30#include <asm/system.h>
32#include <linux/fcntl.h> 31#include <linux/fcntl.h>
33#include <linux/mm.h> 32#include <linux/mm.h>
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 25da6f699fd0..4510cd7613ec 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -24,7 +24,7 @@
24#include <linux/if_arp.h> 24#include <linux/if_arp.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/tcp.h> 27#include <net/tcp_states.h>
28#include <asm/system.h> 28#include <asm/system.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <linux/fcntl.h> 30#include <linux/fcntl.h>
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7db7e1cedc3a..a29a3a960fd6 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -21,7 +21,7 @@
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp.h> 24#include <net/tcp_states.h>
25#include <asm/system.h> 25#include <asm/system.h>
26#include <linux/fcntl.h> 26#include <linux/fcntl.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
74 if (skb_prev == NULL) 74 if (skb_prev == NULL)
75 skb_queue_head(&sk->sk_write_queue, skb); 75 skb_queue_head(&sk->sk_write_queue, skb);
76 else 76 else
77 skb_append(skb_prev, skb); 77 skb_append(skb_prev, skb, &sk->sk_write_queue);
78 skb_prev = skb; 78 skb_prev = skb;
79 } 79 }
80} 80}
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index 84dd4403f792..50ae0371dab8 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -22,7 +22,7 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/tcp.h> 25#include <net/tcp_states.h>
26#include <asm/system.h> 26#include <asm/system.h>
27#include <linux/fcntl.h> 27#include <linux/fcntl.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 9bce7794130a..122c086ee2db 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -330,7 +330,7 @@ static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
330 330
331 msg->trans = trans; 331 msg->trans = trans;
332 msg->state = RXRPC_MSG_RECEIVED; 332 msg->state = RXRPC_MSG_RECEIVED;
333 msg->stamp = pkt->stamp; 333 skb_get_timestamp(pkt, &msg->stamp);
334 if (msg->stamp.tv_sec == 0) { 334 if (msg->stamp.tv_sec == 0) {
335 do_gettimeofday(&msg->stamp); 335 do_gettimeofday(&msg->stamp);
336 if (pkt->sk) 336 if (pkt->sk)
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 59d3e71f8b85..45d3bc0812c8 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -491,6 +491,7 @@ config NET_EMATCH_TEXT
491 depends on NET_EMATCH 491 depends on NET_EMATCH
492 select TEXTSEARCH 492 select TEXTSEARCH
493 select TEXTSEARCH_KMP 493 select TEXTSEARCH_KMP
494 select TEXTSEARCH_BM
494 select TEXTSEARCH_FSM 495 select TEXTSEARCH_FSM
495 ---help--- 496 ---help---
496 Say Y here if you want to be able to classify packets based on 497
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 249c61936ea0..8aebe8f6d271 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -165,7 +165,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
165 while ((a = act) != NULL) { 165 while ((a = act) != NULL) {
166repeat: 166repeat:
167 if (a->ops && a->ops->act) { 167 if (a->ops && a->ops->act) {
168 ret = a->ops->act(&skb, a); 168 ret = a->ops->act(&skb, a, res);
169 if (TC_MUNGED & skb->tc_verd) { 169 if (TC_MUNGED & skb->tc_verd) {
170 /* copied already, allow trampling */ 170 /* copied already, allow trampling */
171 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); 171 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
@@ -179,11 +179,6 @@ repeat:
179 act = a->next; 179 act = a->next;
180 } 180 }
181exec_done: 181exec_done:
182 if (skb->tc_classid > 0) {
183 res->classid = skb->tc_classid;
184 res->class = 0;
185 skb->tc_classid = 0;
186 }
187 return ret; 182 return ret;
188} 183}
189 184
@@ -598,7 +593,7 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
598 nlh->nlmsg_flags |= NLM_F_ROOT; 593 nlh->nlmsg_flags |= NLM_F_ROOT;
599 module_put(a->ops->owner); 594 module_put(a->ops->owner);
600 kfree(a); 595 kfree(a);
601 err = rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 596 err = rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
602 if (err > 0) 597 if (err > 0)
603 return 0; 598 return 0;
604 599
@@ -661,7 +656,7 @@ tca_action_gd(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int event)
661 656
662 /* now do the delete */ 657 /* now do the delete */
663 tcf_action_destroy(head, 0); 658 tcf_action_destroy(head, 0);
664 ret = rtnetlink_send(skb, pid, RTMGRP_TC, 659 ret = rtnetlink_send(skb, pid, RTNLGRP_TC,
665 n->nlmsg_flags&NLM_F_ECHO); 660 n->nlmsg_flags&NLM_F_ECHO);
666 if (ret > 0) 661 if (ret > 0)
667 return 0; 662 return 0;
@@ -703,9 +698,9 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
703 x->rta_len = skb->tail - (u8*)x; 698 x->rta_len = skb->tail - (u8*)x;
704 699
705 nlh->nlmsg_len = skb->tail - b; 700 nlh->nlmsg_len = skb->tail - b;
706 NETLINK_CB(skb).dst_groups = RTMGRP_TC; 701 NETLINK_CB(skb).dst_group = RTNLGRP_TC;
707 702
708 err = rtnetlink_send(skb, pid, RTMGRP_TC, flags&NLM_F_ECHO); 703 err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
709 if (err > 0) 704 if (err > 0)
710 err = 0; 705 err = 0;
711 return err; 706 return err;
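The act_api change above threads a struct tcf_result pointer into every action's ->act() callback, replacing the skb->tc_classid back-channel that the same hunk removes. A hedged sketch of the new callback shape; this is not an in-tree action, and the surrounding registration boilerplate is omitted.

	/* Sketch only: an action reporting its result through 'res'
	 * instead of stashing a classid in the skb. */
	static int tcf_example_act(struct sk_buff **pskb, struct tc_action *a,
				   struct tcf_result *res)
	{
		if (res) {
			res->classid = 0;	/* illustrative: no class chosen */
			res->class = 0;
		}
		return TC_ACT_OK;		/* let the packet continue */
	}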
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3b5714ef4d1a..b4d89fbb3782 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -367,7 +367,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
367 return -EINVAL; 367 return -EINVAL;
368 } 368 }
369 369
370 return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 370 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
371} 371}
372 372
373struct tcf_dump_args 373struct tcf_dump_args
diff --git a/net/sched/gact.c b/net/sched/gact.c
index a811c89fef7f..d1c6d542912a 100644
--- a/net/sched/gact.c
+++ b/net/sched/gact.c
@@ -135,7 +135,7 @@ tcf_gact_cleanup(struct tc_action *a, int bind)
135} 135}
136 136
137static int 137static int
138tcf_gact(struct sk_buff **pskb, struct tc_action *a) 138tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
139{ 139{
140 struct tcf_gact *p = PRIV(a, gact); 140 struct tcf_gact *p = PRIV(a, gact);
141 struct sk_buff *skb = *pskb; 141 struct sk_buff *skb = *pskb;
diff --git a/net/sched/ipt.c b/net/sched/ipt.c
index b114d994d523..f50136eed211 100644
--- a/net/sched/ipt.c
+++ b/net/sched/ipt.c
@@ -201,7 +201,7 @@ tcf_ipt_cleanup(struct tc_action *a, int bind)
201} 201}
202 202
203static int 203static int
204tcf_ipt(struct sk_buff **pskb, struct tc_action *a) 204tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
205{ 205{
206 int ret = 0, result = 0; 206 int ret = 0, result = 0;
207 struct tcf_ipt *p = PRIV(a, ipt); 207 struct tcf_ipt *p = PRIV(a, ipt);
diff --git a/net/sched/mirred.c b/net/sched/mirred.c
index f309ce336803..20d06916dc0b 100644
--- a/net/sched/mirred.c
+++ b/net/sched/mirred.c
@@ -158,7 +158,7 @@ tcf_mirred_cleanup(struct tc_action *a, int bind)
158} 158}
159 159
160static int 160static int
161tcf_mirred(struct sk_buff **pskb, struct tc_action *a) 161tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
162{ 162{
163 struct tcf_mirred *p = PRIV(a, mirred); 163 struct tcf_mirred *p = PRIV(a, mirred);
164 struct net_device *dev; 164 struct net_device *dev;
diff --git a/net/sched/pedit.c b/net/sched/pedit.c
index 678be6a645fb..767d24f4610e 100644
--- a/net/sched/pedit.c
+++ b/net/sched/pedit.c
@@ -130,7 +130,7 @@ tcf_pedit_cleanup(struct tc_action *a, int bind)
130} 130}
131 131
132static int 132static int
133tcf_pedit(struct sk_buff **pskb, struct tc_action *a) 133tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
134{ 134{
135 struct tcf_pedit *p = PRIV(a, pedit); 135 struct tcf_pedit *p = PRIV(a, pedit);
136 struct sk_buff *skb = *pskb; 136 struct sk_buff *skb = *pskb;
diff --git a/net/sched/police.c b/net/sched/police.c
index c03545faf523..eb39fb2f39b6 100644
--- a/net/sched/police.c
+++ b/net/sched/police.c
@@ -284,7 +284,8 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
284 return 0; 284 return 0;
285} 285}
286 286
287static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a) 287static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a,
288 struct tcf_result *res)
288{ 289{
289 psched_time_t now; 290 psched_time_t now;
290 struct sk_buff *skb = *pskb; 291 struct sk_buff *skb = *pskb;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b9a069af4a02..737681cb9a92 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -816,7 +816,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
816 } 816 }
817 817
818 if (skb->len) 818 if (skb->len)
819 return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 819 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
820 820
821err_out: 821err_out:
822 kfree_skb(skb); 822 kfree_skb(skb);
@@ -1040,7 +1040,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1040 return -EINVAL; 1040 return -EINVAL;
1041 } 1041 }
1042 1042
1043 return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1043 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1044} 1044}
1045 1045
1046struct qdisc_dump_args 1046struct qdisc_dump_args
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0d066c965342..99ceb91f0150 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -238,6 +238,20 @@ static void dev_watchdog_down(struct net_device *dev)
238 spin_unlock_bh(&dev->xmit_lock); 238 spin_unlock_bh(&dev->xmit_lock);
239} 239}
240 240
241void netif_carrier_on(struct net_device *dev)
242{
243 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
244 linkwatch_fire_event(dev);
245 if (netif_running(dev))
246 __netdev_watchdog_up(dev);
247}
248
249void netif_carrier_off(struct net_device *dev)
250{
251 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
252 linkwatch_fire_event(dev);
253}
254
241/* "NOOP" scheduler: the best scheduler, recommended for all interfaces 255/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
242 under all circumstances. It is difficult to invent anything faster or 256 under all circumstances. It is difficult to invent anything faster or
243 cheaper. 257 cheaper.
@@ -600,6 +614,8 @@ void dev_shutdown(struct net_device *dev)
600} 614}
601 615
602EXPORT_SYMBOL(__netdev_watchdog_up); 616EXPORT_SYMBOL(__netdev_watchdog_up);
617EXPORT_SYMBOL(netif_carrier_on);
618EXPORT_SYMBOL(netif_carrier_off);
603EXPORT_SYMBOL(noop_qdisc); 619EXPORT_SYMBOL(noop_qdisc);
604EXPORT_SYMBOL(noop_qdisc_ops); 620EXPORT_SYMBOL(noop_qdisc_ops);
605EXPORT_SYMBOL(qdisc_create_dflt); 621EXPORT_SYMBOL(qdisc_create_dflt);
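netif_carrier_on() and netif_carrier_off() move out of line here, gaining a linkwatch event and, on carrier-up, a watchdog restart. A sketch of the intended driver-side usage; my_hw_link_up() is a hypothetical hardware query, not part of this patch.

	#include <linux/netdevice.h>

	/* Hedged sketch: a driver's link-change handler built on the
	 * carrier helpers exported above. */
	static void my_dev_link_change(struct net_device *dev)
	{
		if (my_hw_link_up(dev))		/* hypothetical PHY/MAC query */
			netif_carrier_on(dev);	/* linkwatch event + watchdog */
		else
			netif_carrier_off(dev);	/* linkwatch event */
	}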
diff --git a/net/sched/simple.c b/net/sched/simple.c
index 3ab4c675ab5d..8a6ae4f491e8 100644
--- a/net/sched/simple.c
+++ b/net/sched/simple.c
@@ -44,7 +44,7 @@ static DEFINE_RWLOCK(simp_lock);
44#include <net/pkt_act.h> 44#include <net/pkt_act.h>
45#include <net/act_generic.h> 45#include <net/act_generic.h>
46 46
47static int tcf_simp(struct sk_buff **pskb, struct tc_action *a) 47static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
48{ 48{
49 struct sk_buff *skb = *pskb; 49 struct sk_buff *skb = *pskb;
50 struct tcf_defact *p = PRIV(a, defact); 50 struct tcf_defact *p = PRIV(a, defact);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 742be9171b7d..28f32243397f 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -236,8 +236,8 @@ int sctp_rcv(struct sk_buff *skb)
236 } 236 }
237 237
238 /* SCTP seems to always need a timestamp right now (FIXME) */ 238 /* SCTP seems to always need a timestamp right now (FIXME) */
239 if (skb->stamp.tv_sec == 0) { 239 if (skb->tstamp.off_sec == 0) {
240 do_gettimeofday(&skb->stamp); 240 __net_timestamp(skb);
241 sock_enable_timestamp(sk); 241 sock_enable_timestamp(sk);
242 } 242 }
243 243
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e9b2fd480d61..fa3be2b8fb5f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -66,8 +66,8 @@
66#include <linux/seq_file.h> 66#include <linux/seq_file.h>
67 67
68#include <net/protocol.h> 68#include <net/protocol.h>
69#include <net/tcp.h>
70#include <net/ndisc.h> 69#include <net/ndisc.h>
70#include <net/ip.h>
71#include <net/ipv6.h> 71#include <net/ipv6.h>
72#include <net/transp_v6.h> 72#include <net/transp_v6.h>
73#include <net/addrconf.h> 73#include <net/addrconf.h>
@@ -641,10 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
641 else 641 else
642 newinet->pmtudisc = IP_PMTUDISC_WANT; 642 newinet->pmtudisc = IP_PMTUDISC_WANT;
643 643
644#ifdef INET_REFCNT_DEBUG 644 sk_refcnt_debug_inc(newsk);
645 atomic_inc(&inet6_sock_nr);
646 atomic_inc(&inet_sock_nr);
647#endif
648 645
649 if (newsk->sk_prot->init(newsk)) { 646 if (newsk->sk_prot->init(newsk)) {
650 sk_common_release(newsk); 647 sk_common_release(newsk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ce9245e71fca..e7025be77691 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -62,7 +62,7 @@
62/* Global data structures. */ 62/* Global data structures. */
63struct sctp_globals sctp_globals; 63struct sctp_globals sctp_globals;
64struct proc_dir_entry *proc_net_sctp; 64struct proc_dir_entry *proc_net_sctp;
65DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics); 65DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
66 66
67struct idr sctp_assocs_id; 67struct idr sctp_assocs_id;
68DEFINE_SPINLOCK(sctp_assocs_id_lock); 68DEFINE_SPINLOCK(sctp_assocs_id_lock);
@@ -78,8 +78,8 @@ static struct sctp_pf *sctp_pf_inet_specific;
78static struct sctp_af *sctp_af_v4_specific; 78static struct sctp_af *sctp_af_v4_specific;
79static struct sctp_af *sctp_af_v6_specific; 79static struct sctp_af *sctp_af_v6_specific;
80 80
81kmem_cache_t *sctp_chunk_cachep; 81kmem_cache_t *sctp_chunk_cachep __read_mostly;
82kmem_cache_t *sctp_bucket_cachep; 82kmem_cache_t *sctp_bucket_cachep __read_mostly;
83 83
84extern int sctp_snmp_proc_init(void); 84extern int sctp_snmp_proc_init(void);
85extern int sctp_snmp_proc_exit(void); 85extern int sctp_snmp_proc_exit(void);
@@ -593,9 +593,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
593 newinet->mc_index = 0; 593 newinet->mc_index = 0;
594 newinet->mc_list = NULL; 594 newinet->mc_list = NULL;
595 595
596#ifdef INET_REFCNT_DEBUG 596 sk_refcnt_debug_inc(newsk);
597 atomic_inc(&inet_sock_nr);
598#endif
599 597
600 if (newsk->sk_prot->init(newsk)) { 598 if (newsk->sk_prot->init(newsk)) {
601 sk_common_release(newsk); 599 sk_common_release(newsk);
@@ -1244,6 +1242,10 @@ SCTP_STATIC __exit void sctp_exit(void)
1244module_init(sctp_init); 1242module_init(sctp_init);
1245module_exit(sctp_exit); 1243module_exit(sctp_exit);
1246 1244
1245/*
1246 * __stringify doesn't like enums, so use the IPPROTO_SCTP value (132) directly.
1247 */
1248MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
1247MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>"); 1249MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
1248MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); 1250MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
1249MODULE_LICENSE("GPL"); 1251MODULE_LICENSE("GPL");
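The alias added above is what lets a plain socket(2) call pull SCTP in on demand: when the inet protocol lookup misses, the kernel asks kmod for a module by the same "net-pf-<family>-proto-<protocol>" name, mirroring the CONFIG_KMOD path in the netlink hunk earlier. A one-line sketch of the consuming side:

	/* The request that MODULE_ALIAS("net-pf-2-proto-132") satisfies;
	 * PF_INET is 2 and IPPROTO_SCTP is 132. */
	request_module("net-pf-%d-proto-%d", PF_INET, IPPROTO_SCTP);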
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 00d32b7c8266..3868a8d70cc0 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1362,6 +1362,7 @@ struct sctp_association *sctp_unpack_cookie(
1362 char *key; 1362 char *key;
1363 sctp_scope_t scope; 1363 sctp_scope_t scope;
1364 struct sk_buff *skb = chunk->skb; 1364 struct sk_buff *skb = chunk->skb;
1365 struct timeval tv;
1365 1366
1366 headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE; 1367 headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
1367 bodysize = ntohs(chunk->chunk_hdr->length) - headersize; 1368 bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
@@ -1434,7 +1435,8 @@ no_hmac:
1434 * an association, there is no need to check cookie's expiration 1435 * an association, there is no need to check cookie's expiration
1435 * for init collision case of lost COOKIE ACK. 1436 * for init collision case of lost COOKIE ACK.
1436 */ 1437 */
1437 if (!asoc && tv_lt(bear_cookie->expiration, skb->stamp)) { 1438 skb_get_timestamp(skb, &tv);
1439 if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
1438 __u16 len; 1440 __u16 len;
1439 /* 1441 /*
1440 * Section 3.3.10.3 Stale Cookie Error (3) 1442 * Section 3.3.10.3 Stale Cookie Error (3)
@@ -1447,10 +1449,9 @@ no_hmac:
1447 len = ntohs(chunk->chunk_hdr->length); 1449 len = ntohs(chunk->chunk_hdr->length);
1448 *errp = sctp_make_op_error_space(asoc, chunk, len); 1450 *errp = sctp_make_op_error_space(asoc, chunk, len);
1449 if (*errp) { 1451 if (*errp) {
1450 suseconds_t usecs = (skb->stamp.tv_sec - 1452 suseconds_t usecs = (tv.tv_sec -
1451 bear_cookie->expiration.tv_sec) * 1000000L + 1453 bear_cookie->expiration.tv_sec) * 1000000L +
1452 skb->stamp.tv_usec - 1454 tv.tv_usec - bear_cookie->expiration.tv_usec;
1453 bear_cookie->expiration.tv_usec;
1454 1455
1455 usecs = htonl(usecs); 1456 usecs = htonl(usecs);
1456 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, 1457 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 091a66f06a35..4454afe4727e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4892 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 4892 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
4893 event = sctp_skb2event(skb); 4893 event = sctp_skb2event(skb);
4894 if (event->asoc == assoc) { 4894 if (event->asoc == assoc) {
4895 __skb_unlink(skb, skb->list); 4895 __skb_unlink(skb, &oldsk->sk_receive_queue);
4896 __skb_queue_tail(&newsk->sk_receive_queue, skb); 4896 __skb_queue_tail(&newsk->sk_receive_queue, skb);
4897 } 4897 }
4898 } 4898 }
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4921 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 4921 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
4922 event = sctp_skb2event(skb); 4922 event = sctp_skb2event(skb);
4923 if (event->asoc == assoc) { 4923 if (event->asoc == assoc) {
4924 __skb_unlink(skb, skb->list); 4924 __skb_unlink(skb, &oldsp->pd_lobby);
4925 __skb_queue_tail(queue, skb); 4925 __skb_queue_tail(queue, skb);
4926 } 4926 }
4927 } 4927 }
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99..ec2c857eae7f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -50,9 +50,9 @@
50 50
51/* Forward declarations for internal helpers. */ 51/* Forward declarations for internal helpers. */
52static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq, 52static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
53 struct sctp_ulpevent *); 53 struct sctp_ulpevent *);
54static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *, 54static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
55 struct sctp_ulpevent *); 55 struct sctp_ulpevent *);
56 56
57/* 1st Level Abstractions */ 57/* 1st Level Abstractions */
58 58
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
125 event = sctp_ulpq_order(ulpq, event); 125 event = sctp_ulpq_order(ulpq, event);
126 } 126 }
127 127
128 /* Send event to the ULP. */ 128 /* Send event to the ULP. 'event' is the sctp_ulpevent for
129 * the very first SKB on the 'temp' list.
130 */
129 if (event) 131 if (event)
130 sctp_ulpq_tail_event(ulpq, event); 132 sctp_ulpq_tail_event(ulpq, event);
131 133
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
158 return sctp_clear_pd(ulpq->asoc->base.sk); 160 return sctp_clear_pd(ulpq->asoc->base.sk);
159} 161}
160 162
161 163/* If the SKB of 'event' is on a list, it is the first such member
162 164 * of that list.
165 */
163int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) 166int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
164{ 167{
165 struct sock *sk = ulpq->asoc->base.sk; 168 struct sock *sk = ulpq->asoc->base.sk;
166 struct sk_buff_head *queue; 169 struct sk_buff_head *queue, *skb_list;
170 struct sk_buff *skb = sctp_event2skb(event);
167 int clear_pd = 0; 171 int clear_pd = 0;
168 172
173 skb_list = (struct sk_buff_head *) skb->prev;
174
169 /* If the socket is just going to throw this away, do not 175 /* If the socket is just going to throw this away, do not
170 * even try to deliver it. 176 * even try to deliver it.
171 */ 177 */
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
197 /* If we are harvesting multiple skbs they will be 203 /* If we are harvesting multiple skbs they will be
198 * collected on a list. 204 * collected on a list.
199 */ 205 */
200 if (sctp_event2skb(event)->list) 206 if (skb_list)
201 sctp_skb_list_tail(sctp_event2skb(event)->list, queue); 207 sctp_skb_list_tail(skb_list, queue);
202 else 208 else
203 __skb_queue_tail(queue, sctp_event2skb(event)); 209 __skb_queue_tail(queue, skb);
204 210
205 /* Did we just complete partial delivery and need to get 211 /* Did we just complete partial delivery and need to get
206 * rolling again? Move pending data to the receive 212 * rolling again? Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
214 return 1; 220 return 1;
215 221
216out_free: 222out_free:
217 if (sctp_event2skb(event)->list) 223 if (skb_list)
218 sctp_queue_purge_ulpevents(sctp_event2skb(event)->list); 224 sctp_queue_purge_ulpevents(skb_list);
219 else 225 else
220 sctp_ulpevent_free(event); 226 sctp_ulpevent_free(event);
227
221 return 0; 228 return 0;
222} 229}
223 230
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
269 * payload was fragmented on the way and ip had to reassemble them. 276 * payload was fragmented on the way and ip had to reassemble them.
270 * We add the rest of skb's to the first skb's fraglist. 277 * We add the rest of skb's to the first skb's fraglist.
271 */ 278 */
272static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag) 279static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
273{ 280{
274 struct sk_buff *pos; 281 struct sk_buff *pos;
275 struct sctp_ulpevent *event; 282 struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
294 skb_shinfo(f_frag)->frag_list = pos; 301 skb_shinfo(f_frag)->frag_list = pos;
295 302
296 /* Remove the first fragment from the reassembly queue. */ 303 /* Remove the first fragment from the reassembly queue. */
297 __skb_unlink(f_frag, f_frag->list); 304 __skb_unlink(f_frag, queue);
298 while (pos) { 305 while (pos) {
299 306
300 pnext = pos->next; 307 pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
304 f_frag->data_len += pos->len; 311 f_frag->data_len += pos->len;
305 312
306 /* Remove the fragment from the reassembly queue. */ 313 /* Remove the fragment from the reassembly queue. */
307 __skb_unlink(pos, pos->list); 314 __skb_unlink(pos, queue);
308 315
309 /* Break if we have reached the last fragment. */ 316 /* Break if we have reached the last fragment. */
310 if (pos == l_frag) 317 if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
375done: 382done:
376 return retval; 383 return retval;
377found: 384found:
378 retval = sctp_make_reassembled_event(first_frag, pos); 385 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
379 if (retval) 386 if (retval)
380 retval->msg_flags |= MSG_EOR; 387 retval->msg_flags |= MSG_EOR;
381 goto done; 388 goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
435 * further. 442 * further.
436 */ 443 */
437done: 444done:
438 retval = sctp_make_reassembled_event(first_frag, last_frag); 445 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
439 if (retval && is_last) 446 if (retval && is_last)
440 retval->msg_flags |= MSG_EOR; 447 retval->msg_flags |= MSG_EOR;
441 448
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
527 * further. 534 * further.
528 */ 535 */
529done: 536done:
530 retval = sctp_make_reassembled_event(first_frag, last_frag); 537 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
531 return retval; 538 return retval;
532} 539}
533 540
@@ -537,6 +544,7 @@ done:
537static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, 544static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
538 struct sctp_ulpevent *event) 545 struct sctp_ulpevent *event)
539{ 546{
547 struct sk_buff_head *event_list;
540 struct sk_buff *pos, *tmp; 548 struct sk_buff *pos, *tmp;
541 struct sctp_ulpevent *cevent; 549 struct sctp_ulpevent *cevent;
542 struct sctp_stream *in; 550 struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
547 ssn = event->ssn; 555 ssn = event->ssn;
548 in = &ulpq->asoc->ssnmap->in; 556 in = &ulpq->asoc->ssnmap->in;
549 557
558 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
559
550 /* We are holding the chunks by stream, by SSN. */ 560 /* We are holding the chunks by stream, by SSN. */
551 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { 561 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
552 cevent = (struct sctp_ulpevent *) pos->cb; 562 cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
567 /* Found it, so mark in the ssnmap. */ 577 /* Found it, so mark in the ssnmap. */
568 sctp_ssn_next(in, sid); 578 sctp_ssn_next(in, sid);
569 579
570 __skb_unlink(pos, pos->list); 580 __skb_unlink(pos, &ulpq->lobby);
571 581
572 /* Attach all gathered skbs to the event. */ 582 /* Attach all gathered skbs to the event. */
573 __skb_queue_tail(sctp_event2skb(event)->list, pos); 583 __skb_queue_tail(event_list, pos);
574 } 584 }
575} 585}
576 586
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
626} 636}
627 637
628static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, 638static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
629 struct sctp_ulpevent *event) 639 struct sctp_ulpevent *event)
630{ 640{
631 __u16 sid, ssn; 641 __u16 sid, ssn;
632 struct sctp_stream *in; 642 struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
667{ 677{
668 struct sk_buff *pos, *tmp; 678 struct sk_buff *pos, *tmp;
669 struct sctp_ulpevent *cevent; 679 struct sctp_ulpevent *cevent;
670 struct sctp_ulpevent *event = NULL; 680 struct sctp_ulpevent *event;
671 struct sctp_stream *in; 681 struct sctp_stream *in;
672 struct sk_buff_head temp; 682 struct sk_buff_head temp;
673 __u16 csid, cssn; 683 __u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
675 in = &ulpq->asoc->ssnmap->in; 685 in = &ulpq->asoc->ssnmap->in;
676 686
677 /* We are holding the chunks by stream, by SSN. */ 687 /* We are holding the chunks by stream, by SSN. */
688 skb_queue_head_init(&temp);
689 event = NULL;
678 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { 690 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
679 cevent = (struct sctp_ulpevent *) pos->cb; 691 cevent = (struct sctp_ulpevent *) pos->cb;
680 csid = cevent->stream; 692 csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
686 /* Found it, so mark in the ssnmap. */ 698 /* Found it, so mark in the ssnmap. */
687 sctp_ssn_next(in, csid); 699 sctp_ssn_next(in, csid);
688 700
689 __skb_unlink(pos, pos->list); 701 __skb_unlink(pos, &ulpq->lobby);
690 if (!event) { 702 if (!event) {
691 /* Create a temporary list to collect chunks on. */ 703 /* Create a temporary list to collect chunks on. */
692 event = sctp_skb2event(pos); 704 event = sctp_skb2event(pos);
693 skb_queue_head_init(&temp);
694 __skb_queue_tail(&temp, sctp_event2skb(event)); 705 __skb_queue_tail(&temp, sctp_event2skb(event));
695 } else { 706 } else {
696 /* Attach all gathered skbs to the event. */ 707 /* Attach all gathered skbs to the event. */
697 __skb_queue_tail(sctp_event2skb(event)->list, pos); 708 __skb_queue_tail(&temp, pos);
698 } 709 }
699 } 710 }
700 711
701 /* Send event to the ULP. */ 712 /* Send event to the ULP. 'event' is the sctp_ulpevent for
713 * the very first SKB on the 'temp' list.
714 */
702 if (event) 715 if (event)
703 sctp_ulpq_tail_event(ulpq, event); 716 sctp_ulpq_tail_event(ulpq, event);
704} 717}
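Every __skb_unlink() caller converted in this patch now names the owning queue explicitly, since skb->list is going away and an skb no longer records which list holds it. A minimal sketch of the new convention, assuming the caller already holds the queue's lock:

	#include <linux/skbuff.h>

	/* Hedged sketch: drop the head of a queue under the new
	 * two-argument __skb_unlink() convention. */
	static void drop_head_locked(struct sk_buff_head *queue)
	{
		struct sk_buff *skb = skb_peek(queue);

		if (skb) {
			__skb_unlink(skb, queue);	/* caller names the queue */
			kfree_skb(skb);
		}
	}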
diff --git a/net/socket.c b/net/socket.c
index 6f2a17881972..94fe638b4d72 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -70,6 +70,8 @@
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71#include <linux/wanrouter.h> 71#include <linux/wanrouter.h>
72#include <linux/if_bridge.h> 72#include <linux/if_bridge.h>
73#include <linux/if_frad.h>
74#include <linux/if_vlan.h>
73#include <linux/init.h> 75#include <linux/init.h>
74#include <linux/poll.h> 76#include <linux/poll.h>
75#include <linux/cache.h> 77#include <linux/cache.h>
@@ -272,7 +274,7 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ule
272 274
273#define SOCKFS_MAGIC 0x534F434B 275#define SOCKFS_MAGIC 0x534F434B
274 276
275static kmem_cache_t * sock_inode_cachep; 277static kmem_cache_t * sock_inode_cachep __read_mostly;
276 278
277static struct inode *sock_alloc_inode(struct super_block *sb) 279static struct inode *sock_alloc_inode(struct super_block *sb)
278{ 280{
@@ -331,7 +333,7 @@ static struct super_block *sockfs_get_sb(struct file_system_type *fs_type,
331 return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC); 333 return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
332} 334}
333 335
334static struct vfsmount *sock_mnt; 336static struct vfsmount *sock_mnt __read_mostly;
335 337
336static struct file_system_type sock_fs_type = { 338static struct file_system_type sock_fs_type = {
337 .name = "sockfs", 339 .name = "sockfs",
@@ -404,6 +406,7 @@ int sock_map_fd(struct socket *sock)
404 file->f_mode = FMODE_READ | FMODE_WRITE; 406 file->f_mode = FMODE_READ | FMODE_WRITE;
405 file->f_flags = O_RDWR; 407 file->f_flags = O_RDWR;
406 file->f_pos = 0; 408 file->f_pos = 0;
409 file->private_data = sock;
407 fd_install(fd, file); 410 fd_install(fd, file);
408 } 411 }
409 412
@@ -436,6 +439,9 @@ struct socket *sockfd_lookup(int fd, int *err)
436 return NULL; 439 return NULL;
437 } 440 }
438 441
442 if (file->f_op == &socket_file_ops)
443 return file->private_data; /* set in sock_map_fd */
444
439 inode = file->f_dentry->d_inode; 445 inode = file->f_dentry->d_inode;
440 if (!S_ISSOCK(inode->i_mode)) { 446 if (!S_ISSOCK(inode->i_mode)) {
441 *err = -ENOTSOCK; 447 *err = -ENOTSOCK;
@@ -720,8 +726,8 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
720 return __sock_sendmsg(iocb, sock, &x->async_msg, size); 726 return __sock_sendmsg(iocb, sock, &x->async_msg, size);
721} 727}
722 728
723ssize_t sock_sendpage(struct file *file, struct page *page, 729static ssize_t sock_sendpage(struct file *file, struct page *page,
724 int offset, size_t size, loff_t *ppos, int more) 730 int offset, size_t size, loff_t *ppos, int more)
725{ 731{
726 struct socket *sock; 732 struct socket *sock;
727 int flags; 733 int flags;
@@ -944,7 +950,7 @@ static int sock_mmap(struct file * file, struct vm_area_struct * vma)
944 return sock->ops->mmap(file, sock, vma); 950 return sock->ops->mmap(file, sock, vma);
945} 951}
946 952
947int sock_close(struct inode *inode, struct file *filp) 953static int sock_close(struct inode *inode, struct file *filp)
948{ 954{
949 /* 955 /*
950 * It was possible the inode is NULL we were 956 * It was possible the inode is NULL we were
@@ -2023,9 +2029,6 @@ int sock_unregister(int family)
2023 return 0; 2029 return 0;
2024} 2030}
2025 2031
2026
2027extern void sk_init(void);
2028
2029void __init sock_init(void) 2032void __init sock_init(void)
2030{ 2033{
2031 /* 2034 /*
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 554f224c0445..fe1a73ce6cff 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -28,13 +28,13 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/sunrpc/rpc_pipe_fs.h> 29#include <linux/sunrpc/rpc_pipe_fs.h>
30 30
31static struct vfsmount *rpc_mount; 31static struct vfsmount *rpc_mount __read_mostly;
32static int rpc_mount_count; 32static int rpc_mount_count;
33 33
34static struct file_system_type rpc_pipe_fs_type; 34static struct file_system_type rpc_pipe_fs_type;
35 35
36 36
37static kmem_cache_t *rpc_inode_cachep; 37static kmem_cache_t *rpc_inode_cachep __read_mostly;
38 38
39#define RPC_UPCALL_TIMEOUT (30*HZ) 39#define RPC_UPCALL_TIMEOUT (30*HZ)
40 40
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 2d9eb7fbd521..f3104035e35d 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -34,10 +34,10 @@ static int rpc_task_id;
34#define RPC_BUFFER_MAXSIZE (2048) 34#define RPC_BUFFER_MAXSIZE (2048)
35#define RPC_BUFFER_POOLSIZE (8) 35#define RPC_BUFFER_POOLSIZE (8)
36#define RPC_TASK_POOLSIZE (8) 36#define RPC_TASK_POOLSIZE (8)
37static kmem_cache_t *rpc_task_slabp; 37static kmem_cache_t *rpc_task_slabp __read_mostly;
38static kmem_cache_t *rpc_buffer_slabp; 38static kmem_cache_t *rpc_buffer_slabp __read_mostly;
39static mempool_t *rpc_task_mempool; 39static mempool_t *rpc_task_mempool __read_mostly;
40static mempool_t *rpc_buffer_mempool; 40static mempool_t *rpc_buffer_mempool __read_mostly;
41 41
42static void __rpc_default_timer(struct rpc_task *task); 42static void __rpc_default_timer(struct rpc_task *task);
43static void rpciod_killall(void); 43static void rpciod_killall(void);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d0c3120d0233..05fe2e735538 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -34,7 +34,7 @@
34#include <net/sock.h> 34#include <net/sock.h>
35#include <net/checksum.h> 35#include <net/checksum.h>
36#include <net/ip.h> 36#include <net/ip.h>
37#include <net/tcp.h> 37#include <net/tcp_states.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/ioctls.h> 39#include <asm/ioctls.h>
40 40
@@ -584,13 +584,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
584 /* possibly an icmp error */ 584 /* possibly an icmp error */
585 dprintk("svc: recvfrom returned error %d\n", -err); 585 dprintk("svc: recvfrom returned error %d\n", -err);
586 } 586 }
587 if (skb->stamp.tv_sec == 0) { 587 if (skb->tstamp.off_sec == 0) {
588 skb->stamp.tv_sec = xtime.tv_sec; 588 struct timeval tv;
589 skb->stamp.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; 589
590 tv.tv_sec = xtime.tv_sec;
591 tv.tv_usec = xtime.tv_nsec * 1000;
592 skb_set_timestamp(skb, &tv);
590 /* Don't enable netstamp, sunrpc doesn't 593 /* Don't enable netstamp, sunrpc doesn't
591 need that much accuracy */ 594 need that much accuracy */
592 } 595 }
593 svsk->sk_sk->sk_stamp = skb->stamp; 596 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
594 set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */ 597 set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
595 598
596 /* 599 /*
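The same conversion recurs across this patch (af_packet, rxrpc, sctp, sunrpc): direct skb->stamp access gives way to the offset-encoded skb->tstamp and its accessor helpers. A hedged sketch showing the accessors together:

	#include <linux/skbuff.h>

	/* Sketch only: reading and setting timestamps through the new
	 * helpers instead of touching skb->stamp directly. */
	static void example_stamp(struct sk_buff *skb)
	{
		struct timeval tv;

		if (skb->tstamp.off_sec == 0)
			__net_timestamp(skb);	/* stamp now if unset */
		skb_get_timestamp(skb, &tv);	/* decode into a timeval */
		/* skb_set_timestamp(skb, &tv) is the inverse. */
	}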
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 3f6e31069c54..c5241fcbb966 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -17,17 +17,15 @@
17#include <linux/sysctl.h> 17#include <linux/sysctl.h>
18 18
19#ifdef CONFIG_INET 19#ifdef CONFIG_INET
20extern struct ctl_table ipv4_table[]; 20#include <net/ip.h>
21#endif 21#endif
22 22
23extern struct ctl_table core_table[];
24
25#ifdef CONFIG_NET 23#ifdef CONFIG_NET
26extern struct ctl_table ether_table[]; 24#include <linux/if_ether.h>
27#endif 25#endif
28 26
29#ifdef CONFIG_TR 27#ifdef CONFIG_TR
30extern struct ctl_table tr_table[]; 28#include <linux/if_tr.h>
31#endif 29#endif
32 30
33struct ctl_table net_table[] = { 31struct ctl_table net_table[] = {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d403e34088ad..41feca3bef86 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -105,7 +105,7 @@
105#include <linux/skbuff.h> 105#include <linux/skbuff.h>
106#include <linux/netdevice.h> 106#include <linux/netdevice.h>
107#include <net/sock.h> 107#include <net/sock.h>
108#include <linux/tcp.h> 108#include <net/tcp_states.h>
109#include <net/af_unix.h> 109#include <net/af_unix.h>
110#include <linux/proc_fs.h> 110#include <linux/proc_fs.h>
111#include <linux/seq_file.h> 111#include <linux/seq_file.h>
@@ -2026,14 +2026,6 @@ static struct net_proto_family unix_family_ops = {
2026 .owner = THIS_MODULE, 2026 .owner = THIS_MODULE,
2027}; 2027};
2028 2028
2029#ifdef CONFIG_SYSCTL
2030extern void unix_sysctl_register(void);
2031extern void unix_sysctl_unregister(void);
2032#else
2033static inline void unix_sysctl_register(void) {}
2034static inline void unix_sysctl_unregister(void) {}
2035#endif
2036
2037static int __init af_unix_init(void) 2029static int __init af_unix_init(void)
2038{ 2030{
2039 int rc = -1; 2031 int rc = -1;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 4bd95c8f5934..6ffc64e1712d 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -76,11 +76,11 @@
76#include <linux/netdevice.h> 76#include <linux/netdevice.h>
77#include <linux/file.h> 77#include <linux/file.h>
78#include <linux/proc_fs.h> 78#include <linux/proc_fs.h>
79#include <linux/tcp.h>
80 79
81#include <net/sock.h> 80#include <net/sock.h>
82#include <net/af_unix.h> 81#include <net/af_unix.h>
83#include <net/scm.h> 82#include <net/scm.h>
83#include <net/tcp_states.h>
84 84
85/* Internal data structures and random procedures: */ 85/* Internal data structures and random procedures: */
86 86
@@ -286,16 +286,16 @@ void unix_gc(void)
286 skb = skb_peek(&s->sk_receive_queue); 286 skb = skb_peek(&s->sk_receive_queue);
287 while (skb && 287 while (skb &&
288 skb != (struct sk_buff *)&s->sk_receive_queue) { 288 skb != (struct sk_buff *)&s->sk_receive_queue) {
289 nextsk=skb->next; 289 nextsk = skb->next;
290 /* 290 /*
291 * Do we have file descriptors ? 291 * Do we have file descriptors ?
292 */ 292 */
293 if(UNIXCB(skb).fp) 293 if (UNIXCB(skb).fp) {
294 { 294 __skb_unlink(skb,
295 __skb_unlink(skb, skb->list); 295 &s->sk_receive_queue);
296 __skb_queue_tail(&hitlist,skb); 296 __skb_queue_tail(&hitlist, skb);
297 } 297 }
298 skb=nextsk; 298 skb = nextsk;
299 } 299 }
300 spin_unlock(&s->sk_receive_queue.lock); 300 spin_unlock(&s->sk_receive_queue.lock);
301 } 301 }
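Besides the whitespace cleanup, this hunk tracks the removal of the skb->list backpointer: __skb_unlink() can no longer derive the owning queue from the skb, so the caller names it explicitly. A sketch, assuming queue points at the socket's receive queue and the caller holds its lock (required for the __-prefixed variant):

struct sk_buff_head *queue = &s->sk_receive_queue;

spin_lock(&queue->lock);
__skb_unlink(skb, queue);        /* second argument is now the queue */
__skb_queue_tail(&hitlist, skb);
spin_unlock(&queue->lock);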
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index c974dac4580a..690ffa5d5bfb 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -12,7 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/sysctl.h> 13#include <linux/sysctl.h>
14 14
15extern int sysctl_unix_max_dgram_qlen; 15#include <net/af_unix.h>
16 16
17static ctl_table unix_table[] = { 17static ctl_table unix_table[] = {
18 { 18 {
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index d93b19faaab7..596cb96e5f47 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -57,7 +57,7 @@
57#include <linux/wanpipe.h> 57#include <linux/wanpipe.h>
58#include <linux/if_wanpipe.h> 58#include <linux/if_wanpipe.h>
59#include <linux/pkt_sched.h> 59#include <linux/pkt_sched.h>
60#include <linux/tcp.h> 60#include <net/tcp_states.h>
61#include <linux/if_wanpipe_common.h> 61#include <linux/if_wanpipe_common.h>
62#include <linux/sdla_x25.h> 62#include <linux/sdla_x25.h>
63 63
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 04bec047fa9a..020d73cc8414 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -47,7 +47,7 @@
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/skbuff.h> 48#include <linux/skbuff.h>
49#include <net/sock.h> 49#include <net/sock.h>
50#include <net/tcp.h> 50#include <net/tcp_states.h>
51#include <asm/uaccess.h> 51#include <asm/uaccess.h>
52#include <linux/fcntl.h> 52#include <linux/fcntl.h>
53#include <linux/termios.h> /* For TIOCINQ/OUTQ */ 53#include <linux/termios.h> /* For TIOCINQ/OUTQ */
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 36fc3bf6d882..adfe7b8df355 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -81,7 +81,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
81} 81}
82 82
83int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, 83int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
84 struct packet_type *ptype) 84 struct packet_type *ptype, struct net_device *orig_dev)
85{ 85{
86 struct sk_buff *nskb; 86 struct sk_buff *nskb;
87 struct x25_neigh *nb; 87 struct x25_neigh *nb;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index b0197c70a9fc..26146874b839 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -28,7 +28,7 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <net/sock.h> 30#include <net/sock.h>
31#include <net/tcp.h> 31#include <net/tcp_states.h>
32#include <net/x25.h> 32#include <net/x25.h>
33 33
34static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) 34static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 7fd872ad0c20..8be9b8fbc24d 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -27,7 +27,7 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/tcp.h> 30#include <net/tcp_states.h>
31#include <net/x25.h> 31#include <net/x25.h>
32 32
33/* 33/*
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
80 if (!skb_prev) 80 if (!skb_prev)
81 skb_queue_head(&sk->sk_write_queue, skb); 81 skb_queue_head(&sk->sk_write_queue, skb);
82 else 82 else
83 skb_append(skb_prev, skb); 83 skb_append(skb_prev, skb, &sk->sk_write_queue);
84 skb_prev = skb; 84 skb_prev = skb;
85 } 85 }
86} 86}
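The skb_append() change above is the same skb->list removal seen in the af_unix garbage-collector hunk: with no backpointer on the skb, the destination queue becomes an explicit third argument.

/* Old: skb_append(skb_prev, skb);            queue inferred from skb */
skb_append(skb_prev, skb, &sk->sk_write_queue);  /* new: queue named  */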
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index d6a21a3ad80e..0a92e1da3922 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -23,7 +23,7 @@
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/timer.h> 24#include <linux/timer.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <net/tcp.h> 26#include <net/tcp_states.h>
27#include <net/x25.h> 27#include <net/x25.h>
28 28
29static void x25_heartbeat_expiry(unsigned long); 29static void x25_heartbeat_expiry(unsigned long);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index c58a6f05a0b6..2407a7072327 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
12#include <net/ip.h> 12#include <net/ip.h>
13#include <net/xfrm.h> 13#include <net/xfrm.h>
14 14
15static kmem_cache_t *secpath_cachep; 15static kmem_cache_t *secpath_cachep __read_mostly;
16 16
17void __secpath_destroy(struct sec_path *sp) 17void __secpath_destroy(struct sec_path *sp)
18{ 18{
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d65ed8684fc1..83c8135e1764 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -37,7 +37,7 @@ EXPORT_SYMBOL(xfrm_policy_list);
37static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 37static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
38static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 38static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
39 39
40static kmem_cache_t *xfrm_dst_cache; 40static kmem_cache_t *xfrm_dst_cache __read_mostly;
41 41
42static struct work_struct xfrm_policy_gc_work; 42static struct work_struct xfrm_policy_gc_work;
43static struct list_head xfrm_policy_gc_list = 43static struct list_head xfrm_policy_gc_list =
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8da3e25b2c4c..c35336a0f71b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1125,9 +1125,8 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
1125 if (build_expire(skb, x, c->data.hard) < 0) 1125 if (build_expire(skb, x, c->data.hard) < 0)
1126 BUG(); 1126 BUG();
1127 1127
1128 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE; 1128 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
1129 1129 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
1130 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
1131} 1130}
1132 1131
1133static int xfrm_notify_sa_flush(struct km_event *c) 1132static int xfrm_notify_sa_flush(struct km_event *c)
@@ -1152,7 +1151,8 @@ static int xfrm_notify_sa_flush(struct km_event *c)
1152 1151
1153 nlh->nlmsg_len = skb->tail - b; 1152 nlh->nlmsg_len = skb->tail - b;
1154 1153
1155 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC); 1154 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
1155 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
1156 1156
1157nlmsg_failure: 1157nlmsg_failure:
1158 kfree_skb(skb); 1158 kfree_skb(skb);
@@ -1226,7 +1226,8 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
1226 1226
1227 nlh->nlmsg_len = skb->tail - b; 1227 nlh->nlmsg_len = skb->tail - b;
1228 1228
1229 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_SA, GFP_ATOMIC); 1229 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
1230 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
1230 1231
1231nlmsg_failure: 1232nlmsg_failure:
1232rtattr_failure: 1233rtattr_failure:
@@ -1304,9 +1305,8 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
1304 if (build_acquire(skb, x, xt, xp, dir) < 0) 1305 if (build_acquire(skb, x, xt, xp, dir) < 0)
1305 BUG(); 1306 BUG();
1306 1307
1307 NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE; 1308 NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE;
1308 1309 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
1309 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
1310} 1310}
1311 1311
1312/* User gives us xfrm_user_policy_info followed by an array of 0 1312/* User gives us xfrm_user_policy_info followed by an array of 0
@@ -1405,9 +1405,8 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_eve
1405 if (build_polexpire(skb, xp, dir, c->data.hard) < 0) 1405 if (build_polexpire(skb, xp, dir, c->data.hard) < 0)
1406 BUG(); 1406 BUG();
1407 1407
1408 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE; 1408 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
1409 1409 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
1410 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
1411} 1410}
1412 1411
1413static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) 1412static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
@@ -1455,7 +1454,8 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
1455 1454
1456 nlh->nlmsg_len = skb->tail - b; 1455 nlh->nlmsg_len = skb->tail - b;
1457 1456
1458 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC); 1457 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
1458 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
1459 1459
1460nlmsg_failure: 1460nlmsg_failure:
1461rtattr_failure: 1461rtattr_failure:
@@ -1480,7 +1480,8 @@ static int xfrm_notify_policy_flush(struct km_event *c)
1480 1480
1481 nlh->nlmsg_len = skb->tail - b; 1481 nlh->nlmsg_len = skb->tail - b;
1482 1482
1483 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_POLICY, GFP_ATOMIC); 1483 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
1484 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
1484 1485
1485nlmsg_failure: 1486nlmsg_failure:
1486 kfree_skb(skb); 1487 kfree_skb(skb);
@@ -1519,7 +1520,8 @@ static int __init xfrm_user_init(void)
1519{ 1520{
1520 printk(KERN_INFO "Initializing IPsec netlink socket\n"); 1521 printk(KERN_INFO "Initializing IPsec netlink socket\n");
1521 1522
1522 xfrm_nl = netlink_kernel_create(NETLINK_XFRM, xfrm_netlink_rcv); 1523 xfrm_nl = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
1524 xfrm_netlink_rcv, THIS_MODULE);
1523 if (xfrm_nl == NULL) 1525 if (xfrm_nl == NULL)
1524 return -ENOMEM; 1526 return -ENOMEM;
1525 1527
@@ -1537,3 +1539,4 @@ static void __exit xfrm_user_exit(void)
1537module_init(xfrm_user_init); 1539module_init(xfrm_user_init);
1538module_exit(xfrm_user_exit); 1540module_exit(xfrm_user_exit);
1539MODULE_LICENSE("GPL"); 1541MODULE_LICENSE("GPL");
1542MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
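All the xfrm_user hunks above implement one netlink change: multicast destinations moved from a 32-bit group bitmask (XFRMGRP_*) to numbered groups (XFRMNLGRP_*). The sender records the group in the skb's control block, netlink_broadcast() takes the group number directly, and netlink_kernel_create() now declares how many groups the protocol uses and which module owns the socket. The new calling convention, as used above:

xfrm_nl = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
				xfrm_netlink_rcv, THIS_MODULE);

NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;      /* group number...  */
netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, /* ...not a mask  */
		  GFP_ATOMIC);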
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5180405c1a84..d8ee38aede26 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -341,6 +341,22 @@ static int do_of_entry (const char *filename, struct of_device_id *of, char *ali
341 return 1; 341 return 1;
342} 342}
343 343
344static int do_vio_entry(const char *filename, struct vio_device_id *vio,
345 char *alias)
346{
347 char *tmp;
348
349 sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*",
350 vio->compat[0] ? vio->compat : "*");
351
352 /* Replace all whitespace with underscores */
353 for (tmp = alias; tmp && *tmp; tmp++)
354 if (isspace (*tmp))
355 *tmp = '_';
356
357 return 1;
358}
359
344/* Ignore any prefix, eg. v850 prepends _ */ 360/* Ignore any prefix, eg. v850 prepends _ */
345static inline int sym_is(const char *symbol, const char *name) 361static inline int sym_is(const char *symbol, const char *name)
346{ 362{
@@ -422,6 +438,9 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
422 else if (sym_is(symname, "__mod_of_device_table")) 438 else if (sym_is(symname, "__mod_of_device_table"))
423 do_table(symval, sym->st_size, sizeof(struct of_device_id), 439 do_table(symval, sym->st_size, sizeof(struct of_device_id),
424 do_of_entry, mod); 440 do_of_entry, mod);
441 else if (sym_is(symname, "__mod_vio_device_table"))
442 do_table(symval, sym->st_size, sizeof(struct vio_device_id),
443 do_vio_entry, mod);
425 444
426} 445}
427 446
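The new do_vio_entry() teaches modpost to emit "vio:T<type>S<compat>" aliases so module autoloading works for ppc64 vio devices; an empty type or compat becomes "*", and any whitespace is rewritten to '_'. A hypothetical driver-side example of what it consumes and produces:

static struct vio_device_id example_ids[] __devinitdata = {
	{ "network", "IBM,l-lan" },   /* { type, compat } - illustrative */
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, example_ids);

/* modpost then generates: MODULE_ALIAS("vio:TnetworkSIBM,l-lan"); */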
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2253f388234f..8641f8894b4c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -659,7 +659,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
659 return SECCLASS_NETLINK_ROUTE_SOCKET; 659 return SECCLASS_NETLINK_ROUTE_SOCKET;
660 case NETLINK_FIREWALL: 660 case NETLINK_FIREWALL:
661 return SECCLASS_NETLINK_FIREWALL_SOCKET; 661 return SECCLASS_NETLINK_FIREWALL_SOCKET;
662 case NETLINK_TCPDIAG: 662 case NETLINK_INET_DIAG:
663 return SECCLASS_NETLINK_TCPDIAG_SOCKET; 663 return SECCLASS_NETLINK_TCPDIAG_SOCKET;
664 case NETLINK_NFLOG: 664 case NETLINK_NFLOG:
665 return SECCLASS_NETLINK_NFLOG_SOCKET; 665 return SECCLASS_NETLINK_NFLOG_SOCKET;
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 18d08acafa78..e203883406dd 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -80,7 +80,8 @@ static void selnl_notify(int msgtype, void *data)
80 nlh = NLMSG_PUT(skb, 0, 0, msgtype, len); 80 nlh = NLMSG_PUT(skb, 0, 0, msgtype, len);
81 selnl_add_payload(nlh, len, msgtype, data); 81 selnl_add_payload(nlh, len, msgtype, data);
82 nlh->nlmsg_len = skb->tail - tmp; 82 nlh->nlmsg_len = skb->tail - tmp;
83 netlink_broadcast(selnl, skb, 0, SELNL_GRP_AVC, GFP_USER); 83 NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
84 netlink_broadcast(selnl, skb, 0, SELNLGRP_AVC, GFP_USER);
84out: 85out:
85 return; 86 return;
86 87
@@ -103,7 +104,8 @@ void selnl_notify_policyload(u32 seqno)
103 104
104static int __init selnl_init(void) 105static int __init selnl_init(void)
105{ 106{
106 selnl = netlink_kernel_create(NETLINK_SELINUX, NULL); 107 selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL,
108 THIS_MODULE);
107 if (selnl == NULL) 109 if (selnl == NULL)
108 panic("SELinux: Cannot create netlink socket."); 110 panic("SELinux: Cannot create netlink socket.");
109 netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV); 111 netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 92b057becb4b..69b9329b2054 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -16,7 +16,7 @@
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/if.h> 17#include <linux/if.h>
18#include <linux/netfilter_ipv4/ip_queue.h> 18#include <linux/netfilter_ipv4/ip_queue.h>
19#include <linux/tcp_diag.h> 19#include <linux/inet_diag.h>
20#include <linux/xfrm.h> 20#include <linux/xfrm.h>
21#include <linux/audit.h> 21#include <linux/audit.h>
22 22
@@ -76,6 +76,7 @@ static struct nlmsg_perm nlmsg_firewall_perms[] =
76static struct nlmsg_perm nlmsg_tcpdiag_perms[] = 76static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
77{ 77{
78 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 78 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
79 { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
79}; 80};
80 81
81static struct nlmsg_perm nlmsg_xfrm_perms[] = 82static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 46052304e230..29450befb5da 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -132,9 +132,9 @@ static void pxa2xx_ac97_reset(ac97_t *ac97)
132 udelay(10); 132 udelay(10);
133 GCR |= GCR_WARM_RST; 133 GCR |= GCR_WARM_RST;
134 pxa_gpio_mode(113 | GPIO_ALT_FN_2_OUT); 134 pxa_gpio_mode(113 | GPIO_ALT_FN_2_OUT);
135 udelay(50); 135 udelay(500);
136#else 136#else
137 GCR |= GCR_WARM_RST|GCR_PRIRDY_IEN|GCR_SECRDY_IEN;; 137 GCR |= GCR_WARM_RST|GCR_PRIRDY_IEN|GCR_SECRDY_IEN;
138 wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1); 138 wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
139#endif 139#endif
140 140
@@ -261,7 +261,7 @@ static int pxa2xx_ac97_do_suspend(snd_card_t *card, unsigned int state)
261 return 0; 261 return 0;
262} 262}
263 263
264static int pxa2xx_ac97_do_resume(snd_card_t *card, unsigned int state) 264static int pxa2xx_ac97_do_resume(snd_card_t *card)
265{ 265{
266 if (card->power_state != SNDRV_CTL_POWER_D0) { 266 if (card->power_state != SNDRV_CTL_POWER_D0) {
267 pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data; 267 pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;
@@ -275,13 +275,13 @@ static int pxa2xx_ac97_do_resume(snd_card_t *card, unsigned int state)
275 return 0; 275 return 0;
276} 276}
277 277
278static int pxa2xx_ac97_suspend(struct device *_dev, u32 state, u32 level) 278static int pxa2xx_ac97_suspend(struct device *_dev, pm_message_t state, u32 level)
279{ 279{
280 snd_card_t *card = dev_get_drvdata(_dev); 280 snd_card_t *card = dev_get_drvdata(_dev);
281 int ret = 0; 281 int ret = 0;
282 282
283 if (card && level == SUSPEND_DISABLE) 283 if (card && level == SUSPEND_DISABLE)
284 ret = pxa2xx_ac97_do_suspend(card, SNDRV_CTL_POWER_D3cold); 284 ret = pxa2xx_ac97_do_suspend(card, PMSG_SUSPEND);
285 285
286 return ret; 286 return ret;
287} 287}
@@ -292,7 +292,7 @@ static int pxa2xx_ac97_resume(struct device *_dev, u32 level)
292 int ret = 0; 292 int ret = 0;
293 293
294 if (card && level == RESUME_ENABLE) 294 if (card && level == RESUME_ENABLE)
295 ret = pxa2xx_ac97_do_resume(card, SNDRV_CTL_POWER_D0); 295 ret = pxa2xx_ac97_do_resume(card);
296 296
297 return ret; 297 return ret;
298} 298}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 02132561c3f8..39a54a415528 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -512,7 +512,7 @@ static void free_all_reserved_pages(void)
512 * proc file interface 512 * proc file interface
513 */ 513 */
514#define SND_MEM_PROC_FILE "driver/snd-page-alloc" 514#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
515struct proc_dir_entry *snd_mem_proc; 515static struct proc_dir_entry *snd_mem_proc;
516 516
517static int snd_mem_proc_read(char *page, char **start, off_t off, 517static int snd_mem_proc_read(char *page, char **start, off_t off,
518 int count, int *eof, void *data) 518 int count, int *eof, void *data)
@@ -655,8 +655,7 @@ static int __init snd_mem_init(void)
655 655
656static void __exit snd_mem_exit(void) 656static void __exit snd_mem_exit(void)
657{ 657{
658 if (snd_mem_proc) 658 remove_proc_entry(SND_MEM_PROC_FILE, NULL);
659 remove_proc_entry(SND_MEM_PROC_FILE, NULL);
660 free_all_reserved_pages(); 659 free_all_reserved_pages();
661 if (snd_allocated_pages > 0) 660 if (snd_allocated_pages > 0)
662 printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages); 661 printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
diff --git a/sound/core/memory.c b/sound/core/memory.c
index f6895577bf86..1622893d00a2 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -56,7 +56,7 @@ static DEFINE_SPINLOCK(snd_alloc_vmalloc_lock);
56#define VMALLOC_MAGIC 0x87654320 56#define VMALLOC_MAGIC 0x87654320
57static snd_info_entry_t *snd_memory_info_entry; 57static snd_info_entry_t *snd_memory_info_entry;
58 58
59void snd_memory_init(void) 59void __init snd_memory_init(void)
60{ 60{
61 snd_alloc_kmalloc = 0; 61 snd_alloc_kmalloc = 0;
62 snd_alloc_vmalloc = 0; 62 snd_alloc_vmalloc = 0;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index de7444c586f9..a13bd7bb4c9f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1705,13 +1705,12 @@ static int snd_pcm_oss_release_file(snd_pcm_oss_file_t *pcm_oss_file)
1705 if (snd_pcm_running(substream)) 1705 if (snd_pcm_running(substream))
1706 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); 1706 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1707 snd_pcm_stream_unlock_irq(substream); 1707 snd_pcm_stream_unlock_irq(substream);
1708 if (substream->open_flag) { 1708 if (substream->ffile != NULL) {
1709 if (substream->ops->hw_free != NULL) 1709 if (substream->ops->hw_free != NULL)
1710 substream->ops->hw_free(substream); 1710 substream->ops->hw_free(substream);
1711 substream->ops->close(substream); 1711 substream->ops->close(substream);
1712 substream->open_flag = 0; 1712 substream->ffile = NULL;
1713 } 1713 }
1714 substream->ffile = NULL;
1715 snd_pcm_oss_release_substream(substream); 1714 snd_pcm_oss_release_substream(substream);
1716 snd_pcm_release_substream(substream); 1715 snd_pcm_release_substream(substream);
1717 } 1716 }
@@ -1778,14 +1777,13 @@ static int snd_pcm_oss_open_file(struct file *file,
1778 snd_pcm_oss_release_file(pcm_oss_file); 1777 snd_pcm_oss_release_file(pcm_oss_file);
1779 return err; 1778 return err;
1780 } 1779 }
1781 psubstream->open_flag = 1; 1780 psubstream->ffile = file;
1782 err = snd_pcm_hw_constraints_complete(psubstream); 1781 err = snd_pcm_hw_constraints_complete(psubstream);
1783 if (err < 0) { 1782 if (err < 0) {
1784 snd_printd("snd_pcm_hw_constraint_complete failed\n"); 1783 snd_printd("snd_pcm_hw_constraint_complete failed\n");
1785 snd_pcm_oss_release_file(pcm_oss_file); 1784 snd_pcm_oss_release_file(pcm_oss_file);
1786 return err; 1785 return err;
1787 } 1786 }
1788 psubstream->ffile = file;
1789 snd_pcm_oss_init_substream(psubstream, psetup, minor); 1787 snd_pcm_oss_init_substream(psubstream, psetup, minor);
1790 } 1788 }
1791 if (csubstream != NULL) { 1789 if (csubstream != NULL) {
@@ -1800,14 +1798,13 @@ static int snd_pcm_oss_open_file(struct file *file,
1800 snd_pcm_oss_release_file(pcm_oss_file); 1798 snd_pcm_oss_release_file(pcm_oss_file);
1801 return err; 1799 return err;
1802 } 1800 }
1803 csubstream->open_flag = 1; 1801 csubstream->ffile = file;
1804 err = snd_pcm_hw_constraints_complete(csubstream); 1802 err = snd_pcm_hw_constraints_complete(csubstream);
1805 if (err < 0) { 1803 if (err < 0) {
1806 snd_printd("snd_pcm_hw_constraint_complete failed\n"); 1804 snd_printd("snd_pcm_hw_constraint_complete failed\n");
1807 snd_pcm_oss_release_file(pcm_oss_file); 1805 snd_pcm_oss_release_file(pcm_oss_file);
1808 return err; 1806 return err;
1809 } 1807 }
1810 csubstream->ffile = file;
1811 snd_pcm_oss_init_substream(csubstream, csetup, minor); 1808 snd_pcm_oss_init_substream(csubstream, csetup, minor);
1812 } 1809 }
1813 1810
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 3920bf0eebbf..4b6307df846d 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -103,10 +103,24 @@ struct sndrv_pcm_sw_params32 {
103 unsigned char reserved[64]; 103 unsigned char reserved[64];
104}; 104};
105 105
106/* recalculate the boundary within 32bit */
107static snd_pcm_uframes_t recalculate_boundary(snd_pcm_runtime_t *runtime)
108{
109 snd_pcm_uframes_t boundary;
110
111 if (! runtime->buffer_size)
112 return 0;
113 boundary = runtime->buffer_size;
114 while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
115 boundary *= 2;
116 return boundary;
117}
118
106static int snd_pcm_ioctl_sw_params_compat(snd_pcm_substream_t *substream, 119static int snd_pcm_ioctl_sw_params_compat(snd_pcm_substream_t *substream,
107 struct sndrv_pcm_sw_params32 __user *src) 120 struct sndrv_pcm_sw_params32 __user *src)
108{ 121{
109 snd_pcm_sw_params_t params; 122 snd_pcm_sw_params_t params;
123 snd_pcm_uframes_t boundary;
110 int err; 124 int err;
111 125
112 memset(&params, 0, sizeof(params)); 126 memset(&params, 0, sizeof(params));
@@ -120,10 +134,17 @@ static int snd_pcm_ioctl_sw_params_compat(snd_pcm_substream_t *substream,
120 get_user(params.silence_threshold, &src->silence_threshold) || 134 get_user(params.silence_threshold, &src->silence_threshold) ||
121 get_user(params.silence_size, &src->silence_size)) 135 get_user(params.silence_size, &src->silence_size))
122 return -EFAULT; 136 return -EFAULT;
137 /*
 138 * Check silence_size parameter. Since we have 64bit boundary,
139 * silence_size must be compared with the 32bit boundary.
140 */
141 boundary = recalculate_boundary(substream->runtime);
142 if (boundary && params.silence_size >= boundary)
143 params.silence_size = substream->runtime->boundary;
123 err = snd_pcm_sw_params(substream, &params); 144 err = snd_pcm_sw_params(substream, &params);
124 if (err < 0) 145 if (err < 0)
125 return err; 146 return err;
126 if (put_user(params.boundary, &src->boundary)) 147 if (boundary && put_user(boundary, &src->boundary))
127 return -EFAULT; 148 return -EFAULT;
128 return err; 149 return err;
129} 150}
@@ -199,16 +220,6 @@ static int snd_pcm_status_user_compat(snd_pcm_substream_t *substream,
199 return err; 220 return err;
200} 221}
201 222
202/* recalcuate the boundary within 32bit */ 202/* recalculate the boundary within 32bit */
203static void recalculate_boundary(snd_pcm_runtime_t *runtime)
204{
205 if (! runtime->buffer_size)
206 return;
207 runtime->boundary = runtime->buffer_size;
208 while (runtime->boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
209 runtime->boundary *= 2;
210}
211
212/* both for HW_PARAMS and HW_REFINE */ 223/* both for HW_PARAMS and HW_REFINE */
213static int snd_pcm_ioctl_hw_params_compat(snd_pcm_substream_t *substream, 224static int snd_pcm_ioctl_hw_params_compat(snd_pcm_substream_t *substream,
214 int refine, 225 int refine,
@@ -241,8 +252,11 @@ static int snd_pcm_ioctl_hw_params_compat(snd_pcm_substream_t *substream,
241 goto error; 252 goto error;
242 } 253 }
243 254
244 if (! refine) 255 if (! refine) {
245 recalculate_boundary(runtime); 256 unsigned int new_boundary = recalculate_boundary(runtime);
257 if (new_boundary)
258 runtime->boundary = new_boundary;
259 }
246 error: 260 error:
247 kfree(data); 261 kfree(data);
248 return err; 262 return err;
@@ -380,6 +394,7 @@ static int snd_pcm_ioctl_sync_ptr_compat(snd_pcm_substream_t *substream,
380 u32 sflags; 394 u32 sflags;
381 struct sndrv_pcm_mmap_control scontrol; 395 struct sndrv_pcm_mmap_control scontrol;
382 struct sndrv_pcm_mmap_status sstatus; 396 struct sndrv_pcm_mmap_status sstatus;
397 snd_pcm_uframes_t boundary;
383 int err; 398 int err;
384 399
385 snd_assert(runtime, return -EINVAL); 400 snd_assert(runtime, return -EINVAL);
@@ -395,17 +410,21 @@ static int snd_pcm_ioctl_sync_ptr_compat(snd_pcm_substream_t *substream,
395 } 410 }
396 status = runtime->status; 411 status = runtime->status;
397 control = runtime->control; 412 control = runtime->control;
413 boundary = recalculate_boundary(runtime);
414 if (! boundary)
415 boundary = 0x7fffffff;
398 snd_pcm_stream_lock_irq(substream); 416 snd_pcm_stream_lock_irq(substream);
417 /* FIXME: we should consider the boundary for the sync from app */
399 if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) 418 if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
400 control->appl_ptr = scontrol.appl_ptr; 419 control->appl_ptr = scontrol.appl_ptr;
401 else 420 else
402 scontrol.appl_ptr = control->appl_ptr; 421 scontrol.appl_ptr = control->appl_ptr % boundary;
403 if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) 422 if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
404 control->avail_min = scontrol.avail_min; 423 control->avail_min = scontrol.avail_min;
405 else 424 else
406 scontrol.avail_min = control->avail_min; 425 scontrol.avail_min = control->avail_min;
407 sstatus.state = status->state; 426 sstatus.state = status->state;
408 sstatus.hw_ptr = status->hw_ptr; 427 sstatus.hw_ptr = status->hw_ptr % boundary;
409 sstatus.tstamp = status->tstamp; 428 sstatus.tstamp = status->tstamp;
410 sstatus.suspended_state = status->suspended_state; 429 sstatus.suspended_state = status->suspended_state;
411 snd_pcm_stream_unlock_irq(substream); 430 snd_pcm_stream_unlock_irq(substream);
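recalculate_boundary() now returns the 32-bit boundary instead of overwriting runtime->boundary, so the compat layer can clamp silence_size, report a boundary that fits in 32 bits, and wrap appl_ptr/hw_ptr for SYNC_PTR without disturbing the native 64-bit value. The loop picks the largest power-of-two multiple of buffer_size that still leaves room for one more buffer below 0x7fffffff; a worked example with a 16384-frame buffer:

snd_pcm_uframes_t b = recalculate_boundary(runtime);
/* buffer_size = 16384: doubling stops at 1073741824 (0x40000000),
 * because 1073741824 * 2 = 2147483648 > 0x7fffffff - 16384.
 * b = 0x40000000 is what a 32-bit caller sees, while the native
 * runtime->boundary may be a much larger 64-bit multiple. */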
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index c5bfd0918cff..0082914a7e33 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1584,8 +1584,8 @@ int snd_pcm_hw_param_set(snd_pcm_t *pcm, snd_pcm_hw_params_t *params,
1584 return snd_pcm_hw_param_value(params, var, NULL); 1584 return snd_pcm_hw_param_value(params, var, NULL);
1585} 1585}
1586 1586
1587int _snd_pcm_hw_param_mask(snd_pcm_hw_params_t *params, 1587static int _snd_pcm_hw_param_mask(snd_pcm_hw_params_t *params,
1588 snd_pcm_hw_param_t var, const snd_mask_t *val) 1588 snd_pcm_hw_param_t var, const snd_mask_t *val)
1589{ 1589{
1590 int changed; 1590 int changed;
1591 assert(hw_is_mask(var)); 1591 assert(hw_is_mask(var));
@@ -2063,7 +2063,7 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
2063 if (((avail < runtime->control->avail_min && size > avail) || 2063 if (((avail < runtime->control->avail_min && size > avail) ||
2064 (size >= runtime->xfer_align && avail < runtime->xfer_align))) { 2064 (size >= runtime->xfer_align && avail < runtime->xfer_align))) {
2065 wait_queue_t wait; 2065 wait_queue_t wait;
2066 enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state; 2066 enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED, DROPPED } state;
2067 long tout; 2067 long tout;
2068 2068
2069 if (nonblock) { 2069 if (nonblock) {
@@ -2097,6 +2097,9 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
2097 case SNDRV_PCM_STATE_SUSPENDED: 2097 case SNDRV_PCM_STATE_SUSPENDED:
2098 state = SUSPENDED; 2098 state = SUSPENDED;
2099 goto _end_loop; 2099 goto _end_loop;
2100 case SNDRV_PCM_STATE_SETUP:
2101 state = DROPPED;
2102 goto _end_loop;
2100 default: 2103 default:
2101 break; 2104 break;
2102 } 2105 }
@@ -2123,6 +2126,9 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(snd_pcm_substream_t *substream,
2123 snd_printd("playback write error (DMA or IRQ trouble?)\n"); 2126 snd_printd("playback write error (DMA or IRQ trouble?)\n");
2124 err = -EIO; 2127 err = -EIO;
2125 goto _end_unlock; 2128 goto _end_unlock;
2129 case DROPPED:
2130 err = -EBADFD;
2131 goto _end_unlock;
2126 default: 2132 default:
2127 break; 2133 break;
2128 } 2134 }
@@ -2359,7 +2365,7 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(snd_pcm_substream_t *substream,
2359 } else if ((avail < runtime->control->avail_min && size > avail) || 2365 } else if ((avail < runtime->control->avail_min && size > avail) ||
2360 (size >= runtime->xfer_align && avail < runtime->xfer_align)) { 2366 (size >= runtime->xfer_align && avail < runtime->xfer_align)) {
2361 wait_queue_t wait; 2367 wait_queue_t wait;
2362 enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state; 2368 enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED, DROPPED } state;
2363 long tout; 2369 long tout;
2364 2370
2365 if (nonblock) { 2371 if (nonblock) {
@@ -2394,6 +2400,9 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(snd_pcm_substream_t *substream,
2394 goto _end_loop; 2400 goto _end_loop;
2395 case SNDRV_PCM_STATE_DRAINING: 2401 case SNDRV_PCM_STATE_DRAINING:
2396 goto __draining; 2402 goto __draining;
2403 case SNDRV_PCM_STATE_SETUP:
2404 state = DROPPED;
2405 goto _end_loop;
2397 default: 2406 default:
2398 break; 2407 break;
2399 } 2408 }
@@ -2420,6 +2429,9 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(snd_pcm_substream_t *substream,
2420 snd_printd("capture read error (DMA or IRQ trouble?)\n"); 2429 snd_printd("capture read error (DMA or IRQ trouble?)\n");
2421 err = -EIO; 2430 err = -EIO;
2422 goto _end_unlock; 2431 goto _end_unlock;
2432 case DROPPED:
2433 err = -EBADFD;
2434 goto _end_unlock;
2423 default: 2435 default:
2424 break; 2436 break;
2425 } 2437 }
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 10c2c9832649..03c17159dd8e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1025,7 +1025,7 @@ static void snd_pcm_post_suspend(snd_pcm_substream_t *substream, int state)
1025 snd_pcm_runtime_t *runtime = substream->runtime; 1025 snd_pcm_runtime_t *runtime = substream->runtime;
1026 snd_pcm_trigger_tstamp(substream); 1026 snd_pcm_trigger_tstamp(substream);
1027 if (substream->timer) 1027 if (substream->timer)
1028 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MPAUSE, &runtime->trigger_tstamp); 1028 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND, &runtime->trigger_tstamp);
1029 runtime->status->suspended_state = runtime->status->state; 1029 runtime->status->suspended_state = runtime->status->state;
1030 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED; 1030 runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
1031 snd_pcm_tick_set(substream, 0); 1031 snd_pcm_tick_set(substream, 0);
@@ -1115,7 +1115,7 @@ static void snd_pcm_post_resume(snd_pcm_substream_t *substream, int state)
1115 snd_pcm_runtime_t *runtime = substream->runtime; 1115 snd_pcm_runtime_t *runtime = substream->runtime;
1116 snd_pcm_trigger_tstamp(substream); 1116 snd_pcm_trigger_tstamp(substream);
1117 if (substream->timer) 1117 if (substream->timer)
1118 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MCONTINUE, &runtime->trigger_tstamp); 1118 snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME, &runtime->trigger_tstamp);
1119 runtime->status->state = runtime->status->suspended_state; 1119 runtime->status->state = runtime->status->suspended_state;
1120 if (runtime->sleep_min) 1120 if (runtime->sleep_min)
1121 snd_pcm_tick_prepare(substream); 1121 snd_pcm_tick_prepare(substream);
@@ -1967,13 +1967,12 @@ static int snd_pcm_release_file(snd_pcm_file_t * pcm_file)
1967 runtime = substream->runtime; 1967 runtime = substream->runtime;
1968 str = substream->pstr; 1968 str = substream->pstr;
1969 snd_pcm_unlink(substream); 1969 snd_pcm_unlink(substream);
1970 if (substream->open_flag) { 1970 if (substream->ffile != NULL) {
1971 if (substream->ops->hw_free != NULL) 1971 if (substream->ops->hw_free != NULL)
1972 substream->ops->hw_free(substream); 1972 substream->ops->hw_free(substream);
1973 substream->ops->close(substream); 1973 substream->ops->close(substream);
1974 substream->open_flag = 0; 1974 substream->ffile = NULL;
1975 } 1975 }
1976 substream->ffile = NULL;
1977 snd_pcm_remove_file(str, pcm_file); 1976 snd_pcm_remove_file(str, pcm_file);
1978 snd_pcm_release_substream(substream); 1977 snd_pcm_release_substream(substream);
1979 kfree(pcm_file); 1978 kfree(pcm_file);
@@ -2022,18 +2021,15 @@ static int snd_pcm_open_file(struct file *file,
2022 snd_pcm_release_file(pcm_file); 2021 snd_pcm_release_file(pcm_file);
2023 return err; 2022 return err;
2024 } 2023 }
2025 substream->open_flag = 1; 2024 substream->ffile = file;
2026 2025
2027 err = snd_pcm_hw_constraints_complete(substream); 2026 err = snd_pcm_hw_constraints_complete(substream);
2028 if (err < 0) { 2027 if (err < 0) {
2029 snd_printd("snd_pcm_hw_constraints_complete failed\n"); 2028 snd_printd("snd_pcm_hw_constraints_complete failed\n");
2030 substream->ops->close(substream);
2031 snd_pcm_release_file(pcm_file); 2029 snd_pcm_release_file(pcm_file);
2032 return err; 2030 return err;
2033 } 2031 }
2034 2032
2035 substream->ffile = file;
2036
2037 file->private_data = pcm_file; 2033 file->private_data = pcm_file;
2038 *rpcm_file = pcm_file; 2034 *rpcm_file = pcm_file;
2039 return 0; 2035 return 0;
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index de39d212bc15..e401c6703297 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -98,6 +98,7 @@ int snd_register_oss_device(int type, snd_card_t * card, int dev, snd_minor_t *
98 int cidx = SNDRV_MINOR_OSS_CARD(minor); 98 int cidx = SNDRV_MINOR_OSS_CARD(minor);
99 int track2 = -1; 99 int track2 = -1;
100 int register1 = -1, register2 = -1; 100 int register1 = -1, register2 = -1;
101 struct device *carddev = NULL;
101 102
102 if (minor < 0) 103 if (minor < 0)
103 return minor; 104 return minor;
@@ -121,11 +122,13 @@ int snd_register_oss_device(int type, snd_card_t * card, int dev, snd_minor_t *
121 track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1); 122 track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1);
122 break; 123 break;
123 } 124 }
124 register1 = register_sound_special(reg->f_ops, minor); 125 if (card)
126 carddev = card->dev;
127 register1 = register_sound_special_device(reg->f_ops, minor, carddev);
125 if (register1 != minor) 128 if (register1 != minor)
126 goto __end; 129 goto __end;
127 if (track2 >= 0) { 130 if (track2 >= 0) {
128 register2 = register_sound_special(reg->f_ops, track2); 131 register2 = register_sound_special_device(reg->f_ops, track2, carddev);
129 if (register2 != track2) 132 if (register2 != track2)
130 goto __end; 133 goto __end;
131 } 134 }
diff --git a/sound/core/timer.c b/sound/core/timer.c
index cfaccd415b3b..4104f6e292e9 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -799,13 +799,13 @@ static int snd_timer_free(snd_timer_t *timer)
799 return 0; 799 return 0;
800} 800}
801 801
802int snd_timer_dev_free(snd_device_t *device) 802static int snd_timer_dev_free(snd_device_t *device)
803{ 803{
804 snd_timer_t *timer = device->device_data; 804 snd_timer_t *timer = device->device_data;
805 return snd_timer_free(timer); 805 return snd_timer_free(timer);
806} 806}
807 807
808int snd_timer_dev_register(snd_device_t *dev) 808static int snd_timer_dev_register(snd_device_t *dev)
809{ 809{
810 snd_timer_t *timer = dev->device_data; 810 snd_timer_t *timer = dev->device_data;
811 snd_timer_t *timer1; 811 snd_timer_t *timer1;
@@ -880,9 +880,11 @@ void snd_timer_notify(snd_timer_t *timer, enum sndrv_timer_event event, struct t
880 struct list_head *p, *n; 880 struct list_head *p, *n;
881 881
882 snd_runtime_check(timer->hw.flags & SNDRV_TIMER_HW_SLAVE, return); 882 snd_runtime_check(timer->hw.flags & SNDRV_TIMER_HW_SLAVE, return);
883 snd_assert(event >= SNDRV_TIMER_EVENT_MSTART && event <= SNDRV_TIMER_EVENT_MPAUSE, return); 883 snd_assert(event >= SNDRV_TIMER_EVENT_MSTART && event <= SNDRV_TIMER_EVENT_MRESUME, return);
884 spin_lock_irqsave(&timer->lock, flags); 884 spin_lock_irqsave(&timer->lock, flags);
885 if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE) { 885 if (event == SNDRV_TIMER_EVENT_MSTART ||
886 event == SNDRV_TIMER_EVENT_MCONTINUE ||
887 event == SNDRV_TIMER_EVENT_MRESUME) {
886 if (timer->hw.c_resolution) 888 if (timer->hw.c_resolution)
887 resolution = timer->hw.c_resolution(timer); 889 resolution = timer->hw.c_resolution(timer);
888 else 890 else
@@ -1555,10 +1557,14 @@ static int snd_timer_user_params(struct file *file, snd_timer_params_t __user *_
1555 (1<<SNDRV_TIMER_EVENT_STOP)| 1557 (1<<SNDRV_TIMER_EVENT_STOP)|
1556 (1<<SNDRV_TIMER_EVENT_CONTINUE)| 1558 (1<<SNDRV_TIMER_EVENT_CONTINUE)|
1557 (1<<SNDRV_TIMER_EVENT_PAUSE)| 1559 (1<<SNDRV_TIMER_EVENT_PAUSE)|
1560 (1<<SNDRV_TIMER_EVENT_SUSPEND)|
1561 (1<<SNDRV_TIMER_EVENT_RESUME)|
1558 (1<<SNDRV_TIMER_EVENT_MSTART)| 1562 (1<<SNDRV_TIMER_EVENT_MSTART)|
1559 (1<<SNDRV_TIMER_EVENT_MSTOP)| 1563 (1<<SNDRV_TIMER_EVENT_MSTOP)|
1560 (1<<SNDRV_TIMER_EVENT_MCONTINUE)| 1564 (1<<SNDRV_TIMER_EVENT_MCONTINUE)|
1561 (1<<SNDRV_TIMER_EVENT_MPAUSE))) { 1565 (1<<SNDRV_TIMER_EVENT_MPAUSE)|
1566 (1<<SNDRV_TIMER_EVENT_MSUSPEND)|
1567 (1<<SNDRV_TIMER_EVENT_MRESUME))) {
1562 err = -EINVAL; 1568 err = -EINVAL;
1563 goto _end; 1569 goto _end;
1564 } 1570 }
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index f00c88886460..19fc68c23378 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -796,14 +796,14 @@ static int vx_iec958_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *ucontro
796 796
797static snd_kcontrol_new_t vx_control_iec958_mask = { 797static snd_kcontrol_new_t vx_control_iec958_mask = {
798 .access = SNDRV_CTL_ELEM_ACCESS_READ, 798 .access = SNDRV_CTL_ELEM_ACCESS_READ,
799 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 799 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
800 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), 800 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
801 .info = vx_iec958_info, /* shared */ 801 .info = vx_iec958_info, /* shared */
802 .get = vx_iec958_mask_get, 802 .get = vx_iec958_mask_get,
803}; 803};
804 804
805static snd_kcontrol_new_t vx_control_iec958 = { 805static snd_kcontrol_new_t vx_control_iec958 = {
806 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 806 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
807 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), 807 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
808 .info = vx_iec958_info, 808 .info = vx_iec958_info,
809 .get = vx_iec958_get, 809 .get = vx_iec958_get,
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index af381b15fe5c..d4becf44e247 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -549,8 +549,8 @@ static int vx_stop_stream(vx_core_t *chip, vx_pipe_t *pipe)
549 549
550static snd_pcm_hardware_t vx_pcm_playback_hw = { 550static snd_pcm_hardware_t vx_pcm_playback_hw = {
551 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 551 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
552 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID | 552 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/
553 SNDRV_PCM_INFO_RESUME), 553 /*SNDRV_PCM_INFO_RESUME*/),
554 .formats = /*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE, 554 .formats = /*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE,
555 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, 555 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
556 .rate_min = 5000, 556 .rate_min = 5000,
@@ -949,8 +949,8 @@ static snd_pcm_ops_t vx_pcm_playback_ops = {
949 949
950static snd_pcm_hardware_t vx_pcm_capture_hw = { 950static snd_pcm_hardware_t vx_pcm_capture_hw = {
951 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 951 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
952 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID | 952 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/
953 SNDRV_PCM_INFO_RESUME), 953 /*SNDRV_PCM_INFO_RESUME*/),
954 .formats = /*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE, 954 .formats = /*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE,
955 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, 955 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
956 .rate_min = 5000, 956 .rate_min = 5000,
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index 563296d02894..0eb442ca23d6 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -53,6 +53,7 @@ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */
53static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ 53static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */
54static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ 54static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
55static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ 55static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
56static int clockfreq[SNDRV_CARDS];
56 57
57module_param_array(index, int, NULL, 0444); 58module_param_array(index, int, NULL, 0444);
58MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard."); 59MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard.");
@@ -74,6 +75,8 @@ module_param_array(dma1, int, NULL, 0444);
74MODULE_PARM_DESC(dma1, "1st DMA # for ad1816a driver."); 75MODULE_PARM_DESC(dma1, "1st DMA # for ad1816a driver.");
75module_param_array(dma2, int, NULL, 0444); 76module_param_array(dma2, int, NULL, 0444);
76MODULE_PARM_DESC(dma2, "2nd DMA # for ad1816a driver."); 77MODULE_PARM_DESC(dma2, "2nd DMA # for ad1816a driver.");
78module_param_array(clockfreq, int, NULL, 0444);
79MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0).");
77 80
78struct snd_card_ad1816a { 81struct snd_card_ad1816a {
79 struct pnp_dev *dev; 82 struct pnp_dev *dev;
@@ -209,6 +212,8 @@ static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard
209 snd_card_free(card); 212 snd_card_free(card);
210 return error; 213 return error;
211 } 214 }
215 if (clockfreq[dev] >= 5000 && clockfreq[dev] <= 100000)
216 chip->clock_freq = clockfreq[dev];
212 217
213 strcpy(card->driver, "AD1816A"); 218 strcpy(card->driver, "AD1816A");
214 strcpy(card->shortname, "ADI SoundPort AD1816A"); 219 strcpy(card->shortname, "ADI SoundPort AD1816A");
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index 625b2eff14a1..ae860360ecf9 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -234,7 +234,7 @@ static int snd_ad1816a_playback_prepare(snd_pcm_substream_t *substream)
234 ad1816a_t *chip = snd_pcm_substream_chip(substream); 234 ad1816a_t *chip = snd_pcm_substream_chip(substream);
235 unsigned long flags; 235 unsigned long flags;
236 snd_pcm_runtime_t *runtime = substream->runtime; 236 snd_pcm_runtime_t *runtime = substream->runtime;
237 unsigned int size; 237 unsigned int size, rate;
238 238
239 spin_lock_irqsave(&chip->lock, flags); 239 spin_lock_irqsave(&chip->lock, flags);
240 240
@@ -245,7 +245,10 @@ static int snd_ad1816a_playback_prepare(snd_pcm_substream_t *substream)
245 snd_dma_program(chip->dma1, runtime->dma_addr, size, 245 snd_dma_program(chip->dma1, runtime->dma_addr, size,
246 DMA_MODE_WRITE | DMA_AUTOINIT); 246 DMA_MODE_WRITE | DMA_AUTOINIT);
247 247
248 snd_ad1816a_write(chip, AD1816A_PLAYBACK_SAMPLE_RATE, runtime->rate); 248 rate = runtime->rate;
249 if (chip->clock_freq)
250 rate = (rate * 33000) / chip->clock_freq;
251 snd_ad1816a_write(chip, AD1816A_PLAYBACK_SAMPLE_RATE, rate);
249 snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG, 252 snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
250 AD1816A_FMT_ALL | AD1816A_FMT_STEREO, 253 AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
251 snd_ad1816a_get_format(chip, runtime->format, 254 snd_ad1816a_get_format(chip, runtime->format,
@@ -263,7 +266,7 @@ static int snd_ad1816a_capture_prepare(snd_pcm_substream_t *substream)
263 ad1816a_t *chip = snd_pcm_substream_chip(substream); 266 ad1816a_t *chip = snd_pcm_substream_chip(substream);
264 unsigned long flags; 267 unsigned long flags;
265 snd_pcm_runtime_t *runtime = substream->runtime; 268 snd_pcm_runtime_t *runtime = substream->runtime;
266 unsigned int size; 269 unsigned int size, rate;
267 270
268 spin_lock_irqsave(&chip->lock, flags); 271 spin_lock_irqsave(&chip->lock, flags);
269 272
@@ -274,7 +277,10 @@ static int snd_ad1816a_capture_prepare(snd_pcm_substream_t *substream)
274 snd_dma_program(chip->dma2, runtime->dma_addr, size, 277 snd_dma_program(chip->dma2, runtime->dma_addr, size,
275 DMA_MODE_READ | DMA_AUTOINIT); 278 DMA_MODE_READ | DMA_AUTOINIT);
276 279
277 snd_ad1816a_write(chip, AD1816A_CAPTURE_SAMPLE_RATE, runtime->rate); 280 rate = runtime->rate;
281 if (chip->clock_freq)
282 rate = (rate * 33000) / chip->clock_freq;
283 snd_ad1816a_write(chip, AD1816A_CAPTURE_SAMPLE_RATE, rate);
278 snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG, 284 snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
279 AD1816A_FMT_ALL | AD1816A_FMT_STEREO, 285 AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
280 snd_ad1816a_get_format(chip, runtime->format, 286 snd_ad1816a_get_format(chip, runtime->format,
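Both prepare paths now compensate for boards fitted with a non-standard master crystal: the AD1816A rate registers assume the stock clock (33000 in the driver's units, apparently kHz, i.e. a 33 MHz crystal), so the requested rate is prescaled by 33000/clock_freq before being written. A worked example, assuming a board whose clock runs at twice the nominal frequency (clockfreq=66000):

/* Requested 44100 Hz with chip->clock_freq = 66000:
 *   rate = (44100 * 33000) / 66000 = 22050;
 * writing 22050 to AD1816A_PLAYBACK_SAMPLE_RATE on the doubled clock
 * yields an actual 44100 Hz stream.  With clockfreq left at 0 the
 * requested rate is written unchanged. */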
diff --git a/sound/isa/ad1848/ad1848_lib.c b/sound/isa/ad1848/ad1848_lib.c
index 8fb3db103e48..bc642dc94547 100644
--- a/sound/isa/ad1848/ad1848_lib.c
+++ b/sound/isa/ad1848/ad1848_lib.c
@@ -1196,6 +1196,7 @@ int snd_ad1848_add_ctl(ad1848_t *chip, const char *name, int index, int type, un
1196 .put = snd_ad1848_put_double, 1196 .put = snd_ad1848_put_double,
1197 }, 1197 },
1198 [AD1848_MIX_CAPTURE] = { 1198 [AD1848_MIX_CAPTURE] = {
1199 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1199 .info = snd_ad1848_info_mux, 1200 .info = snd_ad1848_info_mux,
1200 .get = snd_ad1848_get_mux, 1201 .get = snd_ad1848_get_mux,
1201 .put = snd_ad1848_put_mux, 1202 .put = snd_ad1848_put_mux,
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 46776cc0c157..1fce8b9f37cf 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -194,8 +194,8 @@ AD1848_DOUBLE("Wavetable Capture Volume", 0, CMI8330_WAVGAIN, CMI8330_WAVGAIN, 4
194AD1848_SINGLE("3D Control - Switch", 0, CMI8330_RMUX3D, 5, 1, 1), 194AD1848_SINGLE("3D Control - Switch", 0, CMI8330_RMUX3D, 5, 1, 1),
195AD1848_SINGLE("PC Speaker Playback Volume", 0, CMI8330_OUTPUTVOL, 3, 3, 0), 195AD1848_SINGLE("PC Speaker Playback Volume", 0, CMI8330_OUTPUTVOL, 3, 3, 0),
196AD1848_SINGLE("FM Playback Switch", 0, CMI8330_RECMUX, 3, 1, 1), 196AD1848_SINGLE("FM Playback Switch", 0, CMI8330_RECMUX, 3, 1, 1),
197AD1848_SINGLE("IEC958 Input Capture Switch", 0, CMI8330_RMUX3D, 7, 1, 1), 197AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",CAPTURE,SWITCH), 0, CMI8330_RMUX3D, 7, 1, 1),
198AD1848_SINGLE("IEC958 Input Playback Switch", 0, CMI8330_MUTEMUX, 7, 1, 1), 198AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",PLAYBACK,SWITCH), 0, CMI8330_MUTEMUX, 7, 1, 1),
199}; 199};
200 200
201#ifdef ENABLE_SB_MIXER 201#ifdef ENABLE_SB_MIXER
diff --git a/sound/isa/cs423x/cs4231_lib.c b/sound/isa/cs423x/cs4231_lib.c
index 3e7a2a33a5ca..3199941edd9b 100644
--- a/sound/isa/cs423x/cs4231_lib.c
+++ b/sound/isa/cs423x/cs4231_lib.c
@@ -1346,6 +1346,8 @@ static void snd_cs4231_suspend(cs4231_t *chip)
1346 int reg; 1346 int reg;
1347 unsigned long flags; 1347 unsigned long flags;
1348 1348
1349 if (chip->pcm)
1350 snd_pcm_suspend_all(chip->pcm);
1349 spin_lock_irqsave(&chip->reg_lock, flags); 1351 spin_lock_irqsave(&chip->reg_lock, flags);
1350 for (reg = 0; reg < 32; reg++) 1352 for (reg = 0; reg < 32; reg++)
1351 chip->image[reg] = snd_cs4231_in(chip, reg); 1353 chip->image[reg] = snd_cs4231_in(chip, reg);
diff --git a/sound/isa/gus/gus_io.c b/sound/isa/gus/gus_io.c
index 337b0e2a8a36..23e1b5f19e1a 100644
--- a/sound/isa/gus/gus_io.c
+++ b/sound/isa/gus/gus_io.c
@@ -269,8 +269,9 @@ void snd_gf1_i_write_addr(snd_gus_card_t * gus, unsigned char reg,
269 269
270#endif /* 0 */ 270#endif /* 0 */
271 271
272unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus, 272#ifdef CONFIG_SND_DEBUG
273 unsigned char reg, short w_16bit) 273static unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus,
274 unsigned char reg, short w_16bit)
274{ 275{
275 unsigned int res; 276 unsigned int res;
276 unsigned long flags; 277 unsigned long flags;
@@ -280,6 +281,7 @@ unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus,
280 spin_unlock_irqrestore(&gus->reg_lock, flags); 281 spin_unlock_irqrestore(&gus->reg_lock, flags);
281 return res; 282 return res;
282} 283}
284#endif
283 285
284/* 286/*
285 287
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 95c7b3e53407..75bd6eca63e7 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -145,6 +145,14 @@ static snd_card_t *snd_opl3sa2_legacy[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;
145 145
146#ifdef CONFIG_PNP 146#ifdef CONFIG_PNP
147 147
148static struct pnp_device_id snd_opl3sa2_pnpbiosids[] = {
149 { .id = "YMH0021" },
150 { .id = "NMX2210" }, /* Gateway Solo 2500 */
151 { .id = "" } /* end */
152};
153
154MODULE_DEVICE_TABLE(pnp, snd_opl3sa2_pnpbiosids);
155
148static struct pnp_card_device_id snd_opl3sa2_pnpids[] = { 156static struct pnp_card_device_id snd_opl3sa2_pnpids[] = {
149 /* Yamaha YMF719E-S (Genius Sound Maker 3DX) */ 157 /* Yamaha YMF719E-S (Genius Sound Maker 3DX) */
150 { .id = "YMH0020", .devs = { { "YMH0021" } } }, 158 { .id = "YMH0020", .devs = { { "YMH0021" } } },
@@ -568,20 +576,18 @@ static int snd_opl3sa2_resume(snd_card_t *card)
568 576
569#ifdef CONFIG_PNP 577#ifdef CONFIG_PNP
570static int __init snd_opl3sa2_pnp(int dev, opl3sa2_t *chip, 578static int __init snd_opl3sa2_pnp(int dev, opl3sa2_t *chip,
571 struct pnp_card_link *card, 579 struct pnp_dev *pdev,
572 const struct pnp_card_device_id *id) 580 int isapnp)
573{ 581{
574 struct pnp_dev *pdev; 582 struct pnp_resource_table * cfg;
575 struct pnp_resource_table * cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
576 int err; 583 int err;
577 584
585 if (!isapnp && pnp_device_is_isapnp(pdev))
586 return -ENOENT; /* we have another procedure - card */
587
588 cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
578 if (!cfg) 589 if (!cfg)
579 return -ENOMEM; 590 return -ENOMEM;
580 pdev = chip->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
581 if (chip->dev == NULL) {
582 kfree(cfg);
583 return -EBUSY;
584 }
585 /* PnP initialization */ 591 /* PnP initialization */
586 pnp_init_resource_table(cfg); 592 pnp_init_resource_table(cfg);
587 if (sb_port[dev] != SNDRV_AUTO_PORT) 593 if (sb_port[dev] != SNDRV_AUTO_PORT)
@@ -601,7 +607,7 @@ static int __init snd_opl3sa2_pnp(int dev, opl3sa2_t *chip,
601 if (irq[dev] != SNDRV_AUTO_IRQ) 607 if (irq[dev] != SNDRV_AUTO_IRQ)
602 pnp_resource_change(&cfg->irq_resource[0], irq[dev], 1); 608 pnp_resource_change(&cfg->irq_resource[0], irq[dev], 1);
603 err = pnp_manual_config_dev(pdev, cfg, 0); 609 err = pnp_manual_config_dev(pdev, cfg, 0);
604 if (err < 0) 610 if (err < 0 && isapnp)
605 snd_printk(KERN_ERR "PnP manual resources are invalid, using auto config\n"); 611 snd_printk(KERN_ERR "PnP manual resources are invalid, using auto config\n");
606 err = pnp_activate_dev(pdev); 612 err = pnp_activate_dev(pdev);
607 if (err < 0) { 613 if (err < 0) {
@@ -617,13 +623,31 @@ static int __init snd_opl3sa2_pnp(int dev, opl3sa2_t *chip,
617 dma1[dev] = pnp_dma(pdev, 0); 623 dma1[dev] = pnp_dma(pdev, 0);
618 dma2[dev] = pnp_dma(pdev, 1); 624 dma2[dev] = pnp_dma(pdev, 1);
619 irq[dev] = pnp_irq(pdev, 0); 625 irq[dev] = pnp_irq(pdev, 0);
620 snd_printdd("PnP OPL3-SA: sb port=0x%lx, wss port=0x%lx, fm port=0x%lx, midi port=0x%lx\n", 626 snd_printdd("%sPnP OPL3-SA: sb port=0x%lx, wss port=0x%lx, fm port=0x%lx, midi port=0x%lx\n",
621 sb_port[dev], wss_port[dev], fm_port[dev], midi_port[dev]); 627 pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", sb_port[dev], wss_port[dev], fm_port[dev], midi_port[dev]);
622 snd_printdd("PnP OPL3-SA: control port=0x%lx, dma1=%i, dma2=%i, irq=%i\n", 628 snd_printdd("%sPnP OPL3-SA: control port=0x%lx, dma1=%i, dma2=%i, irq=%i\n",
623 port[dev], dma1[dev], dma2[dev], irq[dev]); 629 pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", port[dev], dma1[dev], dma2[dev], irq[dev]);
624 kfree(cfg); 630 kfree(cfg);
631 chip->dev = pdev;
625 return 0; 632 return 0;
626} 633}
634
635static int __init snd_opl3sa2_cpnp(int dev, opl3sa2_t *chip,
636 struct pnp_card_link *card,
637 const struct pnp_card_device_id *id)
638{
639 struct pnp_dev *pdev;
640 struct pnp_resource_table * cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
641
642 if (!cfg)
643 return -ENOMEM;
644 pdev = pnp_request_card_device(card, id->devs[0].id, NULL);
645 if (pdev == NULL) {
646 kfree(cfg);
647 return -EBUSY;
648 }
649 return snd_opl3sa2_pnp(dev, chip, pdev, 1);
650}
627#endif /* CONFIG_PNP */ 651#endif /* CONFIG_PNP */
628 652
629static int snd_opl3sa2_free(opl3sa2_t *chip) 653static int snd_opl3sa2_free(opl3sa2_t *chip)
@@ -645,6 +669,7 @@ static int snd_opl3sa2_dev_free(snd_device_t *device)
 }
 
 static int __devinit snd_opl3sa2_probe(int dev,
+				       struct pnp_dev *pdev,
 				       struct pnp_card_link *pcard,
 				       const struct pnp_card_device_id *pid)
 {
@@ -695,8 +720,13 @@ static int __devinit snd_opl3sa2_probe(int dev,
 	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
 		goto __error;
 #ifdef CONFIG_PNP
-	if (isapnp[dev]) {
-		if ((err = snd_opl3sa2_pnp(dev, chip, pcard, pid)) < 0)
+	if (pdev) {
+		if ((err = snd_opl3sa2_pnp(dev, chip, pdev, 0)) < 0)
+			goto __error;
+		snd_card_set_dev(card, &pdev->dev);
+	}
+	if (pcard) {
+		if ((err = snd_opl3sa2_cpnp(dev, chip, pcard, pid)) < 0)
 			goto __error;
 		snd_card_set_dev(card, &pcard->card->dev);
 	}
@@ -768,7 +798,9 @@ static int __devinit snd_opl3sa2_probe(int dev,
 	if ((err = snd_card_register(card)) < 0)
 		goto __error;
 
-	if (pcard)
+	if (pdev)
+		pnp_set_drvdata(pdev, card);
+	else if (pcard)
 		pnp_set_card_drvdata(pcard, card);
 	else
 		snd_opl3sa2_legacy[dev] = card;
@@ -780,8 +812,8 @@ static int __devinit snd_opl3sa2_probe(int dev,
 }
 
 #ifdef CONFIG_PNP
-static int __devinit snd_opl3sa2_pnp_detect(struct pnp_card_link *card,
-					    const struct pnp_card_device_id *id)
+static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
+					    const struct pnp_device_id *id)
 {
 	static int dev;
 	int res;
@@ -789,7 +821,7 @@ static int __devinit snd_opl3sa2_pnp_detect(struct pnp_card_link *card,
 	for ( ; dev < SNDRV_CARDS; dev++) {
 		if (!enable[dev] || !isapnp[dev])
 			continue;
-		res = snd_opl3sa2_probe(dev, card, id);
+		res = snd_opl3sa2_probe(dev, pdev, NULL, NULL);
 		if (res < 0)
 			return res;
 		dev++;
@@ -798,7 +830,40 @@ static int __devinit snd_opl3sa2_pnp_detect(struct pnp_card_link *card,
 	return -ENODEV;
 }
 
-static void __devexit snd_opl3sa2_pnp_remove(struct pnp_card_link * pcard)
+static void __devexit snd_opl3sa2_pnp_remove(struct pnp_dev * pdev)
+{
+	snd_card_t *card = (snd_card_t *) pnp_get_drvdata(pdev);
+
+	snd_card_disconnect(card);
+	snd_card_free_in_thread(card);
+}
+
+static struct pnp_driver opl3sa2_pnp_driver = {
+	.name = "opl3sa2-pnpbios",
+	.id_table = snd_opl3sa2_pnpbiosids,
+	.probe = snd_opl3sa2_pnp_detect,
+	.remove = __devexit_p(snd_opl3sa2_pnp_remove),
+};
+
+static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *card,
+					     const struct pnp_card_device_id *id)
+{
+	static int dev;
+	int res;
+
+	for ( ; dev < SNDRV_CARDS; dev++) {
+		if (!enable[dev] || !isapnp[dev])
+			continue;
+		res = snd_opl3sa2_probe(dev, NULL, card, id);
+		if (res < 0)
+			return res;
+		dev++;
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static void __devexit snd_opl3sa2_pnp_cremove(struct pnp_card_link * pcard)
 {
 	snd_card_t *card = (snd_card_t *) pnp_get_card_drvdata(pcard);
 
@@ -810,8 +875,8 @@ static struct pnp_card_driver opl3sa2_pnpc_driver = {
 	.flags = PNP_DRIVER_RES_DISABLE,
 	.name = "opl3sa2",
 	.id_table = snd_opl3sa2_pnpids,
-	.probe = snd_opl3sa2_pnp_detect,
-	.remove = __devexit_p(snd_opl3sa2_pnp_remove),
+	.probe = snd_opl3sa2_pnp_cdetect,
+	.remove = __devexit_p(snd_opl3sa2_pnp_cremove),
 };
 #endif /* CONFIG_PNP */
 
@@ -826,10 +891,11 @@ static int __init alsa_card_opl3sa2_init(void)
 		if (isapnp[dev])
 			continue;
 #endif
-		if (snd_opl3sa2_probe(dev, NULL, NULL) >= 0)
+		if (snd_opl3sa2_probe(dev, NULL, NULL, NULL) >= 0)
 			cards++;
 	}
 #ifdef CONFIG_PNP
+	cards += pnp_register_driver(&opl3sa2_pnp_driver);
 	cards += pnp_register_card_driver(&opl3sa2_pnpc_driver);
 #endif
 	if (!cards) {
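
Taken together, the opl3sa2 hunks split one probe path into three: a raw PnP device probe (as announced by a PnP BIOS), a PnP-card probe that first requests the logical device and then falls through to the same helper, and the legacy probe with hard-coded resources. Condensed, the dual registration looks like this (a sketch using only symbols defined in the patch; error handling elided):

	/* raw PnP device, e.g. enumerated by the PnP BIOS */
	static struct pnp_driver opl3sa2_pnp_driver = {
		.name     = "opl3sa2-pnpbios",
		.id_table = snd_opl3sa2_pnpbiosids,
		.probe    = snd_opl3sa2_pnp_detect,   /* -> snd_opl3sa2_probe(dev, pdev, NULL, NULL) */
		.remove   = __devexit_p(snd_opl3sa2_pnp_remove),
	};

	/* logical device on a full ISA PnP card */
	static struct pnp_card_driver opl3sa2_pnpc_driver = {
		.flags    = PNP_DRIVER_RES_DISABLE,
		.name     = "opl3sa2",
		.id_table = snd_opl3sa2_pnpids,
		.probe    = snd_opl3sa2_pnp_cdetect,  /* -> snd_opl3sa2_probe(dev, NULL, card, id) */
		.remove   = __devexit_p(snd_opl3sa2_pnp_cremove),
	};

Both probes converge on snd_opl3sa2_pnp(); the isapnp flag distinguishes the two paths so the BIOS path can reject card-enumerated devices with -ENOENT.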
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index a6a0fa516268..a99e642a68b5 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -729,7 +729,7 @@ static int snd_sb16_dma_control_put(snd_kcontrol_t * kcontrol, snd_ctl_elem_valu
 }
 
 static snd_kcontrol_new_t snd_sb16_dma_control = {
-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
 	.name = "16-bit DMA Allocation",
 	.info = snd_sb16_dma_control_info,
 	.get = snd_sb16_dma_control_get,
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 26b42bb20a0a..1e458919cce6 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -1,11 +1,15 @@
 # ALSA PCI drivers
 
-menu "PCI devices"
-	depends on SND!=n && PCI
-
 config SND_AC97_CODEC
 	tristate
 	select SND_PCM
+	select SND_AC97_BUS
+
+config SND_AC97_BUS
+	tristate
+
+menu "PCI devices"
+	depends on SND!=n && PCI
 
 config SND_ALI5451
 	tristate "ALi M5451 PCI Audio Controller"
diff --git a/sound/pci/ac97/Makefile b/sound/pci/ac97/Makefile
index 3c3222122d8b..77b3482cb133 100644
--- a/sound/pci/ac97/Makefile
+++ b/sound/pci/ac97/Makefile
@@ -10,9 +10,11 @@ snd-ac97-codec-objs += ac97_proc.o
 endif
 
 snd-ak4531-codec-objs := ak4531_codec.o
+snd-ac97-bus-objs := ac97_bus.o
 
 # Toplevel Module Dependency
 obj-$(CONFIG_SND_AC97_CODEC) += snd-ac97-codec.o
 obj-$(CONFIG_SND_ENS1370) += snd-ak4531-codec.o
+obj-$(CONFIG_SND_AC97_BUS) += snd-ac97-bus.o
 
 obj-m := $(sort $(obj-m))
diff --git a/sound/pci/ac97/ac97_bus.c b/sound/pci/ac97/ac97_bus.c
new file mode 100644
index 000000000000..227f8b9f67ce
--- /dev/null
+++ b/sound/pci/ac97/ac97_bus.c
@@ -0,0 +1,79 @@
+/*
+ * Linux driver model AC97 bus interface
+ *
+ * Author:	Nicolas Pitre
+ * Created:	Jan 14, 2005
+ * Copyright:	(C) MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+/*
+ * Codec families have names separated by commas, so we search for an
+ * individual codec name within the family string.
+ */
+static int ac97_bus_match(struct device *dev, struct device_driver *drv)
+{
+	return (strstr(dev->bus_id, drv->name) != NULL);
+}
+
+static int ac97_bus_suspend(struct device *dev, pm_message_t state)
+{
+	int ret = 0;
+
+	if (dev->driver && dev->driver->suspend) {
+		ret = dev->driver->suspend(dev, state, SUSPEND_DISABLE);
+		if (ret == 0)
+			ret = dev->driver->suspend(dev, state, SUSPEND_SAVE_STATE);
+		if (ret == 0)
+			ret = dev->driver->suspend(dev, state, SUSPEND_POWER_DOWN);
+	}
+	return ret;
+}
+
+static int ac97_bus_resume(struct device *dev)
+{
+	int ret = 0;
+
+	if (dev->driver && dev->driver->resume) {
+		ret = dev->driver->resume(dev, RESUME_POWER_ON);
+		if (ret == 0)
+			ret = dev->driver->resume(dev, RESUME_RESTORE_STATE);
+		if (ret == 0)
+			ret = dev->driver->resume(dev, RESUME_ENABLE);
+	}
+	return ret;
+}
+
+struct bus_type ac97_bus_type = {
+	.name		= "ac97",
+	.match		= ac97_bus_match,
+	.suspend	= ac97_bus_suspend,
+	.resume		= ac97_bus_resume,
+};
+
+static int __init ac97_bus_init(void)
+{
+	return bus_register(&ac97_bus_type);
+}
+
+subsys_initcall(ac97_bus_init);
+
+static void __exit ac97_bus_exit(void)
+{
+	bus_unregister(&ac97_bus_type);
+}
+
+module_exit(ac97_bus_exit);
+
+EXPORT_SYMBOL(ac97_bus_type);
+
+MODULE_LICENSE("GPL");
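
For context: ac97_bus.c exists so that non-audio functions of combo codecs (for example the touchscreen half of Wolfson WM97xx parts) can bind ordinary driver-model drivers to a codec. A minimal, hypothetical consumer is sketched below; the only binding rule is the strstr() match above, so this assumes the codec device was registered with a bus_id containing the codec family name (the wm9712_ts names are illustrative, not part of this patch):

	static int wm9712_ts_probe(struct device *dev)
	{
		/* ... claim the codec's touchscreen registers ... */
		return 0;
	}

	static struct device_driver wm9712_ts_driver = {
		.name	= "WM9712",		/* must be a substring of dev->bus_id */
		.bus	= &ac97_bus_type,
		.probe	= wm9712_ts_probe,
	};

	static int __init wm9712_ts_init(void)
	{
		return driver_register(&wm9712_ts_driver);
	}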
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6983eea226da..5501f4440c92 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -157,6 +157,7 @@ static const ac97_codec_id_t snd_ac97_codec_ids[] = {
 { 0x54524123, 0xffffffff, "TR28602",		NULL,		NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
 { 0x54584e20, 0xffffffff, "TLC320AD9xC",	NULL,		NULL },
 { 0x56494161, 0xffffffff, "VIA1612A",		NULL,		NULL }, // modified ICE1232 with S/PDIF
+{ 0x56494170, 0xffffffff, "VIA1617A",		patch_vt1617a,	NULL }, // modified VT1616 with S/PDIF
 { 0x57454301, 0xffffffff, "W83971D",		NULL,		NULL },
 { 0x574d4c00, 0xffffffff, "WM9701A",		NULL,		NULL },
 { 0x574d4C03, 0xffffffff, "WM9703,WM9707,WM9708,WM9717",	patch_wolfson03, NULL},
@@ -1307,16 +1308,18 @@ static int snd_ac97_mixer_build(ac97_t * ac97)
 	}
 
 	/* build master tone controls */
-	if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
-		for (idx = 0; idx < 2; idx++) {
-			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
-				return err;
-			if (ac97->id == AC97_ID_YMF753) {
-				kctl->private_value &= ~(0xff << 16);
-				kctl->private_value |= 7 << 16;
+	if (!(ac97->flags & AC97_HAS_NO_TONE)) {
+		if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
+			for (idx = 0; idx < 2; idx++) {
+				if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
+					return err;
+				if (ac97->id == AC97_ID_YMF753) {
+					kctl->private_value &= ~(0xff << 16);
+					kctl->private_value |= 7 << 16;
+				}
 			}
+			snd_ac97_write_cache(ac97, AC97_MASTER_TONE, 0x0f0f);
 		}
-		snd_ac97_write_cache(ac97, AC97_MASTER_TONE, 0x0f0f);
 	}
 
 	/* build PC Speaker controls */
@@ -1339,11 +1342,13 @@ static int snd_ac97_mixer_build(ac97_t * ac97)
 	}
 
 	/* build MIC controls */
-	if (snd_ac97_try_volume_mix(ac97, AC97_MIC)) {
-		if ((err = snd_ac97_cmix_new(card, "Mic Playback", AC97_MIC, ac97)) < 0)
-			return err;
-		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97))) < 0)
-			return err;
+	if (!(ac97->flags & AC97_HAS_NO_MIC)) {
+		if (snd_ac97_try_volume_mix(ac97, AC97_MIC)) {
+			if ((err = snd_ac97_cmix_new(card, "Mic Playback", AC97_MIC, ac97)) < 0)
+				return err;
+			if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97))) < 0)
+				return err;
+		}
 	}
 
 	/* build Line controls */
@@ -1402,12 +1407,14 @@ static int snd_ac97_mixer_build(ac97_t * ac97)
 		}
 		snd_ac97_write_cache(ac97, AC97_PCM, init_val);
 	} else {
-		if (ac97->flags & AC97_HAS_NO_PCM_VOL)
-			err = snd_ac97_cmute_new(card, "PCM Playback Switch", AC97_PCM, ac97);
-		else
-			err = snd_ac97_cmix_new(card, "PCM Playback", AC97_PCM, ac97);
-		if (err < 0)
-			return err;
+		if (!(ac97->flags & AC97_HAS_NO_STD_PCM)) {
+			if (ac97->flags & AC97_HAS_NO_PCM_VOL)
+				err = snd_ac97_cmute_new(card, "PCM Playback Switch", AC97_PCM, ac97);
+			else
+				err = snd_ac97_cmix_new(card, "PCM Playback", AC97_PCM, ac97);
+			if (err < 0)
+				return err;
+		}
 	}
 
 	/* build Capture controls */
@@ -1807,6 +1814,39 @@ int snd_ac97_bus(snd_card_t *card, int num, ac97_bus_ops_t *ops,
 	return 0;
 }
 
+/* stop no dev release warning */
+static void ac97_device_release(struct device * dev)
+{
+}
+
+/* register ac97 codec to bus */
+static int snd_ac97_dev_register(snd_device_t *device)
+{
+	ac97_t *ac97 = device->device_data;
+	int err;
+
+	ac97->dev.bus = &ac97_bus_type;
+	ac97->dev.parent = ac97->bus->card->dev;
+	ac97->dev.platform_data = ac97;
+	ac97->dev.release = ac97_device_release;
+	snprintf(ac97->dev.bus_id, BUS_ID_SIZE, "card%d-%d", ac97->bus->card->number, ac97->num);
+	if ((err = device_register(&ac97->dev)) < 0) {
+		snd_printk(KERN_ERR "Can't register ac97 bus\n");
+		ac97->dev.bus = NULL;
+		return err;
+	}
+	return 0;
+}
+
+/* unregister ac97 codec */
+static int snd_ac97_dev_unregister(snd_device_t *device)
+{
+	ac97_t *ac97 = device->device_data;
+	if (ac97->dev.bus)
+		device_unregister(&ac97->dev);
+	return snd_ac97_free(ac97);
+}
+
 /* build_ops to do nothing */
 static struct snd_ac97_build_ops null_build_ops;
 
@@ -1840,6 +1880,8 @@ int snd_ac97_mixer(ac97_bus_t *bus, ac97_template_t *template, ac97_t **rac97)
 	const ac97_codec_id_t *pid;
 	static snd_device_ops_t ops = {
 		.dev_free =	snd_ac97_dev_free,
+		.dev_register =	snd_ac97_dev_register,
+		.dev_unregister =	snd_ac97_dev_unregister,
 	};
 
 	snd_assert(rac97 != NULL, return -EINVAL);
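
With these two callbacks added to the snd_device_ops, every codec that snd_ac97_mixer() creates is put on (and removed from) the new bus automatically. Roughly, the flow is (a sketch of the calling sequence, not actual ALSA core code):

	/* snd_ac97_mixer() registers the codec as an ALSA device ... */
	snd_device_new(card, SNDRV_DEV_CODEC, ac97, &ops);
	/* ... later, snd_card_register(card) invokes ops.dev_register,
	 * i.e. snd_ac97_dev_register() above, which does
	 * device_register(&ac97->dev) with bus_id "cardX-Y".  Teardown
	 * runs ops.dev_unregister, which calls device_unregister() and
	 * then frees the codec. */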
@@ -2539,8 +2581,6 @@ int snd_ac97_tune_hardware(ac97_t *ac97, struct ac97_quirk *quirk, const char *o
 {
 	int result;
 
-	snd_assert(quirk, return -EINVAL);
-
 	/* quirk overriden? */
 	if (override && strcmp(override, "-1") && strcmp(override, "default")) {
 		result = apply_quirk_str(ac97, override);
@@ -2549,6 +2589,9 @@ int snd_ac97_tune_hardware(ac97_t *ac97, struct ac97_quirk *quirk, const char *o
 		return result;
 	}
 
+	if (! quirk)
+		return -EINVAL;
+
 	for (; quirk->subvendor; quirk++) {
 		if (quirk->subvendor != ac97->subsystem_vendor)
 			continue;
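
The reordering is the point of this change: the user override is applied before the table is consulted, so a driver may now pass quirk == NULL and still honour a user-supplied override string. A hypothetical call site (assuming the usual ac97_quirk module option string used by drivers such as intel8x0):

	/* no built-in quirk table, but a user "ac97_quirk=..." option still works */
	err = snd_ac97_tune_hardware(chip->ac97, NULL, ac97_quirk);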
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 66edc857d3e6..b584172c1104 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -370,141 +370,387 @@ int patch_yamaha_ymf753(ac97_t * ac97)
  * added support for WM9705,WM9708,WM9709,WM9710,WM9711,WM9712 and WM9717.
  */
 
-int patch_wolfson03(ac97_t * ac97)
+static const snd_kcontrol_new_t wm97xx_snd_ac97_controls[] = {
+AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
+};
+
+static int patch_wolfson_wm9703_specific(ac97_t * ac97)
 {
 	/* This is known to work for the ViewSonic ViewPad 1000
-	   Randolph Bentson <bentson@holmsjoen.com> */
+	 * Randolph Bentson <bentson@holmsjoen.com>
+	 * WM9703/9707/9708/9717
+	 */
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
+	return 0;
+}
+
+static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
+	.build_specific = patch_wolfson_wm9703_specific,
+};
 
-	// WM9703/9707/9708/9717
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
-	snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0x8000);
+int patch_wolfson03(ac97_t * ac97)
+{
+	ac97->build_ops = &patch_wolfson_wm9703_ops;
 	return 0;
 }
 
-int patch_wolfson04(ac97_t * ac97)
+static const snd_kcontrol_new_t wm9704_snd_ac97_controls[] = {
+AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
+AC97_DOUBLE("Rear Playback Volume", AC97_WM9704_RMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Rear Playback Switch", AC97_WM9704_RMIXER_VOL, 15, 1, 1),
+AC97_DOUBLE("Rear DAC Volume", AC97_WM9704_RPCM_VOL, 8, 0, 31, 1),
+AC97_DOUBLE("Surround Volume", AC97_SURROUND_MASTER, 8, 0, 31, 1),
+};
+
+static int patch_wolfson_wm9704_specific(ac97_t * ac97)
 {
-	// WM9704M/9704Q
-	// set front and rear mixer volume
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
-	snd_ac97_write_cache(ac97, AC97_WM9704_RMIXER_VOL, 0x0808);
-
-	// patch for DVD noise
+	int err, i;
+	for (i = 0; i < ARRAY_SIZE(wm9704_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9704_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	/* patch for DVD noise */
 	snd_ac97_write_cache(ac97, AC97_WM9704_TEST, 0x0200);
-
-	// init vol
-	snd_ac97_write_cache(ac97, AC97_WM9704_RPCM_VOL, 0x0808);
-
-	// set rear surround volume
-	snd_ac97_write_cache(ac97, AC97_SURROUND_MASTER, 0x0000);
 	return 0;
 }
 
+static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
+	.build_specific = patch_wolfson_wm9704_specific,
+};
+
+int patch_wolfson04(ac97_t * ac97)
+{
+	/* WM9704M/9704Q */
+	ac97->build_ops = &patch_wolfson_wm9704_ops;
+	return 0;
+}
+
+static int patch_wolfson_wm9705_specific(ac97_t * ac97)
+{
+	int err, i;
+	for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97, 0x72, 0x0808);
+	return 0;
+}
+
+static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
+	.build_specific = patch_wolfson_wm9705_specific,
+};
+
 int patch_wolfson05(ac97_t * ac97)
 {
-	// WM9705, WM9710
-	// set front mixer volume
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
+	/* WM9705, WM9710 */
+	ac97->build_ops = &patch_wolfson_wm9705_ops;
+	return 0;
+}
+
+static const char* wm9711_alc_select[] = {"None", "Left", "Right", "Stereo"};
+static const char* wm9711_alc_mix[] = {"Stereo", "Right", "Left", "None"};
+static const char* wm9711_out3_src[] = {"Left", "VREF", "Left + Right", "Mono"};
+static const char* wm9711_out3_lrsrc[] = {"Master Mix", "Headphone Mix"};
+static const char* wm9711_rec_adc[] = {"Stereo", "Left", "Right", "Mute"};
+static const char* wm9711_base[] = {"Linear Control", "Adaptive Boost"};
+static const char* wm9711_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
+static const char* wm9711_mic[] = {"Mic 1", "Differential", "Mic 2", "Stereo"};
+static const char* wm9711_rec_sel[] =
+	{"Mic 1", "NC", "NC", "Master Mix", "Line", "Headphone Mix", "Phone Mix", "Phone"};
+static const char* wm9711_ng_type[] = {"Constant Gain", "Mute"};
+
+static const struct ac97_enum wm9711_enum[] = {
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9711_alc_select),
+AC97_ENUM_SINGLE(AC97_VIDEO, 10, 4, wm9711_alc_mix),
+AC97_ENUM_SINGLE(AC97_AUX, 9, 4, wm9711_out3_src),
+AC97_ENUM_SINGLE(AC97_AUX, 8, 2, wm9711_out3_lrsrc),
+AC97_ENUM_SINGLE(AC97_REC_SEL, 12, 4, wm9711_rec_adc),
+AC97_ENUM_SINGLE(AC97_MASTER_TONE, 15, 2, wm9711_base),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 14, 6, 2, wm9711_rec_gain),
+AC97_ENUM_SINGLE(AC97_MIC, 5, 4, wm9711_mic),
+AC97_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 8, wm9711_rec_sel),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9711_ng_type),
+};
+
+static const snd_kcontrol_new_t wm9711_snd_ac97_controls[] = {
+AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
+AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
+AC97_SINGLE("ALC Decay Time", AC97_CODEC_CLASS_REV, 4, 15, 0),
+AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
+AC97_ENUM("ALC Function", wm9711_enum[0]),
+AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 1),
+AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 1),
+AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
+AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
+AC97_ENUM("ALC NG Type", wm9711_enum[9]),
+AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 1),
+
+AC97_SINGLE("Side Tone Switch", AC97_VIDEO, 15, 1, 1),
+AC97_SINGLE("Side Tone Volume", AC97_VIDEO, 12, 7, 1),
+AC97_ENUM("ALC Headphone Mux", wm9711_enum[1]),
+AC97_SINGLE("ALC Headphone Volume", AC97_VIDEO, 7, 7, 1),
+
+AC97_SINGLE("Out3 Switch", AC97_AUX, 15, 1, 1),
+AC97_SINGLE("Out3 ZC Switch", AC97_AUX, 7, 1, 1),
+AC97_ENUM("Out3 Mux", wm9711_enum[2]),
+AC97_ENUM("Out3 LR Mux", wm9711_enum[3]),
+AC97_SINGLE("Out3 Volume", AC97_AUX, 0, 31, 1),
+
+AC97_SINGLE("Beep to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
+AC97_SINGLE("Beep to Headphone Volume", AC97_PC_BEEP, 12, 7, 1),
+AC97_SINGLE("Beep to Side Tone Switch", AC97_PC_BEEP, 11, 1, 1),
+AC97_SINGLE("Beep to Side Tone Volume", AC97_PC_BEEP, 8, 7, 1),
+AC97_SINGLE("Beep to Phone Switch", AC97_PC_BEEP, 7, 1, 1),
+AC97_SINGLE("Beep to Phone Volume", AC97_PC_BEEP, 4, 7, 1),
+
+AC97_SINGLE("Aux to Headphone Switch", AC97_CD, 15, 1, 1),
+AC97_SINGLE("Aux to Headphone Volume", AC97_CD, 12, 7, 1),
+AC97_SINGLE("Aux to Side Tone Switch", AC97_CD, 11, 1, 1),
+AC97_SINGLE("Aux to Side Tone Volume", AC97_CD, 8, 7, 1),
+AC97_SINGLE("Aux to Phone Switch", AC97_CD, 7, 1, 1),
+AC97_SINGLE("Aux to Phone Volume", AC97_CD, 4, 7, 1),
+
+AC97_SINGLE("Phone to Headphone Switch", AC97_PHONE, 15, 1, 1),
+AC97_SINGLE("Phone to Master Switch", AC97_PHONE, 14, 1, 1),
+
+AC97_SINGLE("Line to Headphone Switch", AC97_LINE, 15, 1, 1),
+AC97_SINGLE("Line to Master Switch", AC97_LINE, 14, 1, 1),
+AC97_SINGLE("Line to Phone Switch", AC97_LINE, 13, 1, 1),
+
+AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PCM, 15, 1, 1),
+AC97_SINGLE("PCM Playback to Master Switch", AC97_PCM, 14, 1, 1),
+AC97_SINGLE("PCM Playback to Phone Switch", AC97_PCM, 13, 1, 1),
+
+AC97_SINGLE("Capture 20dB Boost Switch", AC97_REC_SEL, 14, 1, 0),
+AC97_ENUM("Capture to Phone Mux", wm9711_enum[4]),
+AC97_SINGLE("Capture to Phone 20dB Boost Switch", AC97_REC_SEL, 11, 1, 1),
+AC97_ENUM("Capture Select", wm9711_enum[8]),
+
+AC97_SINGLE("3D Upper Cut-off Switch", AC97_3D_CONTROL, 5, 1, 1),
+AC97_SINGLE("3D Lower Cut-off Switch", AC97_3D_CONTROL, 4, 1, 1),
+
+AC97_ENUM("Bass Control", wm9711_enum[5]),
+AC97_SINGLE("Bass Cut-off Switch", AC97_MASTER_TONE, 12, 1, 1),
+AC97_SINGLE("Tone Cut-off Switch", AC97_MASTER_TONE, 4, 1, 1),
+AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
+
+AC97_SINGLE("ADC Switch", AC97_REC_GAIN, 15, 1, 1),
+AC97_ENUM("Capture Volume Steps", wm9711_enum[6]),
+AC97_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 15, 1),
+AC97_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
+
+AC97_SINGLE("Mic 1 to Phone Switch", AC97_MIC, 14, 1, 1),
+AC97_SINGLE("Mic 2 to Phone Switch", AC97_MIC, 13, 1, 1),
+AC97_ENUM("Mic Select Source", wm9711_enum[7]),
+AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 32, 1),
+AC97_SINGLE("Mic 20dB Boost Switch", AC97_MIC, 7, 1, 0),
+
+AC97_SINGLE("Master ZC Switch", AC97_MASTER, 7, 1, 0),
+AC97_SINGLE("Headphone ZC Switch", AC97_HEADPHONE, 7, 1, 0),
+AC97_SINGLE("Mono ZC Switch", AC97_MASTER_MONO, 7, 1, 0),
+};
+
+static int patch_wolfson_wm9711_specific(ac97_t * ac97)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(wm9711_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9711_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97,  AC97_CODEC_CLASS_REV, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_PCI_SVID, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_VIDEO, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_AUX, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_PC_BEEP, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_CD, 0x0000);
 	return 0;
 }
 
+static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
+	.build_specific = patch_wolfson_wm9711_specific,
+};
+
 int patch_wolfson11(ac97_t * ac97)
 {
-	// WM9711, WM9712
-	// set out3 volume
-	snd_ac97_write_cache(ac97, AC97_WM9711_OUT3VOL, 0x0808);
+	/* WM9711, WM9712 */
+	ac97->build_ops = &patch_wolfson_wm9711_ops;
+
+	ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_MIC |
+		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD;
+
 	return 0;
 }
 
-static const char* wm9713_mic_mixer[] = {"Stereo", "Mic1", "Mic2", "Mute"};
+static const char* wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
 static const char* wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
-static const char* wm9713_rec_src_l[] = {"Mic1", "Mic2", "Line L", "Mono In", "HP Mix L", "Spk Mix", "Mono Mix", "Zh"};
-static const char* wm9713_rec_src_r[] = {"Mic1", "Mic2", "Line R", "Mono In", "HP Mix R", "Spk Mix", "Mono Mix", "Zh"};
+static const char* wm9713_rec_src[] =
+	{"Mic 1", "Mic 2", "Line", "Mono In", "Headphone Mix", "Master Mix",
+	 "Mono Mix", "Zh"};
+static const char* wm9713_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
+static const char* wm9713_alc_select[] = {"None", "Left", "Right", "Stereo"};
+static const char* wm9713_mono_pga[] = {"Vmid", "Zh", "Mono Mix", "Inv 1"};
+static const char* wm9713_spk_pga[] =
+	{"Vmid", "Zh", "Headphone Mix", "Master Mix", "Inv", "NC", "NC", "NC"};
+static const char* wm9713_hp_pga[] = {"Vmid", "Zh", "Headphone Mix", "NC"};
+static const char* wm9713_out3_pga[] = {"Vmid", "Zh", "Inv 1", "NC"};
+static const char* wm9713_out4_pga[] = {"Vmid", "Zh", "Inv 2", "NC"};
+static const char* wm9713_dac_inv[] =
+	{"Off", "Mono Mix", "Master Mix", "Headphone Mix L", "Headphone Mix R",
+	 "Headphone Mix Mono", "NC", "Vmid"};
+static const char* wm9713_base[] = {"Linear Control", "Adaptive Boost"};
+static const char* wm9713_ng_type[] = {"Constant Gain", "Mute"};
 
 static const struct ac97_enum wm9713_enum[] = {
 AC97_ENUM_SINGLE(AC97_LINE, 3, 4, wm9713_mic_mixer),
 AC97_ENUM_SINGLE(AC97_VIDEO, 14, 4, wm9713_rec_mux),
 AC97_ENUM_SINGLE(AC97_VIDEO, 9, 4, wm9713_rec_mux),
-AC97_ENUM_SINGLE(AC97_VIDEO, 3, 8, wm9713_rec_src_l),
-AC97_ENUM_SINGLE(AC97_VIDEO, 0, 8, wm9713_rec_src_r),
+AC97_ENUM_DOUBLE(AC97_VIDEO, 3, 0, 8, wm9713_rec_src),
+AC97_ENUM_DOUBLE(AC97_CD, 14, 6, 2, wm9713_rec_gain),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9713_alc_select),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 14, 4, wm9713_mono_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 11, 8, 8, wm9713_spk_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 6, 4, 4, wm9713_hp_pga),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 2, 4, wm9713_out3_pga),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 0, 4, wm9713_out4_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN_MIC, 13, 10, 8, wm9713_dac_inv),
+AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_base),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_line_in[] = {
+static const snd_kcontrol_new_t wm13_snd_ac97_controls[] = {
 AC97_DOUBLE("Line In Volume", AC97_PC_BEEP, 8, 0, 31, 1),
-AC97_SINGLE("Line In to Headphone Mute", AC97_PC_BEEP, 15, 1, 1),
-AC97_SINGLE("Line In to Speaker Mute", AC97_PC_BEEP, 14, 1, 1),
-AC97_SINGLE("Line In to Mono Mute", AC97_PC_BEEP, 13, 1, 1),
+AC97_SINGLE("Line In to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
+AC97_SINGLE("Line In to Master Switch", AC97_PC_BEEP, 14, 1, 1),
+AC97_SINGLE("Line In to Mono Switch", AC97_PC_BEEP, 13, 1, 1),
+
+AC97_DOUBLE("PCM Playback Volume", AC97_PHONE, 8, 0, 31, 1),
+AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PHONE, 15, 1, 1),
+AC97_SINGLE("PCM Playback to Master Switch", AC97_PHONE, 14, 1, 1),
+AC97_SINGLE("PCM Playback to Mono Switch", AC97_PHONE, 13, 1, 1),
+
+AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 31, 1),
+AC97_SINGLE("Mic 2 Volume", AC97_MIC, 0, 31, 1),
+AC97_SINGLE("Mic 1 to Mono Switch", AC97_LINE, 7, 1, 1),
+AC97_SINGLE("Mic 2 to Mono Switch", AC97_LINE, 6, 1, 1),
+AC97_SINGLE("Mic Boost (+20dB) Switch", AC97_LINE, 5, 1, 0),
+AC97_ENUM("Mic to Headphone Mux", wm9713_enum[0]),
+AC97_SINGLE("Mic Headphone Mixer Volume", AC97_LINE, 0, 7, 1),
+
+AC97_SINGLE("Capture Switch", AC97_CD, 15, 1, 1),
+AC97_ENUM("Capture Volume Steps", wm9713_enum[4]),
+AC97_DOUBLE("Capture Volume", AC97_CD, 8, 0, 15, 0),
+AC97_SINGLE("Capture ZC Switch", AC97_CD, 7, 1, 0),
+
+AC97_ENUM("Capture to Headphone Mux", wm9713_enum[1]),
+AC97_SINGLE("Capture to Headphone Volume", AC97_VIDEO, 11, 7, 1),
+AC97_ENUM("Capture to Mono Mux", wm9713_enum[2]),
+AC97_SINGLE("Capture to Mono Boost (+20dB) Switch", AC97_VIDEO, 8, 1, 0),
+AC97_SINGLE("Capture ADC Boost (+20dB) Switch", AC97_VIDEO, 6, 1, 0),
+AC97_ENUM("Capture Select", wm9713_enum[3]),
+
+AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
+AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
+AC97_SINGLE("ALC Decay Time ", AC97_CODEC_CLASS_REV, 4, 15, 0),
+AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
+AC97_ENUM("ALC Function", wm9713_enum[5]),
+AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 0),
+AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 0),
+AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
+AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
+AC97_ENUM("ALC NG Type", wm9713_enum[13]),
+AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 0),
+
+AC97_DOUBLE("Master ZC Switch", AC97_MASTER, 14, 6, 1, 0),
+AC97_DOUBLE("Headphone ZC Switch", AC97_HEADPHONE, 14, 6, 1, 0),
+AC97_DOUBLE("Out3/4 ZC Switch", AC97_MASTER_MONO, 14, 6, 1, 0),
+AC97_SINGLE("Master Right Switch", AC97_MASTER, 7, 1, 1),
+AC97_SINGLE("Headphone Right Switch", AC97_HEADPHONE, 7, 1, 1),
+AC97_SINGLE("Out3/4 Right Switch", AC97_MASTER_MONO, 7, 1, 1),
+
+AC97_SINGLE("Mono In to Headphone Switch", AC97_MASTER_TONE, 15, 1, 1),
+AC97_SINGLE("Mono In to Master Switch", AC97_MASTER_TONE, 14, 1, 1),
+AC97_SINGLE("Mono In Volume", AC97_MASTER_TONE, 8, 31, 1),
+AC97_SINGLE("Mono Switch", AC97_MASTER_TONE, 7, 1, 1),
+AC97_SINGLE("Mono ZC Switch", AC97_MASTER_TONE, 6, 1, 0),
+AC97_SINGLE("Mono Volume", AC97_MASTER_TONE, 0, 31, 1),
+
+AC97_SINGLE("PC Beep to Headphone Switch", AC97_AUX, 15, 1, 1),
+AC97_SINGLE("PC Beep to Headphone Volume", AC97_AUX, 12, 7, 1),
+AC97_SINGLE("PC Beep to Master Switch", AC97_AUX, 11, 1, 1),
+AC97_SINGLE("PC Beep to Master Volume", AC97_AUX, 8, 7, 1),
+AC97_SINGLE("PC Beep to Mono Switch", AC97_AUX, 7, 1, 1),
+AC97_SINGLE("PC Beep to Mono Volume", AC97_AUX, 4, 7, 1),
+
+AC97_SINGLE("Voice to Headphone Switch", AC97_PCM, 15, 1, 1),
+AC97_SINGLE("Voice to Headphone Volume", AC97_PCM, 12, 7, 1),
+AC97_SINGLE("Voice to Master Switch", AC97_PCM, 11, 1, 1),
+AC97_SINGLE("Voice to Master Volume", AC97_PCM, 8, 7, 1),
+AC97_SINGLE("Voice to Mono Switch", AC97_PCM, 7, 1, 1),
+AC97_SINGLE("Voice to Mono Volume", AC97_PCM, 4, 7, 1),
+
+AC97_SINGLE("Aux to Headphone Switch", AC97_REC_SEL, 15, 1, 1),
+AC97_SINGLE("Aux to Headphone Volume", AC97_REC_SEL, 12, 7, 1),
+AC97_SINGLE("Aux to Master Switch", AC97_REC_SEL, 11, 1, 1),
+AC97_SINGLE("Aux to Master Volume", AC97_REC_SEL, 8, 7, 1),
+AC97_SINGLE("Aux to Mono Switch", AC97_REC_SEL, 7, 1, 1),
+AC97_SINGLE("Aux to Mono Volume", AC97_REC_SEL, 4, 7, 1),
+
+AC97_ENUM("Mono Input Mux", wm9713_enum[6]),
+AC97_ENUM("Master Input Mux", wm9713_enum[7]),
+AC97_ENUM("Headphone Input Mux", wm9713_enum[8]),
+AC97_ENUM("Out 3 Input Mux", wm9713_enum[9]),
+AC97_ENUM("Out 4 Input Mux", wm9713_enum[10]),
+
+AC97_ENUM("Bass Control", wm9713_enum[12]),
+AC97_SINGLE("Bass Cut-off Switch", AC97_GENERAL_PURPOSE, 12, 1, 1),
+AC97_SINGLE("Tone Cut-off Switch", AC97_GENERAL_PURPOSE, 4, 1, 1),
+AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_GENERAL_PURPOSE, 6, 1, 0),
+AC97_SINGLE("Bass Volume", AC97_GENERAL_PURPOSE, 8, 15, 1),
+AC97_SINGLE("Tone Volume", AC97_GENERAL_PURPOSE, 0, 15, 1),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_dac[] = {
-AC97_DOUBLE("DAC Volume", AC97_PHONE, 8, 0, 31, 1),
-AC97_SINGLE("DAC to Headphone Mute", AC97_PHONE, 15, 1, 1),
-AC97_SINGLE("DAC to Speaker Mute", AC97_PHONE, 14, 1, 1),
-AC97_SINGLE("DAC to Mono Mute", AC97_PHONE, 13, 1, 1),
+static const snd_kcontrol_new_t wm13_snd_ac97_controls_3d[] = {
+AC97_ENUM("Inv Input Mux", wm9713_enum[11]),
+AC97_SINGLE("3D Upper Cut-off Switch", AC97_REC_GAIN_MIC, 5, 1, 0),
+AC97_SINGLE("3D Lower Cut-off Switch", AC97_REC_GAIN_MIC, 4, 1, 0),
+AC97_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_mic[] = {
-AC97_SINGLE("MICA Volume", AC97_MIC, 8, 31, 1),
-AC97_SINGLE("MICB Volume", AC97_MIC, 0, 31, 1),
-AC97_SINGLE("MICA to Mono Mute", AC97_LINE, 7, 1, 1),
-AC97_SINGLE("MICB to Mono Mute", AC97_LINE, 6, 1, 1),
-AC97_SINGLE("MIC Boost (+20dB)", AC97_LINE, 5, 1, 1),
-AC97_ENUM("MIC Headphone Routing", wm9713_enum[0]),
-AC97_SINGLE("MIC Headphone Mixer Volume", AC97_LINE, 0, 7, 1)
-};
-
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_adc[] = {
-AC97_SINGLE("ADC Mute", AC97_CD, 15, 1, 1),
-AC97_DOUBLE("Gain Step Size (1.5dB/0.75dB)", AC97_CD, 14, 6, 1, 1),
-AC97_DOUBLE("ADC Volume",AC97_CD, 8, 0, 15, 0),
-AC97_SINGLE("ADC Zero Cross", AC97_CD, 7, 1, 1),
-};
-
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_recsel[] = {
-AC97_ENUM("Record to Headphone Path", wm9713_enum[1]),
-AC97_SINGLE("Record to Headphone Volume", AC97_VIDEO, 11, 7, 0),
-AC97_ENUM("Record to Mono Path", wm9713_enum[2]),
-AC97_SINGLE("Record to Mono Boost (+20dB)", AC97_VIDEO, 8, 1, 0),
-AC97_SINGLE("Record ADC Boost (+20dB)", AC97_VIDEO, 6, 1, 0),
-AC97_ENUM("Record Select Left", wm9713_enum[3]),
-AC97_ENUM("Record Select Right", wm9713_enum[4]),
-};
+static int patch_wolfson_wm9713_3d (ac97_t * ac97)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_3d); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_3d[i], ac97))) < 0)
+			return err;
+	}
+	return 0;
+}
 
 static int patch_wolfson_wm9713_specific(ac97_t * ac97)
 {
 	int err, i;
 
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_line_in); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_line_in[i], ac97))) < 0)
+	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls[i], ac97))) < 0)
 			return err;
 	}
 	snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x0808);
-
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_dac); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_dac[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_PHONE, 0x0808);
-
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_mic); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_mic[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_MIC, 0x0808);
 	snd_ac97_write_cache(ac97, AC97_LINE, 0x00da);
-
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_adc); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_adc[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_CD, 0x0808);
-
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_recsel); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_recsel[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_VIDEO, 0xd612);
 	snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x1ba0);
-
 	return 0;
 }
 
@@ -525,6 +771,7 @@ static void patch_wolfson_wm9713_resume (ac97_t * ac97)
 
 static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
 	.build_specific = patch_wolfson_wm9713_specific,
+	.build_3d = patch_wolfson_wm9713_3d,
 #ifdef CONFIG_PM
 	.suspend = patch_wolfson_wm9713_suspend,
 	.resume = patch_wolfson_wm9713_resume
@@ -533,10 +780,13 @@ static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
 
 int patch_wolfson13(ac97_t * ac97)
 {
+	/* WM9713, WM9714 */
 	ac97->build_ops = &patch_wolfson_wm9713_ops;
 
 	ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_PHONE |
-		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD;
+		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD | AC97_HAS_NO_TONE |
+		AC97_HAS_NO_STD_PCM;
+	ac97->scaps &= ~AC97_SCAP_MODEM;
 
 	snd_ac97_write_cache(ac97, AC97_EXTENDED_MID, 0xda00);
 	snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0x3810);
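
The pattern running through this whole Wolfson rework: a patch_*() entry point no longer pokes registers directly but installs a snd_ac97_build_ops whose build_specific hook adds the codec's controls while the mixer is built. A minimal sketch for a hypothetical codec (the wm97zz names are illustrative; the interfaces are the ones used above):

	static const snd_kcontrol_new_t wm97zz_controls[] = {
	AC97_SINGLE("Example Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
	};

	static int patch_wm97zz_specific(ac97_t * ac97)
	{
		int err, i;
		for (i = 0; i < ARRAY_SIZE(wm97zz_controls); i++) {
			if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97zz_controls[i], ac97))) < 0)
				return err;
		}
		return 0;
	}

	static struct snd_ac97_build_ops patch_wm97zz_ops = {
		.build_specific = patch_wm97zz_specific,
	};

	int patch_wm97zz(ac97_t * ac97)
	{
		ac97->build_ops = &patch_wm97zz_ops;	/* controls added at mixer build time */
		return 0;
	}

One motivation is visible in patch_wolfson13() above: flags such as AC97_HAS_NO_TONE suppress the generic controls, and the build_specific/build_3d hooks supply codec-accurate replacements instead.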
@@ -1379,6 +1629,7 @@ static void check_ad1981_hp_jack_sense(ac97_t *ac97)
 	u32 subid = ((u32)ac97->subsystem_vendor << 16) | ac97->subsystem_device;
 	switch (subid) {
 	case 0x103c0890:	/* HP nc6000 */
+	case 0x103c099c:	/* HP nx6110 */
 	case 0x103c006d:	/* HP nx9105 */
 	case 0x17340088:	/* FSC Scenic-W */
 		/* enable headphone jack sense */
@@ -1706,7 +1957,7 @@ static const snd_kcontrol_new_t snd_ac97_controls_alc650[] = {
 };
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_alc650[] = {
-	AC97_SINGLE("IEC958 Capture Switch", AC97_ALC650_MULTICH, 11, 1, 0),
+	AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0),
 	AC97_SINGLE("Analog to IEC958 Output", AC97_ALC650_MULTICH, 12, 1, 0),
 	/* disable this controls since it doesn't work as expected */
 	/* AC97_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 13, 1, 0), */
@@ -1849,12 +2100,12 @@ static int alc655_iec958_route_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_
 }
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_alc655[] = {
-	AC97_PAGE_SINGLE("IEC958 Capture Switch", AC97_ALC650_MULTICH, 11, 1, 0, 0),
+	AC97_PAGE_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0, 0),
 	/* disable this controls since it doesn't work as expected */
 	/* AC97_PAGE_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 14, 1, 0, 0), */
 	{
 		.iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name   = "IEC958 Playback Route",
+		.name   = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
 		.info   = alc655_iec958_route_info,
 		.get    = alc655_iec958_route_get,
 		.put    = alc655_iec958_route_put,
@@ -2416,6 +2667,16 @@ int patch_vt1616(ac97_t * ac97)
 }
 
 /*
+ * VT1617A codec
+ */
+int patch_vt1617a(ac97_t * ac97)
+{
+	ac97->ext_id |= AC97_EI_SPDIF;	/* force the detection of spdif */
+	ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
+	return 0;
+}
+
+/*
  */
 static void it2646_update_jacks(ac97_t *ac97)
 {
@@ -2433,7 +2694,7 @@ static const snd_kcontrol_new_t snd_ac97_controls_it2646[] = {
 };
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_it2646[] = {
-	AC97_SINGLE("IEC958 Capture Switch", 0x76, 11, 1, 0),
+	AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0x76, 11, 1, 0),
 	AC97_SINGLE("Analog to IEC958 Output", 0x76, 12, 1, 0),
 	AC97_SINGLE("IEC958 Input Monitor", 0x76, 13, 1, 0),
 };
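
The recurring rename in this file (and in the ali5451, cs46xx, ca0106 and cmipci hunks below) replaces hand-written IEC958 control names with the SNDRV_CTL_NAME_IEC958() macro, so every driver emits exactly the strings user space expects. Assuming the macro's usual definition in <sound/asound.h>, the three shapes used here expand as:

	SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH)         /* "IEC958 Capture Switch" */
	SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH)     /* "IEC958 Output Switch" (note the
							    capital S that the old hand-written
							    ali5451 string lacked) */
	SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route"  /* "IEC958 Playback Route" */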
diff --git a/sound/pci/ac97/ac97_patch.h b/sound/pci/ac97/ac97_patch.h
index 7b7377d0f2ae..ec1811320106 100644
--- a/sound/pci/ac97/ac97_patch.h
+++ b/sound/pci/ac97/ac97_patch.h
@@ -56,5 +56,6 @@ int patch_cm9739(ac97_t * ac97);
 int patch_cm9761(ac97_t * ac97);
 int patch_cm9780(ac97_t * ac97);
 int patch_vt1616(ac97_t * ac97);
+int patch_vt1617a(ac97_t * ac97);
 int patch_it2646(ac97_t * ac97);
 int mpatch_si3036(ac97_t * ac97);
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index f08ae71f902d..ce6c9fadb594 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1842,7 +1842,7 @@ static int __devinit snd_ali_pcm(ali_t * codec, int device, struct ali_pcm_descr
 	return 0;
 }
 
-struct ali_pcm_description ali_pcms[] = {
+static struct ali_pcm_description ali_pcms[] = {
 	{ "ALI 5451", ALI_CHANNELS, 1, &snd_ali_playback_ops, &snd_ali_capture_ops },
 	{ "ALI 5451 modem", 1, 1, &snd_ali_modem_playback_ops, &snd_ali_modem_capture_ops }
 };
@@ -1959,9 +1959,9 @@ static int snd_ali5451_spdif_put(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
 static snd_kcontrol_new_t snd_ali5451_mixer_spdif[] __devinitdata = {
 	/* spdif aplayback switch */
 	/* FIXME: "IEC958 Playback Switch" may conflict with one on ac97_codec */
-	ALI5451_SPDIF("IEC958 Output switch", 0, 0),
+	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH), 0, 0),
 	/* spdif out to spdif channel */
-	ALI5451_SPDIF("IEC958 Channel Output Switch", 0, 1),
+	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Channel Output ",NONE,SWITCH), 0, 1),
 	/* spdif in from spdif channel */
 	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, 2)
 };
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index cafab4af5c57..904d17394e1c 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -248,6 +248,7 @@ struct snd_atiixp_dma {
 	unsigned int period_bytes, periods;
 	int opened;
 	int running;
+	int suspended;
 	int pcm_open_flag;
 	int ac97_pcm_type;	/* index # of ac97_pcm to access, -1 = not used */
 	unsigned int saved_curptr;
@@ -699,12 +700,18 @@ static int snd_atiixp_pcm_trigger(snd_pcm_substream_t *substream, int cmd)
 	spin_lock(&chip->reg_lock);
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
 		dma->ops->enable_transfer(chip, 1);
 		dma->running = 1;
+		dma->suspended = 0;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
 		dma->ops->enable_transfer(chip, 0);
 		dma->running = 0;
+		dma->suspended = cmd == SNDRV_PCM_TRIGGER_SUSPEND;
 		break;
 	default:
 		err = -EINVAL;
@@ -975,6 +982,7 @@ static snd_pcm_hardware_t snd_atiixp_pcm_hw =
 {
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_PAUSE |
 				 SNDRV_PCM_INFO_RESUME |
 				 SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
@@ -1443,7 +1451,7 @@ static int snd_atiixp_resume(snd_card_t *card)
 	for (i = 0; i < NUM_ATI_PCMDEVS; i++)
 		if (chip->pcmdevs[i]) {
 			atiixp_dma_t *dma = &chip->dmas[i];
-			if (dma->substream && dma->running) {
+			if (dma->substream && dma->suspended) {
 				dma->ops->enable_dma(chip, 1);
 				writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
 				       chip->remap_addr + dma->ops->llp_offset);
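
The atiixp hunks add PAUSE/RESUME trigger support and fix resume: previously every stream with running set was restarted, even ones the user had deliberately stopped. The new suspended flag records why a stream stopped. A sketch of the resulting state handling (illustrative only):

	/* TRIGGER_SUSPEND             -> running = 0, suspended = 1
	 * TRIGGER_STOP / PAUSE_PUSH   -> running = 0, suspended = 0
	 * TRIGGER_START/RESUME/...    -> running = 1, suspended = 0
	 */
	if (dma->substream && dma->suspended)
		dma->ops->enable_dma(chip, 1);	/* re-arm only streams stopped by suspend */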
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 04dcefd8b8ff..38bd2b5dd434 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -33,7 +33,7 @@
 /* hardware definition */
 static snd_pcm_hardware_t snd_vortex_playback_hw_adb = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -58,7 +58,7 @@ static snd_pcm_hardware_t snd_vortex_playback_hw_adb = {
 #ifndef CHIP_AU8820
 static snd_pcm_hardware_t snd_vortex_playback_hw_a3d = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -78,7 +78,7 @@ static snd_pcm_hardware_t snd_vortex_playback_hw_a3d = {
 #endif
 static snd_pcm_hardware_t snd_vortex_playback_hw_spdif = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -220,8 +220,10 @@ snd_vortex_pcm_hw_params(snd_pcm_substream_t * substream,
 	    vortex_adb_allocroute(chip, -1,
 				  params_channels(hw_params),
 				  substream->stream, type);
-	if (dma < 0)
+	if (dma < 0) {
+		spin_unlock_irq(&chip->lock);
 		return dma;
+	}
 	stream = substream->runtime->private_data = &chip->dma_adb[dma];
 	stream->substream = substream;
 	/* Setup Buffers. */
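
The au88x0 change fixes a classic lock-balance bug: the error path returned with chip->lock still held (the surrounding code takes it with spin_lock_irq()), deadlocking the next PCM operation. An equivalent structure using the single-exit idiom common in kernel code (a sketch only, not the driver's actual code):

	spin_lock_irq(&chip->lock);
	dma = vortex_adb_allocroute(chip, -1, params_channels(hw_params),
				    substream->stream, type);
	if (dma < 0)
		goto out;		/* the error path now unlocks too */
	/* ... set up the stream ... */
  out:
	spin_unlock_irq(&chip->lock);
	return dma < 0 ? dma : 0;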
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 95c289284267..7e27bfc37439 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -188,6 +188,14 @@ static ca0106_details_t ca0106_chip_details[] = {
188 .name = "MSI K8N Diamond MB [SB0438]", 188 .name = "MSI K8N Diamond MB [SB0438]",
189 .gpio_type = 1, 189 .gpio_type = 1,
190 .i2c_adc = 1 } , 190 .i2c_adc = 1 } ,
191 /* Shuttle XPC SD31P which has an onboard Creative Labs Sound Blaster Live! 24-bit EAX
192 * high-definition 7.1 audio processor".
193 * Added using info from andrewvegan in alsa bug #1298
194 */
195 { .serial = 0x30381297,
196 .name = "Shuttle XPC SD31P [SD31P]",
197 .gpio_type = 1,
198 .i2c_adc = 1 } ,
191 { .serial = 0, 199 { .serial = 0,
192 .name = "AudigyLS [Unknown]" } 200 .name = "AudigyLS [Unknown]" }
193}; 201};
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index 0e5e9ce0ff28..b6b8882ce704 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -297,7 +297,7 @@ static int snd_ca0106_spdif_put(snd_kcontrol_t * kcontrol,
 static snd_kcontrol_new_t snd_ca0106_spdif_mask_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
 	.count =	4,
 	.info =         snd_ca0106_spdif_info,
@@ -306,7 +306,7 @@ static snd_kcontrol_new_t snd_ca0106_spdif_mask_control =
 
 static snd_kcontrol_new_t snd_ca0106_spdif_control =
 {
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
 	.count =	4,
 	.info =         snd_ca0106_spdif_info,
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index f5a4ac1ceef9..b098b51099c2 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1029,7 +1029,7 @@ static int snd_cmipci_spdif_mask_get(snd_kcontrol_t * kcontrol,
 static snd_kcontrol_new_t snd_cmipci_spdif_mask __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_cmipci_spdif_mask_info,
 	.get =		snd_cmipci_spdif_mask_get,
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index db212ecd792a..b9fff4ee6f9d 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -113,7 +113,7 @@ static int __devinit snd_card_cs46xx_probe(struct pci_dev *pci,
 		return err;
 	}
 #endif
-	if ((err = snd_cs46xx_mixer(chip)) < 0) {
+	if ((err = snd_cs46xx_mixer(chip, 2)) < 0) {
 		snd_card_free(card);
 		return err;
 	}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index ff28af1f658e..4b052158ee33 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1243,8 +1243,8 @@ static snd_pcm_hardware_t snd_cs46xx_playback =
 {
 	.info =			(SNDRV_PCM_INFO_MMAP |
 				 SNDRV_PCM_INFO_INTERLEAVED |
-				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
 				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
 				 SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE),
@@ -1265,8 +1265,8 @@ static snd_pcm_hardware_t snd_cs46xx_capture =
 {
 	.info =			(SNDRV_PCM_INFO_MMAP |
 				 SNDRV_PCM_INFO_INTERLEAVED |
-				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
 	.rate_min =		5500,
@@ -2231,7 +2231,7 @@ static snd_kcontrol_new_t snd_cs46xx_controls[] __devinitdata = {
2231}, 2231},
2232{ 2232{
2233 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2233 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2234 .name = "IEC958 Output Switch", 2234 .name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
2235 .info = snd_mixer_boolean_info, 2235 .info = snd_mixer_boolean_info,
2236 .get = snd_cs46xx_iec958_get, 2236 .get = snd_cs46xx_iec958_get,
2237 .put = snd_cs46xx_iec958_put, 2237 .put = snd_cs46xx_iec958_put,
@@ -2239,7 +2239,7 @@ static snd_kcontrol_new_t snd_cs46xx_controls[] __devinitdata = {
2239}, 2239},
2240{ 2240{
2241 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2241 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2242 .name = "IEC958 Input Switch", 2242 .name = SNDRV_CTL_NAME_IEC958("Input ",NONE,SWITCH),
2243 .info = snd_mixer_boolean_info, 2243 .info = snd_mixer_boolean_info,
2244 .get = snd_cs46xx_iec958_get, 2244 .get = snd_cs46xx_iec958_get,
2245 .put = snd_cs46xx_iec958_put, 2245 .put = snd_cs46xx_iec958_put,
@@ -2249,7 +2249,7 @@ static snd_kcontrol_new_t snd_cs46xx_controls[] __devinitdata = {
2249/* Input IEC958 volume does not work for the moment. (Benny) */ 2249/* Input IEC958 volume does not work for the moment. (Benny) */
2250{ 2250{
2251 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2251 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2252 .name = "IEC958 Input Volume", 2252 .name = SNDRV_CTL_NAME_IEC958("Input ",NONE,VOLUME),
2253 .info = snd_cs46xx_vol_info, 2253 .info = snd_cs46xx_vol_info,
2254 .get = snd_cs46xx_vol_iec958_get, 2254 .get = snd_cs46xx_vol_iec958_get,
2255 .put = snd_cs46xx_vol_iec958_put, 2255 .put = snd_cs46xx_vol_iec958_put,
@@ -2440,7 +2440,7 @@ static int __devinit cs46xx_detect_codec(cs46xx_t *chip, int codec)
2440 return -ENXIO; 2440 return -ENXIO;
2441} 2441}
2442 2442
2443int __devinit snd_cs46xx_mixer(cs46xx_t *chip) 2443int __devinit snd_cs46xx_mixer(cs46xx_t *chip, int spdif_device)
2444{ 2444{
2445 snd_card_t *card = chip->card; 2445 snd_card_t *card = chip->card;
2446 snd_ctl_elem_id_t id; 2446 snd_ctl_elem_id_t id;
@@ -2476,6 +2476,8 @@ int __devinit snd_cs46xx_mixer(cs46xx_t *chip)
2476 for (idx = 0; idx < ARRAY_SIZE(snd_cs46xx_controls); idx++) { 2476 for (idx = 0; idx < ARRAY_SIZE(snd_cs46xx_controls); idx++) {
2477 snd_kcontrol_t *kctl; 2477 snd_kcontrol_t *kctl;
2478 kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip); 2478 kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip);
2479 if (kctl && kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM)
2480 kctl->id.device = spdif_device;
2479 if ((err = snd_ctl_add(card, kctl)) < 0) 2481 if ((err = snd_ctl_add(card, kctl)) < 0)
2480 return err; 2482 return err;
2481 } 2483 }
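
The cs46xx hunks above share one pattern: the IEC958 controls move from the MIXER interface to the PCM interface, and snd_cs46xx_mixer() grows a spdif_device argument (2 in the probe call above) used to tag every IFACE_PCM control with the PCM device it belongs to, so user space can tie the SPDIF controls to a specific PCM. A minimal user-space model of that tagging loop; the types are local stand-ins for snd_kcontrol, not the ALSA API:

/* Model of the tagging loop added to snd_cs46xx_mixer(); the types are
 * local stand-ins for snd_kcontrol, not the ALSA API. */
#include <stdio.h>

enum iface { IFACE_MIXER, IFACE_PCM };

struct kctl {
	const char *name;
	enum iface iface;
	int device;    /* which PCM device the control belongs to */
};

static void tag_pcm_controls(struct kctl *ctls, int n, int spdif_device)
{
	for (int i = 0; i < n; i++)
		if (ctls[i].iface == IFACE_PCM)
			ctls[i].device = spdif_device;
}

int main(void)
{
	struct kctl ctls[] = {
		{ "IEC958 Output Switch",    IFACE_MIXER, 0 },
		{ "IEC958 Playback Default", IFACE_PCM,   0 },
	};
	tag_pcm_controls(ctls, 2, 2);   /* 2 == spdif_device in the probe */
	printf("%s -> device %d\n", ctls[1].name, ctls[1].device);
	return 0;
}
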
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index b17142cabead..fc377c4b666c 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -149,7 +149,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
149 } 149 }
150 } 150 }
151 151
152 if ((err = snd_emu10k1_mixer(emu)) < 0) { 152 if ((err = snd_emu10k1_mixer(emu, 0, 3)) < 0) {
153 snd_card_free(card); 153 snd_card_free(card);
154 return err; 154 return err;
155 } 155 }
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 746b51ef3966..e69d5b739e80 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -741,12 +741,20 @@ static emu_chip_details_t emu_chip_details[] = {
741 .emu10k1_chip = 1, 741 .emu10k1_chip = 1,
742 .ac97_chip = 1, 742 .ac97_chip = 1,
743 .sblive51 = 1} , 743 .sblive51 = 1} ,
744 /* Tested by Thomas Zehetbauer 27th Aug 2005 */
745 {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80651102,
746 .driver = "EMU10K1", .name = "SB Live 5.1 [SB0220]",
747 .id = "Live",
748 .emu10k1_chip = 1,
749 .ac97_chip = 1,
750 .sblive51 = 1} ,
744 {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80641102, 751 {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80641102,
745 .driver = "EMU10K1", .name = "SB Live 5.1", 752 .driver = "EMU10K1", .name = "SB Live 5.1",
746 .id = "Live", 753 .id = "Live",
747 .emu10k1_chip = 1, 754 .emu10k1_chip = 1,
748 .ac97_chip = 1, 755 .ac97_chip = 1,
749 .sblive51 = 1} , 756 .sblive51 = 1} ,
757 /* Tested by alsa bugtrack user "hus" 12th Sept 2005 */
750 {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102, 758 {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102,
751 .driver = "EMU10K1", .name = "SBLive! Player 5.1 [SB0060]", 759 .driver = "EMU10K1", .name = "SBLive! Player 5.1 [SB0060]",
752 .id = "Live", 760 .id = "Live",
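
The new SB0220 row above slots into emu_chip_details[], which the driver scans by PCI vendor/device/subsystem ID to pick the card name and feature flags. A hedged sketch of that kind of lookup; the types and the exact match policy are simplified stand-ins for the real emu_chip_details_t handling:

/* Sketch of a subsystem-ID quirk-table lookup, modelled on
 * emu_chip_details[]; types and match policy are simplified. */
#include <stdio.h>

struct chip_details {
	unsigned vendor, device, subsystem;  /* subsystem == 0: match any */
	const char *name;
};

static const struct chip_details table[] = {
	{ 0x1102, 0x0002, 0x80651102, "SB Live 5.1 [SB0220]" },
	{ 0x1102, 0x0002, 0x80641102, "SB Live 5.1" },
	{ 0x1102, 0x0002, 0,          "SB Live (generic)" },  /* catch-all last */
	{ 0 }
};

static const struct chip_details *lookup(unsigned v, unsigned d, unsigned s)
{
	for (const struct chip_details *c = table; c->vendor; c++)
		if (c->vendor == v && c->device == d &&
		    (!c->subsystem || c->subsystem == s))
			return c;
	return NULL;
}

int main(void)
{
	const struct chip_details *c = lookup(0x1102, 0x0002, 0x80651102);
	printf("%s\n", c ? c->name : "unknown");
	return 0;
}
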
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index e90c5ddd1d17..52c7826df440 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1183,7 +1183,7 @@ static int snd_emu10k1x_spdif_put(snd_kcontrol_t * kcontrol,
1183static snd_kcontrol_new_t snd_emu10k1x_spdif_mask_control = 1183static snd_kcontrol_new_t snd_emu10k1x_spdif_mask_control =
1184{ 1184{
1185 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1185 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1186 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1186 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1187 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), 1187 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
1188 .count = 3, 1188 .count = 3,
1189 .info = snd_emu10k1x_spdif_info, 1189 .info = snd_emu10k1x_spdif_info,
@@ -1192,7 +1192,7 @@ static snd_kcontrol_new_t snd_emu10k1x_spdif_mask_control =
1192 1192
1193static snd_kcontrol_new_t snd_emu10k1x_spdif_control = 1193static snd_kcontrol_new_t snd_emu10k1x_spdif_control =
1194{ 1194{
1195 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1195 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1196 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), 1196 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
1197 .count = 3, 1197 .count = 3,
1198 .info = snd_emu10k1x_spdif_info, 1198 .info = snd_emu10k1x_spdif_info,
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 0529fb281125..637c555cfdb1 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -1159,12 +1159,12 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
1159 /* Optical SPDIF Playback Volume */ 1159 /* Optical SPDIF Playback Volume */
1160 A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_OPT_SPDIF_L); 1160 A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_OPT_SPDIF_L);
1161 A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_OPT_SPDIF_R); 1161 A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_OPT_SPDIF_R);
1162 snd_emu10k1_init_stereo_control(&controls[nctl++], "IEC958 Optical Playback Volume", gpr, 0); 1162 snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",PLAYBACK,VOLUME), gpr, 0);
1163 gpr += 2; 1163 gpr += 2;
1164 /* Optical SPDIF Capture Volume */ 1164 /* Optical SPDIF Capture Volume */
1165 A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_OPT_SPDIF_L); 1165 A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_OPT_SPDIF_L);
1166 A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_OPT_SPDIF_R); 1166 A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_OPT_SPDIF_R);
1167 snd_emu10k1_init_stereo_control(&controls[nctl++], "IEC958 Optical Capture Volume", gpr, 0); 1167 snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",CAPTURE,VOLUME), gpr, 0);
1168 gpr += 2; 1168 gpr += 2;
1169 1169
1170 /* Line2 Playback Volume */ 1170 /* Line2 Playback Volume */
@@ -1389,7 +1389,7 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
1389 A_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000); 1389 A_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000);
1390 } 1390 }
1391 } 1391 }
1392 snd_emu10k1_init_stereo_onoff_control(controls + nctl++, "IEC958 Optical Raw Playback Switch", gpr, 0); 1392 snd_emu10k1_init_stereo_onoff_control(controls + nctl++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0);
1393 gpr += 2; 1393 gpr += 2;
1394 1394
1395 A_PUT_STEREO_OUTPUT(A_EXTOUT_REAR_L, A_EXTOUT_REAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS); 1395 A_PUT_STEREO_OUTPUT(A_EXTOUT_REAR_L, A_EXTOUT_REAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS);
@@ -1716,7 +1716,7 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1716 /* IEC958 TTL Playback Volume */ 1716 /* IEC958 TTL Playback Volume */
1717 for (z = 0; z < 2; z++) 1717 for (z = 0; z < 2; z++)
1718 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_SPDIF_CD_L + z, gpr + z); 1718 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_SPDIF_CD_L + z, gpr + z);
1719 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 TTL Playback Volume", gpr, 0); 1719 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",PLAYBACK,VOLUME), gpr, 0);
1720 gpr += 2; 1720 gpr += 2;
1721 1721
1722 /* IEC958 TTL Capture Volume + Switch */ 1722 /* IEC958 TTL Capture Volume + Switch */
@@ -1724,8 +1724,8 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1724 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_SPDIF_CD_L + z, gpr + 2 + z); 1724 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_SPDIF_CD_L + z, gpr + 2 + z);
1725 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); 1725 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
1726 } 1726 }
1727 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 TTL Capture Volume", gpr, 0); 1727 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,VOLUME), gpr, 0);
1728 snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 TTL Capture Switch", gpr + 2, 0); 1728 snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,SWITCH), gpr + 2, 0);
1729 gpr += 4; 1729 gpr += 4;
1730 } 1730 }
1731 1731
@@ -1750,7 +1750,7 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1750 /* IEC958 Optical Playback Volume */ 1750 /* IEC958 Optical Playback Volume */
1751 for (z = 0; z < 2; z++) 1751 for (z = 0; z < 2; z++)
1752 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_TOSLINK_L + z, gpr + z); 1752 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_TOSLINK_L + z, gpr + z);
1753 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 LiveDrive Playback Volume", gpr, 0); 1753 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",PLAYBACK,VOLUME), gpr, 0);
1754 gpr += 2; 1754 gpr += 2;
1755 1755
1756 /* IEC958 Optical Capture Volume */ 1756 /* IEC958 Optical Capture Volume */
@@ -1758,8 +1758,8 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1758 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_TOSLINK_L + z, gpr + 2 + z); 1758 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_TOSLINK_L + z, gpr + 2 + z);
1759 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); 1759 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
1760 } 1760 }
1761 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 LiveDrive Capture Volume", gpr, 0); 1761 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,VOLUME), gpr, 0);
1762 snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 LiveDrive Capture Switch", gpr + 2, 0); 1762 snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,SWITCH), gpr + 2, 0);
1763 gpr += 4; 1763 gpr += 4;
1764 } 1764 }
1765 1765
@@ -1784,7 +1784,7 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1784 /* IEC958 Coax Playback Volume */ 1784 /* IEC958 Coax Playback Volume */
1785 for (z = 0; z < 2; z++) 1785 for (z = 0; z < 2; z++)
1786 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_COAX_SPDIF_L + z, gpr + z); 1786 VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_COAX_SPDIF_L + z, gpr + z);
1787 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 Coaxial Playback Volume", gpr, 0); 1787 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",PLAYBACK,VOLUME), gpr, 0);
1788 gpr += 2; 1788 gpr += 2;
1789 1789
1790 /* IEC958 Coax Capture Volume + Switch */ 1790 /* IEC958 Coax Capture Volume + Switch */
@@ -1792,8 +1792,8 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1792 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_COAX_SPDIF_L + z, gpr + 2 + z); 1792 SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_COAX_SPDIF_L + z, gpr + 2 + z);
1793 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z); 1793 VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
1794 } 1794 }
1795 snd_emu10k1_init_stereo_control(controls + i++, "IEC958 Coaxial Capture Volume", gpr, 0); 1795 snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,VOLUME), gpr, 0);
1796 snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 Coaxial Capture Switch", gpr + 2, 0); 1796 snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,SWITCH), gpr + 2, 0);
1797 gpr += 4; 1797 gpr += 4;
1798 } 1798 }
1799 1799
@@ -1920,7 +1920,7 @@ static int __devinit _snd_emu10k1_init_efx(emu10k1_t *emu)
1920#endif 1920#endif
1921 } 1921 }
1922 1922
1923 snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 Optical Raw Playback Switch", gpr, 0); 1923 snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0);
1924 gpr += 2; 1924 gpr += 2;
1925 } 1925 }
1926 1926
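
Every rename in this file replaces a hand-written "IEC958 ..." string with SNDRV_CTL_NAME_IEC958(), which assembles the control name at compile time through string-literal concatenation, keeping IEC958 naming uniform across drivers. An illustrative re-creation of such a macro; the helper token names below follow the kernel's convention from memory and are not quoted from the source:

/* Illustrative re-creation of string-pasting control-name macros; the
 * real definitions live in the ALSA control header. */
#include <stdio.h>

#define CTL_NAME_NONE           ""
#define CTL_NAME_PLAYBACK       "Playback "
#define CTL_NAME_CAPTURE        "Capture "
#define CTL_NAME_IEC958_SWITCH  "Switch"
#define CTL_NAME_IEC958_VOLUME  "Volume"
#define CTL_NAME_IEC958(expl, dir, what) \
	"IEC958 " expl CTL_NAME_##dir CTL_NAME_IEC958_##what

int main(void)
{
	/* Expands to "IEC958 Optical Playback Volume" at compile time. */
	puts(CTL_NAME_IEC958("Optical ", PLAYBACK, VOLUME));
	puts(CTL_NAME_IEC958("Coaxial ", CAPTURE, SWITCH));
	return 0;
}
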
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 6be82c5fe138..d71a72e84bcc 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -181,7 +181,7 @@ static int snd_emu10k1_spdif_put(snd_kcontrol_t * kcontrol,
181static snd_kcontrol_new_t snd_emu10k1_spdif_mask_control = 181static snd_kcontrol_new_t snd_emu10k1_spdif_mask_control =
182{ 182{
183 .access = SNDRV_CTL_ELEM_ACCESS_READ, 183 .access = SNDRV_CTL_ELEM_ACCESS_READ,
184 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 184 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
185 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), 185 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
186 .count = 4, 186 .count = 4,
187 .info = snd_emu10k1_spdif_info, 187 .info = snd_emu10k1_spdif_info,
@@ -190,7 +190,7 @@ static snd_kcontrol_new_t snd_emu10k1_spdif_mask_control =
190 190
191static snd_kcontrol_new_t snd_emu10k1_spdif_control = 191static snd_kcontrol_new_t snd_emu10k1_spdif_control =
192{ 192{
193 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 193 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
194 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), 194 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
195 .count = 4, 195 .count = 4,
196 .info = snd_emu10k1_spdif_info, 196 .info = snd_emu10k1_spdif_info,
@@ -295,7 +295,7 @@ static int snd_emu10k1_send_routing_put(snd_kcontrol_t * kcontrol,
295static snd_kcontrol_new_t snd_emu10k1_send_routing_control = 295static snd_kcontrol_new_t snd_emu10k1_send_routing_control =
296{ 296{
297 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, 297 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
298 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 298 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
299 .name = "EMU10K1 PCM Send Routing", 299 .name = "EMU10K1 PCM Send Routing",
300 .count = 32, 300 .count = 32,
301 .info = snd_emu10k1_send_routing_info, 301 .info = snd_emu10k1_send_routing_info,
@@ -364,7 +364,7 @@ static int snd_emu10k1_send_volume_put(snd_kcontrol_t * kcontrol,
364static snd_kcontrol_new_t snd_emu10k1_send_volume_control = 364static snd_kcontrol_new_t snd_emu10k1_send_volume_control =
365{ 365{
366 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, 366 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
367 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 367 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
368 .name = "EMU10K1 PCM Send Volume", 368 .name = "EMU10K1 PCM Send Volume",
369 .count = 32, 369 .count = 32,
370 .info = snd_emu10k1_send_volume_info, 370 .info = snd_emu10k1_send_volume_info,
@@ -427,7 +427,7 @@ static int snd_emu10k1_attn_put(snd_kcontrol_t * kcontrol,
427static snd_kcontrol_new_t snd_emu10k1_attn_control = 427static snd_kcontrol_new_t snd_emu10k1_attn_control =
428{ 428{
429 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, 429 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
430 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 430 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
431 .name = "EMU10K1 PCM Volume", 431 .name = "EMU10K1 PCM Volume",
432 .count = 32, 432 .count = 32,
433 .info = snd_emu10k1_attn_info, 433 .info = snd_emu10k1_attn_info,
@@ -737,7 +737,8 @@ static int rename_ctl(snd_card_t *card, const char *src, const char *dst)
737 return -ENOENT; 737 return -ENOENT;
738} 738}
739 739
740int __devinit snd_emu10k1_mixer(emu10k1_t *emu) 740int __devinit snd_emu10k1_mixer(emu10k1_t *emu,
741 int pcm_device, int multi_device)
741{ 742{
742 int err, pcm; 743 int err, pcm;
743 snd_kcontrol_t *kctl; 744 snd_kcontrol_t *kctl;
@@ -852,29 +853,35 @@ int __devinit snd_emu10k1_mixer(emu10k1_t *emu)
852 853
853 if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL) 854 if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
854 return -ENOMEM; 855 return -ENOMEM;
856 kctl->id.device = pcm_device;
855 if ((err = snd_ctl_add(card, kctl))) 857 if ((err = snd_ctl_add(card, kctl)))
856 return err; 858 return err;
857 if ((kctl = emu->ctl_send_volume = snd_ctl_new1(&snd_emu10k1_send_volume_control, emu)) == NULL) 859 if ((kctl = emu->ctl_send_volume = snd_ctl_new1(&snd_emu10k1_send_volume_control, emu)) == NULL)
858 return -ENOMEM; 860 return -ENOMEM;
861 kctl->id.device = pcm_device;
859 if ((err = snd_ctl_add(card, kctl))) 862 if ((err = snd_ctl_add(card, kctl)))
860 return err; 863 return err;
861 if ((kctl = emu->ctl_attn = snd_ctl_new1(&snd_emu10k1_attn_control, emu)) == NULL) 864 if ((kctl = emu->ctl_attn = snd_ctl_new1(&snd_emu10k1_attn_control, emu)) == NULL)
862 return -ENOMEM; 865 return -ENOMEM;
866 kctl->id.device = pcm_device;
863 if ((err = snd_ctl_add(card, kctl))) 867 if ((err = snd_ctl_add(card, kctl)))
864 return err; 868 return err;
865 869
866 if ((kctl = emu->ctl_efx_send_routing = snd_ctl_new1(&snd_emu10k1_efx_send_routing_control, emu)) == NULL) 870 if ((kctl = emu->ctl_efx_send_routing = snd_ctl_new1(&snd_emu10k1_efx_send_routing_control, emu)) == NULL)
867 return -ENOMEM; 871 return -ENOMEM;
872 kctl->id.device = multi_device;
868 if ((err = snd_ctl_add(card, kctl))) 873 if ((err = snd_ctl_add(card, kctl)))
869 return err; 874 return err;
870 875
871 if ((kctl = emu->ctl_efx_send_volume = snd_ctl_new1(&snd_emu10k1_efx_send_volume_control, emu)) == NULL) 876 if ((kctl = emu->ctl_efx_send_volume = snd_ctl_new1(&snd_emu10k1_efx_send_volume_control, emu)) == NULL)
872 return -ENOMEM; 877 return -ENOMEM;
878 kctl->id.device = multi_device;
873 if ((err = snd_ctl_add(card, kctl))) 879 if ((err = snd_ctl_add(card, kctl)))
874 return err; 880 return err;
875 881
876 if ((kctl = emu->ctl_efx_attn = snd_ctl_new1(&snd_emu10k1_efx_attn_control, emu)) == NULL) 882 if ((kctl = emu->ctl_efx_attn = snd_ctl_new1(&snd_emu10k1_efx_attn_control, emu)) == NULL)
877 return -ENOMEM; 883 return -ENOMEM;
884 kctl->id.device = multi_device;
878 if ((err = snd_ctl_add(card, kctl))) 885 if ((err = snd_ctl_add(card, kctl)))
879 return err; 886 return err;
880 887
@@ -924,10 +931,14 @@ int __devinit snd_emu10k1_mixer(emu10k1_t *emu)
924 /* sb live! and audigy */ 931 /* sb live! and audigy */
925 if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_mask_control, emu)) == NULL) 932 if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_mask_control, emu)) == NULL)
926 return -ENOMEM; 933 return -ENOMEM;
934 if (!emu->audigy)
935 kctl->id.device = emu->pcm_efx->device;
927 if ((err = snd_ctl_add(card, kctl))) 936 if ((err = snd_ctl_add(card, kctl)))
928 return err; 937 return err;
929 if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_control, emu)) == NULL) 938 if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_control, emu)) == NULL)
930 return -ENOMEM; 939 return -ENOMEM;
940 if (!emu->audigy)
941 kctl->id.device = emu->pcm_efx->device;
931 if ((err = snd_ctl_add(card, kctl))) 942 if ((err = snd_ctl_add(card, kctl)))
932 return err; 943 return err;
933 } 944 }
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 520b99af5f55..9c35f6dde1b5 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1682,6 +1682,7 @@ static void snd_emu10k1_pcm_efx_free(snd_pcm_t *pcm)
1682int __devinit snd_emu10k1_pcm_efx(emu10k1_t * emu, int device, snd_pcm_t ** rpcm) 1682int __devinit snd_emu10k1_pcm_efx(emu10k1_t * emu, int device, snd_pcm_t ** rpcm)
1683{ 1683{
1684 snd_pcm_t *pcm; 1684 snd_pcm_t *pcm;
1685 snd_kcontrol_t *kctl;
1685 int err; 1686 int err;
1686 1687
1687 if (rpcm) 1688 if (rpcm)
@@ -1714,7 +1715,11 @@ int __devinit snd_emu10k1_pcm_efx(emu10k1_t * emu, int device, snd_pcm_t ** rpcm
1714 emu->efx_voices_mask[0] = 0xffff0000; 1715 emu->efx_voices_mask[0] = 0xffff0000;
1715 emu->efx_voices_mask[1] = 0; 1716 emu->efx_voices_mask[1] = 0;
1716 } 1717 }
1717 snd_ctl_add(emu->card, snd_ctl_new1(&snd_emu10k1_pcm_efx_voices_mask, emu)); 1718 kctl = snd_ctl_new1(&snd_emu10k1_pcm_efx_voices_mask, emu);
1719 if (!kctl)
1720 return -ENOMEM;
1721 kctl->id.device = device;
1722 snd_ctl_add(emu->card, kctl);
1718 1723
1719 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024); 1724 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
1720 1725
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 78a81f3912a1..f06b95f41a1d 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1444,7 +1444,7 @@ static int snd_es1371_spdif_put(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
1444 1444
1445/* spdif controls */ 1445/* spdif controls */
1446static snd_kcontrol_new_t snd_es1371_mixer_spdif[] __devinitdata = { 1446static snd_kcontrol_new_t snd_es1371_mixer_spdif[] __devinitdata = {
1447 ES1371_SPDIF("IEC958 Playback Switch"), 1447 ES1371_SPDIF(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH)),
1448 { 1448 {
1449 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 1449 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1450 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), 1450 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index ff10e637a95e..36b2f62e8573 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1155,10 +1155,10 @@ FM801_SINGLE("FM Playback Switch", FM801_FM_VOL, 15, 1, 1),
1155static snd_kcontrol_new_t snd_fm801_controls_multi[] __devinitdata = { 1155static snd_kcontrol_new_t snd_fm801_controls_multi[] __devinitdata = {
1156FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0), 1156FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0),
1157FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0), 1157FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0),
1158FM801_SINGLE("IEC958 Capture Switch", FM801_I2S_MODE, 8, 1, 0), 1158FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), FM801_I2S_MODE, 8, 1, 0),
1159FM801_SINGLE("IEC958 Raw Data Playback Switch", FM801_I2S_MODE, 9, 1, 0), 1159FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",PLAYBACK,SWITCH), FM801_I2S_MODE, 9, 1, 0),
1160FM801_SINGLE("IEC958 Raw Data Capture Switch", FM801_I2S_MODE, 10, 1, 0), 1160FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",CAPTURE,SWITCH), FM801_I2S_MODE, 10, 1, 0),
1161FM801_SINGLE("IEC958 Playback Switch", FM801_GEN_CTRL, 2, 1, 0), 1161FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), FM801_GEN_CTRL, 2, 1, 0),
1162}; 1162};
1163 1163
1164static void snd_fm801_mixer_free_ac97_bus(ac97_bus_t *bus) 1164static void snd_fm801_mixer_free_ac97_bus(ac97_bus_t *bus)
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index bd8cb33c4fb4..ddfb5ff7fb8f 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -1,5 +1,5 @@
1snd-hda-intel-objs := hda_intel.o 1snd-hda-intel-objs := hda_intel.o
2snd-hda-codec-objs := hda_codec.o hda_generic.o patch_realtek.o patch_cmedia.o patch_analog.o patch_sigmatel.o 2snd-hda-codec-objs := hda_codec.o hda_generic.o patch_realtek.o patch_cmedia.o patch_analog.o patch_sigmatel.o patch_si3054.o
3ifdef CONFIG_PROC_FS 3ifdef CONFIG_PROC_FS
4snd-hda-codec-objs += hda_proc.o 4snd-hda-codec-objs += hda_proc.o
5endif 5endif
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e2cf02387289..20f7762f7144 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -432,22 +432,26 @@ void snd_hda_get_codec_name(struct hda_codec *codec,
432} 432}
433 433
434/* 434/*
 435 * look for an AFG node 435 * look for the AFG and MFG nodes
436 *
437 * return 0 if not found
438 */ 436 */
439static int look_for_afg_node(struct hda_codec *codec) 437static void setup_fg_nodes(struct hda_codec *codec)
440{ 438{
441 int i, total_nodes; 439 int i, total_nodes;
442 hda_nid_t nid; 440 hda_nid_t nid;
443 441
444 total_nodes = snd_hda_get_sub_nodes(codec, AC_NODE_ROOT, &nid); 442 total_nodes = snd_hda_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
445 for (i = 0; i < total_nodes; i++, nid++) { 443 for (i = 0; i < total_nodes; i++, nid++) {
446 if ((snd_hda_param_read(codec, nid, AC_PAR_FUNCTION_TYPE) & 0xff) == 444 switch((snd_hda_param_read(codec, nid, AC_PAR_FUNCTION_TYPE) & 0xff)) {
447 AC_GRP_AUDIO_FUNCTION) 445 case AC_GRP_AUDIO_FUNCTION:
448 return nid; 446 codec->afg = nid;
447 break;
448 case AC_GRP_MODEM_FUNCTION:
449 codec->mfg = nid;
450 break;
451 default:
452 break;
453 }
449 } 454 }
450 return 0;
451} 455}
452 456
453/* 457/*
@@ -507,10 +511,9 @@ int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
507 codec->subsystem_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_SUBSYSTEM_ID); 511 codec->subsystem_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_SUBSYSTEM_ID);
508 codec->revision_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_REV_ID); 512 codec->revision_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_REV_ID);
509 513
510 /* FIXME: support for multiple AFGs? */ 514 setup_fg_nodes(codec);
511 codec->afg = look_for_afg_node(codec); 515 if (! codec->afg && ! codec->mfg) {
512 if (! codec->afg) { 516 snd_printdd("hda_codec: no AFG or MFG node found\n");
513 snd_printdd("hda_codec: no AFG node found\n");
514 snd_hda_codec_free(codec); 517 snd_hda_codec_free(codec);
515 return -ENODEV; 518 return -ENODEV;
516 } 519 }
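
setup_fg_nodes() generalizes the old AFG-only scan: one pass over the root's sub-nodes records both the audio (AFG) and modem (MFG) function-group node IDs, and probing fails only when neither exists, which is what lets the new si3054 modem codec attach. A compact model of that enumeration; the node data is a stub array rather than real HDA parameter reads:

/* Model of setup_fg_nodes(): walk the root's sub-nodes once and record
 * both function-group types. Group-type values are illustrative. */
#include <stdio.h>

enum { GRP_AUDIO = 0x01, GRP_MODEM = 0x02 };

struct codec { int afg, mfg; };

static void setup_fg_nodes(struct codec *c, const int *types, int n, int first_nid)
{
	for (int i = 0; i < n; i++) {
		switch (types[i]) {
		case GRP_AUDIO: c->afg = first_nid + i; break;
		case GRP_MODEM: c->mfg = first_nid + i; break;
		}
	}
}

int main(void)
{
	const int types[] = { GRP_MODEM };   /* a modem-only codec */
	struct codec c = { 0, 0 };
	setup_fg_nodes(&c, types, 1, 2);
	printf("afg=%d mfg=%d -> %s\n", c.afg, c.mfg,
	       (c.afg || c.mfg) ? "probe ok" : "-ENODEV");
	return 0;
}
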
@@ -749,12 +752,14 @@ int snd_hda_mixer_amp_volume_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
749 long *valp = ucontrol->value.integer.value; 752 long *valp = ucontrol->value.integer.value;
750 int change = 0; 753 int change = 0;
751 754
752 if (chs & 1) 755 if (chs & 1) {
753 change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx, 756 change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
754 0x7f, *valp); 757 0x7f, *valp);
758 valp++;
759 }
755 if (chs & 2) 760 if (chs & 2)
756 change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx, 761 change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx,
757 0x7f, valp[1]); 762 0x7f, *valp);
758 return change; 763 return change;
759} 764}
760 765
@@ -796,12 +801,15 @@ int snd_hda_mixer_amp_switch_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t
796 long *valp = ucontrol->value.integer.value; 801 long *valp = ucontrol->value.integer.value;
797 int change = 0; 802 int change = 0;
798 803
799 if (chs & 1) 804 if (chs & 1) {
800 change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx, 805 change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
801 0x80, *valp ? 0 : 0x80); 806 0x80, *valp ? 0 : 0x80);
807 valp++;
808 }
802 if (chs & 2) 809 if (chs & 2)
803 change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx, 810 change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx,
804 0x80, valp[1] ? 0 : 0x80); 811 0x80, *valp ? 0 : 0x80);
812
805 return change; 813 return change;
806} 814}
807 815
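
Both put callbacks above had the same bug: the second channel was always read from valp[1], which is wrong for a control covering only channel 2, since user space then supplies a single value at index 0. Advancing valp after the first channel is consumed keeps stereo behaviour identical while fixing the single-channel case. In miniature:

/* Miniature of the hda_codec.c fix: advance valp only after the left
 * channel is consumed, so a right-channel-only control (chs == 2)
 * reads value[0] instead of running past the user's data. */
#include <stdio.h>

static void amp_put(unsigned chs, long *valp)
{
	if (chs & 1) {
		printf("left  <- %ld\n", *valp);
		valp++;                       /* the one-line fix */
	}
	if (chs & 2)
		printf("right <- %ld\n", *valp);
}

int main(void)
{
	long stereo[2] = { 10, 20 };
	long right_only[1] = { 30 };
	amp_put(3, stereo);      /* left <- 10, right <- 20 */
	amp_put(2, right_only);  /* right <- 30, not garbage */
	return 0;
}
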
@@ -1155,8 +1163,16 @@ int snd_hda_build_controls(struct hda_bus *bus)
1155/* 1163/*
1156 * stream formats 1164 * stream formats
1157 */ 1165 */
1158static unsigned int rate_bits[][3] = { 1166struct hda_rate_tbl {
1167 unsigned int hz;
1168 unsigned int alsa_bits;
1169 unsigned int hda_fmt;
1170};
1171
1172static struct hda_rate_tbl rate_bits[] = {
1159 /* rate in Hz, ALSA rate bitmask, HDA format value */ 1173 /* rate in Hz, ALSA rate bitmask, HDA format value */
1174
1175 /* autodetected value used in snd_hda_query_supported_pcm */
1160 { 8000, SNDRV_PCM_RATE_8000, 0x0500 }, /* 1/6 x 48 */ 1176 { 8000, SNDRV_PCM_RATE_8000, 0x0500 }, /* 1/6 x 48 */
1161 { 11025, SNDRV_PCM_RATE_11025, 0x4300 }, /* 1/4 x 44 */ 1177 { 11025, SNDRV_PCM_RATE_11025, 0x4300 }, /* 1/4 x 44 */
1162 { 16000, SNDRV_PCM_RATE_16000, 0x0200 }, /* 1/3 x 48 */ 1178 { 16000, SNDRV_PCM_RATE_16000, 0x0200 }, /* 1/3 x 48 */
@@ -1168,7 +1184,11 @@ static unsigned int rate_bits[][3] = {
1168 { 96000, SNDRV_PCM_RATE_96000, 0x0800 }, /* 2 x 48 */ 1184 { 96000, SNDRV_PCM_RATE_96000, 0x0800 }, /* 2 x 48 */
1169 { 176400, SNDRV_PCM_RATE_176400, 0x5800 },/* 4 x 44 */ 1185 { 176400, SNDRV_PCM_RATE_176400, 0x5800 },/* 4 x 44 */
1170 { 192000, SNDRV_PCM_RATE_192000, 0x1800 }, /* 4 x 48 */ 1186 { 192000, SNDRV_PCM_RATE_192000, 0x1800 }, /* 4 x 48 */
1171 { 0 } 1187
1188 /* not autodetected value */
1189 { 9600, SNDRV_PCM_RATE_KNOT, 0x0400 }, /* 1/5 x 48 */
1190
1191 { 0 } /* terminator */
1172}; 1192};
1173 1193
1174/** 1194/**
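
Turning the anonymous unsigned int [][3] table into struct hda_rate_tbl swaps index arithmetic for named fields, keeps the zero-hz terminator idiom for every loop over it, and makes room for rates such as 9600 Hz that cannot be reported through the auto-detect bitmap (hence SNDRV_PCM_RATE_KNOT). A user-space model of the sentinel-terminated lookup, reusing format values from the table above:

/* Sentinel-terminated rate lookup modelled on hda_rate_tbl; the format
 * values are copied from the table above, the rest is scaffolding. */
#include <stdio.h>

struct rate_tbl { unsigned hz, hda_fmt; };

static const struct rate_tbl rates[] = {
	{ 8000,   0x0500 },  /* 1/6 x 48 */
	{ 96000,  0x0800 },  /* 2 x 48 */
	{ 192000, 0x1800 },  /* 4 x 48 */
	{ 0 }                /* terminator */
};

static int hz_to_fmt(unsigned hz, unsigned *fmt)
{
	for (int i = 0; rates[i].hz; i++)   /* stop at the sentinel */
		if (rates[i].hz == hz) {
			*fmt = rates[i].hda_fmt;
			return 0;
		}
	return -1;                          /* invalid rate */
}

int main(void)
{
	unsigned fmt;
	if (hz_to_fmt(96000, &fmt) == 0)
		printf("96000 Hz -> 0x%04x\n", fmt);
	return 0;
}
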
@@ -1190,12 +1210,12 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
1190 int i; 1210 int i;
1191 unsigned int val = 0; 1211 unsigned int val = 0;
1192 1212
1193 for (i = 0; rate_bits[i][0]; i++) 1213 for (i = 0; rate_bits[i].hz; i++)
1194 if (rate_bits[i][0] == rate) { 1214 if (rate_bits[i].hz == rate) {
1195 val = rate_bits[i][2]; 1215 val = rate_bits[i].hda_fmt;
1196 break; 1216 break;
1197 } 1217 }
1198 if (! rate_bits[i][0]) { 1218 if (! rate_bits[i].hz) {
1199 snd_printdd("invalid rate %d\n", rate); 1219 snd_printdd("invalid rate %d\n", rate);
1200 return 0; 1220 return 0;
1201 } 1221 }
@@ -1258,9 +1278,9 @@ int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
1258 1278
1259 if (ratesp) { 1279 if (ratesp) {
1260 u32 rates = 0; 1280 u32 rates = 0;
1261 for (i = 0; rate_bits[i][0]; i++) { 1281 for (i = 0; rate_bits[i].hz; i++) {
1262 if (val & (1 << i)) 1282 if (val & (1 << i))
1263 rates |= rate_bits[i][1]; 1283 rates |= rate_bits[i].alsa_bits;
1264 } 1284 }
1265 *ratesp = rates; 1285 *ratesp = rates;
1266 } 1286 }
@@ -1352,13 +1372,13 @@ int snd_hda_is_supported_format(struct hda_codec *codec, hda_nid_t nid,
1352 } 1372 }
1353 1373
1354 rate = format & 0xff00; 1374 rate = format & 0xff00;
1355 for (i = 0; rate_bits[i][0]; i++) 1375 for (i = 0; rate_bits[i].hz; i++)
1356 if (rate_bits[i][2] == rate) { 1376 if (rate_bits[i].hda_fmt == rate) {
1357 if (val & (1 << i)) 1377 if (val & (1 << i))
1358 break; 1378 break;
1359 return 0; 1379 return 0;
1360 } 1380 }
1361 if (! rate_bits[i][0]) 1381 if (! rate_bits[i].hz)
1362 return 0; 1382 return 0;
1363 1383
1364 stream = snd_hda_param_read(codec, nid, AC_PAR_STREAM); 1384 stream = snd_hda_param_read(codec, nid, AC_PAR_STREAM);
@@ -1541,8 +1561,11 @@ int snd_hda_check_board_config(struct hda_codec *codec, const struct hda_board_c
1541 for (c = tbl; c->modelname || c->pci_subvendor; c++) { 1561 for (c = tbl; c->modelname || c->pci_subvendor; c++) {
1542 if (c->pci_subvendor == subsystem_vendor && 1562 if (c->pci_subvendor == subsystem_vendor &&
1543 (! c->pci_subdevice /* all match */|| 1563 (! c->pci_subdevice /* all match */||
1544 (c->pci_subdevice == subsystem_device))) 1564 (c->pci_subdevice == subsystem_device))) {
1565 snd_printdd(KERN_INFO "hda_codec: PCI %x:%x, codec config %d is selected\n",
1566 subsystem_vendor, subsystem_device, c->config);
1545 return c->config; 1567 return c->config;
1568 }
1546 } 1569 }
1547 } 1570 }
1548 return -1; 1571 return -1;
@@ -1803,11 +1826,25 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, struct auto_pin_cfg *c
1803 cfg->line_out_pins[j] = nid; 1826 cfg->line_out_pins[j] = nid;
1804 } 1827 }
1805 1828
1806 /* Swap surround and CLFE: the association order is front/CLFE/surr/back */ 1829 /* Reorder the surround channels
1807 if (cfg->line_outs >= 3) { 1830 * ALSA sequence is front/surr/clfe/side
1831 * HDA sequence is:
1832 * 4-ch: front/surr => OK as it is
1833 * 6-ch: front/clfe/surr
1834 * 8-ch: front/clfe/side/surr
1835 */
1836 switch (cfg->line_outs) {
1837 case 3:
1808 nid = cfg->line_out_pins[1]; 1838 nid = cfg->line_out_pins[1];
1809 cfg->line_out_pins[1] = cfg->line_out_pins[2]; 1839 cfg->line_out_pins[1] = cfg->line_out_pins[2];
1810 cfg->line_out_pins[2] = nid; 1840 cfg->line_out_pins[2] = nid;
1841 break;
1842 case 4:
1843 nid = cfg->line_out_pins[1];
1844 cfg->line_out_pins[1] = cfg->line_out_pins[3];
1845 cfg->line_out_pins[3] = cfg->line_out_pins[2];
1846 cfg->line_out_pins[2] = nid;
1847 break;
1811 } 1848 }
1812 1849
1813 return 0; 1850 return 0;
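
The rewritten block maps HDA pin order onto ALSA's front/surr/clfe/side sequence: 6-channel configurations still swap surround and CLFE, and 8-channel ones now rotate three pins so side lands last. The two permutations as a standalone function, with pin IDs reduced to plain ints:

/* The line_out_pins[] permutations from snd_hda_parse_pin_def_config(),
 * extracted into a standalone function for illustration. */
#include <stdio.h>

static void reorder_outs(int *pins, int line_outs)
{
	int nid;
	switch (line_outs) {
	case 3: /* 6ch, HDA front/clfe/surr -> ALSA front/surr/clfe */
		nid = pins[1]; pins[1] = pins[2]; pins[2] = nid;
		break;
	case 4: /* 8ch, HDA front/clfe/side/surr -> ALSA front/surr/clfe/side */
		nid = pins[1];
		pins[1] = pins[3];
		pins[3] = pins[2];
		pins[2] = nid;
		break;
	}
}

int main(void)
{
	int pins[4] = { 1 /* front */, 2 /* clfe */, 3 /* side */, 4 /* surr */ };
	reorder_outs(pins, 4);
	printf("%d %d %d %d\n", pins[0], pins[1], pins[2], pins[3]); /* 1 4 2 3 */
	return 0;
}
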
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index dd0d99d2ad27..63a29a8a2860 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -514,6 +514,7 @@ struct hda_codec {
514 struct list_head list; /* list point */ 514 struct list_head list; /* list point */
515 515
516 hda_nid_t afg; /* AFG node id */ 516 hda_nid_t afg; /* AFG node id */
517 hda_nid_t mfg; /* MFG node id */
517 518
518 /* ids */ 519 /* ids */
519 u32 vendor_id; 520 u32 vendor_id;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2d046abb5911..1229227af5b5 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -881,6 +881,11 @@ int snd_hda_parse_generic_codec(struct hda_codec *codec)
881 struct hda_gspec *spec; 881 struct hda_gspec *spec;
882 int err; 882 int err;
883 883
 884 if (!codec->afg) {
885 snd_printdd("hda_generic: no generic modem yet\n");
886 return -ENODEV;
887 }
888
884 spec = kcalloc(1, sizeof(*spec), GFP_KERNEL); 889 spec = kcalloc(1, sizeof(*spec), GFP_KERNEL);
885 if (spec == NULL) { 890 if (spec == NULL) {
886 printk(KERN_ERR "hda_generic: can't allocate spec\n"); 891 printk(KERN_ERR "hda_generic: can't allocate spec\n");
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 288ab0764830..15107df1f490 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -71,7 +71,9 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
71 "{Intel, ESB2}," 71 "{Intel, ESB2},"
72 "{ATI, SB450}," 72 "{ATI, SB450},"
73 "{VIA, VT8251}," 73 "{VIA, VT8251},"
74 "{VIA, VT8237A}}"); 74 "{VIA, VT8237A},"
75 "{SiS, SIS966},"
76 "{ULI, M5461}}");
75MODULE_DESCRIPTION("Intel HDA driver"); 77MODULE_DESCRIPTION("Intel HDA driver");
76 78
77#define SFX "hda-intel: " 79#define SFX "hda-intel: "
@@ -141,9 +143,24 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
141 */ 143 */
142 144
143/* max number of SDs */ 145/* max number of SDs */
144#define MAX_ICH6_DEV 8 146/* ICH, ATI and VIA have 4 playback and 4 capture */
147#define ICH6_CAPTURE_INDEX 0
148#define ICH6_NUM_CAPTURE 4
149#define ICH6_PLAYBACK_INDEX 4
150#define ICH6_NUM_PLAYBACK 4
151
152/* ULI has 6 playback and 5 capture */
153#define ULI_CAPTURE_INDEX 0
154#define ULI_NUM_CAPTURE 5
155#define ULI_PLAYBACK_INDEX 5
156#define ULI_NUM_PLAYBACK 6
157
158/* this number is statically defined for simplicity */
159#define MAX_AZX_DEV 16
160
145/* max number of fragments - we may use more if allocating more pages for BDL */ 161/* max number of fragments - we may use more if allocating more pages for BDL */
146#define AZX_MAX_FRAG (PAGE_SIZE / (MAX_ICH6_DEV * 16)) 162#define BDL_SIZE PAGE_ALIGN(8192)
163#define AZX_MAX_FRAG (BDL_SIZE / (MAX_AZX_DEV * 16))
147/* max buffer size - no h/w limit, you can increase as you like */ 164/* max buffer size - no h/w limit, you can increase as you like */
148#define AZX_MAX_BUF_SIZE (1024*1024*1024) 165#define AZX_MAX_BUF_SIZE (1024*1024*1024)
 149/* max number of PCM devices per card */ 166/* max number of PCM devices per card */
@@ -200,7 +217,6 @@ enum {
200}; 217};
201 218
202/* Defines for ATI HD Audio support in SB450 south bridge */ 219/* Defines for ATI HD Audio support in SB450 south bridge */
203#define ATI_SB450_HDAUDIO_PCI_DEVICE_ID 0x437b
204#define ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR 0x42 220#define ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR 0x42
205#define ATI_SB450_HDAUDIO_ENABLE_SNOOP 0x02 221#define ATI_SB450_HDAUDIO_ENABLE_SNOOP 0x02
206 222
@@ -258,6 +274,14 @@ struct snd_azx {
258 snd_card_t *card; 274 snd_card_t *card;
259 struct pci_dev *pci; 275 struct pci_dev *pci;
260 276
277 /* chip type specific */
278 int driver_type;
279 int playback_streams;
280 int playback_index_offset;
281 int capture_streams;
282 int capture_index_offset;
283 int num_streams;
284
261 /* pci resources */ 285 /* pci resources */
262 unsigned long addr; 286 unsigned long addr;
263 void __iomem *remap_addr; 287 void __iomem *remap_addr;
@@ -267,8 +291,8 @@ struct snd_azx {
267 spinlock_t reg_lock; 291 spinlock_t reg_lock;
268 struct semaphore open_mutex; 292 struct semaphore open_mutex;
269 293
270 /* streams */ 294 /* streams (x num_streams) */
271 azx_dev_t azx_dev[MAX_ICH6_DEV]; 295 azx_dev_t *azx_dev;
272 296
273 /* PCM */ 297 /* PCM */
274 unsigned int pcm_devs; 298 unsigned int pcm_devs;
@@ -292,6 +316,23 @@ struct snd_azx {
292 unsigned int initialized: 1; 316 unsigned int initialized: 1;
293}; 317};
294 318
319/* driver types */
320enum {
321 AZX_DRIVER_ICH,
322 AZX_DRIVER_ATI,
323 AZX_DRIVER_VIA,
324 AZX_DRIVER_SIS,
325 AZX_DRIVER_ULI,
326};
327
328static char *driver_short_names[] __devinitdata = {
329 [AZX_DRIVER_ICH] = "HDA Intel",
330 [AZX_DRIVER_ATI] = "HDA ATI SB",
331 [AZX_DRIVER_VIA] = "HDA VIA VT82xx",
332 [AZX_DRIVER_SIS] = "HDA SIS966",
333 [AZX_DRIVER_ULI] = "HDA ULI M5461"
334};
335
295/* 336/*
296 * macros for easy use 337 * macros for easy use
297 */ 338 */
@@ -360,6 +401,8 @@ static void azx_init_cmd_io(azx_t *chip)
360 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr); 401 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
361 azx_writel(chip, CORBUBASE, upper_32bit(chip->corb.addr)); 402 azx_writel(chip, CORBUBASE, upper_32bit(chip->corb.addr));
362 403
404 /* set the corb size to 256 entries (ULI requires explicitly) */
405 azx_writeb(chip, CORBSIZE, 0x02);
363 /* set the corb write pointer to 0 */ 406 /* set the corb write pointer to 0 */
364 azx_writew(chip, CORBWP, 0); 407 azx_writew(chip, CORBWP, 0);
365 /* reset the corb hw read pointer */ 408 /* reset the corb hw read pointer */
@@ -373,6 +416,8 @@ static void azx_init_cmd_io(azx_t *chip)
373 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr); 416 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
374 azx_writel(chip, RIRBUBASE, upper_32bit(chip->rirb.addr)); 417 azx_writel(chip, RIRBUBASE, upper_32bit(chip->rirb.addr));
375 418
419 /* set the rirb size to 256 entries (ULI requires explicitly) */
420 azx_writeb(chip, RIRBSIZE, 0x02);
376 /* reset the rirb hw write pointer */ 421 /* reset the rirb hw write pointer */
377 azx_writew(chip, RIRBWP, ICH6_RBRWP_CLR); 422 azx_writew(chip, RIRBWP, ICH6_RBRWP_CLR);
378 /* set N=1, get RIRB response interrupt for new entry */ 423 /* set N=1, get RIRB response interrupt for new entry */
@@ -596,7 +641,7 @@ static void azx_int_disable(azx_t *chip)
596 int i; 641 int i;
597 642
598 /* disable interrupts in stream descriptor */ 643 /* disable interrupts in stream descriptor */
599 for (i = 0; i < MAX_ICH6_DEV; i++) { 644 for (i = 0; i < chip->num_streams; i++) {
600 azx_dev_t *azx_dev = &chip->azx_dev[i]; 645 azx_dev_t *azx_dev = &chip->azx_dev[i];
601 azx_sd_writeb(azx_dev, SD_CTL, 646 azx_sd_writeb(azx_dev, SD_CTL,
602 azx_sd_readb(azx_dev, SD_CTL) & ~SD_INT_MASK); 647 azx_sd_readb(azx_dev, SD_CTL) & ~SD_INT_MASK);
@@ -616,7 +661,7 @@ static void azx_int_clear(azx_t *chip)
616 int i; 661 int i;
617 662
618 /* clear stream status */ 663 /* clear stream status */
619 for (i = 0; i < MAX_ICH6_DEV; i++) { 664 for (i = 0; i < chip->num_streams; i++) {
620 azx_dev_t *azx_dev = &chip->azx_dev[i]; 665 azx_dev_t *azx_dev = &chip->azx_dev[i];
621 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); 666 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
622 } 667 }
@@ -686,8 +731,7 @@ static void azx_init_chip(azx_t *chip)
686 } 731 }
687 732
688 /* For ATI SB450 azalia HD audio, we need to enable snoop */ 733 /* For ATI SB450 azalia HD audio, we need to enable snoop */
689 if (chip->pci->vendor == PCI_VENDOR_ID_ATI && 734 if (chip->driver_type == AZX_DRIVER_ATI) {
690 chip->pci->device == ATI_SB450_HDAUDIO_PCI_DEVICE_ID) {
691 pci_read_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 735 pci_read_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
692 &ati_misc_cntl2); 736 &ati_misc_cntl2);
693 pci_write_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 737 pci_write_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
@@ -714,7 +758,7 @@ static irqreturn_t azx_interrupt(int irq, void* dev_id, struct pt_regs *regs)
714 return IRQ_NONE; 758 return IRQ_NONE;
715 } 759 }
716 760
717 for (i = 0; i < MAX_ICH6_DEV; i++) { 761 for (i = 0; i < chip->num_streams; i++) {
718 azx_dev = &chip->azx_dev[i]; 762 azx_dev = &chip->azx_dev[i];
719 if (status & azx_dev->sd_int_sta_mask) { 763 if (status & azx_dev->sd_int_sta_mask) {
720 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); 764 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
@@ -879,9 +923,15 @@ static int __devinit azx_codec_create(azx_t *chip, const char *model)
879/* assign a stream for the PCM */ 923/* assign a stream for the PCM */
880static inline azx_dev_t *azx_assign_device(azx_t *chip, int stream) 924static inline azx_dev_t *azx_assign_device(azx_t *chip, int stream)
881{ 925{
882 int dev, i; 926 int dev, i, nums;
883 dev = stream == SNDRV_PCM_STREAM_PLAYBACK ? 4 : 0; 927 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
884 for (i = 0; i < 4; i++, dev++) 928 dev = chip->playback_index_offset;
929 nums = chip->playback_streams;
930 } else {
931 dev = chip->capture_index_offset;
932 nums = chip->capture_streams;
933 }
934 for (i = 0; i < nums; i++, dev++)
885 if (! chip->azx_dev[dev].opened) { 935 if (! chip->azx_dev[dev].opened) {
886 chip->azx_dev[dev].opened = 1; 936 chip->azx_dev[dev].opened = 1;
887 return &chip->azx_dev[dev]; 937 return &chip->azx_dev[dev];
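
azx_assign_device() used to hard-code four capture streams at index 0 and four playback streams at index 4; reading the offset and count from the chip struct lets the same loop serve the ULI layout (5 capture plus 6 playback). A sketch of that stream-pool claim under simplified types:

/* Sketch of the generalized stream assignment: each direction owns a
 * contiguous slice of the stream array, described by offset + count. */
#include <stdio.h>
#include <stdbool.h>

struct stream { bool opened; };
struct chip {
	struct stream dev[16];
	int play_off, play_num, cap_off, cap_num;
};

static struct stream *assign(struct chip *c, bool playback)
{
	int off = playback ? c->play_off : c->cap_off;
	int num = playback ? c->play_num : c->cap_num;
	for (int i = 0; i < num; i++)
		if (!c->dev[off + i].opened) {
			c->dev[off + i].opened = true;
			return &c->dev[off + i];
		}
	return NULL;   /* all streams of this direction busy */
}

int main(void)
{
	struct chip uli = { .play_off = 5, .play_num = 6,
			    .cap_off = 0, .cap_num = 5 };  /* ULI M5461 layout */
	printf("%s\n", assign(&uli, true) ? "got a playback stream" : "busy");
	return 0;
}
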
@@ -899,8 +949,8 @@ static snd_pcm_hardware_t azx_pcm_hw = {
899 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 949 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
900 SNDRV_PCM_INFO_BLOCK_TRANSFER | 950 SNDRV_PCM_INFO_BLOCK_TRANSFER |
901 SNDRV_PCM_INFO_MMAP_VALID | 951 SNDRV_PCM_INFO_MMAP_VALID |
902 SNDRV_PCM_INFO_PAUSE | 952 SNDRV_PCM_INFO_PAUSE /*|*/
903 SNDRV_PCM_INFO_RESUME), 953 /*SNDRV_PCM_INFO_RESUME*/),
904 .formats = SNDRV_PCM_FMTBIT_S16_LE, 954 .formats = SNDRV_PCM_FMTBIT_S16_LE,
905 .rates = SNDRV_PCM_RATE_48000, 955 .rates = SNDRV_PCM_RATE_48000,
906 .rate_min = 48000, 956 .rate_min = 48000,
@@ -1049,6 +1099,7 @@ static int azx_pcm_trigger(snd_pcm_substream_t *substream, int cmd)
1049 azx_dev->running = 1; 1099 azx_dev->running = 1;
1050 break; 1100 break;
1051 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 1101 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1102 case SNDRV_PCM_TRIGGER_SUSPEND:
1052 case SNDRV_PCM_TRIGGER_STOP: 1103 case SNDRV_PCM_TRIGGER_STOP:
1053 azx_stream_stop(chip, azx_dev); 1104 azx_stream_stop(chip, azx_dev);
1054 azx_dev->running = 0; 1105 azx_dev->running = 0;
@@ -1058,6 +1109,7 @@ static int azx_pcm_trigger(snd_pcm_substream_t *substream, int cmd)
1058 } 1109 }
1059 spin_unlock(&chip->reg_lock); 1110 spin_unlock(&chip->reg_lock);
1060 if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH || 1111 if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH ||
1112 cmd == SNDRV_PCM_TRIGGER_SUSPEND ||
1061 cmd == SNDRV_PCM_TRIGGER_STOP) { 1113 cmd == SNDRV_PCM_TRIGGER_STOP) {
1062 int timeout = 5000; 1114 int timeout = 5000;
1063 while (azx_sd_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START && --timeout) 1115 while (azx_sd_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START && --timeout)
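
Since the hardware flags above stop advertising SNDRV_PCM_INFO_RESUME, the trigger callback still has to quiesce DMA across suspend; folding SNDRV_PCM_TRIGGER_SUSPEND into the stop path, including the wait for the DMA-start bit to clear, does exactly that. An outline with stand-in names for the azx_* helpers and trigger constants:

/* Outline of the trigger change: SUSPEND shares the STOP path, including
 * the poll for the DMA-start bit to clear. Nothing here is the real API. */
enum trig { TRIG_START, TRIG_STOP, TRIG_PAUSE_PUSH, TRIG_SUSPEND };

static int dma_running;
static void stream_start(void) { dma_running = 1; }
static void stream_stop(void)  { dma_running = 0; }

static int pcm_trigger(enum trig cmd)
{
	switch (cmd) {
	case TRIG_START:
		stream_start();
		break;
	case TRIG_PAUSE_PUSH:
	case TRIG_SUSPEND:              /* newly routed through the stop path */
	case TRIG_STOP:
		stream_stop();
		break;
	}
	if (cmd == TRIG_PAUSE_PUSH || cmd == TRIG_SUSPEND || cmd == TRIG_STOP) {
		int timeout = 5000;
		while (dma_running && --timeout)
			;               /* the driver polls SD_CTL here */
	}
	return 0;
}

int main(void) { return pcm_trigger(TRIG_SUSPEND); }
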
@@ -1136,6 +1188,7 @@ static int __devinit create_codec_pcm(azx_t *chip, struct hda_codec *codec,
1136 snd_dma_pci_data(chip->pci), 1188 snd_dma_pci_data(chip->pci),
1137 1024 * 64, 1024 * 128); 1189 1024 * 64, 1024 * 128);
1138 chip->pcm[pcm_dev] = pcm; 1190 chip->pcm[pcm_dev] = pcm;
1191 chip->pcm_devs = pcm_dev + 1;
1139 1192
1140 return 0; 1193 return 0;
1141} 1194}
@@ -1186,7 +1239,7 @@ static int __devinit azx_init_stream(azx_t *chip)
1186 /* initialize each stream (aka device) 1239 /* initialize each stream (aka device)
1187 * assign the starting bdl address to each stream (device) and initialize 1240 * assign the starting bdl address to each stream (device) and initialize
1188 */ 1241 */
1189 for (i = 0; i < MAX_ICH6_DEV; i++) { 1242 for (i = 0; i < chip->num_streams; i++) {
1190 unsigned int off = sizeof(u32) * (i * AZX_MAX_FRAG * 4); 1243 unsigned int off = sizeof(u32) * (i * AZX_MAX_FRAG * 4);
1191 azx_dev_t *azx_dev = &chip->azx_dev[i]; 1244 azx_dev_t *azx_dev = &chip->azx_dev[i];
1192 azx_dev->bdl = (u32 *)(chip->bdl.area + off); 1245 azx_dev->bdl = (u32 *)(chip->bdl.area + off);
@@ -1245,7 +1298,7 @@ static int azx_free(azx_t *chip)
1245 if (chip->initialized) { 1298 if (chip->initialized) {
1246 int i; 1299 int i;
1247 1300
1248 for (i = 0; i < MAX_ICH6_DEV; i++) 1301 for (i = 0; i < chip->num_streams; i++)
1249 azx_stream_stop(chip, &chip->azx_dev[i]); 1302 azx_stream_stop(chip, &chip->azx_dev[i]);
1250 1303
1251 /* disable interrupts */ 1304 /* disable interrupts */
@@ -1261,10 +1314,10 @@ static int azx_free(azx_t *chip)
1261 1314
1262 /* wait a little for interrupts to finish */ 1315 /* wait a little for interrupts to finish */
1263 msleep(1); 1316 msleep(1);
1264
1265 iounmap(chip->remap_addr);
1266 } 1317 }
1267 1318
1319 if (chip->remap_addr)
1320 iounmap(chip->remap_addr);
1268 if (chip->irq >= 0) 1321 if (chip->irq >= 0)
1269 free_irq(chip->irq, (void*)chip); 1322 free_irq(chip->irq, (void*)chip);
1270 1323
@@ -1276,6 +1329,7 @@ static int azx_free(azx_t *chip)
1276 snd_dma_free_pages(&chip->posbuf); 1329 snd_dma_free_pages(&chip->posbuf);
1277 pci_release_regions(chip->pci); 1330 pci_release_regions(chip->pci);
1278 pci_disable_device(chip->pci); 1331 pci_disable_device(chip->pci);
1332 kfree(chip->azx_dev);
1279 kfree(chip); 1333 kfree(chip);
1280 1334
1281 return 0; 1335 return 0;
@@ -1290,7 +1344,8 @@ static int azx_dev_free(snd_device_t *device)
1290 * constructor 1344 * constructor
1291 */ 1345 */
1292static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci, 1346static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci,
1293 int posfix, azx_t **rchip) 1347 int posfix, int driver_type,
1348 azx_t **rchip)
1294{ 1349{
1295 azx_t *chip; 1350 azx_t *chip;
1296 int err = 0; 1351 int err = 0;
@@ -1316,9 +1371,20 @@ static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci,
1316 chip->card = card; 1371 chip->card = card;
1317 chip->pci = pci; 1372 chip->pci = pci;
1318 chip->irq = -1; 1373 chip->irq = -1;
1374 chip->driver_type = driver_type;
1319 1375
1320 chip->position_fix = posfix; 1376 chip->position_fix = posfix;
1321 1377
1378#if BITS_PER_LONG != 64
1379 /* Fix up base address on ULI M5461 */
1380 if (chip->driver_type == AZX_DRIVER_ULI) {
1381 u16 tmp3;
1382 pci_read_config_word(pci, 0x40, &tmp3);
1383 pci_write_config_word(pci, 0x40, tmp3 | 0x10);
1384 pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, 0);
1385 }
1386#endif
1387
1322 if ((err = pci_request_regions(pci, "ICH HD audio")) < 0) { 1388 if ((err = pci_request_regions(pci, "ICH HD audio")) < 0) {
1323 kfree(chip); 1389 kfree(chip);
1324 pci_disable_device(pci); 1390 pci_disable_device(pci);
@@ -1344,16 +1410,37 @@ static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci,
1344 pci_set_master(pci); 1410 pci_set_master(pci);
1345 synchronize_irq(chip->irq); 1411 synchronize_irq(chip->irq);
1346 1412
1413 switch (chip->driver_type) {
1414 case AZX_DRIVER_ULI:
1415 chip->playback_streams = ULI_NUM_PLAYBACK;
1416 chip->capture_streams = ULI_NUM_CAPTURE;
1417 chip->playback_index_offset = ULI_PLAYBACK_INDEX;
1418 chip->capture_index_offset = ULI_CAPTURE_INDEX;
1419 break;
1420 default:
1421 chip->playback_streams = ICH6_NUM_PLAYBACK;
1422 chip->capture_streams = ICH6_NUM_CAPTURE;
1423 chip->playback_index_offset = ICH6_PLAYBACK_INDEX;
1424 chip->capture_index_offset = ICH6_CAPTURE_INDEX;
1425 break;
1426 }
1427 chip->num_streams = chip->playback_streams + chip->capture_streams;
1428 chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev), GFP_KERNEL);
1429 if (! chip->azx_dev) {
1430 snd_printk(KERN_ERR "cannot malloc azx_dev\n");
1431 goto errout;
1432 }
1433
1347 /* allocate memory for the BDL for each stream */ 1434 /* allocate memory for the BDL for each stream */
1348 if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 1435 if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
1349 PAGE_SIZE, &chip->bdl)) < 0) { 1436 BDL_SIZE, &chip->bdl)) < 0) {
1350 snd_printk(KERN_ERR SFX "cannot allocate BDL\n"); 1437 snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
1351 goto errout; 1438 goto errout;
1352 } 1439 }
1353 if (chip->position_fix == POS_FIX_POSBUF) { 1440 if (chip->position_fix == POS_FIX_POSBUF) {
1354 /* allocate memory for the position buffer */ 1441 /* allocate memory for the position buffer */
1355 if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 1442 if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
1356 MAX_ICH6_DEV * 8, &chip->posbuf)) < 0) { 1443 chip->num_streams * 8, &chip->posbuf)) < 0) {
1357 snd_printk(KERN_ERR SFX "cannot allocate posbuf\n"); 1444 snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
1358 goto errout; 1445 goto errout;
1359 } 1446 }
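
With the stream geometry chosen per driver_type, the stream array and position buffer are sized at runtime instead of by MAX_ICH6_DEV; kcalloc() allocates num_streams entries and azx_free() releases them. A condensed user-space model of the selection-then-allocation step:

/* Condensed model of azx_create()'s geometry selection: pick per-chipset
 * stream counts, then size the stream array dynamically. */
#include <stdio.h>
#include <stdlib.h>

enum { DRV_ICH, DRV_ULI };
struct stream { int opened; };

int main(void)
{
	int driver_type = DRV_ULI;
	int play, cap;

	switch (driver_type) {
	case DRV_ULI: play = 6; cap = 5; break;   /* ULI M5461 */
	default:      play = 4; cap = 4; break;   /* ICH/ATI/VIA/SiS */
	}
	int num = play + cap;

	struct stream *dev = calloc(num, sizeof(*dev));
	if (!dev)
		return 1;                          /* -ENOMEM in the driver */
	printf("allocated %d streams\n", num);
	free(dev);
	return 0;
}
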
@@ -1382,6 +1469,10 @@ static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci,
1382 goto errout; 1469 goto errout;
1383 } 1470 }
1384 1471
1472 strcpy(card->driver, "HDA-Intel");
1473 strcpy(card->shortname, driver_short_names[chip->driver_type]);
1474 sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->addr, chip->irq);
1475
1385 *rchip = chip; 1476 *rchip = chip;
1386 return 0; 1477 return 0;
1387 1478
@@ -1410,15 +1501,12 @@ static int __devinit azx_probe(struct pci_dev *pci, const struct pci_device_id *
1410 return -ENOMEM; 1501 return -ENOMEM;
1411 } 1502 }
1412 1503
1413 if ((err = azx_create(card, pci, position_fix[dev], &chip)) < 0) { 1504 if ((err = azx_create(card, pci, position_fix[dev], pci_id->driver_data,
1505 &chip)) < 0) {
1414 snd_card_free(card); 1506 snd_card_free(card);
1415 return err; 1507 return err;
1416 } 1508 }
1417 1509
1418 strcpy(card->driver, "HDA-Intel");
1419 strcpy(card->shortname, "HDA Intel");
1420 sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->addr, chip->irq);
1421
1422 /* create codec instances */ 1510 /* create codec instances */
1423 if ((err = azx_codec_create(chip, model[dev])) < 0) { 1511 if ((err = azx_codec_create(chip, model[dev])) < 0) {
1424 snd_card_free(card); 1512 snd_card_free(card);
@@ -1459,12 +1547,13 @@ static void __devexit azx_remove(struct pci_dev *pci)
1459 1547
1460/* PCI IDs */ 1548/* PCI IDs */
1461static struct pci_device_id azx_ids[] = { 1549static struct pci_device_id azx_ids[] = {
1462 { 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICH6 */ 1550 { 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH6 */
1463 { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICH7 */ 1551 { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
1464 { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ESB2 */ 1552 { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
1465 { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ATI SB450 */ 1553 { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
1466 { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* VIA VT8251/VT8237A */ 1554 { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
1467 { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ALI 5461? */ 1555 { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
1556 { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */
1468 { 0, } 1557 { 0, }
1469}; 1558};
1470MODULE_DEVICE_TABLE(pci, azx_ids); 1559MODULE_DEVICE_TABLE(pci, azx_ids);
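
Carrying the chip type in the PCI table's driver_data is what makes the removed ATI_SB450_HDAUDIO_PCI_DEVICE_ID check unnecessary: azx_probe() simply forwards pci_id->driver_data into azx_create(). A sketch of the pattern, with struct pci_id as a simplified stand-in for the kernel's pci_device_id:

/* Sketch of stashing a driver type in a PCI-style ID table and reading
 * it back at probe time; struct pci_id is a simplified stand-in. */
#include <stdio.h>

enum { DRV_ICH, DRV_ATI, DRV_ULI };

struct pci_id {
	unsigned vendor, device;
	unsigned long driver_data;   /* chip type, not a flag word */
};

static const struct pci_id ids[] = {
	{ 0x8086, 0x2668, DRV_ICH },   /* ICH6 */
	{ 0x1002, 0x437b, DRV_ATI },   /* ATI SB450 */
	{ 0x10b9, 0x5461, DRV_ULI },   /* ULI M5461 */
	{ 0 }
};

static int probe(const struct pci_id *id)
{
	printf("probing driver_type=%lu\n", id->driver_data);
	return 0;
}

int main(void) { return probe(&ids[1]); }
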
diff --git a/sound/pci/hda/hda_patch.h b/sound/pci/hda/hda_patch.h
index a5de684b6944..acaef3c811b8 100644
--- a/sound/pci/hda/hda_patch.h
+++ b/sound/pci/hda/hda_patch.h
@@ -10,11 +10,14 @@ extern struct hda_codec_preset snd_hda_preset_cmedia[];
10extern struct hda_codec_preset snd_hda_preset_analog[]; 10extern struct hda_codec_preset snd_hda_preset_analog[];
11/* SigmaTel codecs */ 11/* SigmaTel codecs */
12extern struct hda_codec_preset snd_hda_preset_sigmatel[]; 12extern struct hda_codec_preset snd_hda_preset_sigmatel[];
13/* SiLabs 3054/3055 modem codecs */
14extern struct hda_codec_preset snd_hda_preset_si3054[];
13 15
14static const struct hda_codec_preset *hda_preset_tables[] = { 16static const struct hda_codec_preset *hda_preset_tables[] = {
15 snd_hda_preset_realtek, 17 snd_hda_preset_realtek,
16 snd_hda_preset_cmedia, 18 snd_hda_preset_cmedia,
17 snd_hda_preset_analog, 19 snd_hda_preset_analog,
18 snd_hda_preset_sigmatel, 20 snd_hda_preset_sigmatel,
21 snd_hda_preset_si3054,
19 NULL 22 NULL
20}; 23};
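
Registering the si3054 patch amounts to appending its preset array to the NULL-terminated hda_preset_tables[] list, which the codec core walks until a preset claims the codec. A minimal model of such a registry walk; the vendor ID below is illustrative, not quoted from the new file:

/* Minimal model of a NULL-terminated registry of preset tables, as in
 * hda_patch.h; matching is reduced to a vendor-ID compare. */
#include <stdio.h>
#include <stddef.h>

struct preset { unsigned vendor_id; const char *name; };

static const struct preset analog[]  = { { 0x11d4, "AD198x" }, { 0 } };
static const struct preset si3054[]  = { { 0x163c, "Si3054" }, { 0 } }; /* id illustrative */
static const struct preset *tables[] = { analog, si3054, NULL };

static const struct preset *find(unsigned vendor_id)
{
	for (int t = 0; tables[t]; t++)
		for (const struct preset *p = tables[t]; p->vendor_id; p++)
			if (p->vendor_id == vendor_id)
				return p;
	return NULL;
}

int main(void)
{
	const struct preset *p = find(0x163c);
	printf("%s\n", p ? p->name : "no preset");
	return 0;
}
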
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 2fd05bb84136..bceb83a42a38 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -572,7 +572,7 @@ static snd_kcontrol_new_t ad1983_mixers[] = {
572 }, 572 },
573 { 573 {
574 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 574 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
575 .name = "IEC958 Playback Route", 575 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
576 .info = ad1983_spdif_route_info, 576 .info = ad1983_spdif_route_info,
577 .get = ad1983_spdif_route_get, 577 .get = ad1983_spdif_route_get,
578 .put = ad1983_spdif_route_put, 578 .put = ad1983_spdif_route_put,
@@ -705,7 +705,7 @@ static snd_kcontrol_new_t ad1981_mixers[] = {
705 /* identical with AD1983 */ 705 /* identical with AD1983 */
706 { 706 {
707 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 707 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
708 .name = "IEC958 Playback Route", 708 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
709 .info = ad1983_spdif_route_info, 709 .info = ad1983_spdif_route_info,
710 .get = ad1983_spdif_route_get, 710 .get = ad1983_spdif_route_get,
711 .put = ad1983_spdif_route_put, 711 .put = ad1983_spdif_route_put,
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index 86f195f19eef..07fb4f5a54b3 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -647,6 +647,7 @@ static struct hda_board_config cmi9880_cfg_tbl[] = {
647 { .modelname = "min_fp", .config = CMI_MIN_FP }, 647 { .modelname = "min_fp", .config = CMI_MIN_FP },
648 { .modelname = "full", .config = CMI_FULL }, 648 { .modelname = "full", .config = CMI_FULL },
649 { .modelname = "full_dig", .config = CMI_FULL_DIG }, 649 { .modelname = "full_dig", .config = CMI_FULL_DIG },
650 { .pci_subvendor = 0x1043, .pci_subdevice = 0x813d, .config = CMI_FULL_DIG }, /* ASUS P5AD2 */
650 { .modelname = "allout", .config = CMI_ALLOUT }, 651 { .modelname = "allout", .config = CMI_ALLOUT },
651 { .modelname = "auto", .config = CMI_AUTO }, 652 { .modelname = "auto", .config = CMI_AUTO },
652 {} /* terminator */ 653 {} /* terminator */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9b8569900787..eeb900ab79af 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -687,6 +687,12 @@ static snd_kcontrol_new_t alc880_asus_w1v_mixer[] = {
687 { } /* end */ 687 { } /* end */
688}; 688};
689 689
 690/* additional mixers for alc880_asus_mixer */
691static snd_kcontrol_new_t alc880_pcbeep_mixer[] = {
692 HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x0b, 0x05, HDA_INPUT),
693 HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x0b, 0x05, HDA_INPUT),
694 { } /* end */
695};
690 696
691/* 697/*
692 * build control elements 698 * build control elements
@@ -1524,6 +1530,7 @@ static struct hda_board_config alc880_cfg_tbl[] = {
1524 /* Back 3 jack plus 1 SPDIF out jack, front 2 jack */ 1530 /* Back 3 jack plus 1 SPDIF out jack, front 2 jack */
1525 { .modelname = "3stack-digout", .config = ALC880_3ST_DIG }, 1531 { .modelname = "3stack-digout", .config = ALC880_3ST_DIG },
1526 { .pci_subvendor = 0x8086, .pci_subdevice = 0xe308, .config = ALC880_3ST_DIG }, 1532 { .pci_subvendor = 0x8086, .pci_subdevice = 0xe308, .config = ALC880_3ST_DIG },
1533 { .pci_subvendor = 0x1025, .pci_subdevice = 0x0070, .config = ALC880_3ST_DIG },
1527 1534
1528 /* Back 3 jack plus 1 SPDIF out jack, front 2 jack (Internal add Aux-In)*/ 1535 /* Back 3 jack plus 1 SPDIF out jack, front 2 jack (Internal add Aux-In)*/
1529 { .pci_subvendor = 0x8086, .pci_subdevice = 0xe305, .config = ALC880_3ST_DIG }, 1536 { .pci_subvendor = 0x8086, .pci_subdevice = 0xe305, .config = ALC880_3ST_DIG },
@@ -1734,7 +1741,7 @@ static struct alc_config_preset alc880_presets[] = {
1734 .input_mux = &alc880_capture_source, 1741 .input_mux = &alc880_capture_source,
1735 }, 1742 },
1736 [ALC880_UNIWILL_DIG] = { 1743 [ALC880_UNIWILL_DIG] = {
1737 .mixers = { alc880_asus_mixer }, 1744 .mixers = { alc880_asus_mixer, alc880_pcbeep_mixer },
1738 .init_verbs = { alc880_volume_init_verbs, alc880_pin_asus_init_verbs }, 1745 .init_verbs = { alc880_volume_init_verbs, alc880_pin_asus_init_verbs },
1739 .num_dacs = ARRAY_SIZE(alc880_asus_dac_nids), 1746 .num_dacs = ARRAY_SIZE(alc880_asus_dac_nids),
1740 .dac_nids = alc880_asus_dac_nids, 1747 .dac_nids = alc880_asus_dac_nids,
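
A preset's .mixers field holds several kcontrol arrays, so the PC-beep controls can be layered onto the existing ASUS mixer without duplicating it; control build then registers each array in turn. A sketch, assuming the driver's usual num_mixers/mixers bookkeeping:

        /* sketch: assumes spec->num_mixers/spec->mixers as kept by this driver */
        for (i = 0; i < spec->num_mixers; i++) {
                err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
                if (err < 0)
                        return err;
        }
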
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
new file mode 100644
index 000000000000..b0270d1b64ce
--- /dev/null
+++ b/sound/pci/hda/patch_si3054.c
@@ -0,0 +1,300 @@
1/*
2 * Universal Interface for Intel High Definition Audio Codec
3 *
4 * HD audio interface patch for Silicon Labs 3054/5 modem codec
5 *
6 * Copyright (c) 2005 Sasha Khapyorsky <sashak@smlink.com>
7 * Takashi Iwai <tiwai@suse.de>
8 *
9 *
10 * This driver is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This driver is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <sound/driver.h>
26#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/pci.h>
30#include <sound/core.h>
31#include "hda_codec.h"
32#include "hda_local.h"
33
34
35/* si3054 verbs */
36#define SI3054_VERB_READ_NODE 0x900
37#define SI3054_VERB_WRITE_NODE 0x100
38
39/* si3054 nodes (registers) */
40#define SI3054_EXTENDED_MID 2
41#define SI3054_LINE_RATE 3
42#define SI3054_LINE_LEVEL 4
43#define SI3054_GPIO_CFG 5
44#define SI3054_GPIO_POLARITY 6
45#define SI3054_GPIO_STICKY 7
46#define SI3054_GPIO_WAKEUP 8
47#define SI3054_GPIO_STATUS 9
48#define SI3054_GPIO_CONTROL 10
49#define SI3054_MISC_AFE 11
50#define SI3054_CHIPID 12
51#define SI3054_LINE_CFG1 13
52#define SI3054_LINE_STATUS 14
53#define SI3054_DC_TERMINATION 15
54#define SI3054_LINE_CONFIG 16
55#define SI3054_CALLPROG_ATT 17
56#define SI3054_SQ_CONTROL 18
57#define SI3054_MISC_CONTROL 19
58#define SI3054_RING_CTRL1 20
59#define SI3054_RING_CTRL2 21
60
61/* extended MID */
62#define SI3054_MEI_READY 0xf
63
64/* line level */
65#define SI3054_ATAG_MASK 0x00f0
66#define SI3054_DTAG_MASK 0xf000
67
68/* GPIO bits */
69#define SI3054_GPIO_OH 0x0001
70#define SI3054_GPIO_CID 0x0002
71
72/* chipid and revisions */
73#define SI3054_CHIPID_CODEC_REV_MASK 0x000f
74#define SI3054_CHIPID_DAA_REV_MASK 0x00f0
75#define SI3054_CHIPID_INTERNATIONAL 0x0100
76#define SI3054_CHIPID_DAA_ID 0x0f00
77#define SI3054_CHIPID_CODEC_ID (1<<12)
78
79/* si3054 codec registers (nodes) access macros */
80#define GET_REG(codec,reg) (snd_hda_codec_read(codec,reg,0,SI3054_VERB_READ_NODE,0))
81#define SET_REG(codec,reg,val) (snd_hda_codec_write(codec,reg,0,SI3054_VERB_WRITE_NODE,val))
82
83
84struct si3054_spec {
85 unsigned international;
86 struct hda_pcm pcm;
87};
88
89
90/*
91 * Modem mixer
92 */
93
94#define PRIVATE_VALUE(reg,mask) ((reg<<16)|(mask&0xffff))
95#define PRIVATE_REG(val) ((val>>16)&0xffff)
96#define PRIVATE_MASK(val) (val&0xffff)
97
98static int si3054_switch_info(snd_kcontrol_t *kcontrol,
99 snd_ctl_elem_info_t *uinfo)
100{
101 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
102 uinfo->count = 1;
103 uinfo->value.integer.min = 0;
104 uinfo->value.integer.max = 1;
105 return 0;
106}
107
108static int si3054_switch_get(snd_kcontrol_t *kcontrol,
109 snd_ctl_elem_value_t *uvalue)
110{
111 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
112 u16 reg = PRIVATE_REG(kcontrol->private_value);
113 u16 mask = PRIVATE_MASK(kcontrol->private_value);
114 uvalue->value.integer.value[0] = (GET_REG(codec, reg)) & mask ? 1 : 0 ;
115 return 0;
116}
117
118static int si3054_switch_put(snd_kcontrol_t *kcontrol,
119 snd_ctl_elem_value_t *uvalue)
120{
121 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
122 u16 reg = PRIVATE_REG(kcontrol->private_value);
123 u16 mask = PRIVATE_MASK(kcontrol->private_value);
124 if (uvalue->value.integer.value[0])
125 SET_REG(codec, reg, (GET_REG(codec, reg)) | mask);
126 else
127 SET_REG(codec, reg, (GET_REG(codec, reg)) & ~mask);
128 return 0;
129}
130
131#define SI3054_KCONTROL(kname,reg,mask) { \
132 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
133 .name = kname, \
134 .info = si3054_switch_info, \
135 .get = si3054_switch_get, \
136 .put = si3054_switch_put, \
137 .private_value = PRIVATE_VALUE(reg,mask), \
138}
139
140
141static snd_kcontrol_new_t si3054_modem_mixer[] = {
142 SI3054_KCONTROL("Off-hook Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_OH),
143 SI3054_KCONTROL("Caller ID Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_CID),
144 {}
145};
146
147static int si3054_build_controls(struct hda_codec *codec)
148{
149 return snd_hda_add_new_ctls(codec, si3054_modem_mixer);
150}
151
152
153/*
154 * PCM callbacks
155 */
156
157static int si3054_pcm_prepare(struct hda_pcm_stream *hinfo,
158 struct hda_codec *codec,
159 unsigned int stream_tag,
160 unsigned int format,
161 snd_pcm_substream_t *substream)
162{
163 u16 val;
164
165 SET_REG(codec, SI3054_LINE_RATE, substream->runtime->rate);
166 val = GET_REG(codec, SI3054_LINE_LEVEL);
167 val &= 0xff << (8 * (substream->stream != SNDRV_PCM_STREAM_PLAYBACK));
168 val |= ((stream_tag & 0xf) << 4) << (8 * (substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
169 SET_REG(codec, SI3054_LINE_LEVEL, val);
170
171 snd_hda_codec_setup_stream(codec, hinfo->nid,
172 stream_tag, 0, format);
173 return 0;
174}
175
176static int si3054_pcm_open(struct hda_pcm_stream *hinfo,
177 struct hda_codec *codec,
178 snd_pcm_substream_t *substream)
179{
180 static unsigned int rates[] = { 8000, 9600, 16000 };
181 static snd_pcm_hw_constraint_list_t hw_constraints_rates = {
182 .count = ARRAY_SIZE(rates),
183 .list = rates,
184 .mask = 0,
185 };
186 substream->runtime->hw.period_bytes_min = 80;
187 return snd_pcm_hw_constraint_list(substream->runtime, 0,
188 SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
189}
190
191
192static struct hda_pcm_stream si3054_pcm = {
193 .substreams = 1,
194 .channels_min = 1,
195 .channels_max = 1,
196 .nid = 0x1,
197 .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_KNOT,
198 .formats = SNDRV_PCM_FMTBIT_S16_LE,
199 .maxbps = 16,
200 .ops = {
201 .open = si3054_pcm_open,
202 .prepare = si3054_pcm_prepare,
203 },
204};
205
206
207static int si3054_build_pcms(struct hda_codec *codec)
208{
209 struct si3054_spec *spec = codec->spec;
210 struct hda_pcm *info = &spec->pcm;
211 si3054_pcm.nid = codec->mfg;
212 codec->num_pcms = 1;
213 codec->pcm_info = info;
214 info->name = "Si3054 Modem";
215 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm;
216 info->stream[SNDRV_PCM_STREAM_CAPTURE] = si3054_pcm;
217 return 0;
218}
219
220
221/*
222 * Init part
223 */
224
225static int si3054_init(struct hda_codec *codec)
226{
227 struct si3054_spec *spec = codec->spec;
228 unsigned wait_count;
229 u16 val;
230
231 snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0);
232 snd_hda_codec_write(codec, codec->mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0);
233 SET_REG(codec, SI3054_LINE_RATE, 9600);
234 SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK);
235 SET_REG(codec, SI3054_EXTENDED_MID, 0);
236
237 wait_count = 10;
238 do {
239 msleep(2);
240 val = GET_REG(codec, SI3054_EXTENDED_MID);
241 } while ((val & SI3054_MEI_READY) != SI3054_MEI_READY && wait_count--);
242
243 if((val&SI3054_MEI_READY) != SI3054_MEI_READY) {
244 snd_printk(KERN_ERR "si3054: cannot initialize. EXT MID = %04x\n", val);
245 return -EACCES;
246 }
247
248 SET_REG(codec, SI3054_GPIO_POLARITY, 0xffff);
249 SET_REG(codec, SI3054_GPIO_CFG, 0x0);
250 SET_REG(codec, SI3054_MISC_AFE, 0);
251 SET_REG(codec, SI3054_LINE_CFG1,0x200);
252
253 if((GET_REG(codec,SI3054_LINE_STATUS) & (1<<6)) == 0) {
254 snd_printd("Link Frame Detect (FDT) is not ready (line status: %04x)\n",
255 GET_REG(codec,SI3054_LINE_STATUS));
256 }
257
258 spec->international = GET_REG(codec, SI3054_CHIPID) & SI3054_CHIPID_INTERNATIONAL;
259
260 return 0;
261}
262
263static void si3054_free(struct hda_codec *codec)
264{
265 kfree(codec->spec);
266}
267
268
269/*
270 */
271
272static struct hda_codec_ops si3054_patch_ops = {
273 .build_controls = si3054_build_controls,
274 .build_pcms = si3054_build_pcms,
275 .init = si3054_init,
276 .free = si3054_free,
277#ifdef CONFIG_PM
278 //.suspend = si3054_suspend,
279 .resume = si3054_init,
280#endif
281};
282
283static int patch_si3054(struct hda_codec *codec)
284{
285 struct si3054_spec *spec = kcalloc(1, sizeof(*spec), GFP_KERNEL);
286 if (spec == NULL)
287 return -ENOMEM;
288 codec->spec = spec;
289 codec->patch_ops = si3054_patch_ops;
290 return 0;
291}
292
293/*
294 * patch entries
295 */
296struct hda_codec_preset snd_hda_preset_si3054[] = {
297 { .id = 0x163c3155, .name = "Si3054", .patch = patch_si3054 },
298 {}
299};
300
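
A worked example of the private_value packing the modem mixer above relies on: the register (node) number rides in the upper 16 bits and the GPIO bit mask in the lower 16, so one switch template serves both controls. Using the defines from the file:

        unsigned long v = PRIVATE_VALUE(SI3054_GPIO_CONTROL, SI3054_GPIO_OH);
        /* v == (10 << 16) | 0x0001 == 0x000a0001 */
        u16 reg  = PRIVATE_REG(v);      /* 10     == SI3054_GPIO_CONTROL */
        u16 mask = PRIVATE_MASK(v);     /* 0x0001 == SI3054_GPIO_OH      */
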
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index eb20f73be61a..39fbe662965d 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -618,15 +618,15 @@ static int __devinit snd_ice1712_delta_init(ice1712_t *ice)
618 */ 618 */
619 619
620static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_select __devinitdata = 620static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_select __devinitdata =
621ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0); 621ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0);
622static snd_kcontrol_new_t snd_ice1712_delta1010lt_wordclock_select __devinitdata = 622static snd_kcontrol_new_t snd_ice1712_delta1010lt_wordclock_select __devinitdata =
623ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 1, 0); 623ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 1, 0);
624static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_status __devinitdata = 624static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_status __devinitdata =
625ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE); 625ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
626static snd_kcontrol_new_t snd_ice1712_deltadio2496_spdif_in_select __devinitdata = 626static snd_kcontrol_new_t snd_ice1712_deltadio2496_spdif_in_select __devinitdata =
627ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0); 627ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0);
628static snd_kcontrol_new_t snd_ice1712_delta_spdif_in_status __devinitdata = 628static snd_kcontrol_new_t snd_ice1712_delta_spdif_in_status __devinitdata =
629ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE); 629ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
630 630
631 631
632static int __devinit snd_ice1712_delta_add_controls(ice1712_t *ice) 632static int __devinit snd_ice1712_delta_add_controls(ice1712_t *ice)
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index a2545a5b26c4..b97f50d10ba3 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -1422,7 +1422,7 @@ static snd_kcontrol_new_t snd_ice1712_multi_capture_analog_switch __devinitdata
1422 1422
1423static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_switch __devinitdata = { 1423static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_switch __devinitdata = {
1424 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1424 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1425 .name = "IEC958 Multi Capture Switch", 1425 .name = SNDRV_CTL_NAME_IEC958("Multi ",CAPTURE,SWITCH),
1426 .info = snd_ice1712_pro_mixer_switch_info, 1426 .info = snd_ice1712_pro_mixer_switch_info,
1427 .get = snd_ice1712_pro_mixer_switch_get, 1427 .get = snd_ice1712_pro_mixer_switch_get,
1428 .put = snd_ice1712_pro_mixer_switch_put, 1428 .put = snd_ice1712_pro_mixer_switch_put,
@@ -1441,7 +1441,7 @@ static snd_kcontrol_new_t snd_ice1712_multi_capture_analog_volume __devinitdata
1441 1441
1442static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_volume __devinitdata = { 1442static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_volume __devinitdata = {
1443 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1443 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1444 .name = "IEC958 Multi Capture Volume", 1444 .name = SNDRV_CTL_NAME_IEC958("Multi ",CAPTURE,VOLUME),
1445 .info = snd_ice1712_pro_mixer_volume_info, 1445 .info = snd_ice1712_pro_mixer_volume_info,
1446 .get = snd_ice1712_pro_mixer_volume_get, 1446 .get = snd_ice1712_pro_mixer_volume_get,
1447 .put = snd_ice1712_pro_mixer_volume_put, 1447 .put = snd_ice1712_pro_mixer_volume_put,
@@ -1715,7 +1715,7 @@ static int snd_ice1712_spdif_maskp_get(snd_kcontrol_t * kcontrol,
1715static snd_kcontrol_new_t snd_ice1712_spdif_maskc __devinitdata = 1715static snd_kcontrol_new_t snd_ice1712_spdif_maskc __devinitdata =
1716{ 1716{
1717 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1717 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1718 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1718 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1719 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 1719 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
1720 .info = snd_ice1712_spdif_info, 1720 .info = snd_ice1712_spdif_info,
1721 .get = snd_ice1712_spdif_maskc_get, 1721 .get = snd_ice1712_spdif_maskc_get,
@@ -1724,7 +1724,7 @@ static snd_kcontrol_new_t snd_ice1712_spdif_maskc __devinitdata =
1724static snd_kcontrol_new_t snd_ice1712_spdif_maskp __devinitdata = 1724static snd_kcontrol_new_t snd_ice1712_spdif_maskp __devinitdata =
1725{ 1725{
1726 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1726 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1727 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1727 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1728 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 1728 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
1729 .info = snd_ice1712_spdif_info, 1729 .info = snd_ice1712_spdif_info,
1730 .get = snd_ice1712_spdif_maskp_get, 1730 .get = snd_ice1712_spdif_maskp_get,
@@ -2203,7 +2203,7 @@ static snd_kcontrol_new_t snd_ice1712_mixer_pro_analog_route __devinitdata = {
2203 2203
2204static snd_kcontrol_new_t snd_ice1712_mixer_pro_spdif_route __devinitdata = { 2204static snd_kcontrol_new_t snd_ice1712_mixer_pro_spdif_route __devinitdata = {
2205 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2205 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2206 .name = "IEC958 Playback Route", 2206 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
2207 .info = snd_ice1712_pro_route_info, 2207 .info = snd_ice1712_pro_route_info,
2208 .get = snd_ice1712_pro_route_spdif_get, 2208 .get = snd_ice1712_pro_route_spdif_get,
2209 .put = snd_ice1712_pro_route_spdif_put, 2209 .put = snd_ice1712_pro_route_spdif_put,
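
Two conventions are being applied consistently here and in the hunks that follow: user-adjustable switches (the delta.c word-clock and SPDIF-input controls) move to the MIXER interface, where alsamixer-style tools look for them, while the read-only IEC958 Con/Pro mask controls describe the SPDIF PCM device and therefore move to the PCM interface (what `amixer cget iface=PCM,name='IEC958 Playback Con Mask'` would resolve). From alsa-lib the element id is then built with the PCM interface, as a sketch:

        /* userspace view, sketch: the mask controls now carry IFACE_PCM */
        snd_ctl_elem_id_t *id;
        snd_ctl_elem_id_alloca(&id);
        snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_PCM);
        snd_ctl_elem_id_set_name(id, "IEC958 Playback Con Mask");
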
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 79b5f12e06fc..c7af5e5fee13 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1414,7 +1414,7 @@ static int snd_vt1724_spdif_maskp_get(snd_kcontrol_t * kcontrol,
1414static snd_kcontrol_new_t snd_vt1724_spdif_maskc __devinitdata = 1414static snd_kcontrol_new_t snd_vt1724_spdif_maskc __devinitdata =
1415{ 1415{
1416 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1416 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1417 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1417 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1418 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 1418 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
1419 .info = snd_vt1724_spdif_info, 1419 .info = snd_vt1724_spdif_info,
1420 .get = snd_vt1724_spdif_maskc_get, 1420 .get = snd_vt1724_spdif_maskc_get,
@@ -1423,7 +1423,7 @@ static snd_kcontrol_new_t snd_vt1724_spdif_maskc __devinitdata =
1423static snd_kcontrol_new_t snd_vt1724_spdif_maskp __devinitdata = 1423static snd_kcontrol_new_t snd_vt1724_spdif_maskp __devinitdata =
1424{ 1424{
1425 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1425 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1426 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1426 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1427 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 1427 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
1428 .info = snd_vt1724_spdif_info, 1428 .info = snd_vt1724_spdif_info,
1429 .get = snd_vt1724_spdif_maskp_get, 1429 .get = snd_vt1724_spdif_maskp_get,
@@ -1466,7 +1466,7 @@ static snd_kcontrol_new_t snd_vt1724_spdif_switch __devinitdata =
1466 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1466 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1467 /* FIXME: the following conflicts with IEC958 Playback Route */ 1467 /* FIXME: the following conflicts with IEC958 Playback Route */
1468 // .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 1468 // .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),
1469 .name = "IEC958 Output Switch", 1469 .name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
1470 .info = snd_vt1724_spdif_sw_info, 1470 .info = snd_vt1724_spdif_sw_info,
1471 .get = snd_vt1724_spdif_sw_get, 1471 .get = snd_vt1724_spdif_sw_get,
1472 .put = snd_vt1724_spdif_sw_put 1472 .put = snd_vt1724_spdif_sw_put
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index d7af3e474432..7b548416dcef 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -389,6 +389,7 @@ typedef struct {
389 struct ac97_pcm *pcm; 389 struct ac97_pcm *pcm;
390 int pcm_open_flag; 390 int pcm_open_flag;
391 unsigned int page_attr_changed: 1; 391 unsigned int page_attr_changed: 1;
392 unsigned int suspended: 1;
392} ichdev_t; 393} ichdev_t;
393 394
394typedef struct _snd_intel8x0 intel8x0_t; 395typedef struct _snd_intel8x0 intel8x0_t;
@@ -862,12 +863,16 @@ static int snd_intel8x0_pcm_trigger(snd_pcm_substream_t *substream, int cmd)
862 unsigned long port = ichdev->reg_offset; 863 unsigned long port = ichdev->reg_offset;
863 864
864 switch (cmd) { 865 switch (cmd) {
865 case SNDRV_PCM_TRIGGER_START:
866 case SNDRV_PCM_TRIGGER_RESUME: 866 case SNDRV_PCM_TRIGGER_RESUME:
867 ichdev->suspended = 0;
868 /* fallthru */
869 case SNDRV_PCM_TRIGGER_START:
867 val = ICH_IOCE | ICH_STARTBM; 870 val = ICH_IOCE | ICH_STARTBM;
868 break; 871 break;
869 case SNDRV_PCM_TRIGGER_STOP:
870 case SNDRV_PCM_TRIGGER_SUSPEND: 872 case SNDRV_PCM_TRIGGER_SUSPEND:
873 ichdev->suspended = 1;
874 /* fallthru */
875 case SNDRV_PCM_TRIGGER_STOP:
871 val = 0; 876 val = 0;
872 break; 877 break;
873 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 878 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
@@ -899,9 +904,11 @@ static int snd_intel8x0_ali_trigger(snd_pcm_substream_t *substream, int cmd)
899 904
900 val = igetdword(chip, ICHREG(ALI_DMACR)); 905 val = igetdword(chip, ICHREG(ALI_DMACR));
901 switch (cmd) { 906 switch (cmd) {
907 case SNDRV_PCM_TRIGGER_RESUME:
908 ichdev->suspended = 0;
909 /* fallthru */
902 case SNDRV_PCM_TRIGGER_START: 910 case SNDRV_PCM_TRIGGER_START:
903 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 911 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
904 case SNDRV_PCM_TRIGGER_RESUME:
905 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 912 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
906 /* clear FIFO for synchronization of channels */ 913 /* clear FIFO for synchronization of channels */
907 fifo = igetdword(chip, fiforeg[ichdev->ali_slot / 4]); 914 fifo = igetdword(chip, fiforeg[ichdev->ali_slot / 4]);
@@ -913,9 +920,11 @@ static int snd_intel8x0_ali_trigger(snd_pcm_substream_t *substream, int cmd)
913 val &= ~(1 << (ichdev->ali_slot + 16)); /* clear PAUSE flag */ 920 val &= ~(1 << (ichdev->ali_slot + 16)); /* clear PAUSE flag */
914 iputdword(chip, ICHREG(ALI_DMACR), val | (1 << ichdev->ali_slot)); /* start DMA */ 921 iputdword(chip, ICHREG(ALI_DMACR), val | (1 << ichdev->ali_slot)); /* start DMA */
915 break; 922 break;
923 case SNDRV_PCM_TRIGGER_SUSPEND:
924 ichdev->suspended = 1;
925 /* fallthru */
916 case SNDRV_PCM_TRIGGER_STOP: 926 case SNDRV_PCM_TRIGGER_STOP:
917 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 927 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
918 case SNDRV_PCM_TRIGGER_SUSPEND:
919 iputdword(chip, ICHREG(ALI_DMACR), val | (1 << (ichdev->ali_slot + 16))); /* pause */ 928 iputdword(chip, ICHREG(ALI_DMACR), val | (1 << (ichdev->ali_slot + 16))); /* pause */
920 iputbyte(chip, port + ICH_REG_OFF_CR, 0); 929 iputbyte(chip, port + ICH_REG_OFF_CR, 0);
921 while (igetbyte(chip, port + ICH_REG_OFF_CR)) 930 while (igetbyte(chip, port + ICH_REG_OFF_CR))
@@ -994,6 +1003,8 @@ static void snd_intel8x0_setup_pcm_out(intel8x0_t *chip,
994{ 1003{
995 unsigned int cnt; 1004 unsigned int cnt;
996 int dbl = runtime->rate > 48000; 1005 int dbl = runtime->rate > 48000;
1006
1007 spin_lock_irq(&chip->reg_lock);
997 switch (chip->device_type) { 1008 switch (chip->device_type) {
998 case DEVICE_ALI: 1009 case DEVICE_ALI:
999 cnt = igetdword(chip, ICHREG(ALI_SCR)); 1010 cnt = igetdword(chip, ICHREG(ALI_SCR));
@@ -1037,6 +1048,7 @@ static void snd_intel8x0_setup_pcm_out(intel8x0_t *chip,
1037 iputdword(chip, ICHREG(GLOB_CNT), cnt); 1048 iputdword(chip, ICHREG(GLOB_CNT), cnt);
1038 break; 1049 break;
1039 } 1050 }
1051 spin_unlock_irq(&chip->reg_lock);
1040} 1052}
1041 1053
1042static int snd_intel8x0_pcm_prepare(snd_pcm_substream_t * substream) 1054static int snd_intel8x0_pcm_prepare(snd_pcm_substream_t * substream)
@@ -1048,15 +1060,12 @@ static int snd_intel8x0_pcm_prepare(snd_pcm_substream_t * substream)
1048 ichdev->physbuf = runtime->dma_addr; 1060 ichdev->physbuf = runtime->dma_addr;
1049 ichdev->size = snd_pcm_lib_buffer_bytes(substream); 1061 ichdev->size = snd_pcm_lib_buffer_bytes(substream);
1050 ichdev->fragsize = snd_pcm_lib_period_bytes(substream); 1062 ichdev->fragsize = snd_pcm_lib_period_bytes(substream);
1051 spin_lock_irq(&chip->reg_lock);
1052 if (ichdev->ichd == ICHD_PCMOUT) { 1063 if (ichdev->ichd == ICHD_PCMOUT) {
1053 snd_intel8x0_setup_pcm_out(chip, runtime); 1064 snd_intel8x0_setup_pcm_out(chip, runtime);
1054 if (chip->device_type == DEVICE_INTEL_ICH4) { 1065 if (chip->device_type == DEVICE_INTEL_ICH4)
1055 ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1; 1066 ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
1056 }
1057 } 1067 }
1058 snd_intel8x0_setup_periods(chip, ichdev); 1068 snd_intel8x0_setup_periods(chip, ichdev);
1059 spin_unlock_irq(&chip->reg_lock);
1060 return 0; 1069 return 0;
1061} 1070}
1062 1071
@@ -1817,6 +1826,18 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
1817 }, 1826 },
1818 { 1827 {
1819 .subvendor = 0x103c, 1828 .subvendor = 0x103c,
1829 .subdevice = 0x0934,
1830 .name = "HP nx8220",
1831 .type = AC97_TUNE_MUTE_LED
1832 },
1833 {
1834 .subvendor = 0x103c,
1835 .subdevice = 0x099c,
1836 .name = "HP nx6110", /* AD1981B */
1837 .type = AC97_TUNE_HP_ONLY
1838 },
1839 {
1840 .subvendor = 0x103c,
1820 .subdevice = 0x129d, 1841 .subdevice = 0x129d,
1821 .name = "HP xw8000", 1842 .name = "HP xw8000",
1822 .type = AC97_TUNE_HP_ONLY 1843 .type = AC97_TUNE_HP_ONLY
@@ -1870,6 +1891,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
1870 .type = AC97_TUNE_HP_ONLY 1891 .type = AC97_TUNE_HP_ONLY
1871 }, 1892 },
1872 { 1893 {
1894 .subvendor = 0x10cf,
1895 .subdevice = 0x12ec,
1896 .name = "Fujitsu-Siemens 4010",
1897 .type = AC97_TUNE_HP_ONLY
1898 },
1899 {
1873 .subvendor = 0x10f1, 1900 .subvendor = 0x10f1,
1874 .subdevice = 0x2665, 1901 .subdevice = 0x2665,
1875 .name = "Fujitsu-Siemens Celsius", /* AD1981? */ 1902 .name = "Fujitsu-Siemens Celsius", /* AD1981? */
@@ -2424,6 +2451,20 @@ static int intel8x0_resume(snd_card_t *card)
2424 } 2451 }
2425 } 2452 }
2426 2453
2454 /* resume status */
2455 for (i = 0; i < chip->bdbars_count; i++) {
2456 ichdev_t *ichdev = &chip->ichd[i];
2457 unsigned long port = ichdev->reg_offset;
2458 if (! ichdev->substream || ! ichdev->suspended)
2459 continue;
2460 if (ichdev->ichd == ICHD_PCMOUT)
2461 snd_intel8x0_setup_pcm_out(chip, ichdev->substream->runtime);
2462 iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
2463 iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi);
2464 iputbyte(chip, port + ICH_REG_OFF_CIV, ichdev->civ);
2465 iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
2466 }
2467
2427 return 0; 2468 return 0;
2428} 2469}
2429#endif /* CONFIG_PM */ 2470#endif /* CONFIG_PM */
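
The trigger reordering and the new resume block form one mechanism: SUSPEND now marks the stream before falling through to the stop path, RESUME clears the mark before falling through to start, and intel8x0_resume() re-loads the DMA descriptor state only for streams that were actually running. In outline, restating the hunks above:

        case SNDRV_PCM_TRIGGER_SUSPEND:
                ichdev->suspended = 1;          /* remember for resume */
                /* fallthru */
        case SNDRV_PCM_TRIGGER_STOP:
                val = 0;
                break;

        /* ...and on resume, for each stream marked suspended: */
        iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
        iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi);
        iputbyte(chip, port + ICH_REG_OFF_CIV, ichdev->civ);
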
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 79d8eda54f0d..d2aa9c82d41e 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2067,7 +2067,7 @@ static int snd_korg1212_control_sync_put(snd_kcontrol_t * kcontrol, snd_ctl_elem
2067 }, \ 2067 }, \
2068 { \ 2068 { \
2069 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE, \ 2069 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE, \
2070 .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 2070 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2071 .name = c_name " Monitor Phase Invert", \ 2071 .name = c_name " Monitor Phase Invert", \
2072 .info = snd_korg1212_control_phase_info, \ 2072 .info = snd_korg1212_control_phase_info, \
2073 .get = snd_korg1212_control_phase_get, \ 2073 .get = snd_korg1212_control_phase_get, \
@@ -2082,7 +2082,7 @@ static snd_kcontrol_new_t snd_korg1212_controls[] = {
2082 MON_MIXER(4, "ADAT-5"), MON_MIXER(5, "ADAT-6"), MON_MIXER(6, "ADAT-7"), MON_MIXER(7, "ADAT-8"), 2082 MON_MIXER(4, "ADAT-5"), MON_MIXER(5, "ADAT-6"), MON_MIXER(6, "ADAT-7"), MON_MIXER(7, "ADAT-8"),
2083 { 2083 {
2084 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE, 2084 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE,
2085 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 2085 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2086 .name = "Sync Source", 2086 .name = "Sync Source",
2087 .info = snd_korg1212_control_sync_info, 2087 .info = snd_korg1212_control_sync_info,
2088 .get = snd_korg1212_control_sync_get, 2088 .get = snd_korg1212_control_sync_get,
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 7eb20b8f89f6..2bbeb10ff7c4 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -189,6 +189,7 @@ struct snd_nm256_stream {
189 nm256_t *chip; 189 nm256_t *chip;
190 snd_pcm_substream_t *substream; 190 snd_pcm_substream_t *substream;
191 int running; 191 int running;
192 int suspended;
192 193
193 u32 buf; /* offset from chip->buffer */ 194 u32 buf; /* offset from chip->buffer */
194 int bufsize; /* buffer size in bytes */ 195 int bufsize; /* buffer size in bytes */
@@ -231,8 +232,10 @@ struct snd_nm256 {
231 int mixer_status_mask; /* bit mask to test the mixer status */ 232 int mixer_status_mask; /* bit mask to test the mixer status */
232 233
233 int irq; 234 int irq;
235 int irq_acks;
234 irqreturn_t (*interrupt)(int, void *, struct pt_regs *); 236 irqreturn_t (*interrupt)(int, void *, struct pt_regs *);
235 int badintrcount; /* counter to check bogus interrupts */ 237 int badintrcount; /* counter to check bogus interrupts */
238 struct semaphore irq_mutex;
236 239
237 nm256_stream_t streams[2]; 240 nm256_stream_t streams[2];
238 241
@@ -464,6 +467,37 @@ snd_nm256_set_format(nm256_t *chip, nm256_stream_t *s, snd_pcm_substream_t *subs
464 } 467 }
465} 468}
466 469
470/* acquire interrupt */
471static int snd_nm256_acquire_irq(nm256_t *chip)
472{
473 down(&chip->irq_mutex);
474 if (chip->irq < 0) {
475 if (request_irq(chip->pci->irq, chip->interrupt, SA_INTERRUPT|SA_SHIRQ,
476 chip->card->driver, (void*)chip)) {
477 snd_printk("unable to grab IRQ %d\n", chip->pci->irq);
478 up(&chip->irq_mutex);
479 return -EBUSY;
480 }
481 chip->irq = chip->pci->irq;
482 }
483 chip->irq_acks++;
484 up(&chip->irq_mutex);
485 return 0;
486}
487
488/* release interrupt */
489static void snd_nm256_release_irq(nm256_t *chip)
490{
491 down(&chip->irq_mutex);
492 if (chip->irq_acks > 0)
493 chip->irq_acks--;
494 if (chip->irq_acks == 0 && chip->irq >= 0) {
495 free_irq(chip->irq, (void*)chip);
496 chip->irq = -1;
497 }
498 up(&chip->irq_mutex);
499}
500
467/* 501/*
468 * start / stop 502 * start / stop
469 */ 503 */
@@ -538,15 +572,19 @@ snd_nm256_playback_trigger(snd_pcm_substream_t *substream, int cmd)
538 572
539 spin_lock(&chip->reg_lock); 573 spin_lock(&chip->reg_lock);
540 switch (cmd) { 574 switch (cmd) {
541 case SNDRV_PCM_TRIGGER_START:
542 case SNDRV_PCM_TRIGGER_RESUME: 575 case SNDRV_PCM_TRIGGER_RESUME:
576 s->suspended = 0;
577 /* fallthru */
578 case SNDRV_PCM_TRIGGER_START:
543 if (! s->running) { 579 if (! s->running) {
544 snd_nm256_playback_start(chip, s, substream); 580 snd_nm256_playback_start(chip, s, substream);
545 s->running = 1; 581 s->running = 1;
546 } 582 }
547 break; 583 break;
548 case SNDRV_PCM_TRIGGER_STOP:
549 case SNDRV_PCM_TRIGGER_SUSPEND: 584 case SNDRV_PCM_TRIGGER_SUSPEND:
585 s->suspended = 1;
586 /* fallthru */
587 case SNDRV_PCM_TRIGGER_STOP:
550 if (s->running) { 588 if (s->running) {
551 snd_nm256_playback_stop(chip); 589 snd_nm256_playback_stop(chip);
552 s->running = 0; 590 s->running = 0;
@@ -818,6 +856,8 @@ snd_nm256_playback_open(snd_pcm_substream_t *substream)
818{ 856{
819 nm256_t *chip = snd_pcm_substream_chip(substream); 857 nm256_t *chip = snd_pcm_substream_chip(substream);
820 858
859 if (snd_nm256_acquire_irq(chip) < 0)
860 return -EBUSY;
821 snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK], 861 snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK],
822 substream, &snd_nm256_playback); 862 substream, &snd_nm256_playback);
823 return 0; 863 return 0;
@@ -828,6 +868,8 @@ snd_nm256_capture_open(snd_pcm_substream_t *substream)
828{ 868{
829 nm256_t *chip = snd_pcm_substream_chip(substream); 869 nm256_t *chip = snd_pcm_substream_chip(substream);
830 870
871 if (snd_nm256_acquire_irq(chip) < 0)
872 return -EBUSY;
831 snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_CAPTURE], 873 snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_CAPTURE],
832 substream, &snd_nm256_capture); 874 substream, &snd_nm256_capture);
833 return 0; 875 return 0;
@@ -839,6 +881,9 @@ snd_nm256_capture_open(snd_pcm_substream_t *substream)
839static int 881static int
840snd_nm256_playback_close(snd_pcm_substream_t *substream) 882snd_nm256_playback_close(snd_pcm_substream_t *substream)
841{ 883{
884 nm256_t *chip = snd_pcm_substream_chip(substream);
885
886 snd_nm256_release_irq(chip);
842 return 0; 887 return 0;
843} 888}
844 889
@@ -846,6 +891,9 @@ snd_nm256_playback_close(snd_pcm_substream_t *substream)
846static int 891static int
847snd_nm256_capture_close(snd_pcm_substream_t *substream) 892snd_nm256_capture_close(snd_pcm_substream_t *substream)
848{ 893{
894 nm256_t *chip = snd_pcm_substream_chip(substream);
895
896 snd_nm256_release_irq(chip);
849 return 0; 897 return 0;
850} 898}
851 899
@@ -915,18 +963,16 @@ snd_nm256_pcm(nm256_t *chip, int device)
915static void 963static void
916snd_nm256_init_chip(nm256_t *chip) 964snd_nm256_init_chip(nm256_t *chip)
917{ 965{
918 spin_lock_irq(&chip->reg_lock);
919 /* Reset everything. */ 966 /* Reset everything. */
920 snd_nm256_writeb(chip, 0x0, 0x11); 967 snd_nm256_writeb(chip, 0x0, 0x11);
921 snd_nm256_writew(chip, 0x214, 0); 968 snd_nm256_writew(chip, 0x214, 0);
922 /* stop sounds.. */ 969 /* stop sounds.. */
923 //snd_nm256_playback_stop(chip); 970 //snd_nm256_playback_stop(chip);
924 //snd_nm256_capture_stop(chip); 971 //snd_nm256_capture_stop(chip);
925 spin_unlock_irq(&chip->reg_lock);
926} 972}
927 973
928 974
929static inline void 975static irqreturn_t
930snd_nm256_intr_check(nm256_t *chip) 976snd_nm256_intr_check(nm256_t *chip)
931{ 977{
932 if (chip->badintrcount++ > 1000) { 978 if (chip->badintrcount++ > 1000) {
@@ -947,7 +993,9 @@ snd_nm256_intr_check(nm256_t *chip)
947 if (chip->streams[SNDRV_PCM_STREAM_CAPTURE].running) 993 if (chip->streams[SNDRV_PCM_STREAM_CAPTURE].running)
948 snd_nm256_capture_stop(chip); 994 snd_nm256_capture_stop(chip);
949 chip->badintrcount = 0; 995 chip->badintrcount = 0;
996 return IRQ_HANDLED;
950 } 997 }
998 return IRQ_NONE;
951} 999}
952 1000
953/* 1001/*
@@ -969,10 +1017,8 @@ snd_nm256_interrupt(int irq, void *dev_id, struct pt_regs *dummy)
969 status = snd_nm256_readw(chip, NM_INT_REG); 1017 status = snd_nm256_readw(chip, NM_INT_REG);
970 1018
971 /* Not ours. */ 1019 /* Not ours. */
972 if (status == 0) { 1020 if (status == 0)
973 snd_nm256_intr_check(chip); 1021 return snd_nm256_intr_check(chip);
974 return IRQ_NONE;
975 }
976 1022
977 chip->badintrcount = 0; 1023 chip->badintrcount = 0;
978 1024
@@ -1036,10 +1082,8 @@ snd_nm256_interrupt_zx(int irq, void *dev_id, struct pt_regs *dummy)
1036 status = snd_nm256_readl(chip, NM_INT_REG); 1082 status = snd_nm256_readl(chip, NM_INT_REG);
1037 1083
1038 /* Not ours. */ 1084 /* Not ours. */
1039 if (status == 0) { 1085 if (status == 0)
1040 snd_nm256_intr_check(chip); 1086 return snd_nm256_intr_check(chip);
1041 return IRQ_NONE;
1042 }
1043 1087
1044 chip->badintrcount = 0; 1088 chip->badintrcount = 0;
1045 1089
@@ -1192,7 +1236,7 @@ snd_nm256_mixer(nm256_t *chip)
1192 AC97_PC_BEEP, AC97_PHONE, AC97_MIC, AC97_LINE, AC97_CD, 1236 AC97_PC_BEEP, AC97_PHONE, AC97_MIC, AC97_LINE, AC97_CD,
1193 AC97_VIDEO, AC97_AUX, AC97_PCM, AC97_REC_SEL, 1237 AC97_VIDEO, AC97_AUX, AC97_PCM, AC97_REC_SEL,
1194 AC97_REC_GAIN, AC97_GENERAL_PURPOSE, AC97_3D_CONTROL, 1238 AC97_REC_GAIN, AC97_GENERAL_PURPOSE, AC97_3D_CONTROL,
1195 AC97_EXTENDED_ID, 1239 /*AC97_EXTENDED_ID,*/
1196 AC97_VENDOR_ID1, AC97_VENDOR_ID2, 1240 AC97_VENDOR_ID1, AC97_VENDOR_ID2,
1197 -1 1241 -1
1198 }; 1242 };
@@ -1206,6 +1250,7 @@ snd_nm256_mixer(nm256_t *chip)
1206 for (i = 0; mixer_regs[i] >= 0; i++) 1250 for (i = 0; mixer_regs[i] >= 0; i++)
1207 set_bit(mixer_regs[i], ac97.reg_accessed); 1251 set_bit(mixer_regs[i], ac97.reg_accessed);
1208 ac97.private_data = chip; 1252 ac97.private_data = chip;
1253 pbus->no_vra = 1;
1209 err = snd_ac97_mixer(pbus, &ac97, &chip->ac97); 1254 err = snd_ac97_mixer(pbus, &ac97, &chip->ac97);
1210 if (err < 0) 1255 if (err < 0)
1211 return err; 1256 return err;
@@ -1281,6 +1326,7 @@ static int nm256_suspend(snd_card_t *card, pm_message_t state)
1281static int nm256_resume(snd_card_t *card) 1326static int nm256_resume(snd_card_t *card)
1282{ 1327{
1283 nm256_t *chip = card->pm_private_data; 1328 nm256_t *chip = card->pm_private_data;
1329 int i;
1284 1330
1285 /* Perform a full reset on the hardware */ 1331 /* Perform a full reset on the hardware */
1286 pci_enable_device(chip->pci); 1332 pci_enable_device(chip->pci);
@@ -1289,6 +1335,15 @@ static int nm256_resume(snd_card_t *card)
1289 /* restore ac97 */ 1335 /* restore ac97 */
1290 snd_ac97_resume(chip->ac97); 1336 snd_ac97_resume(chip->ac97);
1291 1337
1338 for (i = 0; i < 2; i++) {
1339 nm256_stream_t *s = &chip->streams[i];
1340 if (s->substream && s->suspended) {
1341 spin_lock_irq(&chip->reg_lock);
1342 snd_nm256_set_format(chip, s, s->substream);
1343 spin_unlock_irq(&chip->reg_lock);
1344 }
1345 }
1346
1292 return 0; 1347 return 0;
1293} 1348}
1294#endif /* CONFIG_PM */ 1349#endif /* CONFIG_PM */
@@ -1360,6 +1415,7 @@ snd_nm256_create(snd_card_t *card, struct pci_dev *pci,
1360 chip->use_cache = usecache; 1415 chip->use_cache = usecache;
1361 spin_lock_init(&chip->reg_lock); 1416 spin_lock_init(&chip->reg_lock);
1362 chip->irq = -1; 1417 chip->irq = -1;
1418 init_MUTEX(&chip->irq_mutex);
1363 1419
1364 chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = play_bufsize; 1420 chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = play_bufsize;
1365 chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capt_bufsize; 1421 chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capt_bufsize;
@@ -1470,15 +1526,6 @@ snd_nm256_create(snd_card_t *card, struct pci_dev *pci,
1470 chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr; 1526 chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
1471 } 1527 }
1472 1528
1473 /* acquire interrupt */
1474 if (request_irq(pci->irq, chip->interrupt, SA_INTERRUPT|SA_SHIRQ,
1475 card->driver, (void*)chip)) {
1476 err = -EBUSY;
1477 snd_printk("unable to grab IRQ %d\n", pci->irq);
1478 goto __error;
1479 }
1480 chip->irq = pci->irq;
1481
1482 /* Fixed setting. */ 1529 /* Fixed setting. */
1483 chip->mixer_base = NM_MIXER_OFFSET; 1530 chip->mixer_base = NM_MIXER_OFFSET;
1484 1531
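
The nm256 IRQ handling switches from a probe-time request_irq() to per-open refcounting: the first opened stream takes the (shared) interrupt line, the last close releases it, and irq_mutex serializes the count. The pairing in outline:

        /* open: the 0 -> 1 transition requests the IRQ */
        if (snd_nm256_acquire_irq(chip) < 0)
                return -EBUSY;

        /* close: the 1 -> 0 transition frees it again */
        snd_nm256_release_irq(chip);
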
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index b7b554df6705..456be39e8e4a 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1900,7 +1900,7 @@ static snd_kcontrol_new_t snd_rme32_controls[] = {
1900 }, 1900 },
1901 { 1901 {
1902 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1902 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1903 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1903 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1904 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK), 1904 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
1905 .info = snd_rme32_control_spdif_mask_info, 1905 .info = snd_rme32_control_spdif_mask_info,
1906 .get = snd_rme32_control_spdif_mask_get, 1906 .get = snd_rme32_control_spdif_mask_get,
@@ -1908,7 +1908,7 @@ static snd_kcontrol_new_t snd_rme32_controls[] = {
1908 }, 1908 },
1909 { 1909 {
1910 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1910 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1911 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1911 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1912 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK), 1912 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK),
1913 .info = snd_rme32_control_spdif_mask_info, 1913 .info = snd_rme32_control_spdif_mask_info,
1914 .get = snd_rme32_control_spdif_mask_get, 1914 .get = snd_rme32_control_spdif_mask_get,
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 10c4f45a913c..9645e9004a48 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -2266,7 +2266,7 @@ static snd_kcontrol_new_t snd_rme96_controls[] = {
2266}, 2266},
2267{ 2267{
2268 .access = SNDRV_CTL_ELEM_ACCESS_READ, 2268 .access = SNDRV_CTL_ELEM_ACCESS_READ,
2269 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2269 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2270 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 2270 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
2271 .info = snd_rme96_control_spdif_mask_info, 2271 .info = snd_rme96_control_spdif_mask_info,
2272 .get = snd_rme96_control_spdif_mask_get, 2272 .get = snd_rme96_control_spdif_mask_get,
@@ -2276,7 +2276,7 @@ static snd_kcontrol_new_t snd_rme96_controls[] = {
2276}, 2276},
2277{ 2277{
2278 .access = SNDRV_CTL_ELEM_ACCESS_READ, 2278 .access = SNDRV_CTL_ELEM_ACCESS_READ,
2279 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2279 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2280 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 2280 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
2281 .info = snd_rme96_control_spdif_mask_info, 2281 .info = snd_rme96_control_spdif_mask_info,
2282 .get = snd_rme96_control_spdif_mask_get, 2282 .get = snd_rme96_control_spdif_mask_get,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 796621de5009..6694866089b5 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1524,7 +1524,7 @@ static int snd_hdsp_control_spdif_mask_get(snd_kcontrol_t * kcontrol, snd_ctl_el
1524} 1524}
1525 1525
1526#define HDSP_SPDIF_IN(xname, xindex) \ 1526#define HDSP_SPDIF_IN(xname, xindex) \
1527{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 1527{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1528 .name = xname, \ 1528 .name = xname, \
1529 .index = xindex, \ 1529 .index = xindex, \
1530 .info = snd_hdsp_info_spdif_in, \ 1530 .info = snd_hdsp_info_spdif_in, \
@@ -1584,7 +1584,7 @@ static int snd_hdsp_put_spdif_in(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
1584} 1584}
1585 1585
1586#define HDSP_SPDIF_OUT(xname, xindex) \ 1586#define HDSP_SPDIF_OUT(xname, xindex) \
1587{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \ 1587{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1588 .info = snd_hdsp_info_spdif_bits, \ 1588 .info = snd_hdsp_info_spdif_bits, \
1589 .get = snd_hdsp_get_spdif_out, .put = snd_hdsp_put_spdif_out } 1589 .get = snd_hdsp_get_spdif_out, .put = snd_hdsp_put_spdif_out }
1590 1590
@@ -1638,7 +1638,7 @@ static int snd_hdsp_put_spdif_out(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_
1638} 1638}
1639 1639
1640#define HDSP_SPDIF_PROFESSIONAL(xname, xindex) \ 1640#define HDSP_SPDIF_PROFESSIONAL(xname, xindex) \
1641{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \ 1641{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1642 .info = snd_hdsp_info_spdif_bits, \ 1642 .info = snd_hdsp_info_spdif_bits, \
1643 .get = snd_hdsp_get_spdif_professional, .put = snd_hdsp_put_spdif_professional } 1643 .get = snd_hdsp_get_spdif_professional, .put = snd_hdsp_put_spdif_professional }
1644 1644
@@ -1683,7 +1683,7 @@ static int snd_hdsp_put_spdif_professional(snd_kcontrol_t * kcontrol, snd_ctl_el
1683} 1683}
1684 1684
1685#define HDSP_SPDIF_EMPHASIS(xname, xindex) \ 1685#define HDSP_SPDIF_EMPHASIS(xname, xindex) \
1686{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \ 1686{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1687 .info = snd_hdsp_info_spdif_bits, \ 1687 .info = snd_hdsp_info_spdif_bits, \
1688 .get = snd_hdsp_get_spdif_emphasis, .put = snd_hdsp_put_spdif_emphasis } 1688 .get = snd_hdsp_get_spdif_emphasis, .put = snd_hdsp_put_spdif_emphasis }
1689 1689
@@ -1728,7 +1728,7 @@ static int snd_hdsp_put_spdif_emphasis(snd_kcontrol_t * kcontrol, snd_ctl_elem_v
1728} 1728}
1729 1729
1730#define HDSP_SPDIF_NON_AUDIO(xname, xindex) \ 1730#define HDSP_SPDIF_NON_AUDIO(xname, xindex) \
1731{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \ 1731{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1732 .info = snd_hdsp_info_spdif_bits, \ 1732 .info = snd_hdsp_info_spdif_bits, \
1733 .get = snd_hdsp_get_spdif_nonaudio, .put = snd_hdsp_put_spdif_nonaudio } 1733 .get = snd_hdsp_get_spdif_nonaudio, .put = snd_hdsp_put_spdif_nonaudio }
1734 1734
@@ -1773,7 +1773,7 @@ static int snd_hdsp_put_spdif_nonaudio(snd_kcontrol_t * kcontrol, snd_ctl_elem_v
1773} 1773}
1774 1774
1775#define HDSP_SPDIF_SAMPLE_RATE(xname, xindex) \ 1775#define HDSP_SPDIF_SAMPLE_RATE(xname, xindex) \
1776{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1776{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1777 .name = xname, \ 1777 .name = xname, \
1778 .index = xindex, \ 1778 .index = xindex, \
1779 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1779 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1834,7 +1834,7 @@ static int snd_hdsp_get_spdif_sample_rate(snd_kcontrol_t * kcontrol, snd_ctl_ele
1834} 1834}
1835 1835
1836#define HDSP_SYSTEM_SAMPLE_RATE(xname, xindex) \ 1836#define HDSP_SYSTEM_SAMPLE_RATE(xname, xindex) \
1837{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1837{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1838 .name = xname, \ 1838 .name = xname, \
1839 .index = xindex, \ 1839 .index = xindex, \
1840 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1840 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1858,7 +1858,7 @@ static int snd_hdsp_get_system_sample_rate(snd_kcontrol_t * kcontrol, snd_ctl_el
1858} 1858}
1859 1859
1860#define HDSP_AUTOSYNC_SAMPLE_RATE(xname, xindex) \ 1860#define HDSP_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
1861{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 1861{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1862 .name = xname, \ 1862 .name = xname, \
1863 .index = xindex, \ 1863 .index = xindex, \
1864 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1864 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1918,7 +1918,7 @@ static int snd_hdsp_get_autosync_sample_rate(snd_kcontrol_t * kcontrol, snd_ctl_
1918} 1918}
1919 1919
1920#define HDSP_SYSTEM_CLOCK_MODE(xname, xindex) \ 1920#define HDSP_SYSTEM_CLOCK_MODE(xname, xindex) \
1921{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1921{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1922 .name = xname, \ 1922 .name = xname, \
1923 .index = xindex, \ 1923 .index = xindex, \
1924 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1924 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1958,7 +1958,7 @@ static int snd_hdsp_get_system_clock_mode(snd_kcontrol_t * kcontrol, snd_ctl_ele
1958} 1958}
1959 1959
1960#define HDSP_CLOCK_SOURCE(xname, xindex) \ 1960#define HDSP_CLOCK_SOURCE(xname, xindex) \
1961{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 1961{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1962 .name = xname, \ 1962 .name = xname, \
1963 .index = xindex, \ 1963 .index = xindex, \
1964 .info = snd_hdsp_info_clock_source, \ 1964 .info = snd_hdsp_info_clock_source, \
@@ -2124,7 +2124,7 @@ static int snd_hdsp_put_clock_source_lock(snd_kcontrol_t * kcontrol, snd_ctl_ele
2124} 2124}
2125 2125
2126#define HDSP_DA_GAIN(xname, xindex) \ 2126#define HDSP_DA_GAIN(xname, xindex) \
2127{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2127{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2128 .name = xname, \ 2128 .name = xname, \
2129 .index = xindex, \ 2129 .index = xindex, \
2130 .info = snd_hdsp_info_da_gain, \ 2130 .info = snd_hdsp_info_da_gain, \
@@ -2210,7 +2210,7 @@ static int snd_hdsp_put_da_gain(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
2210} 2210}
2211 2211
2212#define HDSP_AD_GAIN(xname, xindex) \ 2212#define HDSP_AD_GAIN(xname, xindex) \
2213{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2213{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2214 .name = xname, \ 2214 .name = xname, \
2215 .index = xindex, \ 2215 .index = xindex, \
2216 .info = snd_hdsp_info_ad_gain, \ 2216 .info = snd_hdsp_info_ad_gain, \
@@ -2296,7 +2296,7 @@ static int snd_hdsp_put_ad_gain(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
2296} 2296}
2297 2297
2298#define HDSP_PHONE_GAIN(xname, xindex) \ 2298#define HDSP_PHONE_GAIN(xname, xindex) \
2299{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2299{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2300 .name = xname, \ 2300 .name = xname, \
2301 .index = xindex, \ 2301 .index = xindex, \
2302 .info = snd_hdsp_info_phone_gain, \ 2302 .info = snd_hdsp_info_phone_gain, \
@@ -2382,7 +2382,7 @@ static int snd_hdsp_put_phone_gain(snd_kcontrol_t * kcontrol, snd_ctl_elem_value
2382} 2382}
2383 2383
2384#define HDSP_XLR_BREAKOUT_CABLE(xname, xindex) \ 2384#define HDSP_XLR_BREAKOUT_CABLE(xname, xindex) \
2385{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2385{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2386 .name = xname, \ 2386 .name = xname, \
2387 .index = xindex, \ 2387 .index = xindex, \
2388 .info = snd_hdsp_info_xlr_breakout_cable, \ 2388 .info = snd_hdsp_info_xlr_breakout_cable, \
@@ -2447,7 +2447,7 @@ static int snd_hdsp_put_xlr_breakout_cable(snd_kcontrol_t * kcontrol, snd_ctl_el
2447 Switching this on deactivates external ADAT 2447 Switching this on deactivates external ADAT
2448*/ 2448*/
2449#define HDSP_AEB(xname, xindex) \ 2449#define HDSP_AEB(xname, xindex) \
2450{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2450{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2451 .name = xname, \ 2451 .name = xname, \
2452 .index = xindex, \ 2452 .index = xindex, \
2453 .info = snd_hdsp_info_aeb, \ 2453 .info = snd_hdsp_info_aeb, \
@@ -2508,7 +2508,7 @@ static int snd_hdsp_put_aeb(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * uc
2508} 2508}
2509 2509
2510#define HDSP_PREF_SYNC_REF(xname, xindex) \ 2510#define HDSP_PREF_SYNC_REF(xname, xindex) \
2511{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2511{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2512 .name = xname, \ 2512 .name = xname, \
2513 .index = xindex, \ 2513 .index = xindex, \
2514 .info = snd_hdsp_info_pref_sync_ref, \ 2514 .info = snd_hdsp_info_pref_sync_ref, \
@@ -2641,7 +2641,7 @@ static int snd_hdsp_put_pref_sync_ref(snd_kcontrol_t * kcontrol, snd_ctl_elem_va
2641} 2641}
2642 2642
2643#define HDSP_AUTOSYNC_REF(xname, xindex) \ 2643#define HDSP_AUTOSYNC_REF(xname, xindex) \
2644{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2644{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2645 .name = xname, \ 2645 .name = xname, \
2646 .index = xindex, \ 2646 .index = xindex, \
2647 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 2647 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -2697,7 +2697,7 @@ static int snd_hdsp_get_autosync_ref(snd_kcontrol_t * kcontrol, snd_ctl_elem_val
2697} 2697}
2698 2698
2699#define HDSP_LINE_OUT(xname, xindex) \ 2699#define HDSP_LINE_OUT(xname, xindex) \
2700{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2700{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2701 .name = xname, \ 2701 .name = xname, \
2702 .index = xindex, \ 2702 .index = xindex, \
2703 .info = snd_hdsp_info_line_out, \ 2703 .info = snd_hdsp_info_line_out, \
@@ -2757,7 +2757,7 @@ static int snd_hdsp_put_line_out(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
2757} 2757}
2758 2758
2759#define HDSP_PRECISE_POINTER(xname, xindex) \ 2759#define HDSP_PRECISE_POINTER(xname, xindex) \
2760{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2760{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
2761 .name = xname, \ 2761 .name = xname, \
2762 .index = xindex, \ 2762 .index = xindex, \
2763 .info = snd_hdsp_info_precise_pointer, \ 2763 .info = snd_hdsp_info_precise_pointer, \
@@ -2811,7 +2811,7 @@ static int snd_hdsp_put_precise_pointer(snd_kcontrol_t * kcontrol, snd_ctl_elem_
2811} 2811}
2812 2812
2813#define HDSP_USE_MIDI_TASKLET(xname, xindex) \ 2813#define HDSP_USE_MIDI_TASKLET(xname, xindex) \
2814{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2814{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
2815 .name = xname, \ 2815 .name = xname, \
2816 .index = xindex, \ 2816 .index = xindex, \
2817 .info = snd_hdsp_info_use_midi_tasklet, \ 2817 .info = snd_hdsp_info_use_midi_tasklet, \
@@ -2868,6 +2868,7 @@ static int snd_hdsp_put_use_midi_tasklet(snd_kcontrol_t * kcontrol, snd_ctl_elem
2868{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2868{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
2869 .name = xname, \ 2869 .name = xname, \
2870 .index = xindex, \ 2870 .index = xindex, \
2871 .device = 0, \
2871 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ 2872 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
2872 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 2873 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
2873 .info = snd_hdsp_info_mixer, \ 2874 .info = snd_hdsp_info_mixer, \
@@ -2939,7 +2940,7 @@ static int snd_hdsp_put_mixer(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t *
2939} 2940}
2940 2941
2941#define HDSP_WC_SYNC_CHECK(xname, xindex) \ 2942#define HDSP_WC_SYNC_CHECK(xname, xindex) \
2942{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2943{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2943 .name = xname, \ 2944 .name = xname, \
2944 .index = xindex, \ 2945 .index = xindex, \
2945 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 2946 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -2983,7 +2984,7 @@ static int snd_hdsp_get_wc_sync_check(snd_kcontrol_t * kcontrol, snd_ctl_elem_va
2983} 2984}
2984 2985
2985#define HDSP_SPDIF_SYNC_CHECK(xname, xindex) \ 2986#define HDSP_SPDIF_SYNC_CHECK(xname, xindex) \
2986{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2987{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2987 .name = xname, \ 2988 .name = xname, \
2988 .index = xindex, \ 2989 .index = xindex, \
2989 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 2990 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -3015,7 +3016,7 @@ static int snd_hdsp_get_spdif_sync_check(snd_kcontrol_t * kcontrol, snd_ctl_elem
3015} 3016}
3016 3017
3017#define HDSP_ADATSYNC_SYNC_CHECK(xname, xindex) \ 3018#define HDSP_ADATSYNC_SYNC_CHECK(xname, xindex) \
3018{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 3019{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3019 .name = xname, \ 3020 .name = xname, \
3020 .index = xindex, \ 3021 .index = xindex, \
3021 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 3022 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -3046,7 +3047,7 @@ static int snd_hdsp_get_adatsync_sync_check(snd_kcontrol_t * kcontrol, snd_ctl_e
3046} 3047}
3047 3048
3048#define HDSP_ADAT_SYNC_CHECK \ 3049#define HDSP_ADAT_SYNC_CHECK \
3049{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 3050{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
3050 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 3051 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
3051 .info = snd_hdsp_info_sync_check, \ 3052 .info = snd_hdsp_info_sync_check, \
3052 .get = snd_hdsp_get_adat_sync_check \ 3053 .get = snd_hdsp_get_adat_sync_check \
@@ -3119,7 +3120,7 @@ static snd_kcontrol_new_t snd_hdsp_controls[] = {
3119}, 3120},
3120{ 3121{
3121 .access = SNDRV_CTL_ELEM_ACCESS_READ, 3122 .access = SNDRV_CTL_ELEM_ACCESS_READ,
3122 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 3123 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
3123 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 3124 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
3124 .info = snd_hdsp_control_spdif_mask_info, 3125 .info = snd_hdsp_control_spdif_mask_info,
3125 .get = snd_hdsp_control_spdif_mask_get, 3126 .get = snd_hdsp_control_spdif_mask_get,
@@ -3129,7 +3130,7 @@ static snd_kcontrol_new_t snd_hdsp_controls[] = {
3129}, 3130},
3130{ 3131{
3131 .access = SNDRV_CTL_ELEM_ACCESS_READ, 3132 .access = SNDRV_CTL_ELEM_ACCESS_READ,
3132 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 3133 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
3133 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 3134 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
3134 .info = snd_hdsp_control_spdif_mask_info, 3135 .info = snd_hdsp_control_spdif_mask_info,
3135 .get = snd_hdsp_control_spdif_mask_get, 3136 .get = snd_hdsp_control_spdif_mask_get,
@@ -3146,8 +3147,6 @@ HDSP_SPDIF_NON_AUDIO("IEC958 Non-audio Bit", 0),
3146/* 'Sample Clock Source' complies with the alsa control naming scheme */ 3147/* 'Sample Clock Source' complies with the alsa control naming scheme */
3147HDSP_CLOCK_SOURCE("Sample Clock Source", 0), 3148HDSP_CLOCK_SOURCE("Sample Clock Source", 0),
3148{ 3149{
3149 /* FIXME: should be PCM or MIXER? */
3150 /* .iface = SNDRV_CTL_ELEM_IFACE_PCM, */
3151 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 3150 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
3152 .name = "Sample Clock Source Locking", 3151 .name = "Sample Clock Source Locking",
3153 .info = snd_hdsp_info_clock_source_lock, 3152 .info = snd_hdsp_info_clock_source_lock,
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 9e86d0eb41ce..5d786d113b25 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -65,7 +65,7 @@ module_param_array(enable, bool, NULL, 0444);
65MODULE_PARM_DESC(enable, "Enable/disable specific HDSPM soundcards."); 65MODULE_PARM_DESC(enable, "Enable/disable specific HDSPM soundcards.");
66 66
67module_param_array(precise_ptr, bool, NULL, 0444); 67module_param_array(precise_ptr, bool, NULL, 0444);
68MODULE_PARM_DESC(precise_ptr, "Enable precise pointer, or disable."); 68MODULE_PARM_DESC(precise_ptr, "Enable or disable precise pointer.");
69 69
70module_param_array(line_outs_monitor, bool, NULL, 0444); 70module_param_array(line_outs_monitor, bool, NULL, 0444);
71MODULE_PARM_DESC(line_outs_monitor, 71MODULE_PARM_DESC(line_outs_monitor,
@@ -1104,14 +1104,14 @@ static int snd_hdspm_midi_output_close(snd_rawmidi_substream_t * substream)
1104 return 0; 1104 return 0;
1105} 1105}
1106 1106
1107snd_rawmidi_ops_t snd_hdspm_midi_output = 1107static snd_rawmidi_ops_t snd_hdspm_midi_output =
1108{ 1108{
1109 .open = snd_hdspm_midi_output_open, 1109 .open = snd_hdspm_midi_output_open,
1110 .close = snd_hdspm_midi_output_close, 1110 .close = snd_hdspm_midi_output_close,
1111 .trigger = snd_hdspm_midi_output_trigger, 1111 .trigger = snd_hdspm_midi_output_trigger,
1112}; 1112};
1113 1113
1114snd_rawmidi_ops_t snd_hdspm_midi_input = 1114static snd_rawmidi_ops_t snd_hdspm_midi_input =
1115{ 1115{
1116 .open = snd_hdspm_midi_input_open, 1116 .open = snd_hdspm_midi_input_open,
1117 .close = snd_hdspm_midi_input_close, 1117 .close = snd_hdspm_midi_input_close,
@@ -1168,7 +1168,7 @@ static void hdspm_midi_tasklet(unsigned long arg)
1168/* get the system sample rate which is set */ 1168/* get the system sample rate which is set */
1169 1169
1170#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \ 1170#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \
1171{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1171{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1172 .name = xname, \ 1172 .name = xname, \
1173 .index = xindex, \ 1173 .index = xindex, \
1174 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1174 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1195,7 +1195,7 @@ static int snd_hdspm_get_system_sample_rate(snd_kcontrol_t * kcontrol,
1195} 1195}
1196 1196
1197#define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \ 1197#define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
1198{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 1198{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1199 .name = xname, \ 1199 .name = xname, \
1200 .index = xindex, \ 1200 .index = xindex, \
1201 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1201 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1264,7 +1264,7 @@ static int snd_hdspm_get_autosync_sample_rate(snd_kcontrol_t * kcontrol,
1264} 1264}
1265 1265
1266#define HDSPM_SYSTEM_CLOCK_MODE(xname, xindex) \ 1266#define HDSPM_SYSTEM_CLOCK_MODE(xname, xindex) \
1267{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1267{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1268 .name = xname, \ 1268 .name = xname, \
1269 .index = xindex, \ 1269 .index = xindex, \
1270 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1270 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1310,7 +1310,7 @@ static int snd_hdspm_get_system_clock_mode(snd_kcontrol_t * kcontrol,
1310} 1310}
1311 1311
1312#define HDSPM_CLOCK_SOURCE(xname, xindex) \ 1312#define HDSPM_CLOCK_SOURCE(xname, xindex) \
1313{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \ 1313{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1314 .name = xname, \ 1314 .name = xname, \
1315 .index = xindex, \ 1315 .index = xindex, \
1316 .info = snd_hdspm_info_clock_source, \ 1316 .info = snd_hdspm_info_clock_source, \
@@ -1457,7 +1457,7 @@ static int snd_hdspm_put_clock_source(snd_kcontrol_t * kcontrol,
1457} 1457}
1458 1458
1459#define HDSPM_PREF_SYNC_REF(xname, xindex) \ 1459#define HDSPM_PREF_SYNC_REF(xname, xindex) \
1460{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1460{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1461 .name = xname, \ 1461 .name = xname, \
1462 .index = xindex, \ 1462 .index = xindex, \
1463 .info = snd_hdspm_info_pref_sync_ref, \ 1463 .info = snd_hdspm_info_pref_sync_ref, \
@@ -1547,7 +1547,7 @@ static int snd_hdspm_put_pref_sync_ref(snd_kcontrol_t * kcontrol,
1547} 1547}
1548 1548
1549#define HDSPM_AUTOSYNC_REF(xname, xindex) \ 1549#define HDSPM_AUTOSYNC_REF(xname, xindex) \
1550{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1550{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1551 .name = xname, \ 1551 .name = xname, \
1552 .index = xindex, \ 1552 .index = xindex, \
1553 .access = SNDRV_CTL_ELEM_ACCESS_READ, \ 1553 .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1604,7 +1604,7 @@ static int snd_hdspm_get_autosync_ref(snd_kcontrol_t * kcontrol,
1604} 1604}
1605 1605
1606#define HDSPM_LINE_OUT(xname, xindex) \ 1606#define HDSPM_LINE_OUT(xname, xindex) \
1607{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1607{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1608 .name = xname, \ 1608 .name = xname, \
1609 .index = xindex, \ 1609 .index = xindex, \
1610 .info = snd_hdspm_info_line_out, \ 1610 .info = snd_hdspm_info_line_out, \
@@ -1668,7 +1668,7 @@ static int snd_hdspm_put_line_out(snd_kcontrol_t * kcontrol,
1668} 1668}
1669 1669
1670#define HDSPM_TX_64(xname, xindex) \ 1670#define HDSPM_TX_64(xname, xindex) \
1671{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1671{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1672 .name = xname, \ 1672 .name = xname, \
1673 .index = xindex, \ 1673 .index = xindex, \
1674 .info = snd_hdspm_info_tx_64, \ 1674 .info = snd_hdspm_info_tx_64, \
@@ -1731,7 +1731,7 @@ static int snd_hdspm_put_tx_64(snd_kcontrol_t * kcontrol,
1731} 1731}
1732 1732
1733#define HDSPM_C_TMS(xname, xindex) \ 1733#define HDSPM_C_TMS(xname, xindex) \
1734{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1734{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1735 .name = xname, \ 1735 .name = xname, \
1736 .index = xindex, \ 1736 .index = xindex, \
1737 .info = snd_hdspm_info_c_tms, \ 1737 .info = snd_hdspm_info_c_tms, \
@@ -1794,7 +1794,7 @@ static int snd_hdspm_put_c_tms(snd_kcontrol_t * kcontrol,
1794} 1794}
1795 1795
1796#define HDSPM_SAFE_MODE(xname, xindex) \ 1796#define HDSPM_SAFE_MODE(xname, xindex) \
1797{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1797{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1798 .name = xname, \ 1798 .name = xname, \
1799 .index = xindex, \ 1799 .index = xindex, \
1800 .info = snd_hdspm_info_safe_mode, \ 1800 .info = snd_hdspm_info_safe_mode, \
@@ -1857,7 +1857,7 @@ static int snd_hdspm_put_safe_mode(snd_kcontrol_t * kcontrol,
1857} 1857}
1858 1858
1859#define HDSPM_INPUT_SELECT(xname, xindex) \ 1859#define HDSPM_INPUT_SELECT(xname, xindex) \
1860{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1860{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1861 .name = xname, \ 1861 .name = xname, \
1862 .index = xindex, \ 1862 .index = xindex, \
1863 .info = snd_hdspm_info_input_select, \ 1863 .info = snd_hdspm_info_input_select, \
@@ -1941,6 +1941,7 @@ static int snd_hdspm_put_input_select(snd_kcontrol_t * kcontrol,
1941{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 1941{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
1942 .name = xname, \ 1942 .name = xname, \
1943 .index = xindex, \ 1943 .index = xindex, \
1944 .device = 0, \
1944 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \ 1945 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
1945 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 1946 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
1946 .info = snd_hdspm_info_mixer, \ 1947 .info = snd_hdspm_info_mixer, \
@@ -2124,7 +2125,7 @@ static int snd_hdspm_put_playback_mixer(snd_kcontrol_t * kcontrol,
2124} 2125}
2125 2126
2126#define HDSPM_WC_SYNC_CHECK(xname, xindex) \ 2127#define HDSPM_WC_SYNC_CHECK(xname, xindex) \
2127{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2128{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2128 .name = xname, \ 2129 .name = xname, \
2129 .index = xindex, \ 2130 .index = xindex, \
2130 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 2131 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -2170,7 +2171,7 @@ static int snd_hdspm_get_wc_sync_check(snd_kcontrol_t * kcontrol,
2170 2171
2171 2172
2172#define HDSPM_MADI_SYNC_CHECK(xname, xindex) \ 2173#define HDSPM_MADI_SYNC_CHECK(xname, xindex) \
2173{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \ 2174{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
2174 .name = xname, \ 2175 .name = xname, \
2175 .index = xindex, \ 2176 .index = xindex, \
2176 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 2177 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 1bc9d0df8516..8ee4d6fd6ea7 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -893,7 +893,7 @@ static int snd_rme9652_control_spdif_mask_get(snd_kcontrol_t * kcontrol, snd_ctl
893} 893}
894 894
895#define RME9652_ADAT1_IN(xname, xindex) \ 895#define RME9652_ADAT1_IN(xname, xindex) \
896{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 896{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
897 .info = snd_rme9652_info_adat1_in, \ 897 .info = snd_rme9652_info_adat1_in, \
898 .get = snd_rme9652_get_adat1_in, \ 898 .get = snd_rme9652_get_adat1_in, \
899 .put = snd_rme9652_put_adat1_in } 899 .put = snd_rme9652_put_adat1_in }
@@ -971,7 +971,7 @@ static int snd_rme9652_put_adat1_in(snd_kcontrol_t * kcontrol, snd_ctl_elem_valu
971} 971}
972 972
973#define RME9652_SPDIF_IN(xname, xindex) \ 973#define RME9652_SPDIF_IN(xname, xindex) \
974{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 974{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
975 .info = snd_rme9652_info_spdif_in, \ 975 .info = snd_rme9652_info_spdif_in, \
976 .get = snd_rme9652_get_spdif_in, .put = snd_rme9652_put_spdif_in } 976 .get = snd_rme9652_get_spdif_in, .put = snd_rme9652_put_spdif_in }
977 977
@@ -1042,7 +1042,7 @@ static int snd_rme9652_put_spdif_in(snd_kcontrol_t * kcontrol, snd_ctl_elem_valu
1042} 1042}
1043 1043
1044#define RME9652_SPDIF_OUT(xname, xindex) \ 1044#define RME9652_SPDIF_OUT(xname, xindex) \
1045{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1045{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1046 .info = snd_rme9652_info_spdif_out, \ 1046 .info = snd_rme9652_info_spdif_out, \
1047 .get = snd_rme9652_get_spdif_out, .put = snd_rme9652_put_spdif_out } 1047 .get = snd_rme9652_get_spdif_out, .put = snd_rme9652_put_spdif_out }
1048 1048
@@ -1110,7 +1110,7 @@ static int snd_rme9652_put_spdif_out(snd_kcontrol_t * kcontrol, snd_ctl_elem_val
1110} 1110}
1111 1111
1112#define RME9652_SYNC_MODE(xname, xindex) \ 1112#define RME9652_SYNC_MODE(xname, xindex) \
1113{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1113{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1114 .info = snd_rme9652_info_sync_mode, \ 1114 .info = snd_rme9652_info_sync_mode, \
1115 .get = snd_rme9652_get_sync_mode, .put = snd_rme9652_put_sync_mode } 1115 .get = snd_rme9652_get_sync_mode, .put = snd_rme9652_put_sync_mode }
1116 1116
@@ -1195,7 +1195,7 @@ static int snd_rme9652_put_sync_mode(snd_kcontrol_t * kcontrol, snd_ctl_elem_val
1195} 1195}
1196 1196
1197#define RME9652_SYNC_PREF(xname, xindex) \ 1197#define RME9652_SYNC_PREF(xname, xindex) \
1198{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1198{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1199 .info = snd_rme9652_info_sync_pref, \ 1199 .info = snd_rme9652_info_sync_pref, \
1200 .get = snd_rme9652_get_sync_pref, .put = snd_rme9652_put_sync_pref } 1200 .get = snd_rme9652_get_sync_pref, .put = snd_rme9652_put_sync_pref }
1201 1201
@@ -1340,7 +1340,7 @@ static int snd_rme9652_put_thru(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
1340} 1340}
1341 1341
1342#define RME9652_PASSTHRU(xname, xindex) \ 1342#define RME9652_PASSTHRU(xname, xindex) \
1343{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1343{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1344 .info = snd_rme9652_info_passthru, \ 1344 .info = snd_rme9652_info_passthru, \
1345 .put = snd_rme9652_put_passthru, \ 1345 .put = snd_rme9652_put_passthru, \
1346 .get = snd_rme9652_get_passthru } 1346 .get = snd_rme9652_get_passthru }
@@ -1386,7 +1386,7 @@ static int snd_rme9652_put_passthru(snd_kcontrol_t * kcontrol, snd_ctl_elem_valu
1386/* Read-only switches */ 1386/* Read-only switches */
1387 1387
1388#define RME9652_SPDIF_RATE(xname, xindex) \ 1388#define RME9652_SPDIF_RATE(xname, xindex) \
1389{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1389{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1390 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 1390 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
1391 .info = snd_rme9652_info_spdif_rate, \ 1391 .info = snd_rme9652_info_spdif_rate, \
1392 .get = snd_rme9652_get_spdif_rate } 1392 .get = snd_rme9652_get_spdif_rate }
@@ -1411,7 +1411,7 @@ static int snd_rme9652_get_spdif_rate(snd_kcontrol_t * kcontrol, snd_ctl_elem_va
1411} 1411}
1412 1412
1413#define RME9652_ADAT_SYNC(xname, xindex, xidx) \ 1413#define RME9652_ADAT_SYNC(xname, xindex, xidx) \
1414{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1414{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1415 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 1415 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
1416 .info = snd_rme9652_info_adat_sync, \ 1416 .info = snd_rme9652_info_adat_sync, \
1417 .get = snd_rme9652_get_adat_sync, .private_value = xidx } 1417 .get = snd_rme9652_get_adat_sync, .private_value = xidx }
@@ -1447,7 +1447,7 @@ static int snd_rme9652_get_adat_sync(snd_kcontrol_t * kcontrol, snd_ctl_elem_val
1447} 1447}
1448 1448
1449#define RME9652_TC_VALID(xname, xindex) \ 1449#define RME9652_TC_VALID(xname, xindex) \
1450{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \ 1450{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
1451 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ 1451 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
1452 .info = snd_rme9652_info_tc_valid, \ 1452 .info = snd_rme9652_info_tc_valid, \
1453 .get = snd_rme9652_get_tc_valid } 1453 .get = snd_rme9652_get_tc_valid }
@@ -1545,7 +1545,7 @@ static snd_kcontrol_new_t snd_rme9652_controls[] = {
1545}, 1545},
1546{ 1546{
1547 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1547 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1548 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1548 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1549 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), 1549 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
1550 .info = snd_rme9652_control_spdif_mask_info, 1550 .info = snd_rme9652_control_spdif_mask_info,
1551 .get = snd_rme9652_control_spdif_mask_get, 1551 .get = snd_rme9652_control_spdif_mask_get,
@@ -1555,7 +1555,7 @@ static snd_kcontrol_new_t snd_rme9652_controls[] = {
1555}, 1555},
1556{ 1556{
1557 .access = SNDRV_CTL_ELEM_ACCESS_READ, 1557 .access = SNDRV_CTL_ELEM_ACCESS_READ,
1558 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1558 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1559 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK), 1559 .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
1560 .info = snd_rme9652_control_spdif_mask_info, 1560 .info = snd_rme9652_control_spdif_mask_info,
1561 .get = snd_rme9652_control_spdif_mask_get, 1561 .get = snd_rme9652_control_spdif_mask_get,
@@ -1568,7 +1568,7 @@ RME9652_SPDIF_OUT("IEC958 Output also on ADAT1", 0),
1568RME9652_SYNC_MODE("Sync Mode", 0), 1568RME9652_SYNC_MODE("Sync Mode", 0),
1569RME9652_SYNC_PREF("Preferred Sync Source", 0), 1569RME9652_SYNC_PREF("Preferred Sync Source", 0),
1570{ 1570{
1571 .iface = SNDRV_CTL_ELEM_IFACE_PCM, 1571 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1572 .name = "Channels Thru", 1572 .name = "Channels Thru",
1573 .index = 0, 1573 .index = 0,
1574 .info = snd_rme9652_info_thru, 1574 .info = snd_rme9652_info_thru,
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 29d89bfba0a4..f30d9d947862 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -1689,7 +1689,7 @@ static snd_pcm_hardware_t snd_trident_playback =
1689 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1689 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1690 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1690 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1691 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START | 1691 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
1692 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 1692 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
1693 .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | 1693 .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
1694 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), 1694 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
1695 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, 1695 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
@@ -1714,7 +1714,7 @@ static snd_pcm_hardware_t snd_trident_capture =
1714 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1714 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1715 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1715 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1716 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START | 1716 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
1717 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 1717 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
1718 .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | 1718 .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
1719 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), 1719 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
1720 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, 1720 .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
@@ -1739,7 +1739,7 @@ static snd_pcm_hardware_t snd_trident_foldback =
1739 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1739 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1740 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1740 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1741 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START | 1741 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
1742 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 1742 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
1743 .formats = SNDRV_PCM_FMTBIT_S16_LE, 1743 .formats = SNDRV_PCM_FMTBIT_S16_LE,
1744 .rates = SNDRV_PCM_RATE_48000, 1744 .rates = SNDRV_PCM_RATE_48000,
1745 .rate_min = 48000, 1745 .rate_min = 48000,
@@ -1763,7 +1763,7 @@ static snd_pcm_hardware_t snd_trident_spdif =
1763 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1763 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1764 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1764 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1765 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START | 1765 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
1766 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 1766 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
1767 .formats = SNDRV_PCM_FMTBIT_S16_LE, 1767 .formats = SNDRV_PCM_FMTBIT_S16_LE,
1768 .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | 1768 .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
1769 SNDRV_PCM_RATE_48000), 1769 SNDRV_PCM_RATE_48000),
@@ -1784,7 +1784,7 @@ static snd_pcm_hardware_t snd_trident_spdif_7018 =
1784 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1784 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1785 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1785 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1786 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START | 1786 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
1787 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), 1787 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
1788 .formats = SNDRV_PCM_FMTBIT_S16_LE, 1788 .formats = SNDRV_PCM_FMTBIT_S16_LE,
1789 .rates = SNDRV_PCM_RATE_48000, 1789 .rates = SNDRV_PCM_RATE_48000,
1790 .rate_min = 48000, 1790 .rate_min = 48000,
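
The five trident hunks comment SNDRV_PCM_INFO_RESUME out of the hardware descriptions, withdrawing the claim that the driver can transparently restart a stream after suspend; without the flag, a resume request fails and applications are expected to re-prepare and restart the stream themselves. A self-contained sketch of the capability check (the flag values here are illustrative stand-ins, not the kernel's SNDRV_PCM_INFO_* bits):

#include <stdio.h>

#define INFO_PAUSE  (1u << 0)
#define INFO_RESUME (1u << 1)

static const char *resume_policy(unsigned int info)
{
	/* without the RESUME bit, the resume request is rejected and
	 * the application falls back to prepare-and-restart */
	return (info & INFO_RESUME) ? "driver restarts DMA transparently"
				    : "application must re-prepare";
}

int main(void)
{
	unsigned int before = INFO_PAUSE | INFO_RESUME;
	unsigned int after  = INFO_PAUSE;   /* RESUME commented out */

	printf("before: %s\n", resume_policy(before));
	printf("after:  %s\n", resume_policy(after));
	return 0;
}
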
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 4889600387c8..56c6e52d7264 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -663,10 +663,12 @@ static int snd_via82xx_pcm_trigger(snd_pcm_substream_t * substream, int cmd)
663 val = 0; 663 val = 0;
664 switch (cmd) { 664 switch (cmd) {
665 case SNDRV_PCM_TRIGGER_START: 665 case SNDRV_PCM_TRIGGER_START:
666 case SNDRV_PCM_TRIGGER_RESUME:
666 val |= VIA_REG_CTRL_START; 667 val |= VIA_REG_CTRL_START;
667 viadev->running = 1; 668 viadev->running = 1;
668 break; 669 break;
669 case SNDRV_PCM_TRIGGER_STOP: 670 case SNDRV_PCM_TRIGGER_STOP:
671 case SNDRV_PCM_TRIGGER_SUSPEND:
670 val = VIA_REG_CTRL_TERMINATE; 672 val = VIA_REG_CTRL_TERMINATE;
671 viadev->running = 0; 673 viadev->running = 0;
672 break; 674 break;
@@ -929,12 +931,12 @@ static int snd_via8233_playback_prepare(snd_pcm_substream_t *substream)
929 931
930 if ((rate_changed = via_lock_rate(&chip->rates[0], ac97_rate)) < 0) 932 if ((rate_changed = via_lock_rate(&chip->rates[0], ac97_rate)) < 0)
931 return rate_changed; 933 return rate_changed;
932 if (rate_changed) { 934 if (rate_changed)
933 snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE, 935 snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE,
934 chip->no_vra ? 48000 : runtime->rate); 936 chip->no_vra ? 48000 : runtime->rate);
935 snd_ac97_set_rate(chip->ac97, AC97_SPDIF, 937 if (chip->spdif_on && viadev->reg_offset == 0x30)
936 chip->no_vra ? 48000 : runtime->rate); 938 snd_ac97_set_rate(chip->ac97, AC97_SPDIF, runtime->rate);
937 } 939
938 if (runtime->rate == 48000) 940 if (runtime->rate == 48000)
939 rbits = 0xfffff; 941 rbits = 0xfffff;
940 else 942 else
@@ -1035,7 +1037,7 @@ static snd_pcm_hardware_t snd_via82xx_hw =
1035 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 1037 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1036 SNDRV_PCM_INFO_BLOCK_TRANSFER | 1038 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1037 SNDRV_PCM_INFO_MMAP_VALID | 1039 SNDRV_PCM_INFO_MMAP_VALID |
1038 SNDRV_PCM_INFO_RESUME | 1040 /* SNDRV_PCM_INFO_RESUME | */
1039 SNDRV_PCM_INFO_PAUSE), 1041 SNDRV_PCM_INFO_PAUSE),
1040 .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, 1042 .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
1041 .rates = SNDRV_PCM_RATE_48000, 1043 .rates = SNDRV_PCM_RATE_48000,
@@ -1484,7 +1486,7 @@ static int snd_via8233_dxs3_spdif_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_val
1484} 1486}
1485 1487
1486static snd_kcontrol_new_t snd_via8233_dxs3_spdif_control __devinitdata = { 1488static snd_kcontrol_new_t snd_via8233_dxs3_spdif_control __devinitdata = {
1487 .name = "IEC958 Output Switch", 1489 .name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
1488 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1490 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1489 .info = snd_via8233_dxs3_spdif_info, 1491 .info = snd_via8233_dxs3_spdif_info,
1490 .get = snd_via8233_dxs3_spdif_get, 1492 .get = snd_via8233_dxs3_spdif_get,
@@ -2153,6 +2155,7 @@ static int __devinit check_dxs_list(struct pci_dev *pci)
2153 { .subvendor = 0x1019, .subdevice = 0x0a81, .action = VIA_DXS_NO_VRA }, /* ECS K7VTA3 v8.0 */ 2155 { .subvendor = 0x1019, .subdevice = 0x0a81, .action = VIA_DXS_NO_VRA }, /* ECS K7VTA3 v8.0 */
2154 { .subvendor = 0x1019, .subdevice = 0x0a85, .action = VIA_DXS_NO_VRA }, /* ECS L7VMM2 */ 2156 { .subvendor = 0x1019, .subdevice = 0x0a85, .action = VIA_DXS_NO_VRA }, /* ECS L7VMM2 */
2155 { .subvendor = 0x1025, .subdevice = 0x0033, .action = VIA_DXS_NO_VRA }, /* Acer Inspire 1353LM */ 2157 { .subvendor = 0x1025, .subdevice = 0x0033, .action = VIA_DXS_NO_VRA }, /* Acer Inspire 1353LM */
2158 { .subvendor = 0x1025, .subdevice = 0x0046, .action = VIA_DXS_SRC }, /* Acer Aspire 1524 WLMi */
2156 { .subvendor = 0x1043, .subdevice = 0x8095, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8X (FIXME: possibly VIA_DXS_ENABLE?)*/ 2159 { .subvendor = 0x1043, .subdevice = 0x8095, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8X (FIXME: possibly VIA_DXS_ENABLE?)*/
2157 { .subvendor = 0x1043, .subdevice = 0x80a1, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8-X */ 2160 { .subvendor = 0x1043, .subdevice = 0x80a1, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8-X */
2158 { .subvendor = 0x1043, .subdevice = 0x80b0, .action = VIA_DXS_NO_VRA }, /* ASUS A7V600 & K8V*/ 2161 { .subvendor = 0x1043, .subdevice = 0x80b0, .action = VIA_DXS_NO_VRA }, /* ASUS A7V600 & K8V*/
@@ -2168,10 +2171,12 @@ static int __devinit check_dxs_list(struct pci_dev *pci)
2168 { .subvendor = 0x1297, .subdevice = 0xc160, .action = VIA_DXS_ENABLE }, /* Shuttle SK41G */ 2171 { .subvendor = 0x1297, .subdevice = 0xc160, .action = VIA_DXS_ENABLE }, /* Shuttle SK41G */
2169 { .subvendor = 0x1458, .subdevice = 0xa002, .action = VIA_DXS_ENABLE }, /* Gigabyte GA-7VAXP */ 2172 { .subvendor = 0x1458, .subdevice = 0xa002, .action = VIA_DXS_ENABLE }, /* Gigabyte GA-7VAXP */
2170 { .subvendor = 0x1462, .subdevice = 0x0080, .action = VIA_DXS_SRC }, /* MSI K8T Neo-FIS2R */ 2173 { .subvendor = 0x1462, .subdevice = 0x0080, .action = VIA_DXS_SRC }, /* MSI K8T Neo-FIS2R */
2174 { .subvendor = 0x1462, .subdevice = 0x0430, .action = VIA_DXS_SRC }, /* MSI 7142 (K8MM-V) */
2171 { .subvendor = 0x1462, .subdevice = 0x3800, .action = VIA_DXS_ENABLE }, /* MSI KT266 */ 2175 { .subvendor = 0x1462, .subdevice = 0x3800, .action = VIA_DXS_ENABLE }, /* MSI KT266 */
2172 { .subvendor = 0x1462, .subdevice = 0x5901, .action = VIA_DXS_NO_VRA }, /* MSI KT6 Delta-SR */ 2176 { .subvendor = 0x1462, .subdevice = 0x5901, .action = VIA_DXS_NO_VRA }, /* MSI KT6 Delta-SR */
2173 { .subvendor = 0x1462, .subdevice = 0x7023, .action = VIA_DXS_NO_VRA }, /* MSI K8T Neo2-FI */ 2177 { .subvendor = 0x1462, .subdevice = 0x7023, .action = VIA_DXS_NO_VRA }, /* MSI K8T Neo2-FI */
2174 { .subvendor = 0x1462, .subdevice = 0x7120, .action = VIA_DXS_ENABLE }, /* MSI KT4V */ 2178 { .subvendor = 0x1462, .subdevice = 0x7120, .action = VIA_DXS_ENABLE }, /* MSI KT4V */
2179 { .subvendor = 0x1462, .subdevice = 0x7142, .action = VIA_DXS_ENABLE }, /* MSI K8MM-V */
2175 { .subvendor = 0x147b, .subdevice = 0x1401, .action = VIA_DXS_ENABLE }, /* ABIT KD7(-RAID) */ 2180 { .subvendor = 0x147b, .subdevice = 0x1401, .action = VIA_DXS_ENABLE }, /* ABIT KD7(-RAID) */
2176 { .subvendor = 0x147b, .subdevice = 0x1411, .action = VIA_DXS_ENABLE }, /* ABIT VA-20 */ 2181 { .subvendor = 0x147b, .subdevice = 0x1411, .action = VIA_DXS_ENABLE }, /* ABIT VA-20 */
2177 { .subvendor = 0x147b, .subdevice = 0x1413, .action = VIA_DXS_ENABLE }, /* ABIT KV8 Pro */ 2182 { .subvendor = 0x147b, .subdevice = 0x1413, .action = VIA_DXS_ENABLE }, /* ABIT KV8 Pro */
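
Rather than only withdrawing the capability bit, via82xx also folds the power-management commands into its existing trigger paths: RESUME programs the controller like a fresh start, SUSPEND tears it down like a stop, while INFO_RESUME stays commented out above pending full state restore. A minimal standalone sketch of that mapping, with illustrative stand-ins for the SNDRV_PCM_TRIGGER_* commands and VIA_REG_CTRL_* register bits:

#include <stdio.h>

enum trigger { TRIG_START, TRIG_RESUME, TRIG_STOP, TRIG_SUSPEND };

#define CTRL_START     0x01
#define CTRL_TERMINATE 0x40

struct dev_state { int running; unsigned int ctrl; };

static int pcm_trigger(struct dev_state *dev, enum trigger cmd)
{
	switch (cmd) {
	case TRIG_START:
	case TRIG_RESUME:          /* resume restarts DMA like a start */
		dev->ctrl |= CTRL_START;
		dev->running = 1;
		break;
	case TRIG_STOP:
	case TRIG_SUSPEND:         /* suspend tears down like a stop */
		dev->ctrl = CTRL_TERMINATE;
		dev->running = 0;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	struct dev_state dev = { 0, 0 };

	pcm_trigger(&dev, TRIG_RESUME);
	printf("running=%d ctrl=0x%02x\n", dev.running, dev.ctrl);
	return 0;
}
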
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 4a9779cc9733..5872d438a04a 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -521,6 +521,7 @@ static int snd_via82xx_pcm_trigger(snd_pcm_substream_t * substream, int cmd)
521 521
522 switch (cmd) { 522 switch (cmd) {
523 case SNDRV_PCM_TRIGGER_START: 523 case SNDRV_PCM_TRIGGER_START:
524 case SNDRV_PCM_TRIGGER_SUSPEND:
524 val |= VIA_REG_CTRL_START; 525 val |= VIA_REG_CTRL_START;
525 viadev->running = 1; 526 viadev->running = 1;
526 break; 527 break;
@@ -697,7 +698,7 @@ static snd_pcm_hardware_t snd_via82xx_hw =
697 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 698 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
698 SNDRV_PCM_INFO_BLOCK_TRANSFER | 699 SNDRV_PCM_INFO_BLOCK_TRANSFER |
699 SNDRV_PCM_INFO_MMAP_VALID | 700 SNDRV_PCM_INFO_MMAP_VALID |
700 SNDRV_PCM_INFO_RESUME | 701 /* SNDRV_PCM_INFO_RESUME | */
701 SNDRV_PCM_INFO_PAUSE), 702 SNDRV_PCM_INFO_PAUSE),
702 .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, 703 .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
703 .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT, 704 .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT,
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index d54f88a1b525..054836412dc4 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -321,6 +321,26 @@ static void snd_ymfpci_pcm_interrupt(ymfpci_t *chip, ymfpci_voice_t *voice)
321 snd_pcm_period_elapsed(ypcm->substream); 321 snd_pcm_period_elapsed(ypcm->substream);
322 spin_lock(&chip->reg_lock); 322 spin_lock(&chip->reg_lock);
323 } 323 }
324
325 if (unlikely(ypcm->update_pcm_vol)) {
326 unsigned int subs = ypcm->substream->number;
327 unsigned int next_bank = 1 - chip->active_bank;
328 snd_ymfpci_playback_bank_t *bank;
329 u32 volume;
330
331 bank = &voice->bank[next_bank];
332 volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15);
333 bank->left_gain_end = volume;
334 if (ypcm->output_rear)
335 bank->eff2_gain_end = volume;
336 if (ypcm->voices[1])
337 bank = &ypcm->voices[1]->bank[next_bank];
338 volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15);
339 bank->right_gain_end = volume;
340 if (ypcm->output_rear)
341 bank->eff3_gain_end = volume;
342 ypcm->update_pcm_vol--;
343 }
324 } 344 }
325 spin_unlock(&chip->reg_lock); 345 spin_unlock(&chip->reg_lock);
326} 346}
@@ -451,87 +471,74 @@ static int snd_ymfpci_pcm_voice_alloc(ymfpci_pcm_t *ypcm, int voices)
451 return 0; 471 return 0;
452} 472}
453 473
454static void snd_ymfpci_pcm_init_voice(ymfpci_voice_t *voice, int stereo, 474static void snd_ymfpci_pcm_init_voice(ymfpci_pcm_t *ypcm, unsigned int voiceidx,
455 int rate, int w_16, unsigned long addr, 475 snd_pcm_runtime_t *runtime,
456 unsigned int end, 476 int has_pcm_volume)
457 int output_front, int output_rear)
458{ 477{
478 ymfpci_voice_t *voice = ypcm->voices[voiceidx];
459 u32 format; 479 u32 format;
460 u32 delta = snd_ymfpci_calc_delta(rate); 480 u32 delta = snd_ymfpci_calc_delta(runtime->rate);
461 u32 lpfQ = snd_ymfpci_calc_lpfQ(rate); 481 u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate);
462 u32 lpfK = snd_ymfpci_calc_lpfK(rate); 482 u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate);
463 snd_ymfpci_playback_bank_t *bank; 483 snd_ymfpci_playback_bank_t *bank;
464 unsigned int nbank; 484 unsigned int nbank;
485 u32 vol_left, vol_right;
486 u8 use_left, use_right;
465 487
466 snd_assert(voice != NULL, return); 488 snd_assert(voice != NULL, return);
467 format = (stereo ? 0x00010000 : 0) | (w_16 ? 0 : 0x80000000); 489 if (runtime->channels == 1) {
490 use_left = 1;
491 use_right = 1;
492 } else {
493 use_left = (voiceidx & 1) == 0;
494 use_right = !use_left;
495 }
496 if (has_pcm_volume) {
497 vol_left = cpu_to_le32(ypcm->chip->pcm_mixer
498 [ypcm->substream->number].left << 15);
499 vol_right = cpu_to_le32(ypcm->chip->pcm_mixer
500 [ypcm->substream->number].right << 15);
501 } else {
502 vol_left = cpu_to_le32(0x40000000);
503 vol_right = cpu_to_le32(0x40000000);
504 }
505 format = runtime->channels == 2 ? 0x00010000 : 0;
506 if (snd_pcm_format_width(runtime->format) == 8)
507 format |= 0x80000000;
508 if (runtime->channels == 2 && (voiceidx & 1) != 0)
509 format |= 1;
468 for (nbank = 0; nbank < 2; nbank++) { 510 for (nbank = 0; nbank < 2; nbank++) {
469 bank = &voice->bank[nbank]; 511 bank = &voice->bank[nbank];
512 memset(bank, 0, sizeof(*bank));
470 bank->format = cpu_to_le32(format); 513 bank->format = cpu_to_le32(format);
471 bank->loop_default = 0; 514 bank->base = cpu_to_le32(runtime->dma_addr);
472 bank->base = cpu_to_le32(addr); 515 bank->loop_end = cpu_to_le32(ypcm->buffer_size);
473 bank->loop_start = 0;
474 bank->loop_end = cpu_to_le32(end);
475 bank->loop_frac = 0;
476 bank->eg_gain_end = cpu_to_le32(0x40000000);
477 bank->lpfQ = cpu_to_le32(lpfQ); 516 bank->lpfQ = cpu_to_le32(lpfQ);
478 bank->status = 0;
479 bank->num_of_frames = 0;
480 bank->loop_count = 0;
481 bank->start = 0;
482 bank->start_frac = 0;
483 bank->delta = 517 bank->delta =
484 bank->delta_end = cpu_to_le32(delta); 518 bank->delta_end = cpu_to_le32(delta);
485 bank->lpfK = 519 bank->lpfK =
486 bank->lpfK_end = cpu_to_le32(lpfK); 520 bank->lpfK_end = cpu_to_le32(lpfK);
487 bank->eg_gain = cpu_to_le32(0x40000000); 521 bank->eg_gain =
488 bank->lpfD1 = 522 bank->eg_gain_end = cpu_to_le32(0x40000000);
489 bank->lpfD2 = 0; 523
490 524 if (ypcm->output_front) {
491 bank->left_gain = 525 if (use_left) {
492 bank->right_gain = 526 bank->left_gain =
493 bank->left_gain_end = 527 bank->left_gain_end = vol_left;
494 bank->right_gain_end = 528 }
495 bank->eff1_gain = 529 if (use_right) {
496 bank->eff2_gain =
497 bank->eff3_gain =
498 bank->eff1_gain_end =
499 bank->eff2_gain_end =
500 bank->eff3_gain_end = 0;
501
502 if (!stereo) {
503 if (output_front) {
504 bank->left_gain =
505 bank->right_gain = 530 bank->right_gain =
506 bank->left_gain_end = 531 bank->right_gain_end = vol_right;
507 bank->right_gain_end = cpu_to_le32(0x40000000);
508 } 532 }
509 if (output_rear) { 533 }
534 if (ypcm->output_rear) {
535 if (use_left) {
510 bank->eff2_gain = 536 bank->eff2_gain =
511 bank->eff2_gain_end = 537 bank->eff2_gain_end = vol_left;
512 bank->eff3_gain =
513 bank->eff3_gain_end = cpu_to_le32(0x40000000);
514 }
515 } else {
516 if (output_front) {
517 if ((voice->number & 1) == 0) {
518 bank->left_gain =
519 bank->left_gain_end = cpu_to_le32(0x40000000);
520 } else {
521 bank->format |= cpu_to_le32(1);
522 bank->right_gain =
523 bank->right_gain_end = cpu_to_le32(0x40000000);
524 }
525 } 538 }
526 if (output_rear) { 539 if (use_right) {
527 if ((voice->number & 1) == 0) { 540 bank->eff3_gain =
528 bank->eff3_gain = 541 bank->eff3_gain_end = vol_right;
529 bank->eff3_gain_end = cpu_to_le32(0x40000000);
530 } else {
531 bank->format |= cpu_to_le32(1);
532 bank->eff2_gain =
533 bank->eff2_gain_end = cpu_to_le32(0x40000000);
534 }
535 } 542 }
536 } 543 }
537 } 544 }
@@ -613,7 +620,7 @@ static int snd_ymfpci_playback_hw_free(snd_pcm_substream_t * substream)
613 620
614static int snd_ymfpci_playback_prepare(snd_pcm_substream_t * substream) 621static int snd_ymfpci_playback_prepare(snd_pcm_substream_t * substream)
615{ 622{
616 // ymfpci_t *chip = snd_pcm_substream_chip(substream); 623 ymfpci_t *chip = snd_pcm_substream_chip(substream);
617 snd_pcm_runtime_t *runtime = substream->runtime; 624 snd_pcm_runtime_t *runtime = substream->runtime;
618 ymfpci_pcm_t *ypcm = runtime->private_data; 625 ymfpci_pcm_t *ypcm = runtime->private_data;
619 unsigned int nvoice; 626 unsigned int nvoice;
@@ -623,14 +630,8 @@ static int snd_ymfpci_playback_prepare(snd_pcm_substream_t * substream)
623 ypcm->period_pos = 0; 630 ypcm->period_pos = 0;
624 ypcm->last_pos = 0; 631 ypcm->last_pos = 0;
625 for (nvoice = 0; nvoice < runtime->channels; nvoice++) 632 for (nvoice = 0; nvoice < runtime->channels; nvoice++)
626 snd_ymfpci_pcm_init_voice(ypcm->voices[nvoice], 633 snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime,
627 runtime->channels == 2, 634 substream->pcm == chip->pcm);
628 runtime->rate,
629 snd_pcm_format_width(runtime->format) == 16,
630 runtime->dma_addr,
631 ypcm->buffer_size,
632 ypcm->output_front,
633 ypcm->output_rear);
634 return 0; 635 return 0;
635} 636}
636 637
@@ -882,6 +883,7 @@ static int snd_ymfpci_playback_open(snd_pcm_substream_t * substream)
882 ymfpci_t *chip = snd_pcm_substream_chip(substream); 883 ymfpci_t *chip = snd_pcm_substream_chip(substream);
883 snd_pcm_runtime_t *runtime = substream->runtime; 884 snd_pcm_runtime_t *runtime = substream->runtime;
884 ymfpci_pcm_t *ypcm; 885 ymfpci_pcm_t *ypcm;
886 snd_kcontrol_t *kctl;
885 int err; 887 int err;
886 888
887 if ((err = snd_ymfpci_playback_open_1(substream)) < 0) 889 if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
@@ -895,6 +897,10 @@ static int snd_ymfpci_playback_open(snd_pcm_substream_t * substream)
895 chip->rear_opened++; 897 chip->rear_opened++;
896 } 898 }
897 spin_unlock_irq(&chip->reg_lock); 899 spin_unlock_irq(&chip->reg_lock);
900
901 kctl = chip->pcm_mixer[substream->number].ctl;
902 kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
903 snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
898 return 0; 904 return 0;
899} 905}
900 906
@@ -987,6 +993,7 @@ static int snd_ymfpci_playback_close(snd_pcm_substream_t * substream)
987{ 993{
988 ymfpci_t *chip = snd_pcm_substream_chip(substream); 994 ymfpci_t *chip = snd_pcm_substream_chip(substream);
989 ymfpci_pcm_t *ypcm = substream->runtime->private_data; 995 ymfpci_pcm_t *ypcm = substream->runtime->private_data;
996 snd_kcontrol_t *kctl;
990 997
991 spin_lock_irq(&chip->reg_lock); 998 spin_lock_irq(&chip->reg_lock);
992 if (ypcm->output_rear && chip->rear_opened > 0) { 999 if (ypcm->output_rear && chip->rear_opened > 0) {
@@ -994,6 +1001,9 @@ static int snd_ymfpci_playback_close(snd_pcm_substream_t * substream)
994 ymfpci_close_extension(chip); 1001 ymfpci_close_extension(chip);
995 } 1002 }
996 spin_unlock_irq(&chip->reg_lock); 1003 spin_unlock_irq(&chip->reg_lock);
1004 kctl = chip->pcm_mixer[substream->number].ctl;
1005 kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
1006 snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
997 return snd_ymfpci_playback_close_1(substream); 1007 return snd_ymfpci_playback_close_1(substream);
998} 1008}
999 1009
@@ -1665,6 +1675,66 @@ static snd_kcontrol_new_t snd_ymfpci_rear_shared __devinitdata = {
1665 .private_value = 2, 1675 .private_value = 2,
1666}; 1676};
1667 1677
1678/*
1679 * PCM voice volume
1680 */
1681
1682static int snd_ymfpci_pcm_vol_info(snd_kcontrol_t *kcontrol,
1683 snd_ctl_elem_info_t *uinfo)
1684{
1685 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
1686 uinfo->count = 2;
1687 uinfo->value.integer.min = 0;
1688 uinfo->value.integer.max = 0x8000;
1689 return 0;
1690}
1691
1692static int snd_ymfpci_pcm_vol_get(snd_kcontrol_t *kcontrol,
1693 snd_ctl_elem_value_t *ucontrol)
1694{
1695 ymfpci_t *chip = snd_kcontrol_chip(kcontrol);
1696 unsigned int subs = kcontrol->id.subdevice;
1697
1698 ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left;
1699 ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right;
1700 return 0;
1701}
1702
1703static int snd_ymfpci_pcm_vol_put(snd_kcontrol_t *kcontrol,
1704 snd_ctl_elem_value_t *ucontrol)
1705{
1706 ymfpci_t *chip = snd_kcontrol_chip(kcontrol);
1707 unsigned int subs = kcontrol->id.subdevice;
1708 snd_pcm_substream_t *substream;
1709 unsigned long flags;
1710
1711 if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left ||
1712 ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) {
1713 chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0];
1714 chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1];
1715
1716 substream = (snd_pcm_substream_t *)kcontrol->private_value;
1717 spin_lock_irqsave(&chip->voice_lock, flags);
1718 if (substream->runtime && substream->runtime->private_data) {
1719 ymfpci_pcm_t *ypcm = substream->runtime->private_data;
1720 ypcm->update_pcm_vol = 2;
1721 }
1722 spin_unlock_irqrestore(&chip->voice_lock, flags);
1723 return 1;
1724 }
1725 return 0;
1726}
1727
1728static snd_kcontrol_new_t snd_ymfpci_pcm_volume __devinitdata = {
1729 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1730 .name = "PCM Playback Volume",
1731 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
1732 SNDRV_CTL_ELEM_ACCESS_INACTIVE,
1733 .info = snd_ymfpci_pcm_vol_info,
1734 .get = snd_ymfpci_pcm_vol_get,
1735 .put = snd_ymfpci_pcm_vol_put,
1736};
1737
1668 1738
1669/* 1739/*
1670 * Mixer routines 1740 * Mixer routines
@@ -1686,6 +1756,7 @@ int __devinit snd_ymfpci_mixer(ymfpci_t *chip, int rear_switch)
1686{ 1756{
1687 ac97_template_t ac97; 1757 ac97_template_t ac97;
1688 snd_kcontrol_t *kctl; 1758 snd_kcontrol_t *kctl;
1759 snd_pcm_substream_t *substream;
1689 unsigned int idx; 1760 unsigned int idx;
1690 int err; 1761 int err;
1691 static ac97_bus_ops_t ops = { 1762 static ac97_bus_ops_t ops = {
@@ -1739,6 +1810,23 @@ int __devinit snd_ymfpci_mixer(ymfpci_t *chip, int rear_switch)
1739 return err; 1810 return err;
1740 } 1811 }
1741 1812
1813 /* per-voice volume */
1814 substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
1815 for (idx = 0; idx < 32; ++idx) {
1816 kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip);
1817 if (!kctl)
1818 return -ENOMEM;
1819 kctl->id.device = chip->pcm->device;
1820 kctl->id.subdevice = idx;
1821 kctl->private_value = (unsigned long)substream;
1822 if ((err = snd_ctl_add(chip->card, kctl)) < 0)
1823 return err;
1824 chip->pcm_mixer[idx].left = 0x8000;
1825 chip->pcm_mixer[idx].right = 0x8000;
1826 chip->pcm_mixer[idx].ctl = kctl;
1827 substream = substream->next;
1828 }
1829
1742 return 0; 1830 return 0;
1743} 1831}
1744 1832
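
Taken together, the ymfpci hunks introduce one "PCM Playback Volume" control per playback substream: each control is created inactive, keyed by id.subdevice, activated when its substream opens, and a volume write merely flags a pending change that the interrupt handler applies to the inactive voice bank. A self-contained sketch of that deferred, subdevice-indexed update (simplified types, no real voice banks or locking):

#include <stdio.h>

#define NUM_SUBSTREAMS 32
#define VOL_MAX 0x8000

struct pcm_mixer { unsigned short left, right; int pending; };

static struct pcm_mixer mixer[NUM_SUBSTREAMS];

/* mixer "put" path: record the value and flag the deferred update */
static int vol_put(unsigned int subdev, unsigned short l, unsigned short r)
{
	struct pcm_mixer *m = &mixer[subdev];

	if (m->left == l && m->right == r)
		return 0;          /* no change */
	m->left = l;
	m->right = r;
	m->pending = 2;            /* apply once per hardware bank */
	return 1;                  /* value changed */
}

/* simulated interrupt path, one call per bank swap; the real driver
 * writes left/right gain into the inactive voice bank here */
static void irq_apply(unsigned int subdev)
{
	struct pcm_mixer *m = &mixer[subdev];

	if (m->pending)
		m->pending--;
}

int main(void)
{
	vol_put(3, VOL_MAX / 2, VOL_MAX / 2);
	irq_apply(3);
	irq_apply(3);
	printf("subdev 3: %#x/%#x pending=%d\n",
	       (unsigned)mixer[3].left, (unsigned)mixer[3].right,
	       mixer[3].pending);
	return 0;
}

Deferring the gain write to the interrupt path keeps the put callback cheap and ensures the hardware never sees a bank being rewritten while it is the active one.
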
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 3a82161d3b24..1e8f16b4c073 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -297,6 +297,7 @@ static void vxpocket_config(dev_link_t *link)
297 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 297 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
298 298
299 chip->dev = &handle_to_dev(link->handle); 299 chip->dev = &handle_to_dev(link->handle);
300 snd_card_set_dev(chip->card, chip->dev);
300 301
301 if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0) 302 if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
302 goto failed; 303 goto failed;
@@ -376,7 +377,7 @@ static int vxpocket_event(event_t event, int priority, event_callback_args_t *ar
376 377
377/* 378/*
378 */ 379 */
379static dev_link_t *vxp_attach(void) 380static dev_link_t *vxpocket_attach(void)
380{ 381{
381 snd_card_t *card; 382 snd_card_t *card;
382 struct snd_vxpocket *vxp; 383 struct snd_vxpocket *vxp;
@@ -407,7 +408,7 @@ static dev_link_t *vxp_attach(void)
407 return NULL; 408 return NULL;
408 } 409 }
409 410
410 vxp->index = index[i]; 411 vxp->index = i;
411 card_alloc |= 1 << i; 412 card_alloc |= 1 << i;
412 413
413 /* Chain drivers */ 414 /* Chain drivers */
@@ -417,7 +418,7 @@ static dev_link_t *vxp_attach(void)
417 return &vxp->link; 418 return &vxp->link;
418} 419}
419 420
420static void vxp_detach(dev_link_t *link) 421static void vxpocket_detach(dev_link_t *link)
421{ 422{
422 struct snd_vxpocket *vxp; 423 struct snd_vxpocket *vxp;
423 vx_core_t *chip; 424 vx_core_t *chip;
@@ -458,8 +459,9 @@ static struct pcmcia_driver vxp_cs_driver = {
458 .drv = { 459 .drv = {
459 .name = "snd-vxpocket", 460 .name = "snd-vxpocket",
460 }, 461 },
461 .attach = vxp_attach, 462 .attach = vxpocket_attach,
462 .detach = vxp_detach, 463 .detach = vxpocket_detach,
464 .event = vxpocket_event,
463 .id_table = vxp_ids, 465 .id_table = vxp_ids,
464}; 466};
465 467
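
Besides adopting the vxpocket_ prefix and storing the allocation slot as the index, the last hunk wires vxpocket_event into the pcmcia_driver table; the callback existed before but was not reachable through this table. An illustrative sketch (simplified types, nothing PCMCIA-specific) of why the empty slot mattered:

#include <stdio.h>
#include <stddef.h>

struct drv_ops {
	int (*event)(int ev);   /* optional; NULL means events are dropped */
};

static int my_event(int ev)
{
	printf("event %d handled\n", ev);
	return 0;
}

/* a dispatcher walking the table can only deliver through a
 * non-NULL callback */
static void dispatch(const struct drv_ops *ops, int ev)
{
	if (ops->event)
		ops->event(ev);
	else
		printf("event %d dropped\n", ev);
}

int main(void)
{
	struct drv_ops before = { NULL };       /* .event never registered */
	struct drv_ops after  = { my_event };   /* .event = vxpocket_event */

	dispatch(&before, 1);
	dispatch(&after, 1);
	return 0;
}
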
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 21a69e096225..954f994592ab 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -153,7 +153,7 @@ static DEFINE_SPINLOCK(sound_loader_lock);
153 * list. Acquires locks as needed 153 * list. Acquires locks as needed
154 */ 154 */
155 155
156static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode) 156static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev)
157{ 157{
158 struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL); 158 struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL);
159 int r; 159 int r;
@@ -175,7 +175,7 @@ static int sound_insert_unit(struct sound_unit **list, struct file_operations *f
175 devfs_mk_cdev(MKDEV(SOUND_MAJOR, s->unit_minor), 175 devfs_mk_cdev(MKDEV(SOUND_MAJOR, s->unit_minor),
176 S_IFCHR | mode, s->name); 176 S_IFCHR | mode, s->name);
177 class_device_create(sound_class, MKDEV(SOUND_MAJOR, s->unit_minor), 177 class_device_create(sound_class, MKDEV(SOUND_MAJOR, s->unit_minor),
178 NULL, s->name+6); 178 dev, s->name+6);
179 return r; 179 return r;
180 180
181 fail: 181 fail:
@@ -227,16 +227,18 @@ static void sound_remove_unit(struct sound_unit **list, int unit)
227static struct sound_unit *chains[SOUND_STEP]; 227static struct sound_unit *chains[SOUND_STEP];
228 228
229/** 229/**
230 * register_sound_special - register a special sound node 230 * register_sound_special_device - register a special sound node
231 * @fops: File operations for the driver 231 * @fops: File operations for the driver
232 * @unit: Unit number to allocate 232 * @unit: Unit number to allocate
233 * @dev: device pointer
233 * 234 *
234 * Allocate a special sound device by minor number from the sound 235 * Allocate a special sound device by minor number from the sound
235 * subsystem. The allocated number is returned on success. On failure 236 * subsystem. The allocated number is returned on success. On failure
236 * a negative error code is returned. 237 * a negative error code is returned.
237 */ 238 */
238 239
239int register_sound_special(struct file_operations *fops, int unit) 240int register_sound_special_device(struct file_operations *fops, int unit,
241 struct device *dev)
240{ 242{
241 const int chain = unit % SOUND_STEP; 243 const int chain = unit % SOUND_STEP;
242 int max_unit = 128 + chain; 244 int max_unit = 128 + chain;
@@ -294,9 +296,16 @@ int register_sound_special(struct file_operations *fops, int unit)
294 break; 296 break;
295 } 297 }
296 return sound_insert_unit(&chains[chain], fops, -1, unit, max_unit, 298 return sound_insert_unit(&chains[chain], fops, -1, unit, max_unit,
297 name, S_IRUSR | S_IWUSR); 299 name, S_IRUSR | S_IWUSR, dev);
298} 300}
299 301
302EXPORT_SYMBOL(register_sound_special_device);
303
304int register_sound_special(struct file_operations *fops, int unit)
305{
306 return register_sound_special_device(fops, unit, NULL);
307}
308
300EXPORT_SYMBOL(register_sound_special); 309EXPORT_SYMBOL(register_sound_special);
301 310
302/** 311/**
@@ -312,7 +321,7 @@ EXPORT_SYMBOL(register_sound_special);
312int register_sound_mixer(struct file_operations *fops, int dev) 321int register_sound_mixer(struct file_operations *fops, int dev)
313{ 322{
314 return sound_insert_unit(&chains[0], fops, dev, 0, 128, 323 return sound_insert_unit(&chains[0], fops, dev, 0, 128,
315 "mixer", S_IRUSR | S_IWUSR); 324 "mixer", S_IRUSR | S_IWUSR, NULL);
316} 325}
317 326
318EXPORT_SYMBOL(register_sound_mixer); 327EXPORT_SYMBOL(register_sound_mixer);
@@ -330,7 +339,7 @@ EXPORT_SYMBOL(register_sound_mixer);
330int register_sound_midi(struct file_operations *fops, int dev) 339int register_sound_midi(struct file_operations *fops, int dev)
331{ 340{
332 return sound_insert_unit(&chains[2], fops, dev, 2, 130, 341 return sound_insert_unit(&chains[2], fops, dev, 2, 130,
333 "midi", S_IRUSR | S_IWUSR); 342 "midi", S_IRUSR | S_IWUSR, NULL);
334} 343}
335 344
336EXPORT_SYMBOL(register_sound_midi); 345EXPORT_SYMBOL(register_sound_midi);
@@ -356,7 +365,7 @@ EXPORT_SYMBOL(register_sound_midi);
356int register_sound_dsp(struct file_operations *fops, int dev) 365int register_sound_dsp(struct file_operations *fops, int dev)
357{ 366{
358 return sound_insert_unit(&chains[3], fops, dev, 3, 131, 367 return sound_insert_unit(&chains[3], fops, dev, 3, 131,
359 "dsp", S_IWUSR | S_IRUSR); 368 "dsp", S_IWUSR | S_IRUSR, NULL);
360} 369}
361 370
362EXPORT_SYMBOL(register_sound_dsp); 371EXPORT_SYMBOL(register_sound_dsp);
@@ -375,7 +384,7 @@ EXPORT_SYMBOL(register_sound_dsp);
375int register_sound_synth(struct file_operations *fops, int dev) 384int register_sound_synth(struct file_operations *fops, int dev)
376{ 385{
377 return sound_insert_unit(&chains[9], fops, dev, 9, 137, 386 return sound_insert_unit(&chains[9], fops, dev, 9, 137,
378 "synth", S_IRUSR | S_IWUSR); 387 "synth", S_IRUSR | S_IWUSR, NULL);
379} 388}
380 389
381EXPORT_SYMBOL(register_sound_synth); 390EXPORT_SYMBOL(register_sound_synth);
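
The sound_core changes thread a struct device through sound_insert_unit to class_device_create, so the created class device can be tied to its physical device, while register_sound_special survives as a thin wrapper passing NULL. A self-contained sketch of that compatibility pattern (names and signatures simplified, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct device { const char *name; };

/* extended entry point: new callers can pass a parent device */
static int register_special_device(int unit, struct device *dev)
{
	printf("unit %d, parent %s\n", unit, dev ? dev->name : "(none)");
	return unit;
}

/* old entry point, preserved as a shim so existing callers keep working */
static int register_special(int unit)
{
	return register_special_device(unit, NULL);
}

int main(void)
{
	struct device usb = { "usb-audio" };

	register_special(100);               /* legacy caller */
	register_special_device(101, &usb);  /* new caller with a parent */
	return 0;
}
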
diff --git a/sound/synth/emux/emux_synth.c b/sound/synth/emux/emux_synth.c
index f13b038329eb..751bf1272af3 100644
--- a/sound/synth/emux/emux_synth.c
+++ b/sound/synth/emux/emux_synth.c
@@ -98,7 +98,6 @@ snd_emux_note_on(void *p, int note, int vel, snd_midi_channel_t *chan)
98 vp = emu->ops.get_voice(emu, port); 98 vp = emu->ops.get_voice(emu, port);
99 if (vp == NULL || vp->ch < 0) 99 if (vp == NULL || vp->ch < 0)
100 continue; 100 continue;
101 snd_assert(vp->emu != NULL && vp->hw != NULL, return);
102 if (STATE_IS_PLAYING(vp->state)) 101 if (STATE_IS_PLAYING(vp->state))
103 emu->ops.terminate(vp); 102 emu->ops.terminate(vp);
104 103
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 8298c462c291..5aa5fe651a8a 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -41,10 +41,12 @@
41#include <sound/driver.h> 41#include <sound/driver.h>
42#include <linux/bitops.h> 42#include <linux/bitops.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/interrupt.h>
44#include <linux/list.h> 45#include <linux/list.h>
45#include <linux/slab.h> 46#include <linux/slab.h>
46#include <linux/string.h> 47#include <linux/string.h>
47#include <linux/usb.h> 48#include <linux/usb.h>
49#include <linux/vmalloc.h>
48#include <linux/moduleparam.h> 50#include <linux/moduleparam.h>
49#include <sound/core.h> 51#include <sound/core.h>
50#include <sound/info.h> 52#include <sound/info.h>
@@ -79,7 +81,7 @@ module_param_array(vid, int, NULL, 0444);
79MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device."); 81MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
80module_param_array(pid, int, NULL, 0444); 82module_param_array(pid, int, NULL, 0444);
81MODULE_PARM_DESC(pid, "Product ID for the USB audio device."); 83MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
82module_param(nrpacks, int, 0444); 84module_param(nrpacks, int, 0644);
83MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB."); 85MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
84module_param(async_unlink, bool, 0444); 86module_param(async_unlink, bool, 0444);
85MODULE_PARM_DESC(async_unlink, "Use async unlink mode."); 87MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
@@ -97,7 +99,7 @@ MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
97 99
98#define MAX_PACKS 10 100#define MAX_PACKS 10
99#define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */ 101#define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
100#define MAX_URBS 5 /* max. 20ms long packets */ 102#define MAX_URBS 8
101#define SYNC_URBS 4 /* always four urbs for sync */ 103#define SYNC_URBS 4 /* always four urbs for sync */
102#define MIN_PACKS_URB 1 /* minimum 1 packet per urb */ 104#define MIN_PACKS_URB 1 /* minimum 1 packet per urb */
103 105
@@ -126,11 +128,10 @@ struct audioformat {
126 128
127struct snd_urb_ctx { 129struct snd_urb_ctx {
128 struct urb *urb; 130 struct urb *urb;
131 unsigned int buffer_size; /* size of data buffer, if data URB */
129 snd_usb_substream_t *subs; 132 snd_usb_substream_t *subs;
130 int index; /* index for urb array */ 133 int index; /* index for urb array */
131 int packets; /* number of packets per urb */ 134 int packets; /* number of packets per urb */
132 int transfer; /* transferred size */
133 char *buf; /* buffer for capture */
134}; 135};
135 136
136struct snd_urb_ops { 137struct snd_urb_ops {
@@ -165,12 +166,11 @@ struct snd_usb_substream {
165 unsigned int curframesize; /* current packet size in frames (for capture) */ 166 unsigned int curframesize; /* current packet size in frames (for capture) */
166 unsigned int fill_max: 1; /* fill max packet size always */ 167 unsigned int fill_max: 1; /* fill max packet size always */
167 unsigned int fmt_type; /* USB audio format type (1-3) */ 168 unsigned int fmt_type; /* USB audio format type (1-3) */
169 unsigned int packs_per_ms; /* packets per millisecond (for playback) */
168 170
169 unsigned int running: 1; /* running status */ 171 unsigned int running: 1; /* running status */
170 172
171 unsigned int hwptr; /* free frame position in the buffer (only for playback) */
172 unsigned int hwptr_done; /* processed frame position in the buffer */ 173 unsigned int hwptr_done; /* processed frame position in the buffer */
173 unsigned int transfer_sched; /* scheduled frames since last period (for playback) */
174 unsigned int transfer_done; /* processed frames since last period update */ 174 unsigned int transfer_done; /* processed frames since last period update */
175 unsigned long active_mask; /* bitmask of active urbs */ 175 unsigned long active_mask; /* bitmask of active urbs */
176 unsigned long unlink_mask; /* bitmask of unlinked urbs */ 176 unsigned long unlink_mask; /* bitmask of unlinked urbs */
@@ -178,13 +178,14 @@ struct snd_usb_substream {
178 unsigned int nurbs; /* # urbs */ 178 unsigned int nurbs; /* # urbs */
179 snd_urb_ctx_t dataurb[MAX_URBS]; /* data urb table */ 179 snd_urb_ctx_t dataurb[MAX_URBS]; /* data urb table */
180 snd_urb_ctx_t syncurb[SYNC_URBS]; /* sync urb table */ 180 snd_urb_ctx_t syncurb[SYNC_URBS]; /* sync urb table */
181 char syncbuf[SYNC_URBS * 4]; /* sync buffer; it's so small - let's get static */ 181 char *syncbuf; /* sync buffer for all sync URBs */
182 char *tmpbuf; /* temporary buffer for playback */ 182 dma_addr_t sync_dma; /* DMA address of syncbuf */
183 183
184 u64 formats; /* format bitmasks (all or'ed) */ 184 u64 formats; /* format bitmasks (all or'ed) */
185 unsigned int num_formats; /* number of supported audio formats (list) */ 185 unsigned int num_formats; /* number of supported audio formats (list) */
186 struct list_head fmt_list; /* format list */ 186 struct list_head fmt_list; /* format list */
187 spinlock_t lock; 187 spinlock_t lock;
188 struct tasklet_struct start_period_elapsed; /* for start trigger */
188 189
189 struct snd_urb_ops ops; /* callbacks (must be filled at init) */ 190 struct snd_urb_ops ops; /* callbacks (must be filled at init) */
190}; 191};
@@ -311,27 +312,17 @@ static int prepare_capture_urb(snd_usb_substream_t *subs,
311 struct urb *urb) 312 struct urb *urb)
312{ 313{
313 int i, offs; 314 int i, offs;
314 unsigned long flags;
315 snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context; 315 snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
316 316
317 offs = 0; 317 offs = 0;
318 urb->dev = ctx->subs->dev; /* we need to set this at each time */ 318 urb->dev = ctx->subs->dev; /* we need to set this at each time */
319 urb->number_of_packets = 0;
320 spin_lock_irqsave(&subs->lock, flags);
321 for (i = 0; i < ctx->packets; i++) { 319 for (i = 0; i < ctx->packets; i++) {
322 urb->iso_frame_desc[i].offset = offs; 320 urb->iso_frame_desc[i].offset = offs;
323 urb->iso_frame_desc[i].length = subs->curpacksize; 321 urb->iso_frame_desc[i].length = subs->curpacksize;
324 offs += subs->curpacksize; 322 offs += subs->curpacksize;
325 urb->number_of_packets++;
326 subs->transfer_sched += subs->curframesize;
327 if (subs->transfer_sched >= runtime->period_size) {
328 subs->transfer_sched -= runtime->period_size;
329 break;
330 }
331 } 323 }
332 spin_unlock_irqrestore(&subs->lock, flags);
333 urb->transfer_buffer = ctx->buf;
334 urb->transfer_buffer_length = offs; 324 urb->transfer_buffer_length = offs;
325 urb->number_of_packets = ctx->packets;
335#if 0 // for check 326#if 0 // for check
336 if (! urb->bandwidth) { 327 if (! urb->bandwidth) {
337 int bustime; 328 int bustime;
@@ -359,6 +350,7 @@ static int retire_capture_urb(snd_usb_substream_t *subs,
359 unsigned char *cp; 350 unsigned char *cp;
360 int i; 351 int i;
361 unsigned int stride, len, oldptr; 352 unsigned int stride, len, oldptr;
353 int period_elapsed = 0;
362 354
363 stride = runtime->frame_bits >> 3; 355 stride = runtime->frame_bits >> 3;
364 356
@@ -378,6 +370,10 @@ static int retire_capture_urb(snd_usb_substream_t *subs,
378 if (subs->hwptr_done >= runtime->buffer_size) 370 if (subs->hwptr_done >= runtime->buffer_size)
379 subs->hwptr_done -= runtime->buffer_size; 371 subs->hwptr_done -= runtime->buffer_size;
380 subs->transfer_done += len; 372 subs->transfer_done += len;
373 if (subs->transfer_done >= runtime->period_size) {
374 subs->transfer_done -= runtime->period_size;
375 period_elapsed = 1;
376 }
381 spin_unlock_irqrestore(&subs->lock, flags); 377 spin_unlock_irqrestore(&subs->lock, flags);
382 /* copy a data chunk */ 378 /* copy a data chunk */
383 if (oldptr + len > runtime->buffer_size) { 379 if (oldptr + len > runtime->buffer_size) {
@@ -388,15 +384,9 @@ static int retire_capture_urb(snd_usb_substream_t *subs,
388 } else { 384 } else {
389 memcpy(runtime->dma_area + oldptr * stride, cp, len * stride); 385 memcpy(runtime->dma_area + oldptr * stride, cp, len * stride);
390 } 386 }
391 /* update the pointer, call callback if necessary */
392 spin_lock_irqsave(&subs->lock, flags);
393 if (subs->transfer_done >= runtime->period_size) {
394 subs->transfer_done -= runtime->period_size;
395 spin_unlock_irqrestore(&subs->lock, flags);
396 snd_pcm_period_elapsed(subs->pcm_substream);
397 } else
398 spin_unlock_irqrestore(&subs->lock, flags);
399 } 387 }
388 if (period_elapsed)
389 snd_pcm_period_elapsed(subs->pcm_substream);
400 return 0; 390 return 0;
401} 391}
402 392
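
With these two hunks the capture path stops trimming URBs at period boundaries: every URB is scheduled with its full complement of packets, and the period boundary is detected after the data arrives, in retire_capture_urb, with snd_pcm_period_elapsed called only after the spinlock is released. A self-contained sketch of that retire-side accounting (simplified to one counter, with the locking only indicated in comments):

#include <stdio.h>

struct stream {
	unsigned int period_size;   /* frames per period */
	unsigned int transfer_done; /* frames since the last period */
};

/* returns 1 when this URB's data crossed a period boundary */
static int retire_urb(struct stream *s, unsigned int frames)
{
	int period_elapsed = 0;

	/* ...spin_lock_irqsave(&s->lock) in the real driver... */
	s->transfer_done += frames;
	if (s->transfer_done >= s->period_size) {
		s->transfer_done -= s->period_size;
		period_elapsed = 1;
	}
	/* ...spin_unlock_irqrestore(); only then, outside the lock,
	 * does the caller invoke snd_pcm_period_elapsed() */
	return period_elapsed;
}

int main(void)
{
	struct stream s = { 256, 0 };

	/* prints 0 0 1: the third URB crosses the 256-frame period */
	printf("%d %d %d\n", retire_urb(&s, 100),
	       retire_urb(&s, 100), retire_urb(&s, 100));
	return 0;
}
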
@@ -492,12 +482,10 @@ static int retire_playback_sync_urb_hs(snd_usb_substream_t *subs,
492/* 482/*
493 * prepare urb for playback data pipe 483 * prepare urb for playback data pipe
494 * 484 *
495 * we copy the data directly from the pcm buffer. 485 * Since a URB can handle only a single linear buffer, we must use double
496 * the current position to be copied is held in hwptr field. 486 * buffering when the data to be transferred overflows the buffer boundary.
497 * since a urb can handle only a single linear buffer, if the total 487 * To avoid inconsistencies when updating hwptr_done, we use double buffering
498 * transferred area overflows the buffer boundary, we cannot send 488 * for all URBs.
499 * it directly from the buffer. thus the data is once copied to
500 * a temporary buffer and urb points to that.
501 */ 489 */
502static int prepare_playback_urb(snd_usb_substream_t *subs, 490static int prepare_playback_urb(snd_usb_substream_t *subs,
503 snd_pcm_runtime_t *runtime, 491 snd_pcm_runtime_t *runtime,
@@ -506,6 +494,7 @@ static int prepare_playback_urb(snd_usb_substream_t *subs,
 	int i, stride, offs;
 	unsigned int counts;
 	unsigned long flags;
+	int period_elapsed = 0;
 	snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
 
 	stride = runtime->frame_bits >> 3;
@@ -530,80 +519,85 @@ static int prepare_playback_urb(snd_usb_substream_t *subs,
 		urb->iso_frame_desc[i].length = counts * stride;
 		offs += counts;
 		urb->number_of_packets++;
-		subs->transfer_sched += counts;
-		if (subs->transfer_sched >= runtime->period_size) {
-			subs->transfer_sched -= runtime->period_size;
+		subs->transfer_done += counts;
+		if (subs->transfer_done >= runtime->period_size) {
+			subs->transfer_done -= runtime->period_size;
+			period_elapsed = 1;
 			if (subs->fmt_type == USB_FORMAT_TYPE_II) {
-				if (subs->transfer_sched > 0) {
-					/* FIXME: fill-max mode is not supported yet */
-					offs -= subs->transfer_sched;
-					counts -= subs->transfer_sched;
-					urb->iso_frame_desc[i].length = counts * stride;
-					subs->transfer_sched = 0;
+				if (subs->transfer_done > 0) {
+					/* FIXME: fill-max mode is not
+					 * supported yet */
+					offs -= subs->transfer_done;
+					counts -= subs->transfer_done;
+					urb->iso_frame_desc[i].length =
+						counts * stride;
+					subs->transfer_done = 0;
 				}
 				i++;
 				if (i < ctx->packets) {
 					/* add a transfer delimiter */
-					urb->iso_frame_desc[i].offset = offs * stride;
+					urb->iso_frame_desc[i].offset =
+						offs * stride;
 					urb->iso_frame_desc[i].length = 0;
 					urb->number_of_packets++;
 				}
+				break;
 			}
-			break;
 		}
+		/* finish at the frame boundary at/after the period boundary */
+		if (period_elapsed &&
+		    (i & (subs->packs_per_ms - 1)) == subs->packs_per_ms - 1)
+			break;
 	}
-	if (subs->hwptr + offs > runtime->buffer_size) {
-		/* err, the transferred area goes over buffer boundary.
-		 * copy the data to the temp buffer.
-		 */
-		int len;
-		len = runtime->buffer_size - subs->hwptr;
-		urb->transfer_buffer = subs->tmpbuf;
-		memcpy(subs->tmpbuf, runtime->dma_area + subs->hwptr * stride, len * stride);
-		memcpy(subs->tmpbuf + len * stride, runtime->dma_area, (offs - len) * stride);
-		subs->hwptr += offs;
-		subs->hwptr -= runtime->buffer_size;
+	if (subs->hwptr_done + offs > runtime->buffer_size) {
+		/* err, the transferred area goes over buffer boundary. */
+		unsigned int len = runtime->buffer_size - subs->hwptr_done;
+		memcpy(urb->transfer_buffer,
+		       runtime->dma_area + subs->hwptr_done * stride,
+		       len * stride);
+		memcpy(urb->transfer_buffer + len * stride,
+		       runtime->dma_area,
+		       (offs - len) * stride);
 	} else {
-		/* set the buffer pointer */
-		urb->transfer_buffer = runtime->dma_area + subs->hwptr * stride;
-		subs->hwptr += offs;
-		if (subs->hwptr == runtime->buffer_size)
-			subs->hwptr = 0;
+		memcpy(urb->transfer_buffer,
+		       runtime->dma_area + subs->hwptr_done * stride,
+		       offs * stride);
 	}
+	subs->hwptr_done += offs;
+	if (subs->hwptr_done >= runtime->buffer_size)
+		subs->hwptr_done -= runtime->buffer_size;
 	spin_unlock_irqrestore(&subs->lock, flags);
 	urb->transfer_buffer_length = offs * stride;
-	ctx->transfer = offs;
-
+	if (period_elapsed) {
+		if (likely(subs->running))
+			snd_pcm_period_elapsed(subs->pcm_substream);
+		else
+			tasklet_hi_schedule(&subs->start_period_elapsed);
+	}
 	return 0;
 }
 
 /*
  * process after playback data complete
- *
- * update the current position and call callback if a period is processed.
+ * - nothing to do
  */
 static int retire_playback_urb(snd_usb_substream_t *subs,
 			       snd_pcm_runtime_t *runtime,
 			       struct urb *urb)
 {
-	unsigned long flags;
-	snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
-
-	spin_lock_irqsave(&subs->lock, flags);
-	subs->transfer_done += ctx->transfer;
-	subs->hwptr_done += ctx->transfer;
-	ctx->transfer = 0;
-	if (subs->hwptr_done >= runtime->buffer_size)
-		subs->hwptr_done -= runtime->buffer_size;
-	if (subs->transfer_done >= runtime->period_size) {
-		subs->transfer_done -= runtime->period_size;
-		spin_unlock_irqrestore(&subs->lock, flags);
-		snd_pcm_period_elapsed(subs->pcm_substream);
-	} else
-		spin_unlock_irqrestore(&subs->lock, flags);
 	return 0;
 }
 
+/*
+ * Delay the snd_pcm_period_elapsed() call until after the start trigger
+ * callback so that we're no longer in the substream's lock.
+ */
+static void start_period_elapsed(unsigned long data)
+{
+	snd_usb_substream_t *subs = (snd_usb_substream_t *)data;
+	snd_pcm_period_elapsed(subs->pcm_substream);
+}
+
 
 /*
  */
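
With the temporary-buffer bookkeeping gone, prepare_playback_urb() always copies from the ALSA ring buffer into the URB's own transfer buffer, splitting the memcpy() in two when the region wraps around the end of the ring. The wrap logic in isolation, as a self-contained sketch (sizes in frames, stride in bytes per frame; the harness around the two memcpy() calls is invented):

#include <stdio.h>
#include <string.h>

/*
 * Copy `offs` frames starting at frame `hwptr_done` out of a ring buffer
 * of `buffer_size` frames into a linear destination, wrapping if needed.
 */
static void copy_from_ring(char *dst, const char *ring,
			   unsigned int hwptr_done, unsigned int offs,
			   unsigned int buffer_size, unsigned int stride)
{
	if (hwptr_done + offs > buffer_size) {
		/* region wraps: tail of the ring first, then the head */
		unsigned int len = buffer_size - hwptr_done;
		memcpy(dst, ring + hwptr_done * stride, len * stride);
		memcpy(dst + len * stride, ring, (offs - len) * stride);
	} else {
		memcpy(dst, ring + hwptr_done * stride, offs * stride);
	}
}

int main(void)
{
	char ring[9] = "ABCDEFGH", out[5] = "";
	copy_from_ring(out, ring, 6, 4, 8, 1);	/* wraps: "GH" then "AB" */
	printf("%.4s\n", out);			/* prints GHAB */
	return 0;
}

The hwptr_done update afterwards uses the same wrap rule (add offs, then subtract buffer_size once it overflows), so the read position and the copy always agree.
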
@@ -683,6 +677,42 @@ static void snd_complete_sync_urb(struct urb *urb, struct pt_regs *regs)
 }
 
 
+/* get the physical page pointer at the given offset */
+static struct page *snd_pcm_get_vmalloc_page(snd_pcm_substream_t *subs,
+					     unsigned long offset)
+{
+	void *pageptr = subs->runtime->dma_area + offset;
+	return vmalloc_to_page(pageptr);
+}
+
+/* allocate virtual buffer; may be called more than once */
+static int snd_pcm_alloc_vmalloc_buffer(snd_pcm_substream_t *subs, size_t size)
+{
+	snd_pcm_runtime_t *runtime = subs->runtime;
+	if (runtime->dma_area) {
+		if (runtime->dma_bytes >= size)
+			return 0; /* already large enough */
+		vfree_nocheck(runtime->dma_area);
+	}
+	runtime->dma_area = vmalloc_nocheck(size);
+	if (! runtime->dma_area)
+		return -ENOMEM;
+	runtime->dma_bytes = size;
+	return 0;
+}
+
+/* free virtual buffer; may be called more than once */
+static int snd_pcm_free_vmalloc_buffer(snd_pcm_substream_t *subs)
+{
+	snd_pcm_runtime_t *runtime = subs->runtime;
+	if (runtime->dma_area) {
+		vfree_nocheck(runtime->dma_area);
+		runtime->dma_area = NULL;
+	}
+	return 0;
+}
+
+
 /*
  * unlink active urbs.
  */
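
The three helpers added above replace the preallocated DMA pages with a plain vmalloc() buffer: the .page callback lets the PCM core mmap it to userspace via vmalloc_to_page(), and the allocator is deliberately reusable across repeated hw_params calls, keeping an existing buffer whenever it is already big enough. That reuse-or-reallocate rule, reduced to standalone C (malloc() standing in for vmalloc_nocheck(); names invented):

#include <stdlib.h>

struct vbuf {
	void *area;	/* current buffer, or NULL */
	size_t bytes;	/* its size */
};

/* may be called more than once; grow-only */
static int vbuf_alloc(struct vbuf *b, size_t size)
{
	if (b->area) {
		if (b->bytes >= size)
			return 0;	/* already large enough, keep it */
		free(b->area);
	}
	b->area = malloc(size);
	if (!b->area)
		return -1;
	b->bytes = size;
	return 0;
}

/* may be called more than once; idempotent */
static void vbuf_free(struct vbuf *b)
{
	free(b->area);	/* free(NULL) is a no-op */
	b->area = NULL;
	b->bytes = 0;
}

int main(void)
{
	struct vbuf b = { 0 };
	vbuf_alloc(&b, 4096);
	vbuf_alloc(&b, 1024);	/* no-op: existing buffer is big enough */
	vbuf_free(&b);
	vbuf_free(&b);		/* safe to call again */
	return 0;
}
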
@@ -824,8 +854,14 @@ static int wait_clear_urbs(snd_usb_substream_t *subs)
  */
 static snd_pcm_uframes_t snd_usb_pcm_pointer(snd_pcm_substream_t *substream)
 {
-	snd_usb_substream_t *subs = (snd_usb_substream_t *)substream->runtime->private_data;
-	return subs->hwptr_done;
+	snd_usb_substream_t *subs;
+	snd_pcm_uframes_t hwptr_done;
+
+	subs = (snd_usb_substream_t *)substream->runtime->private_data;
+	spin_lock(&subs->lock);
+	hwptr_done = subs->hwptr_done;
+	spin_unlock(&subs->lock);
+	return hwptr_done;
 }
 
 
@@ -858,11 +894,13 @@ static int snd_usb_pcm_trigger(snd_pcm_substream_t *substream, int cmd)
 static void release_urb_ctx(snd_urb_ctx_t *u)
 {
 	if (u->urb) {
+		if (u->buffer_size)
+			usb_buffer_free(u->subs->dev, u->buffer_size,
+					u->urb->transfer_buffer,
+					u->urb->transfer_dma);
 		usb_free_urb(u->urb);
 		u->urb = NULL;
 	}
-	kfree(u->buf);
-	u->buf = NULL;
 }
 
 /*
@@ -880,8 +918,9 @@ static void release_substream_urbs(snd_usb_substream_t *subs, int force)
 		release_urb_ctx(&subs->dataurb[i]);
 	for (i = 0; i < SYNC_URBS; i++)
 		release_urb_ctx(&subs->syncurb[i]);
-	kfree(subs->tmpbuf);
-	subs->tmpbuf = NULL;
+	usb_buffer_free(subs->dev, SYNC_URBS * 4,
+			subs->syncbuf, subs->sync_dma);
+	subs->syncbuf = NULL;
 	subs->nurbs = 0;
 }
 
@@ -893,7 +932,7 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 {
 	unsigned int maxsize, n, i;
 	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
-	unsigned int npacks[MAX_URBS], urb_packs, total_packs;
+	unsigned int npacks[MAX_URBS], urb_packs, total_packs, packs_per_ms;
 
 	/* calculate the frequency in 16.16 format */
 	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
@@ -920,24 +959,40 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 	else
 		subs->curpacksize = maxsize;
 
-	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
-		urb_packs = nrpacks;
+	if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
+		packs_per_ms = 8 >> subs->datainterval;
 	else
-		urb_packs = (nrpacks * 8) >> subs->datainterval;
+		packs_per_ms = 1;
+	subs->packs_per_ms = packs_per_ms;
 
-	/* allocate a temporary buffer for playback */
 	if (is_playback) {
-		subs->tmpbuf = kmalloc(maxsize * urb_packs, GFP_KERNEL);
-		if (! subs->tmpbuf) {
-			snd_printk(KERN_ERR "cannot malloc tmpbuf\n");
-			return -ENOMEM;
-		}
-	}
+		urb_packs = nrpacks;
+		urb_packs = max(urb_packs, (unsigned int)MIN_PACKS_URB);
+		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
+	} else
+		urb_packs = 1;
+	urb_packs *= packs_per_ms;
 
 	/* decide how many packets to be used */
-	total_packs = (period_bytes + maxsize - 1) / maxsize;
-	if (total_packs < 2 * MIN_PACKS_URB)
-		total_packs = 2 * MIN_PACKS_URB;
+	if (is_playback) {
+		unsigned int minsize;
+		/* determine how small a packet can be */
+		minsize = (subs->freqn >> (16 - subs->datainterval))
+			* (frame_bits >> 3);
+		/* with sync from device, assume it can be 12% lower */
+		if (subs->syncpipe)
+			minsize -= minsize >> 3;
+		minsize = max(minsize, 1u);
+		total_packs = (period_bytes + minsize - 1) / minsize;
+		/* round up to multiple of packs_per_ms */
+		total_packs = (total_packs + packs_per_ms - 1)
+				& ~(packs_per_ms - 1);
+		/* we need at least two URBs for queueing */
+		if (total_packs < 2 * MIN_PACKS_URB * packs_per_ms)
+			total_packs = 2 * MIN_PACKS_URB * packs_per_ms;
+	} else {
+		total_packs = MAX_URBS * urb_packs;
+	}
 	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
 	if (subs->nurbs > MAX_URBS) {
 		/* too much... */
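
The new playback sizing works backwards from the smallest packet the device might legitimately send, so a period boundary can never land beyond the packets a URB actually carries. Plugging in concrete numbers makes the arithmetic easier to follow; the sketch below assumes full speed (one packet per ms), 48 kHz, 16-bit stereo, a 4096-byte period, and approximates the driver's 16.16 fixed-point rate cache as rate << 16 / 1000:

#include <stdio.h>

int main(void)
{
	unsigned int rate = 48000;		/* Hz */
	unsigned int frame_bytes = 4;		/* 16-bit stereo */
	unsigned int datainterval = 0;
	unsigned int packs_per_ms = 1;		/* full speed */
	unsigned int period_bytes = 4096;

	/* frames per 1 ms USB frame in 16.16 fixed point (approximation) */
	unsigned int freqn =
		(unsigned int)(((unsigned long long)rate << 16) / 1000);

	unsigned int minsize = (freqn >> (16 - datainterval)) * frame_bytes;
	/* with a sync pipe the driver also subtracts minsize >> 3 (~12%) */

	unsigned int total_packs = (period_bytes + minsize - 1) / minsize;
	/* round up to a multiple of packs_per_ms; this bit trick requires
	 * packs_per_ms to be a power of two, which 8 >> datainterval is */
	total_packs = (total_packs + packs_per_ms - 1) & ~(packs_per_ms - 1);

	printf("minsize = %u bytes, packets per period = %u\n",
	       minsize, total_packs);	/* minsize = 192, packets = 22 */
	return 0;
}
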
@@ -956,7 +1011,7 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 		subs->nurbs = 2;
 		npacks[0] = (total_packs + 1) / 2;
 		npacks[1] = total_packs - npacks[0];
-	} else if (npacks[subs->nurbs-1] < MIN_PACKS_URB) {
+	} else if (npacks[subs->nurbs-1] < MIN_PACKS_URB * packs_per_ms) {
 		/* the last packet is too small.. */
 		if (subs->nurbs > 2) {
 			/* merge to the first one */
@@ -975,27 +1030,20 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 		snd_urb_ctx_t *u = &subs->dataurb[i];
 		u->index = i;
 		u->subs = subs;
-		u->transfer = 0;
 		u->packets = npacks[i];
+		u->buffer_size = maxsize * u->packets;
 		if (subs->fmt_type == USB_FORMAT_TYPE_II)
 			u->packets++; /* for transfer delimiter */
-		if (! is_playback) {
-			/* allocate a capture buffer per urb */
-			u->buf = kmalloc(maxsize * u->packets, GFP_KERNEL);
-			if (! u->buf) {
-				release_substream_urbs(subs, 0);
-				return -ENOMEM;
-			}
-		}
 		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
-		if (! u->urb) {
-			release_substream_urbs(subs, 0);
-			return -ENOMEM;
-		}
-		u->urb->dev = subs->dev;
+		if (! u->urb)
+			goto out_of_memory;
+		u->urb->transfer_buffer =
+			usb_buffer_alloc(subs->dev, u->buffer_size, GFP_KERNEL,
+					 &u->urb->transfer_dma);
+		if (! u->urb->transfer_buffer)
+			goto out_of_memory;
 		u->urb->pipe = subs->datapipe;
-		u->urb->transfer_flags = URB_ISO_ASAP;
-		u->urb->number_of_packets = u->packets;
+		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
 		u->urb->interval = 1 << subs->datainterval;
 		u->urb->context = u;
 		u->urb->complete = snd_usb_complete_callback(snd_complete_urb);
@@ -1003,21 +1051,24 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 
 	if (subs->syncpipe) {
 		/* allocate and initialize sync urbs */
+		subs->syncbuf = usb_buffer_alloc(subs->dev, SYNC_URBS * 4,
+						 GFP_KERNEL, &subs->sync_dma);
+		if (! subs->syncbuf)
+			goto out_of_memory;
 		for (i = 0; i < SYNC_URBS; i++) {
 			snd_urb_ctx_t *u = &subs->syncurb[i];
 			u->index = i;
 			u->subs = subs;
 			u->packets = 1;
 			u->urb = usb_alloc_urb(1, GFP_KERNEL);
-			if (! u->urb) {
-				release_substream_urbs(subs, 0);
-				return -ENOMEM;
-			}
+			if (! u->urb)
+				goto out_of_memory;
 			u->urb->transfer_buffer = subs->syncbuf + i * 4;
+			u->urb->transfer_dma = subs->sync_dma + i * 4;
 			u->urb->transfer_buffer_length = 4;
-			u->urb->dev = subs->dev;
 			u->urb->pipe = subs->syncpipe;
-			u->urb->transfer_flags = URB_ISO_ASAP;
+			u->urb->transfer_flags = URB_ISO_ASAP |
+						 URB_NO_TRANSFER_DMA_MAP;
 			u->urb->number_of_packets = 1;
 			u->urb->interval = 1 << subs->syncinterval;
 			u->urb->context = u;
@@ -1025,6 +1076,10 @@ static int init_substream_urbs(snd_usb_substream_t *subs, unsigned int period_by
 		}
 	}
 	return 0;
+
+out_of_memory:
+	release_substream_urbs(subs, 0);
+	return -ENOMEM;
 }
 
 
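
All allocation failures in init_substream_urbs() now converge on the single out_of_memory label above, which calls release_substream_urbs() once rather than duplicating the cleanup at every failing call site; this works because the release path tolerates partially constructed state (NULL URBs, zero buffer_size). The shape of that idiom in standalone C (names invented):

#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

/* must be safe to call on partially built state */
static void release_all(struct ctx *c)
{
	free(c->a);	/* free(NULL) is a no-op */
	free(c->b);
	c->a = c->b = NULL;
}

static int init_all(struct ctx *c, size_t n)
{
	c->a = malloc(n);
	if (!c->a)
		goto out_of_memory;
	c->b = malloc(n);
	if (!c->b)
		goto out_of_memory;
	return 0;

out_of_memory:
	release_all(c);		/* one cleanup path for every failure */
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	return init_all(&c, 64) ? 1 : (release_all(&c), 0);
}
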
@@ -1293,7 +1348,8 @@ static int snd_usb_hw_params(snd_pcm_substream_t *substream,
 	unsigned int channels, rate, format;
 	int ret, changed;
 
-	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+	ret = snd_pcm_alloc_vmalloc_buffer(substream,
+					   params_buffer_bytes(hw_params));
 	if (ret < 0)
 		return ret;
 
@@ -1349,7 +1405,7 @@ static int snd_usb_hw_free(snd_pcm_substream_t *substream)
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
 	release_substream_urbs(subs, 0);
-	return snd_pcm_lib_free_pages(substream);
+	return snd_pcm_free_vmalloc_buffer(substream);
 }
 
 /*
@@ -1372,9 +1428,7 @@ static int snd_usb_pcm_prepare(snd_pcm_substream_t *substream)
 	subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
 
 	/* reset the pointer */
-	subs->hwptr = 0;
 	subs->hwptr_done = 0;
-	subs->transfer_sched = 0;
 	subs->transfer_done = 0;
 	subs->phase = 0;
 
@@ -1390,7 +1444,7 @@ static snd_pcm_hardware_t snd_usb_playback =
 	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 		 SNDRV_PCM_INFO_MMAP_VALID),
-	.buffer_bytes_max = (128*1024),
+	.buffer_bytes_max = (256*1024),
 	.period_bytes_min = 64,
 	.period_bytes_max = (128*1024),
 	.periods_min = 2,
@@ -1402,7 +1456,7 @@ static snd_pcm_hardware_t snd_usb_capture =
 	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 		 SNDRV_PCM_INFO_MMAP_VALID),
-	.buffer_bytes_max = (128*1024),
+	.buffer_bytes_max = (256*1024),
 	.period_bytes_min = 64,
 	.period_bytes_max = (128*1024),
 	.periods_min = 2,
@@ -1794,6 +1848,7 @@ static snd_pcm_ops_t snd_usb_playback_ops = {
 	.prepare = snd_usb_pcm_prepare,
 	.trigger = snd_usb_pcm_trigger,
 	.pointer = snd_usb_pcm_pointer,
+	.page = snd_pcm_get_vmalloc_page,
 };
 
 static snd_pcm_ops_t snd_usb_capture_ops = {
@@ -1805,6 +1860,7 @@ static snd_pcm_ops_t snd_usb_capture_ops = {
 	.prepare = snd_usb_pcm_prepare,
 	.trigger = snd_usb_pcm_trigger,
 	.pointer = snd_usb_pcm_pointer,
+	.page = snd_pcm_get_vmalloc_page,
 };
 
 
@@ -2021,6 +2077,9 @@ static void init_substream(snd_usb_stream_t *as, int stream, struct audioformat
 
 	INIT_LIST_HEAD(&subs->fmt_list);
 	spin_lock_init(&subs->lock);
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		tasklet_init(&subs->start_period_elapsed, start_period_elapsed,
+			     (unsigned long)subs);
 
 	subs->stream = as;
 	subs->direction = stream;
@@ -2029,10 +2088,6 @@ static void init_substream(snd_usb_stream_t *as, int stream, struct audioformat
 		subs->ops = audio_urb_ops[stream];
 	else
 		subs->ops = audio_urb_ops_high_speed[stream];
-	snd_pcm_lib_preallocate_pages(as->pcm->streams[stream].substream,
-				      SNDRV_DMA_TYPE_CONTINUOUS,
-				      snd_dma_continuous_data(GFP_KERNEL),
-				      64 * 1024, 128 * 1024);
 	snd_pcm_set_ops(as->pcm, stream,
 			stream == SNDRV_PCM_STREAM_PLAYBACK ?
 			&snd_usb_playback_ops : &snd_usb_capture_ops);
@@ -2078,7 +2133,6 @@ static void snd_usb_audio_pcm_free(snd_pcm_t *pcm)
 	snd_usb_stream_t *stream = pcm->private_data;
 	if (stream) {
 		stream->pcm = NULL;
-		snd_pcm_lib_preallocate_free_for_all(pcm);
 		snd_usb_audio_stream_free(stream);
 	}
 }
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 5778a9b725ec..93dedde3c428 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -44,6 +44,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/timer.h>
 #include <linux/usb.h>
 #include <sound/core.h>
 #include <sound/minors.h>
@@ -56,6 +57,12 @@
  */
 /* #define DUMP_PACKETS */
 
+/*
+ * how long to wait after some USB errors, so that khubd can disconnect() us
+ * without too many spurious errors
+ */
+#define ERROR_DELAY_JIFFIES (HZ / 10)
+
 
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("USB Audio/MIDI helper module");
@@ -100,6 +107,7 @@ struct snd_usb_midi {
 	snd_rawmidi_t* rmidi;
 	struct usb_protocol_ops* usb_protocol_ops;
 	struct list_head list;
+	struct timer_list error_timer;
 
 	struct snd_usb_midi_endpoint {
 		snd_usb_midi_out_endpoint_t *out;
@@ -141,7 +149,8 @@ struct snd_usb_midi_in_endpoint {
 	struct usbmidi_in_port {
 		snd_rawmidi_substream_t* substream;
 	} ports[0x10];
-	int seen_f5;
+	u8 seen_f5;
+	u8 error_resubmit;
 	int current_port;
 };
 
@@ -167,14 +176,22 @@ static int snd_usbmidi_submit_urb(struct urb* urb, int flags)
  */
 static int snd_usbmidi_urb_error(int status)
 {
-	if (status == -ENOENT)
-		return status; /* killed */
-	if (status == -EILSEQ ||
-	    status == -ECONNRESET ||
-	    status == -ETIMEDOUT)
-		return -ENODEV; /* device removed/shutdown */
-	snd_printk(KERN_ERR "urb status %d\n", status);
-	return 0; /* continue */
+	switch (status) {
+	/* manually unlinked, or device gone */
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		return -ENODEV;
+	/* errors that might occur during unplugging */
+	case -EPROTO: /* EHCI */
+	case -ETIMEDOUT: /* OHCI */
+	case -EILSEQ: /* UHCI */
+		return -EIO;
+	default:
+		snd_printk(KERN_ERR "urb status %d\n", status);
+		return 0; /* continue */
+	}
 }
 
 /*
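
The rewritten snd_usbmidi_urb_error() sorts URB status codes into three buckets: the device is gone (give up), a transient transport error of the kind seen mid-unplug on the various host controllers (retry after a delay), or anything else (log it and resubmit immediately). The same classification as a standalone function, using <errno.h> values; the EHCI/OHCI/UHCI attributions are taken from the patch's own comments:

#include <errno.h>
#include <stdio.h>

/* returns -ENODEV: give up; -EIO: retry after a delay; 0: resubmit now */
static int classify_urb_status(int status)
{
	switch (status) {
	/* manually unlinked, or device gone */
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENODEV:
		return -ENODEV;
	/* errors that might occur during unplugging */
	case -EPROTO:		/* EHCI */
	case -ETIMEDOUT:	/* OHCI */
	case -EILSEQ:		/* UHCI */
		return -EIO;
	default:
		fprintf(stderr, "urb status %d\n", status);
		return 0;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_urb_status(-ESHUTDOWN),	/* -ENODEV */
	       classify_urb_status(-EPROTO),	/* -EIO */
	       classify_urb_status(-EINVAL));	/* 0, after logging */
	return 0;
}

Note the behavioral change hidden in the buckets: -ETIMEDOUT and -EILSEQ used to be treated as a dead device; now they merely arm the error timer, so a transient glitch no longer kills the stream.
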
@@ -218,8 +235,15 @@ static void snd_usbmidi_in_urb_complete(struct urb* urb, struct pt_regs *regs)
 		ep->umidi->usb_protocol_ops->input(ep, urb->transfer_buffer,
 						   urb->actual_length);
 	} else {
-		if (snd_usbmidi_urb_error(urb->status) < 0)
+		int err = snd_usbmidi_urb_error(urb->status);
+		if (err < 0) {
+			if (err != -ENODEV) {
+				ep->error_resubmit = 1;
+				mod_timer(&ep->umidi->error_timer,
+					  jiffies + ERROR_DELAY_JIFFIES);
+			}
 			return;
+		}
 	}
 
 	if (usb_pipe_needs_resubmit(urb->pipe)) {
@@ -236,8 +260,13 @@ static void snd_usbmidi_out_urb_complete(struct urb* urb, struct pt_regs *regs)
 	ep->urb_active = 0;
 	spin_unlock(&ep->buffer_lock);
 	if (urb->status < 0) {
-		if (snd_usbmidi_urb_error(urb->status) < 0)
+		int err = snd_usbmidi_urb_error(urb->status);
+		if (err < 0) {
+			if (err != -ENODEV)
+				mod_timer(&ep->umidi->error_timer,
+					  jiffies + ERROR_DELAY_JIFFIES);
 			return;
+		}
 	}
 	snd_usbmidi_do_output(ep);
 }
@@ -276,6 +305,24 @@ static void snd_usbmidi_out_tasklet(unsigned long data)
 	snd_usbmidi_do_output(ep);
 }
 
+/* called after transfers had been interrupted due to some USB error */
+static void snd_usbmidi_error_timer(unsigned long data)
+{
+	snd_usb_midi_t *umidi = (snd_usb_midi_t *)data;
+	int i;
+
+	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+		snd_usb_midi_in_endpoint_t *in = umidi->endpoints[i].in;
+		if (in && in->error_resubmit) {
+			in->error_resubmit = 0;
+			in->urb->dev = umidi->chip->dev;
+			snd_usbmidi_submit_urb(in->urb, GFP_ATOMIC);
+		}
+		if (umidi->endpoints[i].out)
+			snd_usbmidi_do_output(umidi->endpoints[i].out);
+	}
+}
+
 /* helper function to send static data that may not be DMA-able */
 static int send_bulk_static_data(snd_usb_midi_out_endpoint_t* ep,
 				 const void *data, int len)
@@ -594,17 +641,20 @@ static void snd_usbmidi_emagic_finish_out(snd_usb_midi_out_endpoint_t* ep)
 static void snd_usbmidi_emagic_input(snd_usb_midi_in_endpoint_t* ep,
 				     uint8_t* buffer, int buffer_length)
 {
-	/* ignore padding bytes at end of buffer */
-	while (buffer_length > 0 && buffer[buffer_length - 1] == 0xff)
-		--buffer_length;
+	int i;
+
+	/* FF indicates end of valid data */
+	for (i = 0; i < buffer_length; ++i)
+		if (buffer[i] == 0xff) {
+			buffer_length = i;
+			break;
+		}
 
 	/* handle F5 at end of last buffer */
 	if (ep->seen_f5)
 		goto switch_port;
 
 	while (buffer_length > 0) {
-		int i;
-
 		/* determine size of data until next F5 */
 		for (i = 0; i < buffer_length; ++i)
 			if (buffer[i] == 0xf5)
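
The Emagic change above is subtle: the old loop only stripped 0xFF bytes from the tail of the buffer, while the protocol treats the first 0xFF as an end-of-data marker ("FF indicates end of valid data"), so anything between an early 0xFF and the trailing padding was previously parsed as MIDI data. A small self-contained comparison of the two interpretations (the sample packet is made up):

#include <stdio.h>

/* new behavior: the first 0xFF ends the valid data */
static int len_first_ff(const unsigned char *buf, int len)
{
	for (int i = 0; i < len; ++i)
		if (buf[i] == 0xff)
			return i;
	return len;
}

/* old behavior: only trailing 0xFF bytes are stripped */
static int len_strip_tail(const unsigned char *buf, int len)
{
	while (len > 0 && buf[len - 1] == 0xff)
		--len;
	return len;
}

int main(void)
{
	unsigned char pkt[] = { 0x90, 0x3c, 0x7f, 0xff, 0xaa, 0xff };
	printf("new: %d bytes, old: %d bytes\n",
	       len_first_ff(pkt, sizeof pkt),	/* 3 */
	       len_strip_tail(pkt, sizeof pkt));/* 5: keeps the stray 0xaa */
	return 0;
}

The companion hunk below applies the same convention on the output side, writing a 0xFF terminator into outgoing buffers that are not completely full.
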
@@ -671,6 +721,10 @@ static void snd_usbmidi_emagic_output(snd_usb_midi_out_endpoint_t* ep)
 			break;
 		}
 	}
+	if (buf_free < ep->max_transfer && buf_free > 0) {
+		*buf = 0xff;
+		--buf_free;
+	}
 	ep->urb->transfer_buffer_length = ep->max_transfer - buf_free;
 }
 
@@ -765,7 +819,10 @@ static snd_rawmidi_ops_t snd_usbmidi_input_ops = {
 static void snd_usbmidi_in_endpoint_delete(snd_usb_midi_in_endpoint_t* ep)
 {
 	if (ep->urb) {
-		kfree(ep->urb->transfer_buffer);
+		usb_buffer_free(ep->umidi->chip->dev,
+				ep->urb->transfer_buffer_length,
+				ep->urb->transfer_buffer,
+				ep->urb->transfer_dma);
 		usb_free_urb(ep->urb);
 	}
 	kfree(ep);
@@ -799,7 +856,8 @@ static int snd_usbmidi_in_endpoint_create(snd_usb_midi_t* umidi,
 	else
 		pipe = usb_rcvbulkpipe(umidi->chip->dev, ep_info->in_ep);
 	length = usb_maxpacket(umidi->chip->dev, pipe, 0);
-	buffer = kmalloc(length, GFP_KERNEL);
+	buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL,
+				  &ep->urb->transfer_dma);
 	if (!buffer) {
 		snd_usbmidi_in_endpoint_delete(ep);
 		return -ENOMEM;
@@ -812,6 +870,7 @@ static int snd_usbmidi_in_endpoint_create(snd_usb_midi_t* umidi,
 	usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer, length,
 			  snd_usb_complete_callback(snd_usbmidi_in_urb_complete),
 			  ep);
+	ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 
 	rep->in = ep;
 	return 0;
@@ -832,10 +891,10 @@ static unsigned int snd_usbmidi_count_bits(unsigned int x)
  */
 static void snd_usbmidi_out_endpoint_delete(snd_usb_midi_out_endpoint_t* ep)
 {
-	if (ep->tasklet.func)
-		tasklet_kill(&ep->tasklet);
 	if (ep->urb) {
-		kfree(ep->urb->transfer_buffer);
+		usb_buffer_free(ep->umidi->chip->dev, ep->max_transfer,
+				ep->urb->transfer_buffer,
+				ep->urb->transfer_dma);
 		usb_free_urb(ep->urb);
 	}
 	kfree(ep);
@@ -867,7 +926,8 @@ static int snd_usbmidi_out_endpoint_create(snd_usb_midi_t* umidi,
 	/* we never use interrupt output pipes */
 	pipe = usb_sndbulkpipe(umidi->chip->dev, ep_info->out_ep);
 	ep->max_transfer = usb_maxpacket(umidi->chip->dev, pipe, 1);
-	buffer = kmalloc(ep->max_transfer, GFP_KERNEL);
+	buffer = usb_buffer_alloc(umidi->chip->dev, ep->max_transfer,
+				  GFP_KERNEL, &ep->urb->transfer_dma);
 	if (!buffer) {
 		snd_usbmidi_out_endpoint_delete(ep);
 		return -ENOMEM;
@@ -875,6 +935,7 @@ static int snd_usbmidi_out_endpoint_create(snd_usb_midi_t* umidi,
 	usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer,
 			  ep->max_transfer,
 			  snd_usb_complete_callback(snd_usbmidi_out_urb_complete), ep);
+	ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 
 	spin_lock_init(&ep->buffer_lock);
 	tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
@@ -918,8 +979,11 @@ void snd_usbmidi_disconnect(struct list_head* p)
 	int i;
 
 	umidi = list_entry(p, snd_usb_midi_t, list);
+	del_timer_sync(&umidi->error_timer);
 	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
 		snd_usb_midi_endpoint_t* ep = &umidi->endpoints[i];
+		if (ep->out)
+			tasklet_kill(&ep->out->tasklet);
 		if (ep->out && ep->out->urb) {
 			usb_kill_urb(ep->out->urb);
 			if (umidi->usb_protocol_ops->finish_out_endpoint)
@@ -1480,6 +1544,9 @@ int snd_usb_create_midi_interface(snd_usb_audio_t* chip,
 	umidi->iface = iface;
 	umidi->quirk = quirk;
 	umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
+	init_timer(&umidi->error_timer);
+	umidi->error_timer.function = snd_usbmidi_error_timer;
+	umidi->error_timer.data = (unsigned long)umidi;
 
 	/* detect the endpoint(s) to use */
 	memset(endpoints, 0, sizeof(endpoints));
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index ef28061287f2..d0199c4e5551 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -624,7 +624,7 @@ static int usX2Y_pcms_lock_check(snd_card_t *card)
 		for (s = 0; s < 2; ++s) {
 			snd_pcm_substream_t *substream;
 			substream = pcm->streams[s].substream;
-			if (substream && substream->open_flag)
+			if (substream && substream->ffile != NULL)
 				err = -EBUSY;
 		}
 	}