-rw-r--r-- Documentation/ABI/obsolete/proc-pid-oom_adj | 22
-rw-r--r-- Documentation/block/switching-sched.txt | 8
-rw-r--r-- Documentation/filesystems/xfs-delayed-logging-design.txt | 11
-rw-r--r-- Documentation/kernel-parameters.txt | 2
-rw-r--r-- Documentation/leds-class.txt | 21
-rw-r--r-- Documentation/leds/leds-lp5521.txt | 88
-rw-r--r-- Documentation/leds/leds-lp5523.txt | 83
-rw-r--r-- Documentation/networking/LICENSE.qlcnic | 327
-rw-r--r-- Documentation/networking/dccp.txt | 20
-rw-r--r-- Documentation/networking/e1000.txt | 10
-rw-r--r-- Documentation/networking/e1000e.txt | 40
-rw-r--r-- Documentation/networking/igb.txt | 31
-rw-r--r-- Documentation/networking/igbvf.txt | 4
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 34
-rw-r--r-- Documentation/networking/ixgbe.txt | 211
-rw-r--r-- Documentation/networking/ixgbevf.txt | 4
-rw-r--r-- Documentation/networking/stmmac.txt | 48
-rw-r--r-- Documentation/rbtree.txt | 4
-rw-r--r-- Documentation/sysctl/kernel.txt | 14
-rw-r--r-- MAINTAINERS | 25
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/common/gic.c | 28
-rw-r--r-- arch/arm/include/asm/hardware/it8152.h | 2
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 3
-rw-r--r-- arch/arm/kernel/perf_event.c | 2
-rw-r--r-- arch/arm/kernel/stacktrace.c | 2
-rw-r--r-- arch/arm/kernel/traps.c | 5
-rw-r--r-- arch/arm/kernel/unwind.c | 2
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/dma.h | 111
-rw-r--r-- arch/arm/mach-kirkwood/common.c | 7
-rw-r--r-- arch/arm/mach-kirkwood/d2net_v2-setup.c | 2
-rw-r--r-- arch/arm/mach-kirkwood/lacie_v2-common.c | 14
-rw-r--r-- arch/arm/mach-kirkwood/lacie_v2-common.h | 2
-rw-r--r-- arch/arm/mach-kirkwood/mpp.c | 4
-rw-r--r-- arch/arm/mach-kirkwood/netspace_v2-setup.c | 6
-rw-r--r-- arch/arm/mach-kirkwood/netxbig_v2-setup.c | 4
-rw-r--r-- arch/arm/mach-kirkwood/ts41x-setup.c | 14
-rw-r--r-- arch/arm/mach-mmp/include/mach/cputype.h | 3
-rw-r--r-- arch/arm/mach-mv78xx0/mpp.c | 4
-rw-r--r-- arch/arm/mach-orion5x/mpp.c | 4
-rw-r--r-- arch/arm/mach-orion5x/ts78xx-setup.c | 2
-rw-r--r-- arch/arm/mach-pxa/cm-x2xx.c | 2
-rw-r--r-- arch/arm/mach-pxa/saar.c | 2
-rw-r--r-- arch/arm/mach-shmobile/Kconfig | 2
-rw-r--r-- arch/arm/mach-shmobile/board-ap4evb.c | 46
-rw-r--r-- arch/arm/mach-shmobile/clock-sh7372.c | 102
-rw-r--r-- arch/arm/mach-shmobile/include/mach/gpio.h | 4
-rw-r--r-- arch/arm/mach-shmobile/include/mach/sh7372.h | 2
-rw-r--r-- arch/arm/mach-vexpress/ct-ca9x4.c | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 2
-rw-r--r-- arch/arm/plat-omap/devices.c | 4
-rw-r--r-- arch/arm/plat-orion/include/plat/pcie.h | 3
-rw-r--r-- arch/arm/plat-orion/pcie.c | 5
-rw-r--r-- arch/m68k/include/asm/irqflags.h | 2
-rw-r--r-- arch/m68k/include/asm/machdep.h | 1
-rw-r--r-- arch/powerpc/kernel/kvm.c | 2
-rw-r--r-- arch/powerpc/kvm/booke_interrupts.S | 2
-rw-r--r-- arch/powerpc/kvm/e500.c | 2
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 1
-rw-r--r-- arch/powerpc/kvm/timing.c | 2
-rw-r--r-- arch/s390/include/asm/qeth.h | 51
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sh/Makefile | 3
-rw-r--r-- arch/sh/boards/Kconfig | 7
-rw-r--r-- arch/sh/boards/Makefile | 2
-rw-r--r-- arch/sh/boards/board-edosk7705.c | 78
-rw-r--r-- arch/sh/boards/board-secureedge5410.c (renamed from arch/sh/boards/mach-snapgear/setup.c) | 38
-rw-r--r-- arch/sh/boards/mach-edosk7705/Makefile | 5
-rw-r--r-- arch/sh/boards/mach-edosk7705/io.c | 71
-rw-r--r-- arch/sh/boards/mach-edosk7705/setup.c | 36
-rw-r--r-- arch/sh/boards/mach-microdev/io.c | 246
-rw-r--r-- arch/sh/boards/mach-microdev/setup.c | 23
-rw-r--r-- arch/sh/boards/mach-se/7206/Makefile | 2
-rw-r--r-- arch/sh/boards/mach-se/7206/io.c | 104
-rw-r--r-- arch/sh/boards/mach-se/7206/irq.c | 4
-rw-r--r-- arch/sh/boards/mach-se/7206/setup.c | 15
-rw-r--r-- arch/sh/boards/mach-se/770x/Makefile | 2
-rw-r--r-- arch/sh/boards/mach-se/770x/io.c | 156
-rw-r--r-- arch/sh/boards/mach-se/770x/setup.c | 22
-rw-r--r-- arch/sh/boards/mach-se/7751/Makefile | 2
-rw-r--r-- arch/sh/boards/mach-se/7751/io.c | 119
-rw-r--r-- arch/sh/boards/mach-se/7751/setup.c | 18
-rw-r--r-- arch/sh/boards/mach-snapgear/Makefile | 5
-rw-r--r-- arch/sh/boards/mach-snapgear/io.c | 121
-rw-r--r-- arch/sh/boards/mach-systemh/Makefile | 13
-rw-r--r-- arch/sh/boards/mach-systemh/io.c | 158
-rw-r--r-- arch/sh/boards/mach-systemh/irq.c | 61
-rw-r--r-- arch/sh/boards/mach-systemh/setup.c | 57
-rw-r--r-- arch/sh/configs/secureedge5410_defconfig (renamed from arch/sh/configs/snapgear_defconfig) | 0
-rw-r--r-- arch/sh/configs/systemh_defconfig | 28
-rw-r--r-- arch/sh/include/asm/addrspace.h | 8
-rw-r--r-- arch/sh/include/asm/pgtable.h | 12
-rw-r--r-- arch/sh/include/asm/system.h | 4
-rw-r--r-- arch/sh/include/asm/system_32.h | 36
-rw-r--r-- arch/sh/include/asm/system_64.h | 3
-rw-r--r-- arch/sh/include/asm/uncached.h | 40
-rw-r--r-- arch/sh/include/mach-common/mach/edosk7705.h | 7
-rw-r--r-- arch/sh/include/mach-common/mach/microdev.h | 9
-rw-r--r-- arch/sh/include/mach-common/mach/secureedge5410.h (renamed from arch/sh/include/mach-common/mach/snapgear.h) | 22
-rw-r--r-- arch/sh/include/mach-common/mach/systemh7751.h | 71
-rw-r--r-- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 6
-rw-r--r-- arch/sh/mm/Kconfig | 2
-rw-r--r-- arch/sh/mm/consistent.c | 15
-rw-r--r-- arch/sh/mm/uncached.c | 2
-rw-r--r-- arch/sh/tools/mach-types | 1
-rw-r--r-- arch/tile/include/asm/highmem.h | 1
-rw-r--r-- arch/tile/include/asm/kmap_types.h | 34
-rw-r--r-- arch/tile/include/asm/pgtable.h | 6
-rw-r--r-- arch/tile/include/asm/stat.h | 3
-rw-r--r-- arch/tile/include/asm/unistd.h | 1
-rw-r--r-- arch/tile/kernel/compat.c | 10
-rw-r--r-- arch/tile/kernel/early_printk.c | 2
-rw-r--r-- arch/tile/kernel/hardwall.c | 6
-rw-r--r-- arch/tile/kernel/irq.c | 4
-rw-r--r-- arch/tile/kernel/machine_kexec.c | 6
-rw-r--r-- arch/tile/kernel/messaging.c | 2
-rw-r--r-- arch/tile/kernel/ptrace.c | 39
-rw-r--r-- arch/tile/kernel/reboot.c | 6
-rw-r--r-- arch/tile/kernel/setup.c | 8
-rw-r--r-- arch/tile/kernel/signal.c | 9
-rw-r--r-- arch/tile/kernel/smp.c | 2
-rw-r--r-- arch/tile/kernel/time.c | 8
-rw-r--r-- arch/tile/lib/memcpy_tile64.c | 11
-rw-r--r-- arch/tile/mm/highmem.c | 2
-rw-r--r-- arch/tile/mm/init.c | 8
-rw-r--r-- arch/tile/mm/pgtable.c | 4
-rw-r--r-- arch/um/include/asm/ptrace-generic.h | 4
-rw-r--r-- arch/um/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/include/asm/apic.h | 10
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 189
-rw-r--r-- arch/x86/kernel/apic/apic.c | 1
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 12
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 4
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 2
-rw-r--r-- arch/x86/kernel/mmconf-fam10h_64.c | 7
-rw-r--r-- arch/x86/kernel/pvclock.c | 38
-rw-r--r-- arch/x86/kvm/mmu.c | 9
-rw-r--r-- arch/x86/kvm/x86.c | 16
-rw-r--r-- arch/x86/mm/tlb.c | 2
-rw-r--r-- arch/x86/pci/xen.c | 8
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 13
-rw-r--r-- arch/x86/xen/mmu.c | 2
-rw-r--r-- arch/x86/xen/setup.c | 18
-rw-r--r-- block/blk-core.c | 11
-rw-r--r-- block/blk-ioc.c | 14
-rw-r--r-- block/blk-map.c | 2
-rw-r--r-- block/compat_ioctl.c | 4
-rw-r--r-- block/elevator.c | 4
-rw-r--r-- block/ioctl.c | 7
-rw-r--r-- block/scsi_ioctl.c | 34
-rw-r--r-- crypto/pcrypt.c | 1
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/ata/libata-scsi.c | 5
-rw-r--r-- drivers/ata/pata_legacy.c | 2
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 2
-rw-r--r-- drivers/atm/fore200e.c | 2
-rw-r--r-- drivers/atm/lanai.c | 7
-rw-r--r-- drivers/atm/solos-attrlist.c | 1
-rw-r--r-- drivers/atm/solos-pci.c | 8
-rw-r--r-- drivers/block/aoe/aoeblk.c | 3
-rw-r--r-- drivers/block/aoe/aoecmd.c | 6
-rw-r--r-- drivers/block/cciss.c | 131
-rw-r--r-- drivers/block/cciss.h | 4
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 42
-rw-r--r-- drivers/block/drbd/drbd_int.h | 52
-rw-r--r-- drivers/block/drbd/drbd_main.c | 148
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 25
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 1
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 217
-rw-r--r-- drivers/block/drbd/drbd_req.c | 38
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 23
-rw-r--r-- drivers/block/floppy.c | 4
-rw-r--r-- drivers/block/loop.c | 6
-rw-r--r-- drivers/block/xen-blkfront.c | 2
-rw-r--r-- drivers/char/Makefile | 44
-rw-r--r-- drivers/char/agp/intel-gtt.c | 6
-rw-r--r-- drivers/char/amiserial.c | 1
-rw-r--r-- drivers/char/nozomi.c | 1
-rw-r--r-- drivers/char/pcmcia/synclink_cs.c | 1
-rw-r--r-- drivers/clocksource/sh_cmt.c | 10
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 10
-rw-r--r-- drivers/clocksource/sh_tmu.c | 10
-rw-r--r-- drivers/firewire/ohci.c | 88
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 118
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 70
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 129
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 86
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_manager.c | 81
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r-- drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r-- drivers/gpu/stub/Kconfig | 3
-rw-r--r-- drivers/hwmon/ad7414.c | 6
-rw-r--r-- drivers/hwmon/adt7470.c | 4
-rw-r--r-- drivers/hwmon/gpio-fan.c | 8
-rw-r--r-- drivers/hwmon/ltc4261.c | 5
-rw-r--r-- drivers/infiniband/core/addr.c | 14
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 6
-rw-r--r-- drivers/input/input.c | 87
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 74
-rw-r--r-- drivers/input/keyboard/atkbd.c | 12
-rw-r--r-- drivers/input/misc/pcf8574_keypad.c | 23
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 11
-rw-r--r-- drivers/input/tablet/acecad.c | 3
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNinfineon.c | 4
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNisar.c | 2
-rw-r--r-- drivers/isdn/hisax/avm_pci.c | 2
-rw-r--r-- drivers/isdn/hisax/callc.c | 4
-rw-r--r-- drivers/isdn/hisax/config.c | 2
-rw-r--r-- drivers/isdn/hisax/hfc_2bds0.c | 4
-rw-r--r-- drivers/isdn/hisax/hfc_2bs0.c | 2
-rw-r--r-- drivers/isdn/hisax/hfc_pci.c | 4
-rw-r--r-- drivers/isdn/hisax/hfc_sx.c | 6
-rw-r--r-- drivers/isdn/hisax/hisax.h | 2
-rw-r--r-- drivers/isdn/hisax/ipacx.c | 2
-rw-r--r-- drivers/isdn/hisax/isar.c | 19
-rw-r--r-- drivers/isdn/hisax/isdnl1.h | 1
-rw-r--r-- drivers/isdn/hisax/isdnl3.c | 2
-rw-r--r-- drivers/isdn/hisax/netjet.c | 10
-rw-r--r-- drivers/isdn/hisax/st5481_d.c | 6
-rw-r--r-- drivers/isdn/i4l/isdn_concap.c | 2
-rw-r--r-- drivers/isdn/i4l/isdn_net.c | 20
-rw-r--r-- drivers/isdn/i4l/isdn_ppp.c | 26
-rw-r--r-- drivers/isdn/icn/icn.c | 7
-rw-r--r-- drivers/isdn/mISDN/layer1.c | 10
-rw-r--r-- drivers/isdn/mISDN/layer2.c | 12
-rw-r--r-- drivers/isdn/mISDN/tei.c | 23
-rw-r--r-- drivers/leds/Kconfig | 20
-rw-r--r-- drivers/leds/Makefile | 2
-rw-r--r-- drivers/leds/led-class.c | 105
-rw-r--r-- drivers/leds/led-triggers.c | 2
-rw-r--r-- drivers/leds/leds-gpio.c | 2
-rw-r--r-- drivers/leds/leds-lp5521.c | 821
-rw-r--r-- drivers/leds/leds-lp5523.c | 1065
-rw-r--r-- drivers/leds/leds-net5501.c | 2
-rw-r--r-- drivers/leds/ledtrig-timer.c | 124
-rw-r--r-- drivers/macintosh/adb-iop.c | 4
-rw-r--r-- drivers/md/md.c | 20
-rw-r--r-- drivers/misc/apds9802als.c | 2
-rw-r--r-- drivers/misc/bh1770glc.c | 8
-rw-r--r-- drivers/misc/isl29020.c | 4
-rw-r--r-- drivers/net/3c507.c | 2
-rw-r--r-- drivers/net/3c515.c | 2
-rw-r--r-- drivers/net/3c59x.c | 6
-rw-r--r-- drivers/net/8139cp.c | 10
-rw-r--r-- drivers/net/8139too.c | 3
-rw-r--r-- drivers/net/82596.c | 2
-rw-r--r-- drivers/net/Kconfig | 261
-rw-r--r-- drivers/net/arm/am79c961a.c | 9
-rw-r--r-- drivers/net/arm/w90p910_ether.c | 2
-rw-r--r-- drivers/net/at1700.c | 2
-rw-r--r-- drivers/net/atarilance.c | 2
-rw-r--r-- drivers/net/atl1c/atl1c_hw.c | 2
-rw-r--r-- drivers/net/atlx/atl1.c | 1
-rw-r--r-- drivers/net/atlx/atl2.c | 4
-rw-r--r-- drivers/net/au1000_eth.c | 12
-rw-r--r-- drivers/net/ax88796.c | 8
-rw-r--r-- drivers/net/b44.c | 11
-rw-r--r-- drivers/net/bcm63xx_enet.c | 2
-rw-r--r-- drivers/net/benet/be.h | 39
-rw-r--r-- drivers/net/benet/be_cmds.c | 71
-rw-r--r-- drivers/net/benet/be_cmds.h | 42
-rw-r--r-- drivers/net/benet/be_hw.h | 39
-rw-r--r-- drivers/net/benet/be_main.c | 248
-rw-r--r-- drivers/net/bnx2.c | 58
-rw-r--r-- drivers/net/bnx2.h | 2
-rw-r--r-- drivers/net/bnx2x/bnx2x.h | 15
-rw-r--r-- drivers/net/bnx2x/bnx2x_cmn.c | 87
-rw-r--r-- drivers/net/bnx2x/bnx2x_cmn.h | 10
-rw-r--r-- drivers/net/bnx2x/bnx2x_ethtool.c | 67
-rw-r--r-- drivers/net/bnx2x/bnx2x_hsi.h | 51
-rw-r--r-- drivers/net/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r-- drivers/net/bnx2x/bnx2x_link.c | 61
-rw-r--r-- drivers/net/bnx2x/bnx2x_main.c | 356
-rw-r--r-- drivers/net/bnx2x/bnx2x_reg.h | 5
-rw-r--r-- drivers/net/bonding/Makefile | 2
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 3
-rw-r--r-- drivers/net/bonding/bond_debugfs.c | 96
-rw-r--r-- drivers/net/bonding/bond_main.c | 28
-rw-r--r-- drivers/net/bonding/bonding.h | 11
-rw-r--r-- drivers/net/caif/caif_shm_u5500.c | 2
-rw-r--r-- drivers/net/caif/caif_shmcore.c | 2
-rw-r--r-- drivers/net/caif/caif_spi.c | 61
-rw-r--r-- drivers/net/caif/caif_spi_slave.c | 13
-rw-r--r-- drivers/net/can/Kconfig | 21
-rw-r--r-- drivers/net/can/Makefile | 1
-rw-r--r-- drivers/net/can/janz-ican3.c | 9
-rw-r--r-- drivers/net/can/mscan/mscan.c | 2
-rw-r--r-- drivers/net/can/pch_can.c | 939
-rw-r--r-- drivers/net/can/sja1000/sja1000_of_platform.c | 8
-rw-r--r-- drivers/net/can/slcan.c | 756
-rw-r--r-- drivers/net/cassini.c | 4
-rw-r--r-- drivers/net/cnic.c | 2
-rw-r--r-- drivers/net/cris/eth_v10.c | 34
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 7
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.c | 6
-rw-r--r-- drivers/net/cxgb4/cxgb4_main.c | 7
-rw-r--r-- drivers/net/cxgb4/t4_hw.c | 2
-rw-r--r-- drivers/net/cxgb4vf/adapter.h | 2
-rw-r--r-- drivers/net/cxgb4vf/cxgb4vf_main.c | 148
-rw-r--r-- drivers/net/cxgb4vf/sge.c | 131
-rw-r--r-- drivers/net/cxgb4vf/t4vf_common.h | 1
-rw-r--r-- drivers/net/cxgb4vf/t4vf_hw.c | 118
-rw-r--r-- drivers/net/dm9000.c | 2
-rw-r--r-- drivers/net/e1000/e1000_hw.c | 20
-rw-r--r-- drivers/net/e1000/e1000_main.c | 28
-rw-r--r-- drivers/net/e1000/e1000_param.c | 13
-rw-r--r-- drivers/net/e1000e/82571.c | 189
-rw-r--r-- drivers/net/e1000e/defines.h | 9
-rw-r--r-- drivers/net/e1000e/e1000.h | 4
-rw-r--r-- drivers/net/e1000e/ethtool.c | 29
-rw-r--r-- drivers/net/e1000e/ich8lan.c | 18
-rw-r--r-- drivers/net/e1000e/lib.c | 135
-rw-r--r-- drivers/net/e1000e/netdev.c | 84
-rw-r--r-- drivers/net/e1000e/param.c | 2
-rw-r--r-- drivers/net/e1000e/phy.c | 25
-rw-r--r-- drivers/net/eepro.c | 2
-rw-r--r-- drivers/net/ehea/ehea.h | 15
-rw-r--r-- drivers/net/ehea/ehea_ethtool.c | 27
-rw-r--r-- drivers/net/ehea/ehea_main.c | 450
-rw-r--r-- drivers/net/ehea/ehea_phyp.c | 40
-rw-r--r-- drivers/net/ehea/ehea_qmr.c | 89
-rw-r--r-- drivers/net/enic/enic.h | 6
-rw-r--r-- drivers/net/enic/enic_main.c | 245
-rw-r--r-- drivers/net/enic/enic_res.h | 1
-rw-r--r-- drivers/net/enic/vnic_vic.h | 31
-rw-r--r-- drivers/net/ethoc.c | 160
-rw-r--r-- drivers/net/fec_mpc52xx.c | 19
-rw-r--r-- drivers/net/forcedeth.c | 1134
-rw-r--r-- drivers/net/gianfar.c | 7
-rw-r--r-- drivers/net/gianfar_ethtool.c | 5
-rw-r--r-- drivers/net/ibm_newemac/core.c | 3
-rw-r--r-- drivers/net/ibmveth.c | 7
-rw-r--r-- drivers/net/ifb.c | 37
-rw-r--r-- drivers/net/igb/e1000_defines.h | 7
-rw-r--r-- drivers/net/igb/e1000_nvm.c | 93
-rw-r--r-- drivers/net/igb/e1000_nvm.h | 2
-rw-r--r-- drivers/net/igb/e1000_phy.c | 11
-rw-r--r-- drivers/net/igb/igb_main.c | 26
-rw-r--r-- drivers/net/igbvf/Makefile | 2
-rw-r--r-- drivers/net/igbvf/defines.h | 2
-rw-r--r-- drivers/net/igbvf/ethtool.c | 9
-rw-r--r-- drivers/net/igbvf/igbvf.h | 3
-rw-r--r-- drivers/net/igbvf/mbx.c | 2
-rw-r--r-- drivers/net/igbvf/mbx.h | 2
-rw-r--r-- drivers/net/igbvf/netdev.c | 24
-rw-r--r-- drivers/net/igbvf/regs.h | 2
-rw-r--r-- drivers/net/igbvf/vf.c | 2
-rw-r--r-- drivers/net/igbvf/vf.h | 2
-rw-r--r-- drivers/net/ipg.c | 6
-rw-r--r-- drivers/net/irda/mcs7780.c | 2
-rw-r--r-- drivers/net/irda/sh_sir.c | 2
-rw-r--r-- drivers/net/iseries_veth.c | 27
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 59
-rw-r--r-- drivers/net/ixgb/ixgb_param.c | 21
-rw-r--r-- drivers/net/ixgbe/Makefile | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 122
-rw-r--r-- drivers/net/ixgbe/ixgbe_82598.c | 58
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 136
-rw-r--r-- drivers/net/ixgbe/ixgbe_common.c | 192
-rw-r--r-- drivers/net/ixgbe/ixgbe_common.h | 8
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb.c | 17
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb.h | 3
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_82598.c | 12
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_82599.c | 12
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_nl.c | 55
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethtool.c | 297
-rw-r--r-- drivers/net/ixgbe/ixgbe_fcoe.c | 15
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 2112
-rw-r--r-- drivers/net/ixgbe/ixgbe_mbx.c | 40
-rw-r--r-- drivers/net/ixgbe/ixgbe_mbx.h | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_phy.c | 52
-rw-r--r-- drivers/net/ixgbe/ixgbe_phy.h | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_sriov.c | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 32
-rw-r--r-- drivers/net/ixgbe/ixgbe_x540.c | 722
-rw-r--r-- drivers/net/ixgbevf/Makefile | 2
-rw-r--r-- drivers/net/ixgbevf/defines.h | 2
-rw-r--r-- drivers/net/ixgbevf/ethtool.c | 18
-rw-r--r-- drivers/net/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ixgbevf/ixgbevf_main.c | 22
-rw-r--r-- drivers/net/ixgbevf/mbx.c | 2
-rw-r--r-- drivers/net/ixgbevf/mbx.h | 2
-rw-r--r-- drivers/net/ixgbevf/regs.h | 2
-rw-r--r-- drivers/net/ixgbevf/vf.c | 2
-rw-r--r-- drivers/net/ixgbevf/vf.h | 2
-rw-r--r-- drivers/net/jme.c | 20
-rw-r--r-- drivers/net/ks8851.c | 33
-rw-r--r-- drivers/net/lance.c | 2
-rw-r--r-- drivers/net/lib82596.c | 2
-rw-r--r-- drivers/net/lib8390.c | 24
-rw-r--r-- drivers/net/macvlan.c | 113
-rw-r--r-- drivers/net/mv643xx_eth.c | 9
-rw-r--r-- drivers/net/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ne-h8300.c | 12
-rw-r--r-- drivers/net/netxen/netxen_nic_init.c | 7
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 9
-rw-r--r-- drivers/net/niu.c | 2
-rw-r--r-- drivers/net/pch_gbe/pch_gbe_ethtool.c | 19
-rw-r--r-- drivers/net/pch_gbe/pch_gbe_main.c | 18
-rw-r--r-- drivers/net/pch_gbe/pch_gbe_param.c | 8
-rw-r--r-- drivers/net/pcmcia/axnet_cs.c | 48
-rw-r--r-- drivers/net/phy/marvell.c | 164
-rw-r--r-- drivers/net/phy/phy.c | 4
-rw-r--r-- drivers/net/ppp_generic.c | 55
-rw-r--r-- drivers/net/pptp.c | 5
-rw-r--r-- drivers/net/pxa168_eth.c | 9
-rw-r--r-- drivers/net/qla3xxx.c | 8
-rw-r--r-- drivers/net/qlcnic/qlcnic.h | 42
-rw-r--r-- drivers/net/qlcnic/qlcnic_ctx.c | 28
-rw-r--r-- drivers/net/qlcnic/qlcnic_ethtool.c | 130
-rw-r--r-- drivers/net/qlcnic/qlcnic_hdr.h | 25
-rw-r--r-- drivers/net/qlcnic/qlcnic_hw.c | 76
-rw-r--r-- drivers/net/qlcnic/qlcnic_init.c | 123
-rw-r--r-- drivers/net/qlcnic/qlcnic_main.c | 93
-rw-r--r-- drivers/net/qlge/qlge.h | 4
-rw-r--r-- drivers/net/qlge/qlge_dbg.c | 21
-rw-r--r-- drivers/net/qlge/qlge_ethtool.c | 19
-rw-r--r-- drivers/net/qlge/qlge_main.c | 14
-rw-r--r-- drivers/net/qlge/qlge_mpi.c | 2
-rw-r--r-- drivers/net/r8169.c | 14
-rw-r--r-- drivers/net/s2io.c | 79
-rw-r--r-- drivers/net/s2io.h | 9
-rw-r--r-- drivers/net/sc92031.c | 3
-rw-r--r-- drivers/net/sfc/efx.c | 57
-rw-r--r-- drivers/net/sfc/efx.h | 5
-rw-r--r-- drivers/net/sfc/ethtool.c | 168
-rw-r--r-- drivers/net/sfc/falcon.c | 183
-rw-r--r-- drivers/net/sfc/falcon_boards.c | 120
-rw-r--r-- drivers/net/sfc/falcon_xmac.c | 14
-rw-r--r-- drivers/net/sfc/filter.c | 255
-rw-r--r-- drivers/net/sfc/filter.h | 149
-rw-r--r-- drivers/net/sfc/io.h | 153
-rw-r--r-- drivers/net/sfc/mcdi.c | 3
-rw-r--r-- drivers/net/sfc/mcdi_phy.c | 1
-rw-r--r-- drivers/net/sfc/mdio_10g.c | 1
-rw-r--r-- drivers/net/sfc/mtd.c | 98
-rw-r--r-- drivers/net/sfc/net_driver.h | 76
-rw-r--r-- drivers/net/sfc/nic.c | 96
-rw-r--r-- drivers/net/sfc/nic.h | 12
-rw-r--r-- drivers/net/sfc/qt202x_phy.c | 6
-rw-r--r-- drivers/net/sfc/rx.c | 30
-rw-r--r-- drivers/net/sfc/siena.c | 10
-rw-r--r-- drivers/net/sfc/spi.h | 5
-rw-r--r-- drivers/net/sfc/tenxpress.c | 2
-rw-r--r-- drivers/net/sfc/tx.c | 25
-rw-r--r-- drivers/net/sh_eth.c | 245
-rw-r--r-- drivers/net/sh_eth.h | 1
-rw-r--r-- drivers/net/sis190.c | 3
-rw-r--r-- drivers/net/skge.c | 3
-rw-r--r-- drivers/net/smsc911x.h | 2
-rw-r--r-- drivers/net/stmmac/stmmac.h | 40
-rw-r--r-- drivers/net/stmmac/stmmac_ethtool.c | 16
-rw-r--r-- drivers/net/stmmac/stmmac_main.c | 229
-rw-r--r-- drivers/net/stmmac/stmmac_mdio.c | 8
-rw-r--r-- drivers/net/sungem.c | 12
-rw-r--r-- drivers/net/sunlance.c | 10
-rw-r--r-- drivers/net/tg3.c | 287
-rw-r--r-- drivers/net/tg3.h | 42
-rw-r--r-- drivers/net/tulip/de2104x.c | 1
-rw-r--r-- drivers/net/tulip/dmfe.c | 6
-rw-r--r-- drivers/net/ucc_geth.c | 25
-rw-r--r-- drivers/net/ucc_geth.h | 3
-rw-r--r-- drivers/net/usb/Kconfig | 19
-rw-r--r-- drivers/net/usb/Makefile | 1
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 1213
-rw-r--r-- drivers/net/usb/hso.c | 11
-rw-r--r-- drivers/net/usb/ipheth.c | 2
-rw-r--r-- drivers/net/usb/pegasus.c | 4
-rw-r--r-- drivers/net/usb/sierra_net.c | 5
-rw-r--r-- drivers/net/usb/usbnet.c | 59
-rw-r--r-- drivers/net/via-rhine.c | 326
-rw-r--r-- drivers/net/virtio_net.c | 12
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 953
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 174
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 73
-rw-r--r-- drivers/net/vxge/vxge-config.c | 3604
-rw-r--r-- drivers/net/vxge/vxge-config.h | 169
-rw-r--r-- drivers/net/vxge/vxge-ethtool.c | 112
-rw-r--r-- drivers/net/vxge/vxge-main.c | 1104
-rw-r--r-- drivers/net/vxge/vxge-main.h | 86
-rw-r--r-- drivers/net/vxge/vxge-reg.h | 33
-rw-r--r-- drivers/net/vxge/vxge-traffic.c | 773
-rw-r--r-- drivers/net/vxge/vxge-traffic.h | 49
-rw-r--r-- drivers/net/vxge/vxge-version.h | 33
-rw-r--r-- drivers/net/wan/x25_asy.c | 11
-rw-r--r-- drivers/net/wimax/i2400m/driver.c | 96
-rw-r--r-- drivers/net/wimax/i2400m/i2400m.h | 19
-rw-r--r-- drivers/net/wimax/i2400m/sdio.c | 1
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 1
-rw-r--r-- drivers/net/wireless/hostap/hostap_ap.c | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap_hw.c | 8
-rw-r--r-- drivers/net/wireless/rt2x00/Kconfig | 3
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.c | 1
-rw-r--r-- drivers/net/xilinx_emaclite.c | 36
-rw-r--r-- drivers/net/znet.c | 2
-rw-r--r-- drivers/pci/xen-pcifront.c | 6
-rw-r--r-- drivers/rapidio/rio.c | 4
-rw-r--r-- drivers/rtc/rtc-ds1302.c | 2
-rw-r--r-- drivers/s390/net/lcs.c | 10
-rw-r--r-- drivers/s390/net/qeth_core.h | 9
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 57
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 2
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 2
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 11
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 245
-rw-r--r-- drivers/scsi/scsi_error.c | 18
-rw-r--r-- drivers/serial/8250.c | 5
-rw-r--r-- drivers/serial/8250_pci.c | 5
-rw-r--r-- drivers/serial/bfin_5xx.c | 31
-rw-r--r-- drivers/serial/kgdboc.c | 59
-rw-r--r-- drivers/sh/clk/core.c | 96
-rw-r--r-- drivers/sh/intc/core.c | 2
-rw-r--r-- drivers/sh/intc/dynamic.c | 2
-rw-r--r-- drivers/staging/ath6kl/Kconfig | 2
-rw-r--r-- drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c | 4
-rw-r--r-- drivers/staging/ath6kl/os/linux/ar6000_drv.c | 5
-rw-r--r-- drivers/staging/ath6kl/os/linux/cfg80211.c | 7
-rw-r--r-- drivers/staging/ath6kl/os/linux/include/athendpack_linux.h | 0
-rw-r--r-- drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h | 0
-rw-r--r-- drivers/staging/batman-adv/hard-interface.c | 15
-rw-r--r-- drivers/staging/batman-adv/routing.c | 12
-rw-r--r-- drivers/staging/batman-adv/routing.h | 4
-rw-r--r-- drivers/staging/batman-adv/unicast.c | 2
-rw-r--r-- drivers/staging/bcm/Bcmchar.c | 49
-rw-r--r-- drivers/staging/brcm80211/README | 2
-rw-r--r-- drivers/staging/brcm80211/TODO | 2
-rw-r--r-- drivers/staging/brcm80211/brcmfmac/dhd_linux.c | 2
-rw-r--r-- drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c | 12
-rw-r--r-- drivers/staging/cpia/cpia.c | 6
-rw-r--r-- drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c | 1
-rw-r--r-- drivers/staging/hv/hv_utils.c | 3
-rw-r--r-- drivers/staging/intel_sst/intel_sst_app_interface.c | 284
-rw-r--r-- drivers/staging/intel_sst/intel_sst_common.h | 4
-rw-r--r-- drivers/staging/keucr/init.c | 18
-rw-r--r-- drivers/staging/keucr/ms.c | 14
-rw-r--r-- drivers/staging/keucr/msscsi.c | 6
-rw-r--r-- drivers/staging/keucr/sdscsi.c | 4
-rw-r--r-- drivers/staging/keucr/smilsub.c | 18
-rw-r--r-- drivers/staging/keucr/transport.c | 2
-rw-r--r-- drivers/staging/rt2860/common/cmm_aes.c | 2
-rw-r--r-- drivers/staging/rt2860/usb_main_dev.c | 1
-rw-r--r-- drivers/staging/rtl8192e/r8192E_core.c | 3
-rw-r--r-- drivers/staging/solo6x10/solo6010-v4l2-enc.c | 2
-rw-r--r-- drivers/staging/solo6x10/solo6010-v4l2.c | 2
-rw-r--r-- drivers/staging/stradis/stradis.c | 11
-rw-r--r-- drivers/staging/tidspbridge/Kconfig | 1
-rw-r--r-- drivers/staging/tidspbridge/Makefile | 7
-rw-r--r-- drivers/staging/tidspbridge/core/_deh.h | 5
-rw-r--r-- drivers/staging/tidspbridge/core/_tiomap.h | 19
-rw-r--r-- drivers/staging/tidspbridge/core/dsp-mmu.c | 317
-rw-r--r-- drivers/staging/tidspbridge/core/io_sm.c | 180
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430.c | 1083
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 4
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap_io.c | 17
-rw-r--r-- drivers/staging/tidspbridge/core/ue_deh.c | 115
-rw-r--r-- drivers/staging/tidspbridge/hw/EasiGlobal.h | 41
-rw-r--r-- drivers/staging/tidspbridge/hw/MMUAccInt.h | 76
-rw-r--r-- drivers/staging/tidspbridge/hw/MMURegAcM.h | 225
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_defs.h | 58
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_mmu.c | 562
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_mmu.h | 163
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h | 1
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dev.h | 24
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dmm.h | 75
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/drv.h | 10
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h | 67
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dspdefs.h | 44
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dspioctl.h | 7
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/proc.h | 46
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dev.c | 63
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dmm.c | 533
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dspapi.c | 34
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv.c | 15
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.c | 2
-rw-r--r-- drivers/staging/tidspbridge/rmgr/node.c | 48
-rw-r--r-- drivers/staging/tidspbridge/rmgr/proc.c | 197
-rw-r--r-- drivers/staging/udlfb/udlfb.c | 2
-rw-r--r-- drivers/staging/vt6656/main_usb.c | 3
-rw-r--r-- drivers/staging/westbridge/astoria/api/src/cyasusb.c | 1
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 6
-rw-r--r-- drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r-- drivers/tty/Makefile | 11
-rw-r--r-- drivers/tty/n_gsm.c (renamed from drivers/char/n_gsm.c) | 5
-rw-r--r-- drivers/tty/n_hdlc.c (renamed from drivers/char/n_hdlc.c) | 0
-rw-r--r-- drivers/tty/n_r3964.c (renamed from drivers/char/n_r3964.c) | 0
-rw-r--r-- drivers/tty/n_tty.c (renamed from drivers/char/n_tty.c) | 0
-rw-r--r-- drivers/tty/pty.c (renamed from drivers/char/pty.c) | 0
-rw-r--r-- drivers/tty/sysrq.c (renamed from drivers/char/sysrq.c) | 0
-rw-r--r-- drivers/tty/tty_audit.c (renamed from drivers/char/tty_audit.c) | 0
-rw-r--r-- drivers/tty/tty_buffer.c (renamed from drivers/char/tty_buffer.c) | 14
-rw-r--r-- drivers/tty/tty_io.c (renamed from drivers/char/tty_io.c) | 0
-rw-r--r-- drivers/tty/tty_ioctl.c (renamed from drivers/char/tty_ioctl.c) | 0
-rw-r--r-- drivers/tty/tty_ldisc.c (renamed from drivers/char/tty_ldisc.c) | 49
-rw-r--r-- drivers/tty/tty_mutex.c (renamed from drivers/char/tty_mutex.c) | 0
-rw-r--r-- drivers/tty/tty_port.c (renamed from drivers/char/tty_port.c) | 0
-rw-r--r-- drivers/tty/vt/.gitignore (renamed from drivers/char/.gitignore) | 0
-rw-r--r-- drivers/tty/vt/Makefile | 34
-rw-r--r-- drivers/tty/vt/consolemap.c (renamed from drivers/char/consolemap.c) | 0
-rw-r--r-- drivers/tty/vt/cp437.uni (renamed from drivers/char/cp437.uni) | 0
-rw-r--r-- drivers/tty/vt/defkeymap.c_shipped (renamed from drivers/char/defkeymap.c_shipped) | 0
-rw-r--r-- drivers/tty/vt/defkeymap.map (renamed from drivers/char/defkeymap.map) | 0
-rw-r--r-- drivers/tty/vt/keyboard.c (renamed from drivers/char/keyboard.c) | 0
-rw-r--r-- drivers/tty/vt/selection.c (renamed from drivers/char/selection.c) | 0
-rw-r--r-- drivers/tty/vt/vc_screen.c (renamed from drivers/char/vc_screen.c) | 6
-rw-r--r-- drivers/tty/vt/vt.c (renamed from drivers/char/vt.c) | 0
-rw-r--r-- drivers/tty/vt/vt_ioctl.c (renamed from drivers/char/vt_ioctl.c) | 0
-rw-r--r-- drivers/usb/core/devio.c | 7
-rw-r--r-- drivers/usb/gadget/Kconfig | 2
-rw-r--r-- drivers/usb/gadget/goku_udc.h | 3
-rw-r--r-- drivers/usb/gadget/u_ether.c | 1
-rw-r--r-- drivers/usb/gadget/u_serial.c | 54
-rw-r--r-- drivers/usb/host/Kconfig | 2
-rw-r--r-- drivers/usb/host/ehci-mxc.c | 14
-rw-r--r-- drivers/usb/host/ohci-jz4740.c | 2
-rw-r--r-- drivers/usb/misc/iowarrior.c | 1
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r-- drivers/usb/musb/blackfin.c | 80
-rw-r--r-- drivers/usb/musb/musb_core.c | 41
-rw-r--r-- drivers/usb/musb/musb_core.h | 2
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 41
-rw-r--r-- drivers/usb/musb/musb_regs.h | 3
-rw-r--r-- drivers/usb/musb/musbhsdma.c | 14
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/usb/storage/uas.c | 5
-rw-r--r-- drivers/uwb/allocator.c | 3
-rw-r--r-- drivers/vhost/net.c | 5
-rw-r--r-- drivers/video/backlight/adp8860_bl.c | 8
-rw-r--r-- drivers/video/backlight/l4f00242t03.c | 2
-rw-r--r-- drivers/video/backlight/lms283gf05.c | 2
-rw-r--r-- drivers/video/backlight/mbp_nvidia_bl.c | 18
-rw-r--r-- drivers/video/backlight/pwm_bl.c | 7
-rw-r--r-- drivers/video/backlight/s6e63m0.c | 7
-rw-r--r-- drivers/xen/events.c | 25
-rw-r--r-- fs/bio.c | 23
-rw-r--r-- fs/cifs/TODO | 2
-rw-r--r-- fs/cifs/cifs_fs_sb.h | 6
-rw-r--r-- fs/cifs/cifsfs.c | 5
-rw-r--r-- fs/cifs/cifsglob.h | 3
-rw-r--r-- fs/cifs/cifsproto.h | 1
-rw-r--r-- fs/cifs/connect.c | 195
-rw-r--r-- fs/cifs/file.c | 72
-rw-r--r-- fs/cifs/inode.c | 1
-rw-r--r-- fs/cifs/ioctl.c | 16
-rw-r--r-- fs/cifs/misc.c | 25
-rw-r--r-- fs/ext4/ext4.h | 4
-rw-r--r-- fs/ext4/inode.c | 5
-rw-r--r-- fs/ext4/mballoc.c | 2
-rw-r--r-- fs/ext4/page-io.c | 97
-rw-r--r-- fs/ext4/super.c | 102
-rw-r--r-- fs/hugetlbfs/inode.c | 3
-rw-r--r-- fs/ioprio.c | 18
-rw-r--r-- fs/locks.c | 19
-rw-r--r-- fs/logfs/logfs.h | 2
-rw-r--r-- fs/nfsd/nfs4state.c | 16
-rw-r--r-- fs/openpromfs/inode.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.c | 7
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_sync.c | 1
-rw-r--r-- fs/xfs/xfs_filestream.c | 8
-rw-r--r-- fs/xfs/xfs_mount.c | 1
-rw-r--r-- fs/xfs/xfs_quota.h | 20
-rw-r--r-- include/asm-generic/stat.h | 14
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 4
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 79
-rw-r--r-- include/linux/atomic.h | 37
-rw-r--r-- include/linux/bio.h | 4
-rw-r--r-- include/linux/blk_types.h | 6
-rw-r--r-- include/linux/blkdev.h | 3
-rw-r--r-- include/linux/dccp.h | 23
-rw-r--r-- include/linux/drbd.h | 2
-rw-r--r-- include/linux/ethtool.h | 4
-rw-r--r-- include/linux/filter.h | 56
-rw-r--r-- include/linux/hardirq.h | 8
-rw-r--r-- include/linux/highmem.h | 1
-rw-r--r-- include/linux/i2c/adp5588.h | 15
-rw-r--r-- include/linux/if_bridge.h | 4
-rw-r--r-- include/linux/if_link.h | 28
-rw-r--r-- include/linux/if_macvlan.h | 34
-rw-r--r-- include/linux/if_vlan.h | 25
-rw-r--r-- include/linux/igmp.h | 14
-rw-r--r-- include/linux/inetdevice.h | 15
-rw-r--r-- include/linux/input.h | 4
-rw-r--r-- include/linux/iocontext.h | 1
-rw-r--r-- include/linux/ipv6.h | 2
-rw-r--r-- include/linux/jhash.h | 183
-rw-r--r-- include/linux/kernel.h | 3
-rw-r--r-- include/linux/leds-lp5521.h | 47
-rw-r--r-- include/linux/leds-lp5523.h | 47
-rw-r--r-- include/linux/leds.h | 47
-rw-r--r-- include/linux/marvell_phy.h | 1
-rw-r--r-- include/linux/mdio.h | 5
-rw-r--r-- include/linux/mmc/sh_mmcif.h | 18
-rw-r--r-- include/linux/netdevice.h | 82
-rw-r--r-- include/linux/netfilter.h | 4
-rw-r--r-- include/linux/perf_event.h | 10
-rw-r--r-- include/linux/pwm_backlight.h | 1
-rw-r--r-- include/linux/radix-tree.h | 39
-rw-r--r-- include/linux/resource.h | 1
-rw-r--r-- include/linux/rtnetlink.h | 2
-rw-r--r-- include/linux/sh_clk.h | 4
-rw-r--r-- include/linux/sh_timer.h | 1
-rw-r--r-- include/linux/skbuff.h | 3
-rw-r--r-- include/linux/snmp.h | 1
-rw-r--r-- include/linux/stmmac.h | 6
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 18
-rw-r--r-- include/linux/tty.h | 2
-rw-r--r-- include/linux/usb.h | 2
-rw-r--r-- include/linux/usb/musb.h | 2
-rw-r--r-- include/linux/usb/usbnet.h | 6
-rw-r--r-- include/linux/xfrm.h | 1
-rw-r--r-- include/net/addrconf.h | 2
-rw-r--r-- include/net/af_unix.h | 2
-rw-r--r-- include/net/caif/caif_dev.h | 4
-rw-r--r-- include/net/caif/caif_spi.h | 2
-rw-r--r-- include/net/caif/cfcnfg.h | 8
-rw-r--r-- include/net/caif/cfctrl.h | 2
-rw-r--r-- include/net/dn.h | 2
-rw-r--r-- include/net/dn_dev.h | 27
-rw-r--r-- include/net/dn_route.h | 10
-rw-r--r-- include/net/dst.h | 43
-rw-r--r-- include/net/flow.h | 2
-rw-r--r-- include/net/if_inet6.h | 3
-rw-r--r-- include/net/inet6_connection_sock.h | 3
-rw-r--r-- include/net/inet_connection_sock.h | 2
-rw-r--r-- include/net/inet_sock.h | 7
-rw-r--r-- include/net/inet_timewait_sock.h | 20
-rw-r--r-- include/net/inetpeer.h | 32
-rw-r--r-- include/net/ip.h | 10
-rw-r--r-- include/net/ip6_fib.h | 2
-rw-r--r-- include/net/ip6_route.h | 13
-rw-r--r-- include/net/ndisc.h | 3
-rw-r--r-- include/net/neighbour.h | 12
-rw-r--r-- include/net/netlink.h | 23
-rw-r--r-- include/net/netns/generic.h | 2
-rw-r--r-- include/net/route.h | 35
-rw-r--r-- include/net/rtnetlink.h | 35
-rw-r--r-- include/net/scm.h | 5
-rw-r--r-- include/net/sctp/command.h | 3
-rw-r--r-- include/net/sctp/constants.h | 14
-rw-r--r-- include/net/sctp/structs.h | 2
-rw-r--r-- include/net/snmp.h | 4
-rw-r--r-- include/net/sock.h | 105
-rw-r--r-- include/net/tcp.h | 15
-rw-r--r-- include/net/timewait_sock.h | 8
-rw-r--r-- include/net/tipc/tipc.h | 186
-rw-r--r-- include/net/tipc/tipc_bearer.h | 138
-rw-r--r-- include/net/tipc/tipc_msg.h | 207
-rw-r--r-- include/net/tipc/tipc_port.h | 101
-rw-r--r-- include/net/udp.h | 4
-rw-r--r-- include/net/x25.h | 2
-rw-r--r-- include/net/xfrm.h | 7
-rw-r--r-- include/trace/events/ext4.h | 97
-rw-r--r-- kernel/exit.c | 8
-rw-r--r-- kernel/latencytop.c | 17
-rw-r--r-- kernel/perf_event.c | 42
-rw-r--r-- kernel/printk.c | 6
-rw-r--r-- kernel/range.c | 2
-rw-r--r-- kernel/relay.c | 15
-rw-r--r-- kernel/sysctl.c | 9
-rw-r--r-- kernel/trace/blktrace.c | 4
-rw-r--r-- kernel/watchdog.c | 2
-rw-r--r-- lib/nlattr.c | 22
-rw-r--r-- lib/radix-tree.c | 83
-rw-r--r-- mm/filemap.c | 33
-rw-r--r-- mm/memcontrol.c | 16
-rw-r--r-- mm/mprotect.c | 2
-rw-r--r-- mm/vmscan.c | 2
-rw-r--r-- mm/vmstat.c | 2
-rw-r--r-- net/8021q/vlan.c | 13
-rw-r--r-- net/8021q/vlan.h | 22
-rw-r--r-- net/8021q/vlan_core.c | 4
-rw-r--r-- net/8021q/vlan_dev.c | 197
-rw-r--r-- net/8021q/vlan_netlink.c | 20
-rw-r--r-- net/8021q/vlanproc.c | 5
-rw-r--r-- net/9p/protocol.c | 33
-rw-r--r-- net/Kconfig | 5
-rw-r--r-- net/atm/br2684.c | 2
-rw-r--r-- net/atm/clip.c | 3
-rw-r--r-- net/atm/lec.c | 3
-rw-r--r-- net/ax25/af_ax25.c | 2
-rw-r--r-- net/bluetooth/Makefile | 2
-rw-r--r-- net/bridge/br.c | 4
-rw-r--r-- net/bridge/br_device.c | 2
-rw-r--r-- net/bridge/br_fdb.c | 15
-rw-r--r-- net/bridge/br_forward.c | 4
-rw-r--r-- net/bridge/br_if.c | 7
-rw-r--r-- net/bridge/br_input.c | 10
-rw-r--r-- net/bridge/br_multicast.c | 88
-rw-r--r-- net/bridge/br_netfilter.c | 49
-rw-r--r-- net/bridge/br_netlink.c | 10
-rw-r--r-- net/bridge/br_notify.c | 6
-rw-r--r-- net/bridge/br_private.h | 21
-rw-r--r-- net/bridge/br_stp_bpdu.c | 8
-rw-r--r-- net/bridge/netfilter/ebtable_broute.c | 3
-rw-r--r-- net/bridge/netfilter/ebtables.c | 11
-rw-r--r-- net/caif/Makefile | 8
-rw-r--r-- net/caif/caif_config_util.c | 13
-rw-r--r-- net/caif/caif_dev.c | 2
-rw-r--r-- net/caif/caif_socket.c | 45
-rw-r--r-- net/caif/cfcnfg.c | 17
-rw-r--r-- net/caif/cfctrl.c | 3
-rw-r--r-- net/caif/cfdbgl.c | 14
-rw-r--r-- net/caif/cfrfml.c | 2
-rw-r--r-- net/can/Makefile | 6
-rw-r--r-- net/can/bcm.c | 2
-rw-r--r-- net/ceph/Makefile | 24
-rw-r--r-- net/ceph/buffer.c | 2
-rw-r--r-- net/core/datagram.c | 2
-rw-r--r-- net/core/dev.c | 205
-rw-r--r-- net/core/dst.c | 1
-rw-r--r-- net/core/ethtool.c | 21
-rw-r--r-- net/core/filter.c | 461
-rw-r--r-- net/core/net-sysfs.c | 435
-rw-r--r-- net/core/net-sysfs.h | 4
-rw-r--r-- net/core/netpoll.c | 3
-rw-r--r-- net/core/pktgen.c | 47
-rw-r--r-- net/core/request_sock.c | 5
-rw-r--r-- net/core/rtnetlink.c | 175
-rw-r--r-- net/core/scm.c | 10
-rw-r--r-- net/core/skbuff.c | 34
-rw-r--r-- net/core/sock.c | 25
-rw-r--r-- net/core/timestamping.c | 4
-rw-r--r-- net/dccp/Makefile | 4
-rw-r--r-- net/dccp/ackvec.c | 616
-rw-r--r-- net/dccp/ackvec.h | 151
-rw-r--r-- net/dccp/ccids/ccid2.c | 143
-rw-r--r-- net/dccp/ccids/ccid2.h | 2
-rw-r--r-- net/dccp/dccp.h | 32
-rw-r--r-- net/dccp/input.c | 36
-rw-r--r-- net/dccp/ipv4.c | 13
-rw-r--r-- net/dccp/options.c | 100
-rw-r--r-- net/dccp/output.c | 22
-rw-r--r-- net/dccp/proto.c | 71
-rw-r--r-- net/dccp/qpolicy.c | 137
-rw-r--r-- net/decnet/af_decnet.c | 6
-rw-r--r-- net/decnet/dn_dev.c | 100
-rw-r--r-- net/decnet/dn_fib.c | 6
-rw-r--r-- net/decnet/dn_neigh.c | 2
-rw-r--r-- net/decnet/dn_route.c | 107
-rw-r--r-- net/decnet/dn_rules.c | 2
-rw-r--r-- net/decnet/sysctl_net_decnet.c | 4
-rw-r--r-- net/dns_resolver/Makefile | 2
-rw-r--r-- net/econet/Makefile | 2
-rw-r--r-- net/econet/af_econet.c | 93
-rw-r--r-- net/ieee802154/af_ieee802154.c | 6
-rw-r--r-- net/ipv4/af_inet.c | 18
-rw-r--r-- net/ipv4/arp.c | 31
-rw-r--r-- net/ipv4/devinet.c | 97
-rw-r--r-- net/ipv4/esp4.c | 32
-rw-r--r-- net/ipv4/fib_frontend.c | 28
-rw-r--r-- net/ipv4/fib_lookup.h | 5
-rw-r--r-- net/ipv4/fib_semantics.c | 8
-rw-r--r-- net/ipv4/fib_trie.c | 2
-rw-r--r-- net/ipv4/icmp.c | 35
-rw-r--r-- net/ipv4/igmp.c | 286
-rw-r--r-- net/ipv4/inet_connection_sock.c | 22
-rw-r--r-- net/ipv4/inet_diag.c | 27
-rw-r--r-- net/ipv4/inet_hashtables.c | 3
-rw-r--r-- net/ipv4/inetpeer.c | 167
-rw-r--r-- net/ipv4/ip_fragment.c | 2
-rw-r--r-- net/ipv4/ip_gre.c | 52
-rw-r--r-- net/ipv4/ip_output.c | 28
-rw-r--r-- net/ipv4/ipconfig.c | 32
-rw-r--r-- net/ipv4/ipip.c | 21
-rw-r--r-- net/ipv4/ipmr.c | 20
-rw-r--r-- net/ipv4/netfilter.c | 8
-rw-r--r-- net/ipv4/netfilter/Makefile | 6
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 1
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 1
-rw-r--r-- net/ipv4/netfilter/ipt_REJECT.c | 2
-rw-r--r-- net/ipv4/netfilter/nf_nat_core.c | 40
-rw-r--r-- net/ipv4/proc.c | 9
-rw-r--r-- net/ipv4/raw.c | 7
-rw-r--r-- net/ipv4/route.c | 155
-rw-r--r-- net/ipv4/syncookies.c | 15
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 13
-rw-r--r-- net/ipv4/tcp.c | 22
-rw-r--r-- net/ipv4/tcp_input.c | 33
-rw-r--r-- net/ipv4/tcp_ipv4.c | 84
-rw-r--r-- net/ipv4/tcp_minisocks.c | 65
-rw-r--r-- net/ipv4/tcp_output.c | 54
-rw-r--r-- net/ipv4/tcp_probe.c | 4
-rw-r--r-- net/ipv4/udp.c | 20
-rw-r--r-- net/ipv4/xfrm4_mode_tunnel.c | 2
-rw-r--r-- net/ipv4/xfrm4_policy.c | 47
-rw-r--r-- net/ipv6/addrconf.c | 160
-rw-r--r-- net/ipv6/af_inet6.c | 2
-rw-r--r-- net/ipv6/esp6.c | 32
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 54
-rw-r--r-- net/ipv6/ip6_tunnel.c | 9
-rw-r--r-- net/ipv6/ip6mr.c | 4
-rw-r--r-- net/ipv6/mcast.c | 77
-rw-r--r-- net/ipv6/ndisc.c | 29
-rw-r--r-- net/ipv6/netfilter.c | 6
-rw-r--r-- net/ipv6/netfilter/Makefile | 4
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/reassembly.c | 32
-rw-r--r-- net/ipv6/route.c | 123
-rw-r--r-- net/ipv6/sit.c | 17
-rw-r--r-- net/ipv6/tcp_ipv6.c | 149
-rw-r--r-- net/ipv6/udp.c | 10
-rw-r--r-- net/ipv6/xfrm6_mode_tunnel.c | 3
-rw-r--r-- net/irda/ircomm/Makefile | 4
-rw-r--r-- net/irda/irlan/Makefile | 2
-rw-r--r-- net/irda/irnet/Makefile | 2
-rw-r--r-- net/irda/irttp.c | 30
-rw-r--r-- net/l2tp/l2tp_debugfs.c | 2
-rw-r--r-- net/l2tp/l2tp_ip.c | 18
-rw-r--r-- net/lapb/Makefile | 2
-rw-r--r-- net/llc/af_llc.c | 11
-rw-r--r-- net/netfilter/core.c | 6
-rw-r--r-- net/netfilter/ipvs/Kconfig | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 6
-rw-r--r-- net/netfilter/ipvs/ip_vs_xmit.c | 42
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_proto.c | 6
-rw-r--r-- net/netfilter/xt_TEE.c | 12
-rw-r--r-- net/packet/af_packet.c | 160
-rw-r--r-- net/phonet/Makefile | 4
-rw-r--r-- net/rds/Makefile | 8
-rw-r--r-- net/rds/loop.c | 4
-rw-r--r-- net/rds/message.c | 4
-rw-r--r-- net/rds/rdma.c | 2
-rw-r--r-- net/rds/tcp.c | 6
-rw-r--r-- net/rxrpc/Makefile | 4
-rw-r--r-- net/rxrpc/ar-peer.c | 10
-rw-r--r-- net/sched/cls_basic.c | 4
-rw-r--r-- net/sched/cls_cgroup.c | 2
-rw-r--r-- net/sched/em_text.c | 3
-rw-r--r-- net/sched/sch_generic.c | 12
-rw-r--r-- net/sched/sch_teql.c | 3
-rw-r--r-- net/sctp/protocol.c | 2
-rw-r--r-- net/sctp/socket.c | 6
-rw-r--r-- net/sctp/sysctl.c | 4
-rw-r--r-- net/socket.c | 11
-rw-r--r-- net/sunrpc/auth_gss/Makefile | 4
-rw-r--r-- net/tipc/addr.c | 2
-rw-r--r-- net/tipc/bcast.c | 8
-rw-r--r-- net/tipc/bearer.c | 12
-rw-r--r-- net/tipc/bearer.h | 71
-rw-r--r-- net/tipc/cluster.c | 7
-rw-r--r-- net/tipc/config.c | 16
-rw-r--r-- net/tipc/config.h | 1
-rw-r--r-- net/tipc/core.c | 41
-rw-r--r-- net/tipc/core.h | 14
-rw-r--r-- net/tipc/discover.c | 2
-rw-r--r-- net/tipc/discover.h | 2
-rw-r--r-- net/tipc/eth_media.c | 6
-rw-r--r-- net/tipc/link.c | 14
-rw-r--r-- net/tipc/link.h | 1
-rw-r--r-- net/tipc/msg.c | 2
-rw-r--r-- net/tipc/msg.h | 168
-rw-r--r-- net/tipc/name_distr.c | 2
-rw-r--r-- net/tipc/name_table.c | 5
-rw-r--r-- net/tipc/net.c | 5
-rw-r--r-- net/tipc/node.c | 7
-rw-r--r-- net/tipc/node_subscr.c | 2
-rw-r--r-- net/tipc/port.c | 115
-rw-r--r-- net/tipc/port.h | 130
-rw-r--r-- net/tipc/socket.c | 8
-rw-r--r-- net/tipc/subscr.c | 8
-rw-r--r-- net/tipc/user_reg.c | 50
-rw-r--r-- net/tipc/user_reg.h | 3
-rw-r--r-- net/tipc/zone.c | 3
-rw-r--r-- net/unix/af_unix.c | 71
-rw-r--r-- net/unix/garbage.c | 9
-rw-r--r-- net/wanrouter/Makefile | 2
-rw-r--r-- net/x25/af_x25.c | 95
-rw-r--r-- net/x25/x25_facilities.c | 20
-rw-r--r-- net/x25/x25_in.c | 2
-rw-r--r-- net/x25/x25_link.c | 9
-rw-r--r-- net/xfrm/xfrm_hash.c | 2
-rw-r--r-- net/xfrm/xfrm_policy.c | 6
-rw-r--r-- net/xfrm/xfrm_user.c | 19
-rw-r--r-- scripts/kconfig/symbol.c | 2
-rw-r--r-- security/Kconfig | 12
-rw-r--r-- security/apparmor/lsm.c | 6
-rw-r--r-- security/apparmor/policy.c | 2
-rw-r--r-- security/commoncap.c | 2
-rw-r--r-- security/selinux/hooks.c | 12
-rw-r--r-- sound/pci/asihpi/hpi6000.c | 2
-rw-r--r-- sound/pci/asihpi/hpi6205.c | 2
-rw-r--r-- sound/pci/asihpi/hpicmn.c | 12
-rw-r--r-- sound/pci/cs46xx/dsp_spos.c | 33
-rw-r--r-- sound/pci/hda/patch_cirrus.c | 1
-rw-r--r-- sound/pci/lx6464es/lx6464es.c | 4
-rw-r--r-- sound/pci/lx6464es/lx6464es.h | 2
-rw-r--r-- sound/pci/lx6464es/lx_core.c | 2
-rw-r--r-- sound/soc/codecs/Kconfig | 3
-rw-r--r-- sound/soc/codecs/tlv320dac33.c | 36
-rw-r--r-- sound/soc/codecs/tpa6130a2.c | 6
-rw-r--r-- sound/soc/codecs/wm8900.c | 6
-rw-r--r-- sound/soc/codecs/wm_hubs.c | 2
-rw-r--r-- sound/soc/pxa/tosa.c | 2
-rw-r--r-- sound/soc/soc-core.c | 5
-rw-r--r-- sound/usb/mixer_quirks.c | 15
-rw-r--r-- sound/usb/pcm.c | 4
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 57
-rw-r--r-- tools/perf/builtin-record.c | 10
-rw-r--r-- tools/perf/builtin-top.c | 12
-rw-r--r-- tools/perf/builtin-trace.c | 209
-rw-r--r-- tools/perf/scripts/perl/bin/failed-syscalls-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-file-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rwtop-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/wakeup-latency-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/workqueue-stats-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/futex-contention-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/netdev-times-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sched-migration-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sctop-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-record | 2
-rw-r--r-- tools/perf/util/ui/util.c | 5
1054 files changed, 30234 insertions(+), 17869 deletions(-)
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 000000000000..cf63f264ce0f
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has since been rewritten since the introduction of
+	this tunable such that its meaning is deprecated.  The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure.  With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages, so using a bitshift which grows the score
+	exponentially is, thus, impossible to tune with fine granularity.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase or
+	decrease the badness() score linearly.  This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning will be emitted to the kernel log if an application uses this
+	deprecated interface.  After it is printed once, future warnings will be
+	suppressed until the kernel is rebooted.
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f630814..71cfbdc0f74d 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df28bed3..7445bf335dae 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@ designed.
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ed45e9802aa8..92e83e53148f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2ae32d..58b266bd1846 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 000000000000..c4d8d151e0fe
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found from the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 1,
+		.led_current	= 0,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 2,
+		.led_current	= 0,
+		.max_current	= 130,
+	}
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+	.led_config	= lp5521_led_config,
+	.num_channels	= ARRAY_SIZE(lp5521_led_config),
+	.clock_mode	= LP5521_CLOCK_EXT,
+	.setup_resources   = lp5521_setup,
+	.release_resources = lp5521_release,
+	.enable		   = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 000000000000..fad2feb8b7ce
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+from the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest uses always the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	},
+...
+	}, {
+		.chan_nr	= 8,
+		.led_current	= 50,
+		.max_current	= 130,
+	}
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+	.led_config	= lp5523_led_config,
+	.num_channels	= ARRAY_SIZE(lp5523_led_config),
+	.clock_mode	= LP5523_CLOCK_EXT,
+	.setup_resources   = lp5523_setup,
+	.release_resources = lp5523_release,
+	.enable		   = lp5523_enable,
+};
diff --git a/Documentation/networking/LICENSE.qlcnic b/Documentation/networking/LICENSE.qlcnic
new file mode 100644
index 000000000000..29ad4b106420
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlcnic
@@ -0,0 +1,327 @@
+Copyright (c) 2009-2010 QLogic Corporation
+QLogic Linux qlcnic NIC Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License (a copy of which is attached hereto as
+Exhibit A) published by the Free Software Foundation (version 2).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+	1. Redistribution of source code (only if applicable),
+	   must retain the above copyright notice, this list of
+	   conditions and the following disclaimer.
+
+	2. Redistribution in binary form must reproduce the above
+	   copyright notice, this list of conditions and the
+	   following disclaimer in the documentation and/or other
+	   materials provided with the distribution.
+
+	3. The name of QLogic Corporation may not be used to
+	   endorse or promote products derived from this software
+	   without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
+
+EXHIBIT A
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
118language. (Hereinafter, translation is included without limitation in
119the term "modification".) Each licensee is addressed as "you".
120
121Activities other than copying, distribution and modification are not
122covered by this License; they are outside its scope. The act of
123running the Program is not restricted, and the output from the Program
124is covered only if its contents constitute a work based on the
125Program (independent of having been made by running the Program).
126Whether that is true depends on what the Program does.
127
128 1. You may copy and distribute verbatim copies of the Program's
129source code as you receive it, in any medium, provided that you
130conspicuously and appropriately publish on each copy an appropriate
131copyright notice and disclaimer of warranty; keep intact all the
132notices that refer to this License and to the absence of any warranty;
133and give any other recipients of the Program a copy of this License
134along with the Program.
135
136You may charge a fee for the physical act of transferring a copy, and
137you may at your option offer warranty protection in exchange for a fee.
138
139 2. You may modify your copy or copies of the Program or any portion
140of it, thus forming a work based on the Program, and copy and
141distribute such modifications or work under the terms of Section 1
142above, provided that you also meet all of these conditions:
143
144 a) You must cause the modified files to carry prominent notices
145 stating that you changed the files and the date of any change.
146
147 b) You must cause any work that you distribute or publish, that in
148 whole or in part contains or is derived from the Program or any
149 part thereof, to be licensed as a whole at no charge to all third
150 parties under the terms of this License.
151
152 c) If the modified program normally reads commands interactively
153 when run, you must cause it, when started running for such
154 interactive use in the most ordinary way, to print or display an
155 announcement including an appropriate copyright notice and a
156 notice that there is no warranty (or else, saying that you provide
157 a warranty) and that users may redistribute the program under
158 these conditions, and telling the user how to view a copy of this
159 License. (Exception: if the Program itself is interactive but
160 does not normally print such an announcement, your work based on
161 the Program is not required to print an announcement.)
162
163These requirements apply to the modified work as a whole. If
164identifiable sections of that work are not derived from the Program,
165and can be reasonably considered independent and separate works in
166themselves, then this License, and its terms, do not apply to those
167sections when you distribute them as separate works. But when you
168distribute the same sections as part of a whole which is a work based
169on the Program, the distribution of the whole must be on the terms of
170this License, whose permissions for other licensees extend to the
171entire whole, and thus to each and every part regardless of who wrote it.
172
173Thus, it is not the intent of this section to claim rights or contest
174your rights to work written entirely by you; rather, the intent is to
175exercise the right to control the distribution of derivative or
176collective works based on the Program.
177
178In addition, mere aggregation of another work not based on the Program
179with the Program (or with a work based on the Program) on a volume of
180a storage or distribution medium does not bring the other work under
181the scope of this License.
182
183 3. You may copy and distribute the Program (or a work based on it,
184under Section 2) in object code or executable form under the terms of
185Sections 1 and 2 above provided that you also do one of the following:
186
187 a) Accompany it with the complete corresponding machine-readable
188 source code, which must be distributed under the terms of Sections
189 1 and 2 above on a medium customarily used for software interchange; or,
190
191 b) Accompany it with a written offer, valid for at least three
192 years, to give any third party, for a charge no more than your
193 cost of physically performing source distribution, a complete
194 machine-readable copy of the corresponding source code, to be
195 distributed under the terms of Sections 1 and 2 above on a medium
196 customarily used for software interchange; or,
197
198 c) Accompany it with the information you received as to the offer
199 to distribute corresponding source code. (This alternative is
200 allowed only for noncommercial distribution and only if you
201 received the program in object code or executable form with such
202 an offer, in accord with Subsection b above.)
203
204The source code for a work means the preferred form of the work for
205making modifications to it. For an executable work, complete source
206code means all the source code for all modules it contains, plus any
207associated interface definition files, plus the scripts used to
208control compilation and installation of the executable. However, as a
209special exception, the source code distributed need not include
210anything that is normally distributed (in either source or binary
211form) with the major components (compiler, kernel, and so on) of the
212operating system on which the executable runs, unless that component
213itself accompanies the executable.
214
215If distribution of executable or object code is made by offering
216access to copy from a designated place, then offering equivalent
217access to copy the source code from the same place counts as
218distribution of the source code, even though third parties are not
219compelled to copy the source along with the object code.
220
221 4. You may not copy, modify, sublicense, or distribute the Program
222except as expressly provided under this License. Any attempt
223otherwise to copy, modify, sublicense or distribute the Program is
224void, and will automatically terminate your rights under this License.
225However, parties who have received copies, or rights, from you under
226this License will not have their licenses terminated so long as such
227parties remain in full compliance.
228
229 5. You are not required to accept this License, since you have not
230signed it. However, nothing else grants you permission to modify or
231distribute the Program or its derivative works. These actions are
232prohibited by law if you do not accept this License. Therefore, by
233modifying or distributing the Program (or any work based on the
234Program), you indicate your acceptance of this License to do so, and
235all its terms and conditions for copying, distributing or modifying
236the Program or works based on it.
237
238 6. Each time you redistribute the Program (or any work based on the
239Program), the recipient automatically receives a license from the
240original licensor to copy, distribute or modify the Program subject to
241these terms and conditions. You may not impose any further
242restrictions on the recipients' exercise of the rights granted herein.
243You are not responsible for enforcing compliance by third parties to
244this License.
245
246 7. If, as a consequence of a court judgment or allegation of patent
247infringement or for any other reason (not limited to patent issues),
248conditions are imposed on you (whether by court order, agreement or
249otherwise) that contradict the conditions of this License, they do not
250excuse you from the conditions of this License. If you cannot
251distribute so as to satisfy simultaneously your obligations under this
252License and any other pertinent obligations, then as a consequence you
253may not distribute the Program at all. For example, if a patent
254license would not permit royalty-free redistribution of the Program by
255all those who receive copies directly or indirectly through you, then
256the only way you could satisfy both it and this License would be to
257refrain entirely from distribution of the Program.
258
259If any portion of this section is held invalid or unenforceable under
260any particular circumstance, the balance of the section is intended to
261apply and the section as a whole is intended to apply in other
262circumstances.
263
264It is not the purpose of this section to induce you to infringe any
265patents or other property right claims or to contest validity of any
266such claims; this section has the sole purpose of protecting the
267integrity of the free software distribution system, which is
268implemented by public license practices. Many people have made
269generous contributions to the wide range of software distributed
270through that system in reliance on consistent application of that
271system; it is up to the author/donor to decide if he or she is willing
272to distribute software through any other system and a licensee cannot
273impose that choice.
274
275This section is intended to make thoroughly clear what is believed to
276be a consequence of the rest of this License.
277
278 8. If the distribution and/or use of the Program is restricted in
279certain countries either by patents or by copyrighted interfaces, the
280original copyright holder who places the Program under this License
281may add an explicit geographical distribution limitation excluding
282those countries, so that distribution is permitted only in or among
283countries not thus excluded. In such case, this License incorporates
284the limitation as if written in the body of this License.
285
286 9. The Free Software Foundation may publish revised and/or new versions
287of the General Public License from time to time. Such new versions will
288be similar in spirit to the present version, but may differ in detail to
289address new problems or concerns.
290
291Each version is given a distinguishing version number. If the Program
292specifies a version number of this License which applies to it and "any
293later version", you have the option of following the terms and conditions
294either of that version or of any later version published by the Free
295Software Foundation. If the Program does not specify a version number of
296this License, you may choose any version ever published by the Free Software
297Foundation.
298
299 10. If you wish to incorporate parts of the Program into other free
300programs whose distribution conditions are different, write to the author
301to ask for permission. For software which is copyrighted by the Free
302Software Foundation, write to the Free Software Foundation; we sometimes
303make exceptions for this. Our decision will be guided by the two goals
304of preserving the free status of all derivatives of our free software and
305of promoting the sharing and reuse of software generally.
306
307 NO WARRANTY
308
309 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
310FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
311OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
312PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
313OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
314MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
315TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
316PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
317REPAIR OR CORRECTION.
318
319 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
320WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
321REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
322INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
323OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
324TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
325YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
326PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
327POSSIBILITY OF SUCH DAMAGES.
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index 271d524a4c8d..b395ca6a49f2 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -47,6 +47,26 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
47 47
48Socket options 48Socket options
49============== 49==============
50DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
51a policy ID as argument and can only be set before the connection is established
52(changes during an established connection are not supported). Currently, two policies are
53defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
54and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows passing a
55u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
56a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
57be formatted using a cmsg(3) message header filled in as follows:
58 cmsg->cmsg_level = SOL_DCCP;
59 cmsg->cmsg_type = DCCP_SCM_PRIORITY;
60 cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); /* or CMSG_LEN(4) */
61
62DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
63value is always interpreted as unbounded queue length. If different from zero,
64the interpretation of this parameter depends on the current dequeuing policy
65(see above): the "simple" policy will enforce a fixed queue size by returning
66EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
67lowest-priority packet first. The default value for this parameter is
68initialised from /proc/sys/net/dccp/default/tx_qlen.
69
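
A minimal user-space sketch of the above, assuming an already-created DCCP
socket; the numeric fallbacks below are assumptions and should be checked
against linux/dccp.h:

	#include <string.h>
	#include <stdint.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	#ifndef SOL_DCCP
	#define SOL_DCCP 269			/* assumed; verify in your headers */
	#endif
	#ifndef DCCP_SOCKOPT_QPOLICY_ID
	#define DCCP_SOCKOPT_QPOLICY_ID 16	/* assumed */
	#define DCCPQ_POLICY_PRIO 1		/* assumed */
	#define DCCP_SCM_PRIORITY 1		/* assumed */
	#endif

	/* Before connect(): select the "prio" dequeuing policy, e.g.
	 *   int p = DCCPQ_POLICY_PRIO;
	 *   setsockopt(sock, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID, &p, sizeof(p));
	 * Then send one packet with its priority as ancillary data: */
	static ssize_t dccp_send_prio(int sock, const void *buf, size_t len,
				      uint32_t prio)
	{
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
		char cbuf[CMSG_SPACE(sizeof(uint32_t))];
		struct msghdr msg = {
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= cbuf,
			.msg_controllen	= sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		memset(cbuf, 0, sizeof(cbuf));
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_DCCP;
		cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
		memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

		return sendmsg(sock, &msg, 0);
	}
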
50DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of 70DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
51service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, 71service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
52the socket will fall back to 0 (which means that no meaningful service code 72the socket will fall back to 0 (which means that no meaningful service code
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index d9271e74e488..6cb13e9e1346 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -79,7 +79,7 @@ InterruptThrottleRate
79--------------------- 79---------------------
80(not supported on Intel(R) 82542, 82543 or 82544-based adapters) 80(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
81Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, 81Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
82 4=simplified balancing) 82 4=simplified balancing)
83Default Value: 3 83Default Value: 3
84 84
85The driver can limit the amount of interrupts per second that the adapter 85The driver can limit the amount of interrupts per second that the adapter
@@ -124,8 +124,8 @@ InterruptThrottleRate is set to mode 1. In this mode, which operates
124the same as mode 3, the InterruptThrottleRate will be increased stepwise to 124the same as mode 3, the InterruptThrottleRate will be increased stepwise to
12570000 for traffic in class "Lowest latency". 12570000 for traffic in class "Lowest latency".
126 126
127In simplified mode the interrupt rate is based on the ratio of Tx and 127In simplified mode the interrupt rate is based on the ratio of TX and
128Rx traffic. If the bytes per second rate is approximately equal, the 128RX traffic. If the bytes per second rate is approximately equal, the
129interrupt rate will drop as low as 2000 interrupts per second. If the 129interrupt rate will drop as low as 2000 interrupts per second. If the
130traffic is mostly transmit or mostly receive, the interrupt rate could 130traffic is mostly transmit or mostly receive, the interrupt rate could
131be as high as 8000. 131be as high as 8000.
@@ -245,7 +245,7 @@ NOTE: Depending on the available system resources, the request for a
245TxDescriptorStep 245TxDescriptorStep
246---------------- 246----------------
247Valid Range: 1 (use every Tx Descriptor) 247Valid Range: 1 (use every Tx Descriptor)
248 4 (use every 4th Tx Descriptor) 248 4 (use every 4th Tx Descriptor)
249 249
250Default Value: 1 (use every Tx Descriptor) 250Default Value: 1 (use every Tx Descriptor)
251 251
@@ -312,7 +312,7 @@ Valid Range: 0-xxxxxxx (0=off)
312Default Value: 256 312Default Value: 256
313Usage: insmod e1000.ko copybreak=128 313Usage: insmod e1000.ko copybreak=128
314 314
315Driver copies all packets below or equaling this size to a fresh Rx 315Driver copies all packets below or equaling this size to a fresh RX
316buffer before handing it up the stack. 316buffer before handing it up the stack.
317 317
318This parameter is different than other parameters, in that it is a 318This parameter is different than other parameters, in that it is a
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
index 6aa048badf32..81a66e69a127 100644
--- a/Documentation/networking/e1000e.txt
+++ b/Documentation/networking/e1000e.txt
@@ -1,5 +1,5 @@
1Linux* Driver for Intel(R) Network Connection 1Linux* Driver for Intel(R) Network Connection
2=============================================================== 2=============================================
3 3
4Intel Gigabit Linux driver. 4Intel Gigabit Linux driver.
5Copyright(c) 1999 - 2010 Intel Corporation. 5Copyright(c) 1999 - 2010 Intel Corporation.
@@ -61,6 +61,12 @@ per second, even if more packets have come in. This reduces interrupt
61load on the system and can lower CPU utilization under heavy load, 61load on the system and can lower CPU utilization under heavy load,
62but will increase latency as packets are not processed as quickly. 62but will increase latency as packets are not processed as quickly.
63 63
64The default behaviour of the driver previously assumed a static
65InterruptThrottleRate value of 8000, providing a good fallback value for
66all traffic types, but at the cost of small packet performance and latency.
67However, the hardware can handle many more small packets per second, and
68for this reason an adaptive interrupt moderation algorithm was implemented.
69
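
For example, the throttle can be pinned or left adaptive per port at load
time (illustrative values; the parameter takes a comma-separated list, one
entry per port):

	modprobe e1000e InterruptThrottleRate=8000,3
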
64The driver has two adaptive modes (setting 1 or 3) in which 70The driver has two adaptive modes (setting 1 or 3) in which
65it dynamically adjusts the InterruptThrottleRate value based on the traffic 71it dynamically adjusts the InterruptThrottleRate value based on the traffic
66that it receives. After determining the type of incoming traffic in the last 72that it receives. After determining the type of incoming traffic in the last
@@ -86,8 +92,8 @@ InterruptThrottleRate is set to mode 1. In this mode, which operates
86the same as mode 3, the InterruptThrottleRate will be increased stepwise to 92the same as mode 3, the InterruptThrottleRate will be increased stepwise to
8770000 for traffic in class "Lowest latency". 9370000 for traffic in class "Lowest latency".
88 94
89In simplified mode the interrupt rate is based on the ratio of Tx and 95In simplified mode the interrupt rate is based on the ratio of TX and
90Rx traffic. If the bytes per second rate is approximately equal the 96RX traffic. If the bytes per second rate is approximately equal, the
91interrupt rate will drop as low as 2000 interrupts per second. If the 97interrupt rate will drop as low as 2000 interrupts per second. If the
92traffic is mostly transmit or mostly receive, the interrupt rate could 98traffic is mostly transmit or mostly receive, the interrupt rate could
93be as high as 8000. 99be as high as 8000.
@@ -177,7 +183,7 @@ Copybreak
177Valid Range: 0-xxxxxxx (0=off) 183Valid Range: 0-xxxxxxx (0=off)
178Default Value: 256 184Default Value: 256
179 185
180Driver copies all packets below or equaling this size to a fresh Rx 186Driver copies all packets below or equaling this size to a fresh RX
181buffer before handing it up the stack. 187buffer before handing it up the stack.
182 188
183This parameter is different than other parameters, in that it is a 189This parameter is different than other parameters, in that it is a
@@ -223,17 +229,17 @@ loading or enabling the driver, try disabling this feature.
223 229
224WriteProtectNVM 230WriteProtectNVM
225--------------- 231---------------
226Valid Range: 0-1 232Valid Range: 0,1
227Default Value: 1 (enabled) 233Default Value: 1
228 234
229Set the hardware to ignore all write/erase cycles to the GbE region in the 235If set to 1, configure the hardware to ignore all write/erase cycles to the
230ICHx NVM (non-volatile memory). This feature can be disabled by the 236GbE region in the ICHx NVM (in order to prevent accidental corruption of the
231WriteProtectNVM module parameter (enabled by default) only after a hardware 237NVM). This feature can be disabled by setting the parameter to 0 during initial
232reset, but the machine must be power cycled before trying to enable writes. 238driver load.
233 239NOTE: The machine must be power cycled (full off/on) when enabling NVM writes
234 Note: the kernel boot option iomem=relaxed may need to be set if the kernel 240by setting the parameter to zero. Once the NVM has been locked (via the
235config option CONFIG_STRICT_DEVMEM=y, if the root user wants to write the 241parameter at 1 when the driver loads) it cannot be unlocked except via power
236NVM from user space via ethtool. 242cycle.
237 243
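
As a sketch, NVM writes can be re-enabled at initial load time (remember
the full power cycle requirement noted above):

	modprobe e1000e WriteProtectNVM=0
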
238Additional Configurations 244Additional Configurations
239========================= 245=========================
@@ -259,7 +265,6 @@ Additional Configurations
259 - Some adapters limit Jumbo Frames sized packets to a maximum of 265 - Some adapters limit Jumbo Frames sized packets to a maximum of
260 4096 bytes and some adapters do not support Jumbo Frames. 266 4096 bytes and some adapters do not support Jumbo Frames.
261 267
262
263 Ethtool 268 Ethtool
264 ------- 269 -------
265 The driver utilizes the ethtool interface for driver configuration and 270 The driver utilizes the ethtool interface for driver configuration and
@@ -283,8 +288,7 @@ Additional Configurations
283 loaded when shutting down or rebooting the system. 288 loaded when shutting down or rebooting the system.
284 289
285 In most cases Wake On LAN is only supported on port A for multiple port 290 In most cases Wake On LAN is only supported on port A for multiple port
286 adapters. To verify if a port supports Wake on LAN run ethtool eth<X>. 291 adapters. To verify if a port supports Wake on LAN, run ethtool eth<X>.
287
288 292
289Support 293Support
290======= 294=======
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index ab2d71831892..4a5e29c19bd1 100644
--- a/Documentation/networking/igb.txt
+++ b/Documentation/networking/igb.txt
@@ -36,6 +36,7 @@ Default Value: 0
36This parameter adds support for SR-IOV. It causes the driver to spawn up to 36This parameter adds support for SR-IOV. It causes the driver to spawn up to
37max_vfs worth of virtual functions. 37max_vfs worth of virtual functions.
38 38
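
For example (7 VFs per port is the upper bound commonly cited for 82576;
treat the value as illustrative):

	modprobe igb max_vfs=7
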
39
39Additional Configurations 40Additional Configurations
40========================= 41=========================
41 42
@@ -60,9 +61,10 @@ Additional Configurations
60 Ethtool 61 Ethtool
61 ------- 62 -------
62 The driver utilizes the ethtool interface for driver configuration and 63 The driver utilizes the ethtool interface for driver configuration and
63 diagnostics, as well as displaying statistical information. 64 diagnostics, as well as displaying statistical information. The latest
65 version of Ethtool can be found at:
64 66
65 http://sourceforge.net/projects/gkernel. 67 http://ftp.kernel.org/pub/software/network/ethtool/
66 68
67 Enabling Wake on LAN* (WoL) 69 Enabling Wake on LAN* (WoL)
68 --------------------------- 70 ---------------------------
@@ -91,31 +93,6 @@ Additional Configurations
91 REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not 93 REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not
92 found, the system will fallback to MSI or to Legacy interrupts. 94 found, the system will fallback to MSI or to Legacy interrupts.
93 95
94 LRO
95 ---
96 Large Receive Offload (LRO) is a technique for increasing inbound throughput
97 of high-bandwidth network connections by reducing CPU overhead. It works by
98 aggregating multiple incoming packets from a single stream into a larger
99 buffer before they are passed higher up the networking stack, thus reducing
100 the number of packets that have to be processed. LRO combines multiple
101 Ethernet frames into a single receive in the stack, thereby potentially
102 decreasing CPU utilization for receives.
103
104 NOTE: You need to have inet_lro enabled via either the CONFIG_INET_LRO or
105 CONFIG_INET_LRO_MODULE kernel config option. Additionally, if
106 CONFIG_INET_LRO_MODULE is used, the inet_lro module needs to be loaded
107 before the igb driver.
108
109 You can verify that the driver is using LRO by looking at these counters in
110 Ethtool:
111
112 lro_aggregated - count of total packets that were combined
113 lro_flushed - counts the number of packets flushed out of LRO
114 lro_no_desc - counts the number of times an LRO descriptor was not available
115 for the LRO packet
116
117 NOTE: IPv6 and UDP are not supported by LRO.
118
119Support 96Support
120======= 97=======
121 98
diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt
index 056028138d9c..694817b17a9c 100644
--- a/Documentation/networking/igbvf.txt
+++ b/Documentation/networking/igbvf.txt
@@ -58,7 +58,9 @@ Additional Configurations
58 Ethtool 58 Ethtool
59 ------- 59 -------
60 The driver utilizes the ethtool interface for driver configuration and 60 The driver utilizes the ethtool interface for driver configuration and
61 diagnostics, as well as displaying statistical information. 61 diagnostics, as well as displaying statistical information. Ethtool
62 version 3.0 or later is required for this functionality, although we
63 strongly recommend downloading the latest version from:
62 64
63 http://sourceforge.net/projects/gkernel. 65 http://sourceforge.net/projects/gkernel.
64 66
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7165f4cb792..2193a5d124c5 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
20min_pmtu - INTEGER 20min_pmtu - INTEGER
21 default 562 - minimum discovered Path MTU 21 default 562 - minimum discovered Path MTU
22 22
23route/max_size - INTEGER
24 Maximum number of routes allowed in the kernel. Increase
25 this when using large numbers of interfaces and/or routes.
26
27neigh/default/gc_thresh3 - INTEGER
28 Maximum number of neighbor entries allowed. Increase this
29 when using large numbers of interfaces and when communicating
30 with large numbers of directly-connected peers.
31
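
As a sketch, both limits can be raised at runtime (values are illustrative
only, not recommendations):

	sysctl -w net.ipv4.route.max_size=1048576
	sysctl -w net.ipv4.neigh.default.gc_thresh3=4096
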
23mtu_expires - INTEGER 32mtu_expires - INTEGER
24 Time, in seconds, that cached PMTU information is kept. 33 Time, in seconds, that cached PMTU information is kept.
25 34
@@ -135,6 +144,7 @@ tcp_adv_win_scale - INTEGER
135 Count buffering overhead as bytes/2^tcp_adv_win_scale 144 Count buffering overhead as bytes/2^tcp_adv_win_scale
136 (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale), 145 (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
137 if it is <= 0. 146 if it is <= 0.
147 Possible values are [-31, 31], inclusive.
138 Default: 2 148 Default: 2
139 149
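
A small C sketch of the formula above, mirroring the documentation text
rather than the kernel source:

	/* overhead counted out of "bytes" of receive buffer */
	static long adv_win_overhead(long bytes, int scale)
	{
		if (scale > 0)
			return bytes >> scale;		/* bytes/2^scale */
		return bytes - (bytes >> -scale);	/* bytes - bytes/2^-scale */
	}
	/* default scale = 2: one quarter of the buffer counts as overhead */
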
140tcp_allowed_congestion_control - STRING 150tcp_allowed_congestion_control - STRING
@@ -698,10 +708,28 @@ igmp_max_memberships - INTEGER
698 Change the maximum number of multicast groups we can subscribe to. 708 Change the maximum number of multicast groups we can subscribe to.
699 Default: 20 709 Default: 20
700 710
701conf/interface/* changes special settings per interface (where "interface" is 711 Theoretical maximum value is bounded by having to send a membership
702 the name of your network interface) 712 report in a single datagram (i.e. the report can't span multiple
703conf/all/* is special, changes the settings for all interfaces 713 datagrams, or it risks confusing the switch and leaving groups you don't
714 intend to leave).
715
716 The number of supported groups 'M' is bounded by the number of group
717 report entries you can fit into a single datagram of 65535 bytes.
718
719 M = (65535 - sizeof(IP header)) / sizeof(Group record)
720
721 Group records are variable length, with a minimum of 12 bytes.
722 So net.ipv4.igmp_max_memberships should not be set higher than:
723
724 (65535 - 24) / 12 = 5459
725
726 The value 5459 assumes no IP header options, so in practice
727 this number may be lower.
728
729 conf/interface/* changes special settings per interface (where
730 "interface" is the name of your network interface)
704 731
732 conf/all/* is special, changes the settings for all interfaces
705 733
706log_martians - BOOLEAN 734log_martians - BOOLEAN
707 Log packets with impossible addresses to kernel log. 735 Log packets with impossible addresses to kernel log.
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index eeb68685c788..9ade2806d82c 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -1,107 +1,126 @@
1Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection 1Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
2======================================================================== 2========================================================================
3 3
4March 10, 2009 4Intel 10 Gigabit Linux driver.
5 5Copyright(c) 1999 - 2010 Intel Corporation.
6 6
7Contents 7Contents
8======== 8========
9 9
10- In This Release
11- Identifying Your Adapter 10- Identifying Your Adapter
12- Building and Installation
13- Additional Configurations 11- Additional Configurations
12- Performance Tuning
13- Known Issues
14- Support 14- Support
15 15
16Identifying Your Adapter
17========================
16 18
19The driver in this release is compatible with 82598 and 82599-based Intel
20Network Connections.
17 21
18In This Release 22For more information on how to identify your adapter, go to the Adapter &
19=============== 23Driver ID Guide at:
20 24
21This file describes the ixgbe Linux Base Driver for the 10 Gigabit PCI 25 http://support.intel.com/support/network/sb/CS-012904.htm
22Express Intel(R) Network Connection. This driver includes support for
23Itanium(R)2-based systems.
24 26
25For questions related to hardware requirements, refer to the documentation 27SFP+ Devices with Pluggable Optics
26supplied with your 10 Gigabit adapter. All hardware requirements listed apply 28----------------------------------
27to use with Linux.
28 29
29The following features are available in this kernel: 3082599-BASED ADAPTERS
30 - Native VLANs
31 - Channel Bonding (teaming)
32 - SNMP
33 - Generic Receive Offload
34 - Data Center Bridging
35 31
36Channel Bonding documentation can be found in the Linux kernel source: 32NOTES: If your 82599-based Intel(R) Network Adapter came with Intel optics, or
37/Documentation/networking/bonding.txt 33is an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel
34optics and/or the direct attach cables listed below.
38 35
39Ethtool, lspci, and ifconfig can be used to display device and driver 36When 82599-based SFP+ devices are connected back to back, they should be set to
40specific information. 37the same Speed setting via Ethtool. Results may vary if you mix speed settings.
3882598-based adapters support all passive direct attach cables that comply
39with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
40cables are not supported.
41 41
42Supplier Type Part Numbers
42 43
43Identifying Your Adapter 44SR Modules
44======================== 45Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
46Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDDZ-IN1
47Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
48LR Modules
49Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
50Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDDZ-IN1
51Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
45 52
46This driver supports devices based on the 82598 controller and the 82599 53The following is a list of 3rd party SFP+ modules and direct attach cables that
47controller. 54have received some testing. Not all modules are applicable to all devices.
48 55
49For specific information on identifying which adapter you have, please visit: 56Supplier Type Part Numbers
50 57
51 http://support.intel.com/support/network/sb/CS-008441.htm 58Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
59Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
60Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
52 61
62Finisar DUAL RATE 1G/10G SFP+ SR (No Bail) FTLX8571D3QCV-IT
63Avago DUAL RATE 1G/10G SFP+ SR (No Bail) AFBR-703SDZ-IN1
64Finisar DUAL RATE 1G/10G SFP+ LR (No Bail) FTLX1471D3QCV-IT
65Avago DUAL RATE 1G/10G SFP+ LR (No Bail) AFCT-701SDZ-IN1
66Finisar 1000BASE-T SFP FCLF8522P2BTL
67Avago 1000BASE-T SFP ABCU-5710RZ
53 68
54Building and Installation 6982599-based adapters support all passive and active limiting direct attach
55========================= 70cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
56 71
57select m for "Intel(R) 10GbE PCI Express adapters support" located at: 72Laser turns off for SFP+ when ifconfig down
58 Location: 73-------------------------------------------
59 -> Device Drivers 74"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
60 -> Network device support (NETDEVICES [=y]) 75"ifconfig up" turns on the laser.
61 -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
62 76
631. make modules & make modules_install
64 77
652. Load the module: 7882598-BASED ADAPTERS
66 79
67# modprobe ixgbe 80NOTES for 82598-Based Adapters:
81- Intel(R) Network Adapters that support removable optical modules only support
82 their original module type (i.e., the Intel(R) 10 Gigabit SR Dual Port
83 Express Module only supports SR optical modules). If you plug in a different
84 type of module, the driver will not load.
85- Hot Swapping/hot plugging optical modules is not supported.
86- Only single speed, 10 gigabit modules are supported.
87- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
88 types are not supported. Please see your system documentation for details.
68 89
69 The insmod command can be used if the full 90The following is a list of 3rd party SFP+ modules and direct attach cables that
70 path to the driver module is specified. For example: 91have received some testing. Not all modules are applicable to all devices.
71 92
72 insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgbe/ixgbe.ko 93Supplier Type Part Numbers
73 94
74 With 2.6 based kernels also make sure that older ixgbe drivers are 95Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
75 removed from the kernel, before loading the new module: 96Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
97Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
76 98
77 rmmod ixgbe; modprobe ixgbe 9982598-based adapters support all passive direct attach cables that comply
100with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
101cables are not supported.
78 102
793. Assign an IP address to the interface by entering the following, where
80 x is the interface number:
81 103
82 ifconfig ethx <IP_address> 104Flow Control
105------------
106Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
107receiving and transmitting pause frames for ixgbe. When TX is enabled, PAUSE
108frames are generated when the receive packet buffer crosses a predefined
109threshold. When RX is enabled, the transmit unit will halt for the time delay
110specified when a PAUSE frame is received.
83 111
844. Verify that the interface works. Enter the following, where <IP_address> 112Flow Control is enabled by default. If you want to disable a flow control
85 is the IP address for another machine on the same subnet as the interface 113capable link partner, use Ethtool:
86 that is being tested:
87 114
88 ping <IP_address> 115 ethtool -A eth? autoneg off rx off tx off
89 116
117NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
118behavior is changed to off. Flow control in 1 gig mode on these devices can
119lead to Tx hangs.
90 120
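
The current pause parameters can be queried, and flow control re-enabled,
along these lines (interface name assumed):

	ethtool -a eth0
	ethtool -A eth0 autoneg off rx on tx on
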
91Additional Configurations 121Additional Configurations
92========================= 122=========================
93 123
94 Viewing Link Messages
95 ---------------------
96 Link messages will not be displayed to the console if the distribution is
97 restricting system messages. In order to see network driver link messages on
98 your console, set dmesg to eight by entering the following:
99
100 dmesg -n 8
101
102 NOTE: This setting is not saved across reboots.
103
104
105 Jumbo Frames 124 Jumbo Frames
106 ------------ 125 ------------
107 The driver supports Jumbo Frames for all adapters. Jumbo Frames support is 126 The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
@@ -123,13 +142,8 @@ Additional Configurations
123 other protocols besides TCP. It's also safe to use with configurations that 142 other protocols besides TCP. It's also safe to use with configurations that
124 are problematic for LRO, namely bridging and iSCSI. 143 are problematic for LRO, namely bridging and iSCSI.
125 144
126 GRO is enabled by default in the driver. Future versions of ethtool will
127 support disabling and re-enabling GRO on the fly.
128
129
130 Data Center Bridging, aka DCB 145 Data Center Bridging, aka DCB
131 ----------------------------- 146 -----------------------------
132
133 DCB is a configuration Quality of Service implementation in hardware. 147 DCB is a configuration Quality of Service implementation in hardware.
134 It uses the VLAN priority tag (802.1p) to filter traffic. That means 148 It uses the VLAN priority tag (802.1p) to filter traffic. That means
135 that there are 8 different priorities that traffic can be filtered into. 149 that there are 8 different priorities that traffic can be filtered into.
@@ -163,24 +177,71 @@ Additional Configurations
163 177
164 http://e1000.sf.net 178 http://e1000.sf.net
165 179
166
167 Ethtool 180 Ethtool
168 ------- 181 -------
169 The driver utilizes the ethtool interface for driver configuration and 182 The driver utilizes the ethtool interface for driver configuration and
170 diagnostics, as well as displaying statistical information. Ethtool 183 diagnostics, as well as displaying statistical information. The latest
171 version 3.0 or later is required for this functionality. 184 Ethtool version is required for this functionality.
172 185
173 The latest release of ethtool can be found from 186 The latest release of ethtool can be found from
174 http://sourceforge.net/projects/gkernel. 187 http://sourceforge.net/projects/gkernel.
175 188
176 189 FCoE
177 NAPI
178 ---- 190 ----
191 This release of the ixgbe driver contains new code to enable users to use
192 Fibre Channel over Ethernet (FCoE) and Data Center Bridging (DCB)
193 functionality that is supported by the 82598-based hardware. This code has
194 no default effect on the regular driver operation, and configuring DCB and
195 FCoE is outside the scope of this driver README. Refer to
196 http://www.open-fcoe.org/ for FCoE project information and contact
197 e1000-eedc@lists.sourceforge.net for DCB information.
198
199 MAC and VLAN anti-spoofing feature
200 ----------------------------------
201 When a malicious driver attempts to send a spoofed packet, it is dropped by
202 the hardware and not transmitted. An interrupt is sent to the PF driver
203 notifying it of the spoof attempt.
204
205 When a spoofed packet is detected the PF driver will send the following
206 message to the system log (displayed by the "dmesg" command):
207
208 Spoof event(s) detected on VF (n)
209
210 Where n=the VF that attempted to do the spoofing.
211
212
213Performance Tuning
214==================
215
216An excellent article on performance tuning can be found at:
217
218http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
219
220
221Known Issues
222============
223
224 Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
225 Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM
226 -----------------------------------------------------------------------------
227 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
228 includes traditional PCIe devices, as well as SR-IOV-capable devices using
229 Intel 82576-based and 82599-based controllers.
230
231 While direct assignment of a PCIe device or an SR-IOV Virtual Function (VF)
232 to a Linux-based VM running 2.6.32 or later kernel works fine, there is a
233 known issue with Microsoft Windows Server 2008 VM that results in a "yellow
234 bang" error. This problem is within the KVM VMM itself, not the Intel driver,
235 or the SR-IOV logic of the VMM, but rather that KVM emulates an older CPU
236 model for the guests, and this older CPU model does not support MSI-X
237 interrupts, which is a requirement for Intel SR-IOV.
179 238
180 NAPI (Rx polling mode) is supported in the ixgbe driver. NAPI is enabled 239 If you wish to use the Intel 82576 or 82599-based controllers in SR-IOV mode
181 by default in the driver. 240 with KVM and a Microsoft Windows Server 2008 guest try the following
241 workaround. The workaround is to tell KVM to emulate a different model of CPU
242 when using qemu to create the KVM guest:
182 243
183 See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI. 244 "-cpu qemu64,model=13"
184 245
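
A hedged invocation sketch; the VF PCI address (01:00.0) and the pci-assign
device-assignment syntax are assumptions that depend on your qemu-kvm
version:

	qemu-system-x86_64 -m 2048 -cpu qemu64,model=13 \
		-device pci-assign,host=01:00.0 windows2008.img
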
185 246
186Support 247Support
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 21dd5d15b6b4..5a91a41fa946 100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -35,10 +35,6 @@ Driver ID Guide at:
35Known Issues/Troubleshooting 35Known Issues/Troubleshooting
36============================ 36============================
37 37
38 Unloading Physical Function (PF) Driver Causes System Reboots When VM is
39 Running and VF is Loaded on the VM
40 ------------------------------------------------------------------------
41 Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
42 38
43Support 39Support
44======= 40=======
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 7ee770b5ef5f..80a7a3454902 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
7(Synopsys IP blocks); it has been fully tested on STLinux platforms. 7(Synopsys IP blocks); it has been fully tested on STLinux platforms.
8 8
9Currently this network device driver is for all STM embedded MAC/GMAC 9Currently this network device driver is for all STM embedded MAC/GMAC
10(7xxx SoCs). 10(7xxx SoCs). Other platforms have started using it, e.g. ARM SPEAr.
11 11
12DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100 12DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
13Universal version 4.0 have been used for developing the first code 13Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
95driver's Header file in include/linux directory. 95driver's Header file in include/linux directory.
96 96
97struct plat_stmmacenet_data { 97struct plat_stmmacenet_data {
98 int bus_id; 98 int bus_id;
99 int pbl; 99 int pbl;
100 int has_gmac; 100 int clk_csr;
101 int has_gmac;
102 int enh_desc;
103 int tx_coe;
104 int bugged_jumbo;
105 int pmt;
101 void (*fix_mac_speed)(void *priv, unsigned int speed); 106 void (*fix_mac_speed)(void *priv, unsigned int speed);
102 void (*bus_setup)(unsigned long ioaddr); 107 void (*bus_setup)(unsigned long ioaddr);
103#ifdef CONFIG_STM_DRIVERS 108#ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@ Where:
114 registers (on STM platforms); 119 registers (on STM platforms);
115- has_gmac: GMAC core is on board (get it at run-time in the next step); 120- has_gmac: GMAC core is on board (get it at run-time in the next step);
116- bus_id: bus identifier. 121- bus_id: bus identifier.
122- tx_coe: core is able to perform the tx csum in HW.
123- enh_desc: if set, the MAC will use the enhanced descriptor structure.
124- clk_csr: CSR Clock range selection.
125- bugged_jumbo: some HWs are not able to perform the csum in HW for
126 over-sized frames due to limited buffer sizes. When this flag
127 is set, the csum will be done in SW on jumbo frames.
117 128
118struct plat_stmmacphy_data { 129struct plat_stmmacphy_data {
119 int bus_id; 130 int bus_id;
@@ -131,13 +142,28 @@ Where:
131- interface: physical MII interface mode; 142- interface: physical MII interface mode;
132- phy_reset: hook to reset HW function. 143- phy_reset: hook to reset HW function.
133 144
145SOURCES:
146- Kconfig
147- Makefile
148- stmmac_main.c: main network device driver;
149- stmmac_mdio.c: mdio functions;
150- stmmac_ethtool.c: ethtool support;
151- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
152 Only tested on ST40-based platforms.
153- stmmac.h: private driver structure;
154- common.h: common definitions and VFTs;
155- descs.h: descriptor structure definitions;
156- dwmac1000_core.c: GMAC core functions;
157- dwmac1000_dma.c: dma functions for the GMAC chip;
158- dwmac1000.h: specific header file for the GMAC;
159- dwmac100_core.c: MAC 100 core and dma code;
160- dwmac100_dma.c: dma functions for the MAC chip;
161- dwmac100.h: specific header file for the MAC;
162- dwmac_lib.c: generic DMA functions shared among chips;
163- enh_desc.c: functions for handling enhanced descriptors;
164- norm_desc.c: functions for handling normal descriptors.
165
134TODO: 166TODO:
135- Continue to make the driver more generic and suitable for other Synopsys 167- XGMAC controller is not supported.
136 Ethernet controllers used on other architectures (i.e. ARM).
137- 10G controllers are not supported.
138- MAC uses Normal descriptors and GMAC uses enhanced ones.
139 This is a limit that should be reviewed. MAC could want to
140 use the enhanced structure.
141- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
142- Review the timer optimisation code to use an embedded device that seems to be 168- Review the timer optimisation code to use an embedded device that seems to be
143 available in new chip generations. 169 available in new chip generations.
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38be98f4..19f8278c3854 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
21To quote Linux Weekly News: 21To quote Linux Weekly News:
22 22
23 There are a number of red-black trees in use in the kernel. 23 There are a number of red-black trees in use in the kernel.
24 The anticipatory, deadline, and CFQ I/O schedulers all employ 24 The deadline and CFQ I/O schedulers employ rbtrees to
25 rbtrees to track requests; the packet CD/DVD driver does the same. 25 track requests; the packet CD/DVD driver does the same.
26 The high-resolution timer code uses an rbtree to organize outstanding 26 The high-resolution timer code uses an rbtree to organize outstanding
27 timer requests. The ext3 filesystem tracks directory entries in a 27 timer requests. The ext3 filesystem tracks directory entries in a
28 red-black tree. Virtual memory areas (VMAs) are tracked with red-black 28 red-black tree. Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa23486..209e1584c3dc 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
28- core_uses_pid 28- core_uses_pid
29- ctrl-alt-del 29- ctrl-alt-del
30- dentry-state 30- dentry-state
31- dmesg_restrict
31- domainname 32- domainname
32- hostname 33- hostname
33- hotplug 34- hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
213 214
214============================================================== 215==============================================================
215 216
217dmesg_restrict:
218
219This toggle indicates whether unprivileged users are prevented from using
220dmesg(8) to view messages from the kernel's log buffer. When
221dmesg_restrict is set to (0), there are no restrictions. When
222dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
223dmesg(8).
224
225The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
226value of dmesg_restrict.
227
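
For example, to restrict dmesg(8) at runtime (runtime only; persisting the
setting belongs in /etc/sysctl.conf):

	echo 1 > /proc/sys/kernel/dmesg_restrict
	# equivalently:
	sysctl -w kernel.dmesg_restrict=1
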
228==============================================================
229
216domainname & hostname: 230domainname & hostname:
217 231
218These files can be used to set the NIS/YP domainname and the 232These files can be used to set the NIS/YP domainname and the
diff --git a/MAINTAINERS b/MAINTAINERS
index 4d8bde32a26b..f16ce8f46934 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
161L: linux-serial@vger.kernel.org 161L: linux-serial@vger.kernel.org
162W: http://serial.sourceforge.net 162W: http://serial.sourceforge.net
163S: Maintained 163S: Maintained
164T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 164T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
165F: drivers/serial/8250* 165F: drivers/serial/8250*
166F: include/linux/serial_8250.h 166F: include/linux/serial_8250.h
167 167
@@ -1365,7 +1365,7 @@ F: include/net/bluetooth/
1365 1365
1366BONDING DRIVER 1366BONDING DRIVER
1367M: Jay Vosburgh <fubar@us.ibm.com> 1367M: Jay Vosburgh <fubar@us.ibm.com>
1368L: bonding-devel@lists.sourceforge.net 1368L: netdev@vger.kernel.org
1369W: http://sourceforge.net/projects/bonding/ 1369W: http://sourceforge.net/projects/bonding/
1370S: Supported 1370S: Supported
1371F: drivers/net/bonding/ 1371F: drivers/net/bonding/
@@ -1835,6 +1835,13 @@ W: http://www.chelsio.com
1835S: Supported 1835S: Supported
1836F: drivers/net/cxgb4vf/ 1836F: drivers/net/cxgb4vf/
1837 1837
1838STMMAC ETHERNET DRIVER
1839M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
1840L: netdev@vger.kernel.org
1841W: http://www.stlinux.com
1842S: Supported
1843F: drivers/net/stmmac/
1844
1838CYBERPRO FB DRIVER 1845CYBERPRO FB DRIVER
1839M: Russell King <linux@arm.linux.org.uk> 1846M: Russell King <linux@arm.linux.org.uk>
1840L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1847L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3114,6 +3121,8 @@ M: Alex Duyck <alexander.h.duyck@intel.com>
3114M: John Ronciak <john.ronciak@intel.com> 3121M: John Ronciak <john.ronciak@intel.com>
3115L: e1000-devel@lists.sourceforge.net 3122L: e1000-devel@lists.sourceforge.net
3116W: http://e1000.sourceforge.net/ 3123W: http://e1000.sourceforge.net/
3124T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
3125T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
3117S: Supported 3126S: Supported
3118F: Documentation/networking/e100.txt 3127F: Documentation/networking/e100.txt
3119F: Documentation/networking/e1000.txt 3128F: Documentation/networking/e1000.txt
@@ -5682,7 +5691,7 @@ S: Maintained
5682 5691
5683STAGING SUBSYSTEM 5692STAGING SUBSYSTEM
5684M: Greg Kroah-Hartman <gregkh@suse.de> 5693M: Greg Kroah-Hartman <gregkh@suse.de>
5685T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git 5694T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
5686L: devel@driverdev.osuosl.org 5695L: devel@driverdev.osuosl.org
5687S: Maintained 5696S: Maintained
5688F: drivers/staging/ 5697F: drivers/staging/
@@ -5916,7 +5925,7 @@ S: Maintained
5916TTY LAYER 5925TTY LAYER
5917M: Greg Kroah-Hartman <gregkh@suse.de> 5926M: Greg Kroah-Hartman <gregkh@suse.de>
5918S: Maintained 5927S: Maintained
5919T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 5928T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
5920F: drivers/char/tty_* 5929F: drivers/char/tty_*
5921F: drivers/serial/serial_core.c 5930F: drivers/serial/serial_core.c
5922F: include/linux/serial_core.h 5931F: include/linux/serial_core.h
@@ -6239,7 +6248,7 @@ USB SUBSYSTEM
6239M: Greg Kroah-Hartman <gregkh@suse.de> 6248M: Greg Kroah-Hartman <gregkh@suse.de>
6240L: linux-usb@vger.kernel.org 6249L: linux-usb@vger.kernel.org
6241W: http://www.linux-usb.org 6250W: http://www.linux-usb.org
6242T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ 6251T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
6243S: Supported 6252S: Supported
6244F: Documentation/usb/ 6253F: Documentation/usb/
6245F: drivers/net/usb/ 6254F: drivers/net/usb/
@@ -6604,14 +6613,14 @@ F: drivers/platform/x86
6604 6613
6605XEN PCI SUBSYSTEM 6614XEN PCI SUBSYSTEM
6606M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 6615M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
6607L: xen-devel@lists.xensource.com 6616L: xen-devel@lists.xensource.com (moderated for non-subscribers)
6608S: Supported 6617S: Supported
6609F: arch/x86/pci/*xen* 6618F: arch/x86/pci/*xen*
6610F: drivers/pci/*xen* 6619F: drivers/pci/*xen*
6611 6620
6612XEN SWIOTLB SUBSYSTEM 6621XEN SWIOTLB SUBSYSTEM
6613M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 6622M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
6614L: xen-devel@lists.xensource.com 6623L: xen-devel@lists.xensource.com (moderated for non-subscribers)
6615S: Supported 6624S: Supported
6616F: arch/x86/xen/*swiotlb* 6625F: arch/x86/xen/*swiotlb*
6617F: drivers/xen/*swiotlb* 6626F: drivers/xen/*swiotlb*
@@ -6619,7 +6628,7 @@ F: drivers/xen/*swiotlb*
6619XEN HYPERVISOR INTERFACE 6628XEN HYPERVISOR INTERFACE
6620M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 6629M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
6621M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 6630M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
6622L: xen-devel@lists.xen.org 6631L: xen-devel@lists.xensource.com (moderated for non-subscribers)
6623L: virtualization@lists.osdl.org 6632L: virtualization@lists.osdl.org
6624S: Supported 6633S: Supported
6625F: arch/x86/xen/ 6634F: arch/x86/xen/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a19a5266d5fc..8ae3d48d504c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
6 select HAVE_MEMBLOCK 6 select HAVE_MEMBLOCK
7 select RTC_LIB 7 select RTC_LIB
8 select SYS_SUPPORTS_APM_EMULATION 8 select SYS_SUPPORTS_APM_EMULATION
9 select GENERIC_ATOMIC64 if (!CPU_32v6K) 9 select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
10 select HAVE_OPROFILE if (HAVE_PERF_EVENTS) 10 select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
11 select HAVE_ARCH_KGDB 11 select HAVE_ARCH_KGDB
12 select HAVE_KPROBES if (!XIP_KERNEL) 12 select HAVE_KPROBES if (!XIP_KERNEL)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359160eb..772f95f1aecd 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
251 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 251 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
252 252
253 /* 253 /*
254 * Set priority on all interrupts. 254 * Set priority on all global interrupts.
255 */ 255 */
256 for (i = 0; i < max_irq; i += 4) 256 for (i = 32; i < max_irq; i += 4)
257 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); 257 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
258 258
259 /* 259 /*
260 * Disable all interrupts. 260 * Disable all interrupts. Leave the PPI and SGIs alone
261 * as these enables are banked registers.
261 */ 262 */
262 for (i = 0; i < max_irq; i += 32) 263 for (i = 32; i < max_irq; i += 32)
263 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); 264 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
264 265
265 /* 266 /*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
277 278
278void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) 279void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
279{ 280{
281 void __iomem *dist_base;
282 int i;
283
280 if (gic_nr >= MAX_GIC_NR) 284 if (gic_nr >= MAX_GIC_NR)
281 BUG(); 285 BUG();
282 286
287 dist_base = gic_data[gic_nr].dist_base;
288 BUG_ON(!dist_base);
289
283 gic_data[gic_nr].cpu_base = base; 290 gic_data[gic_nr].cpu_base = base;
284 291
292 /*
293 * Deal with the banked PPI and SGI interrupts - disable all
294 * PPI interrupts, ensure all SGI interrupts are enabled.
295 */
296 writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
297 writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
298
299 /*
300 * Set priority on PPI and SGI interrupts
301 */
302 for (i = 0; i < 32; i += 4)
303 writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
304
285 writel(0xf0, base + GIC_CPU_PRIMASK); 305 writel(0xf0, base + GIC_CPU_PRIMASK);
286 writel(1, base + GIC_CPU_CTRL); 306 writel(1, base + GIC_CPU_CTRL);
287} 307}
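The two hunks above hinge on the GIC's banked registers: for interrupt IDs 0-31 (the SGIs and PPIs), the distributor's enable and priority registers decode to a private per-CPU copy at the same address, so boot-time distributor setup now starts at ID 32 and gic_cpu_init() programs the banked copies on whichever CPU runs it. One practical consequence, as a hedged sketch (PPI 29, the Cortex-A9 private timer, is used for illustration only): a banked interrupt must be enabled on every core, typically from each CPU's bring-up path.

/* Sketch: enable a banked per-CPU interrupt (PPI). Writing the same
 * distributor register on another core would enable its private copy,
 * not this one's. ID 29 (Cortex-A9 private timer) is illustrative. */
#define TWD_PPI	29

static void enable_local_timer_ppi(void __iomem *dist_base)
{
	writel(1 << TWD_PPI, dist_base + GIC_DIST_ENABLE_SET);
}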
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7ebd..21fa272301f8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
75 IT8152_PD_IRQ(1) USB (USBR) 75 IT8152_PD_IRQ(1) USB (USBR)
76 IT8152_PD_IRQ(0) Audio controller (ACR) 76 IT8152_PD_IRQ(0) Audio controller (ACR)
77 */ 77 */
78#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) 78#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
79 79
80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ 80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
81#define IT8152_LD_IRQ_COUNT 9 81#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0c241b..21e3a4ab3b8c 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
748 breakpoint_handler(addr, regs); 748 breakpoint_handler(addr, regs);
749 break; 749 break;
750 case ARM_ENTRY_ASYNC_WATCHPOINT: 750 case ARM_ENTRY_ASYNC_WATCHPOINT:
751 WARN_ON("Asynchronous watchpoint exception taken. " 751 WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
752 "Debugging results may be unreliable");
753 case ARM_ENTRY_SYNC_WATCHPOINT: 752 case ARM_ENTRY_SYNC_WATCHPOINT:
754 watchpoint_handler(addr, regs); 753 watchpoint_handler(addr, regs);
755 break; 754 break;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1467e6..07a50357492a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1749static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, 1749static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1750 enum armv7_counters counter) 1750 enum armv7_counters counter)
1751{ 1751{
1752 int ret; 1752 int ret = 0;
1753 1753
1754 if (counter == ARMV7_CYCLE_COUNTER) 1754 if (counter == ARMV7_CYCLE_COUNTER)
1755 ret = pmnc & ARMV7_FLAG_C; 1755 ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411e47fd..c2e112e1a05f 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
28 28
29 /* only go to a higher address on the stack */ 29 /* only go to a higher address on the stack */
30 low = frame->sp; 30 low = frame->sp;
31 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 31 high = ALIGN(low, THREAD_SIZE);
32 32
33 /* check current frame pointer is within bounds */ 33 /* check current frame pointer is within bounds */
34 if (fp < (low + 12) || fp + 4 >= high) 34 if (fp < (low + 12) || fp + 4 >= high)
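The bound fix above is worth spelling out: kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, so ALIGN(sp, THREAD_SIZE) already rounds the stack pointer up to the top of the current stack; the old "+ THREAD_SIZE" pointed one whole stack higher and let unwind_frame() accept frame pointers that were off the task's stack entirely. A worked check of the arithmetic (userspace, with the kernel's ALIGN macro inlined; the sp value is illustrative):

#include <stdio.h>

#define THREAD_SIZE 8192UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long sp = 0xc0001e40UL;	/* illustrative sp inside an 8 KiB stack */

	printf("stack top (fixed): %#lx\n", ALIGN(sp, THREAD_SIZE));			/* 0xc0002000 */
	printf("old bound (buggy): %#lx\n", ALIGN(sp, THREAD_SIZE) + THREAD_SIZE);	/* 0xc0004000 */
	return 0;
}

The identical bound change appears in arch/arm/kernel/unwind.c further down.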
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d59aa31..446aee97436f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) 53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
54{ 54{
55#ifdef CONFIG_KALLSYMS 55#ifdef CONFIG_KALLSYMS
56 char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN]; 56 printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
57 sprint_symbol(sym1, where);
58 sprint_symbol(sym2, from);
59 printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
60#else 57#else
61 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); 58 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
62#endif 59#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a161765f6d5..d2cb0b3c9872 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
279 279
280 /* only go to a higher address on the stack */ 280 /* only go to a higher address on the stack */
281 low = frame->sp; 281 low = frame->sp;
282 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 282 high = ALIGN(low, THREAD_SIZE);
283 283
284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, 284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
285 frame->pc, frame->lr, frame->sp); 285 frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d3f3b1..5e31b2b25da9 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
1/* 1/**
2 * arch/arm/mach-ep93xx/include/mach/dma.h 2 * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
3 *
4 * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
5 * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
6 * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
7 * engine.
8 *
9 * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
10 *
3 */ 11 */
4 12
5#ifndef __ASM_ARCH_DMA_H 13#ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
8#include <linux/list.h> 16#include <linux/list.h>
9#include <linux/types.h> 17#include <linux/types.h>
10 18
19/**
20 * struct ep93xx_dma_buffer - Information about a buffer to be transferred
21 * using the DMA M2P engine
22 *
23 * @list: Entry in DMA buffer list
24 * @bus_addr: Physical address of the buffer
25 * @size: Size of the buffer in bytes
26 */
11struct ep93xx_dma_buffer { 27struct ep93xx_dma_buffer {
12 struct list_head list; 28 struct list_head list;
13 u32 bus_addr; 29 u32 bus_addr;
14 u16 size; 30 u16 size;
15}; 31};
16 32
33/**
34 * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
35 *
36 * @name: Unique name for this client
37 * @flags: Client flags
38 * @cookie: User data to pass to callback functions
 39 * @buffer_started: Non-NULL function to call when a transfer is started.
40 * The arguments are the user data cookie and the DMA
41 * buffer which is starting.
 42 * @buffer_finished: Non-NULL function to call when a transfer is completed.
43 * The arguments are the user data cookie, the DMA buffer
44 * which has completed, and a boolean flag indicating if
45 * the transfer had an error.
46 */
17struct ep93xx_dma_m2p_client { 47struct ep93xx_dma_m2p_client {
18 char *name; 48 char *name;
19 u8 flags; 49 u8 flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
24 struct ep93xx_dma_buffer *buf, 54 struct ep93xx_dma_buffer *buf,
25 int bytes, int error); 55 int bytes, int error);
26 56
27 /* Internal to the DMA code. */ 57 /* private: Internal use only */
28 void *channel; 58 void *channel;
29}; 59};
30 60
61/* DMA M2P ports */
31#define EP93XX_DMA_M2P_PORT_I2S1 0x00 62#define EP93XX_DMA_M2P_PORT_I2S1 0x00
32#define EP93XX_DMA_M2P_PORT_I2S2 0x01 63#define EP93XX_DMA_M2P_PORT_I2S2 0x01
33#define EP93XX_DMA_M2P_PORT_AAC1 0x02 64#define EP93XX_DMA_M2P_PORT_AAC1 0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
39#define EP93XX_DMA_M2P_PORT_UART3 0x08 70#define EP93XX_DMA_M2P_PORT_UART3 0x08
40#define EP93XX_DMA_M2P_PORT_IRDA 0x09 71#define EP93XX_DMA_M2P_PORT_IRDA 0x09
41#define EP93XX_DMA_M2P_PORT_MASK 0x0f 72#define EP93XX_DMA_M2P_PORT_MASK 0x0f
42#define EP93XX_DMA_M2P_TX 0x00
43#define EP93XX_DMA_M2P_RX 0x10
44#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20
45#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40
46#define EP93XX_DMA_M2P_ERROR_MASK 0x60
47 73
48int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); 74/* DMA M2P client flags */
75#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
76#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
77
78/*
79 * DMA M2P client error handling flags. See the EP93xx users guide
80 * documentation on the DMA M2P CONTROL register for more details
81 */
82#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
83#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
84#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
85
86/**
87 * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
88 * subsystem
89 *
90 * @m2p: Client information to register
 91 * Returns 0 on success
92 *
93 * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
94 * client
95 */
96int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
97
98/**
99 * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
100 * subsystem
101 *
102 * @m2p: Client to unregister
103 *
104 * Any transfers currently in progress will be completed in hardware, but
105 * ignored in software.
106 */
49void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); 107void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
108
109/**
110 * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
111 *
112 * @m2p: DMA Client to submit the transfer on
113 * @buf: DMA Buffer to submit
114 *
115 * If the current or next transfer positions are free on the M2P client then
116 * the transfer is started immediately. If not, the transfer is added to the
117 * list of pending transfers. This function must not be called from the
118 * buffer_finished callback for an M2P channel.
119 *
120 */
50void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, 121void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
51 struct ep93xx_dma_buffer *buf); 122 struct ep93xx_dma_buffer *buf);
123
124/**
125 * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
126 * for an M2P channel
127 *
128 * @m2p: DMA Client to submit the transfer on
129 * @buf: DMA Buffer to submit
130 *
131 * This function must only be called from the buffer_finished callback for an
132 * M2P channel. It is commonly used to add the next transfer in a chained list
133 * of DMA transfers.
134 */
52void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, 135void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
53 struct ep93xx_dma_buffer *buf); 136 struct ep93xx_dma_buffer *buf);
137
138/**
139 * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
140 *
141 * @m2p: DMA client to flush transfers on
142 *
143 * Any transfers currently in progress will be completed in hardware, but
144 * ignored in software.
145 *
146 */
54void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); 147void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
55 148
56#endif /* __ASM_ARCH_DMA_H */ 149#endif /* __ASM_ARCH_DMA_H */
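Since the header now documents the whole M2P client API, a minimal client is easy to sketch. Everything named my_* below is illustrative, and the callback signatures are inferred from the kerneldoc above rather than taken from a real driver (sound/soc/ep93xx/ep93xx-pcm.c is the in-tree example):

/* Hypothetical M2P client: transmit audio buffers on the I2S1 port. */
static void my_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
	/* transfer of buf has begun */
}

static void my_finished(void *cookie, struct ep93xx_dma_buffer *buf,
			int bytes, int error)
{
	if (error)
		pr_warn("my-audio-tx: transfer error\n");
	/* only ep93xx_dma_m2p_submit_recursive() may be called from here */
}

static struct ep93xx_dma_m2p_client my_client = {
	.name		 = "my-audio-tx",
	.flags		 = EP93XX_DMA_M2P_PORT_I2S1 | EP93XX_DMA_M2P_TX |
			   EP93XX_DMA_M2P_IGNORE_ERROR,
	.buffer_started	 = my_started,
	.buffer_finished = my_finished,
};

static int my_start_tx(struct ep93xx_dma_buffer *buf)
{
	int err = ep93xx_dma_m2p_client_register(&my_client);

	if (err)
		return err;

	ep93xx_dma_m2p_submit(&my_client, buf);	/* starts now if a slot is free */
	return 0;
}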
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b72d3a..3688123b5ad8 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
854 854
855 kirkwood_pcie_id(&dev, &rev); 855 kirkwood_pcie_id(&dev, &rev);
856 856
857 if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 || 857 if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
858 rev == MV88F6281_REV_A1)) || 858 if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
859 (dev == MV88F6282_DEV_ID)) 859 return 200000000;
860 return 200000000;
861 860
862 return 166666667; 861 return 166666667;
863} 862}
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4a152c..a31c9499ab36 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
225 .init_machine = d2net_v2_init, 225 .init_machine = d2net_v2_init,
226 .map_io = kirkwood_map_io, 226 .map_io = kirkwood_map_io,
227 .init_irq = kirkwood_init_irq, 227 .init_irq = kirkwood_init_irq,
228 .timer = &lacie_v2_timer, 228 .timer = &kirkwood_timer,
229MACHINE_END 229MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6c8a02..285edab776e9 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
111 pr_err("Failed to power up HDD%d\n", i + 1); 111 pr_err("Failed to power up HDD%d\n", i + 1);
112 } 112 }
113} 113}
114
115/*****************************************************************************
116 * Timer
117 ****************************************************************************/
118
119static void lacie_v2_timer_init(void)
120{
121 kirkwood_tclk = 166666667;
122 orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
123}
124
125struct sys_timer lacie_v2_timer = {
126 .init = lacie_v2_timer_init,
127};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af521315b87b..fc64f578536e 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
13void lacie_v2_register_i2c_devices(void); 13void lacie_v2_register_i2c_devices(void);
14void lacie_v2_hdd_power_init(int hdd_num); 14void lacie_v2_hdd_power_init(int hdd_num);
15 15
16extern struct sys_timer lacie_v2_timer;
17
18#endif 16#endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d177c6..27901f702feb 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
59 } 59 }
60 printk("\n"); 60 printk("\n");
61 61
62 while (*mpp_list) { 62 for ( ; *mpp_list; mpp_list++) {
63 unsigned int num = MPP_NUM(*mpp_list); 63 unsigned int num = MPP_NUM(*mpp_list);
64 unsigned int sel = MPP_SEL(*mpp_list); 64 unsigned int sel = MPP_SEL(*mpp_list);
65 int shift, gpio_mode; 65 int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
88 if (sel != 0) 88 if (sel != 0)
89 gpio_mode = 0; 89 gpio_mode = 0;
90 orion_gpio_set_valid(num, gpio_mode); 90 orion_gpio_set_valid(num, gpio_mode);
91
92 mpp_list++;
93 } 91 }
94 92
95 printk(KERN_DEBUG " final MPP regs:"); 93 printk(KERN_DEBUG " final MPP regs:");
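This while-to-for conversion (repeated for mv78xx0 and orion5x in the hunks below) is more than cosmetic: with the advance in the for-header, the cursor moves on every iteration, so a continue added to the body later can never skip the update and spin the loop forever on the same entry. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int mpp_list[] = { 0x11, 0x22, 0x33, 0 };	/* zero-terminated */
	unsigned int *p;

	for (p = mpp_list; *p; p++) {	/* p++ runs even on 'continue' */
		if (*p == 0x22)
			continue;
		printf("configuring %#x\n", *p);
	}
	return 0;
}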
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1f4178..65ee21fd2f3b 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
262 .init_machine = netspace_v2_init, 262 .init_machine = netspace_v2_init,
263 .map_io = kirkwood_map_io, 263 .map_io = kirkwood_map_io,
264 .init_irq = kirkwood_init_irq, 264 .init_irq = kirkwood_init_irq,
265 .timer = &lacie_v2_timer, 265 .timer = &kirkwood_timer,
266MACHINE_END 266MACHINE_END
267#endif 267#endif
268 268
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
272 .init_machine = netspace_v2_init, 272 .init_machine = netspace_v2_init,
273 .map_io = kirkwood_map_io, 273 .map_io = kirkwood_map_io,
274 .init_irq = kirkwood_init_irq, 274 .init_irq = kirkwood_init_irq,
275 .timer = &lacie_v2_timer, 275 .timer = &kirkwood_timer,
276MACHINE_END 276MACHINE_END
277#endif 277#endif
278 278
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
282 .init_machine = netspace_v2_init, 282 .init_machine = netspace_v2_init,
283 .map_io = kirkwood_map_io, 283 .map_io = kirkwood_map_io,
284 .init_irq = kirkwood_init_irq, 284 .init_irq = kirkwood_init_irq,
285 .timer = &lacie_v2_timer, 285 .timer = &kirkwood_timer,
286MACHINE_END 286MACHINE_END
287#endif 287#endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d501aef..93afd3c8bfd8 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
403 .init_machine = netxbig_v2_init, 403 .init_machine = netxbig_v2_init,
404 .map_io = kirkwood_map_io, 404 .map_io = kirkwood_map_io,
405 .init_irq = kirkwood_init_irq, 405 .init_irq = kirkwood_init_irq,
406 .timer = &lacie_v2_timer, 406 .timer = &kirkwood_timer,
407MACHINE_END 407MACHINE_END
408#endif 408#endif
409 409
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
413 .init_machine = netxbig_v2_init, 413 .init_machine = netxbig_v2_init,
414 .map_io = kirkwood_map_io, 414 .map_io = kirkwood_map_io,
415 .init_irq = kirkwood_init_irq, 415 .init_irq = kirkwood_init_irq,
416 .timer = &lacie_v2_timer, 416 .timer = &kirkwood_timer,
417MACHINE_END 417MACHINE_END
418#endif 418#endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0ce4ac..3587a281d993 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
27#include "mpp.h" 27#include "mpp.h"
28#include "tsx1x-common.h" 28#include "tsx1x-common.h"
29 29
30/* for the PCIe reset workaround */
31#include <plat/pcie.h>
32
33
30#define QNAP_TS41X_JUMPER_JP1 45 34#define QNAP_TS41X_JUMPER_JP1 45
31 35
32static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = { 36static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
140 144
141static int __init ts41x_pci_init(void) 145static int __init ts41x_pci_init(void)
142{ 146{
143 if (machine_is_ts41x()) 147 if (machine_is_ts41x()) {
148 /*
149 * Without this explicit reset, the PCIe SATA controller
150 * (Marvell 88sx7042/sata_mv) is known to stop working
151 * after a few minutes.
152 */
153 orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
154
144 kirkwood_pcie_init(KW_PCIE0); 155 kirkwood_pcie_init(KW_PCIE0);
156 }
145 157
146 return 0; 158 return 0;
147} 159}
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b213f1..8a3b56dfd35d 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
46#ifdef CONFIG_CPU_MMP2 46#ifdef CONFIG_CPU_MMP2
47static inline int cpu_is_mmp2(void) 47static inline int cpu_is_mmp2(void)
48{ 48{
49 return (((cpu_readid_id() >> 8) & 0xff) == 0x58); 49 return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
50}
50#else 51#else
51#define cpu_is_mmp2() (0) 52#define cpu_is_mmp2() (0)
52#endif 53#endif
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac514eb89..84db2dfc475c 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
54 } 54 }
55 printk("\n"); 55 printk("\n");
56 56
57 while (*mpp_list) { 57 for ( ; *mpp_list; mpp_list++) {
58 unsigned int num = MPP_NUM(*mpp_list); 58 unsigned int num = MPP_NUM(*mpp_list);
59 unsigned int sel = MPP_SEL(*mpp_list); 59 unsigned int sel = MPP_SEL(*mpp_list);
60 int shift, gpio_mode; 60 int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
83 if (sel != 0) 83 if (sel != 0)
84 gpio_mode = 0; 84 gpio_mode = 0;
85 orion_gpio_set_valid(num, gpio_mode); 85 orion_gpio_set_valid(num, gpio_mode);
86
87 mpp_list++;
88 } 86 }
89 87
90 printk(KERN_DEBUG " final MPP regs:"); 88 printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9aaf83..db485d3b8144 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
127 /* Initialize gpiolib. */ 127 /* Initialize gpiolib. */
128 orion_gpio_init(); 128 orion_gpio_init();
129 129
130 while (mode->mpp >= 0) { 130 for ( ; mode->mpp >= 0; mode++) {
131 u32 *reg; 131 u32 *reg;
132 int num_type; 132 int num_type;
133 int shift; 133 int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
160 orion_gpio_set_unused(mode->mpp); 160 orion_gpio_set_unused(mode->mpp);
161 161
162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO)); 162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
163
164 mode++;
165 } 163 }
166 164
167 writel(mpp_0_7_ctrl, MPP_0_7_CTRL); 165 writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5324be..c1c1cd04bdde 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
239static struct resource ts78xx_ts_nand_resources = { 239static struct resource ts78xx_ts_nand_resources = {
240 .start = TS_NAND_DATA, 240 .start = TS_NAND_DATA,
241 .end = TS_NAND_DATA + 4, 241 .end = TS_NAND_DATA + 4,
242 .flags = IORESOURCE_IO, 242 .flags = IORESOURCE_MEM,
243}; 243};
244 244
245static struct platform_device ts78xx_ts_nand_device = { 245static struct platform_device ts78xx_ts_nand_device = {
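The one-word change above matters because the TS-78xx NAND data register lives at a memory-mapped address, not a legacy I/O port: a driver asks the platform core for its MMIO window by resource type, so an IORESOURCE_IO entry is simply never found. The driver-side counterpart, sketched generically (my_probe is an illustrative name, not from this file):

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* only succeeds once the resource above is flagged IORESOURCE_MEM */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* ioremap(res->start, resource_size(res)) and continue... */
	return 0;
}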
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598ce9724..d34b99febeb9 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
476 476
477static void __init cmx2xx_init_irq(void) 477static void __init cmx2xx_init_irq(void)
478{ 478{
479 pxa27x_init_irq();
480
481 if (cpu_is_pxa25x()) { 479 if (cpu_is_pxa25x()) {
482 pxa25x_init_irq(); 480 pxa25x_init_irq();
483 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); 481 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e045d75..ffa50e633ee6 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
116 }, 116 },
117}; 117};
118 118
119#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE) 119#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
120static uint16_t lcd_power_on[] = { 120static uint16_t lcd_power_on[] = {
121 /* single frame */ 121 /* single frame */
122 SMART_CMD_NOOP, 122 SMART_CMD_NOOP,
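The old test "#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)" only worked by accident: in an #if expression an undefined identifier evaluates to 0, and kbuild happens to define =m options to 1, so the result came out right, but it warns under -Wundef and becomes a syntax error if the macro is ever defined empty. A compileable demonstration (FB and FB_MODULE stand in for the config macros):

#include <stdio.h>

/* #define FB_MODULE 1	<- uncomment to flip both tests on */

int main(void)
{
#if defined(FB) || (FB_MODULE)		/* undefined FB_MODULE silently reads as 0 */
	puts("old-style test: enabled");
#endif
#if defined(FB) || defined(FB_MODULE)	/* explicit, and -Wundef clean */
	puts("defined() test: enabled");
#endif
	return 0;
}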
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 54b479c35ee0..51dcd59eda6a 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -116,4 +116,6 @@ endmenu
116config SH_CLK_CPG 116config SH_CLK_CPG
117 bool 117 bool
118 118
119source "drivers/sh/Kconfig"
120
119endif 121endif
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 46ca4d4abf91..32d9e2816e56 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -565,12 +565,50 @@ static struct platform_device *qhd_devices[] __initdata = {
565 565
566/* FSI */ 566/* FSI */
567#define IRQ_FSI evt2irq(0x1840) 567#define IRQ_FSI evt2irq(0x1840)
568
569static int fsi_set_rate(int is_porta, int rate)
570{
571 struct clk *fsib_clk;
572 struct clk *fdiv_clk = &sh7372_fsidivb_clk;
573 int ret;
574
 575 /* set_rate is not needed for port A */
576 if (is_porta)
577 return 0;
578
579 fsib_clk = clk_get(NULL, "fsib_clk");
580 if (IS_ERR(fsib_clk))
581 return -EINVAL;
582
583 switch (rate) {
584 case 48000:
585 clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
586 clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
587 ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
588 break;
589 default:
590 pr_err("unsupported rate in FSI2 port B\n");
591 ret = -EINVAL;
592 break;
593 }
594
595 clk_put(fsib_clk);
596
597 return ret;
598}
599
568static struct sh_fsi_platform_info fsi_info = { 600static struct sh_fsi_platform_info fsi_info = {
569 .porta_flags = SH_FSI_BRS_INV | 601 .porta_flags = SH_FSI_BRS_INV |
570 SH_FSI_OUT_SLAVE_MODE | 602 SH_FSI_OUT_SLAVE_MODE |
571 SH_FSI_IN_SLAVE_MODE | 603 SH_FSI_IN_SLAVE_MODE |
572 SH_FSI_OFMT(PCM) | 604 SH_FSI_OFMT(PCM) |
573 SH_FSI_IFMT(PCM), 605 SH_FSI_IFMT(PCM),
606
607 .portb_flags = SH_FSI_BRS_INV |
608 SH_FSI_BRM_INV |
609 SH_FSI_LRS_INV |
610 SH_FSI_OFMT(SPDIF),
611 .set_rate = fsi_set_rate,
574}; 612};
575 613
576static struct resource fsi_resources[] = { 614static struct resource fsi_resources[] = {
@@ -634,6 +672,7 @@ static struct platform_device lcdc1_device = {
634static struct sh_mobile_hdmi_info hdmi_info = { 672static struct sh_mobile_hdmi_info hdmi_info = {
635 .lcd_chan = &sh_mobile_lcdc1_info.ch[0], 673 .lcd_chan = &sh_mobile_lcdc1_info.ch[0],
636 .lcd_dev = &lcdc1_device.dev, 674 .lcd_dev = &lcdc1_device.dev,
675 .flags = HDMI_SND_SRC_SPDIF,
637}; 676};
638 677
639static struct resource hdmi_resources[] = { 678static struct resource hdmi_resources[] = {
@@ -992,6 +1031,7 @@ static void __init ap4evb_map_io(void)
992 1031
993#define GPIO_PORT9CR 0xE6051009 1032#define GPIO_PORT9CR 0xE6051009
994#define GPIO_PORT10CR 0xE605100A 1033#define GPIO_PORT10CR 0xE605100A
1034#define USCCR1 0xE6058144
995static void __init ap4evb_init(void) 1035static void __init ap4evb_init(void)
996{ 1036{
997 u32 srcr4; 1037 u32 srcr4;
@@ -1062,7 +1102,7 @@ static void __init ap4evb_init(void)
1062 /* setup USB phy */ 1102 /* setup USB phy */
1063 __raw_writew(0x8a0a, 0xE6058130); /* USBCR2 */ 1103 __raw_writew(0x8a0a, 0xE6058130); /* USBCR2 */
1064 1104
1065 /* enable FSI2 */ 1105 /* enable FSI2 port A (ak4643) */
1066 gpio_request(GPIO_FN_FSIAIBT, NULL); 1106 gpio_request(GPIO_FN_FSIAIBT, NULL);
1067 gpio_request(GPIO_FN_FSIAILR, NULL); 1107 gpio_request(GPIO_FN_FSIAILR, NULL);
1068 gpio_request(GPIO_FN_FSIAISLD, NULL); 1108 gpio_request(GPIO_FN_FSIAISLD, NULL);
@@ -1079,6 +1119,10 @@ static void __init ap4evb_init(void)
1079 gpio_request(GPIO_PORT41, NULL); 1119 gpio_request(GPIO_PORT41, NULL);
1080 gpio_direction_input(GPIO_PORT41); 1120 gpio_direction_input(GPIO_PORT41);
1081 1121
1122 /* setup FSI2 port B (HDMI) */
1123 gpio_request(GPIO_FN_FSIBCK, NULL);
1124 __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
1125
1082 /* set SPU2 clock to 119.6 MHz */ 1126 /* set SPU2 clock to 119.6 MHz */
1083 clk = clk_get(NULL, "spu_clk"); 1127 clk = clk_get(NULL, "spu_clk");
1084 if (!IS_ERR(clk)) { 1128 if (!IS_ERR(clk)) {
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 8565aefa21fd..7db31e6c6bf2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -50,6 +50,9 @@
50#define SMSTPCR3 0xe615013c 50#define SMSTPCR3 0xe615013c
51#define SMSTPCR4 0xe6150140 51#define SMSTPCR4 0xe6150140
52 52
53#define FSIDIVA 0xFE1F8000
54#define FSIDIVB 0xFE1F8008
55
53/* Platforms must set frequency on their DV_CLKI pin */ 56/* Platforms must set frequency on their DV_CLKI pin */
54struct clk sh7372_dv_clki_clk = { 57struct clk sh7372_dv_clki_clk = {
55}; 58};
@@ -288,6 +291,7 @@ struct clk sh7372_pllc2_clk = {
288 .ops = &pllc2_clk_ops, 291 .ops = &pllc2_clk_ops,
289 .parent = &extal1_div2_clk, 292 .parent = &extal1_div2_clk,
290 .freq_table = pllc2_freq_table, 293 .freq_table = pllc2_freq_table,
294 .nr_freqs = ARRAY_SIZE(pllc2_freq_table) - 1,
291 .parent_table = pllc2_parent, 295 .parent_table = pllc2_parent,
292 .parent_num = ARRAY_SIZE(pllc2_parent), 296 .parent_num = ARRAY_SIZE(pllc2_parent),
293}; 297};
@@ -417,6 +421,101 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
417 fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2), 421 fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
418}; 422};
419 423
424/* FSI DIV */
425static unsigned long fsidiv_recalc(struct clk *clk)
426{
427 unsigned long value;
428
429 value = __raw_readl(clk->mapping->base);
430
431 if ((value & 0x3) != 0x3)
432 return 0;
433
434 value >>= 16;
435 if (value < 2)
436 return 0;
437
438 return clk->parent->rate / value;
439}
440
441static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
442{
443 return clk_rate_div_range_round(clk, 2, 0xffff, rate);
444}
445
446static void fsidiv_disable(struct clk *clk)
447{
448 __raw_writel(0, clk->mapping->base);
449}
450
451static int fsidiv_enable(struct clk *clk)
452{
453 unsigned long value;
454
455 value = __raw_readl(clk->mapping->base) >> 16;
456 if (value < 2) {
457 fsidiv_disable(clk);
458 return -ENOENT;
459 }
460
461 __raw_writel((value << 16) | 0x3, clk->mapping->base);
462
463 return 0;
464}
465
466static int fsidiv_set_rate(struct clk *clk,
467 unsigned long rate, int algo_id)
468{
469 int idx;
470
471 if (clk->parent->rate == rate) {
472 fsidiv_disable(clk);
473 return 0;
474 }
475
476 idx = (clk->parent->rate / rate) & 0xffff;
477 if (idx < 2)
478 return -ENOENT;
479
480 __raw_writel(idx << 16, clk->mapping->base);
481 return fsidiv_enable(clk);
482}
483
484static struct clk_ops fsidiv_clk_ops = {
485 .recalc = fsidiv_recalc,
486 .round_rate = fsidiv_round_rate,
487 .set_rate = fsidiv_set_rate,
488 .enable = fsidiv_enable,
489 .disable = fsidiv_disable,
490};
491
492static struct clk_mapping sh7372_fsidiva_clk_mapping = {
493 .phys = FSIDIVA,
494 .len = 8,
495};
496
497struct clk sh7372_fsidiva_clk = {
498 .ops = &fsidiv_clk_ops,
499 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
500 .mapping = &sh7372_fsidiva_clk_mapping,
501};
502
503static struct clk_mapping sh7372_fsidivb_clk_mapping = {
504 .phys = FSIDIVB,
505 .len = 8,
506};
507
508struct clk sh7372_fsidivb_clk = {
509 .ops = &fsidiv_clk_ops,
510 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
511 .mapping = &sh7372_fsidivb_clk_mapping,
512};
513
514static struct clk *late_main_clks[] = {
515 &sh7372_fsidiva_clk,
516 &sh7372_fsidivb_clk,
517};
518
420enum { MSTP001, 519enum { MSTP001,
421 MSTP131, MSTP130, 520 MSTP131, MSTP130,
422 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 521 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
@@ -585,6 +684,9 @@ void __init sh7372_clock_init(void)
585 if (!ret) 684 if (!ret)
586 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); 685 ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
587 686
687 for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
688 ret = clk_register(late_main_clks[k]);
689
 588 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 690 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 589 691
 590 if (!ret) 692 if (!ret)
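The FSIDIV registers hold the divider in bits [31:16] with the two low bits acting as enables, which makes the ap4evb numbers above easy to verify: fsi_set_rate() rounds FSIB to 85428000 Hz and asks the divider for 12204000 Hz, so fsidiv_set_rate() computes idx = 85428000 / 12204000 = 7 and fsidiv_enable() leaves 0x00070003 in the register, from which fsidiv_recalc() reads exactly 12204000 Hz back out. Arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 85428000, target = 12204000;
	unsigned long idx = parent / target;		/* 7, must be >= 2 */
	unsigned long reg = (idx << 16) | 0x3;		/* divider | enable bits */

	printf("FSIDIV reg = %#010lx, rate = %lu Hz\n", reg, parent / (reg >> 16));
	return 0;
}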
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 5bc6bd444d72..2b1bb9e43dda 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -35,12 +35,12 @@ static inline int gpio_cansleep(unsigned gpio)
35 35
36static inline int gpio_to_irq(unsigned gpio) 36static inline int gpio_to_irq(unsigned gpio)
37{ 37{
38 return -ENOSYS; 38 return __gpio_to_irq(gpio);
39} 39}
40 40
41static inline int irq_to_gpio(unsigned int irq) 41static inline int irq_to_gpio(unsigned int irq)
42{ 42{
43 return -EINVAL; 43 return -ENOSYS;
44} 44}
45 45
46#endif /* CONFIG_GPIOLIB */ 46#endif /* CONFIG_GPIOLIB */
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 147775a94bce..e4f9004e7103 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -464,5 +464,7 @@ extern struct clk sh7372_dv_clki_div2_clk;
464extern struct clk sh7372_pllc2_clk; 464extern struct clk sh7372_pllc2_clk;
465extern struct clk sh7372_fsiack_clk; 465extern struct clk sh7372_fsiack_clk;
466extern struct clk sh7372_fsibck_clk; 466extern struct clk sh7372_fsibck_clk;
467extern struct clk sh7372_fsidiva_clk;
468extern struct clk sh7372_fsidivb_clk;
467 469
468#endif /* __ASM_SH7372_H__ */ 470#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a9e025..fd25ccd7272f 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
54 54
55static void __init ct_ca9x4_map_io(void) 55static void __init ct_ca9x4_map_io(void)
56{ 56{
57#ifdef CONFIG_LOCAL_TIMERS
57 twd_base = MMIO_P2V(A9_MPCORE_TWD); 58 twd_base = MMIO_P2V(A9_MPCORE_TWD);
59#endif
58 v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); 60 v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
59} 61}
60 62
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e859..ac6a36142fcd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
198 * fragmentation of the DMA space, and also prevents allocations 198 * fragmentation of the DMA space, and also prevents allocations
199 * smaller than a section from crossing a section boundary. 199 * smaller than a section from crossing a section boundary.
200 */ 200 */
201 bit = fls(size - 1) + 1; 201 bit = fls(size - 1);
202 if (bit > SECTION_SHIFT) 202 if (bit > SECTION_SHIFT)
203 bit = SECTION_SHIFT; 203 bit = SECTION_SHIFT;
204 align = 1 << bit; 204 align = 1 << bit;
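fls() returns the 1-based index of the most significant set bit (with fls(0) == 0), so fls(size - 1) is already the order of the smallest power of two >= size; the stray "+ 1" doubled the alignment of every allocation and wasted half of the scarce DMA remap area. Spot check:

#include <stdio.h>

static int fls(unsigned int x)	/* kernel semantics */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int size = 1 << 20;	/* a 1 MiB request */

	printf("fixed align: %#x\n", 1 << fls(size - 1));	/* 0x100000 (1 MiB) */
	printf("old align:   %#x\n", 1 << (fls(size - 1) + 1));	/* 0x200000 (2 MiB) */
	return 0;
}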
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18b8aa4..fc819120978d 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
284 if (!size) 284 if (!size)
285 return; 285 return;
286 286
287 paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT); 287 paddr = memblock_alloc(size, SZ_1M);
288 if (!paddr) { 288 if (!paddr) {
289 pr_err("%s: failed to reserve %x bytes\n", 289 pr_err("%s: failed to reserve %x bytes\n",
290 __func__, size); 290 __func__, size);
291 return; 291 return;
292 } 292 }
293 memblock_free(paddr, size);
294 memblock_remove(paddr, size);
293 295
294 omap_dsp_phys_mempool_base = paddr; 296 omap_dsp_phys_mempool_base = paddr;
295} 297}
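The replacement is the boot-time "steal memory" idiom: memblock_alloc() is used only to find a suitably sized and aligned range, after which the reservation is dropped and the range removed from the memory map altogether, so the DSP pool never reaches the page allocator. Isolated, the sequence looks like this (a sketch using the same early-boot memblock calls as the hunk):

/* Find an aligned range and hide it from the kernel entirely. */
static phys_addr_t __init steal_memblock(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t paddr = memblock_alloc(size, align);

	if (!paddr)
		return 0;

	memblock_free(paddr, size);	/* drop the reservation... */
	memblock_remove(paddr, size);	/* ...then the range itself */
	return paddr;
}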
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef72b4e7..cc99163e73fd 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
11#ifndef __PLAT_PCIE_H 11#ifndef __PLAT_PCIE_H
12#define __PLAT_PCIE_H 12#define __PLAT_PCIE_H
13 13
14struct pci_bus;
15
14u32 orion_pcie_dev_id(void __iomem *base); 16u32 orion_pcie_dev_id(void __iomem *base);
15u32 orion_pcie_rev(void __iomem *base); 17u32 orion_pcie_rev(void __iomem *base);
16int orion_pcie_link_up(void __iomem *base); 18int orion_pcie_link_up(void __iomem *base);
17int orion_pcie_x4_mode(void __iomem *base); 19int orion_pcie_x4_mode(void __iomem *base);
18int orion_pcie_get_local_bus_nr(void __iomem *base); 20int orion_pcie_get_local_bus_nr(void __iomem *base);
19void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); 21void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
22void orion_pcie_reset(void __iomem *base);
20void orion_pcie_setup(void __iomem *base, 23void orion_pcie_setup(void __iomem *base,
21 struct mbus_dram_target_info *dram); 24 struct mbus_dram_target_info *dram);
22int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, 25int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a1595e..af2d733c50b5 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base,
182 u32 mask; 182 u32 mask;
183 183
184 /* 184 /*
185 * soft reset PCIe unit
186 */
187 orion_pcie_reset(base);
188
189 /*
190 * Point PCIe unit MBUS decode windows to DRAM space. 185 * Point PCIe unit MBUS decode windows to DRAM space.
191 */ 186 */
192 orion_pcie_setup_wins(base, dram); 187 orion_pcie_setup_wins(base, dram);
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284a1550..7ef4115b8c4a 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
2#define _M68K_IRQFLAGS_H 2#define _M68K_IRQFLAGS_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#ifdef CONFIG_MMU
5#include <linux/hardirq.h> 6#include <linux/hardirq.h>
7#endif
6#include <linux/preempt.h> 8#include <linux/preempt.h>
7#include <asm/thread_info.h> 9#include <asm/thread_info.h>
8#include <asm/entry.h> 10#include <asm/entry.h>
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2de0e9..415d5484916c 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@ extern unsigned long hw_timer_offset(void);
40extern irqreturn_t arch_timer_interrupt(int irq, void *dummy); 40extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
41 41
42extern void config_BSP(char *command, int len); 42extern void config_BSP(char *command, int len);
43extern void do_IRQ(int irq, struct pt_regs *fp);
43 44
44#endif /* _M68K_MACHDEP_H */ 45#endif /* _M68K_MACHDEP_H */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e538aec..b06bdae04064 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
127 127
128static void kvm_patch_ins_b(u32 *inst, int addr) 128static void kvm_patch_ins_b(u32 *inst, int addr)
129{ 129{
130#ifdef CONFIG_RELOCATABLE 130#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
131 /* On relocatable kernels interrupts handlers and our code 131 /* On relocatable kernels interrupts handlers and our code
132 can be in different regions, so we don't patch them */ 132 can be in different regions, so we don't patch them */
133 133
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 049846911ce4..1cc471faac2d 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@ lightweight_exit:
416 lwz r3, VCPU_PC(r4) 416 lwz r3, VCPU_PC(r4)
417 mtsrr0 r3 417 mtsrr0 r3
418 lwz r3, VCPU_SHARED(r4) 418 lwz r3, VCPU_SHARED(r4)
419 lwz r3, VCPU_SHARED_MSR(r3) 419 lwz r3, (VCPU_SHARED_MSR + 4)(r3)
420 oris r3, r3, KVMPPC_MSR_MASK@h 420 oris r3, r3, KVMPPC_MSR_MASK@h
421 ori r3, r3, KVMPPC_MSR_MASK@l 421 ori r3, r3, KVMPPC_MSR_MASK@l
422 mtsrr1 r3 422 mtsrr1 r3
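The "+ 4" is a big-endian width fix: the shared-page msr field became 64 bits wide, and this 32-bit interrupt path only wants its low word, which on a 32-bit big-endian PowerPC sits at byte offset 4 of the __u64. Byte-level illustration (host-endian independent):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 0x0000000100000002 laid out big-endian, as on 32-bit BE PowerPC */
	uint8_t be64[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };

	/* lwz rX, 4(rY): the least-significant word is at offset +4 */
	uint32_t lo = (uint32_t)be64[4] << 24 | be64[5] << 16 |
		      (uint32_t)be64[6] << 8 | be64[7];

	printf("low 32 bits = %u\n", lo);	/* prints 2 */
	return 0;
}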
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2dd5d3..e3768ee9b595 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
138 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 138 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
139 139
140 free_page((unsigned long)vcpu->arch.shared); 140 free_page((unsigned long)vcpu->arch.shared);
141 kvmppc_e500_tlb_uninit(vcpu_e500);
142 kvm_vcpu_uninit(vcpu); 141 kvm_vcpu_uninit(vcpu);
142 kvmppc_e500_tlb_uninit(vcpu_e500);
143 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 143 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
144} 144}
145 145
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a1627f6c..38f756f25053 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
617 switch (ioctl) { 617 switch (ioctl) {
618 case KVM_PPC_GET_PVINFO: { 618 case KVM_PPC_GET_PVINFO: {
619 struct kvm_ppc_pvinfo pvinfo; 619 struct kvm_ppc_pvinfo pvinfo;
620 memset(&pvinfo, 0, sizeof(pvinfo));
620 r = kvm_vm_ioctl_get_pvinfo(&pvinfo); 621 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
621 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { 622 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 622 r = -EFAULT; 623 r = -EFAULT;
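The added memset() closes an information leak: pvinfo lives on the kernel stack, and copy_to_user() copies the whole struct, including compiler padding and any fields kvm_vm_ioctl_get_pvinfo() does not write, so whatever stack garbage was there would reach userspace. The general pattern for any kernel-to-user struct copy (some_info and fill_info() are illustrative stand-ins):

struct some_info info;

memset(&info, 0, sizeof(info));	/* zero padding and unset fields */
fill_info(&info);		/* illustrative stand-in */
if (copy_to_user(uptr, &info, sizeof(info)))
	return -EFAULT;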
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f12a9b..a021f5827a33 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
35 int i; 35 int i;
36 36
37 /* pause guest execution to avoid concurrent updates */ 37 /* pause guest execution to avoid concurrent updates */
38 local_irq_disable();
39 mutex_lock(&vcpu->mutex); 38 mutex_lock(&vcpu->mutex);
40 39
41 vcpu->arch.last_exit_type = 0xDEAD; 40 vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
51 vcpu->arch.timing_last_enter.tv64 = 0; 50 vcpu->arch.timing_last_enter.tv64 = 0;
52 51
53 mutex_unlock(&vcpu->mutex); 52 mutex_unlock(&vcpu->mutex);
54 local_irq_enable();
55} 53}
56 54
57static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) 55static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
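Dropping the local_irq_disable()/local_irq_enable() pair fixes a sleep-in-atomic bug rather than merely tidying: mutex_lock() may block, and blocking with local interrupts disabled is forbidden (and deadlock-prone, since the current owner may need an interrupt to ever release the mutex). The vcpu mutex by itself is what actually pauses guest execution here. In contrast form:

/* Invalid (removed above): taking a sleeping lock under IRQ-off. */
local_irq_disable();
mutex_lock(&vcpu->mutex);	/* may schedule() -> sleeping-while-atomic */

/* Valid: take the sleeping lock with interrupts enabled. */
mutex_lock(&vcpu->mutex);
/* ... update the timing statistics ... */
mutex_unlock(&vcpu->mutex);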
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 06cbd1e8c943..90efda0b137d 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -28,39 +28,70 @@ struct qeth_arp_cache_entry {
28 __u8 reserved2[32]; 28 __u8 reserved2[32];
29} __attribute__ ((packed)); 29} __attribute__ ((packed));
30 30
31enum qeth_arp_ipaddrtype {
32 QETHARP_IP_ADDR_V4 = 1,
33 QETHARP_IP_ADDR_V6 = 2,
34};
35struct qeth_arp_entrytype {
36 __u8 mac;
37 __u8 ip;
38} __attribute__((packed));
39
40#define QETH_QARP_MEDIASPECIFIC_BYTES 32
41#define QETH_QARP_MACADDRTYPE_BYTES 1
31struct qeth_arp_qi_entry7 { 42struct qeth_arp_qi_entry7 {
32 __u8 media_specific[32]; 43 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
33 __u8 macaddr_type; 44 struct qeth_arp_entrytype type;
34 __u8 ipaddr_type;
35 __u8 macaddr[6]; 45 __u8 macaddr[6];
36 __u8 ipaddr[4]; 46 __u8 ipaddr[4];
37} __attribute__((packed)); 47} __attribute__((packed));
38 48
49struct qeth_arp_qi_entry7_ipv6 {
50 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
51 struct qeth_arp_entrytype type;
52 __u8 macaddr[6];
53 __u8 ipaddr[16];
54} __attribute__((packed));
55
39struct qeth_arp_qi_entry7_short { 56struct qeth_arp_qi_entry7_short {
40 __u8 macaddr_type; 57 struct qeth_arp_entrytype type;
41 __u8 ipaddr_type;
42 __u8 macaddr[6]; 58 __u8 macaddr[6];
43 __u8 ipaddr[4]; 59 __u8 ipaddr[4];
44} __attribute__((packed)); 60} __attribute__((packed));
45 61
62struct qeth_arp_qi_entry7_short_ipv6 {
63 struct qeth_arp_entrytype type;
64 __u8 macaddr[6];
65 __u8 ipaddr[16];
66} __attribute__((packed));
67
46struct qeth_arp_qi_entry5 { 68struct qeth_arp_qi_entry5 {
47 __u8 media_specific[32]; 69 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
48 __u8 macaddr_type; 70 struct qeth_arp_entrytype type;
49 __u8 ipaddr_type;
50 __u8 ipaddr[4]; 71 __u8 ipaddr[4];
51} __attribute__((packed)); 72} __attribute__((packed));
52 73
74struct qeth_arp_qi_entry5_ipv6 {
75 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
76 struct qeth_arp_entrytype type;
77 __u8 ipaddr[16];
78} __attribute__((packed));
79
53struct qeth_arp_qi_entry5_short { 80struct qeth_arp_qi_entry5_short {
54 __u8 macaddr_type; 81 struct qeth_arp_entrytype type;
55 __u8 ipaddr_type;
56 __u8 ipaddr[4]; 82 __u8 ipaddr[4];
57} __attribute__((packed)); 83} __attribute__((packed));
58 84
85struct qeth_arp_qi_entry5_short_ipv6 {
86 struct qeth_arp_entrytype type;
87 __u8 ipaddr[16];
88} __attribute__((packed));
59/* 89/*
60 * can be set by user if no "media specific information" is wanted 90 * can be set by user if no "media specific information" is wanted
61 * -> saves a lot of space in user space buffer 91 * -> saves a lot of space in user space buffer
62 */ 92 */
63#define QETH_QARP_STRIP_ENTRIES 0x8000 93#define QETH_QARP_STRIP_ENTRIES 0x8000
94#define QETH_QARP_WITH_IPV6 0x4000
64#define QETH_QARP_REQUEST_MASK 0x00ff 95#define QETH_QARP_REQUEST_MASK 0x00ff
65 96
66/* data sent to user space as result of query arp ioctl */ 97/* data sent to user space as result of query arp ioctl */
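Each ARP query entry now comes in four shapes: with or without the 32-byte media-specific block (the _short twins, selected via QETH_QARP_STRIP_ENTRIES), and with a 4- or 16-byte address (the new _ipv6 variants, requested via QETH_QARP_WITH_IPV6). Userspace therefore has to size its buffer per entry type, and because the structs are packed the sizes are exact. A quick check with the two short variants mirrored in userspace:

#include <stdio.h>

struct entrytype { unsigned char mac, ip; } __attribute__((packed));

struct entry5_short      { struct entrytype t; unsigned char ip[4];  } __attribute__((packed));
struct entry5_short_ipv6 { struct entrytype t; unsigned char ip[16]; } __attribute__((packed));

int main(void)
{
	printf("v4 short entry: %zu bytes\n", sizeof(struct entry5_short));	 /* 6 */
	printf("v6 short entry: %zu bytes\n", sizeof(struct entry5_short_ipv6));/* 18 */
	return 0;
}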
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5c075f562eba..7f217b3a50a8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -193,6 +193,7 @@ config CPU_SH2
193config CPU_SH2A 193config CPU_SH2A
194 bool 194 bool
195 select CPU_SH2 195 select CPU_SH2
196 select UNCACHED_MAPPING
196 197
197config CPU_SH3 198config CPU_SH3
198 bool 199 bool
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 307b3a4a790b..9c8c6e1a2a15 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -133,10 +133,7 @@ machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
133machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx 133machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
134machdir-$(CONFIG_SH_DREAMCAST) += mach-dreamcast 134machdir-$(CONFIG_SH_DREAMCAST) += mach-dreamcast
135machdir-$(CONFIG_SH_SH03) += mach-sh03 135machdir-$(CONFIG_SH_SH03) += mach-sh03
136machdir-$(CONFIG_SH_SECUREEDGE5410) += mach-snapgear
137machdir-$(CONFIG_SH_RTS7751R2D) += mach-r2d 136machdir-$(CONFIG_SH_RTS7751R2D) += mach-r2d
138machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh
139machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705
140machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander 137machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
141machdir-$(CONFIG_SH_MIGOR) += mach-migor 138machdir-$(CONFIG_SH_MIGOR) += mach-migor
142machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa 139machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 9c94711aa6ca..2018c7ea4c93 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -81,13 +81,6 @@ config SH_7343_SOLUTION_ENGINE
81 Select 7343 SolutionEngine if configuring for a Hitachi 81 Select 7343 SolutionEngine if configuring for a Hitachi
82 SH7343 (SH-Mobile 3AS) evaluation board. 82 SH7343 (SH-Mobile 3AS) evaluation board.
83 83
84config SH_7751_SYSTEMH
85 bool "SystemH7751R"
86 depends on CPU_SUBTYPE_SH7751R
87 help
88 Select SystemH if you are configuring for a Renesas SystemH
89 7751R evaluation board.
90
91config SH_HP6XX 84config SH_HP6XX
92 bool "HP6XX" 85 bool "HP6XX"
93 select SYS_SUPPORTS_APM_EMULATION 86 select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 38ef655cc0f0..be7d11d04b26 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -2,10 +2,12 @@
2# Specific board support, not covered by a mach group. 2# Specific board support, not covered by a mach group.
3# 3#
4obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o 4obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o
5obj-$(CONFIG_SH_SECUREEDGE5410) += board-secureedge5410.o
5obj-$(CONFIG_SH_SH2007) += board-sh2007.o 6obj-$(CONFIG_SH_SH2007) += board-sh2007.o
6obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o 7obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o
7obj-$(CONFIG_SH_URQUELL) += board-urquell.o 8obj-$(CONFIG_SH_URQUELL) += board-urquell.o
8obj-$(CONFIG_SH_SHMIN) += board-shmin.o 9obj-$(CONFIG_SH_SHMIN) += board-shmin.o
10obj-$(CONFIG_SH_EDOSK7705) += board-edosk7705.o
9obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o 11obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o
10obj-$(CONFIG_SH_ESPT) += board-espt.o 12obj-$(CONFIG_SH_ESPT) += board-espt.o
11obj-$(CONFIG_SH_POLARIS) += board-polaris.o 13obj-$(CONFIG_SH_POLARIS) += board-polaris.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644
index 000000000000..4cb3bb74c36f
--- /dev/null
+++ b/arch/sh/boards/board-edosk7705.c
@@ -0,0 +1,78 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SolutionEngine Support.
7 *
8 * Modified for edosk7705 development
9 * board by S. Dunn, 2003.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
15#include <linux/smc91x.h>
16#include <asm/machvec.h>
17#include <asm/sizes.h>
18
19#define SMC_IOBASE 0xA2000000
20#define SMC_IO_OFFSET 0x300
21#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
22
23#define ETHERNET_IRQ 0x09
24
25static void __init sh_edosk7705_init_irq(void)
26{
27 make_imask_irq(ETHERNET_IRQ);
28}
29
30/* eth initialization functions */
31static struct smc91x_platdata smc91x_info = {
32 .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
33};
34
35static struct resource smc91x_res[] = {
36 [0] = {
37 .start = SMC_IOADDR,
38 .end = SMC_IOADDR + SZ_32 - 1,
39 .flags = IORESOURCE_MEM,
40 },
41 [1] = {
42 .start = ETHERNET_IRQ,
43 .end = ETHERNET_IRQ,
 44 .flags = IORESOURCE_IRQ,
45 }
46};
47
48static struct platform_device smc91x_dev = {
49 .name = "smc91x",
50 .id = -1,
51 .num_resources = ARRAY_SIZE(smc91x_res),
52 .resource = smc91x_res,
53
54 .dev = {
55 .platform_data = &smc91x_info,
56 },
57};
58
59/* platform init code */
60static struct platform_device *edosk7705_devices[] __initdata = {
61 &smc91x_dev,
62};
63
64static int __init init_edosk7705_devices(void)
65{
66 return platform_add_devices(edosk7705_devices,
67 ARRAY_SIZE(edosk7705_devices));
68}
69__initcall(init_edosk7705_devices);
70
71/*
72 * The Machine Vector
73 */
74static struct sh_machine_vector mv_edosk7705 __initmv = {
75 .mv_name = "EDOSK7705",
76 .mv_nr_irqs = 80,
77 .mv_init_irq = sh_edosk7705_init_irq,
78};
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/board-secureedge5410.c
index 331745dee379..32f875e8493d 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/arch/sh/boards/snapgear/setup.c
3 *
4 * Copyright (C) 2002 David McCullough <davidm@snapgear.com> 2 * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
5 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> 3 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
6 * 4 *
@@ -19,18 +17,19 @@
19#include <linux/module.h> 17#include <linux/module.h>
20#include <linux/sched.h> 18#include <linux/sched.h>
21#include <asm/machvec.h> 19#include <asm/machvec.h>
22#include <mach/snapgear.h> 20#include <mach/secureedge5410.h>
23#include <asm/irq.h> 21#include <asm/irq.h>
24#include <asm/io.h> 22#include <asm/io.h>
25#include <cpu/timer.h> 23#include <cpu/timer.h>
26 24
25unsigned short secureedge5410_ioport;
26
27/* 27/*
28 * EraseConfig handling functions 28 * EraseConfig handling functions
29 */ 29 */
30
31static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id) 30static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
32{ 31{
33 (void)__raw_readb(0xb8000000); /* dummy read */ 32 ctrl_delay(); /* dummy read */
34 33
35 printk("SnapGear: erase switch interrupt!\n"); 34 printk("SnapGear: erase switch interrupt!\n");
36 35
@@ -39,21 +38,22 @@ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
39 38
40static int __init eraseconfig_init(void) 39static int __init eraseconfig_init(void)
41{ 40{
41 unsigned int irq = evt2irq(0x240);
42
42 printk("SnapGear: EraseConfig init\n"); 43 printk("SnapGear: EraseConfig init\n");
44
43 /* Setup "EraseConfig" switch on external IRQ 0 */ 45 /* Setup "EraseConfig" switch on external IRQ 0 */
44 if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED, 46 if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
45 "Erase Config", NULL)) 47 "Erase Config", NULL))
46 printk("SnapGear: failed to register IRQ%d for Reset witch\n", 48 printk("SnapGear: failed to register IRQ%d for Reset witch\n",
47 IRL0_IRQ); 49 irq);
48 else 50 else
49 printk("SnapGear: registered EraseConfig switch on IRQ%d\n", 51 printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
50 IRL0_IRQ); 52 irq);
51 return(0); 53 return 0;
52} 54}
53
54module_init(eraseconfig_init); 55module_init(eraseconfig_init);
55 56
56/****************************************************************************/
57/* 57/*
58 * Initialize IRQ setting 58 * Initialize IRQ setting
59 * 59 *
@@ -62,7 +62,6 @@ module_init(eraseconfig_init);
62 * IRL2 = eth1 62 * IRL2 = eth1
63 * IRL3 = crypto 63 * IRL3 = crypto
64 */ 64 */
65
66static void __init init_snapgear_IRQ(void) 65static void __init init_snapgear_IRQ(void)
67{ 66{
68 printk("Setup SnapGear IRQ/IPR ...\n"); 67 printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@ static void __init init_snapgear_IRQ(void)
76static struct sh_machine_vector mv_snapgear __initmv = { 75static struct sh_machine_vector mv_snapgear __initmv = {
77 .mv_name = "SnapGear SecureEdge5410", 76 .mv_name = "SnapGear SecureEdge5410",
78 .mv_nr_irqs = 72, 77 .mv_nr_irqs = 72,
79
80 .mv_inb = snapgear_inb,
81 .mv_inw = snapgear_inw,
82 .mv_inl = snapgear_inl,
83 .mv_outb = snapgear_outb,
84 .mv_outw = snapgear_outw,
85 .mv_outl = snapgear_outl,
86
87 .mv_inb_p = snapgear_inb_p,
88 .mv_inw_p = snapgear_inw,
89 .mv_inl_p = snapgear_inl,
90 .mv_outb_p = snapgear_outb_p,
91 .mv_outw_p = snapgear_outw,
92 .mv_outl_p = snapgear_outl,
93
94 .mv_init_irq = init_snapgear_IRQ, 78 .mv_init_irq = init_snapgear_IRQ,
95}; 79};
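The request_irq() conversion in the hunk above swaps the hardcoded IRL0_IRQ for evt2irq(0x240). A minimal sanity-check sketch, assuming the usual SH definition of evt2irq() (the definition itself is not part of this patch):

    /* Assumed SH vector-to-IRQ mapping; shown for illustration only. */
    #define evt2irq(evt)    (((evt) >> 5) - 16)

    /*
     * evt2irq(0x240) == (0x240 >> 5) - 16 == 18 - 16 == 2,
     * which matches the IRL0_IRQ value of 2 that the removed
     * mach/snapgear.h header used to define.
     */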
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644
index cd54acb51499..000000000000
--- a/arch/sh/boards/mach-edosk7705/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the EDOSK7705 specific parts of the kernel
3#
4
5obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644
index 5b9c57c43241..000000000000
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/io.c
3 *
4 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
5 * Based largely on io_se.c.
6 *
7 * I/O routines for Hitachi EDOSK7705 board.
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/io.h>
14#include <mach/edosk7705.h>
15#include <asm/addrspace.h>
16
17#define SMC_IOADDR 0xA2000000
18
19/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
20static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
21{
22 /*
23 * SMC91C96 registers are 4 byte aligned rather than the
24 * usual 2 byte!
25 */
26 if (port >= 0x300 && port < 0x320)
27 return SMC_IOADDR + ((port - 0x300) * 2);
28
29 maybebadio(port);
30 return port;
31}
32
33/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
34 * registers causes problems. So we bit-shift the value and read / write
35 * in 2 byte chunks. Setting the low byte to 0 does not cause problems
36 * now as odd byte writes are only made on the bit mask / interrupt
37 * register. This may not be the case in future Mar-2003 SJD
38 */
39unsigned char sh_edosk7705_inb(unsigned long port)
40{
41 if (port >= 0x300 && port < 0x320 && port & 0x01)
42 return __raw_readw(port - 1) >> 8;
43
44 return __raw_readb(sh_edosk7705_isa_port2addr(port));
45}
46
47void sh_edosk7705_outb(unsigned char value, unsigned long port)
48{
49 if (port >= 0x300 && port < 0x320 && port & 0x01) {
50 __raw_writew(((unsigned short)value << 8), port - 1);
51 return;
52 }
53
54 __raw_writeb(value, sh_edosk7705_isa_port2addr(port));
55}
56
57void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
58{
59 unsigned char *p = addr;
60
61 while (count--)
62 *p++ = sh_edosk7705_inb(port);
63}
64
65void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
66{
67 unsigned char *p = (unsigned char *)addr;
68
69 while (count--)
70 sh_edosk7705_outb(*p++, port);
71}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644
index d59225e26fb9..000000000000
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SolutionEngine Support.
7 *
8 * Modified for edosk7705 development
9 * board by S. Dunn, 2003.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <asm/machvec.h>
14#include <mach/edosk7705.h>
15
16static void __init sh_edosk7705_init_irq(void)
17{
18 /* This is the Ethernet interrupt */
19 make_imask_irq(0x09);
20}
21
22/*
23 * The Machine Vector
24 */
25static struct sh_machine_vector mv_edosk7705 __initmv = {
26 .mv_name = "EDOSK7705",
27 .mv_nr_irqs = 80,
28
29 .mv_inb = sh_edosk7705_inb,
30 .mv_outb = sh_edosk7705_outb,
31
32 .mv_insb = sh_edosk7705_insb,
33 .mv_outsb = sh_edosk7705_outsb,
34
35 .mv_init_irq = sh_edosk7705_init_irq,
36};
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 2960c659020e..acdafb0c6404 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -54,7 +54,7 @@
54/* 54/*
55 * map I/O ports to memory-mapped addresses 55 * map I/O ports to memory-mapped addresses
56 */ 56 */
57static unsigned long microdev_isa_port2addr(unsigned long offset) 57void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
58{ 58{
59 unsigned long result; 59 unsigned long result;
60 60
@@ -72,16 +72,6 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
72 * Configuration Registers 72 * Configuration Registers
73 */ 73 */
74 result = IO_SUPERIO_PHYS + (offset << 1); 74 result = IO_SUPERIO_PHYS + (offset << 1);
75#if 0
76 } else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
77 offset == KBD_STATUS_REG) {
78 /*
79 * SMSC FDC37C93xAPM SuperIO chip
80 *
81 * PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
82 */
83 result = IO_SUPERIO_PHYS + (offset << 1);
84#endif
85 } else if (((offset >= IO_IDE1_BASE) && 75 } else if (((offset >= IO_IDE1_BASE) &&
86 (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) || 76 (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) ||
87 (offset == IO_IDE1_MISC)) { 77 (offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
131 result = PVR; 121 result = PVR;
132 } 122 }
133 123
134 return result; 124 return (void __iomem *)result;
135}
136
137#define PORT2ADDR(x) (microdev_isa_port2addr(x))
138
139static inline void delay(void)
140{
141#if defined(CONFIG_PCI)
142 /* System board present, just make a dummy SRAM access. (CS0 will be
143 mapped to PCI memory, probably good to avoid it.) */
144 __raw_readw(0xa6800000);
145#else
146 /* CS0 will be mapped to flash, ROM etc so safe to access it. */
147 __raw_readw(0xa0000000);
148#endif
149}
150
151unsigned char microdev_inb(unsigned long port)
152{
153#ifdef CONFIG_PCI
154 if (port >= PCIBIOS_MIN_IO)
155 return microdev_pci_inb(port);
156#endif
157 return *(volatile unsigned char*)PORT2ADDR(port);
158}
159
160unsigned short microdev_inw(unsigned long port)
161{
162#ifdef CONFIG_PCI
163 if (port >= PCIBIOS_MIN_IO)
164 return microdev_pci_inw(port);
165#endif
166 return *(volatile unsigned short*)PORT2ADDR(port);
167}
168
169unsigned int microdev_inl(unsigned long port)
170{
171#ifdef CONFIG_PCI
172 if (port >= PCIBIOS_MIN_IO)
173 return microdev_pci_inl(port);
174#endif
175 return *(volatile unsigned int*)PORT2ADDR(port);
176}
177
178void microdev_outw(unsigned short b, unsigned long port)
179{
180#ifdef CONFIG_PCI
181 if (port >= PCIBIOS_MIN_IO) {
182 microdev_pci_outw(b, port);
183 return;
184 }
185#endif
186 *(volatile unsigned short*)PORT2ADDR(port) = b;
187}
188
189void microdev_outb(unsigned char b, unsigned long port)
190{
191#ifdef CONFIG_PCI
192 if (port >= PCIBIOS_MIN_IO) {
193 microdev_pci_outb(b, port);
194 return;
195 }
196#endif
197
198 /*
199 * There is a board feature with the current SH4-202 MicroDev in
200 * that the 2 byte enables (nBE0 and nBE1) are tied together (and
201 * to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
202 * it is not possible to safely perform 8-bit writes to the
203 * Ethernet registers, as 16-bits will be consumed from the Data
204 * lines (corrupting the other byte). Hence, this function is
205 * written to implement 16-bit read/modify/write for all byte-wide
206 * accesses.
207 *
208 * Note: there is no problem with byte READS (even or odd).
209 *
210 * Sean McGoogan - 16th June 2003.
211 */
212 if ((port >= IO_LAN91C111_BASE) &&
213 (port < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
214 /*
215	 * We are trying to perform a byte-write to the
216 * LAN91C111. This needs special care.
217 */
218 if (port % 2 == 1) { /* is the port odd ? */
219 /* unset bit-0, i.e. make even */
220 const unsigned long evenPort = port-1;
221 unsigned short word;
222
223 /*
224 * do a 16-bit read/write to write to 'port',
225 * preserving even byte.
226 *
227 * Even addresses are bits 0-7
228 * Odd addresses are bits 8-15
229 */
230 word = microdev_inw(evenPort);
231 word = (word & 0xffu) | (b << 8);
232 microdev_outw(word, evenPort);
233 } else {
234 /* else, we are trying to do an even byte write */
235 unsigned short word;
236
237 /*
238 * do a 16-bit read/write to write to 'port',
239 * preserving odd byte.
240 *
241 * Even addresses are bits 0-7
242 * Odd addresses are bits 8-15
243 */
244 word = microdev_inw(port);
245 word = (word & 0xff00u) | (b);
246 microdev_outw(word, port);
247 }
248 } else {
249 *(volatile unsigned char*)PORT2ADDR(port) = b;
250 }
251}
252
253void microdev_outl(unsigned int b, unsigned long port)
254{
255#ifdef CONFIG_PCI
256 if (port >= PCIBIOS_MIN_IO) {
257 microdev_pci_outl(b, port);
258 return;
259 }
260#endif
261 *(volatile unsigned int*)PORT2ADDR(port) = b;
262}
263
264unsigned char microdev_inb_p(unsigned long port)
265{
266 unsigned char v = microdev_inb(port);
267 delay();
268 return v;
269}
270
271unsigned short microdev_inw_p(unsigned long port)
272{
273 unsigned short v = microdev_inw(port);
274 delay();
275 return v;
276}
277
278unsigned int microdev_inl_p(unsigned long port)
279{
280 unsigned int v = microdev_inl(port);
281 delay();
282 return v;
283}
284
285void microdev_outb_p(unsigned char b, unsigned long port)
286{
287 microdev_outb(b, port);
288 delay();
289}
290
291void microdev_outw_p(unsigned short b, unsigned long port)
292{
293 microdev_outw(b, port);
294 delay();
295}
296
297void microdev_outl_p(unsigned int b, unsigned long port)
298{
299 microdev_outl(b, port);
300 delay();
301}
302
303void microdev_insb(unsigned long port, void *buffer, unsigned long count)
304{
305 volatile unsigned char *port_addr;
306 unsigned char *buf = buffer;
307
308 port_addr = (volatile unsigned char *)PORT2ADDR(port);
309
310 while (count--)
311 *buf++ = *port_addr;
312}
313
314void microdev_insw(unsigned long port, void *buffer, unsigned long count)
315{
316 volatile unsigned short *port_addr;
317 unsigned short *buf = buffer;
318
319 port_addr = (volatile unsigned short *)PORT2ADDR(port);
320
321 while (count--)
322 *buf++ = *port_addr;
323}
324
325void microdev_insl(unsigned long port, void *buffer, unsigned long count)
326{
327 volatile unsigned long *port_addr;
328 unsigned int *buf = buffer;
329
330 port_addr = (volatile unsigned long *)PORT2ADDR(port);
331
332 while (count--)
333 *buf++ = *port_addr;
334}
335
336void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
337{
338 volatile unsigned char *port_addr;
339 const unsigned char *buf = buffer;
340
341 port_addr = (volatile unsigned char *)PORT2ADDR(port);
342
343 while (count--)
344 *port_addr = *buf++;
345}
346
347void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
348{
349 volatile unsigned short *port_addr;
350 const unsigned short *buf = buffer;
351
352 port_addr = (volatile unsigned short *)PORT2ADDR(port);
353
354 while (count--)
355 *port_addr = *buf++;
356}
357
358void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
359{
360 volatile unsigned long *port_addr;
361 const unsigned int *buf = buffer;
362
363 port_addr = (volatile unsigned long *)PORT2ADDR(port);
364
365 while (count--)
366 *port_addr = *buf++;
367} 125}
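The comment in the deleted microdev_outb() documents a technique worth keeping in mind: with both byte enables tied together, a byte-wide write to the LAN91C111 must be emulated as a 16-bit read/modify/write that preserves the sibling byte. A self-contained sketch of that scheme, where mmio_readw()/mmio_writew() are stand-ins for the real accessors:

    #include <stdint.h>

    /* Stand-ins for the real 16-bit MMIO accessors; illustration only. */
    extern uint16_t mmio_readw(unsigned long addr);
    extern void mmio_writew(uint16_t v, unsigned long addr);

    /*
     * Emulate an 8-bit write with a 16-bit read/modify/write.
     * Even addresses live in bits 0-7, odd addresses in bits 8-15.
     */
    static void byte_write_via_word(uint8_t b, unsigned long port)
    {
            unsigned long even = port & ~1UL;       /* word-align the port */
            uint16_t word = mmio_readw(even);       /* fetch both bytes */

            if (port & 1)
                    word = (word & 0x00ffu) | ((uint16_t)b << 8);
            else
                    word = (word & 0xff00u) | b;

            mmio_writew(word, even);                /* write back the pair */
    }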
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index d1df2a4fb9b8..d8a747291e03 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -195,27 +195,6 @@ device_initcall(microdev_devices_setup);
195static struct sh_machine_vector mv_sh4202_microdev __initmv = { 195static struct sh_machine_vector mv_sh4202_microdev __initmv = {
196 .mv_name = "SH4-202 MicroDev", 196 .mv_name = "SH4-202 MicroDev",
197 .mv_nr_irqs = 72, 197 .mv_nr_irqs = 72,
198 198 .mv_ioport_map = microdev_ioport_map,
199 .mv_inb = microdev_inb,
200 .mv_inw = microdev_inw,
201 .mv_inl = microdev_inl,
202 .mv_outb = microdev_outb,
203 .mv_outw = microdev_outw,
204 .mv_outl = microdev_outl,
205
206 .mv_inb_p = microdev_inb_p,
207 .mv_inw_p = microdev_inw_p,
208 .mv_inl_p = microdev_inl_p,
209 .mv_outb_p = microdev_outb_p,
210 .mv_outw_p = microdev_outw_p,
211 .mv_outl_p = microdev_outl_p,
212
213 .mv_insb = microdev_insb,
214 .mv_insw = microdev_insw,
215 .mv_insl = microdev_insl,
216 .mv_outsb = microdev_outsb,
217 .mv_outsw = microdev_outsw,
218 .mv_outsl = microdev_outsl,
219
220 .mv_init_irq = init_microdev_irq, 199 .mv_init_irq = init_microdev_irq,
221}; 200};
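Once the machine vector supplies only mv_ioport_map, the per-board in*/out* routines become unnecessary: the generic layer can translate the port once and issue the access itself. A rough sketch of the idea; the sketch_* and board_ioport_map names are illustrative, not the kernel's actual symbols:

    #include <stdint.h>

    /* Assumed board hook, in the spirit of mv_ioport_map above. */
    extern void *board_ioport_map(unsigned long port, unsigned int len);

    static inline uint8_t sketch_inb(unsigned long port)
    {
            volatile uint8_t *addr = board_ioport_map(port, 1);
            return *addr;           /* one MMIO byte read */
    }

    static inline void sketch_outb(uint8_t value, unsigned long port)
    {
            volatile uint8_t *addr = board_ioport_map(port, 1);
            *addr = value;          /* one MMIO byte write */
    }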
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile
index 63e7ed699f39..5c9eaa0535b9 100644
--- a/arch/sh/boards/mach-se/7206/Makefile
+++ b/arch/sh/boards/mach-se/7206/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the 7206 SolutionEngine specific parts of the kernel 2# Makefile for the 7206 SolutionEngine specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644
index adadc77532ee..000000000000
--- a/arch/sh/boards/mach-se/7206/io.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
2 *
3 * linux/arch/sh/boards/se/7206/io.c
4 *
5 * Copyright (C) 2006 Yoshinori Sato
6 *
7 * I/O routine for Hitachi 7206 SolutionEngine.
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <asm/io.h>
14#include <mach-se/mach/se7206.h>
15
16
17static inline void delay(void)
18{
19 __raw_readw(0x20000000); /* P2 ROM Area */
20}
21
22/* MS7750 requires special versions of in*, out* routines, since
23 PC-like io ports are located at upper half byte of 16-bit word which
24 can be accessed only with 16-bit wide. */
25
26static inline volatile __u16 *
27port2adr(unsigned int port)
28{
29 if (port >= 0x2000 && port < 0x2020)
30 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
31 else if (port >= 0x300 && port < 0x310)
32 return (volatile __u16 *) (PA_SMSC + (port - 0x300));
33
34 return (volatile __u16 *)port;
35}
36
37unsigned char se7206_inb(unsigned long port)
38{
39 return (*port2adr(port)) & 0xff;
40}
41
42unsigned char se7206_inb_p(unsigned long port)
43{
44 unsigned long v;
45
46 v = (*port2adr(port)) & 0xff;
47 delay();
48 return v;
49}
50
51unsigned short se7206_inw(unsigned long port)
52{
53 return *port2adr(port);
54}
55
56void se7206_outb(unsigned char value, unsigned long port)
57{
58 *(port2adr(port)) = value;
59}
60
61void se7206_outb_p(unsigned char value, unsigned long port)
62{
63 *(port2adr(port)) = value;
64 delay();
65}
66
67void se7206_outw(unsigned short value, unsigned long port)
68{
69 *port2adr(port) = value;
70}
71
72void se7206_insb(unsigned long port, void *addr, unsigned long count)
73{
74 volatile __u16 *p = port2adr(port);
75 __u8 *ap = addr;
76
77 while (count--)
78 *ap++ = *p;
79}
80
81void se7206_insw(unsigned long port, void *addr, unsigned long count)
82{
83 volatile __u16 *p = port2adr(port);
84 __u16 *ap = addr;
85 while (count--)
86 *ap++ = *p;
87}
88
89void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
90{
91 volatile __u16 *p = port2adr(port);
92 const __u8 *ap = addr;
93
94 while (count--)
95 *p = *ap++;
96}
97
98void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
99{
100 volatile __u16 *p = port2adr(port);
101 const __u16 *ap = addr;
102 while (count--)
103 *p = *ap++;
104}
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index 883b21eacaa6..d961949600fd 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -139,11 +139,13 @@ void __init init_se7206_IRQ(void)
139 make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */ 139 make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
140 make_se7206_irq(IRQ1_IRQ); /* ATA */ 140 make_se7206_irq(IRQ1_IRQ); /* ATA */
141 make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */ 141 make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
142 __raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */ 142
143	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
143 144
144 /* FPGA System register setup*/ 145 /* FPGA System register setup*/
145 __raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */ 146 __raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
146 __raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */ 147 __raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
148
147 /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */ 149 /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
148 __raw_writew(0x0001,INTSEL); 150 __raw_writew(0x0001,INTSEL);
149} 151}
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 8f5c65d43d1d..7f4871c71a01 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -86,20 +86,5 @@ __initcall(se7206_devices_setup);
86static struct sh_machine_vector mv_se __initmv = { 86static struct sh_machine_vector mv_se __initmv = {
87 .mv_name = "SolutionEngine", 87 .mv_name = "SolutionEngine",
88 .mv_nr_irqs = 256, 88 .mv_nr_irqs = 256,
89 .mv_inb = se7206_inb,
90 .mv_inw = se7206_inw,
91 .mv_outb = se7206_outb,
92 .mv_outw = se7206_outw,
93
94 .mv_inb_p = se7206_inb_p,
95 .mv_inw_p = se7206_inw,
96 .mv_outb_p = se7206_outb_p,
97 .mv_outw_p = se7206_outw,
98
99 .mv_insb = se7206_insb,
100 .mv_insw = se7206_insw,
101 .mv_outsb = se7206_outsb,
102 .mv_outsw = se7206_outsw,
103
104 .mv_init_irq = init_se7206_IRQ, 89 .mv_init_irq = init_se7206_IRQ,
105}; 90};
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile
index 8e624b06d5ea..43ea14feef51 100644
--- a/arch/sh/boards/mach-se/770x/Makefile
+++ b/arch/sh/boards/mach-se/770x/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the 770x SolutionEngine specific parts of the kernel 2# Makefile for the 770x SolutionEngine specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644
index 28833c8786ea..000000000000
--- a/arch/sh/boards/mach-se/770x/io.c
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * Copyright (C) 2000 Kazumoto Kojima
3 *
4 * I/O routine for Hitachi SolutionEngine.
5 */
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <asm/io.h>
9#include <mach-se/mach/se.h>
10
11/* MS7750 requires special versions of in*, out* routines, since
12 PC-like io ports are located at upper half byte of 16-bit word which
13 can be accessed only with 16-bit wide. */
14
15static inline volatile __u16 *
16port2adr(unsigned int port)
17{
18 if (port & 0xff000000)
19 return ( volatile __u16 *) port;
20 if (port >= 0x2000)
21 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
22 else if (port >= 0x1000)
23 return (volatile __u16 *) (PA_83902 + (port << 1));
24 else
25 return (volatile __u16 *) (PA_SUPERIO + (port << 1));
26}
27
28static inline int
29shifted_port(unsigned long port)
30{
31 /* For IDE registers, value is not shifted */
32 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
33 return 0;
34 else
35 return 1;
36}
37
38unsigned char se_inb(unsigned long port)
39{
40 if (shifted_port(port))
41 return (*port2adr(port) >> 8);
42 else
43 return (*port2adr(port))&0xff;
44}
45
46unsigned char se_inb_p(unsigned long port)
47{
48 unsigned long v;
49
50 if (shifted_port(port))
51 v = (*port2adr(port) >> 8);
52 else
53 v = (*port2adr(port))&0xff;
54 ctrl_delay();
55 return v;
56}
57
58unsigned short se_inw(unsigned long port)
59{
60 if (port >= 0x2000)
61 return *port2adr(port);
62 else
63 maybebadio(port);
64 return 0;
65}
66
67unsigned int se_inl(unsigned long port)
68{
69 maybebadio(port);
70 return 0;
71}
72
73void se_outb(unsigned char value, unsigned long port)
74{
75 if (shifted_port(port))
76 *(port2adr(port)) = value << 8;
77 else
78 *(port2adr(port)) = value;
79}
80
81void se_outb_p(unsigned char value, unsigned long port)
82{
83 if (shifted_port(port))
84 *(port2adr(port)) = value << 8;
85 else
86 *(port2adr(port)) = value;
87 ctrl_delay();
88}
89
90void se_outw(unsigned short value, unsigned long port)
91{
92 if (port >= 0x2000)
93 *port2adr(port) = value;
94 else
95 maybebadio(port);
96}
97
98void se_outl(unsigned int value, unsigned long port)
99{
100 maybebadio(port);
101}
102
103void se_insb(unsigned long port, void *addr, unsigned long count)
104{
105 volatile __u16 *p = port2adr(port);
106 __u8 *ap = addr;
107
108 if (shifted_port(port)) {
109 while (count--)
110 *ap++ = *p >> 8;
111 } else {
112 while (count--)
113 *ap++ = *p;
114 }
115}
116
117void se_insw(unsigned long port, void *addr, unsigned long count)
118{
119 volatile __u16 *p = port2adr(port);
120 __u16 *ap = addr;
121 while (count--)
122 *ap++ = *p;
123}
124
125void se_insl(unsigned long port, void *addr, unsigned long count)
126{
127 maybebadio(port);
128}
129
130void se_outsb(unsigned long port, const void *addr, unsigned long count)
131{
132 volatile __u16 *p = port2adr(port);
133 const __u8 *ap = addr;
134
135 if (shifted_port(port)) {
136 while (count--)
137 *p = *ap++ << 8;
138 } else {
139 while (count--)
140 *p = *ap++;
141 }
142}
143
144void se_outsw(unsigned long port, const void *addr, unsigned long count)
145{
146 volatile __u16 *p = port2adr(port);
147 const __u16 *ap = addr;
148
149 while (count--)
150 *p = *ap++;
151}
152
153void se_outsl(unsigned long port, const void *addr, unsigned long count)
154{
155 maybebadio(port);
156}
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 66d39d1b0901..31330c65c0ce 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -195,27 +195,5 @@ static struct sh_machine_vector mv_se __initmv = {
195#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 195#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
196 .mv_nr_irqs = 104, 196 .mv_nr_irqs = 104,
197#endif 197#endif
198
199 .mv_inb = se_inb,
200 .mv_inw = se_inw,
201 .mv_inl = se_inl,
202 .mv_outb = se_outb,
203 .mv_outw = se_outw,
204 .mv_outl = se_outl,
205
206 .mv_inb_p = se_inb_p,
207 .mv_inw_p = se_inw,
208 .mv_inl_p = se_inl,
209 .mv_outb_p = se_outb_p,
210 .mv_outw_p = se_outw,
211 .mv_outl_p = se_outl,
212
213 .mv_insb = se_insb,
214 .mv_insw = se_insw,
215 .mv_insl = se_insl,
216 .mv_outsb = se_outsb,
217 .mv_outsw = se_outsw,
218 .mv_outsl = se_outsl,
219
220 .mv_init_irq = init_se_IRQ, 198 .mv_init_irq = init_se_IRQ,
221}; 199};
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile
index e6f4341bfe6e..a338fd9d5039 100644
--- a/arch/sh/boards/mach-se/7751/Makefile
+++ b/arch/sh/boards/mach-se/7751/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the 7751 SolutionEngine specific parts of the kernel 2# Makefile for the 7751 SolutionEngine specific parts of the kernel
3# 3#
4 4
5obj-y := setup.o io.o irq.o 5obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644
index 6e75bd4459e5..000000000000
--- a/arch/sh/boards/mach-se/7751/io.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
3 * Based largely on io_se.c.
4 *
5 * I/O routine for Hitachi 7751 SolutionEngine.
6 *
7 * Initial version only to support LAN access; some
8 * placeholder code from io_se.c left in with the
9 * expectation of later SuperIO and PCMCIA access.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/pci.h>
14#include <asm/io.h>
15#include <mach-se/mach/se7751.h>
16#include <asm/addrspace.h>
17
18static inline volatile u16 *port2adr(unsigned int port)
19{
20 if (port >= 0x2000)
21 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
22 maybebadio((unsigned long)port);
23 return (volatile __u16*)port;
24}
25
26/*
27 * General outline: remap really low stuff [eventually] to SuperIO,
28 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
29 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
30 * should be way beyond the window, and is used w/o translation for
31 * compatibility.
32 */
33unsigned char sh7751se_inb(unsigned long port)
34{
35 if (PXSEG(port))
36 return *(volatile unsigned char *)port;
37 else
38 return (*port2adr(port)) & 0xff;
39}
40
41unsigned char sh7751se_inb_p(unsigned long port)
42{
43 unsigned char v;
44
45 if (PXSEG(port))
46 v = *(volatile unsigned char *)port;
47 else
48 v = (*port2adr(port)) & 0xff;
49 ctrl_delay();
50 return v;
51}
52
53unsigned short sh7751se_inw(unsigned long port)
54{
55 if (PXSEG(port))
56 return *(volatile unsigned short *)port;
57 else if (port >= 0x2000)
58 return *port2adr(port);
59 else
60 maybebadio(port);
61 return 0;
62}
63
64unsigned int sh7751se_inl(unsigned long port)
65{
66 if (PXSEG(port))
67 return *(volatile unsigned long *)port;
68 else if (port >= 0x2000)
69 return *port2adr(port);
70 else
71 maybebadio(port);
72 return 0;
73}
74
75void sh7751se_outb(unsigned char value, unsigned long port)
76{
77
78 if (PXSEG(port))
79 *(volatile unsigned char *)port = value;
80 else
81 *(port2adr(port)) = value;
82}
83
84void sh7751se_outb_p(unsigned char value, unsigned long port)
85{
86 if (PXSEG(port))
87 *(volatile unsigned char *)port = value;
88 else
89 *(port2adr(port)) = value;
90 ctrl_delay();
91}
92
93void sh7751se_outw(unsigned short value, unsigned long port)
94{
95 if (PXSEG(port))
96 *(volatile unsigned short *)port = value;
97 else if (port >= 0x2000)
98 *port2adr(port) = value;
99 else
100 maybebadio(port);
101}
102
103void sh7751se_outl(unsigned int value, unsigned long port)
104{
105 if (PXSEG(port))
106 *(volatile unsigned long *)port = value;
107 else
108 maybebadio(port);
109}
110
111void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
112{
113 maybebadio(port);
114}
115
116void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
117{
118 maybebadio(port);
119}
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 50572512e3e8..9fbc51beb181 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -56,23 +56,5 @@ __initcall(se7751_devices_setup);
56static struct sh_machine_vector mv_7751se __initmv = { 56static struct sh_machine_vector mv_7751se __initmv = {
57 .mv_name = "7751 SolutionEngine", 57 .mv_name = "7751 SolutionEngine",
58 .mv_nr_irqs = 72, 58 .mv_nr_irqs = 72,
59
60 .mv_inb = sh7751se_inb,
61 .mv_inw = sh7751se_inw,
62 .mv_inl = sh7751se_inl,
63 .mv_outb = sh7751se_outb,
64 .mv_outw = sh7751se_outw,
65 .mv_outl = sh7751se_outl,
66
67 .mv_inb_p = sh7751se_inb_p,
68 .mv_inw_p = sh7751se_inw,
69 .mv_inl_p = sh7751se_inl,
70 .mv_outb_p = sh7751se_outb_p,
71 .mv_outw_p = sh7751se_outw,
72 .mv_outl_p = sh7751se_outl,
73
74 .mv_insl = sh7751se_insl,
75 .mv_outsl = sh7751se_outsl,
76
77 .mv_init_irq = init_7751se_IRQ, 59 .mv_init_irq = init_7751se_IRQ,
78}; 60};
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644
index d2d2f4b6a502..000000000000
--- a/arch/sh/boards/mach-snapgear/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the SnapGear specific parts of the kernel
3#
4
5obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644
index 476650e42dbc..000000000000
--- a/arch/sh/boards/mach-snapgear/io.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
3 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
4 * Based largely on io_se.c.
5 *
6 * I/O routine for Hitachi 7751 SolutionEngine.
7 *
8 * Initial version only to support LAN access; some
9 * placeholder code from io_se.c left in with the
10 * expectation of later SuperIO and PCMCIA access.
11 */
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/pci.h>
15#include <asm/io.h>
16#include <asm/addrspace.h>
17
18#ifdef CONFIG_SH_SECUREEDGE5410
19unsigned short secureedge5410_ioport;
20#endif
21
22static inline volatile __u16 *port2adr(unsigned int port)
23{
24 maybebadio((unsigned long)port);
25 return (volatile __u16*)port;
26}
27
28/*
29 * General outline: remap really low stuff [eventually] to SuperIO,
30 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
31 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
32 * should be way beyond the window, and is used w/o translation for
33 * compatibility.
34 */
35unsigned char snapgear_inb(unsigned long port)
36{
37 if (PXSEG(port))
38 return *(volatile unsigned char *)port;
39 else
40 return (*port2adr(port)) & 0xff;
41}
42
43unsigned char snapgear_inb_p(unsigned long port)
44{
45 unsigned char v;
46
47 if (PXSEG(port))
48 v = *(volatile unsigned char *)port;
49 else
50 v = (*port2adr(port))&0xff;
51 ctrl_delay();
52 return v;
53}
54
55unsigned short snapgear_inw(unsigned long port)
56{
57 if (PXSEG(port))
58 return *(volatile unsigned short *)port;
59 else if (port >= 0x2000)
60 return *port2adr(port);
61 else
62 maybebadio(port);
63 return 0;
64}
65
66unsigned int snapgear_inl(unsigned long port)
67{
68 if (PXSEG(port))
69 return *(volatile unsigned long *)port;
70 else if (port >= 0x2000)
71 return *port2adr(port);
72 else
73 maybebadio(port);
74 return 0;
75}
76
77void snapgear_outb(unsigned char value, unsigned long port)
78{
79
80 if (PXSEG(port))
81 *(volatile unsigned char *)port = value;
82 else
83 *(port2adr(port)) = value;
84}
85
86void snapgear_outb_p(unsigned char value, unsigned long port)
87{
88 if (PXSEG(port))
89 *(volatile unsigned char *)port = value;
90 else
91 *(port2adr(port)) = value;
92 ctrl_delay();
93}
94
95void snapgear_outw(unsigned short value, unsigned long port)
96{
97 if (PXSEG(port))
98 *(volatile unsigned short *)port = value;
99 else if (port >= 0x2000)
100 *port2adr(port) = value;
101 else
102 maybebadio(port);
103}
104
105void snapgear_outl(unsigned int value, unsigned long port)
106{
107 if (PXSEG(port))
108 *(volatile unsigned long *)port = value;
109 else
110 maybebadio(port);
111}
112
113void snapgear_insl(unsigned long port, void *addr, unsigned long count)
114{
115 maybebadio(port);
116}
117
118void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
119{
120 maybebadio(port);
121}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644
index 2cc6a23d9d39..000000000000
--- a/arch/sh/boards/mach-systemh/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# Makefile for the SystemH specific parts of the kernel
3#
4
5obj-y := setup.o irq.o io.o
6
7# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
8# importantly, with the generic sh7751_pcic_init() code. For now, we'll
9# just abuse the hell out of kbuild, because we can..
10
11obj-$(CONFIG_PCI) += pci.o
12pci-y := ../../se/7751/pci.o
13
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644
index 15577ff1f715..000000000000
--- a/arch/sh/boards/mach-systemh/io.c
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/io.c
3 *
4 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
5 * Based largely on io_se.c.
6 *
7 * I/O routine for Hitachi 7751 Systemh.
8 */
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <mach/systemh7751.h>
13#include <asm/addrspace.h>
14#include <asm/io.h>
15
16#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
17 of smc lan chip*/
18static inline volatile __u16 *
19port2adr(unsigned int port)
20{
21 if (port >= 0x2000)
22 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
23 maybebadio((unsigned long)port);
24 return (volatile __u16*)port;
25}
26
27/*
28 * General outline: remap really low stuff [eventually] to SuperIO,
29 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
30 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
31 * should be way beyond the window, and is used w/o translation for
32 * compatibility.
33 */
34unsigned char sh7751systemh_inb(unsigned long port)
35{
36 if (PXSEG(port))
37 return *(volatile unsigned char *)port;
38 else if (port <= 0x3F1)
39 return *(volatile unsigned char *)ETHER_IOMAP(port);
40 else
41 return (*port2adr(port))&0xff;
42}
43
44unsigned char sh7751systemh_inb_p(unsigned long port)
45{
46 unsigned char v;
47
48 if (PXSEG(port))
49 v = *(volatile unsigned char *)port;
50 else if (port <= 0x3F1)
51 v = *(volatile unsigned char *)ETHER_IOMAP(port);
52 else
53 v = (*port2adr(port))&0xff;
54 ctrl_delay();
55 return v;
56}
57
58unsigned short sh7751systemh_inw(unsigned long port)
59{
60 if (PXSEG(port))
61 return *(volatile unsigned short *)port;
62 else if (port >= 0x2000)
63 return *port2adr(port);
64 else if (port <= 0x3F1)
65 return *(volatile unsigned int *)ETHER_IOMAP(port);
66 else
67 maybebadio(port);
68 return 0;
69}
70
71unsigned int sh7751systemh_inl(unsigned long port)
72{
73 if (PXSEG(port))
74 return *(volatile unsigned long *)port;
75 else if (port >= 0x2000)
76 return *port2adr(port);
77 else if (port <= 0x3F1)
78 return *(volatile unsigned int *)ETHER_IOMAP(port);
79 else
80 maybebadio(port);
81 return 0;
82}
83
84void sh7751systemh_outb(unsigned char value, unsigned long port)
85{
86
87 if (PXSEG(port))
88 *(volatile unsigned char *)port = value;
89 else if (port <= 0x3F1)
90 *(volatile unsigned char *)ETHER_IOMAP(port) = value;
91 else
92 *(port2adr(port)) = value;
93}
94
95void sh7751systemh_outb_p(unsigned char value, unsigned long port)
96{
97 if (PXSEG(port))
98 *(volatile unsigned char *)port = value;
99 else if (port <= 0x3F1)
100 *(volatile unsigned char *)ETHER_IOMAP(port) = value;
101 else
102 *(port2adr(port)) = value;
103 ctrl_delay();
104}
105
106void sh7751systemh_outw(unsigned short value, unsigned long port)
107{
108 if (PXSEG(port))
109 *(volatile unsigned short *)port = value;
110 else if (port >= 0x2000)
111 *port2adr(port) = value;
112 else if (port <= 0x3F1)
113 *(volatile unsigned short *)ETHER_IOMAP(port) = value;
114 else
115 maybebadio(port);
116}
117
118void sh7751systemh_outl(unsigned int value, unsigned long port)
119{
120 if (PXSEG(port))
121 *(volatile unsigned long *)port = value;
122 else
123 maybebadio(port);
124}
125
126void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
127{
128 unsigned char *p = addr;
129 while (count--) *p++ = sh7751systemh_inb(port);
130}
131
132void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
133{
134 unsigned short *p = addr;
135 while (count--) *p++ = sh7751systemh_inw(port);
136}
137
138void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
139{
140 maybebadio(port);
141}
142
143void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
144{
145 unsigned char *p = (unsigned char*)addr;
146 while (count--) sh7751systemh_outb(*p++, port);
147}
148
149void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
150{
151 unsigned short *p = (unsigned short*)addr;
152 while (count--) sh7751systemh_outw(*p++, port);
153}
154
155void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
156{
157 maybebadio(port);
158}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644
index e5ee13adeff4..000000000000
--- a/arch/sh/boards/mach-systemh/irq.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/irq.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SystemH Support.
7 *
8 * Modified for 7751 SystemH by
9 * Jonathan Short.
10 */
11
12#include <linux/init.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16
17#include <mach/systemh7751.h>
18#include <asm/smc37c93x.h>
19
20/* address of external interrupt mask register
21 * address must be set prior to use these (maybe in init_XXX_irq())
22 * XXX : is it better to use .config than specifying it in code? */
23static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
24static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
25
26static void disable_systemh_irq(struct irq_data *data)
27{
28 unsigned long val, mask = 0x01 << 1;
29
30 /* Clear the "irq"th bit in the mask and set it in the request */
31 val = __raw_readl((unsigned long)systemh_irq_mask_register);
32 val &= ~mask;
33 __raw_writel(val, (unsigned long)systemh_irq_mask_register);
34
35 val = __raw_readl((unsigned long)systemh_irq_request_register);
36 val |= mask;
37 __raw_writel(val, (unsigned long)systemh_irq_request_register);
38}
39
40static void enable_systemh_irq(struct irq_data *data)
41{
42 unsigned long val, mask = 0x01 << 1;
43
44 /* Set "irq"th bit in the mask register */
45 val = __raw_readl((unsigned long)systemh_irq_mask_register);
46 val |= mask;
47 __raw_writel(val, (unsigned long)systemh_irq_mask_register);
48}
49
50static struct irq_chip systemh_irq_type = {
51 .name = "SystemH Register",
52 .irq_unmask = enable_systemh_irq,
53 .irq_mask = disable_systemh_irq,
54};
55
56void make_systemh_irq(unsigned int irq)
57{
58 disable_irq_nosync(irq);
59 set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
60 disable_systemh_irq(irq_get_irq_data(irq));
61}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644
index 219fd800a43f..000000000000
--- a/arch/sh/boards/mach-systemh/setup.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 * Copyright (C) 2003 Paul Mundt
6 *
7 * Hitachi SystemH Support.
8 *
9 * Modified for 7751 SystemH by Jonathan Short.
10 *
11 * Rewritten for 2.6 by Paul Mundt.
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/init.h>
18#include <asm/machvec.h>
19#include <mach/systemh7751.h>
20
21extern void make_systemh_irq(unsigned int irq);
22
23/*
24 * Initialize IRQ setting
25 */
26static void __init sh7751systemh_init_irq(void)
27{
28 make_systemh_irq(0xb); /* Ethernet interrupt */
29}
30
31static struct sh_machine_vector mv_7751systemh __initmv = {
32 .mv_name = "7751 SystemH",
33 .mv_nr_irqs = 72,
34
35 .mv_inb = sh7751systemh_inb,
36 .mv_inw = sh7751systemh_inw,
37 .mv_inl = sh7751systemh_inl,
38 .mv_outb = sh7751systemh_outb,
39 .mv_outw = sh7751systemh_outw,
40 .mv_outl = sh7751systemh_outl,
41
42 .mv_inb_p = sh7751systemh_inb_p,
43 .mv_inw_p = sh7751systemh_inw,
44 .mv_inl_p = sh7751systemh_inl,
45 .mv_outb_p = sh7751systemh_outb_p,
46 .mv_outw_p = sh7751systemh_outw,
47 .mv_outl_p = sh7751systemh_outl,
48
49 .mv_insb = sh7751systemh_insb,
50 .mv_insw = sh7751systemh_insw,
51 .mv_insl = sh7751systemh_insl,
52 .mv_outsb = sh7751systemh_outsb,
53 .mv_outsw = sh7751systemh_outsw,
54 .mv_outsl = sh7751systemh_outsl,
55
56 .mv_init_irq = sh7751systemh_init_irq,
57};
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/secureedge5410_defconfig
index 7eae4e59d7f0..7eae4e59d7f0 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/secureedge5410_defconfig
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644
index b58dfc505efe..000000000000
--- a/arch/sh/configs/systemh_defconfig
+++ /dev/null
@@ -1,28 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_LOG_BUF_SHIFT=14
3CONFIG_BLK_DEV_INITRD=y
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5# CONFIG_SYSCTL_SYSCALL is not set
6# CONFIG_HOTPLUG is not set
7CONFIG_SLAB=y
8CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y
10# CONFIG_BLK_DEV_BSG is not set
11CONFIG_CPU_SUBTYPE_SH7751R=y
12CONFIG_MEMORY_START=0x0c000000
13CONFIG_MEMORY_SIZE=0x00400000
14CONFIG_FLATMEM_MANUAL=y
15CONFIG_SH_7751_SYSTEMH=y
16CONFIG_PREEMPT=y
17# CONFIG_STANDALONE is not set
18CONFIG_BLK_DEV_RAM=y
19CONFIG_BLK_DEV_RAM_SIZE=1024
20# CONFIG_INPUT is not set
21# CONFIG_SERIO_SERPORT is not set
22# CONFIG_VT is not set
23CONFIG_HW_RANDOM=y
24CONFIG_PROC_KCORE=y
25CONFIG_TMPFS=y
26CONFIG_CRAMFS=y
27CONFIG_ROMFS_FS=y
28# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 446b3831c214..3d1ae2bfaa6f 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -44,10 +44,10 @@
44/* 44/*
45 * These will never work in 32-bit, don't even bother. 45 * These will never work in 32-bit, don't even bother.
46 */ 46 */
47#define P1SEGADDR(a) __futile_remapping_attempt 47#define P1SEGADDR(a) ({ (void)(a); BUG(); NULL; })
48#define P2SEGADDR(a) __futile_remapping_attempt 48#define P2SEGADDR(a) ({ (void)(a); BUG(); NULL; })
49#define P3SEGADDR(a) __futile_remapping_attempt 49#define P3SEGADDR(a) ({ (void)(a); BUG(); NULL; })
50#define P4SEGADDR(a) __futile_remapping_attempt 50#define P4SEGADDR(a) ({ (void)(a); BUG(); NULL; })
51#endif 51#endif
52#endif /* P1SEG */ 52#endif /* P1SEG */
53 53
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index a15f1058bbf4..083ea068e819 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
66#define PHYS_ADDR_MASK29 0x1fffffff 66#define PHYS_ADDR_MASK29 0x1fffffff
67#define PHYS_ADDR_MASK32 0xffffffff 67#define PHYS_ADDR_MASK32 0xffffffff
68 68
69#ifdef CONFIG_PMB
70static inline unsigned long phys_addr_mask(void) 69static inline unsigned long phys_addr_mask(void)
71{ 70{
72 /* Is the MMU in 29bit mode? */ 71 /* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@ static inline unsigned long phys_addr_mask(void)
75 74
76 return PHYS_ADDR_MASK32; 75 return PHYS_ADDR_MASK32;
77} 76}
78#elif defined(CONFIG_32BIT)
79static inline unsigned long phys_addr_mask(void)
80{
81 return PHYS_ADDR_MASK32;
82}
83#else
84static inline unsigned long phys_addr_mask(void)
85{
86 return PHYS_ADDR_MASK29;
87}
88#endif
89 77
90#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) 78#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
91#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) 79#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5afff03..10c8b1823a18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <asm/types.h> 12#include <asm/types.h>
13#include <asm/uncached.h>
13 14
14#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ 15#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
15 16
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
137#define instruction_size(insn) (4) 138#define instruction_size(insn) (4)
138#endif 139#endif
139 140
140extern unsigned long cached_to_uncached;
141extern unsigned long uncached_size;
142
143void per_cpu_trap_init(void); 141void per_cpu_trap_init(void);
144void default_idle(void); 142void default_idle(void);
145void cpu_idle_wait(void); 143void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b2739405..a4ad1cd9bc4d 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
145 __restore_dsp(prev); \ 145 __restore_dsp(prev); \
146} while (0) 146} while (0)
147 147
148/*
149 * Jump to uncached area.
150 * When handling TLB or caches, we need to do it from an uncached area.
151 */
152#define jump_to_uncached() \
153do { \
154 unsigned long __dummy; \
155 \
156 __asm__ __volatile__( \
157 "mova 1f, %0\n\t" \
158 "add %1, %0\n\t" \
159 "jmp @%0\n\t" \
160 " nop\n\t" \
161 ".balign 4\n" \
162 "1:" \
163 : "=&z" (__dummy) \
164 : "r" (cached_to_uncached)); \
165} while (0)
166
167/*
168 * Back to cached area.
169 */
170#define back_to_cached() \
171do { \
172 unsigned long __dummy; \
173 ctrl_barrier(); \
174 __asm__ __volatile__( \
175 "mov.l 1f, %0\n\t" \
176 "jmp @%0\n\t" \
177 " nop\n\t" \
178 ".balign 4\n" \
179 "1: .long 2f\n" \
180 "2:" \
181 : "=&r" (__dummy)); \
182} while (0)
183
184#ifdef CONFIG_CPU_HAS_SR_RB 148#ifdef CONFIG_CPU_HAS_SR_RB
185#define lookup_exception_vector() \ 149#define lookup_exception_vector() \
186({ \ 150({ \
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc8..8593bc8d1a4e 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
34 &next->thread); \ 34 &next->thread); \
35} while (0) 35} while (0)
36 36
37#define jump_to_uncached() do { } while (0)
38#define back_to_cached() do { } while (0)
39
40#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr)) 37#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
41#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr)) 38#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
42#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr)) 39#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626a..6f8816b79cf1 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
4#include <linux/bug.h> 4#include <linux/bug.h>
5 5
6#ifdef CONFIG_UNCACHED_MAPPING 6#ifdef CONFIG_UNCACHED_MAPPING
7extern unsigned long cached_to_uncached;
8extern unsigned long uncached_size;
7extern unsigned long uncached_start, uncached_end; 9extern unsigned long uncached_start, uncached_end;
8 10
9extern int virt_addr_uncached(unsigned long kaddr); 11extern int virt_addr_uncached(unsigned long kaddr);
10extern void uncached_init(void); 12extern void uncached_init(void);
11extern void uncached_resize(unsigned long size); 13extern void uncached_resize(unsigned long size);
14
15/*
16 * Jump to uncached area.
17 * When handling TLB or caches, we need to do it from an uncached area.
18 */
19#define jump_to_uncached() \
20do { \
21 unsigned long __dummy; \
22 \
23 __asm__ __volatile__( \
24 "mova 1f, %0\n\t" \
25 "add %1, %0\n\t" \
26 "jmp @%0\n\t" \
27 " nop\n\t" \
28 ".balign 4\n" \
29 "1:" \
30 : "=&z" (__dummy) \
31 : "r" (cached_to_uncached)); \
32} while (0)
33
34/*
35 * Back to cached area.
36 */
37#define back_to_cached() \
38do { \
39 unsigned long __dummy; \
40 ctrl_barrier(); \
41 __asm__ __volatile__( \
42 "mov.l 1f, %0\n\t" \
43 "jmp @%0\n\t" \
44 " nop\n\t" \
45 ".balign 4\n" \
46 "1: .long 2f\n" \
47 "2:" \
48 : "=&r" (__dummy)); \
49} while (0)
12#else 50#else
13#define virt_addr_uncached(kaddr) (0) 51#define virt_addr_uncached(kaddr) (0)
14#define uncached_init() do { } while (0) 52#define uncached_init() do { } while (0)
15#define uncached_resize(size) BUG() 53#define uncached_resize(size) BUG()
54#define jump_to_uncached() do { } while (0)
55#define back_to_cached() do { } while (0)
16#endif 56#endif
17 57
18#endif /* __ASM_SH_UNCACHED_H */ 58#endif /* __ASM_SH_UNCACHED_H */
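The relocated jump_to_uncached()/back_to_cached() pair is meant to bracket code that reprograms the TLB or caches, since that work must not execute through the cached alias it is modifying. A hypothetical caller to show the intended shape; poke_cache_registers() is a made-up placeholder:

    /* Hypothetical usage; poke_cache_registers() is a placeholder. */
    extern void poke_cache_registers(void);

    static void sketch_cache_reconfig(void)
    {
            jump_to_uncached();      /* continue from the uncached alias */
            poke_cache_registers();  /* safe to rewrite cache state here */
            back_to_cached();        /* resume through the cached mapping */
    }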
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644
index efc43b323466..000000000000
--- a/arch/sh/include/mach-common/mach/edosk7705.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __ASM_SH_EDOSK7705_H
2#define __ASM_SH_EDOSK7705_H
3
4#define __IO_PREFIX sh_edosk7705
5#include <asm/io_generic.h>
6
7#endif /* __ASM_SH_EDOSK7705_H */
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
index 1aed15856e11..dcb05fa8c164 100644
--- a/arch/sh/include/mach-common/mach/microdev.h
+++ b/arch/sh/include/mach-common/mach/microdev.h
@@ -68,13 +68,4 @@ extern void microdev_print_fpga_intc_status(void);
68#define __IO_PREFIX microdev 68#define __IO_PREFIX microdev
69#include <asm/io_generic.h> 69#include <asm/io_generic.h>
70 70
71#if defined(CONFIG_PCI)
72unsigned char microdev_pci_inb(unsigned long port);
73unsigned short microdev_pci_inw(unsigned long port);
74unsigned long microdev_pci_inl(unsigned long port);
75void microdev_pci_outb(unsigned char data, unsigned long port);
76void microdev_pci_outw(unsigned short data, unsigned long port);
77void microdev_pci_outl(unsigned long data, unsigned long port);
78#endif
79
80#endif /* __ASM_SH_MICRODEV_H */ 71#endif /* __ASM_SH_MICRODEV_H */
diff --git a/arch/sh/include/mach-common/mach/snapgear.h b/arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f51c4d..3653b9a4bacc 100644
--- a/arch/sh/include/mach-common/mach/snapgear.h
+++ b/arch/sh/include/mach-common/mach/secureedge5410.h
@@ -12,30 +12,9 @@
12#ifndef _ASM_SH_IO_SNAPGEAR_H 12#ifndef _ASM_SH_IO_SNAPGEAR_H
13#define _ASM_SH_IO_SNAPGEAR_H 13#define _ASM_SH_IO_SNAPGEAR_H
14 14
15#if defined(CONFIG_CPU_SH4)
16/*
17 * The external interrupt lines, these take up ints 0 - 15 inclusive
18 * depending on the priority for the interrupt. In fact the priority
19 * is the interrupt :-)
20 */
21
22#define IRL0_IRQ 2
23#define IRL0_PRIORITY 13
24
25#define IRL1_IRQ 5
26#define IRL1_PRIORITY 10
27
28#define IRL2_IRQ 8
29#define IRL2_PRIORITY 7
30
31#define IRL3_IRQ 11
32#define IRL3_PRIORITY 4
33#endif
34
35#define __IO_PREFIX snapgear 15#define __IO_PREFIX snapgear
36#include <asm/io_generic.h> 16#include <asm/io_generic.h>
37 17
38#ifdef CONFIG_SH_SECUREEDGE5410
39/* 18/*
40 * We need to remember what was written to the ioport as some bits 19 * We need to remember what was written to the ioport as some bits
41 * are shared with other functions and you cannot read back what was 20 * are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@ extern unsigned short secureedge5410_ioport;
66 ((secureedge5410_ioport & ~(mask)) | ((val) & (mask))))) 45 ((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
67#define SECUREEDGE_READ_IOPORT() \ 46#define SECUREEDGE_READ_IOPORT() \
68 ((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817)) 47 ((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
69#endif
70 48
71#endif /* _ASM_SH_IO_SNAPGEAR_H */ 49#endif /* _ASM_SH_IO_SNAPGEAR_H */
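The SECUREEDGE_WRITE_IOPORT machinery that survives the cleanup is a classic shadow-register pattern: some output bits cannot be read back, so the last written value is kept in secureedge5410_ioport and every update merges against that software copy. A generic sketch of the pattern; the address and width here are illustrative, not the board's real values:

    #include <stdint.h>

    #define IOPORT ((volatile uint16_t *)0xb0000000) /* illustrative address */

    static uint16_t ioport_shadow;  /* last value written to the port */

    /*
     * Update only the bits selected by 'mask'.  Hardware readback is
     * partial, so the merge happens against the software shadow.
     */
    static void ioport_update(uint16_t mask, uint16_t val)
    {
            ioport_shadow = (ioport_shadow & ~mask) | (val & mask);
            *IOPORT = ioport_shadow;
    }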
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644
index 4161122c84ef..000000000000
--- a/arch/sh/include/mach-common/mach/systemh7751.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
2#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
3
4/*
5 * linux/include/asm-sh/systemh/7751systemh.h
6 *
7 * Copyright (C) 2000 Kazumoto Kojima
8 *
9 * Hitachi SystemH support
10
11 * Modified for 7751 SystemH by
12 * Jonathan Short, 2002.
13 */
14
15/* Box specific addresses. */
16
17#define PA_ROM 0x00000000 /* EPROM */
18#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */
19#define PA_FROM 0x01000000 /* EPROM */
20#define PA_FROM_SIZE 0x00400000 /* EPROM size 4M byte */
21#define PA_EXT1 0x04000000
22#define PA_EXT1_SIZE 0x04000000
23#define PA_EXT2 0x08000000
24#define PA_EXT2_SIZE 0x04000000
25#define PA_SDRAM 0x0c000000
26#define PA_SDRAM_SIZE 0x04000000
27
28#define PA_EXT4 0x12000000
29#define PA_EXT4_SIZE 0x02000000
30#define PA_EXT5 0x14000000
31#define PA_EXT5_SIZE 0x04000000
32#define PA_PCIC 0x18000000 /* MR-SHPC-01 PCMCIA */
33
34#define PA_DIPSW0 0xb9000000 /* Dip switch 5,6 */
35#define PA_DIPSW1 0xb9000002 /* Dip switch 7,8 */
36#define PA_LED 0xba000000 /* LED */
37#define PA_BCR 0xbb000000 /* FPGA on the MS7751SE01 */
38
39#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controller */
40#define PA_MRSHPC_MW1 0xb8400000 /* MR-SHPC-01 memory window base */
41#define PA_MRSHPC_MW2 0xb8500000 /* MR-SHPC-01 attribute window base */
42#define PA_MRSHPC_IO 0xb8600000 /* MR-SHPC-01 I/O window base */
43#define MRSHPC_MODE (PA_MRSHPC + 4)
44#define MRSHPC_OPTION (PA_MRSHPC + 6)
45#define MRSHPC_CSR (PA_MRSHPC + 8)
46#define MRSHPC_ISR (PA_MRSHPC + 10)
47#define MRSHPC_ICR (PA_MRSHPC + 12)
48#define MRSHPC_CPWCR (PA_MRSHPC + 14)
49#define MRSHPC_MW0CR1 (PA_MRSHPC + 16)
50#define MRSHPC_MW1CR1 (PA_MRSHPC + 18)
51#define MRSHPC_IOWCR1 (PA_MRSHPC + 20)
52#define MRSHPC_MW0CR2 (PA_MRSHPC + 22)
53#define MRSHPC_MW1CR2 (PA_MRSHPC + 24)
54#define MRSHPC_IOWCR2 (PA_MRSHPC + 26)
55#define MRSHPC_CDCR (PA_MRSHPC + 28)
56#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
57
58#define BCR_ILCRA (PA_BCR + 0)
59#define BCR_ILCRB (PA_BCR + 2)
60#define BCR_ILCRC (PA_BCR + 4)
61#define BCR_ILCRD (PA_BCR + 6)
62#define BCR_ILCRE (PA_BCR + 8)
63#define BCR_ILCRF (PA_BCR + 10)
64#define BCR_ILCRG (PA_BCR + 12)
65
66#define IRQ_79C973 13
67
68#define __IO_PREFIX sh7751systemh
69#include <asm/io_generic.h>
70
71#endif /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c6b53a..0fe2e9329cb2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@ static struct clk r_clk = {
48 * Default rate for the root input clock, reset this with clk_set_rate() 48 * Default rate for the root input clock, reset this with clk_set_rate()
49 * from the platform code. 49 * from the platform code.
50 */ 50 */
51struct clk extal_clk = { 51static struct clk extal_clk = {
52 .rate = 33333333, 52 .rate = 33333333,
53}; 53};
54 54
@@ -111,7 +111,7 @@ static struct clk div3_clk = {
111 .parent = &pll_clk, 111 .parent = &pll_clk,
112}; 112};
113 113
114struct clk *main_clks[] = { 114static struct clk *main_clks[] = {
115 &r_clk, 115 &r_clk,
116 &extal_clk, 116 &extal_clk,
117 &fll_clk, 117 &fll_clk,
@@ -156,7 +156,7 @@ struct clk div4_clks[DIV4_NR] = {
156 156
157enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR }; 157enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
158 158
159struct clk div6_clks[DIV6_NR] = { 159static struct clk div6_clks[DIV6_NR] = {
160 [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0), 160 [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
161 [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0), 161 [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
162 [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0), 162 [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 09370392aff1..c3e61b366493 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@ config 29BIT
79 79
80config 32BIT 80config 32BIT
81 bool 81 bool
82 default y if CPU_SH5 82 default y if CPU_SH5 || !MMU
83 83
84config PMB 84config PMB
85 bool "Support 32-bit physical addressing through PMB" 85 bool "Support 32-bit physical addressing through PMB"
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 038793286990..40733a952402 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -79,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
79void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 79void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
80 enum dma_data_direction direction) 80 enum dma_data_direction direction)
81{ 81{
82#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) 82 void *addr;
83 void *p1addr = vaddr; 83
84#else 84 addr = __in_29bit_mode() ?
85 void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); 85 (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
86#endif
87 86
88 switch (direction) { 87 switch (direction) {
89 case DMA_FROM_DEVICE: /* invalidate only */ 88 case DMA_FROM_DEVICE: /* invalidate only */
90 __flush_invalidate_region(p1addr, size); 89 __flush_invalidate_region(addr, size);
91 break; 90 break;
92 case DMA_TO_DEVICE: /* writeback only */ 91 case DMA_TO_DEVICE: /* writeback only */
93 __flush_wback_region(p1addr, size); 92 __flush_wback_region(addr, size);
94 break; 93 break;
95 case DMA_BIDIRECTIONAL: /* writeback and invalidate */ 94 case DMA_BIDIRECTIONAL: /* writeback and invalidate */
96 __flush_purge_region(p1addr, size); 95 __flush_purge_region(addr, size);
97 break; 96 break;
98 default: 97 default:
99 BUG(); 98 BUG();
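The rewritten dma_cache_sync() selects the flushable alias at run time rather than at build time: in 29-bit mode the coherent buffer must be translated into its cached P1 twin before the region-flush primitives see it, while PMB/32-bit mappings are already ordinary cached addresses. A minimal sketch with stand-ins for __in_29bit_mode() and P1SEGADDR():

    /* Stand-ins for the real SH helpers; illustration only. */
    extern int sh_in_29bit_mode(void);
    extern unsigned long to_p1seg(unsigned long va);

    static void *flushable_alias(void *vaddr)
    {
            if (sh_in_29bit_mode())
                    return (void *)to_p1seg((unsigned long)vaddr);
            return vaddr;   /* PMB/32-bit: already a normal cached mapping */
    }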
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca551fc0..a7767da815e9 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
28 28
29void __init uncached_init(void) 29void __init uncached_init(void)
30{ 30{
31#ifdef CONFIG_29BIT 31#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
32 uncached_start = P2SEG; 32 uncached_start = P2SEG;
33#else 33#else
34 uncached_start = memory_end; 34 uncached_start = memory_end;
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 9f56eb978024..0e68465e7b50 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -26,7 +26,6 @@ HD64461 HD64461
267724SE SH_7724_SOLUTION_ENGINE 267724SE SH_7724_SOLUTION_ENGINE
277751SE SH_7751_SOLUTION_ENGINE 277751SE SH_7751_SOLUTION_ENGINE
287780SE SH_7780_SOLUTION_ENGINE 287780SE SH_7780_SOLUTION_ENGINE
297751SYSTEMH SH_7751_SYSTEMH
30HP6XX SH_HP6XX 29HP6XX SH_HP6XX
31DREAMCAST SH_DREAMCAST 30DREAMCAST SH_DREAMCAST
32SNAPGEAR SH_SECUREEDGE5410 31SNAPGEAR SH_SECUREEDGE5410
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee186721..b2a6c5de79ab 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
23 23
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/threads.h> 25#include <linux/threads.h>
26#include <asm/kmap_types.h>
27#include <asm/tlbflush.h> 26#include <asm/tlbflush.h>
28#include <asm/homecache.h> 27#include <asm/homecache.h>
29 28
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c05..3d0f20246260 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
16#define _ASM_TILE_KMAP_TYPES_H 16#define _ASM_TILE_KMAP_TYPES_H
17 17
18/* 18/*
19 * In TILE Linux each set of four of these uses another 16MB chunk of 19 * In 32-bit TILE Linux we have to balance the desire to have a lot of
20 * address space, given 64 tiles and 64KB pages, so we only enable 20 * nested atomic mappings with the fact that large page sizes and many
21 * ones that are required by the kernel configuration. 21 * processors chew up address space quickly. In a typical
22 * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
23 * adds 4MB of required address-space. For now we leave KM_TYPE_NR
24 * set to depth 8.
22 */ 25 */
23enum km_type { 26enum km_type {
27 KM_TYPE_NR = 8
28};
29
30/*
31 * We provide dummy definitions of all the stray values that used to be
32 * required for kmap_atomic() and no longer are.
33 */
34enum {
24 KM_BOUNCE_READ, 35 KM_BOUNCE_READ,
25 KM_SKB_SUNRPC_DATA, 36 KM_SKB_SUNRPC_DATA,
26 KM_SKB_DATA_SOFTIRQ, 37 KM_SKB_DATA_SOFTIRQ,
27 KM_USER0, 38 KM_USER0,
28 KM_USER1, 39 KM_USER1,
29 KM_BIO_SRC_IRQ, 40 KM_BIO_SRC_IRQ,
41 KM_BIO_DST_IRQ,
42 KM_PTE0,
43 KM_PTE1,
30 KM_IRQ0, 44 KM_IRQ0,
31 KM_IRQ1, 45 KM_IRQ1,
32 KM_SOFTIRQ0, 46 KM_SOFTIRQ0,
33 KM_SOFTIRQ1, 47 KM_SOFTIRQ1,
34 KM_MEMCPY0, 48 KM_SYNC_ICACHE,
35 KM_MEMCPY1, 49 KM_SYNC_DCACHE,
36#if defined(CONFIG_HIGHPTE) 50 KM_UML_USERCOPY,
37 KM_PTE0, 51 KM_IRQ_PTE,
38 KM_PTE1, 52 KM_NMI,
39#endif 53 KM_NMI_PTE,
40 KM_TYPE_NR 54 KM_KDB
41}; 55};
42 56
43#endif /* _ASM_TILE_KMAP_TYPES_H */ 57#endif /* _ASM_TILE_KMAP_TYPES_H */
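
The 4MB figure in the new comment is easy to verify: each km_type slot costs one fixmap page per CPU, so in the layout the comment describes:

        64 CPUs * 64KB pages          = 4MB of address space per slot
        KM_TYPE_NR = 8 slots          = 32MB of fixmap total
        (the old comment's rate agrees: 4 slots * 64 * 64KB = 16MB)
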
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd855bc..a6604e9485da 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
344#define pgd_offset_k(address) pgd_offset(&init_mm, address) 344#define pgd_offset_k(address) pgd_offset(&init_mm, address)
345 345
346#if defined(CONFIG_HIGHPTE) 346#if defined(CONFIG_HIGHPTE)
347extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); 347extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
348#define pte_offset_map(dir, address) \ 348#define pte_unmap(pte) kunmap_atomic(pte)
349 _pte_offset_map(dir, address, KM_PTE0)
350#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
351#else 349#else
352#define pte_offset_map(dir, address) pte_offset_kernel(dir, address) 350#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
353#define pte_unmap(pte) do { } while (0) 351#define pte_unmap(pte) do { } while (0)
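
With the km_type argument gone, pte_offset_map()/pte_unmap() pair like any other kmap_atomic() user; on !HIGHPTE configurations they collapse to the kernel-linear forms above. A usage sketch (pmd and addr are hypothetical):

        pte_t *ptep = pte_offset_map(pmd, addr);  /* takes an atomic kmap */
        pte_t entry = *ptep;
        pte_unmap(ptep);                          /* kunmap_atomic(ptep)  */
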
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa92c70..b16e5db8f0e7 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
1#ifdef CONFIG_COMPAT
2#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
3#endif
1#include <asm-generic/stat.h> 4#include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff485333..b35c2db71199 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
41#ifdef CONFIG_COMPAT 41#ifdef CONFIG_COMPAT
42#define __ARCH_WANT_SYS_LLSEEK 42#define __ARCH_WANT_SYS_LLSEEK
43#endif 43#endif
44#define __ARCH_WANT_SYS_NEWFSTATAT
44#endif 45#endif
45 46
46#endif /* _ASM_TILE_UNISTD_H */ 47#endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cdd9462..67617a05e602 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@ long tile_compat_sys_msgrcv(int msqid,
148#define compat_sys_readahead sys32_readahead 148#define compat_sys_readahead sys32_readahead
149#define compat_sys_sync_file_range compat_sys_sync_file_range2 149#define compat_sys_sync_file_range compat_sys_sync_file_range2
150 150
151/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */ 151/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
152#define compat_sys_stat64 sys_newstat 152#define compat_sys_stat64 sys_stat64
153#define compat_sys_lstat64 sys_newlstat 153#define compat_sys_lstat64 sys_lstat64
154#define compat_sys_fstat64 sys_newfstat 154#define compat_sys_fstat64 sys_fstat64
155#define compat_sys_fstatat64 sys_newfstatat 155#define compat_sys_fstatat64 sys_fstatat64
156 156
157/* The native sys_ptrace dynamically handles compat binaries. */ 157/* The native sys_ptrace dynamically handles compat binaries. */
158#define compat_sys_ptrace sys_ptrace 158#define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a0..493a0e66d916 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
54void early_panic(const char *fmt, ...) 54void early_panic(const char *fmt, ...)
55{ 55{
56 va_list ap; 56 va_list ap;
57 raw_local_irq_disable_all(); 57 arch_local_irq_disable_all();
58 va_start(ap, fmt); 58 va_start(ap, fmt);
59 early_printk("Kernel panic - not syncing: "); 59 early_printk("Kernel panic - not syncing: ");
60 early_vprintk(fmt, ap); 60 early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a7843410..e910530436e6 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
151 151
152static void enable_firewall_interrupts(void) 152static void enable_firewall_interrupts(void)
153{ 153{
154 raw_local_irq_unmask_now(INT_UDN_FIREWALL); 154 arch_local_irq_unmask_now(INT_UDN_FIREWALL);
155} 155}
156 156
157static void disable_firewall_interrupts(void) 157static void disable_firewall_interrupts(void)
158{ 158{
159 raw_local_irq_mask_now(INT_UDN_FIREWALL); 159 arch_local_irq_mask_now(INT_UDN_FIREWALL);
160} 160}
161 161
162/* Set up hardwall on this cpu based on the passed hardwall_info. */ 162/* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
768} 768}
769 769
770static const struct file_operations dev_hardwall_fops = { 770static const struct file_operations dev_hardwall_fops = {
771 .open = nonseekable_open,
771 .unlocked_ioctl = hardwall_ioctl, 772 .unlocked_ioctl = hardwall_ioctl,
772#ifdef CONFIG_COMPAT 773#ifdef CONFIG_COMPAT
773 .compat_ioctl = hardwall_compat_ioctl, 774 .compat_ioctl = hardwall_compat_ioctl,
774#endif 775#endif
775 .flush = hardwall_flush, 776 .flush = hardwall_flush,
776 .release = hardwall_release, 777 .release = hardwall_release,
777 .llseek = noop_llseek,
778}; 778};
779 779
780static struct cdev hardwall_dev; 780static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e63917687e99..128805ef8f2c 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
26#define IS_HW_CLEARED 1 26#define IS_HW_CLEARED 1
27 27
28/* 28/*
29 * The set of interrupts we enable for raw_local_irq_enable(). 29 * The set of interrupts we enable for arch_local_irq_enable().
30 * This is initialized to have just a single interrupt that the kernel 30 * This is initialized to have just a single interrupt that the kernel
31 * doesn't actually use as a sentinel. During kernel init, 31 * doesn't actually use as a sentinel. During kernel init,
32 * interrupts are added as the kernel gets prepared to support them. 32 * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
225 /* Enable interrupt delivery. */ 225 /* Enable interrupt delivery. */
226 unmask_irqs(~0UL); 226 unmask_irqs(~0UL);
227#if CHIP_HAS_IPI() 227#if CHIP_HAS_IPI()
228 raw_local_irq_unmask(INT_IPI_K); 228 arch_local_irq_unmask(INT_IPI_K);
229#endif 229#endif
230} 230}
231 231
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d6179..0d8b9e933487 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
182 182
183 if ((entry & IND_SOURCE)) { 183 if ((entry & IND_SOURCE)) {
184 void *va = 184 void *va =
185 kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0); 185 kmap_atomic_pfn(entry >> PAGE_SHIFT);
186 r = kexec_bn2cl(va); 186 r = kexec_bn2cl(va);
187 if (r) { 187 if (r) {
188 command_line = r; 188 command_line = r;
189 break; 189 break;
190 } 190 }
191 kunmap_atomic(va, KM_USER0); 191 kunmap_atomic(va);
192 } 192 }
193 } 193 }
194 194
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
198 198
199 hverr = hv_set_command_line( 199 hverr = hv_set_command_line(
200 (HV_VirtAddr) command_line, strlen(command_line)); 200 (HV_VirtAddr) command_line, strlen(command_line));
201 kunmap_atomic(command_line, KM_USER0); 201 kunmap_atomic(command_line);
202 } else { 202 } else {
203 pr_info("%s: no command line found; making empty\n", 203 pr_info("%s: no command line found; making empty\n",
204 __func__); 204 __func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e3933f726..0858ee6b520f 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
34 panic("hv_register_message_state: error %d", rc); 34 panic("hv_register_message_state: error %d", rc);
35 35
36 /* Make sure downcall interrupts will be enabled. */ 36 /* Make sure downcall interrupts will be enabled. */
37 raw_local_irq_unmask(INT_INTCTRL_K); 37 arch_local_irq_unmask(INT_INTCTRL_K);
38} 38}
39 39
40void hv_message_intr(struct pt_regs *regs, int intnum) 40void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd29884c09f..e92e40527d6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@ long arch_ptrace(struct task_struct *child, long request,
50{ 50{
51 unsigned long __user *datap = (long __user __force *)data; 51 unsigned long __user *datap = (long __user __force *)data;
52 unsigned long tmp; 52 unsigned long tmp;
53 int i;
54 long ret = -EIO; 53 long ret = -EIO;
55 unsigned long *childregs;
56 char *childreg; 54 char *childreg;
55 struct pt_regs copyregs;
56 int ex1_offset;
57 57
58 switch (request) { 58 switch (request) {
59 59
@@ -80,6 +80,16 @@ long arch_ptrace(struct task_struct *child, long request,
80 if (addr >= PTREGS_SIZE) 80 if (addr >= PTREGS_SIZE)
81 break; 81 break;
82 childreg = (char *)task_pt_regs(child) + addr; 82 childreg = (char *)task_pt_regs(child) + addr;
83
84 /* Guard against overwrites of the privilege level. */
85 ex1_offset = PTREGS_OFFSET_EX1;
86#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
87 if (is_compat_task()) /* point at low word */
88 ex1_offset += sizeof(compat_long_t);
89#endif
90 if (addr == ex1_offset)
91 data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
92
83#ifdef CONFIG_COMPAT 93#ifdef CONFIG_COMPAT
84 if (is_compat_task()) { 94 if (is_compat_task()) {
85 if (addr & (sizeof(compat_long_t)-1)) 95 if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@ long arch_ptrace(struct task_struct *child, long request,
96 break; 106 break;
97 107
98 case PTRACE_GETREGS: /* Get all registers from the child. */ 108 case PTRACE_GETREGS: /* Get all registers from the child. */
99 if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) 109 if (copy_to_user(datap, task_pt_regs(child),
100 break; 110 sizeof(struct pt_regs)) == 0) {
101 childregs = (long *)task_pt_regs(child); 111 ret = 0;
102 for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
103 ++i) {
104 ret = __put_user(childregs[i], &datap[i]);
105 if (ret != 0)
106 break;
107 } 112 }
108 break; 113 break;
109 114
110 case PTRACE_SETREGS: /* Set all registers in the child. */ 115 case PTRACE_SETREGS: /* Set all registers in the child. */
111 if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) 116 if (copy_from_user(&copyregs, datap,
112 break; 117 sizeof(struct pt_regs)) == 0) {
113 childregs = (long *)task_pt_regs(child); 118 copyregs.ex1 =
114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long); 119 PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
115 ++i) { 120 *task_pt_regs(child) = copyregs;
116 ret = __get_user(childregs[i], &datap[i]); 121 ret = 0;
117 if (ret != 0)
118 break;
119 } 122 }
120 break; 123 break;
121 124
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beba..baa3d905fee2 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
27void machine_halt(void) 27void machine_halt(void)
28{ 28{
29 warn_early_printk(); 29 warn_early_printk();
30 raw_local_irq_disable_all(); 30 arch_local_irq_disable_all();
31 smp_send_stop(); 31 smp_send_stop();
32 hv_halt(); 32 hv_halt();
33} 33}
@@ -35,14 +35,14 @@ void machine_halt(void)
35void machine_power_off(void) 35void machine_power_off(void)
36{ 36{
37 warn_early_printk(); 37 warn_early_printk();
38 raw_local_irq_disable_all(); 38 arch_local_irq_disable_all();
39 smp_send_stop(); 39 smp_send_stop();
40 hv_power_off(); 40 hv_power_off();
41} 41}
42 42
43void machine_restart(char *cmd) 43void machine_restart(char *cmd)
44{ 44{
45 raw_local_irq_disable_all(); 45 arch_local_irq_disable_all();
46 smp_send_stop(); 46 smp_send_stop();
47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); 47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
48} 48}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad12da0..fb0b3cbeae14 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
868 868
869 /* Allow asynchronous TLB interrupts. */ 869 /* Allow asynchronous TLB interrupts. */
870#if CHIP_HAS_TILE_DMA() 870#if CHIP_HAS_TILE_DMA()
871 raw_local_irq_unmask(INT_DMATLB_MISS); 871 arch_local_irq_unmask(INT_DMATLB_MISS);
872 raw_local_irq_unmask(INT_DMATLB_ACCESS); 872 arch_local_irq_unmask(INT_DMATLB_ACCESS);
873#endif 873#endif
874#if CHIP_HAS_SN_PROC() 874#if CHIP_HAS_SN_PROC()
875 raw_local_irq_unmask(INT_SNITLB_MISS); 875 arch_local_irq_unmask(INT_SNITLB_MISS);
876#endif 876#endif
877#ifdef __tilegx__ 877#ifdef __tilegx__
878 raw_local_irq_unmask(INT_SINGLE_STEP_K); 878 arch_local_irq_unmask(INT_SINGLE_STEP_K);
879#endif 879#endif
880 880
881 /* 881 /*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85ae3ae..687719d4abd1 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@ int restore_sigcontext(struct pt_regs *regs,
71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
72 err |= __get_user(regs->regs[i], &sc->gregs[i]); 72 err |= __get_user(regs->regs[i], &sc->gregs[i]);
73 73
74 /* Ensure that the PL is always set to USER_PL. */
75 regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
76
74 regs->faultnum = INT_SWINT_1_SIGRETURN; 77 regs->faultnum = INT_SWINT_1_SIGRETURN;
75 78
76 err |= __get_user(*pr0, &sc->gregs[0]); 79 err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@ void do_signal(struct pt_regs *regs)
330 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 333 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
331 } 334 }
332 335
333 return; 336 goto done;
334 } 337 }
335 338
336 /* Did we come from a system call? */ 339 /* Did we come from a system call? */
@@ -358,4 +361,8 @@ void do_signal(struct pt_regs *regs)
358 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 361 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
359 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 362 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
360 } 363 }
364
365done:
366 /* Avoid double syscall restart if there are nested signals. */
367 regs->faultnum = INT_SWINT_1_SIGRETURN;
361} 368}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d90aff3..9575b37a8b75 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
115static void smp_stop_cpu_interrupt(void) 115static void smp_stop_cpu_interrupt(void)
116{ 116{
117 set_cpu_online(smp_processor_id(), 0); 117 set_cpu_online(smp_processor_id(), 0);
118 raw_local_irq_disable_all(); 118 arch_local_irq_disable_all();
119 for (;;) 119 for (;;)
120 asm("nap"); 120 asm("nap");
121} 121}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e1421..f2e156e44692 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
132{ 132{
133 BUG_ON(ticks > MAX_TICK); 133 BUG_ON(ticks > MAX_TICK);
134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); 134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
135 raw_local_irq_unmask_now(INT_TILE_TIMER); 135 arch_local_irq_unmask_now(INT_TILE_TIMER);
136 return 0; 136 return 0;
137} 137}
138 138
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
143static void tile_timer_set_mode(enum clock_event_mode mode, 143static void tile_timer_set_mode(enum clock_event_mode mode,
144 struct clock_event_device *evt) 144 struct clock_event_device *evt)
145{ 145{
146 raw_local_irq_mask_now(INT_TILE_TIMER); 146 arch_local_irq_mask_now(INT_TILE_TIMER);
147} 147}
148 148
149/* 149/*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
172 evt->cpumask = cpumask_of(smp_processor_id()); 172 evt->cpumask = cpumask_of(smp_processor_id());
173 173
174 /* Start out with timer not firing. */ 174 /* Start out with timer not firing. */
175 raw_local_irq_mask_now(INT_TILE_TIMER); 175 arch_local_irq_mask_now(INT_TILE_TIMER);
176 176
177 /* Register tile timer. */ 177 /* Register tile timer. */
178 clockevents_register_device(evt); 178 clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
188 * Mask the timer interrupt here, since we are a oneshot timer 188 * Mask the timer interrupt here, since we are a oneshot timer
189 * and there are now by definition no events pending. 189 * and there are now by definition no events pending.
190 */ 190 */
191 raw_local_irq_mask(INT_TILE_TIMER); 191 arch_local_irq_mask(INT_TILE_TIMER);
192 192
193 /* Track time spent here in an interrupt context */ 193 /* Track time spent here in an interrupt context */
194 irq_enter(); 194 irq_enter();
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7b266b..f7d4a6ad61e8 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
54 * we must run with interrupts disabled to avoid the risk of some 54 * we must run with interrupts disabled to avoid the risk of some
55 * other code seeing the incoherent data in our cache. (Recall that 55 * other code seeing the incoherent data in our cache. (Recall that
56 * our cache is indexed by PA, so even if the other code doesn't use 56 * our cache is indexed by PA, so even if the other code doesn't use
57 * our KM_MEMCPY virtual addresses, they'll still hit in cache using 57 * our kmap_atomic virtual addresses, they'll still hit in cache using
58 * the normal VAs that aren't supposed to hit in cache.) 58 * the normal VAs that aren't supposed to hit in cache.)
59 */ 59 */
60static void memcpy_multicache(void *dest, const void *source, 60static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
64 unsigned long flags, newsrc, newdst; 64 unsigned long flags, newsrc, newdst;
65 pmd_t *pmdp; 65 pmd_t *pmdp;
66 pte_t *ptep; 66 pte_t *ptep;
67 int type0, type1;
67 int cpu = get_cpu(); 68 int cpu = get_cpu();
68 69
69 /* 70 /*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
77 sim_allow_multiple_caching(1); 78 sim_allow_multiple_caching(1);
78 79
79 /* Set up the new dest mapping */ 80 /* Set up the new dest mapping */
80 idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0; 81 type0 = kmap_atomic_idx_push();
82 idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
81 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); 83 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
82 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); 84 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
83 ptep = pte_offset_kernel(pmdp, newdst); 85 ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
87 } 89 }
88 90
89 /* Set up the new source mapping */ 91 /* Set up the new source mapping */
90 idx += (KM_MEMCPY0 - KM_MEMCPY1); 92 type1 = kmap_atomic_idx_push();
93 idx += (type0 - type1);
91 src_pte = hv_pte_set_nc(src_pte); 94 src_pte = hv_pte_set_nc(src_pte);
92 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ 95 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
93 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); 96 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
119 * We're done: notify the simulator that all is back to normal, 122 * We're done: notify the simulator that all is back to normal,
120 * and re-enable interrupts and pre-emption. 123 * and re-enable interrupts and pre-emption.
121 */ 124 */
125 kmap_atomic_idx_pop();
126 kmap_atomic_idx_pop();
122 sim_allow_multiple_caching(0); 127 sim_allow_multiple_caching(0);
123 local_irq_restore(flags); 128 local_irq_restore(flags);
124 put_cpu(); 129 put_cpu();
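
memcpy_multicache() now draws its two temporary fixmap slots from the per-cpu kmap stack instead of the fixed KM_MEMCPY0/1 constants. The new API expects strict LIFO pairing (sketch; preemption is already off here because interrupts are disabled):

        int t0 = kmap_atomic_idx_push();        /* slot for dest   */
        int t1 = kmap_atomic_idx_push();        /* slot for source */
        /* ... map FIX_KMAP_BEGIN + KM_TYPE_NR * cpu + t0 / t1 ... */
        kmap_atomic_idx_pop();                  /* releases t1 */
        kmap_atomic_idx_pop();                  /* then t0    */
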
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index abb57331cf6e..31dbbd9afe47 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
227void *__kmap_atomic(struct page *page) 227void *__kmap_atomic(struct page *page)
228{ 228{
229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 229 /* PAGE_NONE is a magic value that tells us to check immutability. */
230 return kmap_atomic_prot(page, type, PAGE_NONE); 230 return kmap_atomic_prot(page, PAGE_NONE);
231} 231}
232EXPORT_SYMBOL(__kmap_atomic); 232EXPORT_SYMBOL(__kmap_atomic);
233 233
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982cb6c9..0b9ce69b0ee5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
988/* Select whether to free (1) or mark unusable (0) the __init pages. */ 988/* Select whether to free (1) or mark unusable (0) the __init pages. */
989static int __init set_initfree(char *str) 989static int __init set_initfree(char *str)
990{ 990{
991 strict_strtol(str, 0, &initfree); 991 long val;
 992 pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't"); 992 if (strict_strtol(str, 0, &val) == 0) {
993 initfree = val;
994 pr_info("initfree: %s free init pages\n",
995 initfree ? "will" : "won't");
996 }
993 return 1; 997 return 1;
994} 998}
995__setup("initfree=", set_initfree); 999__setup("initfree=", set_initfree);
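
strict_strtol() (the precursor of kstrtol()) returns 0 on success and a negative errno on failure, so the parsed value may only be committed on a zero return; the old code ignored the return entirely. The convention in brief (a sketch):

        long val;
        if (strict_strtol(str, 0, &val) == 0)
                initfree = val; /* success: val holds the parsed number */
        /* nonzero return: val is not trustworthy, leave initfree alone */
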
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c24621c41..1f5430c53d0d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
134} 134}
135 135
136#if defined(CONFIG_HIGHPTE) 136#if defined(CONFIG_HIGHPTE)
137pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type) 137pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
138{ 138{
139 pte_t *pte = kmap_atomic(pmd_page(*dir), type) + 139 pte_t *pte = kmap_atomic(pmd_page(*dir)) +
140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK; 140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
141 return &pte[pte_index(address)]; 141 return &pte[pte_index(address)];
142} 142}
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f75a3c..b7c5bab9bd77 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@ struct pt_regs {
38 38
39struct task_struct; 39struct task_struct;
40 40
41extern long subarch_ptrace(struct task_struct *child, long request, long addr, 41extern long subarch_ptrace(struct task_struct *child, long request,
42 long data); 42 unsigned long addr, unsigned long data);
43extern unsigned long getreg(struct task_struct *child, int regno); 43extern unsigned long getreg(struct task_struct *child, int regno);
44extern int putreg(struct task_struct *child, int regno, unsigned long value); 44extern int putreg(struct task_struct *child, int regno, unsigned long value);
45extern int get_fpregs(struct user_i387_struct __user *buf, 45extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a5e33f29bbeb..701b672c1122 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
122 break; 122 break;
123 123
124 case PTRACE_SET_THREAD_AREA: 124 case PTRACE_SET_THREAD_AREA:
125 ret = ptrace_set_thread_area(child, addr, datavp); 125 ret = ptrace_set_thread_area(child, addr, vp);
126 break; 126 break;
127 127
128 case PTRACE_FAULTINFO: { 128 case PTRACE_FAULTINFO: {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed6..f6ce0bda3b98 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
141 141
142static inline u32 native_apic_msr_read(u32 reg) 142static inline u32 native_apic_msr_read(u32 reg)
143{ 143{
144 u32 low, high; 144 u64 msr;
145 145
146 if (reg == APIC_DFR) 146 if (reg == APIC_DFR)
147 return -1; 147 return -1;
148 148
149 rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); 149 rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
150 return low; 150 return (u32)msr;
151} 151}
152 152
153static inline void native_x2apic_wait_icr_idle(void) 153static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
181extern void x2apic_icr_write(u32 low, u32 id); 181extern void x2apic_icr_write(u32 low, u32 id);
182static inline int x2apic_enabled(void) 182static inline int x2apic_enabled(void)
183{ 183{
184 int msr, msr2; 184 u64 msr;
185 185
186 if (!cpu_has_x2apic) 186 if (!cpu_has_x2apic)
187 return 0; 187 return 0;
188 188
189 rdmsr(MSR_IA32_APICBASE, msr, msr2); 189 rdmsrl(MSR_IA32_APICBASE, msr);
190 if (msr & X2APIC_ENABLE) 190 if (msr & X2APIC_ENABLE)
191 return 1; 191 return 1;
192 return 0; 192 return 0;
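
Both hunks above trade the two-u32 rdmsr() form for rdmsrl(), which reads the MSR as one u64; the halves are then plain shifts and casts. Equivalence sketch:

        u32 low, high;
        u64 msr;

        rdmsr(MSR_IA32_APICBASE, low, high);    /* EDX:EAX split    */
        rdmsrl(MSR_IA32_APICBASE, msr);         /* one 64-bit value */
        /* low == (u32)msr, high == (u32)(msr >> 32) */
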
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05cec..6d90adf4428a 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@ union uvh_node_present_table_u {
806}; 806};
807 807
808/* ========================================================================= */ 808/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
810/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
812
813#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
814#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
815#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
816#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
817#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
818#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
819
820union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
821 unsigned long v;
822 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
823 unsigned long rsvd_0_23: 24; /* */
824 unsigned long base : 8; /* RW */
825 unsigned long rsvd_32_47: 16; /* */
826 unsigned long m_alias : 5; /* RW */
827 unsigned long rsvd_53_62: 10; /* */
828 unsigned long enable : 1; /* RW */
829 } s;
830};
831
832/* ========================================================================= */
833/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
834/* ========================================================================= */
835#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
836
837#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
838#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
839#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
840#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
841#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
842#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
843
844union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
845 unsigned long v;
846 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
847 unsigned long rsvd_0_23: 24; /* */
848 unsigned long base : 8; /* RW */
849 unsigned long rsvd_32_47: 16; /* */
850 unsigned long m_alias : 5; /* RW */
851 unsigned long rsvd_53_62: 10; /* */
852 unsigned long enable : 1; /* RW */
853 } s;
854};
855
856/* ========================================================================= */
857/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
858/* ========================================================================= */
859#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
860
861#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
862#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
863#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
864#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
865#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
866#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
867
868union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
869 unsigned long v;
870 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
871 unsigned long rsvd_0_23: 24; /* */
872 unsigned long base : 8; /* RW */
873 unsigned long rsvd_32_47: 16; /* */
874 unsigned long m_alias : 5; /* RW */
875 unsigned long rsvd_53_62: 10; /* */
876 unsigned long enable : 1; /* RW */
877 } s;
878};
879
880/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 881/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
810/* ========================================================================= */ 882/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 883#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
857}; 929};
858 930
859/* ========================================================================= */ 931/* ========================================================================= */
932/* UVH_RH_GAM_CONFIG_MMR */
933/* ========================================================================= */
934#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
935
936#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
937#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
938#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
939#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
940#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
941#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
942
943union uvh_rh_gam_config_mmr_u {
944 unsigned long v;
945 struct uvh_rh_gam_config_mmr_s {
946 unsigned long m_skt : 6; /* RW */
947 unsigned long n_skt : 4; /* RW */
948 unsigned long rsvd_10_11: 2; /* */
949 unsigned long mmiol_cfg : 1; /* RW */
950 unsigned long rsvd_13_63: 51; /* */
951 } s;
952};
953
954/* ========================================================================= */
860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 955/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
861/* ========================================================================= */ 956/* ========================================================================= */
862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 957#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
987 } s; 1082 } s;
988}; 1083};
989 1084
990/* ========================================================================= */
991/* UVH_SI_ADDR_MAP_CONFIG */
992/* ========================================================================= */
993#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
994
995#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
996#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
997#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
998#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
999
1000union uvh_si_addr_map_config_u {
1001 unsigned long v;
1002 struct uvh_si_addr_map_config_s {
1003 unsigned long m_skt : 6; /* RW */
1004 unsigned long rsvd_6_7: 2; /* */
1005 unsigned long n_skt : 4; /* RW */
1006 unsigned long rsvd_12_63: 52; /* */
1007 } s;
1008};
1009
1010/* ========================================================================= */
1011/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1012/* ========================================================================= */
1013#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1014
1015#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1016#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1017#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1018#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1019#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1020#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1021
1022union uvh_si_alias0_overlay_config_u {
1023 unsigned long v;
1024 struct uvh_si_alias0_overlay_config_s {
1025 unsigned long rsvd_0_23: 24; /* */
1026 unsigned long base : 8; /* RW */
1027 unsigned long rsvd_32_47: 16; /* */
1028 unsigned long m_alias : 5; /* RW */
1029 unsigned long rsvd_53_62: 10; /* */
1030 unsigned long enable : 1; /* RW */
1031 } s;
1032};
1033
1034/* ========================================================================= */
1035/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1036/* ========================================================================= */
1037#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1038
1039#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1040#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1041#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1042#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1043#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1044#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1045
1046union uvh_si_alias1_overlay_config_u {
1047 unsigned long v;
1048 struct uvh_si_alias1_overlay_config_s {
1049 unsigned long rsvd_0_23: 24; /* */
1050 unsigned long base : 8; /* RW */
1051 unsigned long rsvd_32_47: 16; /* */
1052 unsigned long m_alias : 5; /* RW */
1053 unsigned long rsvd_53_62: 10; /* */
1054 unsigned long enable : 1; /* RW */
1055 } s;
1056};
1057
1058/* ========================================================================= */
1059/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1060/* ========================================================================= */
1061#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1062
1063#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1064#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1065#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1066#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1067#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1068#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1069
1070union uvh_si_alias2_overlay_config_u {
1071 unsigned long v;
1072 struct uvh_si_alias2_overlay_config_s {
1073 unsigned long rsvd_0_23: 24; /* */
1074 unsigned long base : 8; /* RW */
1075 unsigned long rsvd_32_47: 16; /* */
1076 unsigned long m_alias : 5; /* RW */
1077 unsigned long rsvd_53_62: 10; /* */
1078 unsigned long enable : 1; /* RW */
1079 } s;
1080};
1081
1082 1085
1083#endif /* _ASM_X86_UV_UV_MMRS_H */ 1086#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0ed..3f838d537392 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
52#include <asm/mce.h> 52#include <asm/mce.h>
53#include <asm/kvm_para.h> 53#include <asm/kvm_para.h>
54#include <asm/tsc.h> 54#include <asm/tsc.h>
55#include <asm/atomic.h>
56 55
57unsigned int num_processors; 56unsigned int num_processors;
58 57
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118de249e..194539aea175 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -379,14 +379,14 @@ struct redir_addr {
379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
380 380
381static __initdata struct redir_addr redir_addrs[] = { 381static __initdata struct redir_addr redir_addrs[] = {
382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, 382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, 383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, 384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
385}; 385};
386 386
387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) 387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
388{ 388{
389 union uvh_si_alias0_overlay_config_u alias; 389 union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; 390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
391 int i; 391 int i;
392 392
@@ -660,7 +660,7 @@ void uv_nmi_init(void)
660 660
661void __init uv_system_init(void) 661void __init uv_system_init(void)
662{ 662{
663 union uvh_si_addr_map_config_u m_n_config; 663 union uvh_rh_gam_config_mmr_u m_n_config;
664 union uvh_node_id_u node_id; 664 union uvh_node_id_u node_id;
665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; 666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@ void __init uv_system_init(void)
670 670
671 map_low_mmrs(); 671 map_low_mmrs();
672 672
 673 m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); 673 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
674 m_val = m_n_config.s.m_skt; 674 m_val = m_n_config.s.m_skt;
675 n_val = m_n_config.s.n_skt; 675 n_val = m_n_config.s.n_skt;
676 mmr_base = 676 mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3af..e421b8cd6944 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
280 struct amd_nb *nb; 280 struct amd_nb *nb;
281 int i; 281 int i;
282 282
283 nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); 283 nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
284 cpu_to_node(cpu));
284 if (!nb) 285 if (!nb)
285 return NULL; 286 return NULL;
286 287
287 memset(nb, 0, sizeof(*nb));
288 nb->nb_id = nb_id; 288 nb->nb_id = nb_id;
289 289
290 /* 290 /*
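
The allocation above gains two properties at once: __GFP_ZERO lets the allocator do the zeroing that the dropped memset() used to, and kmalloc_node() places the northbridge struct on the node of the CPU that owns it. Side by side (sketch):

        /* before: any node, zeroed by hand */
        nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
        memset(nb, 0, sizeof(*nb));

        /* after: zeroed by the allocator, NUMA-local */
        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
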
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7d..ce0cb4721c9a 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
212 return 0; 212 return 0;
213 } 213 }
214 214
215 equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); 215 equiv_cpu_table = vmalloc(size);
216 if (!equiv_cpu_table) { 216 if (!equiv_cpu_table) {
217 pr_err("failed to allocate equivalent CPU table\n"); 217 pr_err("failed to allocate equivalent CPU table\n");
218 return 0; 218 return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd44..6da143c2a6b8 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
217 wrmsrl(address, val); 217 wrmsrl(address, val);
218} 218}
219 219
220static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d) 220static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
221{ 221{
222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; 222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
223 return 0; 223 return 0;
224} 224}
225 225
226static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { 226static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
227 { 227 {
228 .callback = set_check_enable_amd_mmconf, 228 .callback = set_check_enable_amd_mmconf,
229 .ident = "Sun Microsystems Machine", 229 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
234 {} 234 {}
235}; 235};
236 236
237void __cpuinit check_enable_amd_mmconf_dmi(void) 237/* Called from a __cpuinit function, but only on the BSP. */
238void __ref check_enable_amd_mmconf_dmi(void)
238{ 239{
239 dmi_check_system(mmconf_dmi_table); 240 dmi_check_system(mmconf_dmi_table);
240} 241}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66d..008b91eefa18 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
41 valid_flags = flags; 41 valid_flags = flags;
42} 42}
43 43
44/*
45 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
46 * yielding a 64-bit result.
47 */
48static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
49{
50 u64 product;
51#ifdef __i386__
52 u32 tmp1, tmp2;
53#endif
54
55 if (shift < 0)
56 delta >>= -shift;
57 else
58 delta <<= shift;
59
60#ifdef __i386__
61 __asm__ (
62 "mul %5 ; "
63 "mov %4,%%eax ; "
64 "mov %%edx,%4 ; "
65 "mul %5 ; "
66 "xor %5,%5 ; "
67 "add %4,%%eax ; "
68 "adc %5,%%edx ; "
69 : "=A" (product), "=r" (tmp1), "=r" (tmp2)
70 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
71#elif defined(__x86_64__)
72 __asm__ (
73 "mul %%rdx ; shrd $32,%%rdx,%%rax"
74 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
75#else
76#error implement me!
77#endif
78
79 return product;
80}
81
82static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) 44static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
83{ 45{
84 u64 delta = native_read_tsc() - shadow->tsc_timestamp; 46 u64 delta = native_read_tsc() - shadow->tsc_timestamp;
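
The deleted helper is pure fixed-point arithmetic: mul_frac is a 32.32 fixed-point multiplier, so the 64x32 multiply keeps the high 64 bits of the 96-bit product. What both asm variants compute, as an equation:

        product = ((delta << shift) * mul_frac) >> 32
                = (delta << shift) * (mul_frac / 2^32)
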
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
720 } 720 }
721} 721}
722 722
723static void set_spte_track_bits(u64 *sptep, u64 new_spte) 723static int set_spte_track_bits(u64 *sptep, u64 new_spte)
724{ 724{
725 pfn_t pfn; 725 pfn_t pfn;
726 u64 old_spte = *sptep; 726 u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
731 old_spte = __xchg_spte(sptep, new_spte); 731 old_spte = __xchg_spte(sptep, new_spte);
732 732
733 if (!is_rmap_spte(old_spte)) 733 if (!is_rmap_spte(old_spte))
734 return; 734 return 0;
735 735
736 pfn = spte_to_pfn(old_spte); 736 pfn = spte_to_pfn(old_spte);
737 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 737 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
738 kvm_set_pfn_accessed(pfn); 738 kvm_set_pfn_accessed(pfn);
739 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) 739 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
740 kvm_set_pfn_dirty(pfn); 740 kvm_set_pfn_dirty(pfn);
741 return 1;
741} 742}
742 743
743static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte) 744static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
744{ 745{
745 set_spte_track_bits(sptep, new_spte); 746 if (set_spte_track_bits(sptep, new_spte))
746 rmap_remove(kvm, sptep); 747 rmap_remove(kvm, sptep);
747} 748}
748 749
749static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) 750static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..cdac9e592aa5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2560 !kvm_exception_is_soft(vcpu->arch.exception.nr); 2560 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2561 events->exception.nr = vcpu->arch.exception.nr; 2561 events->exception.nr = vcpu->arch.exception.nr;
2562 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 2562 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2563 events->exception.pad = 0;
2563 events->exception.error_code = vcpu->arch.exception.error_code; 2564 events->exception.error_code = vcpu->arch.exception.error_code;
2564 2565
2565 events->interrupt.injected = 2566 events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2573 events->nmi.injected = vcpu->arch.nmi_injected; 2574 events->nmi.injected = vcpu->arch.nmi_injected;
2574 events->nmi.pending = vcpu->arch.nmi_pending; 2575 events->nmi.pending = vcpu->arch.nmi_pending;
2575 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); 2576 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2577 events->nmi.pad = 0;
2576 2578
2577 events->sipi_vector = vcpu->arch.sipi_vector; 2579 events->sipi_vector = vcpu->arch.sipi_vector;
2578 2580
2579 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 2581 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2580 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 2582 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2581 | KVM_VCPUEVENT_VALID_SHADOW); 2583 | KVM_VCPUEVENT_VALID_SHADOW);
2584 memset(&events->reserved, 0, sizeof(events->reserved));
2582} 2585}
2583 2586
2584static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 2587static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2623 dbgregs->dr6 = vcpu->arch.dr6; 2626 dbgregs->dr6 = vcpu->arch.dr6;
2624 dbgregs->dr7 = vcpu->arch.dr7; 2627 dbgregs->dr7 = vcpu->arch.dr7;
2625 dbgregs->flags = 0; 2628 dbgregs->flags = 0;
2629 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2626} 2630}
2627 2631
2628static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 2632static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3106 sizeof(ps->channels)); 3110 sizeof(ps->channels));
3107 ps->flags = kvm->arch.vpit->pit_state.flags; 3111 ps->flags = kvm->arch.vpit->pit_state.flags;
3108 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3112 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3113 memset(&ps->reserved, 0, sizeof(ps->reserved));
3109 return r; 3114 return r;
3110} 3115}
3111 3116
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3169 struct kvm_memslots *slots, *old_slots; 3174 struct kvm_memslots *slots, *old_slots;
3170 unsigned long *dirty_bitmap; 3175 unsigned long *dirty_bitmap;
3171 3176
3172 spin_lock(&kvm->mmu_lock);
3173 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3174 spin_unlock(&kvm->mmu_lock);
3175
3176 r = -ENOMEM; 3177 r = -ENOMEM;
3177 dirty_bitmap = vmalloc(n); 3178 dirty_bitmap = vmalloc(n);
3178 if (!dirty_bitmap) 3179 if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3194 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; 3195 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3195 kfree(old_slots); 3196 kfree(old_slots);
3196 3197
3198 spin_lock(&kvm->mmu_lock);
3199 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3200 spin_unlock(&kvm->mmu_lock);
3201
3197 r = -EFAULT; 3202 r = -EFAULT;
3198 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { 3203 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
3199 vfree(dirty_bitmap); 3204 vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
3486 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3491 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3487 local_irq_enable(); 3492 local_irq_enable();
3488 user_ns.flags = 0; 3493 user_ns.flags = 0;
3494 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3489 3495
3490 r = -EFAULT; 3496 r = -EFAULT;
3491 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 3497 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3972 return X86EMUL_CONTINUE; 3978 return X86EMUL_CONTINUE;
3973 3979
3974 if (kvm_x86_ops->has_wbinvd_exit()) { 3980 if (kvm_x86_ops->has_wbinvd_exit()) {
3981 preempt_disable();
3975 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 3982 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3976 wbinvd_ipi, NULL, 1); 3983 wbinvd_ipi, NULL, 1);
3984 preempt_enable();
3977 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 3985 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3978 } 3986 }
3979 wbinvd(); 3987 wbinvd();
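
smp_call_function_many() requires its caller to hold preemption off (it reads the current CPU id and per-cpu state), which is what the added pair above guarantees around the cross-CPU wbinvd broadcast. The pattern in isolation (mask and fn are hypothetical):

        preempt_disable();
        smp_call_function_many(mask, fn, NULL, 1);      /* wait = 1 */
        preempt_enable();
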
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c733..12cdbb17ad18 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
251 } 251 }
252} 252}
253 253
254static int tlb_cpuhp_notify(struct notifier_block *n, 254static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
255 unsigned long action, void *hcpu) 255 unsigned long action, void *hcpu)
256{ 256{
257 switch (action & 0xf) { 257 switch (action & 0xf) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8daf75..d7b5109f7a9c 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */ 147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */
148 (type == PCI_CAP_ID_MSIX) ? 148 (type == PCI_CAP_ID_MSIX) ?
149 "pcifront-msi-x" : "pcifront-msi"); 149 "pcifront-msi-x" : "pcifront-msi");
150 if (irq < 0) 150 if (irq < 0) {
151 return -1; 151 ret = -1;
152 goto free;
153 }
152 154
153 ret = set_irq_msi(irq, msidesc); 155 ret = set_irq_msi(irq, msidesc);
154 if (ret) 156 if (ret)
@@ -164,7 +166,7 @@ error:
164 if (ret == -ENODEV) 166 if (ret == -ENODEV)
165 dev_err(&dev->dev, "Xen PCI frontend has not registered" \ 167 dev_err(&dev->dev, "Xen PCI frontend has not registered" \
166 " MSI/MSI-X support!\n"); 168 " MSI/MSI-X support!\n");
167 169free:
168 kfree(v); 170 kfree(v);
169 return ret; 171 return ret;
170} 172}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a39e2a..a318194002b5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub 1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
1345 */ 1345 */
1346 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* 1346 bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
1347 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1347 * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
1348 BUG_ON(!bau_desc); 1348 BUG_ON(!bau_desc);
1349 1349
1350 pa = uv_gpa(bau_desc); /* need the real nasid*/ 1350 pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
1402 struct bau_payload_queue_entry *pqp_malloc; 1402 struct bau_payload_queue_entry *pqp_malloc;
1403 struct bau_control *bcp; 1403 struct bau_control *bcp;
1404 1404
1405 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 1405 pqp = kmalloc_node((DEST_Q_SIZE + 1)
1406 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), 1406 * sizeof(struct bau_payload_queue_entry),
1407 GFP_KERNEL, node); 1407 GFP_KERNEL, node);
1408 BUG_ON(!pqp); 1408 BUG_ON(!pqp);
1409 pqp_malloc = pqp; 1409 pqp_malloc = pqp;
1410 1410
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
1520 1520
1521 timeout_us = calculate_destination_timeout(); 1521 timeout_us = calculate_destination_timeout();
1522 1522
1523 uvhub_descs = (struct uvhub_desc *) 1523 uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1524 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1525 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); 1524 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1526 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); 1525 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1527 for_each_present_cpu(cpu) { 1526 for_each_present_cpu(cpu) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b810b03f..21ed8d7f75a5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2126{ 2126{
2127 pmd_t *kernel_pmd; 2127 pmd_t *kernel_pmd;
2128 2128
2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); 2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2130 2130
2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2132 xen_start_info->nr_pt_frames * PAGE_SIZE + 2132 xen_start_info->nr_pt_frames * PAGE_SIZE +
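
The mmu.c one-liner is the classic sizeof-of-pointer slip: the brk area must hold PTRS_PER_PMD page-middle-directory entries, not that many pointers, and the two sizes differ on 32-bit PAE (8-byte pmd_t vs 4-byte pointer). The sizeof(*ptr) idiom sidesteps the trap (sketch):

        level2_kernel_pgt = extend_brk(sizeof(*level2_kernel_pgt) * PTRS_PER_PMD,
                                       PAGE_SIZE);
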
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa23ecc..769c4b01fa32 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
118 const struct e820map *e820) 118 const struct e820map *e820)
119{ 119{
120 phys_addr_t max_addr = PFN_PHYS(max_pfn); 120 phys_addr_t max_addr = PFN_PHYS(max_pfn);
121 phys_addr_t last_end = 0; 121 phys_addr_t last_end = ISA_END_ADDRESS;
122 unsigned long released = 0; 122 unsigned long released = 0;
123 int i; 123 int i;
124 124
125 /* Free any unused memory above the low 1Mbyte. */
125 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { 126 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
126 phys_addr_t end = e820->map[i].addr; 127 phys_addr_t end = e820->map[i].addr;
127 end = min(max_addr, end); 128 end = min(max_addr, end);
128 129
129 released += xen_release_chunk(last_end, end); 130 if (last_end < end)
130 last_end = e820->map[i].addr + e820->map[i].size; 131 released += xen_release_chunk(last_end, end);
132 last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
131 } 133 }
132 134
133 if (last_end < max_addr) 135 if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
164 XENMEM_memory_map; 166 XENMEM_memory_map;
165 rc = HYPERVISOR_memory_op(op, &memmap); 167 rc = HYPERVISOR_memory_op(op, &memmap);
166 if (rc == -ENOSYS) { 168 if (rc == -ENOSYS) {
169 BUG_ON(xen_initial_domain());
167 memmap.nr_entries = 1; 170 memmap.nr_entries = 1;
168 map[0].addr = 0ULL; 171 map[0].addr = 0ULL;
169 map[0].size = mem_end; 172 map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
201 } 204 }
202 205
203 /* 206 /*
204 * Even though this is normal, usable memory under Xen, reserve 207 * In domU, the ISA region is normal, usable memory, but we
205 * ISA memory anyway because too many things think they can poke 208 * reserve ISA memory anyway because too many things poke
206 * about in there. 209 * about in there.
207 * 210 *
208 * In a dom0 kernel, this region is identity mapped with the 211 * In Dom0, the host E820 information can leave gaps in the
209 * hardware ISA area, so it really is out of bounds. 212 * ISA range, which would cause us to release those pages. To
213 * avoid this, we unconditionally reserve them here.
210 */ 214 */
211 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, 215 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
212 E820_RESERVED); 216 E820_RESERVED);
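The reworked loop above does two things: it starts last_end at ISA_END_ADDRESS so the low 1 MB is never handed back to the hypervisor, and it advances last_end with max() so overlapping E820 entries cannot move it backwards and trigger a bogus release. A userspace sketch of the same loop shape (release_chunk and the region type are stand-ins, not the Xen API):

#include <stdio.h>

typedef unsigned long long u64;

struct region { u64 addr, size; };

#define ISA_END_ADDR 0x100000ULL /* 1 MB */

/* Stand-in for xen_release_chunk(): just count the bytes we would free. */
static u64 release_chunk(u64 start, u64 end)
{
        return end > start ? end - start : 0;
}

static u64 return_unused(const struct region *map, int n, u64 max_addr)
{
        u64 last_end = ISA_END_ADDR;    /* never release the low 1 MB */
        u64 released = 0;

        for (int i = 0; i < n && last_end < max_addr; i++) {
                u64 end = map[i].addr < max_addr ? map[i].addr : max_addr;
                u64 next = map[i].addr + map[i].size;

                if (last_end < end)
                        released += release_chunk(last_end, end);
                if (next > last_end)    /* max(): overlaps can't rewind */
                        last_end = next;
        }
        if (last_end < max_addr)
                released += release_chunk(last_end, max_addr);
        return released;
}

int main(void)
{
        struct region map[] = { { 0x0, 0x9f000 }, { 0x100000, 0x700000 } };

        printf("released %llu bytes\n",
               return_unused(map, 2, 0x1000000ULL)); /* up to 16 MB */
        return 0;
}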
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2f5727..4ce953f1b390 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1194 int where = ELEVATOR_INSERT_SORT; 1194 int where = ELEVATOR_INSERT_SORT;
1195 int rw_flags; 1195 int rw_flags;
1196 1196
1197 /* REQ_HARDBARRIER is no more */
1198 if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
1199 "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
1200 bio_endio(bio, -EOPNOTSUPP);
1201 return 0;
1202 }
1203
1204 /* 1197 /*
1205 * low level driver can indicate that it wants pages above a 1198 * low level driver can indicate that it wants pages above a
1206 * certain limit bounced to low memory (ie for highmem, or even 1199 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
1351 bdevname(bio->bi_bdev, b), 1344 bdevname(bio->bi_bdev, b),
1352 bio->bi_rw, 1345 bio->bi_rw,
1353 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1346 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1354 (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); 1347 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1355 1348
1356 set_bit(BIO_EOF, &bio->bi_flags); 1349 set_bit(BIO_EOF, &bio->bi_flags);
1357} 1350}
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1404 return 0; 1397 return 0;
1405 1398
1406 /* Test device or partition size, when known. */ 1399 /* Test device or partition size, when known. */
1407 maxsector = bio->bi_bdev->bd_inode->i_size >> 9; 1400 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1408 if (maxsector) { 1401 if (maxsector) {
1409 sector_t sector = bio->bi_sector; 1402 sector_t sector = bio->bi_sector;
1410 1403
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c55c406..3c7a339fe381 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
153} 153}
154EXPORT_SYMBOL(get_io_context); 154EXPORT_SYMBOL(get_io_context);
155 155
156void copy_io_context(struct io_context **pdst, struct io_context **psrc)
157{
158 struct io_context *src = *psrc;
159 struct io_context *dst = *pdst;
160
161 if (src) {
162 BUG_ON(atomic_long_read(&src->refcount) == 0);
163 atomic_long_inc(&src->refcount);
164 put_io_context(dst);
165 *pdst = src;
166 }
167}
168EXPORT_SYMBOL(copy_io_context);
169
170static int __init blk_ioc_init(void) 156static int __init blk_ioc_init(void)
171{ 157{
172 iocontext_cachep = kmem_cache_create("blkdev_ioc", 158 iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d8691e..5d5dbe47c228 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
205 unaligned = 1; 205 unaligned = 1;
206 break; 206 break;
207 } 207 }
208 if (!iov[i].iov_len)
209 return -EINVAL;
208 } 210 }
209 211
210 if (unaligned || (q->dma_pad_mask & len) || map_data) 212 if (unaligned || (q->dma_pad_mask & len) || map_data)
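The added check rejects any zero-length segment in the user-supplied iovec before mapping begins; a 0-byte segment would otherwise produce an empty mapping that later stages do not expect. A minimal userspace sketch of the validation (illustrative only):

#include <errno.h>
#include <sys/uio.h>

static int check_iov(const struct iovec *iov, int count)
{
        for (int i = 0; i < count; i++)
                if (!iov[i].iov_len)    /* zero-length segment: reject */
                        return -EINVAL;
        return 0;
}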
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b74dc0..58c6ee5b010c 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
745 return 0; 745 return 0;
746 case BLKGETSIZE: 746 case BLKGETSIZE:
747 size = bdev->bd_inode->i_size; 747 size = i_size_read(bdev->bd_inode);
748 if ((size >> 9) > ~0UL) 748 if ((size >> 9) > ~0UL)
749 return -EFBIG; 749 return -EFBIG;
750 return compat_put_ulong(arg, size >> 9); 750 return compat_put_ulong(arg, size >> 9);
751 751
752 case BLKGETSIZE64_32: 752 case BLKGETSIZE64_32:
753 return compat_put_u64(arg, bdev->bd_inode->i_size); 753 return compat_put_u64(arg, i_size_read(bdev->bd_inode));
754 754
755 case BLKTRACESETUP32: 755 case BLKTRACESETUP32:
756 case BLKTRACESTART: /* compatible */ 756 case BLKTRACESTART: /* compatible */
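Both BLKGETSIZE conversions above switch from a raw read of bd_inode->i_size to i_size_read(). The point of the accessor is torn reads: on 32-bit SMP a 64-bit size update is not atomic, so the reader snapshots a sequence counter and retries if a writer was in flight. A userspace analogue of the pattern (sketch only; a real implementation needs proper memory barriers):

#include <stdint.h>

struct size64 {
        volatile unsigned seq;   /* odd while a write is in progress */
        volatile uint64_t bytes;
};

static uint64_t size_read(const struct size64 *s)
{
        unsigned seq;
        uint64_t v;

        do {
                seq = s->seq;   /* snapshot sequence */
                v = s->bytes;   /* may be torn...    */
        } while ((seq & 1) || seq != s->seq); /* ...retry if it was */
        return v;
}

static void size_write(struct size64 *s, uint64_t v)
{
        s->seq++;               /* mark write in progress (odd) */
        s->bytes = v;
        s->seq++;               /* stable again (even) */
}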
diff --git a/block/elevator.c b/block/elevator.c
index 282e8308f7e2..2569512830d3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
429 q->nr_sorted--; 429 q->nr_sorted--;
430 430
431 boundary = q->end_sector; 431 boundary = q->end_sector;
432 stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; 432 stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
433 list_for_each_prev(entry, &q->queue_head) { 433 list_for_each_prev(entry, &q->queue_head) {
434 struct request *pos = list_entry_rq(entry); 434 struct request *pos = list_entry_rq(entry);
435 435
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
691void __elv_add_request(struct request_queue *q, struct request *rq, int where, 691void __elv_add_request(struct request_queue *q, struct request *rq, int where,
692 int plug) 692 int plug)
693{ 693{
694 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 694 if (rq->cmd_flags & REQ_SOFTBARRIER) {
695 /* barriers are scheduling boundary, update end_sector */ 695 /* barriers are scheduling boundary, update end_sector */
696 if (rq->cmd_type == REQ_TYPE_FS || 696 if (rq->cmd_type == REQ_TYPE_FS ||
697 (rq->cmd_flags & REQ_DISCARD)) { 697 (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb1d465..3d866d0037f2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
125 start >>= 9; 125 start >>= 9;
126 len >>= 9; 126 len >>= 9;
127 127
128 if (start + len > (bdev->bd_inode->i_size >> 9)) 128 if (start + len > (i_size_read(bdev->bd_inode) >> 9))
129 return -EINVAL; 129 return -EINVAL;
130 if (secure) 130 if (secure)
131 flags |= BLKDEV_DISCARD_SECURE; 131 flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
242 * We need to set the startsect first, the driver may 242 * We need to set the startsect first, the driver may
243 * want to override it. 243 * want to override it.
244 */ 244 */
245 memset(&geo, 0, sizeof(geo));
245 geo.start = get_start_sect(bdev); 246 geo.start = get_start_sect(bdev);
246 ret = disk->fops->getgeo(bdev, &geo); 247 ret = disk->fops->getgeo(bdev, &geo);
247 if (ret) 248 if (ret)
@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
307 ret = blkdev_reread_part(bdev); 308 ret = blkdev_reread_part(bdev);
308 break; 309 break;
309 case BLKGETSIZE: 310 case BLKGETSIZE:
310 size = bdev->bd_inode->i_size; 311 size = i_size_read(bdev->bd_inode);
311 if ((size >> 9) > ~0UL) 312 if ((size >> 9) > ~0UL)
312 return -EFBIG; 313 return -EFBIG;
313 return put_ulong(arg, size >> 9); 314 return put_ulong(arg, size >> 9);
314 case BLKGETSIZE64: 315 case BLKGETSIZE64:
315 return put_u64(arg, bdev->bd_inode->i_size); 316 return put_u64(arg, i_size_read(bdev->bd_inode));
316 case BLKTRACESTART: 317 case BLKTRACESTART:
317 case BLKTRACESTOP: 318 case BLKTRACESTOP:
318 case BLKTRACESETUP: 319 case BLKTRACESETUP:
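The added memset() closes an information leak: geo lives on the kernel stack, and a driver's getgeo() may fill only some of its fields, so any untouched bytes would be copied to userspace as-is. The shape of the fix, in a standalone sketch (types and names are illustrative stand-ins):

#include <string.h>

struct hd_geometry_like {
        unsigned char heads, sectors;
        unsigned short cylinders;
        unsigned long start;
};

/* A driver callback may set only some fields... */
static void driver_getgeo(struct hd_geometry_like *g)
{
        g->heads = 255;
        g->sectors = 63;
}

/* ...so zero the struct first, or stack garbage escapes verbatim. */
static void get_geometry(struct hd_geometry_like *out)
{
        struct hd_geometry_like geo;

        memset(&geo, 0, sizeof(geo));
        geo.start = 2048;       /* preset; the driver may override it */
        driver_getgeo(&geo);
        *out = geo;             /* now safe to copy out */
}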
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10eb5b0..4f4230b79bb6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
321 if (hdr->iovec_count) { 321 if (hdr->iovec_count) {
322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count; 322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
323 size_t iov_data_len; 323 size_t iov_data_len;
324 struct sg_iovec *iov; 324 struct sg_iovec *sg_iov;
325 struct iovec *iov;
326 int i;
325 327
326 iov = kmalloc(size, GFP_KERNEL); 328 sg_iov = kmalloc(size, GFP_KERNEL);
327 if (!iov) { 329 if (!sg_iov) {
328 ret = -ENOMEM; 330 ret = -ENOMEM;
329 goto out; 331 goto out;
330 } 332 }
331 333
332 if (copy_from_user(iov, hdr->dxferp, size)) { 334 if (copy_from_user(sg_iov, hdr->dxferp, size)) {
333 kfree(iov); 335 kfree(sg_iov);
334 ret = -EFAULT; 336 ret = -EFAULT;
335 goto out; 337 goto out;
336 } 338 }
337 339
340 /*
341 * Sum up the vecs, making sure they don't overflow
342 */
343 iov = (struct iovec *) sg_iov;
344 iov_data_len = 0;
345 for (i = 0; i < hdr->iovec_count; i++) {
346 if (iov_data_len + iov[i].iov_len < iov_data_len) {
347 kfree(sg_iov);
348 ret = -EINVAL;
349 goto out;
350 }
351 iov_data_len += iov[i].iov_len;
352 }
353
338 /* SG_IO howto says that the shorter of the two wins */ 354 /* SG_IO howto says that the shorter of the two wins */
339 iov_data_len = iov_length((struct iovec *)iov,
340 hdr->iovec_count);
341 if (hdr->dxfer_len < iov_data_len) { 355 if (hdr->dxfer_len < iov_data_len) {
342 hdr->iovec_count = iov_shorten((struct iovec *)iov, 356 hdr->iovec_count = iov_shorten(iov,
343 hdr->iovec_count, 357 hdr->iovec_count,
344 hdr->dxfer_len); 358 hdr->dxfer_len);
345 iov_data_len = hdr->dxfer_len; 359 iov_data_len = hdr->dxfer_len;
346 } 360 }
347 361
348 ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count, 362 ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
349 iov_data_len, GFP_KERNEL); 363 iov_data_len, GFP_KERNEL);
350 kfree(iov); 364 kfree(sg_iov);
351 } else if (hdr->dxfer_len) 365 } else if (hdr->dxfer_len)
352 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, 366 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
353 GFP_KERNEL); 367 GFP_KERNEL);
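The new loop replaces a blind iov_length() with a summation that checks for wraparound, so a crafted iovec whose lengths overflow size_t can no longer understate the total transfer length. A standalone sketch of the overflow-safe sum (hypothetical helper name):

#include <errno.h>
#include <stddef.h>
#include <sys/uio.h>

static int iov_total_len(const struct iovec *iov, int count, size_t *total)
{
        size_t sum = 0;

        for (int i = 0; i < count; i++) {
                if (sum + iov[i].iov_len < sum) /* wrapped past SIZE_MAX */
                        return -EINVAL;
                sum += iov[i].iov_len;
        }
        *total = sum;
        return 0;
}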
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de3078215fe6..75586f1f86e7 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@ err:
504 504
505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) 505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
506{ 506{
507 kobject_put(&pcrypt->pinst->kobj);
508 free_cpumask_var(pcrypt->cb_cpumask->mask); 507 free_cpumask_var(pcrypt->cb_cpumask->mask);
509 kfree(pcrypt->cb_cpumask); 508 kfree(pcrypt->cb_cpumask);
510 509
diff --git a/drivers/Makefile b/drivers/Makefile
index 14cf9077bb2b..f3ebb30f1b7f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_REGULATOR) += regulator/
26 26
27# char/ comes before serial/ etc so that the VT console is the boot-time 27# char/ comes before serial/ etc so that the VT console is the boot-time
28# default. 28# default.
29obj-y += tty/
29obj-y += char/ 30obj-y += char/
30 31
31# gpu/ comes after char for AGP vs DRM startup 32# gpu/ comes after char for AGP vs DRM startup
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d050e073e570..3f91c01c217f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2552 * 2552 *
2553 * If door lock fails, always clear sdev->locked to 2553 * If door lock fails, always clear sdev->locked to
2554 * avoid this infinite loop. 2554 * avoid this infinite loop.
2555 *
2556 * This may happen before SCSI scan is complete. Make
2557 * sure qc->dev->sdev isn't NULL before dereferencing.
2555 */ 2558 */
2556 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2559 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
2557 qc->dev->sdev->locked = 0; 2560 qc->dev->sdev->locked = 0;
2558 2561
2559 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2562 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
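The guard added above is the standard NULL-check-before-dereference fix, and the new comment records why sdev can legitimately be NULL at this point. Its generic shape (types are illustrative stand-ins):

struct scsi_device_like { int locked; };
struct ata_device_like { struct scsi_device_like *sdev; };

/* The SCSI device may not have been created yet when the command
 * completes, so test before touching it. */
static void clear_door_lock(struct ata_device_like *dev)
{
        if (dev->sdev)          /* NULL until the SCSI scan finishes */
                dev->sdev->locked = 0;
}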
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf194138f21..6bd9425ba5ab 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */
142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
144 144
145#ifdef PATA_WINBOND_VLB_MODULE 145#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers, 146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */ 147 give I/O port if non standard */
148#else 148#else
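The old #ifdef tested a symbol that is never defined: Kbuild generates CONFIG_<option> for built-in code and CONFIG_<option>_MODULE for =m builds, so the bare PATA_WINBOND_VLB_MODULE spelling left the branch dead and the Winbond probe default silently off. For reference, the generic built-in-or-modular idiom looks like this (illustrative only, not the driver's exact logic):

/* Kbuild defines CONFIG_FOO for =y and CONFIG_FOO_MODULE for =m;
 * anything else is simply never defined. */
#if defined(CONFIG_PATA_WINBOND_VLB) || defined(CONFIG_PATA_WINBOND_VLB_MODULE)
static int winbond = 1;         /* probe Winbond controllers by default */
#else
static int winbond;             /* probe only when asked explicitly */
#endif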
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 74b829817891..fa1b95a9a7ff 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
653 653
654 ap = host->ports[i]; 654 ap = host->ports[i];
655 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
656
657 ocd = ap->dev->platform_data;
658 cf_port = ap->private_data; 656 cf_port = ap->private_data;
659 dma_int.u64 = 657 dma_int.u64 =
660 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); 658 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index c8fc69c85a06..c09761959354 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -92,7 +92,7 @@
92 92
93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 93#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
94 94
95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 95#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
96 96
97#if 1 97#if 1
98#define ASSERT(expr) if (!(expr)) { \ 98#define ASSERT(expr) if (!(expr)) { \
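The macro rewrite above removes a sequence-point violation: index = ++(index) % (modulo) modifies index twice in one expression, which is undefined behaviour in the C89/C99 dialects the kernel is built as (newer compilers warn about exactly this). The fixed ring-buffer advance, in a runnable demo:

#include <stdio.h>

int main(void)
{
        int idx = 0;
        const int modulo = 4;

        /* Old form: idx = ++idx % modulo;  -- two unsequenced
         * modifications of idx.  Fixed form computes the value once: */
        for (int i = 0; i < 6; i++) {
                idx = (idx + 1) % modulo;
                printf("%d ", idx);     /* prints: 1 2 3 0 1 2 */
        }
        printf("\n");
        return 0;
}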
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cbe15a86c669..930051d941a7 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2241,11 +2241,8 @@ static int __devinit lanai_dev_open(struct atm_dev *atmdev)
2241 memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN); 2241 memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
2242 lanai_timed_poll_start(lanai); 2242 lanai_timed_poll_start(lanai);
2243 printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u " 2243 printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
2244 "(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number, 2244 "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
2245 (int) lanai->pci->revision, (unsigned long) lanai->base, 2245 (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
2246 lanai->pci->irq,
2247 atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
2248 atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
2249 printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), " 2246 printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
2250 "board_rev=%d\n", lanai->number, 2247 "board_rev=%d\n", lanai->number,
2251 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno, 2248 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 1a9332e4efe0..9a676ee30824 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,6 +1,7 @@
1SOLOS_ATTR_RO(DriverVersion) 1SOLOS_ATTR_RO(DriverVersion)
2SOLOS_ATTR_RO(APIVersion) 2SOLOS_ATTR_RO(APIVersion)
3SOLOS_ATTR_RO(FirmwareVersion) 3SOLOS_ATTR_RO(FirmwareVersion)
4SOLOS_ATTR_RO(Version)
4// SOLOS_ATTR_RO(DspVersion) 5// SOLOS_ATTR_RO(DspVersion)
5// SOLOS_ATTR_RO(CommonHandshake) 6// SOLOS_ATTR_RO(CommonHandshake)
6SOLOS_ATTR_RO(Connected) 7SOLOS_ATTR_RO(Connected)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f46138ab38b6..2e08c996fd30 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1161 dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", 1161 dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
1162 major_ver, minor_ver, fpga_ver); 1162 major_ver, minor_ver, fpga_ver);
1163 1163
1164 if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
1165 db_fpga_upgrade || db_firmware_upgrade)) {
1166 dev_warn(&dev->dev,
1167 "FPGA too old; cannot upgrade flash. Use JTAG.\n");
1168 fpga_upgrade = firmware_upgrade = 0;
1169 db_fpga_upgrade = db_firmware_upgrade = 0;
1170 }
1171
1164 if (card->fpga_version >= DMA_SUPPORTED){ 1172 if (card->fpga_version >= DMA_SUPPORTED){
1165 card->using_dma = 1; 1173 card->using_dma = 1;
1166 } else { 1174 } else {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e18879965..528f6318ded1 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
180 BUG(); 180 BUG();
181 bio_endio(bio, -ENXIO); 181 bio_endio(bio, -ENXIO);
182 return 0; 182 return 0;
183 } else if (bio->bi_rw & REQ_HARDBARRIER) {
184 bio_endio(bio, -EOPNOTSUPP);
185 return 0;
186 } else if (bio->bi_io_vec == NULL) { 183 } else if (bio->bi_io_vec == NULL) {
187 printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); 184 printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
188 BUG(); 185 BUG();
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5674bd01d96d..de0435e63b02 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
297 struct sk_buff *skb; 297 struct sk_buff *skb;
298 struct net_device *ifp; 298 struct net_device *ifp;
299 299
300 read_lock(&dev_base_lock); 300 rcu_read_lock();
301 for_each_netdev(&init_net, ifp) { 301 for_each_netdev_rcu(&init_net, ifp) {
302 dev_hold(ifp); 302 dev_hold(ifp);
303 if (!is_aoe_netif(ifp)) 303 if (!is_aoe_netif(ifp))
304 goto cont; 304 goto cont;
@@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
325cont: 325cont:
326 dev_put(ifp); 326 dev_put(ifp);
327 } 327 }
328 read_unlock(&dev_base_lock); 328 rcu_read_unlock();
329} 329}
330 330
331static void 331static void
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda46279..a67d0a611a8a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@ static struct board_type products[] = {
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
114 {0x40910E11, "Smart Array 6i", &SA5_access}, 114 {0x40910E11, "Smart Array 6i", &SA5_access},
115 {0x3225103C, "Smart Array P600", &SA5_access}, 115 {0x3225103C, "Smart Array P600", &SA5_access},
116 {0x3223103C, "Smart Array P800", &SA5_access},
117 {0x3234103C, "Smart Array P400", &SA5_access},
116 {0x3235103C, "Smart Array P400i", &SA5_access}, 118 {0x3235103C, "Smart Array P400i", &SA5_access},
117 {0x3211103C, "Smart Array E200i", &SA5_access}, 119 {0x3211103C, "Smart Array E200i", &SA5_access},
118 {0x3212103C, "Smart Array E200", &SA5_access}, 120 {0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
3753 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3755 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3754 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 3756 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3755 break; 3757 break;
3756 msleep(10); 3758 usleep_range(10000, 20000);
3757 } 3759 }
3758} 3760}
3759 3761
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3937 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3939 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3938 subsystem_vendor_id; 3940 subsystem_vendor_id;
3939 3941
3940 for (i = 0; i < ARRAY_SIZE(products); i++) { 3942 for (i = 0; i < ARRAY_SIZE(products); i++)
3941 if (*board_id == products[i].board_id) 3943 if (*board_id == products[i].board_id)
3942 return i; 3944 return i;
3943 }
3944 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", 3945 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
3945 *board_id); 3946 *board_id);
3946 return -ENODEV; 3947 return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
3971 return -ENODEV; 3972 return -ENODEV;
3972} 3973}
3973 3974
3974static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) 3975static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
3976 void __iomem *vaddr, int wait_for_ready)
3977#define BOARD_READY 1
3978#define BOARD_NOT_READY 0
3975{ 3979{
3976 int i; 3980 int i, iterations;
3977 u32 scratchpad; 3981 u32 scratchpad;
3978 3982
3979 for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { 3983 if (wait_for_ready)
3980 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 3984 iterations = CCISS_BOARD_READY_ITERATIONS;
3981 if (scratchpad == CCISS_FIRMWARE_READY) 3985 else
3982 return 0; 3986 iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
3987
3988 for (i = 0; i < iterations; i++) {
3989 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3990 if (wait_for_ready) {
3991 if (scratchpad == CCISS_FIRMWARE_READY)
3992 return 0;
3993 } else {
3994 if (scratchpad != CCISS_FIRMWARE_READY)
3995 return 0;
3996 }
3983 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 3997 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
3984 } 3998 }
3985 dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); 3999 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3986 return -ENODEV; 4000 return -ENODEV;
3987} 4001}
3988 4002
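The refactor above folds the old cciss_wait_for_board_ready() and its inverse into one bounded poll parameterized by wait_for_ready, which the kdump reset path later uses to watch the board go not-ready and then ready again. A generic userspace sketch of the same shape (function name is hypothetical):

#include <stdbool.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 100

static int wait_for_state(bool (*is_ready)(void), bool want_ready,
                          int iterations)
{
        for (int i = 0; i < iterations; i++) {
                if (is_ready() == want_ready)
                        return 0;
                usleep(POLL_INTERVAL_MS * 1000);
        }
        return -1;      /* timed out */
}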
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4031static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) 4045static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4032{ 4046{
4033 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 4047 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4048
4049 /* Limit commands in memory limited kdump scenario. */
4050 if (reset_devices && h->max_commands > 32)
4051 h->max_commands = 32;
4052
4034 if (h->max_commands < 16) { 4053 if (h->max_commands < 16) {
4035 dev_warn(&h->pdev->dev, "Controller reports " 4054 dev_warn(&h->pdev->dev, "Controller reports "
4036 "max supported commands of %d, an obvious lie. " 4055 "max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
4148 err = -ENOMEM; 4167 err = -ENOMEM;
4149 goto err_out_free_res; 4168 goto err_out_free_res;
4150 } 4169 }
4151 err = cciss_wait_for_board_ready(h); 4170 err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
4152 if (err) 4171 if (err)
4153 goto err_out_free_res; 4172 goto err_out_free_res;
4154 err = cciss_find_cfgtables(h); 4173 err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4313#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) 4332#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
4314#define cciss_noop(p) cciss_message(p, 3, 0) 4333#define cciss_noop(p) cciss_message(p, 3, 0)
4315 4334
4316static __devinit int cciss_reset_msi(struct pci_dev *pdev)
4317{
4318/* the #defines are stolen from drivers/pci/msi.h. */
4319#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
4320#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
4321
4322 int pos;
4323 u16 control = 0;
4324
4325 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
4326 if (pos) {
4327 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4328 if (control & PCI_MSI_FLAGS_ENABLE) {
4329 dev_info(&pdev->dev, "resetting MSI\n");
4330 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
4331 }
4332 }
4333
4334 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
4335 if (pos) {
4336 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4337 if (control & PCI_MSIX_FLAGS_ENABLE) {
4338 dev_info(&pdev->dev, "resetting MSI-X\n");
4339 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
4340 }
4341 }
4342
4343 return 0;
4344}
4345
4346static int cciss_controller_hard_reset(struct pci_dev *pdev, 4335static int cciss_controller_hard_reset(struct pci_dev *pdev,
4347 void * __iomem vaddr, bool use_doorbell) 4336 void * __iomem vaddr, bool use_doorbell)
4348{ 4337{
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4397 * states or using the doorbell register. */ 4386 * states or using the doorbell register. */
4398static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4387static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4399{ 4388{
4400 u16 saved_config_space[32];
4401 u64 cfg_offset; 4389 u64 cfg_offset;
4402 u32 cfg_base_addr; 4390 u32 cfg_base_addr;
4403 u64 cfg_base_addr_index; 4391 u64 cfg_base_addr_index;
4404 void __iomem *vaddr; 4392 void __iomem *vaddr;
4405 unsigned long paddr; 4393 unsigned long paddr;
4406 u32 misc_fw_support, active_transport; 4394 u32 misc_fw_support, active_transport;
4407 int rc, i; 4395 int rc;
4408 CfgTable_struct __iomem *cfgtable; 4396 CfgTable_struct __iomem *cfgtable;
4409 bool use_doorbell; 4397 bool use_doorbell;
4410 u32 board_id; 4398 u32 board_id;
4399 u16 command_register;
4411 4400
4412 /* For controllers as old as the p600, this is very nearly 4401 /* For controllers as old as the p600, this is very nearly
4413 * the same thing as 4402 * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4417 * pci_set_power_state(pci_dev, PCI_D0); 4406 * pci_set_power_state(pci_dev, PCI_D0);
4418 * pci_restore_state(pci_dev); 4407 * pci_restore_state(pci_dev);
4419 * 4408 *
4420 * but we can't use these nice canned kernel routines on
4421 * kexec, because they also check the MSI/MSI-X state in PCI
4422 * configuration space and do the wrong thing when it is
4423 * set/cleared. Also, the pci_save/restore_state functions
4424 * violate the ordering requirements for restoring the
4425 * configuration space from the CCISS document (see the
4426 * comment below). So we roll our own ....
4427 *
4428 * For controllers newer than the P600, the pci power state 4409 * For controllers newer than the P600, the pci power state
4429 * method of resetting doesn't work so we have another way 4410 * method of resetting doesn't work so we have another way
4430 * using the doorbell register. 4411 * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4443 return -ENODEV; 4424 return -ENODEV;
4444 } 4425 }
4445 4426
4446 for (i = 0; i < 32; i++) 4427 /* Save the PCI command register */
4447 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 4428 pci_read_config_word(pdev, 4, &command_register);
4429 /* Turn the board off. This is so that later pci_restore_state()
4430 * won't turn the board on before the rest of config space is ready.
4431 */
4432 pci_disable_device(pdev);
4433 pci_save_state(pdev);
4448 4434
4449 /* find the first memory BAR, so we can find the cfg table */ 4435 /* find the first memory BAR, so we can find the cfg table */
4450 rc = cciss_pci_find_memory_BAR(pdev, &paddr); 4436 rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4479 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4465 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4480 if (rc) 4466 if (rc)
4481 goto unmap_cfgtable; 4467 goto unmap_cfgtable;
4482 4468 pci_restore_state(pdev);
4483 /* Restore the PCI configuration space. The Open CISS 4469 rc = pci_enable_device(pdev);
4484 * Specification says, "Restore the PCI Configuration 4470 if (rc) {
4485 * Registers, offsets 00h through 60h. It is important to 4471 dev_warn(&pdev->dev, "failed to enable device.\n");
4486 * restore the command register, 16-bits at offset 04h, 4472 goto unmap_cfgtable;
4487 * last. Do not restore the configuration status register,
4488 * 16-bits at offset 06h." Note that the offset is 2*i.
4489 */
4490 for (i = 0; i < 32; i++) {
4491 if (i == 2 || i == 3)
4492 continue;
4493 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
4494 } 4473 }
4495 wmb(); 4474 pci_write_config_word(pdev, 4, command_register);
4496 pci_write_config_word(pdev, 4, saved_config_space[2]);
4497 4475
4498 /* Some devices (notably the HP Smart Array 5i Controller) 4476 /* Some devices (notably the HP Smart Array 5i Controller)
4499 need a little pause here */ 4477 need a little pause here */
4500 msleep(CCISS_POST_RESET_PAUSE_MSECS); 4478 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4501 4479
4480 /* Wait for board to become not ready, then ready. */
4481 dev_info(&pdev->dev, "Waiting for board to become ready.\n");
4482 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4483 if (rc) /* Don't bail, might be E500, etc. which can't be reset */
4484 dev_warn(&pdev->dev,
4485 "failed waiting for board to become not ready\n");
4486 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4487 if (rc) {
4488 dev_warn(&pdev->dev,
4489 "failed waiting for board to become ready\n");
4490 goto unmap_cfgtable;
4491 }
4492 dev_info(&pdev->dev, "board ready.\n");
4493
4502 /* Controller should be in simple mode at this point. If it's not, 4494 /* Controller should be in simple mode at this point. If it's not,
4503 * It means we're on one of those controllers which doesn't support 4495 * It means we're on one of those controllers which doesn't support
4504 * the doorbell reset method and on which the PCI power management reset 4496 * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4539 return 0; /* just try to do the kdump anyhow. */ 4531 return 0; /* just try to do the kdump anyhow. */
4540 if (rc) 4532 if (rc)
4541 return -ENODEV; 4533 return -ENODEV;
4542 if (cciss_reset_msi(pdev))
4543 return -ENODEV;
4544 4534
4545 /* Now try to get the controller to respond to a no-op */ 4535 /* Now try to get the controller to respond to a no-op */
4546 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4536 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
4936 } 4926 }
4937 } 4927 }
4938 kthread_stop(cciss_scan_thread); 4928 kthread_stop(cciss_scan_thread);
4939 remove_proc_entry("driver/cciss", NULL); 4929 if (proc_cciss)
4930 remove_proc_entry("driver/cciss", NULL);
4940 bus_unregister(&cciss_bus_type); 4931 bus_unregister(&cciss_bus_type);
4941} 4932}
4942 4933
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ffc8f81..4b8933d778f1 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
200 * the above. 200 * the above.
201 */ 201 */
202#define CCISS_BOARD_READY_WAIT_SECS (120) 202#define CCISS_BOARD_READY_WAIT_SECS (120)
203#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
203#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) 204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
204#define CCISS_BOARD_READY_ITERATIONS \ 205#define CCISS_BOARD_READY_ITERATIONS \
205 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ 206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
206 CCISS_BOARD_READY_POLL_INTERVAL_MSECS) 207 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
208#define CCISS_BOARD_NOT_READY_ITERATIONS \
209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
207#define CCISS_POST_RESET_PAUSE_MSECS (3000) 211#define CCISS_POST_RESET_PAUSE_MSECS (3000)
208#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) 212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
209#define CCISS_POST_RESET_NOOP_RETRIES (12) 213#define CCISS_POST_RESET_NOOP_RETRIES (12)
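For reference, with the 100 ms poll interval these macros work out to 1200 iterations (120 s) when waiting for the board to become ready, and 100 iterations (10 s) when waiting for it to become not ready after a reset.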
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef97eac2..ba95cba192be 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
78 init_completion(&md_io.event); 78 init_completion(&md_io.event);
79 md_io.error = 0; 79 md_io.error = 0;
80 80
81 if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) 81 if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
82 rw |= REQ_HARDBARRIER; 82 rw |= REQ_FUA;
83 rw |= REQ_UNPLUG | REQ_SYNC; 83 rw |= REQ_UNPLUG | REQ_SYNC;
84 84
85 retry:
86 bio = bio_alloc(GFP_NOIO, 1); 85 bio = bio_alloc(GFP_NOIO, 1);
87 bio->bi_bdev = bdev->md_bdev; 86 bio->bi_bdev = bdev->md_bdev;
88 bio->bi_sector = sector; 87 bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
100 wait_for_completion(&md_io.event); 99 wait_for_completion(&md_io.event);
101 ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0; 100 ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
102 101
103 /* check for unsupported barrier op.
104 * would rather check on EOPNOTSUPP, but that is not reliable.
105 * don't try again for ANY return value != 0 */
106 if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
107 /* Try again with no barrier */
108 dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
109 set_bit(MD_NO_BARRIER, &mdev->flags);
110 rw &= ~REQ_HARDBARRIER;
111 bio_put(bio);
112 goto retry;
113 }
114 out: 102 out:
115 bio_put(bio); 103 bio_put(bio);
116 return ok; 104 return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
284 u32 xor_sum = 0; 272 u32 xor_sum = 0;
285 273
286 if (!get_ldev(mdev)) { 274 if (!get_ldev(mdev)) {
287 dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n"); 275 dev_err(DEV,
276 "disk is %s, cannot start al transaction (-%d +%d)\n",
277 drbd_disk_str(mdev->state.disk), evicted, new_enr);
288 complete(&((struct update_al_work *)w)->event); 278 complete(&((struct update_al_work *)w)->event);
289 return 1; 279 return 1;
290 } 280 }
291 /* do we have to do a bitmap write, first? 281 /* do we have to do a bitmap write, first?
292 * TODO reduce maximum latency: 282 * TODO reduce maximum latency:
293 * submit both bios, then wait for both, 283 * submit both bios, then wait for both,
294 * instead of doing two synchronous sector writes. */ 284 * instead of doing two synchronous sector writes.
285 * For now, we must not write the transaction,
286 * if we cannot write out the bitmap of the evicted extent. */
295 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) 287 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
296 drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); 288 drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
297 289
298 mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */ 290 /* The bitmap write may have failed, causing a state change. */
291 if (mdev->state.disk < D_INCONSISTENT) {
292 dev_err(DEV,
293 "disk is %s, cannot write al transaction (-%d +%d)\n",
294 drbd_disk_str(mdev->state.disk), evicted, new_enr);
295 complete(&((struct update_al_work *)w)->event);
296 put_ldev(mdev);
297 return 1;
298 }
299
300 mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
299 buffer = (struct al_transaction *)page_address(mdev->md_io_page); 301 buffer = (struct al_transaction *)page_address(mdev->md_io_page);
300 302
301 buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC); 303 buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
739 unsigned int enr; 741 unsigned int enr;
740 unsigned long add = 0; 742 unsigned long add = 0;
741 char ppb[10]; 743 char ppb[10];
742 int i; 744 int i, tmp;
743 745
744 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 746 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
745 747
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
747 enr = lc_element_by_index(mdev->act_log, i)->lc_number; 749 enr = lc_element_by_index(mdev->act_log, i)->lc_number;
748 if (enr == LC_FREE) 750 if (enr == LC_FREE)
749 continue; 751 continue;
750 add += drbd_bm_ALe_set_all(mdev, enr); 752 tmp = drbd_bm_ALe_set_all(mdev, enr);
753 dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
754 add += tmp;
751 } 755 }
752 756
753 lc_unlock(mdev->act_log); 757 lc_unlock(mdev->act_log);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf4393c0a..1ea1a34e78b2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@ struct drbd_conf;
114#define D_ASSERT(exp) if (!(exp)) \ 114#define D_ASSERT(exp) if (!(exp)) \
115 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) 115 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
116 116
117#define ERR_IF(exp) if (({ \ 117#define ERR_IF(exp) if (({ \
118 int _b = (exp) != 0; \ 118 int _b = (exp) != 0; \
119 if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \ 119 if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
120 __func__, #exp, __FILE__, __LINE__); \ 120 __func__, #exp, __FILE__, __LINE__); \
121 _b; \ 121 _b; \
122 })) 122 }))
123 123
124/* Defines to control fault insertion */ 124/* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
749 749
750/* drbd_epoch flag bits */ 750/* drbd_epoch flag bits */
751enum { 751enum {
752 DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
753 DE_BARRIER_IN_NEXT_EPOCH_DONE,
754 DE_CONTAINS_A_BARRIER,
755 DE_HAVE_BARRIER_NUMBER, 752 DE_HAVE_BARRIER_NUMBER,
756 DE_IS_FINISHING,
757}; 753};
758 754
759enum epoch_event { 755enum epoch_event {
760 EV_PUT, 756 EV_PUT,
761 EV_GOT_BARRIER_NR, 757 EV_GOT_BARRIER_NR,
762 EV_BARRIER_DONE,
763 EV_BECAME_LAST, 758 EV_BECAME_LAST,
764 EV_CLEANUP = 32, /* used as flag */ 759 EV_CLEANUP = 32, /* used as flag */
765}; 760};
@@ -801,11 +796,6 @@ enum {
801 __EE_CALL_AL_COMPLETE_IO, 796 __EE_CALL_AL_COMPLETE_IO,
802 __EE_MAY_SET_IN_SYNC, 797 __EE_MAY_SET_IN_SYNC,
803 798
804 /* This epoch entry closes an epoch using a barrier.
805 * On successful completion, the epoch is released,
806 * and the P_BARRIER_ACK sent. */
807 __EE_IS_BARRIER,
808
809 /* In case a barrier failed, 799 /* In case a barrier failed,
810 * we need to resubmit without the barrier flag. */ 800 * we need to resubmit without the barrier flag. */
811 __EE_RESUBMITTED, 801 __EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
820}; 810};
821#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) 811#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
822#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) 812#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
823#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
824#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) 813#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
825#define EE_WAS_ERROR (1<<__EE_WAS_ERROR) 814#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
826#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) 815#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
843 * Gets cleared when the state.conn 832 * Gets cleared when the state.conn
844 * goes into C_CONNECTED state. */ 833 * goes into C_CONNECTED state. */
845 WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ 834 WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
846 NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
847 CONSIDER_RESYNC, 835 CONSIDER_RESYNC,
848 836
849 MD_NO_BARRIER, /* meta data device does not support barriers, 837 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
850 so don't even try */
851 SUSPEND_IO, /* suspend application io */ 838 SUSPEND_IO, /* suspend application io */
852 BITMAP_IO, /* suspend application io; 839 BITMAP_IO, /* suspend application io;
853 once no more io in flight, start bitmap io */ 840 once no more io in flight, start bitmap io */
854 BITMAP_IO_QUEUED, /* Started bitmap IO */ 841 BITMAP_IO_QUEUED, /* Started bitmap IO */
855 GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */ 842 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
843 WAS_IO_ERROR, /* Local disk failed returned IO error */
856 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ 844 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
857 NET_CONGESTED, /* The data socket is congested */ 845 NET_CONGESTED, /* The data socket is congested */
858 846
@@ -947,7 +935,6 @@ enum write_ordering_e {
947 WO_none, 935 WO_none,
948 WO_drain_io, 936 WO_drain_io,
949 WO_bdev_flush, 937 WO_bdev_flush,
950 WO_bio_barrier
951}; 938};
952 939
953struct fifo_buffer { 940struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1281extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 1268extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1282extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); 1269extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
1283extern void drbd_go_diskless(struct drbd_conf *mdev); 1270extern void drbd_go_diskless(struct drbd_conf *mdev);
1271extern void drbd_ldev_destroy(struct drbd_conf *mdev);
1284 1272
1285 1273
1286/* Meta data layout 1274/* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1798 case EP_PASS_ON: 1786 case EP_PASS_ON:
1799 if (!forcedetach) { 1787 if (!forcedetach) {
1800 if (__ratelimit(&drbd_ratelimit_state)) 1788 if (__ratelimit(&drbd_ratelimit_state))
1801 dev_err(DEV, "Local IO failed in %s." 1789 dev_err(DEV, "Local IO failed in %s.\n", where);
1802 "Passing error on...\n", where);
1803 break; 1790 break;
1804 } 1791 }
1805 /* NOTE fall through to detach case if forcedetach set */ 1792 /* NOTE fall through to detach case if forcedetach set */
1806 case EP_DETACH: 1793 case EP_DETACH:
1807 case EP_CALL_HELPER: 1794 case EP_CALL_HELPER:
1795 set_bit(WAS_IO_ERROR, &mdev->flags);
1808 if (mdev->state.disk > D_FAILED) { 1796 if (mdev->state.disk > D_FAILED) {
1809 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); 1797 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
1810 dev_err(DEV, "Local IO failed in %s." 1798 dev_err(DEV,
1811 "Detaching...\n", where); 1799 "Local IO failed in %s. Detaching...\n", where);
1812 } 1800 }
1813 break; 1801 break;
1814 } 1802 }
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1874static inline sector_t drbd_get_capacity(struct block_device *bdev) 1862static inline sector_t drbd_get_capacity(struct block_device *bdev)
1875{ 1863{
1876 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ 1864 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1877 return bdev ? bdev->bd_inode->i_size >> 9 : 0; 1865 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1878} 1866}
1879 1867
1880/** 1868/**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
2127 __release(local); 2115 __release(local);
2128 D_ASSERT(i >= 0); 2116 D_ASSERT(i >= 0);
2129 if (i == 0) { 2117 if (i == 0) {
2118 if (mdev->state.disk == D_DISKLESS)
2119 /* even internal references gone, safe to destroy */
2120 drbd_ldev_destroy(mdev);
2130 if (mdev->state.disk == D_FAILED) 2121 if (mdev->state.disk == D_FAILED)
2122 /* all application IO references gone. */
2131 drbd_go_diskless(mdev); 2123 drbd_go_diskless(mdev);
2132 wake_up(&mdev->misc_wait); 2124 wake_up(&mdev->misc_wait);
2133 } 2125 }
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
2138{ 2130{
2139 int io_allowed; 2131 int io_allowed;
2140 2132
2133 /* never get a reference while D_DISKLESS */
2134 if (mdev->state.disk == D_DISKLESS)
2135 return 0;
2136
2141 atomic_inc(&mdev->local_cnt); 2137 atomic_inc(&mdev->local_cnt);
2142 io_allowed = (mdev->state.disk >= mins); 2138 io_allowed = (mdev->state.disk >= mins);
2143 if (!io_allowed) 2139 if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
2406{ 2402{
2407 int r; 2403 int r;
2408 2404
2409 if (test_bit(MD_NO_BARRIER, &mdev->flags)) 2405 if (test_bit(MD_NO_FUA, &mdev->flags))
2410 return; 2406 return;
2411 2407
2412 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); 2408 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2413 if (r) { 2409 if (r) {
2414 set_bit(MD_NO_BARRIER, &mdev->flags); 2410 set_bit(MD_NO_FUA, &mdev->flags);
2415 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); 2411 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
2416 } 2412 }
2417} 2413}
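drbd_md_flush() above shows a latch-on-failure pattern: the first failed flush sets MD_NO_FUA, and all later flushes become no-ops instead of failing every metadata write. A userspace analogue using fsync() (sketch only):

#include <stdio.h>
#include <unistd.h>

static int no_flush;    /* latched on first failure, like MD_NO_FUA */

static void meta_flush(int fd)
{
        if (no_flush)
                return;
        if (fsync(fd) != 0) {
                no_flush = 1;
                fprintf(stderr, "flush failed, disabling md-flushes\n");
        }
}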
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73c5062..6be5401d0e88 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
835 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) 835 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
836 ns.conn = os.conn; 836 ns.conn = os.conn;
837 837
838 /* we cannot fail (again) if we already detached */
839 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
840 ns.disk = D_DISKLESS;
841
842 /* if we are only D_ATTACHING yet,
843 * we can (and should) go directly to D_DISKLESS. */
844 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
845 ns.disk = D_DISKLESS;
846
838 /* After C_DISCONNECTING only C_STANDALONE may follow */ 847 /* After C_DISCONNECTING only C_STANDALONE may follow */
839 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) 848 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
840 ns.conn = os.conn; 849 ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
1056 !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) 1065 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1057 set_bit(DEVICE_DYING, &mdev->flags); 1066 set_bit(DEVICE_DYING, &mdev->flags);
1058 1067
1059 mdev->state.i = ns.i; 1068 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1069 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1070 * drbd_ldev_destroy() won't happen before our corresponding
1071 * after_state_ch works run, where we put_ldev again. */
1072 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1073 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1074 atomic_inc(&mdev->local_cnt);
1075
1076 mdev->state = ns;
1060 wake_up(&mdev->misc_wait); 1077 wake_up(&mdev->misc_wait);
1061 wake_up(&mdev->state_wait); 1078 wake_up(&mdev->state_wait);
1062 1079
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1268 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1285 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1269 drbd_uuid_new_current(mdev); 1286 drbd_uuid_new_current(mdev);
1270 clear_bit(NEW_CUR_UUID, &mdev->flags); 1287 clear_bit(NEW_CUR_UUID, &mdev->flags);
1271 drbd_md_sync(mdev);
1272 } 1288 }
1273 spin_lock_irq(&mdev->req_lock); 1289 spin_lock_irq(&mdev->req_lock);
1274 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); 1290 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1365 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) 1381 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1366 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); 1382 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1367 1383
1368 /* first half of local IO error */ 1384 /* first half of local IO error, failure to attach,
1369 if (os.disk > D_FAILED && ns.disk == D_FAILED) { 1385 * or administrative detach */
1370 enum drbd_io_error_p eh = EP_PASS_ON; 1386 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1387 enum drbd_io_error_p eh;
1388 int was_io_error;
1389 /* corresponding get_ldev was in __drbd_set_state, to serialize
1390 * our cleanup here with the transition to D_DISKLESS,
1391 * so it is safe to dereference ldev here. */
1392 eh = mdev->ldev->dc.on_io_error;
1393 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1394
1395 /* current state still has to be D_FAILED,
1396 * there is only one way out: to D_DISKLESS,
1397 * and that may only happen after our put_ldev below. */
1398 if (mdev->state.disk != D_FAILED)
1399 dev_err(DEV,
1400 "ASSERT FAILED: disk is %s during detach\n",
1401 drbd_disk_str(mdev->state.disk));
1371 1402
1372 if (drbd_send_state(mdev)) 1403 if (drbd_send_state(mdev))
1373 dev_warn(DEV, "Notified peer that my disk is broken.\n"); 1404 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1374 else 1405 else
1375 dev_err(DEV, "Sending state for drbd_io_error() failed\n"); 1406 dev_err(DEV, "Sending state for detaching disk failed\n");
1376 1407
1377 drbd_rs_cancel_all(mdev); 1408 drbd_rs_cancel_all(mdev);
1378 1409
1379 if (get_ldev_if_state(mdev, D_FAILED)) { 1410 /* In case we want to get something to stable storage still,
1380 eh = mdev->ldev->dc.on_io_error; 1411 * this may be the last chance.
1381 put_ldev(mdev); 1412 * Following put_ldev may transition to D_DISKLESS. */
1382 } 1413 drbd_md_sync(mdev);
1383 if (eh == EP_CALL_HELPER) 1414 put_ldev(mdev);
1415
1416 if (was_io_error && eh == EP_CALL_HELPER)
1384 drbd_khelper(mdev, "local-io-error"); 1417 drbd_khelper(mdev, "local-io-error");
1385 } 1418 }
1386 1419
1420 /* second half of local IO error, failure to attach,
1421 * or administrative detach,
1422 * after local_cnt references have reached zero again */
1423 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1424 /* We must still be diskless,
1425 * re-attach has to be serialized with this! */
1426 if (mdev->state.disk != D_DISKLESS)
1427 dev_err(DEV,
1428 "ASSERT FAILED: disk is %s while going diskless\n",
1429 drbd_disk_str(mdev->state.disk));
1387 1430
1388 /* second half of local IO error handling, 1431 mdev->rs_total = 0;
1389 * after local_cnt references have reached zero: */ 1432 mdev->rs_failed = 0;
1390 if (os.disk == D_FAILED && ns.disk == D_DISKLESS) { 1433 atomic_set(&mdev->rs_pending_cnt, 0);
1391 mdev->rs_total = 0;
1392 mdev->rs_failed = 0;
1393 atomic_set(&mdev->rs_pending_cnt, 0);
1394 }
1395
1396 if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
1397 /* We must still be diskless,
1398 * re-attach has to be serialized with this! */
1399 if (mdev->state.disk != D_DISKLESS)
1400 dev_err(DEV,
1401 "ASSERT FAILED: disk is %s while going diskless\n",
1402 drbd_disk_str(mdev->state.disk));
1403 1434
1404 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
1405 * will inc/dec it frequently. Since we became D_DISKLESS, no
1406 * one has touched the protected members anymore, though, so we
1407 * are safe to free them here. */
1408 if (drbd_send_state(mdev)) 1435 if (drbd_send_state(mdev))
1409 dev_warn(DEV, "Notified peer that I detached my disk.\n"); 1436 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1410 else 1437 else
1411 dev_err(DEV, "Sending state for detach failed\n"); 1438 dev_err(DEV, "Sending state for being diskless failed\n");
1412 1439 /* corresponding get_ldev in __drbd_set_state
1413 lc_destroy(mdev->resync); 1440 * this may finally trigger drbd_ldev_destroy. */
1414 mdev->resync = NULL; 1441 put_ldev(mdev);
1415 lc_destroy(mdev->act_log);
1416 mdev->act_log = NULL;
1417 __no_warn(local,
1418 drbd_free_bc(mdev->ldev);
1419 mdev->ldev = NULL;);
1420
1421 if (mdev->md_io_tmpp) {
1422 __free_page(mdev->md_io_tmpp);
1423 mdev->md_io_tmpp = NULL;
1424 }
1425 } 1442 }
1426 1443
1427 /* Disks got bigger while they were detached */ 1444 /* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2772 2789
2773 drbd_set_defaults(mdev); 2790 drbd_set_defaults(mdev);
2774 2791
2775 /* for now, we do NOT yet support it,
2776 * even though we start some framework
2777 * to eventually support barriers */
2778 set_bit(NO_BARRIER_SUPP, &mdev->flags);
2779
2780 atomic_set(&mdev->ap_bio_cnt, 0); 2792 atomic_set(&mdev->ap_bio_cnt, 0);
2781 atomic_set(&mdev->ap_pending_cnt, 0); 2793 atomic_set(&mdev->ap_pending_cnt, 0);
2782 atomic_set(&mdev->rs_pending_cnt, 0); 2794 atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2842 drbd_thread_init(mdev, &mdev->asender, drbd_asender); 2854 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2843 2855
2844 mdev->agreed_pro_version = PRO_VERSION_MAX; 2856 mdev->agreed_pro_version = PRO_VERSION_MAX;
2845 mdev->write_ordering = WO_bio_barrier; 2857 mdev->write_ordering = WO_bdev_flush;
2846 mdev->resync_wenr = LC_FREE; 2858 mdev->resync_wenr = LC_FREE;
2847} 2859}
2848 2860
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2899 D_ASSERT(list_empty(&mdev->resync_work.list)); 2911 D_ASSERT(list_empty(&mdev->resync_work.list));
2900 D_ASSERT(list_empty(&mdev->unplug_work.list)); 2912 D_ASSERT(list_empty(&mdev->unplug_work.list));
2901 D_ASSERT(list_empty(&mdev->go_diskless.list)); 2913 D_ASSERT(list_empty(&mdev->go_diskless.list));
2902
2903} 2914}
2904 2915
2905 2916
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3660 3671
3661 get_random_bytes(&val, sizeof(u64)); 3672 get_random_bytes(&val, sizeof(u64));
3662 _drbd_uuid_set(mdev, UI_CURRENT, val); 3673 _drbd_uuid_set(mdev, UI_CURRENT, val);
3674 /* get it to stable storage _now_ */
3675 drbd_md_sync(mdev);
3663} 3676}
3664 3677
3665void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) 3678void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3756 return 1; 3769 return 1;
3757} 3770}
3758 3771
3772void drbd_ldev_destroy(struct drbd_conf *mdev)
3773{
3774 lc_destroy(mdev->resync);
3775 mdev->resync = NULL;
3776 lc_destroy(mdev->act_log);
3777 mdev->act_log = NULL;
3778 __no_warn(local,
3779 drbd_free_bc(mdev->ldev);
3780 mdev->ldev = NULL;);
3781
3782 if (mdev->md_io_tmpp) {
3783 __free_page(mdev->md_io_tmpp);
3784 mdev->md_io_tmpp = NULL;
3785 }
3786 clear_bit(GO_DISKLESS, &mdev->flags);
3787}
3788
3759static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) 3789static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3760{ 3790{
3761 D_ASSERT(mdev->state.disk == D_FAILED); 3791 D_ASSERT(mdev->state.disk == D_FAILED);
3762 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will 3792 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3763 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch 3793 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3764 * the protected members anymore, though, so in the after_state_ch work 3794 * the protected members anymore, though, so once put_ldev reaches zero
3765 * it will be safe to free them. */ 3795 * again, it will be safe to free them. */
3766 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3796 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3767 /* We need to wait for return of references checked out while we still
3768 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
3769 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
3770
3771 clear_bit(GO_DISKLESS, &mdev->flags);
3772 return 1; 3797 return 1;
3773} 3798}
3774 3799
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
3777 D_ASSERT(mdev->state.disk == D_FAILED); 3802 D_ASSERT(mdev->state.disk == D_FAILED);
3778 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) 3803 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3779 drbd_queue_work(&mdev->data.work, &mdev->go_diskless); 3804 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3780 /* don't drbd_queue_work_front,
3781 * we need to serialize with the after_state_ch work
3782 * of the -> D_FAILED transition. */
3783} 3805}
3784 3806
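The drbd_go_diskless() hunk above drops only the stale comment; the test_and_set_bit() guard stays, so the go_diskless work item is queued at most once no matter how many paths race to request the transition. A minimal userspace sketch of that queue-once pattern, assuming C11 atomics (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Model of the GO_DISKLESS guard: test-and-set on a flag ensures the
 * work item is queued at most once per transition. */
static atomic_flag go_diskless_queued = ATOMIC_FLAG_INIT;

static void queue_go_diskless_work(void)
{
    printf("go_diskless work queued\n");
}

static void request_go_diskless(void)
{
    /* equivalent of !test_and_set_bit(GO_DISKLESS, &flags):
     * only the first caller sees the flag clear */
    if (!atomic_flag_test_and_set(&go_diskless_queued))
        queue_go_diskless_work();
}

int main(void)
{
    request_go_diskless();  /* queues the work */
    request_go_diskless();  /* no-op: already pending */
    return 0;
}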
3785/** 3807/**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e97e613..29e5c70e4e26 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
870 retcode = ERR_DISK_CONFIGURED; 870 retcode = ERR_DISK_CONFIGURED;
871 goto fail; 871 goto fail;
872 } 872 }
 873 /* It may just now have detached because of an IO error. Make sure
 874 * drbd_ldev_destroy is done already; we may end up here very fast,
 875 * e.g. if someone calls attach from the on-io-error handler,
 876 * to realize a "hot spare" feature (not that I'd recommend that) */
877 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
873 878
874 /* allocation not in the IO path, cqueue thread context */ 879 /* allocation not in the IO path, cqueue thread context */
875 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 880 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1098 /* Reset the "barriers don't work" bits here, then force meta data to 1103 /* Reset the "barriers don't work" bits here, then force meta data to
1099 * be written, to ensure we determine if barriers are supported. */ 1104 * be written, to ensure we determine if barriers are supported. */
1100 if (nbc->dc.no_md_flush) 1105 if (nbc->dc.no_md_flush)
1101 set_bit(MD_NO_BARRIER, &mdev->flags); 1106 set_bit(MD_NO_FUA, &mdev->flags);
1102 else 1107 else
1103 clear_bit(MD_NO_BARRIER, &mdev->flags); 1108 clear_bit(MD_NO_FUA, &mdev->flags);
1104 1109
1105 /* Point of no return reached. 1110 /* Point of no return reached.
1106 * Devices and memory are no longer released by error cleanup below. 1111 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1112 nbc = NULL; 1117 nbc = NULL;
1113 resync_lru = NULL; 1118 resync_lru = NULL;
1114 1119
1115 mdev->write_ordering = WO_bio_barrier; 1120 mdev->write_ordering = WO_bdev_flush;
1116 drbd_bump_write_ordering(mdev, WO_bio_barrier); 1121 drbd_bump_write_ordering(mdev, WO_bdev_flush);
1117 1122
1118 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1123 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1119 set_bit(CRASHED_PRIMARY, &mdev->flags); 1124 set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1262 force_diskless_dec: 1267 force_diskless_dec:
1263 put_ldev(mdev); 1268 put_ldev(mdev);
1264 force_diskless: 1269 force_diskless:
1265 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 1270 drbd_force_state(mdev, NS(disk, D_FAILED));
1266 drbd_md_sync(mdev); 1271 drbd_md_sync(mdev);
1267 release_bdev2_fail: 1272 release_bdev2_fail:
1268 if (nbc) 1273 if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1285 return 0; 1290 return 0;
1286} 1291}
1287 1292
1293/* Detaching the disk is a process in multiple stages. First we need to lock
1294 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1295 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1296 * internal references as well.
 1297 * Only then have we finally detached. */
1288static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1298static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1289 struct drbd_nl_cfg_reply *reply) 1299 struct drbd_nl_cfg_reply *reply)
1290{ 1300{
1301 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1291 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1302 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1303 if (mdev->state.disk == D_DISKLESS)
1304 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1305 drbd_resume_io(mdev);
1292 return 0; 1306 return 0;
1293} 1307}
1294 1308
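The comment block above describes the new multi-stage detach: suspend IO, force D_DISKLESS, then sleep on misc_wait until local_cnt drops to zero. A rough userspace model of that wait_event()/wake_up() handshake, with a pthread condition variable standing in for the kernel wait queue (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* The detach path waits until every outstanding local reference
 * (local_cnt) has been dropped; each put side wakes the waiter. */
static atomic_int local_cnt = 3;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t misc_wait = PTHREAD_COND_INITIALIZER;

static void *put_ldev_worker(void *arg)
{
    (void)arg;
    while (atomic_load(&local_cnt) > 0) {
        atomic_fetch_sub(&local_cnt, 1);   /* drop one reference */
        pthread_mutex_lock(&lock);
        pthread_cond_signal(&misc_wait);   /* wake_up(&misc_wait) */
        pthread_mutex_unlock(&lock);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, put_ldev_worker, NULL);

    /* wait_event(misc_wait, !atomic_read(&local_cnt)) */
    pthread_mutex_lock(&lock);
    while (atomic_load(&local_cnt) != 0)
        pthread_cond_wait(&misc_wait, &lock);
    pthread_mutex_unlock(&lock);

    printf("all local references gone; safe to free ldev\n");
    pthread_join(t, NULL);
    return 0;
}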
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1953 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1967 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1954 drbd_uuid_new_current(mdev); 1968 drbd_uuid_new_current(mdev);
1955 clear_bit(NEW_CUR_UUID, &mdev->flags); 1969 clear_bit(NEW_CUR_UUID, &mdev->flags);
1956 drbd_md_sync(mdev);
1957 } 1970 }
1958 drbd_suspend_io(mdev); 1971 drbd_suspend_io(mdev);
1959 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 1972 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5d0ce1..7e6ac307e2de 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
158 [WO_none] = 'n', 158 [WO_none] = 'n',
159 [WO_drain_io] = 'd', 159 [WO_drain_io] = 'd',
160 [WO_bdev_flush] = 'f', 160 [WO_bdev_flush] = 'f',
161 [WO_bio_barrier] = 'b',
162 }; 161 };
163 162
164 seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", 163 seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169acf2f..d299fe9e78c8 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -49,11 +49,6 @@
49 49
50#include "drbd_vli.h" 50#include "drbd_vli.h"
51 51
52struct flush_work {
53 struct drbd_work w;
54 struct drbd_epoch *epoch;
55};
56
57enum finish_epoch { 52enum finish_epoch {
58 FE_STILL_LIVE, 53 FE_STILL_LIVE,
59 FE_DESTROYED, 54 FE_DESTROYED,
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
66static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event); 61static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67static int e_end_block(struct drbd_conf *, struct drbd_work *, int); 62static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
68 63
69static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
70{
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
75 prev = NULL;
76 spin_unlock(&mdev->epoch_lock);
77 return prev;
78}
79 64
80#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) 65#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
81 66
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
981 return TRUE; 966 return TRUE;
982} 967}
983 968
984static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) 969static void drbd_flush(struct drbd_conf *mdev)
985{ 970{
986 int rv; 971 int rv;
987 972
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
997 } 982 }
998 put_ldev(mdev); 983 put_ldev(mdev);
999 } 984 }
1000
1001 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1002}
1003
1004static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1005{
1006 struct flush_work *fw = (struct flush_work *)w;
1007 struct drbd_epoch *epoch = fw->epoch;
1008
1009 kfree(w);
1010
1011 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
1012 drbd_flush_after_epoch(mdev, epoch);
1013
1014 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1015 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1016
1017 return 1;
1018} 985}
1019 986
1020/** 987/**
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1027 struct drbd_epoch *epoch, 994 struct drbd_epoch *epoch,
1028 enum epoch_event ev) 995 enum epoch_event ev)
1029{ 996{
1030 int finish, epoch_size; 997 int epoch_size;
1031 struct drbd_epoch *next_epoch; 998 struct drbd_epoch *next_epoch;
1032 int schedule_flush = 0;
1033 enum finish_epoch rv = FE_STILL_LIVE; 999 enum finish_epoch rv = FE_STILL_LIVE;
1034 1000
1035 spin_lock(&mdev->epoch_lock); 1001 spin_lock(&mdev->epoch_lock);
1036 do { 1002 do {
1037 next_epoch = NULL; 1003 next_epoch = NULL;
1038 finish = 0;
1039 1004
1040 epoch_size = atomic_read(&epoch->epoch_size); 1005 epoch_size = atomic_read(&epoch->epoch_size);
1041 1006
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1045 break; 1010 break;
1046 case EV_GOT_BARRIER_NR: 1011 case EV_GOT_BARRIER_NR:
1047 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); 1012 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1048
1049 /* Special case: If we just switched from WO_bio_barrier to
1050 WO_bdev_flush we should not finish the current epoch */
1051 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1052 mdev->write_ordering != WO_bio_barrier &&
1053 epoch == mdev->current_epoch)
1054 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1055 break;
1056 case EV_BARRIER_DONE:
1057 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1058 break; 1013 break;
1059 case EV_BECAME_LAST: 1014 case EV_BECAME_LAST:
1060 /* nothing to do*/ 1015 /* nothing to do*/
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1063 1018
1064 if (epoch_size != 0 && 1019 if (epoch_size != 0 &&
1065 atomic_read(&epoch->active) == 0 && 1020 atomic_read(&epoch->active) == 0 &&
1066 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) && 1021 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
1067 epoch->list.prev == &mdev->current_epoch->list &&
1068 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1069 /* Nearly all conditions are met to finish that epoch... */
1070 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1071 mdev->write_ordering == WO_none ||
1072 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1073 ev & EV_CLEANUP) {
1074 finish = 1;
1075 set_bit(DE_IS_FINISHING, &epoch->flags);
1076 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1077 mdev->write_ordering == WO_bio_barrier) {
1078 atomic_inc(&epoch->active);
1079 schedule_flush = 1;
1080 }
1081 }
1082 if (finish) {
1083 if (!(ev & EV_CLEANUP)) { 1022 if (!(ev & EV_CLEANUP)) {
1084 spin_unlock(&mdev->epoch_lock); 1023 spin_unlock(&mdev->epoch_lock);
1085 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size); 1024 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1102 /* atomic_set(&epoch->active, 0); is already zero */ 1041 /* atomic_set(&epoch->active, 0); is already zero */
1103 if (rv == FE_STILL_LIVE) 1042 if (rv == FE_STILL_LIVE)
1104 rv = FE_RECYCLED; 1043 rv = FE_RECYCLED;
1044 wake_up(&mdev->ee_wait);
1105 } 1045 }
1106 } 1046 }
1107 1047
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1113 1053
1114 spin_unlock(&mdev->epoch_lock); 1054 spin_unlock(&mdev->epoch_lock);
1115 1055
1116 if (schedule_flush) {
1117 struct flush_work *fw;
1118 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1119 if (fw) {
1120 fw->w.cb = w_flush;
1121 fw->epoch = epoch;
1122 drbd_queue_work(&mdev->data.work, &fw->w);
1123 } else {
1124 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1125 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1126 /* That is not a recursion, only one level */
1127 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1128 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1129 }
1130 }
1131
1132 return rv; 1056 return rv;
1133} 1057}
1134 1058
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
1144 [WO_none] = "none", 1068 [WO_none] = "none",
1145 [WO_drain_io] = "drain", 1069 [WO_drain_io] = "drain",
1146 [WO_bdev_flush] = "flush", 1070 [WO_bdev_flush] = "flush",
1147 [WO_bio_barrier] = "barrier",
1148 }; 1071 };
1149 1072
1150 pwo = mdev->write_ordering; 1073 pwo = mdev->write_ordering;
1151 wo = min(pwo, wo); 1074 wo = min(pwo, wo);
1152 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1153 wo = WO_bdev_flush;
1154 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) 1075 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1155 wo = WO_drain_io; 1076 wo = WO_drain_io;
1156 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) 1077 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1157 wo = WO_none; 1078 wo = WO_none;
1158 mdev->write_ordering = wo; 1079 mdev->write_ordering = wo;
1159 if (pwo != mdev->write_ordering || wo == WO_bio_barrier) 1080 if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
1160 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]); 1081 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1161} 1082}
1162 1083
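With WO_bio_barrier gone, drbd_bump_write_ordering() degrades monotonically through flush -> drain -> none, clamped by min() and by the per-device no_disk_* knobs. A compilable sketch of that selection logic, with enum order encoding strength (struct and helper names are illustrative):

#include <stdio.h>

enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush };

struct disk_caps { int no_disk_flush; int no_disk_drain; };

static enum write_ordering_e
bump_write_ordering(enum write_ordering_e cur, enum write_ordering_e wo,
                    const struct disk_caps *dc)
{
    static const char *names[] = { "none", "drain", "flush" };

    if (wo > cur)                       /* wo = min(pwo, wo) */
        wo = cur;
    if (wo == WO_bdev_flush && dc->no_disk_flush)
        wo = WO_drain_io;               /* device cannot flush */
    if (wo == WO_drain_io && dc->no_disk_drain)
        wo = WO_none;                   /* device cannot drain */
    printf("Method to ensure write ordering: %s\n", names[wo]);
    return wo;
}

int main(void)
{
    struct disk_caps dc = { .no_disk_flush = 1, .no_disk_drain = 0 };
    /* flush unsupported -> degrade to drain */
    bump_write_ordering(WO_bdev_flush, WO_bdev_flush, &dc);
    return 0;
}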
@@ -1192,7 +1113,7 @@ next_bio:
1192 bio->bi_sector = sector; 1113 bio->bi_sector = sector;
1193 bio->bi_bdev = mdev->ldev->backing_bdev; 1114 bio->bi_bdev = mdev->ldev->backing_bdev;
1194 /* we special case some flags in the multi-bio case, see below 1115 /* we special case some flags in the multi-bio case, see below
1195 * (REQ_UNPLUG, REQ_HARDBARRIER) */ 1116 * (REQ_UNPLUG) */
1196 bio->bi_rw = rw; 1117 bio->bi_rw = rw;
1197 bio->bi_private = e; 1118 bio->bi_private = e;
1198 bio->bi_end_io = drbd_endio_sec; 1119 bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@ next_bio:
1226 bio->bi_rw &= ~REQ_UNPLUG; 1147 bio->bi_rw &= ~REQ_UNPLUG;
1227 1148
1228 drbd_generic_make_request(mdev, fault_type, bio); 1149 drbd_generic_make_request(mdev, fault_type, bio);
1229
1230 /* strip off REQ_HARDBARRIER,
1231 * unless it is the first or last bio */
1232 if (bios && bios->bi_next)
1233 bios->bi_rw &= ~REQ_HARDBARRIER;
1234 } while (bios); 1150 } while (bios);
1235 maybe_kick_lo(mdev); 1151 maybe_kick_lo(mdev);
1236 return 0; 1152 return 0;
@@ -1244,45 +1160,9 @@ fail:
1244 return -ENOMEM; 1160 return -ENOMEM;
1245} 1161}
1246 1162
1247/**
1248 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1249 * @mdev: DRBD device.
1250 * @w: work object.
1251 * @cancel: The connection will be closed anyways (unused in this callback)
1252 */
1253int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1254{
1255 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1256 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1257 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1258 so that we can finish that epoch in drbd_may_finish_epoch().
1259 That is necessary if we already have a long chain of Epochs, before
1260 we realize that REQ_HARDBARRIER is actually not supported */
1261
1262 /* As long as the -ENOTSUPP on the barrier is reported immediately
1263 that will never trigger. If it is reported late, we will just
1264 print that warning and continue correctly for all future requests
1265 with WO_bdev_flush */
1266 if (previous_epoch(mdev, e->epoch))
1267 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1268
1269 /* we still have a local reference,
1270 * get_ldev was done in receive_Data. */
1271
1272 e->w.cb = e_end_block;
1273 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1274 /* drbd_submit_ee fails for one reason only:
1275 * if was not able to allocate sufficient bios.
1276 * requeue, try again later. */
1277 e->w.cb = w_e_reissue;
1278 drbd_queue_work(&mdev->data.work, &e->w);
1279 }
1280 return 1;
1281}
1282
1283static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1163static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1284{ 1164{
1285 int rv, issue_flush; 1165 int rv;
1286 struct p_barrier *p = &mdev->data.rbuf.barrier; 1166 struct p_barrier *p = &mdev->data.rbuf.barrier;
1287 struct drbd_epoch *epoch; 1167 struct drbd_epoch *epoch;
1288 1168
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
1300 * Therefore we must send the barrier_ack after the barrier request was 1180 * Therefore we must send the barrier_ack after the barrier request was
1301 * completed. */ 1181 * completed. */
1302 switch (mdev->write_ordering) { 1182 switch (mdev->write_ordering) {
1303 case WO_bio_barrier:
1304 case WO_none: 1183 case WO_none:
1305 if (rv == FE_RECYCLED) 1184 if (rv == FE_RECYCLED)
1306 return TRUE; 1185 return TRUE;
1307 break; 1186
1187 /* receiver context, in the writeout path of the other node.
1188 * avoid potential distributed deadlock */
1189 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1190 if (epoch)
1191 break;
1192 else
1193 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1194 /* Fall through */
1308 1195
1309 case WO_bdev_flush: 1196 case WO_bdev_flush:
1310 case WO_drain_io: 1197 case WO_drain_io:
1311 if (rv == FE_STILL_LIVE) {
1312 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1313 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1314 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1315 }
1316 if (rv == FE_RECYCLED)
1317 return TRUE;
1318
1319 /* The asender will send all the ACKs and barrier ACKs out, since
1320 all EEs moved from the active_ee to the done_ee. We need to
1321 provide a new epoch object for the EEs that come in soon */
1322 break;
1323 }
1324
1325 /* receiver context, in the writeout path of the other node.
1326 * avoid potential distributed deadlock */
1327 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1328 if (!epoch) {
1329 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1330 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1331 drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1198 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1332 if (issue_flush) { 1199 drbd_flush(mdev);
1333 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1200
1334 if (rv == FE_RECYCLED) 1201 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1335 return TRUE; 1202 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1203 if (epoch)
1204 break;
1336 } 1205 }
1337 1206
1338 drbd_wait_ee_list_empty(mdev, &mdev->done_ee); 1207 epoch = mdev->current_epoch;
1208 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1209
1210 D_ASSERT(atomic_read(&epoch->active) == 0);
1211 D_ASSERT(epoch->flags == 0);
1339 1212
1340 return TRUE; 1213 return TRUE;
1214 default:
1215 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
1216 return FALSE;
1341 } 1217 }
1342 1218
1343 epoch->flags = 0; 1219 epoch->flags = 0;
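The rewritten receive_Barrier tail above follows one strategy: after the flush, hand out a fresh epoch object if the current epoch still holds requests, and otherwise (or if the NOIO allocation fails) wait for the current epoch to drain and recycle it, driven by the wake_up(&mdev->ee_wait) added in drbd_may_finish_epoch(). An outline of that fallback in plain C, with a spin standing in for the sleep (names illustrative):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct epoch { atomic_int epoch_size; int flags; };

static struct epoch *next_epoch(struct epoch *current)
{
    struct epoch *e = NULL;

    if (atomic_load(&current->epoch_size) != 0)
        e = malloc(sizeof(*e));          /* kmalloc(..., GFP_NOIO) */
    if (!e) {
        /* in the kernel: wait_event(ee_wait, epoch_size == 0) */
        while (atomic_load(&current->epoch_size) != 0)
            ;                            /* busy-wait stands in for sleeping */
        e = current;                     /* recycle the drained epoch */
    }
    e->flags = 0;
    atomic_store(&e->epoch_size, 0);
    return e;
}

int main(void)
{
    struct epoch cur = { .epoch_size = 0 };
    struct epoch *e = next_epoch(&cur);
    printf("recycled current epoch: %s\n", e == &cur ? "yes" : "no");
    if (e != &cur)
        free(e);
    return 0;
}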
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1652{ 1528{
1653 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1529 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1654 sector_t sector = e->sector; 1530 sector_t sector = e->sector;
1655 struct drbd_epoch *epoch;
1656 int ok = 1, pcmd; 1531 int ok = 1, pcmd;
1657 1532
1658 if (e->flags & EE_IS_BARRIER) {
1659 epoch = previous_epoch(mdev, e->epoch);
1660 if (epoch)
1661 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1662 }
1663
1664 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { 1533 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1665 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1534 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1666 pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1535 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1817 e->epoch = mdev->current_epoch; 1686 e->epoch = mdev->current_epoch;
1818 atomic_inc(&e->epoch->epoch_size); 1687 atomic_inc(&e->epoch->epoch_size);
1819 atomic_inc(&e->epoch->active); 1688 atomic_inc(&e->epoch->active);
1820
1821 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1822 struct drbd_epoch *epoch;
1823 /* Issue a barrier if we start a new epoch, and the previous epoch
1824 was not a epoch containing a single request which already was
1825 a Barrier. */
1826 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1827 if (epoch == e->epoch) {
1828 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1829 rw |= REQ_HARDBARRIER;
1830 e->flags |= EE_IS_BARRIER;
1831 } else {
1832 if (atomic_read(&epoch->epoch_size) > 1 ||
1833 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1834 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1835 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1836 rw |= REQ_HARDBARRIER;
1837 e->flags |= EE_IS_BARRIER;
1838 }
1839 }
1840 }
1841 spin_unlock(&mdev->epoch_lock); 1689 spin_unlock(&mdev->epoch_lock);
1842 1690
1843 dp_flags = be32_to_cpu(p->dp_flags); 1691 dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1995 break; 1843 break;
1996 } 1844 }
1997 1845
1998 if (mdev->state.pdsk == D_DISKLESS) { 1846 if (mdev->state.pdsk < D_INCONSISTENT) {
1999 /* In case we have the only disk of the cluster, */ 1847 /* In case we have the only disk of the cluster, */
2000 drbd_set_out_of_sync(mdev, e->sector, e->size); 1848 drbd_set_out_of_sync(mdev, e->sector, e->size);
2001 e->flags |= EE_CALL_AL_COMPLETE_IO; 1849 e->flags |= EE_CALL_AL_COMPLETE_IO;
1850 e->flags &= ~EE_MAY_SET_IN_SYNC;
2002 drbd_al_begin_io(mdev, e->sector); 1851 drbd_al_begin_io(mdev, e->sector);
2003 } 1852 }
2004 1853
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
3362 if (ns.conn == C_MASK) { 3211 if (ns.conn == C_MASK) {
3363 ns.conn = C_CONNECTED; 3212 ns.conn = C_CONNECTED;
3364 if (mdev->state.disk == D_NEGOTIATING) { 3213 if (mdev->state.disk == D_NEGOTIATING) {
3365 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3214 drbd_force_state(mdev, NS(disk, D_FAILED));
3366 } else if (peer_state.disk == D_NEGOTIATING) { 3215 } else if (peer_state.disk == D_NEGOTIATING) {
3367 dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3216 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3368 peer_state.disk = D_DISKLESS; 3217 peer_state.disk = D_DISKLESS;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a2545fc8..11a75d32a2e2 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
258 if (!hlist_unhashed(&req->colision)) 258 if (!hlist_unhashed(&req->colision))
259 hlist_del(&req->colision); 259 hlist_del(&req->colision);
260 else 260 else
261 D_ASSERT((s & RQ_NET_MASK) == 0); 261 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
262 262
263 /* for writes we need to do some extra housekeeping */ 263 /* for writes we need to do some extra housekeeping */
264 if (rw == WRITE) 264 if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
813 mdev->state.conn >= C_CONNECTED)); 813 mdev->state.conn >= C_CONNECTED));
814 814
815 if (!(local || remote) && !is_susp(mdev->state)) { 815 if (!(local || remote) && !is_susp(mdev->state)) {
816 dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); 816 if (__ratelimit(&drbd_ratelimit_state))
817 dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
817 goto fail_free_complete; 818 goto fail_free_complete;
818 } 819 }
819 820
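The hunk above wraps the "neither local nor remote disk" message in __ratelimit() so a stream of failing requests cannot flood the log. A userspace approximation of the interval/burst scheme (the struct layout and the 5s/3-message parameters are illustrative, not the kernel's ratelimit_state):

#include <stdio.h>
#include <time.h>

struct ratelimit_state { time_t begin; int interval, burst, printed; };

static int ratelimit_ok(struct ratelimit_state *rs)
{
    time_t now = time(NULL);

    if (now - rs->begin >= rs->interval) {   /* new interval: reset */
        rs->begin = now;
        rs->printed = 0;
    }
    if (rs->printed >= rs->burst)
        return 0;                            /* suppressed */
    rs->printed++;
    return 1;
}

int main(void)
{
    struct ratelimit_state rs = { .interval = 5, .burst = 3 };

    for (int i = 0; i < 10; i++)
        if (ratelimit_ok(&rs))
            printf("IO ERROR: neither local nor remote disk\n");
    return 0;
}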
@@ -942,12 +943,21 @@ allocate_barrier:
942 if (local) { 943 if (local) {
943 req->private_bio->bi_bdev = mdev->ldev->backing_bdev; 944 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
944 945
945 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR 946 /* State may have changed since we grabbed our reference on the
946 : rw == READ ? DRBD_FAULT_DT_RD 947 * mdev->ldev member. Double check, and short-circuit to endio.
947 : DRBD_FAULT_DT_RA)) 948 * In case the last activity log transaction failed to get on
949 * stable storage, and this is a WRITE, we may not even submit
950 * this bio. */
951 if (get_ldev(mdev)) {
952 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
953 : rw == READ ? DRBD_FAULT_DT_RD
954 : DRBD_FAULT_DT_RA))
955 bio_endio(req->private_bio, -EIO);
956 else
957 generic_make_request(req->private_bio);
958 put_ldev(mdev);
959 } else
948 bio_endio(req->private_bio, -EIO); 960 bio_endio(req->private_bio, -EIO);
949 else
950 generic_make_request(req->private_bio);
951 } 961 }
952 962
953 /* we need to plug ALWAYS since we possibly need to kick lo_dev. 963 /* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
1022 return 0; 1032 return 0;
1023 } 1033 }
1024 1034
1025 /* Reject barrier requests if we know the underlying device does
1026 * not support them.
1027 * XXX: Need to get this info from peer as well some how so we
1028 * XXX: reject if EITHER side/data/metadata area does not support them.
1029 *
1030 * because of those XXX, this is not yet enabled,
1031 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
1032 */
1033 if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
1034 /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
1035 bio_endio(bio, -EOPNOTSUPP);
1036 return 0;
1037 }
1038
1039 /* 1035 /*
1040 * what we "blindly" assume: 1036 * what we "blindly" assume:
1041 */ 1037 */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d58015cd1..b0551ba7ad0c 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
102 put_ldev(mdev); 102 put_ldev(mdev);
103} 103}
104 104
105static int is_failed_barrier(int ee_flags)
106{
107 return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
108 == (EE_IS_BARRIER|EE_WAS_ERROR);
109}
110
111/* writes on behalf of the partner, or resync writes, 105/* writes on behalf of the partner, or resync writes,
112 * "submitted" by the receiver, final stage. */ 106 * "submitted" by the receiver, final stage. */
113static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) 107static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
119 int is_syncer_req; 113 int is_syncer_req;
120 int do_al_complete_io; 114 int do_al_complete_io;
121 115
122 /* if this is a failed barrier request, disable use of barriers,
123 * and schedule for resubmission */
124 if (is_failed_barrier(e->flags)) {
125 drbd_bump_write_ordering(mdev, WO_bdev_flush);
126 spin_lock_irqsave(&mdev->req_lock, flags);
127 list_del(&e->w.list);
128 e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
129 e->w.cb = w_e_reissue;
130 /* put_ldev actually happens below, once we come here again. */
131 __release(local);
132 spin_unlock_irqrestore(&mdev->req_lock, flags);
133 drbd_queue_work(&mdev->data.work, &e->w);
134 return;
135 }
136
137 D_ASSERT(e->block_id != ID_VACANT); 116 D_ASSERT(e->block_id != ID_VACANT);
138 117
139 /* after we moved e to done_ee, 118 /* after we moved e to done_ee,
@@ -925,7 +904,7 @@ out:
925 drbd_md_sync(mdev); 904 drbd_md_sync(mdev);
926 905
927 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { 906 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
928 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); 907 dev_info(DEV, "Writing the whole bitmap\n");
929 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); 908 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
930 } 909 }
931 910
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107cce982..3951020e494a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@ out_unreg_blkdev:
4363out_put_disk: 4363out_put_disk:
4364 while (dr--) { 4364 while (dr--) {
4365 del_timer(&motor_off_timer[dr]); 4365 del_timer(&motor_off_timer[dr]);
4366 put_disk(disks[dr]);
4367 if (disks[dr]->queue) 4366 if (disks[dr]->queue)
4368 blk_cleanup_queue(disks[dr]->queue); 4367 blk_cleanup_queue(disks[dr]->queue);
4368 put_disk(disks[dr]);
4369 } 4369 }
4370 return err; 4370 return err;
4371} 4371}
@@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void)
4573 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); 4573 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
4574 platform_device_unregister(&floppy_device[drive]); 4574 platform_device_unregister(&floppy_device[drive]);
4575 } 4575 }
4576 put_disk(disks[drive]);
4577 blk_cleanup_queue(disks[drive]->queue); 4576 blk_cleanup_queue(disks[drive]->queue);
4577 put_disk(disks[drive]);
4578 } 4578 }
4579 4579
4580 del_timer_sync(&fd_timeout); 4580 del_timer_sync(&fd_timeout);
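Both floppy hunks make the same fix: clean up the request queue while the disk reference is still held, and call put_disk() last, since put_disk() may drop the final reference and free the gendisk that owns the queue pointer. A small userspace model of why the order matters (the types and refcounting here are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct queue { int alive; };
struct disk  { int refs; struct queue *queue; };

static void blk_cleanup_queue(struct queue *q)
{
    q->alive = 0;
    free(q);
}

static void put_disk(struct disk *d)
{
    if (--d->refs == 0) {
        printf("last reference dropped: disk freed\n");
        free(d);
    }
}

int main(void)
{
    struct disk *d = malloc(sizeof(*d));
    d->refs = 1;
    d->queue = malloc(sizeof(*d->queue));
    d->queue->alive = 1;

    /* Correct order, as in the patched code: queue first, disk last.
     * Reversing the two calls would read d->queue after the disk --
     * and with it our handle to the queue -- may already be gone. */
    if (d->queue)
        blk_cleanup_queue(d->queue);
    put_disk(d);
    return 0;
}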
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284ef65fa..7ea0bea2f7e3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
481 if (bio_rw(bio) == WRITE) { 481 if (bio_rw(bio) == WRITE) {
482 struct file *file = lo->lo_backing_file; 482 struct file *file = lo->lo_backing_file;
483 483
484 /* REQ_HARDBARRIER is deprecated */
485 if (bio->bi_rw & REQ_HARDBARRIER) {
486 ret = -EOPNOTSUPP;
487 goto out;
488 }
489
490 if (bio->bi_rw & REQ_FLUSH) { 484 if (bio->bi_rw & REQ_FLUSH) {
491 ret = vfs_fsync(file, 0); 485 ret = vfs_fsync(file, 0);
492 if (unlikely(ret && ret != -EINVAL)) { 486 if (unlikely(ret && ret != -EINVAL)) {
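With the deprecated REQ_HARDBARRIER check gone, the loop driver's write path only has to honor REQ_FLUSH, which it maps to vfs_fsync() on the backing file. The same idea in userspace terms (the file name is made up for the sketch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("backing.img", O_RDWR | O_CREAT, 0600);
    if (fd < 0) { perror("open"); return 1; }

    const char data[] = "payload";
    if (write(fd, data, sizeof data) < 0) { perror("write"); return 1; }

    if (fsync(fd) != 0) {       /* the REQ_FLUSH handling */
        perror("fsync");
        return 1;
    }
    close(fd);
    return 0;
}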
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812ba124..255035cfc88a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
289 289
290 ring_req->operation = rq_data_dir(req) ? 290 ring_req->operation = rq_data_dir(req) ?
291 BLKIF_OP_WRITE : BLKIF_OP_READ; 291 BLKIF_OP_WRITE : BLKIF_OP_READ;
292 if (req->cmd_flags & REQ_HARDBARRIER)
293 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
294 292
295 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 293 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
296 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); 294 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 3a9c01416839..ba53ec956c95 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -2,24 +2,10 @@
2# Makefile for the kernel character device drivers. 2# Makefile for the kernel character device drivers.
3# 3#
4 4
5# 5obj-y += mem.o random.o
6# This file contains the font map for the default (hardware) font
7#
8FONTMAPFILE = cp437.uni
9
10obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
11
12obj-y += tty_mutex.o
13obj-$(CONFIG_LEGACY_PTYS) += pty.o
14obj-$(CONFIG_UNIX98_PTYS) += pty.o
15obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o 6obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
16obj-y += misc.o 7obj-y += misc.o
17obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
18obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o 8obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
19obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
20obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
21obj-$(CONFIG_AUDIT) += tty_audit.o
22obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
23obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o 9obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
24obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o 10obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
25obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o 11obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
@@ -41,8 +27,6 @@ obj-$(CONFIG_ISI) += isicom.o
41obj-$(CONFIG_SYNCLINK) += synclink.o 27obj-$(CONFIG_SYNCLINK) += synclink.o
42obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o 28obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
43obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o 29obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
44obj-$(CONFIG_N_HDLC) += n_hdlc.o
45obj-$(CONFIG_N_GSM) += n_gsm.o
46obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
47obj-$(CONFIG_SX) += sx.o generic_serial.o 31obj-$(CONFIG_SX) += sx.o generic_serial.o
48obj-$(CONFIG_RIO) += rio/ generic_serial.o 32obj-$(CONFIG_RIO) += rio/ generic_serial.o
@@ -74,7 +58,6 @@ obj-$(CONFIG_PRINTER) += lp.o
74obj-$(CONFIG_APM_EMULATION) += apm-emulation.o 58obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
75 59
76obj-$(CONFIG_DTLK) += dtlk.o 60obj-$(CONFIG_DTLK) += dtlk.o
77obj-$(CONFIG_R3964) += n_r3964.o
78obj-$(CONFIG_APPLICOM) += applicom.o 61obj-$(CONFIG_APPLICOM) += applicom.o
79obj-$(CONFIG_SONYPI) += sonypi.o 62obj-$(CONFIG_SONYPI) += sonypi.o
80obj-$(CONFIG_RTC) += rtc.o 63obj-$(CONFIG_RTC) += rtc.o
@@ -115,28 +98,3 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
115 98
116obj-$(CONFIG_JS_RTC) += js-rtc.o 99obj-$(CONFIG_JS_RTC) += js-rtc.o
117js-rtc-y = rtc.o 100js-rtc-y = rtc.o
118
119# Files generated that shall be removed upon make clean
120clean-files := consolemap_deftbl.c defkeymap.c
121
122quiet_cmd_conmk = CONMK $@
123 cmd_conmk = scripts/conmakehash $< > $@
124
125$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
126 $(call cmd,conmk)
127
128$(obj)/defkeymap.o: $(obj)/defkeymap.c
129
130# Uncomment if you're changing the keymap and have an appropriate
131# loadkeys version for the map. By default, we'll use the shipped
132# versions.
133# GENERATE_KEYMAP := 1
134
135ifdef GENERATE_KEYMAP
136
137$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
138 loadkeys --mktable $< > $@.tmp
139 sed -e 's/^static *//' $@.tmp > $@
140 rm $@.tmp
141
142endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760ea2435..9272c38dd3c6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; 1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1211 u32 pte_flags; 1211 u32 pte_flags;
1212 1212
1213 if (type_mask == AGP_USER_UNCACHED_MEMORY) 1213 if (type_mask == AGP_USER_MEMORY)
1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; 1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { 1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1216 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; 1216 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1217 if (gfdt) 1217 if (gfdt)
1218 pte_flags |= GEN6_PTE_GFDT; 1218 pte_flags |= GEN6_PTE_GFDT;
1219 } else { /* set 'normal'/'cached' to LLC by default */ 1219 } else { /* set 'normal'/'cached' to LLC by default */
1220 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; 1220 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1221 if (gfdt) 1221 if (gfdt)
1222 pte_flags |= GEN6_PTE_GFDT; 1222 pte_flags |= GEN6_PTE_GFDT;
1223 } 1223 }
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a70461a12c..c0bd6f472c52 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1299{ 1299{
1300 struct async_struct * info = tty->driver_data; 1300 struct async_struct * info = tty->driver_data;
1301 struct async_icount cprev, cnow; /* kernel counter temps */ 1301 struct async_icount cprev, cnow; /* kernel counter temps */
1302 struct serial_icounter_struct icount;
1303 void __user *argp = (void __user *)arg; 1302 void __user *argp = (void __user *)arg;
1304 unsigned long flags; 1303 unsigned long flags;
1305 1304
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1f11b4..294d03e8c61a 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
1828 unsigned int cmd, unsigned long arg) 1828 unsigned int cmd, unsigned long arg)
1829{ 1829{
1830 struct port *port = tty->driver_data; 1830 struct port *port = tty->driver_data;
1831 void __user *argp = (void __user *)arg;
1832 int rval = -ENOIOCTLCMD; 1831 int rval = -ENOIOCTLCMD;
1833 1832
1834 DBG1("******** IOCTL, cmd: %d", cmd); 1833 DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f89d951..eaa41992fbe2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
2796 .hangup = mgslpc_hangup, 2796 .hangup = mgslpc_hangup,
2797 .tiocmget = tiocmget, 2797 .tiocmget = tiocmget,
2798 .tiocmset = tiocmset, 2798 .tiocmset = tiocmset,
2799 .get_icount = mgslpc_get_icount,
2799 .proc_fops = &mgslpc_proc_fops, 2800 .proc_fops = &mgslpc_proc_fops,
2800}; 2801};
2801 2802
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a44611652282..d68d3aa1814b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -616,13 +616,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
616 /* get hold of clock */ 616 /* get hold of clock */
617 p->clk = clk_get(&p->pdev->dev, "cmt_fck"); 617 p->clk = clk_get(&p->pdev->dev, "cmt_fck");
618 if (IS_ERR(p->clk)) { 618 if (IS_ERR(p->clk)) {
619 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 619 dev_err(&p->pdev->dev, "cannot get clock\n");
620 p->clk = clk_get(&p->pdev->dev, cfg->clk); 620 ret = PTR_ERR(p->clk);
621 if (IS_ERR(p->clk)) { 621 goto err1;
622 dev_err(&p->pdev->dev, "cannot get clock\n");
623 ret = PTR_ERR(p->clk);
624 goto err1;
625 }
626 } 622 }
627 623
628 if (resource_size(res) == 6) { 624 if (resource_size(res) == 6) {
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index ef7a5be8a09f..40630cb98237 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -287,13 +287,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
287 /* get hold of clock */ 287 /* get hold of clock */
288 p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); 288 p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
289 if (IS_ERR(p->clk)) { 289 if (IS_ERR(p->clk)) {
290 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 290 dev_err(&p->pdev->dev, "cannot get clock\n");
291 p->clk = clk_get(&p->pdev->dev, cfg->clk); 291 ret = PTR_ERR(p->clk);
292 if (IS_ERR(p->clk)) { 292 goto err1;
293 dev_err(&p->pdev->dev, "cannot get clock\n");
294 ret = PTR_ERR(p->clk);
295 goto err1;
296 }
297 } 293 }
298 294
299 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), 295 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index de715901b82a..36aba9923060 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -393,13 +393,9 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
393 /* get hold of clock */ 393 /* get hold of clock */
394 p->clk = clk_get(&p->pdev->dev, "tmu_fck"); 394 p->clk = clk_get(&p->pdev->dev, "tmu_fck");
395 if (IS_ERR(p->clk)) { 395 if (IS_ERR(p->clk)) {
396 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 396 dev_err(&p->pdev->dev, "cannot get clock\n");
397 p->clk = clk_get(&p->pdev->dev, cfg->clk); 397 ret = PTR_ERR(p->clk);
398 if (IS_ERR(p->clk)) { 398 goto err1;
399 dev_err(&p->pdev->dev, "cannot get clock\n");
400 ret = PTR_ERR(p->clk);
401 goto err1;
402 }
403 } 399 }
404 400
405 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 401 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
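The three clocksource hunks (sh_cmt, sh_mtu2, sh_tmu) drop the deprecated second clock lookup: if clk_get() fails, the error encoded in the returned pointer is propagated directly. A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom those error paths rely on (the macros below are simplified re-implementations for illustration, not the kernel headers):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* stand-in clk_get() that always fails, to exercise the error path */
static void *clk_get(const char *con_id)
{
    (void)con_id;
    return ERR_PTR(-2);
}

int main(void)
{
    void *clk = clk_get("tmu_fck");

    if (IS_ERR(clk)) {
        fprintf(stderr, "cannot get clock\n");
        return (int)-PTR_ERR(clk);      /* ret = PTR_ERR(p->clk) */
    }
    return 0;
}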
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d51aee..84eb607d6c03 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -577,17 +577,11 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
577 return ret; 577 return ret;
578} 578}
579 579
580static int ar_context_add_page(struct ar_context *ctx) 580static void ar_context_link_page(struct ar_context *ctx,
581 struct ar_buffer *ab, dma_addr_t ab_bus)
581{ 582{
582 struct device *dev = ctx->ohci->card.device;
583 struct ar_buffer *ab;
584 dma_addr_t uninitialized_var(ab_bus);
585 size_t offset; 583 size_t offset;
586 584
587 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
588 if (ab == NULL)
589 return -ENOMEM;
590
591 ab->next = NULL; 585 ab->next = NULL;
592 memset(&ab->descriptor, 0, sizeof(ab->descriptor)); 586 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
593 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 587 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@ static int ar_context_add_page(struct ar_context *ctx)
606 600
607 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 601 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
608 flush_writes(ctx->ohci); 602 flush_writes(ctx->ohci);
603}
604
605static int ar_context_add_page(struct ar_context *ctx)
606{
607 struct device *dev = ctx->ohci->card.device;
608 struct ar_buffer *ab;
609 dma_addr_t uninitialized_var(ab_bus);
610
611 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
612 if (ab == NULL)
613 return -ENOMEM;
614
615 ar_context_link_page(ctx, ab, ab_bus);
609 616
610 return 0; 617 return 0;
611} 618}
@@ -730,16 +737,17 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
730static void ar_context_tasklet(unsigned long data) 737static void ar_context_tasklet(unsigned long data)
731{ 738{
732 struct ar_context *ctx = (struct ar_context *)data; 739 struct ar_context *ctx = (struct ar_context *)data;
733 struct fw_ohci *ohci = ctx->ohci;
734 struct ar_buffer *ab; 740 struct ar_buffer *ab;
735 struct descriptor *d; 741 struct descriptor *d;
736 void *buffer, *end; 742 void *buffer, *end;
743 __le16 res_count;
737 744
738 ab = ctx->current_buffer; 745 ab = ctx->current_buffer;
739 d = &ab->descriptor; 746 d = &ab->descriptor;
740 747
741 if (d->res_count == 0) { 748 res_count = ACCESS_ONCE(d->res_count);
742 size_t size, rest, offset; 749 if (res_count == 0) {
750 size_t size, size2, rest, pktsize, size3, offset;
743 dma_addr_t start_bus; 751 dma_addr_t start_bus;
744 void *start; 752 void *start;
745 753
@@ -750,29 +758,63 @@ static void ar_context_tasklet(unsigned long data)
750 */ 758 */
751 759
752 offset = offsetof(struct ar_buffer, data); 760 offset = offsetof(struct ar_buffer, data);
753 start = buffer = ab; 761 start = ab;
754 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 762 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
763 buffer = ab->data;
755 764
756 ab = ab->next; 765 ab = ab->next;
757 d = &ab->descriptor; 766 d = &ab->descriptor;
758 size = buffer + PAGE_SIZE - ctx->pointer; 767 size = start + PAGE_SIZE - ctx->pointer;
768 /* valid buffer data in the next page */
759 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); 769 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
770 /* what actually fits in this page */
771 size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
760 memmove(buffer, ctx->pointer, size); 772 memmove(buffer, ctx->pointer, size);
761 memcpy(buffer + size, ab->data, rest); 773 memcpy(buffer + size, ab->data, size2);
762 ctx->current_buffer = ab; 774
763 ctx->pointer = (void *) ab->data + rest; 775 while (size > 0) {
764 end = buffer + size + rest; 776 void *next = handle_ar_packet(ctx, buffer);
777 pktsize = next - buffer;
778 if (pktsize >= size) {
779 /*
780 * We have handled all the data that was
781 * originally in this page, so we can now
782 * continue in the next page.
783 */
784 buffer = next;
785 break;
786 }
787 /* move the next packet to the start of the buffer */
788 memmove(buffer, next, size + size2 - pktsize);
789 size -= pktsize;
790 /* fill up this page again */
791 size3 = min(rest - size2,
792 (size_t)PAGE_SIZE - offset - size - size2);
793 memcpy(buffer + size + size2,
794 (void *) ab->data + size2, size3);
795 size2 += size3;
796 }
765 797
766 while (buffer < end) 798 if (rest > 0) {
767 buffer = handle_ar_packet(ctx, buffer); 799 /* handle the packets that are fully in the next page */
800 buffer = (void *) ab->data +
801 (buffer - (start + offset + size));
802 end = (void *) ab->data + rest;
803
804 while (buffer < end)
805 buffer = handle_ar_packet(ctx, buffer);
768 806
769 dma_free_coherent(ohci->card.device, PAGE_SIZE, 807 ctx->current_buffer = ab;
770 start, start_bus); 808 ctx->pointer = end;
771 ar_context_add_page(ctx); 809
810 ar_context_link_page(ctx, start, start_bus);
811 } else {
812 ctx->pointer = start + PAGE_SIZE;
813 }
772 } else { 814 } else {
773 buffer = ctx->pointer; 815 buffer = ctx->pointer;
774 ctx->pointer = end = 816 ctx->pointer = end =
775 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); 817 (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
776 818
777 while (buffer < end) 819 while (buffer < end)
778 buffer = handle_ar_packet(ctx, buffer); 820 buffer = handle_ar_packet(ctx, buffer);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..f7af91cb273d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
276 struct drm_crtc *tmp; 276 struct drm_crtc *tmp;
277 int crtc_mask = 1; 277 int crtc_mask = 1;
278 278
279 WARN(!crtc, "checking null crtc?"); 279 WARN(!crtc, "checking null crtc?\n");
280 280
281 dev = crtc->dev; 281 dev = crtc->dev;
282 282
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a530..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
240 .addr = DDC_ADDR, 240 .addr = DDC_ADDR,
241 .flags = I2C_M_RD, 241 .flags = I2C_M_RD,
242 .len = len, 242 .len = len,
243 .buf = buf + start, 243 .buf = buf,
244 } 244 }
245 }; 245 };
246 246
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
253static u8 * 253static u8 *
254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
255{ 255{
256 int i, j = 0; 256 int i, j = 0, valid_extensions = 0;
257 u8 *block, *new; 257 u8 *block, *new;
258 258
259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
280 280
281 for (j = 1; j <= block[0x7e]; j++) { 281 for (j = 1; j <= block[0x7e]; j++) {
282 for (i = 0; i < 4; i++) { 282 for (i = 0; i < 4; i++) {
283 if (drm_do_probe_ddc_edid(adapter, block, j, 283 if (drm_do_probe_ddc_edid(adapter,
284 EDID_LENGTH)) 284 block + (valid_extensions + 1) * EDID_LENGTH,
285 j, EDID_LENGTH))
285 goto out; 286 goto out;
286 if (drm_edid_block_valid(block + j * EDID_LENGTH)) 287 if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
288 valid_extensions++;
287 break; 289 break;
290 }
288 } 291 }
289 if (i == 4) 292 if (i == 4)
290 goto carp; 293 dev_warn(connector->dev->dev,
294 "%s: Ignoring invalid EDID block %d.\n",
295 drm_get_connector_name(connector), j);
296 }
297
298 if (valid_extensions != block[0x7e]) {
299 block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
300 block[0x7e] = valid_extensions;
301 new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
302 if (!new)
303 goto out;
304 block = new;
291 } 305 }
292 306
293 return block; 307 return block;
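The drm_edid.c change keeps going when an extension block fails validation: valid blocks are compacted toward the front, the extension count at byte 0x7e is rewritten, the base-block checksum byte absorbs the difference so the block still sums to zero, and the buffer is shrunk with krealloc(). A simplified userspace model of that compaction, where a checksum-only test stands in for drm_edid_block_valid():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

static int block_valid(const uint8_t *blk)
{
    uint8_t sum = 0;
    for (int i = 0; i < EDID_LENGTH; i++)
        sum += blk[i];
    return sum == 0;                /* EDID blocks sum to 0 mod 256 */
}

static uint8_t *compact_extensions(uint8_t *edid)
{
    int total = edid[0x7e], valid = 0;

    for (int j = 1; j <= total; j++) {
        uint8_t *src = edid + j * EDID_LENGTH;
        if (!block_valid(src))
            continue;               /* ignore invalid extension block */
        valid++;
        memmove(edid + valid * EDID_LENGTH, src, EDID_LENGTH);
    }
    if (valid != total) {
        uint8_t *shrunk;
        /* byte 127 absorbs the change to byte 0x7e, keeping the
         * base block's checksum correct */
        edid[EDID_LENGTH - 1] += edid[0x7e] - valid;
        edid[0x7e] = (uint8_t)valid;
        shrunk = realloc(edid, (size_t)(valid + 1) * EDID_LENGTH);
        if (shrunk)
            edid = shrunk;          /* krealloc-style shrink */
    }
    return edid;
}

int main(void)
{
    uint8_t *edid = calloc(3, EDID_LENGTH); /* base + 2 extensions */
    edid[0x7e] = 2;
    edid[EDID_LENGTH - 1] = (uint8_t)-2;    /* base block sums to 0 */
    edid[EDID_LENGTH] = 1;                  /* corrupt first extension */
    edid = compact_extensions(edid);
    printf("valid extensions kept: %d\n", edid[0x7e]);
    free(edid);
    return 0;
}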
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..80745f85902c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
45 45
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0400); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_lvds_downclock = 0; 49unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..90414ae86afc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1321 1321
1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1324#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1324 1325
1325#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1326#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1326 1327
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..ef188e391406 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2172static int i915_ring_idle(struct drm_device *dev, 2172static int i915_ring_idle(struct drm_device *dev,
2173 struct intel_ring_buffer *ring) 2173 struct intel_ring_buffer *ring)
2174{ 2174{
2175 if (list_empty(&ring->gpu_write_list)) 2175 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2176 return 0; 2176 return 0;
2177 2177
2178 i915_gem_flush_ring(dev, NULL, ring, 2178 i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
2190 int ret; 2190 int ret;
2191 2191
2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2193 list_empty(&dev_priv->render_ring.active_list) && 2193 list_empty(&dev_priv->mm.active_list));
2194 list_empty(&dev_priv->bsd_ring.active_list) &&
2195 list_empty(&dev_priv->blt_ring.active_list));
2196 if (lists_empty) 2194 if (lists_empty)
2197 return 0; 2195 return 0;
2198 2196
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3108 * write domain 3106 * write domain
3109 */ 3107 */
3110 if (obj->write_domain && 3108 if (obj->write_domain &&
3111 obj->write_domain != obj->pending_read_domains) { 3109 (obj->write_domain != obj->pending_read_domains ||
3110 obj_priv->ring != ring)) {
3112 flush_domains |= obj->write_domain; 3111 flush_domains |= obj->write_domain;
3113 invalidate_domains |= 3112 invalidate_domains |=
3114 obj->pending_read_domains & ~obj->write_domain; 3113 obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
3497 return 0; 3496 return 0;
3498} 3497}
3499 3498
3499static int
3500i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3501 struct drm_file *file,
3502 struct intel_ring_buffer *ring,
3503 struct drm_gem_object **objects,
3504 int count)
3505{
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 int ret, i;
3508
3509 /* Zero the global flush/invalidate flags. These
3510 * will be modified as new domains are computed
3511 * for each object
3512 */
3513 dev->invalidate_domains = 0;
3514 dev->flush_domains = 0;
3515 dev_priv->mm.flush_rings = 0;
3516 for (i = 0; i < count; i++)
3517 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3518
3519 if (dev->invalidate_domains | dev->flush_domains) {
3520#if WATCH_EXEC
3521 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3522 __func__,
3523 dev->invalidate_domains,
3524 dev->flush_domains);
3525#endif
3526 i915_gem_flush(dev, file,
3527 dev->invalidate_domains,
3528 dev->flush_domains,
3529 dev_priv->mm.flush_rings);
3530 }
3531
3532 for (i = 0; i < count; i++) {
3533 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3534 /* XXX replace with semaphores */
3535 if (obj->ring && ring != obj->ring) {
3536 ret = i915_gem_object_wait_rendering(&obj->base, true);
3537 if (ret)
3538 return ret;
3539 }
3540 }
3541
3542 return 0;
3543}
3544
3500/* Throttle our rendering by waiting until the ring has completed our requests 3545/* Throttle our rendering by waiting until the ring has completed our requests
3501 * emitted over 20 msec ago. 3546 * emitted over 20 msec ago.
3502 * 3547 *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3757 goto err; 3802 goto err;
3758 } 3803 }
3759 3804
3760 /* Zero the global flush/invalidate flags. These 3805 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3761 * will be modified as new domains are computed 3806 object_list, args->buffer_count);
3762 * for each object 3807 if (ret)
3763 */ 3808 goto err;
3764 dev->invalidate_domains = 0;
3765 dev->flush_domains = 0;
3766 dev_priv->mm.flush_rings = 0;
3767
3768 for (i = 0; i < args->buffer_count; i++) {
3769 struct drm_gem_object *obj = object_list[i];
3770
3771 /* Compute new gpu domains and update invalidate/flush */
3772 i915_gem_object_set_to_gpu_domain(obj, ring);
3773 }
3774
3775 if (dev->invalidate_domains | dev->flush_domains) {
3776#if WATCH_EXEC
3777 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3778 __func__,
3779 dev->invalidate_domains,
3780 dev->flush_domains);
3781#endif
3782 i915_gem_flush(dev, file,
3783 dev->invalidate_domains,
3784 dev->flush_domains,
3785 dev_priv->mm.flush_rings);
3786 }
3787 3809
3788 for (i = 0; i < args->buffer_count; i++) { 3810 for (i = 0; i < args->buffer_count; i++) {
3789 struct drm_gem_object *obj = object_list[i]; 3811 struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4043 alignment = i915_gem_get_gtt_alignment(obj); 4065 alignment = i915_gem_get_gtt_alignment(obj);
4044 if (obj_priv->gtt_offset & (alignment - 1)) { 4066 if (obj_priv->gtt_offset & (alignment - 1)) {
4045 WARN(obj_priv->pin_count, 4067 WARN(obj_priv->pin_count,
4046 "bo is already pinned with incorrect alignment:" 4068 "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
4047 " offset=%x, req.alignment=%x\n",
4048 obj_priv->gtt_offset, alignment); 4069 obj_priv->gtt_offset, alignment);
4049 ret = i915_gem_object_unbind(obj); 4070 ret = i915_gem_object_unbind(obj);
4050 if (ret) 4071 if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
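The new i915_gem_execbuffer_move_to_gpu() above also has to serialize rings by hand: until hardware semaphores take over (hence the "XXX replace with semaphores" note), any object last written by a different ring forces a blocking wait before the batch runs. A minimal standalone sketch of that policy, with all i915 types replaced by hypothetical mock structs:

/* Standalone sketch of the cross-ring stall; every type here is a
 * mocked stand-in for the real i915 structures, not the driver API. */
#include <stdio.h>

struct ring { const char *name; };

struct gem_object {
	struct ring *last_ring;	/* ring that last wrote the object */
};

/* Stand-in for i915_gem_object_wait_rendering(): block until idle. */
static int wait_rendering(struct gem_object *obj)
{
	printf("stall: waiting for %s to finish\n", obj->last_ring->name);
	return 0;
}

/* Mirror of the loop in i915_gem_execbuffer_move_to_gpu(): without
 * hardware semaphores, objects written by another ring force a wait. */
static int sync_rings(struct ring *target, struct gem_object **objs, int n)
{
	for (int i = 0; i < n; i++) {
		struct gem_object *obj = objs[i];
		if (obj->last_ring && obj->last_ring != target) {
			int ret = wait_rendering(obj);
			if (ret)
				return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct ring render = { "render" }, blt = { "blt" };
	struct gem_object a = { &render }, b = { &blt }, *objs[] = { &a, &b };

	return sync_rings(&blt, objs, 2);	/* only 'a' causes a stall */
}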
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..454c064f8ef7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..48d8fd686ea9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-		fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..c8e005553310 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
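The g4x_dp_detect() fix above is a one-word bug: the function computed connector_status but returned the raw hot-plug bit it had tested, so callers saw a register mask where they expected a status enum. A standalone illustration of the same shape (names invented, not the driver's API):

#include <stdio.h>

enum status { STATUS_DISCONNECTED, STATUS_CONNECTED };

/* 'live' mimics a hot-plug status register; 'bit' is the lane to test. */
static enum status detect(unsigned live, unsigned bit)
{
	enum status status = STATUS_DISCONNECTED;

	if (live & bit)
		status = STATUS_CONNECTED;

	return status;	/* the bug was 'return bit;' -- a nonzero mask
			 * that callers would misread as a status code */
}

int main(void)
{
	printf("%d\n", detect(0x08, 0x08));	/* 1: connected */
	printf("%d\n", detect(0x00, 0x08));	/* 0: disconnected */
	return 0;
}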
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..21551fe74541 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..4324a326f98e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
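The two intel_lvds.c hunks move EDID validation to init time: the EDID property is only published once drm_add_edid_modes() has accepted the block, and a rejected EDID is freed so every later check of intel_lvds->edid sees NULL and falls back to the fixed panel mode. A standalone sketch of that validate-then-publish flow, with the DRM calls stubbed out for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for drm_get_edid()/drm_add_edid_modes(); invented for the
 * sketch, not the real DRM API. */
static char *get_edid(int good)
{
	return good ? strdup("edid-block") : NULL;
}

static int add_edid_modes(const char *edid)
{
	return edid && edid[0];	/* number of modes parsed */
}

int main(void)
{
	char *edid = get_edid(1);

	if (edid) {
		if (add_edid_modes(edid)) {
			printf("publish EDID property\n");
		} else {
			free(edid);	/* invalid: drop it now... */
			edid = NULL;	/* ...so later checks fall back */
		}
	}
	if (!edid)
		printf("no usable EDID, using fixed panel mode\n");

	free(edid);
	return 0;
}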
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
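The opregion fix is an error-path ordering bug: on failure the code unmapped opregion->header, but that field may not have been assigned yet; the local base returned by the mapping call is the only pointer guaranteed valid there. The safe shape is map-into-local, publish into the long-lived struct only on success, and clean up via the local on failure. A hedged sketch with malloc/free standing in for ioremap/iounmap:

#include <stdlib.h>

struct opregion { void *header; };

/* malloc/free stand in for ioremap/iounmap in this sketch. */
static int opregion_setup(struct opregion *op, int fail)
{
	void *base = malloc(4096);

	if (!base)
		return -1;
	if (fail) {
		/* unmap via the local; op->header was never assigned */
		free(base);
		return -1;
	}
	op->header = base;	/* publish only on success */
	return 0;
}

int main(void)
{
	struct opregion op = { 0 };
	return opregion_setup(&op, 0) ? 1 : (free(op.header), 0);
}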
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..b83306f9244b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
 	.name			= "blt ring",
 	.id			= RING_BLT,
 	.mmio_base		= BLT_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_ring_common,
+	.init			= blt_ring_init,
 	.write_tail		= ring_write_tail,
-	.flush			= gen6_ring_flush,
-	.add_request		= ring_add_request,
+	.flush			= blt_ring_flush,
+	.add_request		= blt_ring_add_request,
 	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= blt_ring_get_user_irq,
 	.user_irq_put		= blt_ring_put_user_irq,
 	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.cleanup		= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
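The SNB BLT workaround above keeps a pinned one-page batch whose only contents are MI_BATCH_BUFFER_END, and prepends MI_BATCH_BUFFER_START plus that page's GTT offset to every command sequence, so the first command the engine parses after each tail move is always a batch start. A standalone mock of the emit path (ring layout and command values invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define MI_BATCH_BUFFER_START	0x31000000u	/* values invented */
#define MI_FLUSH_DW		0x26000000u

struct ring {
	uint32_t buf[64];
	int tail;
	uint32_t wa_offset;	/* offset of the dummy batch, 0 = no w/a */
};

static void ring_emit(struct ring *r, uint32_t dw)
{
	r->buf[r->tail++] = dw;
}

/* Mirror of blt_ring_begin(): reserve space and, on affected steppings,
 * lead with a jump into the pinned MI_BATCH_BUFFER_END page. */
static void ring_begin(struct ring *r, int ndwords)
{
	if (r->wa_offset) {
		ring_emit(r, MI_BATCH_BUFFER_START);
		ring_emit(r, r->wa_offset);
	}
	(void)ndwords;	/* real code would also wait for ring space */
}

int main(void)
{
	struct ring r = { .wa_offset = 0x1000 };

	ring_begin(&r, 4);
	ring_emit(&r, MI_FLUSH_DW);
	printf("first dword: 0x%08x\n", r.buf[0]);	/* batch start */
	return 0;
}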
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
 				       struct drm_i915_gem_execbuffer2 *exec,
 				       struct drm_clip_rect *cliprects,
 				       uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
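The two header hunks give each ring an opaque private slot plus an optional cleanup callback, the usual C vtable pattern for per-instance driver state: only rings that allocated something (here the BLT workaround page) install a cleanup, and the common teardown calls it blindly when present. A minimal sketch of the pattern outside the driver:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	const char *name;
	void (*cleanup)(struct ring *ring);	/* optional */
	void *private;				/* per-instance state */
};

static void blt_cleanup(struct ring *ring)
{
	free(ring->private);
	ring->private = NULL;
}

/* Common teardown: the callback is invoked only if the ring set one. */
static void ring_teardown(struct ring *ring)
{
	if (ring->cleanup)
		ring->cleanup(ring);
	printf("%s torn down\n", ring->name);
}

int main(void)
{
	struct ring render = { .name = "render" };
	struct ring blt = { .name = "blt", .cleanup = blt_cleanup,
			    .private = malloc(4096) };

	ring_teardown(&render);	/* no-op cleanup */
	ring_teardown(&blt);	/* frees the workaround page */
	return 0;
}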
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec050..488c36c8f5e6 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
 		case 0: /* D1 vblank */
 			if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
 		case 0: /* D2 vblank */
 			if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
 		case 0: /* D3 vblank */
 			if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 2);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
 		case 0: /* D4 vblank */
 			if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 3);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
 		case 0: /* D5 vblank */
 			if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
 		case 0: /* D6 vblank */
 			if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
 				wake_up(&rdev->irq.vblank_queue);
 				disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D6 vblank\n");
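Each vblank hunk sets rdev->pm.vblank_sync before waking the vblank queue: the power-management code sleeps until a vblank has genuinely occurred, and without a flag written by the interrupt handler a woken waiter cannot tell a real vblank from a spurious wakeup. The classic flag-plus-waitqueue shape, sketched standalone with pthreads (the radeon types are mocked):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int vblank_sync;	/* stand-in for rdev->pm.vblank_sync */

static void *irq_handler(void *arg)
{
	pthread_mutex_lock(&lock);
	vblank_sync = 1;		/* set the condition first... */
	pthread_cond_broadcast(&wq);	/* ...then wake the waiters */
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, irq_handler, NULL);
	pthread_mutex_lock(&lock);
	while (!vblank_sync)		/* spurious wakeups re-check */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	puts("saw a real vblank");
	return pthread_join(irq, NULL);
}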
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a68927..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe9..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a3..0f806cc7dc75 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
 
-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec9039..87ead090c7d5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	if (crev < 2)
 		return false;
 
-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 			continue;
 
+		router.ddc_valid = false;
+		router.cd_valid = false;
 		for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 			uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							usDeviceTag));
 
 			} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-				router.valid = false;
 				for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+					u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 					if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 						ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 							(ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						ATOM_I2C_RECORD *i2c_record;
 						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 						ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+						ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 						ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 							(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 							(ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 								ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 									record;
-								router.valid = true;
-								router.mux_type = ddc_path->ucMuxType;
-								router.mux_control_pin = ddc_path->ucMuxControlPin;
-								router.mux_state = ddc_path->ucMuxState[enum_id];
+								router.ddc_valid = true;
+								router.ddc_mux_type = ddc_path->ucMuxType;
+								router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+								router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+								break;
+							case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+								cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+									record;
+								router.cd_valid = true;
+								router.cd_mux_type = cd_path->ucMuxType;
+								router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+								router.cd_mux_state = cd_path->ucMuxState[enum_id];
 								break;
 							}
 							record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;
 
-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;
 
 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)
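Two fixes hide in the atombios hunks: router.ddc_valid/cd_valid are now reset per display path instead of once globally, so one path's router cannot leak into the next, and the inner router scan indexed asObjects[] with the outer loop's j instead of its own k, a classic nested-loop copy-paste bug. A tiny standalone demo of why the index mix-up matters:

#include <stdio.h>

int main(void)
{
	int outer[3] = { 10, 20, 30 };
	int inner[3] = { 1, 2, 3 };

	for (int j = 0; j < 3; j++) {
		for (int k = 0; k < 3; k++) {
			/* the buggy form read inner[j]: the same element
			 * three times per pass, never inner[k] as intended */
			printf("path %d sees router %d\n",
			       outer[j], inner[k]);
		}
	}
	return 0;
}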
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02ee..fe6c74780f18 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 			continue;
 
 		if (priority == true) {
-			DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 			conflict->status = connector_status_disconnected;
 			radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 		} else {
-			DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-			DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+			DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+			DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 			current_status = connector_status_disconnected;
 		}
 		break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 			    mode->vdisplay == native_mode->vdisplay) {
 				*native_mode = *mode;
 				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-				DRM_INFO("Determined LVDS native mode details from EDID\n");
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 				break;
 			}
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			radeon_connector->shared_ddc = true;
 			shared_ddc = true;
 		}
-		if (radeon_connector->router_bus && router->valid &&
+		if (radeon_connector->router_bus && router->ddc_valid &&
 		    (radeon_connector->router.router_id == router->router_id)) {
 			radeon_connector->shared_ddc = false;
 			shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69c..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2e..f678257c42e6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
 	if (radeon_encoder->active_device &
 	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 	radeon_atom_output_lock(encoder, true);
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+	/* select the clock/data port if it uses a router */
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+	}
+
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
 }
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
 	switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
 		break;
 	}
 
+disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
 		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
 			r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353b..daacb281dfaf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
 	 */
 	if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 		/* good news we believe it's a lockup */
-		WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+		WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+		     fence->seq, seq);
 		/* FIXME: what should we do ? marking everyone
 		 * as signaled for now
 		 */
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..0cfbba02c4d0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
 	};
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
 	if (ret == 2)
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
 			 addr, val);
 }
 
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
 	u8 val;
 
-	if (!radeon_connector->router.valid)
+	if (!radeon_connector->router.ddc_valid)
 		return;
 
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, &val);
-	val &= radeon_connector->router.mux_control_pin;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, val);
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, &val);
-	val &= radeon_connector->router.mux_control_pin;
-	val |= radeon_connector->router.mux_state;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, val);
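The mux fix above is a read-modify-write inversion: ucMuxControlPin is a mask of the bits the router owns, so selecting a port must clear those bits (val &= ~mask) before OR-ing in the new state; the old val &= mask did the opposite, wiping every bit except the mux field. A standalone demonstration with invented register values:

#include <stdio.h>

int main(void)
{
	unsigned char reg = 0xA5;	/* invented register contents */
	unsigned char mask = 0x0C;	/* bits owned by the mux */
	unsigned char state = 0x08;	/* desired mux setting */

	unsigned char buggy = (unsigned char)((reg & mask) | state);
	unsigned char fixed = (unsigned char)((reg & ~mask) | state);

	printf("buggy: 0x%02x (other bits lost)\n", buggy);	/* 0x0c */
	printf("fixed: 0x%02x (other bits kept)\n", fixed);	/* 0xa9 */
	return 0;
}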
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d070..680f57644e86 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -401,13 +401,19 @@ struct radeon_hpd {
 };
 
 struct radeon_router {
-	bool valid;
 	u32 router_id;
 	struct radeon_i2c_bus_rec i2c_info;
 	u8 i2c_addr;
-	u8 mux_type;
-	u8 mux_control_pin;
-	u8 mux_state;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
 };
 
 struct radeon_connector {
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
 				u8 slave_addr,
 				u8 addr,
 				u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab91416410..8eb183466015 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 		type = ttm_bo_type_device;
 	}
 	*bo_ptr = NULL;
+
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	bo->gobj = gobj;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
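Moving the retry label in radeon_bo_create() matters because a failed placement pass can end up destroying the buffer object through its destroy callback; jumping back to a retry point below the allocation would then reuse freed memory. With the label above kzalloc(), every retry starts from a fresh allocation. A sketch of the corrected loop shape (the allocation and try_place helpers here are invented stand-ins, not the radeon API):

#include <stdlib.h>

struct bo { int domain; };

/* Stand-in for the placement attempt; may consume (free) the object
 * on failure, as the ttm path can via its destroy callback. */
static int try_place(struct bo *bo, int domain)
{
	if (domain == 0) {
		free(bo);	/* object is gone on failure */
		return -1;
	}
	bo->domain = domain;
	return 0;
}

static struct bo *bo_create(int domain)
{
	struct bo *bo;

retry:	/* allocate afresh on every attempt; never reuse a freed bo */
	bo = calloc(1, sizeof(*bo));
	if (!bo)
		return NULL;
	if (try_place(bo, domain)) {
		if (domain == 0) {
			domain = 1;	/* fall back to another domain */
			goto retry;
		}
		return NULL;
	}
	return bo;
}

int main(void)
{
	struct bo *bo = bo_create(0);
	free(bo);
	return 0;
}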
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317e..01c2c736a1da 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
 			     gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a06..5512e4e5e636 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "RS400 GART already initialized.\n");
+		WARN(1, "RS400 GART already initialized\n");
 		return 0;
 	}
 	/* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4e..f1c6e02c2e6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RS600 GART already initialized.\n");
+		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c7131..3ca77dc03915 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
+
+	/*
+	 * Make processes trying to reserve really pick it up.
+	 */
+	smp_mb__after_atomic_dec();
 	wake_up_all(&bo->event_queue);
 }
 
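The barrier added after atomic_set() orders the store that releases the reservation against the waiter wakeup: without it, another CPU woken out of its wait could still read the stale reserved value and go back to sleep. In portable C11 the same idea is a release store paired with an acquire load; a hedged standalone sketch (a simplification of the kernel primitives, not a drop-in equivalent):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int reserved = 1;

static void *release_thread(void *arg)
{
	/* release ordering: everything before this store is visible
	 * to any thread that acquire-loads reserved == 0 */
	atomic_store_explicit(&reserved, 0, memory_order_release);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, release_thread, NULL);
	while (atomic_load_explicit(&reserved, memory_order_acquire))
		;	/* a real waiter would sleep on a waitqueue */
	puts("reservation observed released");
	return pthread_join(t, NULL);
}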
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver;
-	void *sync_obj;
+	void *sync_obj = NULL;
 	void *sync_obj_arg;
 	int put_count;
 	int ret;
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		spin_lock(&glob->lru_lock);
 	}
 queue:
-	sync_obj = bo->sync_obj;
-	sync_obj_arg = bo->sync_obj_arg;
 	driver = bdev->driver;
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	sync_obj_arg = bo->sync_obj_arg;
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bo->lock);
 
-	if (sync_obj)
+	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+		driver->sync_obj_unref(&sync_obj);
+	}
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
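The sync_obj change is a lifetime fix: the fence pointer is copied out of the bo and then used after both locks are dropped, so the copy must hold its own reference (sync_obj_ref under the lock, sync_obj_unref after the flush) or the fence could be freed underneath the flush. A minimal refcount sketch with mocked types standing in for the ttm driver hooks:

#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; };

static struct fence *fence_ref(struct fence *f)  { f->refs++; return f; }
static void fence_unref(struct fence **f)
{
	if (--(*f)->refs == 0)
		free(*f);
	*f = NULL;
}

int main(void)
{
	struct fence *bo_fence = calloc(1, sizeof(*bo_fence));
	struct fence *sync_obj = NULL;

	bo_fence->refs = 1;

	/* "under the lock": take a private reference before publishing */
	if (bo_fence)
		sync_obj = fence_ref(bo_fence);

	/* "lock dropped": even if the bo drops its reference now... */
	fence_unref(&bo_fence);

	if (sync_obj) {
		printf("flush fence, refs=%d\n", sync_obj->refs);	/* 1 */
		fence_unref(&sync_obj);	/* ...ours kept it alive */
	}
	return 0;
}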
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
 
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		spin_lock(&glob->lru_lock);
-		if (list_empty(&man->lru)) {
-			spin_unlock(&glob->lru_lock);
-			break;
-		}
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
 					  no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	int i;
+	BUG_ON((placement->fpfn || placement->lpfn) &&
+	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
-	if (placement->fpfn || placement->lpfn) {
-		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-			printk(KERN_ERR TTM_PFX "Page number range to small "
-				"Need %lu pages, range is [%u, %u]\n",
-				bo->mem.num_pages, placement->fpfn,
-				placement->lpfn);
-			return -EINVAL;
-		}
-	}
-	for (i = 0; i < placement->num_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
-	for (i = 0; i < placement->num_busy_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
 	return 0;
 }
 
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1341 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1342 struct ttm_mem_type_manager *man;
1371 1343
1372 if (type >= TTM_NUM_MEM_TYPES) { 1344 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1345 man = &bdev->man[type];
1378 if (man->has_type) { 1346 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1347
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1348 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1349 if (ret)
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1352
1390 ret = 0; 1353 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1354 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1355 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1356 if (ret)
1401 return ret; 1357 return ret;
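These ttm_bo_init_mm() hunks turn caller-contract violations (an out-of-range memory type, double initialization) from -EINVAL returns with printk noise into BUG_ON() assertions: such arguments indicate a driver bug, not a runtime condition to recover from. The shape of that trade-off, in a sketch:

    #include <linux/bug.h>
    #include <linux/types.h>

    #define MY_NUM_TYPES 8

    static bool my_has_type[MY_NUM_TYPES];

    static void my_init_type(unsigned int type)
    {
            /* Programmer errors: fail loudly instead of returning -EINVAL. */
            BUG_ON(type >= MY_NUM_TYPES);
            BUG_ON(my_has_type[type]);
            my_has_type[type] = true;
    }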
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c891..038e947d00f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
31#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h" 33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h> 34#include "drm_mm.h"
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> 36#include <linux/spinlock.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h> 37#include <linux/module.h>
40 38
39/**
40 * Currently we use a spinlock for the lock, but a mutex *may* be
41 * more appropriate to reduce scheduling latency if the range manager
42 * ends up with very fragmented allocation patterns.
43 */
44
45struct ttm_range_manager {
46 struct drm_mm mm;
47 spinlock_t lock;
48};
49
41static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
42 struct ttm_buffer_object *bo, 51 struct ttm_buffer_object *bo,
43 struct ttm_placement *placement, 52 struct ttm_placement *placement,
44 struct ttm_mem_reg *mem) 53 struct ttm_mem_reg *mem)
45{ 54{
46 struct ttm_bo_global *glob = man->bdev->glob; 55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
47 struct drm_mm *mm = man->priv; 56 struct drm_mm *mm = &rman->mm;
48 struct drm_mm_node *node = NULL; 57 struct drm_mm_node *node = NULL;
49 unsigned long lpfn; 58 unsigned long lpfn;
50 int ret; 59 int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
57 if (unlikely(ret)) 66 if (unlikely(ret))
58 return ret; 67 return ret;
59 68
60 spin_lock(&glob->lru_lock); 69 spin_lock(&rman->lock);
61 node = drm_mm_search_free_in_range(mm, 70 node = drm_mm_search_free_in_range(mm,
62 mem->num_pages, mem->page_alignment, 71 mem->num_pages, mem->page_alignment,
63 placement->fpfn, lpfn, 1); 72 placement->fpfn, lpfn, 1);
64 if (unlikely(node == NULL)) { 73 if (unlikely(node == NULL)) {
65 spin_unlock(&glob->lru_lock); 74 spin_unlock(&rman->lock);
66 return 0; 75 return 0;
67 } 76 }
68 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 77 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
69 mem->page_alignment, 78 mem->page_alignment,
70 placement->fpfn, 79 placement->fpfn,
71 lpfn); 80 lpfn);
72 spin_unlock(&glob->lru_lock); 81 spin_unlock(&rman->lock);
73 } while (node == NULL); 82 } while (node == NULL);
74 83
75 mem->mm_node = node; 84 mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
80static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, 89static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
81 struct ttm_mem_reg *mem) 90 struct ttm_mem_reg *mem)
82{ 91{
83 struct ttm_bo_global *glob = man->bdev->glob; 92 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
84 93
85 if (mem->mm_node) { 94 if (mem->mm_node) {
86 spin_lock(&glob->lru_lock); 95 spin_lock(&rman->lock);
87 drm_mm_put_block(mem->mm_node); 96 drm_mm_put_block(mem->mm_node);
88 spin_unlock(&glob->lru_lock); 97 spin_unlock(&rman->lock);
89 mem->mm_node = NULL; 98 mem->mm_node = NULL;
90 } 99 }
91} 100}
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
93static int ttm_bo_man_init(struct ttm_mem_type_manager *man, 102static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
94 unsigned long p_size) 103 unsigned long p_size)
95{ 104{
96 struct drm_mm *mm; 105 struct ttm_range_manager *rman;
97 int ret; 106 int ret;
98 107
99 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 108 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
100 if (!mm) 109 if (!rman)
101 return -ENOMEM; 110 return -ENOMEM;
102 111
103 ret = drm_mm_init(mm, 0, p_size); 112 ret = drm_mm_init(&rman->mm, 0, p_size);
104 if (ret) { 113 if (ret) {
105 kfree(mm); 114 kfree(rman);
106 return ret; 115 return ret;
107 } 116 }
108 117
109 man->priv = mm; 118 spin_lock_init(&rman->lock);
119 man->priv = rman;
110 return 0; 120 return 0;
111} 121}
112 122
113static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) 123static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
114{ 124{
115 struct ttm_bo_global *glob = man->bdev->glob; 125 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
116 struct drm_mm *mm = man->priv; 126 struct drm_mm *mm = &rman->mm;
117 int ret = 0;
118 127
119 spin_lock(&glob->lru_lock); 128 spin_lock(&rman->lock);
120 if (drm_mm_clean(mm)) { 129 if (drm_mm_clean(mm)) {
121 drm_mm_takedown(mm); 130 drm_mm_takedown(mm);
122 kfree(mm); 131 spin_unlock(&rman->lock);
132 kfree(rman);
123 man->priv = NULL; 133 man->priv = NULL;
124 } else 134 return 0;
125 ret = -EBUSY; 135 }
126 spin_unlock(&glob->lru_lock); 136 spin_unlock(&rman->lock);
127 return ret; 137 return -EBUSY;
128} 138}
129 139
130static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, 140static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
131 const char *prefix) 141 const char *prefix)
132{ 142{
133 struct ttm_bo_global *glob = man->bdev->glob; 143 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
134 struct drm_mm *mm = man->priv;
135 144
136 spin_lock(&glob->lru_lock); 145 spin_lock(&rman->lock);
137 drm_mm_debug_table(mm, prefix); 146 drm_mm_debug_table(&rman->mm, prefix);
138 spin_unlock(&glob->lru_lock); 147 spin_unlock(&rman->lock);
139} 148}
140 149
141const struct ttm_mem_type_manager_func ttm_bo_manager_func = { 150const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
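The rewrite above stops borrowing the global LRU lock for allocator state: each range manager now bundles its drm_mm with a private spinlock behind man->priv. The encapsulation pattern, reduced to a sketch with placeholder fields:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_range_manager {
            unsigned long next_free;        /* allocator state ... */
            spinlock_t lock;                /* ... guarded by its own lock */
    };

    static int my_man_init(void **priv)
    {
            struct my_range_manager *rman = kzalloc(sizeof(*rman), GFP_KERNEL);

            if (!rman)
                    return -ENOMEM;
            spin_lock_init(&rman->lock);
            *priv = rman;   /* private state; no global lock contention */
            return 0;
    }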
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548b..af789dc869b9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
440 return ret; 440 return ret;
441 441
442 ret = be->func->bind(be, bo_mem); 442 ret = be->func->bind(be, bo_mem);
443 if (ret) { 443 if (unlikely(ret != 0))
444 printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
445 return ret; 444 return ret;
446 }
447 445
448 ttm->state = tt_bound; 446 ttm->state = tt_bound;
449 447
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62c..3e038a394c51 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - 235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
236 first_pfn + 1; 236 first_pfn + 1;
237 237
238 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 238 vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
239 if (NULL == vsg->pages)
239 return -ENOMEM; 240 return -ENOMEM;
240 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
241 down_read(&current->mm->mmap_sem); 241 down_read(&current->mm->mmap_sem);
242 ret = get_user_pages(current, current->mm, 242 ret = get_user_pages(current, current->mm,
243 (unsigned long)xfer->mem_addr, 243 (unsigned long)xfer->mem_addr,
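The via_dmablit change folds vmalloc() plus memset() into vzalloc(), which allocates and zeroes in one call. The idiom:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static struct page **alloc_page_array(unsigned long num_pages)
    {
            /* replaces vmalloc() followed by memset(..., 0, ...) */
            return vzalloc(sizeof(struct page *) * num_pages);
    }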
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f2..76954e3528c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
691 691
692 fence_rep.error = ret; 692 fence_rep.error = ret;
693 fence_rep.fence_seq = (uint64_t) sequence; 693 fence_rep.fence_seq = (uint64_t) sequence;
694 fence_rep.pad64 = 0;
694 695
695 user_fence_rep = (struct drm_vmw_fence_rep __user *) 696 user_fence_rep = (struct drm_vmw_fence_rep __user *)
696 (unsigned long)arg->fence_rep; 697 (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7d..cceeb42789b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
720 &vmw_vram_ne_placement, 720 &vmw_vram_ne_placement,
721 false, &vmw_dmabuf_bo_free); 721 false, &vmw_dmabuf_bo_free);
722 vmw_overlay_resume_all(dev_priv); 722 vmw_overlay_resume_all(dev_priv);
723 if (unlikely(ret != 0))
724 vfbs->buffer = NULL;
723 725
724 return ret; 726 return ret;
725} 727}
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
730 struct vmw_framebuffer_surface *vfbs = 732 struct vmw_framebuffer_surface *vfbs =
731 vmw_framebuffer_to_vfbs(&vfb->base); 733 vmw_framebuffer_to_vfbs(&vfb->base);
732 734
735 if (unlikely(vfbs->buffer == NULL))
736 return 0;
737
733 bo = &vfbs->buffer->base; 738 bo = &vfbs->buffer->base;
734 ttm_bo_unref(&bo); 739 ttm_bo_unref(&bo);
735 vfbs->buffer = NULL; 740 vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5bc..29113c9b26a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
557 return -EINVAL; 557 return -EINVAL;
558 } 558 }
559 559
560 dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); 560 dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
561 561
562 if (!dev_priv->ldu_priv) 562 if (!dev_priv->ldu_priv)
563 return -ENOMEM; 563 return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d5..f1a52f9e7298 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
585 return -ENOSYS; 585 return -ENOSYS;
586 } 586 }
587 587
588 overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); 588 overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
589 if (!overlay) 589 if (!overlay)
590 return -ENOMEM; 590 return -ENOMEM;
591 591
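Both vmwgfx hunks fix the same bug: kmalloc() takes (size, flags), and passing GFP_KERNEL in the size slot allocates a buffer of the wrong length that only works by accident. The corrected form, sketched:

    #include <linux/slab.h>

    struct my_priv {
            int placeholder;
    };

    static struct my_priv *my_alloc(void)
    {
            /* argument order is (size, flags), not (flags, size) */
            return kmalloc(sizeof(struct my_priv), GFP_KERNEL);
    }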
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423567cf..0e1edd7311ff 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@ config STUB_POULSBO
3 depends on PCI 3 depends on PCI
4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
6 select VIDEO_OUTPUT_CONTROL if ACPI
7 select BACKLIGHT_CLASS_DEVICE if ACPI
8 select INPUT if ACPI
6 select ACPI_VIDEO if ACPI 9 select ACPI_VIDEO if ACPI
7 help 10 help
8 Choose this option if you have a system that has Intel GMA500 11 Choose this option if you have a system that has Intel GMA500
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21fc1a89..86d822aa9bbf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client,
178{ 178{
179 struct ad7414_data *data; 179 struct ad7414_data *data;
180 int conf; 180 int conf;
181 int err = 0; 181 int err;
182 182
183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | 183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
184 I2C_FUNC_SMBUS_READ_WORD_DATA)) 184 I2C_FUNC_SMBUS_READ_WORD_DATA)) {
185 err = -EOPNOTSUPP;
185 goto exit; 186 goto exit;
187 }
186 188
187 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); 189 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
188 if (!data) { 190 if (!data) {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e775717abb7..87d92a56a939 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client,
1286 init_completion(&data->auto_update_stop); 1286 init_completion(&data->auto_update_stop);
1287 data->auto_update = kthread_run(adt7470_update_thread, client, 1287 data->auto_update = kthread_run(adt7470_update_thread, client,
1288 dev_name(data->hwmon_dev)); 1288 dev_name(data->hwmon_dev));
1289 if (IS_ERR(data->auto_update)) 1289 if (IS_ERR(data->auto_update)) {
1290 err = PTR_ERR(data->auto_update);
1290 goto exit_unregister; 1291 goto exit_unregister;
1292 }
1291 1293
1292 return 0; 1294 return 0;
1293 1295
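The ad7414 and adt7470 fixes close the same hole: jumping to the cleanup label without first loading the error code, so probe could return success (or a stale value) on failure. The pattern, with hypothetical helpers standing in for the drivers' real calls:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/sched.h>

    extern bool my_check_functionality(void);          /* assumption */
    extern struct task_struct *my_start_thread(void);  /* assumption */

    static int my_probe(void)
    {
            struct task_struct *task;
            int err;

            if (!my_check_functionality()) {
                    err = -EOPNOTSUPP;      /* set the code before the goto */
                    goto exit;
            }

            task = my_start_thread();
            if (IS_ERR(task)) {
                    err = PTR_ERR(task);    /* propagate the real error */
                    goto exit;
            }

            return 0;

    exit:
            return err;
    }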
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index aa701a183707..f141a1de519c 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -376,10 +376,6 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
376 } 376 }
377 } 377 }
378 378
379 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
380 if (err)
381 goto err_free_gpio;
382
383 fan_data->num_ctrl = num_ctrl; 379 fan_data->num_ctrl = num_ctrl;
384 fan_data->ctrl = ctrl; 380 fan_data->ctrl = ctrl;
385 fan_data->num_speed = pdata->num_speed; 381 fan_data->num_speed = pdata->num_speed;
@@ -391,6 +387,10 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
391 goto err_free_gpio; 387 goto err_free_gpio;
392 } 388 }
393 389
390 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
391 if (err)
392 goto err_free_gpio;
393
394 return 0; 394 return 0;
395 395
396err_free_gpio: 396err_free_gpio:
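The gpio-fan reorder matters because sysfs attributes are live as soon as sysfs_create_group() returns: userspace could call into the driver before num_ctrl and ctrl were set. The fixed ordering, as the hunks now read:

    /* populate all driver state first ... */
    fan_data->num_ctrl = num_ctrl;
    fan_data->ctrl = ctrl;

    /* ... and only then expose it to userspace */
    err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
    if (err)
            goto err_free_gpio;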
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 267626178678..4b50601027d3 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -82,7 +82,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
82 val = i2c_smbus_read_byte_data(client, i); 82 val = i2c_smbus_read_byte_data(client, i);
83 if (unlikely(val < 0)) { 83 if (unlikely(val < 0)) {
84 dev_dbg(dev, 84 dev_dbg(dev,
85 "Failed to read ADC value: error %d", 85 "Failed to read ADC value: error %d\n",
86 val); 86 val);
87 ret = ERR_PTR(val); 87 ret = ERR_PTR(val);
88 goto abort; 88 goto abort;
@@ -230,8 +230,7 @@ static int ltc4261_probe(struct i2c_client *client,
230 return -ENODEV; 230 return -ENODEV;
231 231
232 if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) { 232 if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
233 dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n", 233 dev_err(&client->dev, "Failed to read status register\n");
234 adapter->id, client->addr, LTC4261_STATUS);
235 return -ENODEV; 234 return -ENODEV;
236 } 235 }
237 236
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5ea1bce9689..8aba0ba57de5 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -130,8 +130,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
130 130
131#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 131#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
132 case AF_INET6: 132 case AF_INET6:
133 read_lock(&dev_base_lock); 133 rcu_read_lock();
134 for_each_netdev(&init_net, dev) { 134 for_each_netdev_rcu(&init_net, dev) {
135 if (ipv6_chk_addr(&init_net, 135 if (ipv6_chk_addr(&init_net,
136 &((struct sockaddr_in6 *) addr)->sin6_addr, 136 &((struct sockaddr_in6 *) addr)->sin6_addr,
137 dev, 1)) { 137 dev, 1)) {
@@ -139,7 +139,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
139 break; 139 break;
140 } 140 }
141 } 141 }
142 read_unlock(&dev_base_lock); 142 rcu_read_unlock();
143 break; 143 break;
144#endif 144#endif
145 } 145 }
@@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
200 src_in->sin_family = AF_INET; 200 src_in->sin_family = AF_INET;
201 src_in->sin_addr.s_addr = rt->rt_src; 201 src_in->sin_addr.s_addr = rt->rt_src;
202 202
203 if (rt->idev->dev->flags & IFF_LOOPBACK) { 203 if (rt->dst.dev->flags & IFF_LOOPBACK) {
204 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); 204 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
205 if (!ret) 205 if (!ret)
206 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); 206 memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
208 } 208 }
209 209
210 /* If the device does ARP internally, return 'done' */ 210 /* If the device does ARP internally, return 'done' */
211 if (rt->idev->dev->flags & IFF_NOARP) { 211 if (rt->dst.dev->flags & IFF_NOARP) {
212 rdma_copy_addr(addr, rt->idev->dev, NULL); 212 rdma_copy_addr(addr, rt->dst.dev, NULL);
213 goto put; 213 goto put;
214 } 214 }
215 215
216 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev); 216 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
217 if (!neigh || !(neigh->nud_state & NUD_VALID)) { 217 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
218 neigh_event_send(rt->dst.neighbour, NULL); 218 neigh_event_send(rt->dst.neighbour, NULL);
219 ret = -ENODATA; 219 ret = -ENODATA;
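Both InfiniBand hunks swap the dev_base_lock read lock for an RCU walk of the netdev list, the preferred read-side idiom now that the list is RCU-protected. In isolation:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <net/net_namespace.h>

    static void walk_netdevs(void)
    {
            struct net_device *dev;

            rcu_read_lock();
            for_each_netdev_rcu(&init_net, dev) {
                    /* inspect dev; no sleeping inside the RCU section */
            }
            rcu_read_unlock();
    }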
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bf3e20cd0298..4e55a28fb6d4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -848,8 +848,8 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
848 goto out; 848 goto out;
849 } 849 }
850 850
851 read_lock(&dev_base_lock); 851 rcu_read_lock();
852 for_each_netdev(&init_net, tmp) { 852 for_each_netdev_rcu(&init_net, tmp) {
853 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { 853 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
854 gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 854 gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
855 vid = rdma_vlan_dev_vlan_id(tmp); 855 vid = rdma_vlan_dev_vlan_id(tmp);
@@ -884,7 +884,7 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
884 } 884 }
885 } 885 }
886 } 886 }
887 read_unlock(&dev_base_lock); 887 rcu_read_unlock();
888 888
889 for (i = 0; i < 128; ++i) 889 for (i = 0; i < 128; ++i)
890 if (!hits[i]) { 890 if (!hits[i]) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d092ef9291da..7f26ca6ecf75 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -74,6 +74,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
74 * dev->event_lock held and interrupts disabled. 74 * dev->event_lock held and interrupts disabled.
75 */ 75 */
76static void input_pass_event(struct input_dev *dev, 76static void input_pass_event(struct input_dev *dev,
77 struct input_handler *src_handler,
77 unsigned int type, unsigned int code, int value) 78 unsigned int type, unsigned int code, int value)
78{ 79{
79 struct input_handler *handler; 80 struct input_handler *handler;
@@ -92,6 +93,15 @@ static void input_pass_event(struct input_dev *dev,
92 continue; 93 continue;
93 94
94 handler = handle->handler; 95 handler = handle->handler;
96
97 /*
98 * If this is the handler that injected this
99 * particular event we want to skip it to avoid
100 * filters firing again and again.
101 */
102 if (handler == src_handler)
103 continue;
104
95 if (!handler->filter) { 105 if (!handler->filter) {
96 if (filtered) 106 if (filtered)
97 break; 107 break;
@@ -121,7 +131,7 @@ static void input_repeat_key(unsigned long data)
121 if (test_bit(dev->repeat_key, dev->key) && 131 if (test_bit(dev->repeat_key, dev->key) &&
122 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 132 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
123 133
124 input_pass_event(dev, EV_KEY, dev->repeat_key, 2); 134 input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
125 135
126 if (dev->sync) { 136 if (dev->sync) {
127 /* 137 /*
@@ -130,7 +140,7 @@ static void input_repeat_key(unsigned long data)
130 * Otherwise assume that the driver will send 140 * Otherwise assume that the driver will send
131 * SYN_REPORT once it's done. 141 * SYN_REPORT once it's done.
132 */ 142 */
133 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 143 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
134 } 144 }
135 145
136 if (dev->rep[REP_PERIOD]) 146 if (dev->rep[REP_PERIOD])
@@ -163,6 +173,7 @@ static void input_stop_autorepeat(struct input_dev *dev)
163#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 173#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
164 174
165static int input_handle_abs_event(struct input_dev *dev, 175static int input_handle_abs_event(struct input_dev *dev,
176 struct input_handler *src_handler,
166 unsigned int code, int *pval) 177 unsigned int code, int *pval)
167{ 178{
168 bool is_mt_event; 179 bool is_mt_event;
@@ -206,13 +217,15 @@ static int input_handle_abs_event(struct input_dev *dev,
206 /* Flush pending "slot" event */ 217 /* Flush pending "slot" event */
207 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 218 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
208 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); 219 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
209 input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); 220 input_pass_event(dev, src_handler,
221 EV_ABS, ABS_MT_SLOT, dev->slot);
210 } 222 }
211 223
212 return INPUT_PASS_TO_HANDLERS; 224 return INPUT_PASS_TO_HANDLERS;
213} 225}
214 226
215static void input_handle_event(struct input_dev *dev, 227static void input_handle_event(struct input_dev *dev,
228 struct input_handler *src_handler,
216 unsigned int type, unsigned int code, int value) 229 unsigned int type, unsigned int code, int value)
217{ 230{
218 int disposition = INPUT_IGNORE_EVENT; 231 int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +278,8 @@ static void input_handle_event(struct input_dev *dev,
265 278
266 case EV_ABS: 279 case EV_ABS:
267 if (is_event_supported(code, dev->absbit, ABS_MAX)) 280 if (is_event_supported(code, dev->absbit, ABS_MAX))
268 disposition = input_handle_abs_event(dev, code, &value); 281 disposition = input_handle_abs_event(dev, src_handler,
282 code, &value);
269 283
270 break; 284 break;
271 285
@@ -323,7 +337,7 @@ static void input_handle_event(struct input_dev *dev,
323 dev->event(dev, type, code, value); 337 dev->event(dev, type, code, value);
324 338
325 if (disposition & INPUT_PASS_TO_HANDLERS) 339 if (disposition & INPUT_PASS_TO_HANDLERS)
326 input_pass_event(dev, type, code, value); 340 input_pass_event(dev, src_handler, type, code, value);
327} 341}
328 342
329/** 343/**
@@ -352,7 +366,7 @@ void input_event(struct input_dev *dev,
352 366
353 spin_lock_irqsave(&dev->event_lock, flags); 367 spin_lock_irqsave(&dev->event_lock, flags);
354 add_input_randomness(type, code, value); 368 add_input_randomness(type, code, value);
355 input_handle_event(dev, type, code, value); 369 input_handle_event(dev, NULL, type, code, value);
356 spin_unlock_irqrestore(&dev->event_lock, flags); 370 spin_unlock_irqrestore(&dev->event_lock, flags);
357 } 371 }
358} 372}
@@ -382,7 +396,8 @@ void input_inject_event(struct input_handle *handle,
382 rcu_read_lock(); 396 rcu_read_lock();
383 grab = rcu_dereference(dev->grab); 397 grab = rcu_dereference(dev->grab);
384 if (!grab || grab == handle) 398 if (!grab || grab == handle)
385 input_handle_event(dev, type, code, value); 399 input_handle_event(dev, handle->handler,
400 type, code, value);
386 rcu_read_unlock(); 401 rcu_read_unlock();
387 402
388 spin_unlock_irqrestore(&dev->event_lock, flags); 403 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +610,10 @@ static void input_dev_release_keys(struct input_dev *dev)
595 for (code = 0; code <= KEY_MAX; code++) { 610 for (code = 0; code <= KEY_MAX; code++) {
596 if (is_event_supported(code, dev->keybit, KEY_MAX) && 611 if (is_event_supported(code, dev->keybit, KEY_MAX) &&
597 __test_and_clear_bit(code, dev->key)) { 612 __test_and_clear_bit(code, dev->key)) {
598 input_pass_event(dev, EV_KEY, code, 0); 613 input_pass_event(dev, NULL, EV_KEY, code, 0);
599 } 614 }
600 } 615 }
601 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 616 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
602 } 617 }
603} 618}
604 619
@@ -873,9 +888,9 @@ int input_set_keycode(struct input_dev *dev,
873 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 888 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
874 __test_and_clear_bit(old_keycode, dev->key)) { 889 __test_and_clear_bit(old_keycode, dev->key)) {
875 890
876 input_pass_event(dev, EV_KEY, old_keycode, 0); 891 input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
877 if (dev->sync) 892 if (dev->sync)
878 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 893 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
879 } 894 }
880 895
881 out: 896 out:
@@ -1565,8 +1580,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1565 } \ 1580 } \
1566 } while (0) 1581 } while (0)
1567 1582
1568#ifdef CONFIG_PM 1583static void input_dev_toggle(struct input_dev *dev, bool activate)
1569static void input_dev_reset(struct input_dev *dev, bool activate)
1570{ 1584{
1571 if (!dev->event) 1585 if (!dev->event)
1572 return; 1586 return;
@@ -1580,12 +1594,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1580 } 1594 }
1581} 1595}
1582 1596
1597/**
1598 * input_reset_device() - reset/restore the state of input device
1599 * @dev: input device whose state needs to be reset
1600 *
1601 * This function tries to reset the state of an opened input device and
1602 * bring its internal state and the state of the hardware in sync with each other.
1603 * We mark all keys as released, restore LED state, repeat rate, etc.
1604 */
1605void input_reset_device(struct input_dev *dev)
1606{
1607 mutex_lock(&dev->mutex);
1608
1609 if (dev->users) {
1610 input_dev_toggle(dev, true);
1611
1612 /*
1613 * Keys that have been pressed at suspend time are unlikely
1614 * to be still pressed when we resume.
1615 */
1616 spin_lock_irq(&dev->event_lock);
1617 input_dev_release_keys(dev);
1618 spin_unlock_irq(&dev->event_lock);
1619 }
1620
1621 mutex_unlock(&dev->mutex);
1622}
1623EXPORT_SYMBOL(input_reset_device);
1624
1625#ifdef CONFIG_PM
1583static int input_dev_suspend(struct device *dev) 1626static int input_dev_suspend(struct device *dev)
1584{ 1627{
1585 struct input_dev *input_dev = to_input_dev(dev); 1628 struct input_dev *input_dev = to_input_dev(dev);
1586 1629
1587 mutex_lock(&input_dev->mutex); 1630 mutex_lock(&input_dev->mutex);
1588 input_dev_reset(input_dev, false); 1631
1632 if (input_dev->users)
1633 input_dev_toggle(input_dev, false);
1634
1589 mutex_unlock(&input_dev->mutex); 1635 mutex_unlock(&input_dev->mutex);
1590 1636
1591 return 0; 1637 return 0;
@@ -1595,18 +1641,7 @@ static int input_dev_resume(struct device *dev)
1595{ 1641{
1596 struct input_dev *input_dev = to_input_dev(dev); 1642 struct input_dev *input_dev = to_input_dev(dev);
1597 1643
1598 mutex_lock(&input_dev->mutex); 1644 input_reset_device(input_dev);
1599 input_dev_reset(input_dev, true);
1600
1601 /*
1602 * Keys that have been pressed at suspend time are unlikely
1603 * to be still pressed when we resume.
1604 */
1605 spin_lock_irq(&input_dev->event_lock);
1606 input_dev_release_keys(input_dev);
1607 spin_unlock_irq(&input_dev->event_lock);
1608
1609 mutex_unlock(&input_dev->mutex);
1610 1645
1611 return 0; 1646 return 0;
1612} 1647}
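The input core refactor splits the old input_dev_reset() into input_dev_toggle() plus an exported input_reset_device(), so callers outside the suspend/resume path can resync a device too. Usage is a single call, assuming my_input_dev is an opened struct input_dev:

    #include <linux/input.h>

    /* restore LEDs/repeat rate and release keys recorded as pressed,
     * e.g. after a reset left hardware state stale */
    input_reset_device(my_input_dev);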
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b92d1cd5cba1..af45d275f686 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
4 * I2C QWERTY Keypad and IO Expander 4 * I2C QWERTY Keypad and IO Expander
5 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 5 * Bugs: Enter bugs at http://blackfin.uclinux.org/
6 * 6 *
7 * Copyright (C) 2008-2009 Analog Devices Inc. 7 * Copyright (C) 2008-2010 Analog Devices Inc.
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
@@ -24,29 +24,6 @@
24 24
25#include <linux/i2c/adp5588.h> 25#include <linux/i2c/adp5588.h>
26 26
27 /* Configuration Register1 */
28#define AUTO_INC (1 << 7)
29#define GPIEM_CFG (1 << 6)
30#define OVR_FLOW_M (1 << 5)
31#define INT_CFG (1 << 4)
32#define OVR_FLOW_IEN (1 << 3)
33#define K_LCK_IM (1 << 2)
34#define GPI_IEN (1 << 1)
35#define KE_IEN (1 << 0)
36
37/* Interrupt Status Register */
38#define CMP2_INT (1 << 5)
39#define CMP1_INT (1 << 4)
40#define OVR_FLOW_INT (1 << 3)
41#define K_LCK_INT (1 << 2)
42#define GPI_INT (1 << 1)
43#define KE_INT (1 << 0)
44
45/* Key Lock and Event Counter Register */
46#define K_LCK_EN (1 << 6)
47#define LCK21 0x30
48#define KEC 0xF
49
50/* Key Event Register xy */ 27/* Key Event Register xy */
51#define KEY_EV_PRESSED (1 << 7) 28#define KEY_EV_PRESSED (1 << 7)
52#define KEY_EV_MASK (0x7F) 29#define KEY_EV_MASK (0x7F)
@@ -55,10 +32,6 @@
55 32
56#define KEYP_MAX_EVENT 10 33#define KEYP_MAX_EVENT 10
57 34
58#define MAXGPIO 18
59#define ADP_BANK(offs) ((offs) >> 3)
60#define ADP_BIT(offs) (1u << ((offs) & 0x7))
61
62/* 35/*
63 * Early pre 4.0 Silicon required to delay readout by at least 25ms, 36 * Early pre 4.0 Silicon required to delay readout by at least 25ms,
64 * since the Event Counter Register updated 25ms after the interrupt 37 * since the Event Counter Register updated 25ms after the interrupt
@@ -75,7 +48,7 @@ struct adp5588_kpad {
75 const struct adp5588_gpi_map *gpimap; 48 const struct adp5588_gpi_map *gpimap;
76 unsigned short gpimapsize; 49 unsigned short gpimapsize;
77#ifdef CONFIG_GPIOLIB 50#ifdef CONFIG_GPIOLIB
78 unsigned char gpiomap[MAXGPIO]; 51 unsigned char gpiomap[ADP5588_MAXGPIO];
79 bool export_gpio; 52 bool export_gpio;
80 struct gpio_chip gc; 53 struct gpio_chip gc;
81 struct mutex gpio_lock; /* Protect cached dir, dat_out */ 54 struct mutex gpio_lock; /* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
103static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) 76static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
104{ 77{
105 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 78 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
106 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 79 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
107 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 80 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
108 81
109 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); 82 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
110} 83}
@@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
113 unsigned off, int val) 86 unsigned off, int val)
114{ 87{
115 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 88 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
116 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 89 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
117 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 90 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
118 91
119 mutex_lock(&kpad->gpio_lock); 92 mutex_lock(&kpad->gpio_lock);
120 93
@@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
132static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) 105static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
133{ 106{
134 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 107 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
135 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 108 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
136 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 109 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
137 int ret; 110 int ret;
138 111
139 mutex_lock(&kpad->gpio_lock); 112 mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
150 unsigned off, int val) 123 unsigned off, int val)
151{ 124{
152 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 125 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
153 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 126 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
154 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 127 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
155 int ret; 128 int ret;
156 129
157 mutex_lock(&kpad->gpio_lock); 130 mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
176static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, 149static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
177 const struct adp5588_kpad_platform_data *pdata) 150 const struct adp5588_kpad_platform_data *pdata)
178{ 151{
179 bool pin_used[MAXGPIO]; 152 bool pin_used[ADP5588_MAXGPIO];
180 int n_unused = 0; 153 int n_unused = 0;
181 int i; 154 int i;
182 155
@@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
191 for (i = 0; i < kpad->gpimapsize; i++) 164 for (i = 0; i < kpad->gpimapsize; i++)
192 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; 165 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
193 166
194 for (i = 0; i < MAXGPIO; i++) 167 for (i = 0; i < ADP5588_MAXGPIO; i++)
195 if (!pin_used[i]) 168 if (!pin_used[i])
196 kpad->gpiomap[n_unused++] = i; 169 kpad->gpiomap[n_unused++] = i;
197 170
@@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
234 return error; 207 return error;
235 } 208 }
236 209
237 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 210 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
238 kpad->dat_out[i] = adp5588_read(kpad->client, 211 kpad->dat_out[i] = adp5588_read(kpad->client,
239 GPIO_DAT_OUT1 + i); 212 GPIO_DAT_OUT1 + i);
240 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); 213 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work)
318 291
319 status = adp5588_read(client, INT_STAT); 292 status = adp5588_read(client, INT_STAT);
320 293
321 if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ 294 if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
322 dev_err(&client->dev, "Event Overflow Error\n"); 295 dev_err(&client->dev, "Event Overflow Error\n");
323 296
324 if (status & KE_INT) { 297 if (status & ADP5588_KE_INT) {
325 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; 298 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
326 if (ev_cnt) { 299 if (ev_cnt) {
327 adp5588_report_events(kpad, ev_cnt); 300 adp5588_report_events(kpad, ev_cnt);
328 input_sync(kpad->input); 301 input_sync(kpad->input);
@@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
360 if (pdata->en_keylock) { 333 if (pdata->en_keylock) {
361 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); 334 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
362 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); 335 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
363 ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); 336 ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
364 } 337 }
365 338
366 for (i = 0; i < KEYP_MAX_EVENT; i++) 339 for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
384 } 357 }
385 358
386 if (gpio_data) { 359 if (gpio_data) {
387 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 360 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
388 int pull_mask = gpio_data->pullup_dis_mask; 361 int pull_mask = gpio_data->pullup_dis_mask;
389 362
390 ret |= adp5588_write(client, GPIO_PULL1 + i, 363 ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client)
392 } 365 }
393 } 366 }
394 367
395 ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | 368 ret |= adp5588_write(client, INT_STAT,
396 OVR_FLOW_INT | K_LCK_INT | 369 ADP5588_CMP2_INT | ADP5588_CMP1_INT |
397 GPI_INT | KE_INT); /* Status is W1C */ 370 ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
371 ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
398 372
399 ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); 373 ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
374 ADP5588_OVR_FLOW_IEN |
375 ADP5588_KE_IEN);
400 376
401 if (ret < 0) { 377 if (ret < 0) {
402 dev_err(&client->dev, "Write Error\n"); 378 dev_err(&client->dev, "Write Error\n");
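The adp5588 cleanup moves the register constants into the shared <linux/i2c/adp5588.h> header under an ADP5588_ prefix. Judging by the deleted local copies, the bank/bit helpers split a GPIO offset into a register index and a bit mask:

    #define ADP5588_MAXGPIO    18
    #define ADP5588_BANK(offs) ((offs) >> 3)          /* 8 GPIOs per register */
    #define ADP5588_BIT(offs)  (1u << ((offs) & 0x7))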
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8623f4..11478eb2c27d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@ static bool atkbd_extra;
63module_param_named(extra, atkbd_extra, bool, 0); 63module_param_named(extra, atkbd_extra, bool, 0);
64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); 64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
65 65
66static bool atkbd_terminal;
67module_param_named(terminal, atkbd_terminal, bool, 0);
68MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
69
66/* 70/*
67 * Scancode to keycode tables. These are just the default setting, and 71 * Scancode to keycode tables. These are just the default setting, and
68 * are loadable via a userland utility. 72 * are loadable via a userland utility.
@@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
136#define ATKBD_CMD_ENABLE 0x00f4 140#define ATKBD_CMD_ENABLE 0x00f4
137#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ 141#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
138#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ 142#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
139#define ATKBD_CMD_SETALL_MBR 0x00fa 143#define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */
144#define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */
140#define ATKBD_CMD_RESET_BAT 0x02ff 145#define ATKBD_CMD_RESET_BAT 0x02ff
141#define ATKBD_CMD_RESEND 0x00fe 146#define ATKBD_CMD_RESEND 0x00fe
142#define ATKBD_CMD_EX_ENABLE 0x10ea 147#define ATKBD_CMD_EX_ENABLE 0x10ea
@@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
764 } 769 }
765 } 770 }
766 771
772 if (atkbd_terminal) {
773 ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
774 return 3;
775 }
776
767 if (target_set != 3) 777 if (target_set != 3)
768 return 2; 778 return 2;
769 779
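The new atkbd.terminal option follows the file's existing module-parameter pattern: a bool wired up with module_param_named(), then checked once in the setup path to issue ATKBD_CMD_SETALL_MB and return early. The parameter half, generically:

    #include <linux/module.h>

    static bool my_feature;
    module_param_named(feature, my_feature, bool, 0);
    MODULE_PARM_DESC(feature, "enable the optional behaviour at load time");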
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc0532a..d1583aea1721 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
127 idev->id.product = 0x0001; 127 idev->id.product = 0x0001;
128 idev->id.version = 0x0100; 128 idev->id.version = 0x0100;
129 129
130 input_set_drvdata(idev, lp);
131
132 ret = input_register_device(idev);
133 if (ret) {
134 dev_err(&client->dev, "input_register_device() failed\n");
135 goto fail_register;
136 }
137
138 lp->laststate = read_state(lp); 130 lp->laststate = read_state(lp);
139 131
140 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, 132 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
142 DRV_NAME, lp); 134 DRV_NAME, lp);
143 if (ret) { 135 if (ret) {
144 dev_err(&client->dev, "IRQ %d is not free\n", client->irq); 136 dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
145 goto fail_irq; 137 goto fail_free_device;
138 }
139
140 ret = input_register_device(idev);
141 if (ret) {
142 dev_err(&client->dev, "input_register_device() failed\n");
143 goto fail_free_irq;
146 } 144 }
147 145
148 i2c_set_clientdata(client, lp); 146 i2c_set_clientdata(client, lp);
149 return 0; 147 return 0;
150 148
151 fail_irq: 149 fail_free_irq:
152 input_unregister_device(idev); 150 free_irq(client->irq, lp);
153 fail_register: 151 fail_free_device:
154 input_set_drvdata(idev, NULL);
155 input_free_device(idev); 152 input_free_device(idev);
156 fail_allocate: 153 fail_allocate:
157 kfree(lp); 154 kfree(lp);
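The pcf8574 fix registers the input device only after the IRQ is in place and makes the error labels unwind in exact reverse order of acquisition. The corrected tail of the probe, with the IRQ flags assumed (they are elided in the hunk shown):

    ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
                               IRQF_TRIGGER_LOW | IRQF_ONESHOT, /* assumed */
                               DRV_NAME, lp);
    if (ret)
            goto fail_free_device;

    ret = input_register_device(idev);
    if (ret)
            goto fail_free_irq;

    return 0;

    fail_free_irq:
            free_irq(client->irq, lp);      /* undo in reverse order */
    fail_free_device:
            input_free_device(idev);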
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad7416b24..a5475b577086 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
351 }, 351 },
352 }, 352 },
353 { 353 {
354 /*
 355 * Most (all?) VAIOs do not have external PS/2 ports, nor
 356 * do they implement active multiplexing properly, and
357 * MUX discovery usually messes up keyboard/touchpad.
358 */
359 .matches = {
360 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
361 DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
362 },
363 },
364 {
354 /* Amoi M636/A737 */ 365 /* Amoi M636/A737 */
355 .matches = { 366 .matches = {
356 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), 367 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a9399a36..d94f7e9aa997 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
229 229
230 err = input_register_device(acecad->input); 230 err = input_register_device(acecad->input);
231 if (err) 231 if (err)
232 goto fail2; 232 goto fail3;
233 233
234 usb_set_intfdata(intf, acecad); 234 usb_set_intfdata(intf, acecad);
235 235
236 return 0; 236 return 0;
237 237
238 fail3: usb_free_urb(acecad->irq);
238 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); 239 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
239 fail1: input_free_device(input_dev); 240 fail1: input_free_device(input_dev);
240 kfree(acecad); 241 kfree(acecad);
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index e90db8870b6c..bc0529ac88a1 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -420,7 +420,7 @@ enable_hwirq(struct inf_hw *hw)
420 break; 420 break;
421 case INF_NICCY: 421 case INF_NICCY:
422 val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); 422 val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
423 val |= NICCY_IRQ_ENABLE;; 423 val |= NICCY_IRQ_ENABLE;
424 outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); 424 outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
425 break; 425 break;
426 case INF_SCT_1: 426 case INF_SCT_1:
@@ -924,7 +924,7 @@ setup_instance(struct inf_hw *card)
924 mISDNipac_init(&card->ipac, card); 924 mISDNipac_init(&card->ipac, card);
925 925
926 if (card->ipac.isac.dch.dev.Bprotocols == 0) 926 if (card->ipac.isac.dch.dev.Bprotocols == 0)
927 goto error_setup;; 927 goto error_setup;
928 928
929 err = mISDN_register_device(&card->ipac.isac.dch.dev, 929 err = mISDN_register_device(&card->ipac.isac.dch.dev,
930 &card->pdev->dev, card->name); 930 &card->pdev->dev, card->name);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 38eb31439a73..d13fa5b119f5 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -264,7 +264,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
264 while (noc) { 264 while (noc) {
265 val = le16_to_cpu(*sp++); 265 val = le16_to_cpu(*sp++);
266 *mp++ = val >> 8; 266 *mp++ = val >> 8;
267 *mp++ = val & 0xFF;; 267 *mp++ = val & 0xFF;
268 noc--; 268 noc--;
269 } 269 }
270 spin_lock_irqsave(isar->hwlock, flags); 270 spin_lock_irqsave(isar->hwlock, flags);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index fcf4ed1cb4b9..0e66af1decd4 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -314,7 +314,7 @@ hdlc_fill_fifo(struct BCState *bcs)
314 bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME; 314 bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
315 } 315 }
316 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) 316 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
317 debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len); 317 debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
318 p = bcs->tx_skb->data; 318 p = bcs->tx_skb->data;
319 ptr = (u_int *)p; 319 ptr = (u_int *)p;
320 skb_pull(bcs->tx_skb, count); 320 skb_pull(bcs->tx_skb, count);
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index f150330b5a23..37e685eafd24 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -65,7 +65,7 @@ hisax_findcard(int driverid)
65 return (struct IsdnCardState *) 0; 65 return (struct IsdnCardState *) 0;
66} 66}
67 67
68static void 68static __attribute__((format(printf, 3, 4))) void
69link_debug(struct Channel *chanp, int direction, char *fmt, ...) 69link_debug(struct Channel *chanp, int direction, char *fmt, ...)
70{ 70{
71 va_list args; 71 va_list args;
@@ -1068,7 +1068,7 @@ init_d_st(struct Channel *chanp)
1068 return 0; 1068 return 0;
1069} 1069}
1070 1070
1071static void 1071static __attribute__((format(printf, 2, 3))) void
1072callc_debug(struct FsmInst *fi, char *fmt, ...) 1072callc_debug(struct FsmInst *fi, char *fmt, ...)
1073{ 1073{
1074 va_list args; 1074 va_list args;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b133378d4dc9..c110f8679bab 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1917,7 +1917,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1917#ifdef CONFIG_PCI 1917#ifdef CONFIG_PCI
1918#include <linux/pci.h> 1918#include <linux/pci.h>
1919 1919
1920static struct pci_device_id hisax_pci_tbl[] __devinitdata = { 1920static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
1921#ifdef CONFIG_HISAX_FRITZPCI 1921#ifdef CONFIG_HISAX_FRITZPCI
1922 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, 1922 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
1923#endif 1923#endif
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 7250f56a5246..a16459a1332c 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -292,7 +292,7 @@ hfc_fill_fifo(struct BCState *bcs)
292 } 292 }
293 count = GetFreeFifoBytes_B(bcs); 293 count = GetFreeFifoBytes_B(bcs);
294 if (cs->debug & L1_DEB_HSCX) 294 if (cs->debug & L1_DEB_HSCX)
295 debugl1(cs, "hfc_fill_fifo %d count(%ld/%d),%lx", 295 debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
296 bcs->channel, bcs->tx_skb->len, 296 bcs->channel, bcs->tx_skb->len,
297 count, current->state); 297 count, current->state);
298 if (count < bcs->tx_skb->len) { 298 if (count < bcs->tx_skb->len) {
@@ -719,7 +719,7 @@ hfc_fill_dfifo(struct IsdnCardState *cs)
719 } 719 }
720 count = GetFreeFifoBytes_D(cs); 720 count = GetFreeFifoBytes_D(cs);
721 if (cs->debug & L1_DEB_ISAC) 721 if (cs->debug & L1_DEB_ISAC)
722 debugl1(cs, "hfc_fill_Dfifo count(%ld/%d)", 722 debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
723 cs->tx_skb->len, count); 723 cs->tx_skb->len, count);
724 if (count < cs->tx_skb->len) { 724 if (count < cs->tx_skb->len) {
725 if (cs->debug & L1_DEB_ISAC) 725 if (cs->debug & L1_DEB_ISAC)
diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c
index b1f6481e1193..626f85df302b 100644
--- a/drivers/isdn/hisax/hfc_2bs0.c
+++ b/drivers/isdn/hisax/hfc_2bs0.c
@@ -282,7 +282,7 @@ hfc_fill_fifo(struct BCState *bcs)
282 count += cs->hw.hfc.fifosize; 282 count += cs->hw.hfc.fifosize;
283 } /* L1_MODE_TRANS */ 283 } /* L1_MODE_TRANS */
284 if (cs->debug & L1_DEB_HSCX) 284 if (cs->debug & L1_DEB_HSCX)
285 debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)", 285 debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
286 bcs->channel, bcs->tx_skb->len, 286 bcs->channel, bcs->tx_skb->len,
287 count); 287 count);
288 if (count < bcs->tx_skb->len) { 288 if (count < bcs->tx_skb->len) {
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 917cc84065bd..3147020d188b 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -550,7 +550,7 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
550 count += D_FIFO_SIZE; /* count now contains available bytes */ 550 count += D_FIFO_SIZE; /* count now contains available bytes */
551 551
552 if (cs->debug & L1_DEB_ISAC) 552 if (cs->debug & L1_DEB_ISAC)
553 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)", 553 debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
554 cs->tx_skb->len, count); 554 cs->tx_skb->len, count);
555 if (count < cs->tx_skb->len) { 555 if (count < cs->tx_skb->len) {
556 if (cs->debug & L1_DEB_ISAC) 556 if (cs->debug & L1_DEB_ISAC)
@@ -681,7 +681,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
681 count += B_FIFO_SIZE; /* count now contains available bytes */ 681 count += B_FIFO_SIZE; /* count now contains available bytes */
682 682
683 if (cs->debug & L1_DEB_HSCX) 683 if (cs->debug & L1_DEB_HSCX)
684 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx", 684 debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
685 bcs->channel, bcs->tx_skb->len, 685 bcs->channel, bcs->tx_skb->len,
686 count, current->state); 686 count, current->state);
687 687
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 5aa138eb0b3c..1235b7131ae1 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -179,7 +179,7 @@ write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans
179 count += fifo_size; /* count now contains available bytes */ 179 count += fifo_size; /* count now contains available bytes */
180 180
181 if (cs->debug & L1_DEB_ISAC_FIFO) 181 if (cs->debug & L1_DEB_ISAC_FIFO)
182 debugl1(cs, "hfcsx_write_fifo %d count(%ld/%d)", 182 debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
183 fifo, skb->len, count); 183 fifo, skb->len, count);
184 if (count < skb->len) { 184 if (count < skb->len) {
185 if (cs->debug & L1_DEB_ISAC_FIFO) 185 if (cs->debug & L1_DEB_ISAC_FIFO)
@@ -265,7 +265,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
265 count++; 265 count++;
266 266
267 if (cs->debug & L1_DEB_ISAC_FIFO) 267 if (cs->debug & L1_DEB_ISAC_FIFO)
268 debugl1(cs, "hfcsx_read_fifo %d count %ld)", 268 debugl1(cs, "hfcsx_read_fifo %d count %u)",
269 fifo, count); 269 fifo, count);
270 270
271 if ((count > fifo_size) || (count < 4)) { 271 if ((count > fifo_size) || (count < 4)) {
@@ -986,7 +986,7 @@ HFCSX_l1hw(struct PStack *st, int pr, void *arg)
986 default: 986 default:
987 spin_unlock_irqrestore(&cs->lock, flags); 987 spin_unlock_irqrestore(&cs->lock, flags);
988 if (cs->debug & L1_DEB_WARN) 988 if (cs->debug & L1_DEB_WARN)
989 debugl1(cs, "hfcsx_l1hw loop invalid %4lx", arg); 989 debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
990 return; 990 return;
991 } 991 }
992 cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */ 992 cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 32ab3924aa73..de1c669c7b13 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1286,7 +1286,9 @@ int jiftime(char *s, long mark);
1286 1286
1287int HiSax_command(isdn_ctrl * ic); 1287int HiSax_command(isdn_ctrl * ic);
1288int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb); 1288int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
1289__attribute__((format(printf, 3, 4)))
1289void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...); 1290void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
1291__attribute__((format(printf, 3, 0)))
1290void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args); 1292void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
1291void HiSax_reportcard(int cardnr, int sel); 1293void HiSax_reportcard(int cardnr, int sel);
1292int QuickHex(char *txt, u_char * p, int cnt); 1294int QuickHex(char *txt, u_char * p, int cnt);
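The HiSax annotations let GCC type-check the varargs debug helpers' format strings, which is what makes the %ld-to-%u fixes in the surrounding hunks mechanical. The attribute names the format-string argument and where the checked arguments start (0 for va_list variants):

    __attribute__((format(printf, 2, 3)))
    void debugl1(struct IsdnCardState *cs, char *fmt, ...);

    __attribute__((format(printf, 3, 0)))
    void VHiSax_putstatus(struct IsdnCardState *cs, char *head,
                          char *fmt, va_list args);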
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 751b25f2ff58..332104103e18 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -717,7 +717,7 @@ bch_mode(struct BCState *bcs, int mode, int bc)
717 717
718 bc = bc ? 1 : 0; // in case bc is greater than 1 718 bc = bc ? 1 : 0; // in case bc is greater than 1
719 if (cs->debug & L1_DEB_HSCX) 719 if (cs->debug & L1_DEB_HSCX)
720 debugl1(cs, "mode_bch() switch B-% mode %d chan %d", hscx, mode, bc); 720 debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
721 bcs->mode = mode; 721 bcs->mode = mode;
722 bcs->channel = bc; 722 bcs->channel = bc;
723 723
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914bded8c..d4cce337add2 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -189,7 +189,7 @@ ISARVersion(struct IsdnCardState *cs, char *s)
189static int 189static int
190isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf) 190isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
191{ 191{
192 int ret, size, cnt, debug; 192 int cfu_ret, ret, size, cnt, debug;
193 u_char len, nom, noc; 193 u_char len, nom, noc;
194 u_short sadr, left, *sp; 194 u_short sadr, left, *sp;
195 u_char __user *p = buf; 195 u_char __user *p = buf;
@@ -212,9 +212,10 @@ isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
212 cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO); 212 cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO);
213#endif 213#endif
214 214
215 if ((ret = copy_from_user(&size, p, sizeof(int)))) { 215 cfu_ret = copy_from_user(&size, p, sizeof(int));
216 printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", ret); 216 if (cfu_ret) {
217 return ret; 217 printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", cfu_ret);
218 return -EFAULT;
218 } 219 }
219 p += sizeof(int); 220 p += sizeof(int);
220 printk(KERN_DEBUG"isar_load_firmware size: %d\n", size); 221 printk(KERN_DEBUG"isar_load_firmware size: %d\n", size);
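copy_from_user() returns the number of bytes left uncopied, not an errno, so the old code leaked a positive count to its caller; the fix returns -EFAULT on any shortfall. The idiom:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int read_size_from_user(const void __user *p, int *size)
    {
            if (copy_from_user(size, p, sizeof(int)))
                    return -EFAULT; /* nonzero == bytes not copied */
            return 0;
    }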
@@ -953,7 +954,7 @@ isar_pump_statev_modem(struct BCState *bcs, u_char devt) {
953 break; 954 break;
954 case PSEV_GSTN_CLR: 955 case PSEV_GSTN_CLR:
955 if (cs->debug & L1_DEB_HSCX) 956 if (cs->debug & L1_DEB_HSCX)
956 debugl1(cs, "pump stev GSTN CLEAR", devt); 957 debugl1(cs, "pump stev GSTN CLEAR");
957 break; 958 break;
958 default: 959 default:
959 if (cs->debug & L1_DEB_HSCX) 960 if (cs->debug & L1_DEB_HSCX)
@@ -1268,7 +1269,7 @@ isar_int_main(struct IsdnCardState *cs)
1268static void 1269static void
1269ftimer_handler(struct BCState *bcs) { 1270ftimer_handler(struct BCState *bcs) {
1270 if (bcs->cs->debug) 1271 if (bcs->cs->debug)
1271 debugl1(bcs->cs, "ftimer flags %04x", 1272 debugl1(bcs->cs, "ftimer flags %04lx",
1272 bcs->Flag); 1273 bcs->Flag);
1273 test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag); 1274 test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
1274 if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) { 1275 if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
@@ -1427,8 +1428,8 @@ modeisar(struct BCState *bcs, int mode, int bc)
1427 &bcs->hw.isar.reg->Flags)) 1428 &bcs->hw.isar.reg->Flags))
1428 bcs->hw.isar.dpath = 1; 1429 bcs->hw.isar.dpath = 1;
1429 else { 1430 else {
1430 printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n"); 1431 printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
1431 debugl1(cs, "isar modeisar analog funktions only with DP1"); 1432 debugl1(cs, "isar modeisar analog functions only with DP1");
1432 return(1); 1433 return(1);
1433 } 1434 }
1434 break; 1435 break;
@@ -1748,7 +1749,7 @@ isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
1748 struct BCState *bcs; 1749 struct BCState *bcs;
1749 1750
1750 if (cs->debug & L1_DEB_HSCX) 1751 if (cs->debug & L1_DEB_HSCX)
1751 debugl1(cs, "isar_auxcmd cmd/ch %x/%d", ic->command, ic->arg); 1752 debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
1752 switch (ic->command) { 1753 switch (ic->command) {
1753 case (ISDN_CMD_FAXCMD): 1754 case (ISDN_CMD_FAXCMD):
1754 bcs = cs->channel[ic->arg].bcs; 1755 bcs = cs->channel[ic->arg].bcs;
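
Besides the format fixes, the first isar.c hunk corrects a classic copy_from_user() mistake: the function returns the number of bytes it could NOT copy (0 on success), not a negative errno, so the old code handed a positive byte count back to callers expecting an errno. A sketch of the corrected pattern; read_user_int() is an illustrative helper, not from the patch:

    #include <linux/uaccess.h>

    static int read_user_int(const int __user *p, int *out)
    {
            /* copy_from_user() returns bytes left uncopied, so its raw
             * result must never be returned as an error code. */
            if (copy_from_user(out, p, sizeof(*out)))
                    return -EFAULT;
            return 0;
    }
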
diff --git a/drivers/isdn/hisax/isdnl1.h b/drivers/isdn/hisax/isdnl1.h
index 172ad4c8c961..425d86116f2b 100644
--- a/drivers/isdn/hisax/isdnl1.h
+++ b/drivers/isdn/hisax/isdnl1.h
@@ -21,6 +21,7 @@
21#define B_XMTBUFREADY 1 21#define B_XMTBUFREADY 1
22#define B_ACKPENDING 2 22#define B_ACKPENDING 2
23 23
24__attribute__((format(printf, 2, 3)))
24void debugl1(struct IsdnCardState *cs, char *fmt, ...); 25void debugl1(struct IsdnCardState *cs, char *fmt, ...);
25void DChannel_proc_xmt(struct IsdnCardState *cs); 26void DChannel_proc_xmt(struct IsdnCardState *cs);
26void DChannel_proc_rcv(struct IsdnCardState *cs); 27void DChannel_proc_rcv(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index fd0b643ab740..ad291f21b201 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -66,7 +66,7 @@ static char *strL3Event[] =
66 "EV_TIMEOUT", 66 "EV_TIMEOUT",
67}; 67};
68 68
69static void 69static __attribute__((format(printf, 2, 3))) void
70l3m_debug(struct FsmInst *fi, char *fmt, ...) 70l3m_debug(struct FsmInst *fi, char *fmt, ...)
71{ 71{
72 va_list args; 72 va_list args;
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index 5d7f0f2ff9b9..644891efc26f 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -254,7 +254,7 @@ static int make_raw_data(struct BCState *bcs) {
254 val >>= 1; 254 val >>= 1;
255 } 255 }
256 if (bcs->cs->debug & L1_DEB_HSCX) 256 if (bcs->cs->debug & L1_DEB_HSCX)
257 debugl1(bcs->cs,"tiger make_raw: in %ld out %d.%d", 257 debugl1(bcs->cs,"tiger make_raw: in %u out %d.%d",
258 bcs->tx_skb->len, s_cnt, bitcnt); 258 bcs->tx_skb->len, s_cnt, bitcnt);
259 if (bitcnt) { 259 if (bitcnt) {
260 while (8>bitcnt++) { 260 while (8>bitcnt++) {
@@ -361,7 +361,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
361 val >>= 1; 361 val >>= 1;
362 } 362 }
363 if (bcs->cs->debug & L1_DEB_HSCX) 363 if (bcs->cs->debug & L1_DEB_HSCX)
364 debugl1(bcs->cs,"tiger make_raw_56k: in %ld out %d.%d", 364 debugl1(bcs->cs,"tiger make_raw_56k: in %u out %d.%d",
365 bcs->tx_skb->len, s_cnt, bitcnt); 365 bcs->tx_skb->len, s_cnt, bitcnt);
366 if (bitcnt) { 366 if (bitcnt) {
367 while (8>bitcnt++) { 367 while (8>bitcnt++) {
@@ -612,7 +612,7 @@ void netjet_fill_dma(struct BCState *bcs)
612 if (!bcs->tx_skb) 612 if (!bcs->tx_skb)
613 return; 613 return;
614 if (bcs->cs->debug & L1_DEB_HSCX) 614 if (bcs->cs->debug & L1_DEB_HSCX)
615 debugl1(bcs->cs,"tiger fill_dma1: c%d %4x", bcs->channel, 615 debugl1(bcs->cs,"tiger fill_dma1: c%d %4lx", bcs->channel,
616 bcs->Flag); 616 bcs->Flag);
617 if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag)) 617 if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
618 return; 618 return;
@@ -625,7 +625,7 @@ void netjet_fill_dma(struct BCState *bcs)
625 return; 625 return;
626 }; 626 };
627 if (bcs->cs->debug & L1_DEB_HSCX) 627 if (bcs->cs->debug & L1_DEB_HSCX)
628 debugl1(bcs->cs,"tiger fill_dma2: c%d %4x", bcs->channel, 628 debugl1(bcs->cs,"tiger fill_dma2: c%d %4lx", bcs->channel,
629 bcs->Flag); 629 bcs->Flag);
630 if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) { 630 if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
631 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free); 631 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
@@ -667,7 +667,7 @@ void netjet_fill_dma(struct BCState *bcs)
667 write_raw(bcs, p, cnt); 667 write_raw(bcs, p, cnt);
668 } 668 }
669 if (bcs->cs->debug & L1_DEB_HSCX) 669 if (bcs->cs->debug & L1_DEB_HSCX)
670 debugl1(bcs->cs,"tiger fill_dma3: c%d %4x", bcs->channel, 670 debugl1(bcs->cs,"tiger fill_dma3: c%d %4lx", bcs->channel,
671 bcs->Flag); 671 bcs->Flag);
672} 672}
673 673
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index b7876b19fe73..44082637a09f 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -167,7 +167,8 @@ static struct FsmNode L1FnList[] __initdata =
167 {ST_L1_F8, EV_IND_RSY, l1_ignore}, 167 {ST_L1_F8, EV_IND_RSY, l1_ignore},
168}; 168};
169 169
170static void l1m_debug(struct FsmInst *fi, char *fmt, ...) 170static __attribute__((format(printf, 2, 3)))
171void l1m_debug(struct FsmInst *fi, char *fmt, ...)
171{ 172{
172 va_list args; 173 va_list args;
173 char buf[256]; 174 char buf[256];
@@ -269,7 +270,8 @@ static char *strDoutEvent[] =
269 "EV_DOUT_UNDERRUN", 270 "EV_DOUT_UNDERRUN",
270}; 271};
271 272
272static void dout_debug(struct FsmInst *fi, char *fmt, ...) 273static __attribute__((format(printf, 2, 3)))
274void dout_debug(struct FsmInst *fi, char *fmt, ...)
273{ 275{
274 va_list args; 276 va_list args;
275 char buf[256]; 277 char buf[256];
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 46048e55f241..d568689669f8 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -61,7 +61,7 @@ static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *
61static int isdn_concap_dl_connect_req(struct concap_proto *concap) 61static int isdn_concap_dl_connect_req(struct concap_proto *concap)
62{ 62{
63 struct net_device *ndev = concap -> net_dev; 63 struct net_device *ndev = concap -> net_dev;
64 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 64 isdn_net_local *lp = netdev_priv(ndev);
65 int ret; 65 int ret;
66 IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name); 66 IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name);
67 67
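
This hunk and the isdn_net.c/isdn_ppp.c ones that follow are pure cleanups: netdev_priv() returns void *, so casting its result to (isdn_net_local *) was redundant. The idiom in isolation, with struct my_priv standing in for the driver's private type:

    #include <linux/netdevice.h>

    struct my_priv {                /* illustrative private area */
            int state;
    };

    static int my_open(struct net_device *dev)
    {
            struct my_priv *priv = netdev_priv(dev);  /* no cast needed */

            priv->state = 1;
            return 0;
    }
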
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 26d44c3ca1d8..afeede7ee295 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -827,7 +827,7 @@ isdn_net_dial(void)
827void 827void
828isdn_net_hangup(struct net_device *d) 828isdn_net_hangup(struct net_device *d)
829{ 829{
830 isdn_net_local *lp = (isdn_net_local *) netdev_priv(d); 830 isdn_net_local *lp = netdev_priv(d);
831 isdn_ctrl cmd; 831 isdn_ctrl cmd;
832#ifdef CONFIG_ISDN_X25 832#ifdef CONFIG_ISDN_X25
833 struct concap_proto *cprot = lp->netdev->cprot; 833 struct concap_proto *cprot = lp->netdev->cprot;
@@ -1052,7 +1052,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
1052{ 1052{
1053 isdn_net_dev *nd; 1053 isdn_net_dev *nd;
1054 isdn_net_local *slp; 1054 isdn_net_local *slp;
1055 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1055 isdn_net_local *lp = netdev_priv(ndev);
1056 int retv = NETDEV_TX_OK; 1056 int retv = NETDEV_TX_OK;
1057 1057
1058 if (((isdn_net_local *) netdev_priv(ndev))->master) { 1058 if (((isdn_net_local *) netdev_priv(ndev))->master) {
@@ -1116,7 +1116,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
1116static void 1116static void
1117isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev) 1117isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
1118{ 1118{
1119 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1119 isdn_net_local *lp = netdev_priv(dev);
1120 if (!skb) 1120 if (!skb)
1121 return; 1121 return;
1122 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { 1122 if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
@@ -1131,7 +1131,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
1131 1131
1132static void isdn_net_tx_timeout(struct net_device * ndev) 1132static void isdn_net_tx_timeout(struct net_device * ndev)
1133{ 1133{
1134 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1134 isdn_net_local *lp = netdev_priv(ndev);
1135 1135
1136 printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate); 1136 printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
1137 if (!lp->dialstate){ 1137 if (!lp->dialstate){
@@ -1165,7 +1165,7 @@ static void isdn_net_tx_timeout(struct net_device * ndev)
1165static netdev_tx_t 1165static netdev_tx_t
1166isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1166isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167{ 1167{
1168 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1168 isdn_net_local *lp = netdev_priv(ndev);
1169#ifdef CONFIG_ISDN_X25 1169#ifdef CONFIG_ISDN_X25
1170 struct concap_proto * cprot = lp -> netdev -> cprot; 1170 struct concap_proto * cprot = lp -> netdev -> cprot;
1171/* At this point hard_start_xmit() passes control to the encapsulation 1171/* At this point hard_start_xmit() passes control to the encapsulation
@@ -1347,7 +1347,7 @@ isdn_net_close(struct net_device *dev)
1347static struct net_device_stats * 1347static struct net_device_stats *
1348isdn_net_get_stats(struct net_device *dev) 1348isdn_net_get_stats(struct net_device *dev)
1349{ 1349{
1350 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1350 isdn_net_local *lp = netdev_priv(dev);
1351 return &lp->stats; 1351 return &lp->stats;
1352} 1352}
1353 1353
@@ -1426,7 +1426,7 @@ isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
1426static int 1426static int
1427isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1427isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1428{ 1428{
1429 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1429 isdn_net_local *lp = netdev_priv(dev);
1430 unsigned long len = 0; 1430 unsigned long len = 0;
1431 unsigned long expires = 0; 1431 unsigned long expires = 0;
1432 int tmp = 0; 1432 int tmp = 0;
@@ -1493,7 +1493,7 @@ isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1493static int isdn_net_ioctl(struct net_device *dev, 1493static int isdn_net_ioctl(struct net_device *dev,
1494 struct ifreq *ifr, int cmd) 1494 struct ifreq *ifr, int cmd)
1495{ 1495{
1496 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1496 isdn_net_local *lp = netdev_priv(dev);
1497 1497
1498 switch (lp->p_encap) { 1498 switch (lp->p_encap) {
1499#ifdef CONFIG_ISDN_PPP 1499#ifdef CONFIG_ISDN_PPP
@@ -1786,7 +1786,7 @@ isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
1786static void 1786static void
1787isdn_net_receive(struct net_device *ndev, struct sk_buff *skb) 1787isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1788{ 1788{
1789 isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev); 1789 isdn_net_local *lp = netdev_priv(ndev);
1790 isdn_net_local *olp = lp; /* original 'lp' */ 1790 isdn_net_local *olp = lp; /* original 'lp' */
1791#ifdef CONFIG_ISDN_X25 1791#ifdef CONFIG_ISDN_X25
1792 struct concap_proto *cprot = lp -> netdev -> cprot; 1792 struct concap_proto *cprot = lp -> netdev -> cprot;
@@ -1800,7 +1800,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
1800 * handle master's statistics and hangup-timeout 1800 * handle master's statistics and hangup-timeout
1801 */ 1801 */
1802 ndev = lp->master; 1802 ndev = lp->master;
1803 lp = (isdn_net_local *) netdev_priv(ndev); 1803 lp = netdev_priv(ndev);
1804 lp->stats.rx_packets++; 1804 lp->stats.rx_packets++;
1805 lp->stats.rx_bytes += skb->len; 1805 lp->stats.rx_bytes += skb->len;
1806 } 1806 }
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index fe824e0cbb25..9e8162c80bb0 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1147,15 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
1147 } 1147 }
1148 1148
1149 if (is->pass_filter 1149 if (is->pass_filter
1150 && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0) { 1150 && sk_run_filter(skb, is->pass_filter) == 0) {
1151 if (is->debug & 0x2) 1151 if (is->debug & 0x2)
1152 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); 1152 printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
1153 kfree_skb(skb); 1153 kfree_skb(skb);
1154 return; 1154 return;
1155 } 1155 }
1156 if (!(is->active_filter 1156 if (!(is->active_filter
1157 && sk_run_filter(skb, is->active_filter, 1157 && sk_run_filter(skb, is->active_filter) == 0)) {
1158 is->active_len) == 0)) {
1159 if (is->debug & 0x2) 1158 if (is->debug & 0x2)
1160 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); 1159 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
1161 lp->huptimer = 0; 1160 lp->huptimer = 0;
@@ -1221,7 +1220,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
1221 struct ippp_struct *ipt,*ipts; 1220 struct ippp_struct *ipt,*ipts;
1222 int slot, retval = NETDEV_TX_OK; 1221 int slot, retval = NETDEV_TX_OK;
1223 1222
1224 mlp = (isdn_net_local *) netdev_priv(netdev); 1223 mlp = netdev_priv(netdev);
1225 nd = mlp->netdev; /* get master lp */ 1224 nd = mlp->netdev; /* get master lp */
1226 1225
1227 slot = mlp->ppp_slot; 1226 slot = mlp->ppp_slot;
@@ -1294,15 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
1294 } 1293 }
1295 1294
1296 if (ipt->pass_filter 1295 if (ipt->pass_filter
1297 && sk_run_filter(skb, ipt->pass_filter, ipt->pass_len) == 0) { 1296 && sk_run_filter(skb, ipt->pass_filter) == 0) {
1298 if (ipt->debug & 0x4) 1297 if (ipt->debug & 0x4)
1299 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); 1298 printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
1300 kfree_skb(skb); 1299 kfree_skb(skb);
1301 goto unlock; 1300 goto unlock;
1302 } 1301 }
1303 if (!(ipt->active_filter 1302 if (!(ipt->active_filter
1304 && sk_run_filter(skb, ipt->active_filter, 1303 && sk_run_filter(skb, ipt->active_filter) == 0)) {
1305 ipt->active_len) == 0)) {
1306 if (ipt->debug & 0x4) 1304 if (ipt->debug & 0x4)
1307 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); 1305 printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
1308 lp->huptimer = 0; 1306 lp->huptimer = 0;
@@ -1492,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
1492 } 1490 }
1493 1491
1494 drop |= is->pass_filter 1492 drop |= is->pass_filter
1495 && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0; 1493 && sk_run_filter(skb, is->pass_filter) == 0;
1496 drop |= is->active_filter 1494 drop |= is->active_filter
1497 && sk_run_filter(skb, is->active_filter, is->active_len) == 0; 1495 && sk_run_filter(skb, is->active_filter) == 0;
1498 1496
1499 skb_push(skb, IPPP_MAX_HEADER - 4); 1497 skb_push(skb, IPPP_MAX_HEADER - 4);
1500 return drop; 1498 return drop;
@@ -1985,7 +1983,7 @@ isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
1985{ 1983{
1986 struct ppp_stats __user *res = ifr->ifr_data; 1984 struct ppp_stats __user *res = ifr->ifr_data;
1987 struct ppp_stats t; 1985 struct ppp_stats t;
1988 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 1986 isdn_net_local *lp = netdev_priv(dev);
1989 1987
1990 if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats))) 1988 if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
1991 return -EFAULT; 1989 return -EFAULT;
@@ -2024,7 +2022,7 @@ isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2024{ 2022{
2025 int error=0; 2023 int error=0;
2026 int len; 2024 int len;
2027 isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev); 2025 isdn_net_local *lp = netdev_priv(dev);
2028 2026
2029 2027
2030 if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) 2028 if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
@@ -2091,7 +2089,7 @@ isdn_ppp_dial_slave(char *name)
2091 2089
2092 sdev = lp->slave; 2090 sdev = lp->slave;
2093 while (sdev) { 2091 while (sdev) {
2094 isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev); 2092 isdn_net_local *mlp = netdev_priv(sdev);
2095 if (!(mlp->flags & ISDN_NET_CONNECTED)) 2093 if (!(mlp->flags & ISDN_NET_CONNECTED))
2096 break; 2094 break;
2097 sdev = mlp->slave; 2095 sdev = mlp->slave;
@@ -2099,7 +2097,7 @@ isdn_ppp_dial_slave(char *name)
2099 if (!sdev) 2097 if (!sdev)
2100 return 2; 2098 return 2;
2101 2099
2102 isdn_net_dial_req((isdn_net_local *) netdev_priv(sdev)); 2100 isdn_net_dial_req(netdev_priv(sdev));
2103 return 0; 2101 return 0;
2104#else 2102#else
2105 return -1; 2103 return -1;
@@ -2122,7 +2120,7 @@ isdn_ppp_hangup_slave(char *name)
2122 2120
2123 sdev = lp->slave; 2121 sdev = lp->slave;
2124 while (sdev) { 2122 while (sdev) {
2125 isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev); 2123 isdn_net_local *mlp = netdev_priv(sdev);
2126 2124
2127 if (mlp->slave) { /* find last connected link in chain */ 2125 if (mlp->slave) { /* find last connected link in chain */
2128 isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp); 2126 isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
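
The sk_run_filter() hunks above track an API change in the socket-filter core: the filter checker already guarantees that every attached program ends in a return instruction, which made the run-time length argument (pass_len/active_len here) redundant, so it was dropped. The resulting call shape, excerpted from the receive path (a return of 0 from the pass filter means the frame is dropped):

    if (is->pass_filter && sk_run_filter(skb, is->pass_filter) == 0) {
            kfree_skb(skb);         /* pass filter said: drop */
            return;
    }
    if (!(is->active_filter &&
          sk_run_filter(skb, is->active_filter) == 0))
            lp->huptimer = 0;       /* frame counts as link activity */
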
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 2e847a90bad0..f2b5bab5e6a1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
1627static int __init icn_init(void) 1627static int __init icn_init(void)
1628{ 1628{
1629 char *p; 1629 char *p;
1630 char rev[10]; 1630 char rev[20];
1631 1631
1632 memset(&dev, 0, sizeof(icn_dev)); 1632 memset(&dev, 0, sizeof(icn_dev));
1633 dev.memaddr = (membase & 0x0ffc000); 1633 dev.memaddr = (membase & 0x0ffc000);
@@ -1637,9 +1637,10 @@ static int __init icn_init(void)
1637 spin_lock_init(&dev.devlock); 1637 spin_lock_init(&dev.devlock);
1638 1638
1639 if ((p = strchr(revision, ':'))) { 1639 if ((p = strchr(revision, ':'))) {
1640 strcpy(rev, p + 1); 1640 strncpy(rev, p + 1, 20);
1641 p = strchr(rev, '$'); 1641 p = strchr(rev, '$');
1642 *p = 0; 1642 if (p)
1643 *p = 0;
1643 } else 1644 } else
1644 strcpy(rev, " ??? "); 1645 strcpy(rev, " ??? ");
1645 printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev, 1646 printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
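
The icn_init() change fixes two problems at once: strcpy() could overrun the undersized rev[] buffer with an overlong revision string, and the strchr() result was dereferenced without a NULL check. A self-contained sketch of the hardened pattern (extract_revision() is an illustrative name; note that strncpy() does not guarantee termination, so the sketch adds it explicitly):

    #include <string.h>

    static void extract_revision(const char *revision, char *rev,
                                 size_t revlen)
    {
            const char *colon = strchr(revision, ':');
            char *dollar;

            if (colon) {
                    strncpy(rev, colon + 1, revlen - 1);
                    rev[revlen - 1] = '\0';   /* force termination */
                    dollar = strchr(rev, '$');
                    if (dollar)
                            *dollar = '\0';
            } else {
                    strncpy(rev, " ??? ", revlen - 1);
                    rev[revlen - 1] = '\0';
            }
    }
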
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index ac4aa18c632b..5cc7c001c523 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -99,12 +99,16 @@ static void
99l1m_debug(struct FsmInst *fi, char *fmt, ...) 99l1m_debug(struct FsmInst *fi, char *fmt, ...)
100{ 100{
101 struct layer1 *l1 = fi->userdata; 101 struct layer1 *l1 = fi->userdata;
102 struct va_format vaf;
102 va_list va; 103 va_list va;
103 104
104 va_start(va, fmt); 105 va_start(va, fmt);
105 printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev)); 106
106 vprintk(fmt, va); 107 vaf.fmt = fmt;
107 printk("\n"); 108 vaf.va = &va;
109
110 printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
111
108 va_end(va); 112 va_end(va);
109} 113}
110 114
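
This conversion, repeated in layer2.c and tei.c below, replaces a printk() + vprintk() + printk() sequence with one printk() using the %pV extension: struct va_format bundles the format string and its va_list so that prefix and message are emitted atomically and cannot be interleaved by concurrent printk output. The idiom in isolation; my_debug() is an illustrative wrapper:

    #include <linux/kernel.h>

    static __attribute__((format(printf, 2, 3)))
    void my_debug(const char *prefix, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list va;

            va_start(va, fmt);
            vaf.fmt = fmt;
            vaf.va = &va;
            printk(KERN_DEBUG "%s: %pV\n", prefix, &vaf);
            va_end(va);
    }
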
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index c97371788764..4ae75053c9d2 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -95,14 +95,20 @@ static void
95l2m_debug(struct FsmInst *fi, char *fmt, ...) 95l2m_debug(struct FsmInst *fi, char *fmt, ...)
96{ 96{
97 struct layer2 *l2 = fi->userdata; 97 struct layer2 *l2 = fi->userdata;
98 struct va_format vaf;
98 va_list va; 99 va_list va;
99 100
100 if (!(*debug & DEBUG_L2_FSM)) 101 if (!(*debug & DEBUG_L2_FSM))
101 return; 102 return;
103
102 va_start(va, fmt); 104 va_start(va, fmt);
103 printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei); 105
104 vprintk(fmt, va); 106 vaf.fmt = fmt;
105 printk("\n"); 107 vaf.va = &va;
108
109 printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
110 l2->sapi, l2->tei, &vaf);
111
106 va_end(va); 112 va_end(va);
107} 113}
108 114
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 1b85d9d27496..687c9b6264ab 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -79,14 +79,19 @@ static void
79da_debug(struct FsmInst *fi, char *fmt, ...) 79da_debug(struct FsmInst *fi, char *fmt, ...)
80{ 80{
81 struct manager *mgr = fi->userdata; 81 struct manager *mgr = fi->userdata;
82 struct va_format vaf;
82 va_list va; 83 va_list va;
83 84
84 if (!(*debug & DEBUG_L2_TEIFSM)) 85 if (!(*debug & DEBUG_L2_TEIFSM))
85 return; 86 return;
87
86 va_start(va, fmt); 88 va_start(va, fmt);
87 printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id); 89
88 vprintk(fmt, va); 90 vaf.fmt = fmt;
89 printk("\n"); 91 vaf.va = &va;
92
93 printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
94
90 va_end(va); 95 va_end(va);
91} 96}
92 97
@@ -223,14 +228,20 @@ static void
223tei_debug(struct FsmInst *fi, char *fmt, ...) 228tei_debug(struct FsmInst *fi, char *fmt, ...)
224{ 229{
225 struct teimgr *tm = fi->userdata; 230 struct teimgr *tm = fi->userdata;
231 struct va_format vaf;
226 va_list va; 232 va_list va;
227 233
228 if (!(*debug & DEBUG_L2_TEIFSM)) 234 if (!(*debug & DEBUG_L2_TEIFSM))
229 return; 235 return;
236
230 va_start(va, fmt); 237 va_start(va, fmt);
231 printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei); 238
232 vprintk(fmt, va); 239 vaf.fmt = fmt;
233 printk("\n"); 240 vaf.va = &va;
241
242 printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
243 tm->l2->sapi, tm->l2->tei, &vaf);
244
234 va_end(va); 245 va_end(va);
235} 246}
236 247
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d5192f..77b8fd20cd90 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -10,7 +10,7 @@ menuconfig NEW_LEDS
10if NEW_LEDS 10if NEW_LEDS
11 11
12config LEDS_CLASS 12config LEDS_CLASS
13 tristate "LED Class Support" 13 bool "LED Class Support"
14 help 14 help
15 This option enables the led sysfs class in /sys/class/leds. You'll 15 This option enables the led sysfs class in /sys/class/leds. You'll
16 need this to do anything useful with LEDs. If unsure, say N. 16 need this to do anything useful with LEDs. If unsure, say N.
@@ -176,6 +176,24 @@ config LEDS_LP3944
176 To compile this driver as a module, choose M here: the 176 To compile this driver as a module, choose M here: the
177 module will be called leds-lp3944. 177 module will be called leds-lp3944.
178 178
179config LEDS_LP5521
180 tristate "LED Support for N.S. LP5521 LED driver chip"
181 depends on LEDS_CLASS && I2C
182 help
183 If you say yes here you get support for the National Semiconductor
184 LP5521 LED driver. It is 3 channel chip with programmable engines.
185 Driver provides direct control via LED class and interface for
186 programming the engines.
187
188config LEDS_LP5523
189 tristate "LED Support for N.S. LP5523 LED driver chip"
190 depends on LEDS_CLASS && I2C
191 help
192 If you say yes here you get support for the National Semiconductor
193 LP5523 LED driver. It is a 9-channel chip with programmable engines.
194 The driver provides direct control via the LED class and an interface
195 for programming the engines.
196
179config LEDS_CLEVO_MAIL 197config LEDS_CLEVO_MAIL
180 tristate "Mail LED on Clevo notebook" 198 tristate "Mail LED on Clevo notebook"
181 depends on X86 && SERIO_I8042 && DMI 199 depends on X86 && SERIO_I8042 && DMI
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db40ef6d..aae6989ff6b6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o 25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
26obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
27obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
26obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 28obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
27obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 29obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
28obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 30obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 260660076507..211e21f34bd5 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = {
81 __ATTR_NULL, 81 __ATTR_NULL,
82}; 82};
83 83
84static void led_timer_function(unsigned long data)
85{
86 struct led_classdev *led_cdev = (void *)data;
87 unsigned long brightness;
88 unsigned long delay;
89
90 if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
91 led_set_brightness(led_cdev, LED_OFF);
92 return;
93 }
94
95 brightness = led_get_brightness(led_cdev);
96 if (!brightness) {
97 /* Time to switch the LED on. */
98 brightness = led_cdev->blink_brightness;
99 delay = led_cdev->blink_delay_on;
100 } else {
101 /* Store the current brightness value to be able
102 * to restore it when the delay_off period is over.
103 */
104 led_cdev->blink_brightness = brightness;
105 brightness = LED_OFF;
106 delay = led_cdev->blink_delay_off;
107 }
108
109 led_set_brightness(led_cdev, brightness);
110
111 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
112}
113
114static void led_stop_software_blink(struct led_classdev *led_cdev)
115{
116 /* deactivate previous settings */
117 del_timer_sync(&led_cdev->blink_timer);
118 led_cdev->blink_delay_on = 0;
119 led_cdev->blink_delay_off = 0;
120}
121
122static void led_set_software_blink(struct led_classdev *led_cdev,
123 unsigned long delay_on,
124 unsigned long delay_off)
125{
126 int current_brightness;
127
128 current_brightness = led_get_brightness(led_cdev);
129 if (current_brightness)
130 led_cdev->blink_brightness = current_brightness;
131 if (!led_cdev->blink_brightness)
132 led_cdev->blink_brightness = led_cdev->max_brightness;
133
134 if (delay_on == led_cdev->blink_delay_on &&
135 delay_off == led_cdev->blink_delay_off)
136 return;
137
138 led_stop_software_blink(led_cdev);
139
140 led_cdev->blink_delay_on = delay_on;
141 led_cdev->blink_delay_off = delay_off;
142
143 /* never on - don't blink */
144 if (!delay_on)
145 return;
146
147 /* never off - just set to brightness */
148 if (!delay_off) {
149 led_set_brightness(led_cdev, led_cdev->blink_brightness);
150 return;
151 }
152
153 mod_timer(&led_cdev->blink_timer, jiffies + 1);
154}
155
156
84/** 157/**
85 * led_classdev_suspend - suspend an led_classdev. 158 * led_classdev_suspend - suspend an led_classdev.
86 * @led_cdev: the led_classdev to suspend. 159 * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
148 221
149 led_update_brightness(led_cdev); 222 led_update_brightness(led_cdev);
150 223
224 init_timer(&led_cdev->blink_timer);
225 led_cdev->blink_timer.function = led_timer_function;
226 led_cdev->blink_timer.data = (unsigned long)led_cdev;
227
151#ifdef CONFIG_LEDS_TRIGGERS 228#ifdef CONFIG_LEDS_TRIGGERS
152 led_trigger_set_default(led_cdev); 229 led_trigger_set_default(led_cdev);
153#endif 230#endif
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
157 234
158 return 0; 235 return 0;
159} 236}
160
161EXPORT_SYMBOL_GPL(led_classdev_register); 237EXPORT_SYMBOL_GPL(led_classdev_register);
162 238
163/** 239/**
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
175 up_write(&led_cdev->trigger_lock); 251 up_write(&led_cdev->trigger_lock);
176#endif 252#endif
177 253
254 /* Stop blinking */
255 led_brightness_set(led_cdev, LED_OFF);
256
178 device_unregister(led_cdev->dev); 257 device_unregister(led_cdev->dev);
179 258
180 down_write(&leds_list_lock); 259 down_write(&leds_list_lock);
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
183} 262}
184EXPORT_SYMBOL_GPL(led_classdev_unregister); 263EXPORT_SYMBOL_GPL(led_classdev_unregister);
185 264
265void led_blink_set(struct led_classdev *led_cdev,
266 unsigned long *delay_on,
267 unsigned long *delay_off)
268{
269 if (led_cdev->blink_set &&
270 led_cdev->blink_set(led_cdev, delay_on, delay_off))
271 return;
272
273 /* blink with 1 Hz as default if nothing specified */
274 if (!*delay_on && !*delay_off)
275 *delay_on = *delay_off = 500;
276
277 led_set_software_blink(led_cdev, *delay_on, *delay_off);
278}
279EXPORT_SYMBOL(led_blink_set);
280
281void led_brightness_set(struct led_classdev *led_cdev,
282 enum led_brightness brightness)
283{
284 led_stop_software_blink(led_cdev);
285 led_cdev->brightness_set(led_cdev, brightness);
286}
287EXPORT_SYMBOL(led_brightness_set);
288
186static int __init leds_init(void) 289static int __init leds_init(void)
187{ 290{
188 leds_class = class_create(THIS_MODULE, "leds"); 291 leds_class = class_create(THIS_MODULE, "leds");
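
With the timer fallback above in place, a trigger or driver requests blinking through a single call: led_blink_set() first offers the cadence to the driver's blink_set() hook and only falls back to the software timer when the hardware declines (returns nonzero). An illustrative trigger using the new entry points; delays are in milliseconds, per the msecs_to_jiffies() conversion above, and 0/0 selects the 1 Hz default:

    #include <linux/leds.h>

    static void my_trigger_activate(struct led_classdev *led_cdev)
    {
            unsigned long delay_on = 100;     /* ms lit */
            unsigned long delay_off = 400;    /* ms dark */

            led_blink_set(led_cdev, &delay_on, &delay_off);
    }

    static void my_trigger_deactivate(struct led_classdev *led_cdev)
    {
            /* led_brightness_set() stops the software timer first. */
            led_brightness_set(led_cdev, LED_OFF);
    }
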
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db88b5e..c41eb6180c9c 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
113 if (led_cdev->trigger->deactivate) 113 if (led_cdev->trigger->deactivate)
114 led_cdev->trigger->deactivate(led_cdev); 114 led_cdev->trigger->deactivate(led_cdev);
115 led_cdev->trigger = NULL; 115 led_cdev->trigger = NULL;
116 led_set_brightness(led_cdev, LED_OFF); 116 led_brightness_set(led_cdev, LED_OFF);
117 } 117 }
118 if (trigger) { 118 if (trigger) {
119 write_lock_irqsave(&trigger->leddev_list_lock, flags); 119 write_lock_irqsave(&trigger->leddev_list_lock, flags);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05d08f3..4d9fa38d9ff6 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = {
316 316
317static int __init gpio_led_init(void) 317static int __init gpio_led_init(void)
318{ 318{
319 int ret; 319 int ret = 0;
320 320
321#ifdef CONFIG_LEDS_GPIO_PLATFORM 321#ifdef CONFIG_LEDS_GPIO_PLATFORM
322 ret = platform_driver_register(&gpio_led_driver); 322 ret = platform_driver_register(&gpio_led_driver);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 000000000000..3782f31f06d2
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,821 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5521.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
39
40#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
41#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
42
43#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
44#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
45
46#define LP5521_CMD_LOAD 0x15 /* 00010101 */
47#define LP5521_CMD_RUN 0x2a /* 00101010 */
48#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
49#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
50
51/* Registers */
52#define LP5521_REG_ENABLE 0x00
53#define LP5521_REG_OP_MODE 0x01
54#define LP5521_REG_R_PWM 0x02
55#define LP5521_REG_G_PWM 0x03
56#define LP5521_REG_B_PWM 0x04
57#define LP5521_REG_R_CURRENT 0x05
58#define LP5521_REG_G_CURRENT 0x06
59#define LP5521_REG_B_CURRENT 0x07
60#define LP5521_REG_CONFIG 0x08
61#define LP5521_REG_R_CHANNEL_PC 0x09
62#define LP5521_REG_G_CHANNEL_PC 0x0A
63#define LP5521_REG_B_CHANNEL_PC 0x0B
64#define LP5521_REG_STATUS 0x0C
65#define LP5521_REG_RESET 0x0D
66#define LP5521_REG_GPO 0x0E
67#define LP5521_REG_R_PROG_MEM 0x10
68#define LP5521_REG_G_PROG_MEM 0x30
69#define LP5521_REG_B_PROG_MEM 0x50
70
71#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
72#define LP5521_PROG_MEM_SIZE 0x20
73
74/* Base register to set LED current */
75#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
76
77/* Base register to set the brightness */
78#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
79
80/* Bits in ENABLE register */
81#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */
82#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
83#define LP5521_EXEC_RUN 0x2A
84
85/* Bits in CONFIG register */
86#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
87#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
88#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
89#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
90#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
91#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
92#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
93#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
94#define LP5521_CLK_INT 1 /* Internal clock */
95#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
96
97/* Status */
98#define LP5521_EXT_CLK_USED 0x08
99
100struct lp5521_engine {
101 const struct attribute_group *attributes;
102 int id;
103 u8 mode;
104 u8 prog_page;
105 u8 engine_mask;
106};
107
108struct lp5521_led {
109 int id;
110 u8 chan_nr;
111 u8 led_current;
112 u8 max_current;
113 struct led_classdev cdev;
114 struct work_struct brightness_work;
115 u8 brightness;
116};
117
118struct lp5521_chip {
119 struct lp5521_platform_data *pdata;
120 struct mutex lock; /* Serialize control */
121 struct i2c_client *client;
122 struct lp5521_engine engines[LP5521_MAX_ENGINES];
123 struct lp5521_led leds[LP5521_MAX_LEDS];
124 u8 num_channels;
125 u8 num_leds;
126};
127
128#define cdev_to_led(c) container_of(c, struct lp5521_led, cdev)
129#define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \
130 engines[(eng)->id - 1])
131#define led_to_lp5521(led) container_of((led), struct lp5521_chip, \
132 leds[(led)->id])
133
134static void lp5521_led_brightness_work(struct work_struct *work);
135
136static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
137{
138 return i2c_smbus_write_byte_data(client, reg, value);
139}
140
141static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
142{
143 s32 ret;
144
145 ret = i2c_smbus_read_byte_data(client, reg);
146 if (ret < 0)
147 return -EIO;
148
149 *buf = ret;
150 return 0;
151}
152
153static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
154{
155 struct lp5521_chip *chip = engine_to_lp5521(engine);
156 struct i2c_client *client = chip->client;
157 int ret;
158 u8 engine_state;
159
160 /* Only transitions between RUN and DIRECT mode are handled here */
161 if (mode == LP5521_CMD_LOAD)
162 return 0;
163
164 if (mode == LP5521_CMD_DISABLED)
165 mode = LP5521_CMD_DIRECT;
166
167 ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
168
169 /* set mode only for this engine */
170 engine_state &= ~(engine->engine_mask);
171 mode &= engine->engine_mask;
172 engine_state |= mode;
173 ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
174
175 return ret;
176}
177
178static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
179{
180 struct lp5521_chip *chip = engine_to_lp5521(eng);
181 struct i2c_client *client = chip->client;
182 int ret;
183 int addr;
184 u8 mode;
185
186 /* move current engine to direct mode and remember the state */
187 ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
188 usleep_range(1000, 10000);
189 ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
190
191 /* For loading, move all the engines to load mode */
192 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
193 usleep_range(1000, 10000);
194 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
195 usleep_range(1000, 10000);
196
197 addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
198 i2c_smbus_write_i2c_block_data(client,
199 addr,
200 LP5521_PROG_MEM_SIZE,
201 pattern);
202
203 ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
204 return ret;
205}
206
207static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
208{
209 return lp5521_write(chip->client,
210 LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
211 curr);
212}
213
214static void lp5521_init_engine(struct lp5521_chip *chip,
215 const struct attribute_group *attr_group)
216{
217 int i;
218 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
219 chip->engines[i].id = i + 1;
220 chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
221 chip->engines[i].prog_page = i;
222 chip->engines[i].attributes = &attr_group[i];
223 }
224}
225
226static int lp5521_configure(struct i2c_client *client,
227 const struct attribute_group *attr_group)
228{
229 struct lp5521_chip *chip = i2c_get_clientdata(client);
230 int ret;
231
232 lp5521_init_engine(chip, attr_group);
233
234 lp5521_write(client, LP5521_REG_RESET, 0xff);
235
236 usleep_range(10000, 20000);
237
238 /* Set all PWMs to direct control mode */
239 ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
240
241 /* Enable auto-powersave, set charge pump to auto, red to battery */
242 ret |= lp5521_write(client, LP5521_REG_CONFIG,
243 LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
244
245 /* Initialize all channels' PWM to zero -> LEDs off */
246 ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
247 ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
248 ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
249
250 /* Engines are set to run state when OP_MODE enables them */
251 ret |= lp5521_write(client, LP5521_REG_ENABLE,
252 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
253 LP5521_EXEC_RUN);
254 /* enable takes 500us */
255 usleep_range(500, 20000);
256
257 return ret;
258}
259
260static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
261{
262 int ret;
263 u8 status;
264
265 ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
266 if (ret < 0)
267 return ret;
268
269 /* Check that ext clock is really in use if requested */
270 if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
271 if ((status & LP5521_EXT_CLK_USED) == 0)
272 return -EIO;
273 return 0;
274}
275
276static void lp5521_set_brightness(struct led_classdev *cdev,
277 enum led_brightness brightness)
278{
279 struct lp5521_led *led = cdev_to_led(cdev);
280 led->brightness = (u8)brightness;
281 schedule_work(&led->brightness_work);
282}
283
284static void lp5521_led_brightness_work(struct work_struct *work)
285{
286 struct lp5521_led *led = container_of(work,
287 struct lp5521_led,
288 brightness_work);
289 struct lp5521_chip *chip = led_to_lp5521(led);
290 struct i2c_client *client = chip->client;
291
292 mutex_lock(&chip->lock);
293 lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
294 led->brightness);
295 mutex_unlock(&chip->lock);
296}
297
298/* Detect the chip by setting its ENABLE register and reading it back. */
299static int lp5521_detect(struct i2c_client *client)
300{
301 int ret;
302 u8 buf;
303
304 ret = lp5521_write(client, LP5521_REG_ENABLE,
305 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
306 if (ret)
307 return ret;
308 usleep_range(1000, 10000);
309 ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
310 if (ret)
311 return ret;
312 if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
313 return -ENODEV;
314
315 return 0;
316}
317
318/* Set engine mode and create appropriate sysfs attributes, if required. */
319static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
320{
321 struct lp5521_chip *chip = engine_to_lp5521(engine);
322 struct i2c_client *client = chip->client;
323 struct device *dev = &client->dev;
324 int ret = 0;
325
326 /* if already in that mode, do nothing (except for run) */
327 if (mode == engine->mode && mode != LP5521_CMD_RUN)
328 return 0;
329
330 if (mode == LP5521_CMD_RUN) {
331 ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
332 } else if (mode == LP5521_CMD_LOAD) {
333 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
334 lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
335
336 ret = sysfs_create_group(&dev->kobj, engine->attributes);
337 if (ret)
338 return ret;
339 } else if (mode == LP5521_CMD_DISABLED) {
340 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
341 }
342
343 /* remove load attribute from sysfs if not in load mode */
344 if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
345 sysfs_remove_group(&dev->kobj, engine->attributes);
346
347 engine->mode = mode;
348
349 return ret;
350}
351
352static int lp5521_do_store_load(struct lp5521_engine *engine,
353 const char *buf, size_t len)
354{
355 struct lp5521_chip *chip = engine_to_lp5521(engine);
356 struct i2c_client *client = chip->client;
357 int ret, nrchars, offset = 0, i = 0;
358 char c[3];
359 unsigned cmd;
360 u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
361
362 while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
363 /* separate sscanfs because the length limit works only for %s */
364 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
365 ret = sscanf(c, "%2x", &cmd);
366 if (ret != 1)
367 goto fail;
368 pattern[i] = (u8)cmd;
369
370 offset += nrchars;
371 i++;
372 }
373
374 /* Each instruction is 16bit long. Check that length is even */
375 if (i % 2)
376 goto fail;
377
378 mutex_lock(&chip->lock);
379 ret = lp5521_load_program(engine, pattern);
380 mutex_unlock(&chip->lock);
381
382 if (ret) {
383 dev_err(&client->dev, "failed loading pattern\n");
384 return ret;
385 }
386
387 return len;
388fail:
389 dev_err(&client->dev, "wrong pattern format\n");
390 return -EINVAL;
391}
392
393static ssize_t store_engine_load(struct device *dev,
394 struct device_attribute *attr,
395 const char *buf, size_t len, int nr)
396{
397 struct i2c_client *client = to_i2c_client(dev);
398 struct lp5521_chip *chip = i2c_get_clientdata(client);
399 return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
400}
401
402#define store_load(nr) \
403static ssize_t store_engine##nr##_load(struct device *dev, \
404 struct device_attribute *attr, \
405 const char *buf, size_t len) \
406{ \
407 return store_engine_load(dev, attr, buf, len, nr); \
408}
409store_load(1)
410store_load(2)
411store_load(3)
412
413static ssize_t show_engine_mode(struct device *dev,
414 struct device_attribute *attr,
415 char *buf, int nr)
416{
417 struct i2c_client *client = to_i2c_client(dev);
418 struct lp5521_chip *chip = i2c_get_clientdata(client);
419 switch (chip->engines[nr - 1].mode) {
420 case LP5521_CMD_RUN:
421 return sprintf(buf, "run\n");
422 case LP5521_CMD_LOAD:
423 return sprintf(buf, "load\n");
424 case LP5521_CMD_DISABLED:
425 return sprintf(buf, "disabled\n");
426 default:
427 return sprintf(buf, "disabled\n");
428 }
429}
430
431#define show_mode(nr) \
432static ssize_t show_engine##nr##_mode(struct device *dev, \
433 struct device_attribute *attr, \
434 char *buf) \
435{ \
436 return show_engine_mode(dev, attr, buf, nr); \
437}
438show_mode(1)
439show_mode(2)
440show_mode(3)
441
442static ssize_t store_engine_mode(struct device *dev,
443 struct device_attribute *attr,
444 const char *buf, size_t len, int nr)
445{
446 struct i2c_client *client = to_i2c_client(dev);
447 struct lp5521_chip *chip = i2c_get_clientdata(client);
448 struct lp5521_engine *engine = &chip->engines[nr - 1];
449 mutex_lock(&chip->lock);
450
451 if (!strncmp(buf, "run", 3))
452 lp5521_set_mode(engine, LP5521_CMD_RUN);
453 else if (!strncmp(buf, "load", 4))
454 lp5521_set_mode(engine, LP5521_CMD_LOAD);
455 else if (!strncmp(buf, "disabled", 8))
456 lp5521_set_mode(engine, LP5521_CMD_DISABLED);
457
458 mutex_unlock(&chip->lock);
459 return len;
460}
461
462#define store_mode(nr) \
463static ssize_t store_engine##nr##_mode(struct device *dev, \
464 struct device_attribute *attr, \
465 const char *buf, size_t len) \
466{ \
467 return store_engine_mode(dev, attr, buf, len, nr); \
468}
469store_mode(1)
470store_mode(2)
471store_mode(3)
472
473static ssize_t show_max_current(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476{
477 struct led_classdev *led_cdev = dev_get_drvdata(dev);
478 struct lp5521_led *led = cdev_to_led(led_cdev);
479
480 return sprintf(buf, "%d\n", led->max_current);
481}
482
483static ssize_t show_current(struct device *dev,
484 struct device_attribute *attr,
485 char *buf)
486{
487 struct led_classdev *led_cdev = dev_get_drvdata(dev);
488 struct lp5521_led *led = cdev_to_led(led_cdev);
489
490 return sprintf(buf, "%d\n", led->led_current);
491}
492
493static ssize_t store_current(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t len)
496{
497 struct led_classdev *led_cdev = dev_get_drvdata(dev);
498 struct lp5521_led *led = cdev_to_led(led_cdev);
499 struct lp5521_chip *chip = led_to_lp5521(led);
500 ssize_t ret;
501 unsigned long curr;
502
503 if (strict_strtoul(buf, 0, &curr))
504 return -EINVAL;
505
506 if (curr > led->max_current)
507 return -EINVAL;
508
509 mutex_lock(&chip->lock);
510 ret = lp5521_set_led_current(chip, led->id, curr);
511 mutex_unlock(&chip->lock);
512
513 if (ret < 0)
514 return ret;
515
516 led->led_current = (u8)curr;
517
518 return len;
519}
520
521static ssize_t lp5521_selftest(struct device *dev,
522 struct device_attribute *attr,
523 char *buf)
524{
525 struct i2c_client *client = to_i2c_client(dev);
526 struct lp5521_chip *chip = i2c_get_clientdata(client);
527 int ret;
528
529 mutex_lock(&chip->lock);
530 ret = lp5521_run_selftest(chip, buf);
531 mutex_unlock(&chip->lock);
532 return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
533}
534
535/* led class device attributes */
536static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
537static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
538
539static struct attribute *lp5521_led_attributes[] = {
540 &dev_attr_led_current.attr,
541 &dev_attr_max_current.attr,
542 NULL,
543};
544
545static struct attribute_group lp5521_led_attribute_group = {
546 .attrs = lp5521_led_attributes
547};
548
549/* device attributes */
550static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
551 show_engine1_mode, store_engine1_mode);
552static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
553 show_engine2_mode, store_engine2_mode);
554static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
555 show_engine3_mode, store_engine3_mode);
556static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
557static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
558static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
559static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
560
561static struct attribute *lp5521_attributes[] = {
562 &dev_attr_engine1_mode.attr,
563 &dev_attr_engine2_mode.attr,
564 &dev_attr_engine3_mode.attr,
565 &dev_attr_selftest.attr,
566 NULL
567};
568
569static struct attribute *lp5521_engine1_attributes[] = {
570 &dev_attr_engine1_load.attr,
571 NULL
572};
573
574static struct attribute *lp5521_engine2_attributes[] = {
575 &dev_attr_engine2_load.attr,
576 NULL
577};
578
579static struct attribute *lp5521_engine3_attributes[] = {
580 &dev_attr_engine3_load.attr,
581 NULL
582};
583
584static const struct attribute_group lp5521_group = {
585 .attrs = lp5521_attributes,
586};
587
588static const struct attribute_group lp5521_engine_group[] = {
589 {.attrs = lp5521_engine1_attributes },
590 {.attrs = lp5521_engine2_attributes },
591 {.attrs = lp5521_engine3_attributes },
592};
593
594static int lp5521_register_sysfs(struct i2c_client *client)
595{
596 struct device *dev = &client->dev;
597 return sysfs_create_group(&dev->kobj, &lp5521_group);
598}
599
600static void lp5521_unregister_sysfs(struct i2c_client *client)
601{
602 struct lp5521_chip *chip = i2c_get_clientdata(client);
603 struct device *dev = &client->dev;
604 int i;
605
606 sysfs_remove_group(&dev->kobj, &lp5521_group);
607
608 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
609 if (chip->engines[i].mode == LP5521_CMD_LOAD)
610 sysfs_remove_group(&dev->kobj,
611 chip->engines[i].attributes);
612 }
613
614 for (i = 0; i < chip->num_leds; i++)
615 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
616 &lp5521_led_attribute_group);
617}
618
619static int __init lp5521_init_led(struct lp5521_led *led,
620 struct i2c_client *client,
621 int chan, struct lp5521_platform_data *pdata)
622{
623 struct device *dev = &client->dev;
624 char name[32];
625 int res;
626
627 if (chan >= LP5521_MAX_LEDS)
628 return -EINVAL;
629
630 if (pdata->led_config[chan].led_current == 0)
631 return 0;
632
633 led->led_current = pdata->led_config[chan].led_current;
634 led->max_current = pdata->led_config[chan].max_current;
635 led->chan_nr = pdata->led_config[chan].chan_nr;
636
637 if (led->chan_nr >= LP5521_MAX_LEDS) {
638 dev_err(dev, "Use channel numbers between 0 and %d\n",
639 LP5521_MAX_LEDS - 1);
640 return -EINVAL;
641 }
642
643 snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
644 led->cdev.brightness_set = lp5521_set_brightness;
645 led->cdev.name = name;
646 res = led_classdev_register(dev, &led->cdev);
647 if (res < 0) {
648 dev_err(dev, "couldn't register led on channel %d\n", chan);
649 return res;
650 }
651
652 res = sysfs_create_group(&led->cdev.dev->kobj,
653 &lp5521_led_attribute_group);
654 if (res < 0) {
655 dev_err(dev, "couldn't register current attribute\n");
656 led_classdev_unregister(&led->cdev);
657 return res;
658 }
659 return 0;
660}
661
662static int lp5521_probe(struct i2c_client *client,
663 const struct i2c_device_id *id)
664{
665 struct lp5521_chip *chip;
666 struct lp5521_platform_data *pdata;
667 int ret, i, led;
668
669 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
670 if (!chip)
671 return -ENOMEM;
672
673 i2c_set_clientdata(client, chip);
674 chip->client = client;
675
676 pdata = client->dev.platform_data;
677
678 if (!pdata) {
679 dev_err(&client->dev, "no platform data\n");
680 ret = -EINVAL;
681 goto fail1;
682 }
683
684 mutex_init(&chip->lock);
685
686 chip->pdata = pdata;
687
688 if (pdata->setup_resources) {
689 ret = pdata->setup_resources();
690 if (ret < 0)
691 goto fail1;
692 }
693
694 if (pdata->enable) {
695 pdata->enable(0);
696 usleep_range(1000, 10000);
697 pdata->enable(1);
698 usleep_range(1000, 10000); /* Spec says min 500us */
699 }
700
701 ret = lp5521_detect(client);
702
703 if (ret) {
704 dev_err(&client->dev, "Chip not found\n");
705 goto fail2;
706 }
707
708 dev_info(&client->dev, "%s programmable led chip found\n", id->name);
709
710 ret = lp5521_configure(client, lp5521_engine_group);
711 if (ret < 0) {
712 dev_err(&client->dev, "error configuring chip\n");
713 goto fail2;
714 }
715
716 /* Initialize leds */
717 chip->num_channels = pdata->num_channels;
718 chip->num_leds = 0;
719 led = 0;
720 for (i = 0; i < pdata->num_channels; i++) {
721 /* Do not initialize channels that are not connected */
722 if (pdata->led_config[i].led_current == 0)
723 continue;
724
725 ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
726 if (ret) {
727 dev_err(&client->dev, "error initializing leds\n");
728 goto fail3;
729 }
730 chip->num_leds++;
731
732 chip->leds[led].id = led;
733 /* Set initial LED current */
734 lp5521_set_led_current(chip, led,
735 chip->leds[led].led_current);
736
737 INIT_WORK(&(chip->leds[led].brightness_work),
738 lp5521_led_brightness_work);
739
740 led++;
741 }
742
743 ret = lp5521_register_sysfs(client);
744 if (ret) {
745 dev_err(&client->dev, "registering sysfs failed\n");
746 goto fail3;
747 }
748 return ret;
749fail3:
750 for (i = 0; i < chip->num_leds; i++) {
751 led_classdev_unregister(&chip->leds[i].cdev);
752 cancel_work_sync(&chip->leds[i].brightness_work);
753 }
754fail2:
755 if (pdata->enable)
756 pdata->enable(0);
757 if (pdata->release_resources)
758 pdata->release_resources();
759fail1:
760 kfree(chip);
761 return ret;
762}
763
764static int lp5521_remove(struct i2c_client *client)
765{
766 struct lp5521_chip *chip = i2c_get_clientdata(client);
767 int i;
768
769 lp5521_unregister_sysfs(client);
770
771 for (i = 0; i < chip->num_leds; i++) {
772 led_classdev_unregister(&chip->leds[i].cdev);
773 cancel_work_sync(&chip->leds[i].brightness_work);
774 }
775
776 if (chip->pdata->enable)
777 chip->pdata->enable(0);
778 if (chip->pdata->release_resources)
779 chip->pdata->release_resources();
780 kfree(chip);
781 return 0;
782}
783
784static const struct i2c_device_id lp5521_id[] = {
785 { "lp5521", 0 }, /* Three channel chip */
786 { }
787};
788MODULE_DEVICE_TABLE(i2c, lp5521_id);
789
790static struct i2c_driver lp5521_driver = {
791 .driver = {
792 .name = "lp5521",
793 },
794 .probe = lp5521_probe,
795 .remove = lp5521_remove,
796 .id_table = lp5521_id,
797};
798
799static int __init lp5521_init(void)
800{
801 int ret;
802
803 ret = i2c_add_driver(&lp5521_driver);
804
805 if (ret < 0)
806 printk(KERN_ALERT "Adding lp5521 driver failed\n");
807
808 return ret;
809}
810
811static void __exit lp5521_exit(void)
812{
813 i2c_del_driver(&lp5521_driver);
814}
815
816module_init(lp5521_init);
817module_exit(lp5521_exit);
818
819MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
820MODULE_DESCRIPTION("LP5521 LED engine");
821MODULE_LICENSE("GPL v2");
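
Everything beyond plain brightness control in this driver is reached through the sysfs files registered above: engineN_mode accepts the strings "run", "load" and "disabled" (see store_engine_mode()), and engineN_load takes a program as hex byte pairs. A userspace sketch; the 0-0032 segment of the path is hypothetical and depends on the board's I2C bus and address:

    #include <stdio.h>

    /* Hypothetical sysfs path for an lp5521 at bus 0, address 0x32. */
    #define ENG1_MODE "/sys/bus/i2c/devices/0-0032/engine1_mode"

    int main(void)
    {
            FILE *f = fopen(ENG1_MODE, "w");

            if (!f)
                    return 1;
            fputs("load", f);   /* store_engine_mode()'s "load" branch */
            return fclose(f) ? 1 : 0;
    }
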
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 000000000000..1e11fcc08b28
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1065 @@
1/*
2 * lp5523.c - LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5523.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5523_REG_ENABLE 0x00
39#define LP5523_REG_OP_MODE 0x01
40#define LP5523_REG_RATIOMETRIC_MSB 0x02
41#define LP5523_REG_RATIOMETRIC_LSB 0x03
42#define LP5523_REG_ENABLE_LEDS_MSB 0x04
43#define LP5523_REG_ENABLE_LEDS_LSB 0x05
44#define LP5523_REG_LED_CNTRL_BASE 0x06
45#define LP5523_REG_LED_PWM_BASE 0x16
46#define LP5523_REG_LED_CURRENT_BASE 0x26
47#define LP5523_REG_CONFIG 0x36
48#define LP5523_REG_CHANNEL1_PC 0x37
49#define LP5523_REG_CHANNEL2_PC 0x38
50#define LP5523_REG_CHANNEL3_PC 0x39
51#define LP5523_REG_STATUS 0x3a
52#define LP5523_REG_GPO 0x3b
53#define LP5523_REG_VARIABLE 0x3c
54#define LP5523_REG_RESET 0x3d
55#define LP5523_REG_TEMP_CTRL 0x3e
56#define LP5523_REG_TEMP_READ 0x3f
57#define LP5523_REG_TEMP_WRITE 0x40
58#define LP5523_REG_LED_TEST_CTRL 0x41
59#define LP5523_REG_LED_TEST_ADC 0x42
60#define LP5523_REG_ENG1_VARIABLE 0x45
61#define LP5523_REG_ENG2_VARIABLE 0x46
62#define LP5523_REG_ENG3_VARIABLE 0x47
63#define LP5523_REG_MASTER_FADER1 0x48
64#define LP5523_REG_MASTER_FADER2 0x49
65#define LP5523_REG_MASTER_FADER3 0x4a
66#define LP5523_REG_CH1_PROG_START 0x4c
67#define LP5523_REG_CH2_PROG_START 0x4d
68#define LP5523_REG_CH3_PROG_START 0x4e
69#define LP5523_REG_PROG_PAGE_SEL 0x4f
70#define LP5523_REG_PROG_MEM 0x50
71
72#define LP5523_CMD_LOAD 0x15 /* 00010101 */
73#define LP5523_CMD_RUN 0x2a /* 00101010 */
74#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
75
76#define LP5523_ENABLE 0x40
77#define LP5523_AUTO_INC 0x40
78#define LP5523_PWR_SAVE 0x20
79#define LP5523_PWM_PWR_SAVE 0x04
80#define LP5523_CP_1 0x08
81#define LP5523_CP_1_5 0x10
82#define LP5523_CP_AUTO 0x18
83#define LP5523_INT_CLK 0x01
84#define LP5523_AUTO_CLK 0x02
85#define LP5523_EN_LEDTEST 0x80
86#define LP5523_LEDTEST_DONE 0x80
87
88#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
89#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
90#define LP5523_PROGRAM_PAGES 6
91#define LP5523_ADC_SHORTCIRC_LIM 80
92
93#define LP5523_LEDS 9
94#define LP5523_ENGINES 3
95
96#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
97
98#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
99
100#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
101
102#define LP5523_EXT_CLK_USED 0x08
103
104#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
105#define SHIFT_MASK(id) (((id) - 1) * 2)
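/* e.g. LP5523_ENG_MASK_BASE >> SHIFT_MASK(id) yields 0x30 (bits 5:4 of
 * the OP_MODE register) for engine 1, 0x0c for engine 2 and 0x03 for
 * engine 3: three adjacent 2-bit mode fields, one per engine. */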
106
107struct lp5523_engine {
108 const struct attribute_group *attributes;
109 int id;
110 u8 mode;
111 u8 prog_page;
112 u8 mux_page;
113 u16 led_mux;
114 u8 engine_mask;
115};
116
117struct lp5523_led {
118 int id;
119 u8 chan_nr;
120 u8 led_current;
121 u8 max_current;
122 struct led_classdev cdev;
123 struct work_struct brightness_work;
124 u8 brightness;
125};
126
127struct lp5523_chip {
128 struct mutex lock; /* Serialize control */
129 struct i2c_client *client;
130 struct lp5523_engine engines[LP5523_ENGINES];
131 struct lp5523_led leds[LP5523_LEDS];
132 struct lp5523_platform_data *pdata;
133 u8 num_channels;
134 u8 num_leds;
135};
136
137#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev)
138
139static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
140{
141 return container_of(engine, struct lp5523_chip,
142 engines[engine->id - 1]);
143}
144
145static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
146{
147 return container_of(led, struct lp5523_chip,
148 leds[led->id]);
149}
150
151static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
152static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
153static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
154
155static void lp5523_led_brightness_work(struct work_struct *work);
156
157static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
158{
159 return i2c_smbus_write_byte_data(client, reg, value);
160}
161
162static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
163{
164 s32 ret = i2c_smbus_read_byte_data(client, reg);
165
166 if (ret < 0)
167 return -EIO;
168
169 *buf = ret;
170 return 0;
171}
172
173static int lp5523_detect(struct i2c_client *client)
174{
175 int ret;
176 u8 buf;
177
178 ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
179 if (ret)
180 return ret;
181 ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
182 if (ret)
183 return ret;
184 if (buf == 0x40)
185 return 0;
186 else
187 return -ENODEV;
188}
189
190static int lp5523_configure(struct i2c_client *client)
191{
192 struct lp5523_chip *chip = i2c_get_clientdata(client);
193 int ret = 0;
194 u8 status;
195
196	/* one pattern per engine, setting the LED mux start and stop addresses */
197 u8 pattern[][LP5523_PROGRAM_LENGTH] = {
198 { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
199 { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
200 { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
201 };
202
203 lp5523_write(client, LP5523_REG_RESET, 0xff);
204
205 usleep_range(10000, 100000);
206
207 ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
208 /* Chip startup time after reset is 500 us */
209 usleep_range(1000, 10000);
210
211 ret |= lp5523_write(client, LP5523_REG_CONFIG,
212 LP5523_AUTO_INC | LP5523_PWR_SAVE |
213 LP5523_CP_AUTO | LP5523_AUTO_CLK |
214 LP5523_PWM_PWR_SAVE);
215
216 /* turn on all leds */
217 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
218 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
219
220 /* hardcode 32 bytes of memory for each engine from program memory */
221 ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
222 ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
223 ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
224
225 /* write led mux address space for each channel */
226 ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
227 ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
228 ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
229
230 if (ret) {
231 dev_err(&client->dev, "could not load mux programs\n");
232 return -1;
233 }
234
235 /* set all engines exec state and mode to run 00101010 */
236 ret |= lp5523_write(client, LP5523_REG_ENABLE,
237 (LP5523_CMD_RUN | LP5523_ENABLE));
238
239 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
240
241 if (ret) {
242 dev_err(&client->dev, "could not start mux programs\n");
243 return -1;
244 }
245
246 /* Wait 3ms and check the engine status */
247 usleep_range(3000, 20000);
248 lp5523_read(client, LP5523_REG_STATUS, &status);
249 status &= LP5523_ENG_STATUS_MASK;
250
251 if (status == LP5523_ENG_STATUS_MASK) {
252 dev_dbg(&client->dev, "all engines configured\n");
253 } else {
254 dev_info(&client->dev, "status == %x\n", status);
255		dev_err(&client->dev, "could not configure LED engine\n");
256 return -1;
257 }
258
259 dev_info(&client->dev, "disabling engines\n");
260
261 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
262
263 return ret;
264}
265
266static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
267{
268 struct lp5523_chip *chip = engine_to_lp5523(engine);
269 struct i2c_client *client = chip->client;
270 int ret;
271 u8 engine_state;
272
273 ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
274 if (ret)
275 goto fail;
276
277 engine_state &= ~(engine->engine_mask);
278
279 /* set mode only for this engine */
280 mode &= engine->engine_mask;
281
282 engine_state |= mode;
283
284 ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
285fail:
286 return ret;
287}
288
289static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
290{
291 struct lp5523_chip *chip = engine_to_lp5523(engine);
292 struct i2c_client *client = chip->client;
293 int ret = 0;
294
295 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
296
297 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
298 ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
299 (u8)(mux >> 8));
300 ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
301 engine->led_mux = mux;
302
303 return ret;
304}
305
306static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
307{
308 struct lp5523_chip *chip = engine_to_lp5523(engine);
309 struct i2c_client *client = chip->client;
310
311 int ret = 0;
312
313 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
314
315 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
316 engine->prog_page);
317 ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
318 LP5523_PROGRAM_LENGTH, pattern);
319
320 return ret;
321}
322
323static int lp5523_run_program(struct lp5523_engine *engine)
324{
325 struct lp5523_chip *chip = engine_to_lp5523(engine);
326 struct i2c_client *client = chip->client;
327 int ret;
328
329 ret = lp5523_write(client, LP5523_REG_ENABLE,
330 LP5523_CMD_RUN | LP5523_ENABLE);
331 if (ret)
332 goto fail;
333
334 ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
335fail:
336 return ret;
337}
338
339static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
340{
341 int i;
342 u16 tmp_mux = 0;
343 len = len < LP5523_LEDS ? len : LP5523_LEDS;
344 for (i = 0; i < len; i++) {
345 switch (buf[i]) {
346 case '1':
347 tmp_mux |= (1 << i);
348 break;
349 case '0':
350 break;
351 case '\n':
352 i = len;
353 break;
354 default:
355 return -1;
356 }
357 }
358 *mux = tmp_mux;
359
360 return 0;
361}
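/* Example: a buffer of "000000011\n" yields mux 0x0180. String position
 * i maps to channel i; '1' attaches the channel to the engine, '0'
 * leaves it detached, and a newline terminates parsing early. */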
362
363static void lp5523_mux_to_array(u16 led_mux, char *array)
364{
365 int i, pos = 0;
366 for (i = 0; i < LP5523_LEDS; i++)
367 pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
368
369 array[pos] = '\0';
370}
371
372/*--------------------------------------------------------------*/
373/* Sysfs interface */
374/*--------------------------------------------------------------*/
375
376static ssize_t show_engine_leds(struct device *dev,
377 struct device_attribute *attr,
378 char *buf, int nr)
379{
380 struct i2c_client *client = to_i2c_client(dev);
381 struct lp5523_chip *chip = i2c_get_clientdata(client);
382 char mux[LP5523_LEDS + 1];
383
384 lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
385
386 return sprintf(buf, "%s\n", mux);
387}
388
389#define show_leds(nr) \
390static ssize_t show_engine##nr##_leds(struct device *dev, \
391 struct device_attribute *attr, \
392 char *buf) \
393{ \
394 return show_engine_leds(dev, attr, buf, nr); \
395}
396show_leds(1)
397show_leds(2)
398show_leds(3)
399
400static ssize_t store_engine_leds(struct device *dev,
401 struct device_attribute *attr,
402 const char *buf, size_t len, int nr)
403{
404 struct i2c_client *client = to_i2c_client(dev);
405 struct lp5523_chip *chip = i2c_get_clientdata(client);
406 u16 mux = 0;
407
408 if (lp5523_mux_parse(buf, &mux, len))
409 return -EINVAL;
410
411 if (lp5523_load_mux(&chip->engines[nr - 1], mux))
412 return -EINVAL;
413
414 return len;
415}
416
417#define store_leds(nr) \
418static ssize_t store_engine##nr##_leds(struct device *dev, \
419 struct device_attribute *attr, \
420 const char *buf, size_t len) \
421{ \
422 return store_engine_leds(dev, attr, buf, len, nr); \
423}
424store_leds(1)
425store_leds(2)
426store_leds(3)
427
428static ssize_t lp5523_selftest(struct device *dev,
429 struct device_attribute *attr,
430 char *buf)
431{
432 struct i2c_client *client = to_i2c_client(dev);
433 struct lp5523_chip *chip = i2c_get_clientdata(client);
434 int i, ret, pos = 0;
435 int led = 0;
436 u8 status, adc, vdd;
437
438 mutex_lock(&chip->lock);
439
440 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
441 if (ret < 0)
442 goto fail;
443
444 /* Check that ext clock is really in use if requested */
445 if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
446 if ((status & LP5523_EXT_CLK_USED) == 0)
447 goto fail;
448
449 /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
450 lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
451 LP5523_EN_LEDTEST | 16);
452 usleep_range(3000, 10000);
453 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
454 if (!(status & LP5523_LEDTEST_DONE))
455 usleep_range(3000, 10000);
456
457 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
458 vdd--; /* There may be some fluctuation in measurement */
459
460 for (i = 0; i < LP5523_LEDS; i++) {
461 /* Skip non-existing channels */
462 if (chip->pdata->led_config[i].led_current == 0)
463 continue;
464
465 /* Set default current */
466 lp5523_write(chip->client,
467 LP5523_REG_LED_CURRENT_BASE + i,
468 chip->pdata->led_config[i].led_current);
469
470 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
471 /* let current stabilize 2ms before measurements start */
472 usleep_range(2000, 10000);
473 lp5523_write(chip->client,
474 LP5523_REG_LED_TEST_CTRL,
475 LP5523_EN_LEDTEST | i);
476 /* ledtest takes 2.7ms */
477 usleep_range(3000, 10000);
478 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
479 if (!(status & LP5523_LEDTEST_DONE))
480 usleep_range(3000, 10000);
481 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
482
483 if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
484 pos += sprintf(buf + pos, "LED %d FAIL\n", i);
485
486 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
487
488 /* Restore current */
489 lp5523_write(chip->client,
490 LP5523_REG_LED_CURRENT_BASE + i,
491 chip->leds[led].led_current);
492 led++;
493 }
494 if (pos == 0)
495 pos = sprintf(buf, "OK\n");
496 goto release_lock;
497fail:
498 pos = sprintf(buf, "FAIL\n");
499
500release_lock:
501 mutex_unlock(&chip->lock);
502
503 return pos;
504}
505
506static void lp5523_set_brightness(struct led_classdev *cdev,
507 enum led_brightness brightness)
508{
509 struct lp5523_led *led = cdev_to_led(cdev);
510
511 led->brightness = (u8)brightness;
512
513 schedule_work(&led->brightness_work);
514}
515
516static void lp5523_led_brightness_work(struct work_struct *work)
517{
518 struct lp5523_led *led = container_of(work,
519 struct lp5523_led,
520 brightness_work);
521 struct lp5523_chip *chip = led_to_lp5523(led);
522 struct i2c_client *client = chip->client;
523
524 mutex_lock(&chip->lock);
525
526 lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
527 led->brightness);
528
529 mutex_unlock(&chip->lock);
530}
531
532static int lp5523_do_store_load(struct lp5523_engine *engine,
533 const char *buf, size_t len)
534{
535 struct lp5523_chip *chip = engine_to_lp5523(engine);
536 struct i2c_client *client = chip->client;
537 int ret, nrchars, offset = 0, i = 0;
538 char c[3];
539 unsigned cmd;
540 u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
541
542 while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
543		/* separate sscanf calls because the length limit only works for %s */
544 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
545 ret = sscanf(c, "%2x", &cmd);
546 if (ret != 1)
547 goto fail;
548 pattern[i] = (u8)cmd;
549
550 offset += nrchars;
551 i++;
552 }
553
554 /* Each instruction is 16bit long. Check that length is even */
555 if (i % 2)
556 goto fail;
557
558 mutex_lock(&chip->lock);
559
560 ret = lp5523_load_program(engine, pattern);
561 mutex_unlock(&chip->lock);
562
563 if (ret) {
564 dev_err(&client->dev, "failed loading pattern\n");
565 return ret;
566 }
567
568 return len;
569fail:
570 dev_err(&client->dev, "wrong pattern format\n");
571 return -EINVAL;
572}
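/* Accepted input is hex byte pairs, optionally whitespace separated,
 * two bytes per 16-bit instruction. For instance "9d80 4000 0d7f 0000"
 * (illustrative values) loads a four-instruction pattern. */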
573
574static ssize_t store_engine_load(struct device *dev,
575 struct device_attribute *attr,
576 const char *buf, size_t len, int nr)
577{
578 struct i2c_client *client = to_i2c_client(dev);
579 struct lp5523_chip *chip = i2c_get_clientdata(client);
580 return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
581}
582
583#define store_load(nr) \
584static ssize_t store_engine##nr##_load(struct device *dev, \
585 struct device_attribute *attr, \
586 const char *buf, size_t len) \
587{ \
588 return store_engine_load(dev, attr, buf, len, nr); \
589}
590store_load(1)
591store_load(2)
592store_load(3)
593
594static ssize_t show_engine_mode(struct device *dev,
595 struct device_attribute *attr,
596 char *buf, int nr)
597{
598 struct i2c_client *client = to_i2c_client(dev);
599 struct lp5523_chip *chip = i2c_get_clientdata(client);
600 switch (chip->engines[nr - 1].mode) {
601 case LP5523_CMD_RUN:
602 return sprintf(buf, "run\n");
603 case LP5523_CMD_LOAD:
604 return sprintf(buf, "load\n");
605 case LP5523_CMD_DISABLED:
606 return sprintf(buf, "disabled\n");
607 default:
608 return sprintf(buf, "disabled\n");
609 }
610}
611
612#define show_mode(nr) \
613static ssize_t show_engine##nr##_mode(struct device *dev, \
614 struct device_attribute *attr, \
615 char *buf) \
616{ \
617 return show_engine_mode(dev, attr, buf, nr); \
618}
619show_mode(1)
620show_mode(2)
621show_mode(3)
622
623static ssize_t store_engine_mode(struct device *dev,
624 struct device_attribute *attr,
625 const char *buf, size_t len, int nr)
626{
627 struct i2c_client *client = to_i2c_client(dev);
628 struct lp5523_chip *chip = i2c_get_clientdata(client);
629 struct lp5523_engine *engine = &chip->engines[nr - 1];
630 mutex_lock(&chip->lock);
631
632 if (!strncmp(buf, "run", 3))
633 lp5523_set_mode(engine, LP5523_CMD_RUN);
634 else if (!strncmp(buf, "load", 4))
635 lp5523_set_mode(engine, LP5523_CMD_LOAD);
636 else if (!strncmp(buf, "disabled", 8))
637 lp5523_set_mode(engine, LP5523_CMD_DISABLED);
638
639 mutex_unlock(&chip->lock);
640 return len;
641}
642
643#define store_mode(nr) \
644static ssize_t store_engine##nr##_mode(struct device *dev, \
645 struct device_attribute *attr, \
646 const char *buf, size_t len) \
647{ \
648 return store_engine_mode(dev, attr, buf, len, nr); \
649}
650store_mode(1)
651store_mode(2)
652store_mode(3)
653
654static ssize_t show_max_current(struct device *dev,
655 struct device_attribute *attr,
656 char *buf)
657{
658 struct led_classdev *led_cdev = dev_get_drvdata(dev);
659 struct lp5523_led *led = cdev_to_led(led_cdev);
660
661 return sprintf(buf, "%d\n", led->max_current);
662}
663
664static ssize_t show_current(struct device *dev,
665 struct device_attribute *attr,
666 char *buf)
667{
668 struct led_classdev *led_cdev = dev_get_drvdata(dev);
669 struct lp5523_led *led = cdev_to_led(led_cdev);
670
671 return sprintf(buf, "%d\n", led->led_current);
672}
673
674static ssize_t store_current(struct device *dev,
675 struct device_attribute *attr,
676 const char *buf, size_t len)
677{
678 struct led_classdev *led_cdev = dev_get_drvdata(dev);
679 struct lp5523_led *led = cdev_to_led(led_cdev);
680 struct lp5523_chip *chip = led_to_lp5523(led);
681 ssize_t ret;
682 unsigned long curr;
683
684 if (strict_strtoul(buf, 0, &curr))
685 return -EINVAL;
686
687 if (curr > led->max_current)
688 return -EINVAL;
689
690 mutex_lock(&chip->lock);
691 ret = lp5523_write(chip->client,
692 LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
693 (u8)curr);
694 mutex_unlock(&chip->lock);
695
696 if (ret < 0)
697 return ret;
698
699 led->led_current = (u8)curr;
700
701 return len;
702}
703
704/* led class device attributes */
705static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
706static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
707
708static struct attribute *lp5523_led_attributes[] = {
709 &dev_attr_led_current.attr,
710 &dev_attr_max_current.attr,
711 NULL,
712};
713
714static struct attribute_group lp5523_led_attribute_group = {
715 .attrs = lp5523_led_attributes
716};
717
718/* device attributes */
719static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
720 show_engine1_mode, store_engine1_mode);
721static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
722 show_engine2_mode, store_engine2_mode);
723static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
724 show_engine3_mode, store_engine3_mode);
725static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
726 show_engine1_leds, store_engine1_leds);
727static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
728 show_engine2_leds, store_engine2_leds);
729static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
730 show_engine3_leds, store_engine3_leds);
731static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
732static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
733static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
734static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
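/* Typical shell usage (the sysfs path depends on the probed I2C bus
 * and address, so the prefix below is an assumption):
 *	echo load > .../engine1_mode
 *	echo 111000000 > .../engine1_leds
 *	echo "9d80 4000 0d7f 0000" > .../engine1_load
 *	echo run > .../engine1_mode
 */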
735
736static struct attribute *lp5523_attributes[] = {
737 &dev_attr_engine1_mode.attr,
738 &dev_attr_engine2_mode.attr,
739 &dev_attr_engine3_mode.attr,
740 &dev_attr_selftest.attr,
741 NULL
742};
743
744static struct attribute *lp5523_engine1_attributes[] = {
745 &dev_attr_engine1_load.attr,
746 &dev_attr_engine1_leds.attr,
747 NULL
748};
749
750static struct attribute *lp5523_engine2_attributes[] = {
751 &dev_attr_engine2_load.attr,
752 &dev_attr_engine2_leds.attr,
753 NULL
754};
755
756static struct attribute *lp5523_engine3_attributes[] = {
757 &dev_attr_engine3_load.attr,
758 &dev_attr_engine3_leds.attr,
759 NULL
760};
761
762static const struct attribute_group lp5523_group = {
763 .attrs = lp5523_attributes,
764};
765
766static const struct attribute_group lp5523_engine_group[] = {
767 {.attrs = lp5523_engine1_attributes },
768 {.attrs = lp5523_engine2_attributes },
769 {.attrs = lp5523_engine3_attributes },
770};
771
772static int lp5523_register_sysfs(struct i2c_client *client)
773{
774 struct device *dev = &client->dev;
775 int ret;
776
777 ret = sysfs_create_group(&dev->kobj, &lp5523_group);
778 if (ret < 0)
779 return ret;
780
781 return 0;
782}
783
784static void lp5523_unregister_sysfs(struct i2c_client *client)
785{
786 struct lp5523_chip *chip = i2c_get_clientdata(client);
787 struct device *dev = &client->dev;
788 int i;
789
790 sysfs_remove_group(&dev->kobj, &lp5523_group);
791
792 for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
793 if (chip->engines[i].mode == LP5523_CMD_LOAD)
794 sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
795
796 for (i = 0; i < chip->num_leds; i++)
797 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
798 &lp5523_led_attribute_group);
799}
800
801/*--------------------------------------------------------------*/
802/* Set chip operating mode */
803/*--------------------------------------------------------------*/
804static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
805{
806 /* engine to chip */
807 struct lp5523_chip *chip = engine_to_lp5523(engine);
808 struct i2c_client *client = chip->client;
809 struct device *dev = &client->dev;
810 int ret = 0;
811
812	/* already in the requested mode: do nothing, except for run */
813 if (mode == engine->mode && mode != LP5523_CMD_RUN)
814 return 0;
815
816 if (mode == LP5523_CMD_RUN) {
817 ret = lp5523_run_program(engine);
818 } else if (mode == LP5523_CMD_LOAD) {
819 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
820 lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
821
822 ret = sysfs_create_group(&dev->kobj, engine->attributes);
823 if (ret)
824 return ret;
825 } else if (mode == LP5523_CMD_DISABLED) {
826 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
827 }
828
829 /* remove load attribute from sysfs if not in load mode */
830 if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
831 sysfs_remove_group(&dev->kobj, engine->attributes);
832
833 engine->mode = mode;
834
835 return ret;
836}
837
838/*--------------------------------------------------------------*/
839/* Probe, Attach, Remove */
840/*--------------------------------------------------------------*/
841static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
842{
843 if (id < 1 || id > LP5523_ENGINES)
844 return -1;
845 engine->id = id;
846 engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
847 engine->prog_page = id - 1;
848 engine->mux_page = id + 2;
849 engine->attributes = &lp5523_engine_group[id - 1];
850
851 return 0;
852}
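/* With this numbering engine 1 gets program page 0 and mux page 3,
 * engine 2 pages 1 and 4, engine 3 pages 2 and 5, which together cover
 * the six pages advertised by LP5523_PROGRAM_PAGES. */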
853
854static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
855 int chan, struct lp5523_platform_data *pdata)
856{
857 char name[32];
858 int res;
859
860 if (chan >= LP5523_LEDS)
861 return -EINVAL;
862
863 if (pdata->led_config[chan].led_current) {
864 led->led_current = pdata->led_config[chan].led_current;
865 led->max_current = pdata->led_config[chan].max_current;
866 led->chan_nr = pdata->led_config[chan].chan_nr;
867
868 if (led->chan_nr >= LP5523_LEDS) {
869 dev_err(dev, "Use channel numbers between 0 and %d\n",
870 LP5523_LEDS - 1);
871 return -EINVAL;
872 }
873
874 snprintf(name, 32, "lp5523:channel%d", chan);
875
876 led->cdev.name = name;
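		/* Note: the LED class keeps this pointer rather than
		 * copying the string, so an on-stack buffer is only
		 * valid while registration messages are printed;
		 * storage tied to the LED's lifetime would be safer. */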
877 led->cdev.brightness_set = lp5523_set_brightness;
878 res = led_classdev_register(dev, &led->cdev);
879 if (res < 0) {
880 dev_err(dev, "couldn't register led on channel %d\n",
881 chan);
882 return res;
883 }
884 res = sysfs_create_group(&led->cdev.dev->kobj,
885 &lp5523_led_attribute_group);
886 if (res < 0) {
887 dev_err(dev, "couldn't register current attribute\n");
888 led_classdev_unregister(&led->cdev);
889 return res;
890 }
891 } else {
892 led->led_current = 0;
893 }
894 return 0;
895}
896
897static struct i2c_driver lp5523_driver;
898
899static int lp5523_probe(struct i2c_client *client,
900 const struct i2c_device_id *id)
901{
902 struct lp5523_chip *chip;
903 struct lp5523_platform_data *pdata;
904 int ret, i, led;
905
906 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
907 if (!chip)
908 return -ENOMEM;
909
910 i2c_set_clientdata(client, chip);
911 chip->client = client;
912
913 pdata = client->dev.platform_data;
914
915 if (!pdata) {
916 dev_err(&client->dev, "no platform data\n");
917 ret = -EINVAL;
918 goto fail1;
919 }
920
921 mutex_init(&chip->lock);
922
923 chip->pdata = pdata;
924
925 if (pdata->setup_resources) {
926 ret = pdata->setup_resources();
927 if (ret < 0)
928 goto fail1;
929 }
930
931 if (pdata->enable) {
932 pdata->enable(0);
933 usleep_range(1000, 10000);
934 pdata->enable(1);
935 usleep_range(1000, 10000); /* Spec says min 500us */
936 }
937
938 ret = lp5523_detect(client);
939 if (ret)
940 goto fail2;
941
942	dev_info(&client->dev, "LP5523 programmable LED chip found\n");
943
944 /* Initialize engines */
945 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
946 ret = lp5523_init_engine(&chip->engines[i], i + 1);
947 if (ret) {
948 dev_err(&client->dev, "error initializing engine\n");
949 goto fail2;
950 }
951 }
952 ret = lp5523_configure(client);
953 if (ret < 0) {
954 dev_err(&client->dev, "error configuring chip\n");
955 goto fail2;
956 }
957
958 /* Initialize leds */
959 chip->num_channels = pdata->num_channels;
960 chip->num_leds = 0;
961 led = 0;
962 for (i = 0; i < pdata->num_channels; i++) {
963 /* Do not initialize channels that are not connected */
964 if (pdata->led_config[i].led_current == 0)
965 continue;
966
967 ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
968 if (ret) {
969 dev_err(&client->dev, "error initializing leds\n");
970 goto fail3;
971 }
972 chip->num_leds++;
973
974 chip->leds[led].id = led;
975 /* Set LED current */
976 lp5523_write(client,
977 LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
978 chip->leds[led].led_current);
979
980 INIT_WORK(&(chip->leds[led].brightness_work),
981 lp5523_led_brightness_work);
982
983 led++;
984 }
985
986 ret = lp5523_register_sysfs(client);
987 if (ret) {
988 dev_err(&client->dev, "registering sysfs failed\n");
989 goto fail3;
990 }
991 return ret;
992fail3:
993 for (i = 0; i < chip->num_leds; i++) {
994 led_classdev_unregister(&chip->leds[i].cdev);
995 cancel_work_sync(&chip->leds[i].brightness_work);
996 }
997fail2:
998 if (pdata->enable)
999 pdata->enable(0);
1000 if (pdata->release_resources)
1001 pdata->release_resources();
1002fail1:
1003 kfree(chip);
1004 return ret;
1005}
1006
1007static int lp5523_remove(struct i2c_client *client)
1008{
1009 struct lp5523_chip *chip = i2c_get_clientdata(client);
1010 int i;
1011
1012 lp5523_unregister_sysfs(client);
1013
1014 for (i = 0; i < chip->num_leds; i++) {
1015 led_classdev_unregister(&chip->leds[i].cdev);
1016 cancel_work_sync(&chip->leds[i].brightness_work);
1017 }
1018
1019 if (chip->pdata->enable)
1020 chip->pdata->enable(0);
1021 if (chip->pdata->release_resources)
1022 chip->pdata->release_resources();
1023 kfree(chip);
1024 return 0;
1025}
1026
1027static const struct i2c_device_id lp5523_id[] = {
1028 { "lp5523", 0 },
1029 { }
1030};
1031
1032MODULE_DEVICE_TABLE(i2c, lp5523_id);
1033
1034static struct i2c_driver lp5523_driver = {
1035 .driver = {
1036 .name = "lp5523",
1037 },
1038 .probe = lp5523_probe,
1039 .remove = lp5523_remove,
1040 .id_table = lp5523_id,
1041};
1042
1043static int __init lp5523_init(void)
1044{
1045 int ret;
1046
1047 ret = i2c_add_driver(&lp5523_driver);
1048
1049 if (ret < 0)
1050 printk(KERN_ALERT "Adding lp5523 driver failed\n");
1051
1052 return ret;
1053}
1054
1055static void __exit lp5523_exit(void)
1056{
1057 i2c_del_driver(&lp5523_driver);
1058}
1059
1060module_init(lp5523_init);
1061module_exit(lp5523_exit);
1062
1063MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
1064MODULE_DESCRIPTION("LP5523 LED engine");
1065MODULE_LICENSE("GPL");
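The sysfs interface above can be driven with plain file I/O from user space. A minimal sketch follows; the device path, channel mapping and program bytes are assumptions that depend on the I2C bus and board wiring:

#include <stdio.h>

/* Assumed sysfs directory for the probed lp5523 device */
#define DEV "/sys/bus/i2c/devices/0-0032"

static int wr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), DEV "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	wr("engine1_mode", "load");		/* exposes engine1_load and engine1_leds */
	wr("engine1_leds", "111000000");	/* attach channels 0-2 to engine 1 */
	wr("engine1_load", "9d80 4000 0d7f 0000");	/* illustrative bytes */
	wr("engine1_mode", "run");
	return 0;
}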
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 3063f591f0dc..1739557a9038 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -92,3 +92,5 @@ unmap:
92} 92}
93 93
94arch_initcall(soekris_init); 94arch_initcall(soekris_init);
95
96MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd482ff..b09bcbeade9c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/jiffies.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/init.h> 16#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20#include <linux/device.h> 17#include <linux/device.h>
21#include <linux/sysdev.h>
22#include <linux/timer.h>
23#include <linux/ctype.h> 18#include <linux/ctype.h>
24#include <linux/leds.h> 19#include <linux/leds.h>
25#include <linux/slab.h>
26#include "leds.h" 20#include "leds.h"
27 21
28struct timer_trig_data {
29 int brightness_on; /* LED brightness during "on" period.
30 * (LED_OFF < brightness_on <= LED_FULL)
31 */
32 unsigned long delay_on; /* milliseconds on */
33 unsigned long delay_off; /* milliseconds off */
34 struct timer_list timer;
35};
36
37static void led_timer_function(unsigned long data)
38{
39 struct led_classdev *led_cdev = (struct led_classdev *) data;
40 struct timer_trig_data *timer_data = led_cdev->trigger_data;
41 unsigned long brightness;
42 unsigned long delay;
43
44 if (!timer_data->delay_on || !timer_data->delay_off) {
45 led_set_brightness(led_cdev, LED_OFF);
46 return;
47 }
48
49 brightness = led_get_brightness(led_cdev);
50 if (!brightness) {
51 /* Time to switch the LED on. */
52 brightness = timer_data->brightness_on;
53 delay = timer_data->delay_on;
54 } else {
55 /* Store the current brightness value to be able
56 * to restore it when the delay_off period is over.
57 */
58 timer_data->brightness_on = brightness;
59 brightness = LED_OFF;
60 delay = timer_data->delay_off;
61 }
62
63 led_set_brightness(led_cdev, brightness);
64
65 mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
66}
67
68static ssize_t led_delay_on_show(struct device *dev, 22static ssize_t led_delay_on_show(struct device *dev,
69 struct device_attribute *attr, char *buf) 23 struct device_attribute *attr, char *buf)
70{ 24{
71 struct led_classdev *led_cdev = dev_get_drvdata(dev); 25 struct led_classdev *led_cdev = dev_get_drvdata(dev);
72 struct timer_trig_data *timer_data = led_cdev->trigger_data;
73 26
74 return sprintf(buf, "%lu\n", timer_data->delay_on); 27 return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
75} 28}
76 29
77static ssize_t led_delay_on_store(struct device *dev, 30static ssize_t led_delay_on_store(struct device *dev,
78 struct device_attribute *attr, const char *buf, size_t size) 31 struct device_attribute *attr, const char *buf, size_t size)
79{ 32{
80 struct led_classdev *led_cdev = dev_get_drvdata(dev); 33 struct led_classdev *led_cdev = dev_get_drvdata(dev);
81 struct timer_trig_data *timer_data = led_cdev->trigger_data;
82 int ret = -EINVAL; 34 int ret = -EINVAL;
83 char *after; 35 char *after;
84 unsigned long state = simple_strtoul(buf, &after, 10); 36 unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev,
88 count++; 40 count++;
89 41
90 if (count == size) { 42 if (count == size) {
91 if (timer_data->delay_on != state) { 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
92 /* the new value differs from the previous */
93 timer_data->delay_on = state;
94
95 /* deactivate previous settings */
96 del_timer_sync(&timer_data->timer);
97
98 /* try to activate hardware acceleration, if any */
99 if (!led_cdev->blink_set ||
100 led_cdev->blink_set(led_cdev,
101 &timer_data->delay_on, &timer_data->delay_off)) {
102 /* no hardware acceleration, blink via timer */
103 mod_timer(&timer_data->timer, jiffies + 1);
104 }
105 }
106 ret = count; 44 ret = count;
107 } 45 }
108 46
@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev,
113 struct device_attribute *attr, char *buf) 51 struct device_attribute *attr, char *buf)
114{ 52{
115 struct led_classdev *led_cdev = dev_get_drvdata(dev); 53 struct led_classdev *led_cdev = dev_get_drvdata(dev);
116 struct timer_trig_data *timer_data = led_cdev->trigger_data;
117 54
118 return sprintf(buf, "%lu\n", timer_data->delay_off); 55 return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
119} 56}
120 57
121static ssize_t led_delay_off_store(struct device *dev, 58static ssize_t led_delay_off_store(struct device *dev,
122 struct device_attribute *attr, const char *buf, size_t size) 59 struct device_attribute *attr, const char *buf, size_t size)
123{ 60{
124 struct led_classdev *led_cdev = dev_get_drvdata(dev); 61 struct led_classdev *led_cdev = dev_get_drvdata(dev);
125 struct timer_trig_data *timer_data = led_cdev->trigger_data;
126 int ret = -EINVAL; 62 int ret = -EINVAL;
127 char *after; 63 char *after;
128 unsigned long state = simple_strtoul(buf, &after, 10); 64 unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev,
132 count++; 68 count++;
133 69
134 if (count == size) { 70 if (count == size) {
135 if (timer_data->delay_off != state) { 71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
136 /* the new value differs from the previous */
137 timer_data->delay_off = state;
138
139 /* deactivate previous settings */
140 del_timer_sync(&timer_data->timer);
141
142 /* try to activate hardware acceleration, if any */
143 if (!led_cdev->blink_set ||
144 led_cdev->blink_set(led_cdev,
145 &timer_data->delay_on, &timer_data->delay_off)) {
146 /* no hardware acceleration, blink via timer */
147 mod_timer(&timer_data->timer, jiffies + 1);
148 }
149 }
150 ret = count; 72 ret = count;
151 } 73 }
152 74
@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
158 80
159static void timer_trig_activate(struct led_classdev *led_cdev) 81static void timer_trig_activate(struct led_classdev *led_cdev)
160{ 82{
161 struct timer_trig_data *timer_data;
162 int rc; 83 int rc;
163 84
164 timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL); 85 led_cdev->trigger_data = NULL;
165 if (!timer_data)
166 return;
167
168 timer_data->brightness_on = led_get_brightness(led_cdev);
169 if (timer_data->brightness_on == LED_OFF)
170 timer_data->brightness_on = led_cdev->max_brightness;
171 led_cdev->trigger_data = timer_data;
172
173 init_timer(&timer_data->timer);
174 timer_data->timer.function = led_timer_function;
175 timer_data->timer.data = (unsigned long) led_cdev;
176 86
177 rc = device_create_file(led_cdev->dev, &dev_attr_delay_on); 87 rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
178 if (rc) 88 if (rc)
179 goto err_out; 89 return;
180 rc = device_create_file(led_cdev->dev, &dev_attr_delay_off); 90 rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
181 if (rc) 91 if (rc)
182 goto err_out_delayon; 92 goto err_out_delayon;
183 93
184 /* If there is hardware support for blinking, start one 94 led_cdev->trigger_data = (void *)1;
185 * user friendly blink rate chosen by the driver.
186 */
187 if (led_cdev->blink_set)
188 led_cdev->blink_set(led_cdev,
189 &timer_data->delay_on, &timer_data->delay_off);
190 95
191 return; 96 return;
192 97
193err_out_delayon: 98err_out_delayon:
194 device_remove_file(led_cdev->dev, &dev_attr_delay_on); 99 device_remove_file(led_cdev->dev, &dev_attr_delay_on);
195err_out:
196 led_cdev->trigger_data = NULL;
197 kfree(timer_data);
198} 100}
199 101
200static void timer_trig_deactivate(struct led_classdev *led_cdev) 102static void timer_trig_deactivate(struct led_classdev *led_cdev)
201{ 103{
202 struct timer_trig_data *timer_data = led_cdev->trigger_data; 104 if (led_cdev->trigger_data) {
203 unsigned long on = 0, off = 0;
204
205 if (timer_data) {
206 device_remove_file(led_cdev->dev, &dev_attr_delay_on); 105 device_remove_file(led_cdev->dev, &dev_attr_delay_on);
207 device_remove_file(led_cdev->dev, &dev_attr_delay_off); 106 device_remove_file(led_cdev->dev, &dev_attr_delay_off);
208 del_timer_sync(&timer_data->timer);
209 kfree(timer_data);
210 } 107 }
211 108
212 /* If there is hardware support for blinking, stop it */ 109 /* Stop blinking */
213 if (led_cdev->blink_set) 110 led_brightness_set(led_cdev, LED_OFF);
214 led_cdev->blink_set(led_cdev, &on, &off);
215} 111}
216 112
217static struct led_trigger timer_led_trigger = { 113static struct led_trigger timer_led_trigger = {
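The trigger above no longer keeps a private timer: the delay_on/delay_off values now live in struct led_classdev, and the work is delegated to the new core helper led_blink_set(), which uses the driver's blink_set() hook when one exists and a core software timer otherwise. A sketch of how a driver might use the helper, assuming it already holds a registered classdev:

#include <linux/leds.h>

/* Blink at 500 ms on / 500 ms off; passing two zeroes instead would
 * request a driver-chosen (or core default) rate. */
static void example_blink(struct led_classdev *cdev)
{
	unsigned long on = 500, off = 500;	/* milliseconds */

	led_blink_set(cdev, &on, &off);
}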
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 444696625171..f5f4da3d0b67 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state)
80static void adb_iop_complete(struct iop_msg *msg) 80static void adb_iop_complete(struct iop_msg *msg)
81{ 81{
82 struct adb_request *req; 82 struct adb_request *req;
83 uint flags; 83 unsigned long flags;
84 84
85 local_irq_save(flags); 85 local_irq_save(flags);
86 86
@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg)
103{ 103{
104 struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message; 104 struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
105 struct adb_request *req; 105 struct adb_request *req;
106 uint flags; 106 unsigned long flags;
107#ifdef DEBUG_ADB_IOP 107#ifdef DEBUG_ADB_IOP
108 int i; 108 int i;
109#endif 109#endif
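local_irq_save() is a macro that stores the saved flags word into its argument, which therefore must be an arch-sized unsigned long; a plain uint is a type error waiting to truncate the value on any 64-bit configuration. The canonical pattern, as a fragment:

	unsigned long flags;

	local_irq_save(flags);
	/* ... critical section ... */
	local_irq_restore(flags);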
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e957f3140a8..324a3663fcda 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -706,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel)
706/* return the offset of the super block in 512byte sectors */ 706/* return the offset of the super block in 512byte sectors */
707static inline sector_t calc_dev_sboffset(struct block_device *bdev) 707static inline sector_t calc_dev_sboffset(struct block_device *bdev)
708{ 708{
709 sector_t num_sectors = bdev->bd_inode->i_size / 512; 709 sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
710 return MD_NEW_SIZE_SECTORS(num_sectors); 710 return MD_NEW_SIZE_SECTORS(num_sectors);
711} 711}
712 712
@@ -1386,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1386 */ 1386 */
1387 switch(minor_version) { 1387 switch(minor_version) {
1388 case 0: 1388 case 0:
1389 sb_start = rdev->bdev->bd_inode->i_size >> 9; 1389 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1390 sb_start -= 8*2; 1390 sb_start -= 8*2;
1391 sb_start &= ~(sector_t)(4*2-1); 1391 sb_start &= ~(sector_t)(4*2-1);
1392 break; 1392 break;
@@ -1472,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1472 ret = 0; 1472 ret = 0;
1473 } 1473 }
1474 if (minor_version) 1474 if (minor_version)
1475 rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) - 1475 rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
1476 le64_to_cpu(sb->data_offset); 1476 le64_to_cpu(sb->data_offset);
1477 else 1477 else
1478 rdev->sectors = rdev->sb_start; 1478 rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1680 return 0; /* component must fit device */ 1680 return 0; /* component must fit device */
1681 if (rdev->sb_start < rdev->data_offset) { 1681 if (rdev->sb_start < rdev->data_offset) {
1682 /* minor versions 1 and 2; superblock before data */ 1682 /* minor versions 1 and 2; superblock before data */
1683 max_sectors = rdev->bdev->bd_inode->i_size >> 9; 1683 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1684 max_sectors -= rdev->data_offset; 1684 max_sectors -= rdev->data_offset;
1685 if (!num_sectors || num_sectors > max_sectors) 1685 if (!num_sectors || num_sectors > max_sectors)
1686 num_sectors = max_sectors; 1686 num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1690 } else { 1690 } else {
1691 /* minor version 0; superblock after data */ 1691 /* minor version 0; superblock after data */
1692 sector_t sb_start; 1692 sector_t sb_start;
1693 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; 1693 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1694 sb_start &= ~(sector_t)(4*2 - 1); 1694 sb_start &= ~(sector_t)(4*2 - 1);
1695 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 1695 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1696 if (!num_sectors || num_sectors > max_sectors) 1696 if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2584 if (!sectors) 2584 if (!sectors)
2585 return -EBUSY; 2585 return -EBUSY;
2586 } else if (!sectors) 2586 } else if (!sectors)
2587 sectors = (rdev->bdev->bd_inode->i_size >> 9) - 2587 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2588 rdev->data_offset; 2588 rdev->data_offset;
2589 } 2589 }
2590 if (sectors < my_mddev->dev_sectors) 2590 if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2797 2797
2798 kobject_init(&rdev->kobj, &rdev_ktype); 2798 kobject_init(&rdev->kobj, &rdev_ktype);
2799 2799
2800 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2800 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
2801 if (!size) { 2801 if (!size) {
2802 printk(KERN_WARNING 2802 printk(KERN_WARNING
2803 "md: %s has zero or unknown size, marking faulty!\n", 2803 "md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5235 5235
5236 if (!mddev->persistent) { 5236 if (!mddev->persistent) {
5237 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5237 printk(KERN_INFO "md: nonpersistent superblock ...\n");
5238 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5238 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5239 } else 5239 } else
5240 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5240 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5241 rdev->sectors = rdev->sb_start; 5241 rdev->sectors = rdev->sb_start;
5242 5242
@@ -5306,7 +5306,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
5306 if (mddev->persistent) 5306 if (mddev->persistent)
5307 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5307 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5308 else 5308 else
5309 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5309 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5310 5310
5311 rdev->sectors = rdev->sb_start; 5311 rdev->sectors = rdev->sb_start;
5312 5312
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index f9b91ba8900c..0ed09358027e 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
123{ 123{
124 struct i2c_client *client = to_i2c_client(dev); 124 struct i2c_client *client = to_i2c_client(dev);
125 struct als_data *data = i2c_get_clientdata(client); 125 struct als_data *data = i2c_get_clientdata(client);
126 unsigned int ret_val; 126 int ret_val;
127 unsigned long val; 127 unsigned long val;
128 128
129 if (strict_strtoul(buf, 10, &val)) 129 if (strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index cee632e645e1..d79a972f2c79 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev,
649{ 649{
650 struct bh1770_chip *chip = dev_get_drvdata(dev); 650 struct bh1770_chip *chip = dev_get_drvdata(dev);
651 unsigned long value; 651 unsigned long value;
652 size_t ret; 652 ssize_t ret;
653 653
654 if (strict_strtoul(buf, 0, &value)) 654 if (strict_strtoul(buf, 0, &value))
655 return -EINVAL; 655 return -EINVAL;
@@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev,
659 pm_runtime_get_sync(dev); 659 pm_runtime_get_sync(dev);
660 660
661 ret = bh1770_lux_rate(chip, chip->lux_rate_index); 661 ret = bh1770_lux_rate(chip, chip->lux_rate_index);
662 ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); 662 if (ret < 0) {
663 pm_runtime_put(dev);
664 goto leave;
665 }
663 666
667 ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
664 if (ret < 0) { 668 if (ret < 0) {
665 pm_runtime_put(dev); 669 pm_runtime_put(dev);
666 goto leave; 670 goto leave;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 34fe835921c4..ca47e6285075 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count) 87 struct device_attribute *attr, const char *buf, size_t count)
88{ 88{
89 struct i2c_client *client = to_i2c_client(dev); 89 struct i2c_client *client = to_i2c_client(dev);
90 unsigned int ret_val; 90 int ret_val;
91 unsigned long val; 91 unsigned long val;
92 92
93 if (strict_strtoul(buf, 10, &val)) 93 if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev,
106 val = 4; 106 val = 4;
107 107
108 ret_val = i2c_smbus_read_byte_data(client, 0x00); 108 ret_val = i2c_smbus_read_byte_data(client, 0x00);
109 if (ret_val < 0)
110 return ret_val;
109 111
110 ret_val &= 0xFC; /*reset the bit before setting them */ 112 ret_val &= 0xFC; /*reset the bit before setting them */
111 ret_val |= val - 1; 113 ret_val |= val - 1;
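i2c_smbus_read_byte_data() returns the byte on success and a negative errno on failure, so the result has to land in a signed int before the newly added error check can fire; with the old unsigned type the comparison could never be true:

	int ret_val = i2c_smbus_read_byte_data(client, 0x00);
	if (ret_val < 0)	/* unreachable when ret_val is unsigned */
		return ret_val;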
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..475a66d95b34 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
201#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */ 201#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
202#define RX_BUF_END (dev->mem_end - dev->mem_start) 202#define RX_BUF_END (dev->mem_end - dev->mem_start)
203 203
204#define TX_TIMEOUT 5 204#define TX_TIMEOUT (HZ/20)
205 205
206/* 206/*
207 That's it: only 86 bytes to set up the beast, including every extra 207 That's it: only 86 bytes to set up the beast, including every extra
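TX_TIMEOUT is handed to the netdev watchdog in jiffies, so a bare 5 is HZ dependent: 50 ms with HZ=100 but only 5 ms with HZ=1000, short enough to trigger spurious transmit resets. (HZ/20) is 50 ms on every configuration; an equivalent spelling, assuming the driver can pull in <linux/jiffies.h>, is:

	#define TX_TIMEOUT	msecs_to_jiffies(50)	/* 50 ms regardless of CONFIG_HZ */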
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
98#define WAIT_TX_AVAIL 200 98#define WAIT_TX_AVAIL 200
99 99
100/* Operational parameter that usually are not changed. */ 100/* Operational parameter that usually are not changed. */
101#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */ 101#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
102 102
103/* The size here is somewhat misleading: the Corkscrew also uses the ISA 103/* The size here is somewhat misleading: the Corkscrew also uses the ISA
104 aliased registers at <base>+0x400. 104 aliased registers at <base>+0x400.
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
699#define DEVICE_PCI(dev) NULL 699#define DEVICE_PCI(dev) NULL
700#endif 700#endif
701 701
702#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL) 702#define VORTEX_PCI(vp) \
703 ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
703 704
704#ifdef CONFIG_EISA 705#ifdef CONFIG_EISA
705#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) 706#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
707#define DEVICE_EISA(dev) NULL 708#define DEVICE_EISA(dev) NULL
708#endif 709#endif
709 710
710#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL) 711#define VORTEX_EISA(vp) \
712 ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
711 713
712/* The action to take with a media selection timer tick. 714/* The action to take with a media selection timer tick.
713 Note that we deviate from the 3Com order by checking 10base2 before AUI. 715 Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
490{ 490{
491 unsigned int protocol = (status >> 16) & 0x3; 491 unsigned int protocol = (status >> 16) & 0x3;
492 492
493 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) 493 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
494 ((protocol == RxProtoUDP) && !(status & UDPFail)))
494 return 1; 495 return 1;
495 else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) 496 else
496 return 1; 497 return 0;
497 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
498 return 1;
499 return 0;
500} 498}
501 499
502static int cp_rx_poll(struct napi_struct *napi, int budget) 500static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f5166dccd8df..98517a373473 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1092,10 +1092,11 @@ err_out:
1092static void __devexit rtl8139_remove_one (struct pci_dev *pdev) 1092static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
1093{ 1093{
1094 struct net_device *dev = pci_get_drvdata (pdev); 1094 struct net_device *dev = pci_get_drvdata (pdev);
1095 struct rtl8139_private *tp = netdev_priv(dev);
1095 1096
1096 assert (dev != NULL); 1097 assert (dev != NULL);
1097 1098
1098 flush_scheduled_work(); 1099 cancel_delayed_work_sync(&tp->thread);
1099 1100
1100 unregister_netdev (dev); 1101 unregister_netdev (dev);
1101 1102
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
191#define RX_SUSPEND 0x0030 191#define RX_SUSPEND 0x0030
192#define RX_ABORT 0x0040 192#define RX_ABORT 0x0040
193 193
194#define TX_TIMEOUT 5 194#define TX_TIMEOUT (HZ/20)
195 195
196 196
197struct i596_reg { 197struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..a20693fcb321 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
1533 1533
1534 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 1534 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
1535 1535
1536 to identify the adapter. 1536 to identify the adapter.
1537 1537
1538 For the latest Intel PRO/100 network driver for Linux, see: 1538 For the latest Intel PRO/100 network driver for Linux, see:
1539 1539
@@ -1786,17 +1786,17 @@ config KS8842
1786 tristate "Micrel KSZ8841/42 with generic bus interface" 1786 tristate "Micrel KSZ8841/42 with generic bus interface"
1787 depends on HAS_IOMEM && DMA_ENGINE 1787 depends on HAS_IOMEM && DMA_ENGINE
1788 help 1788 help
1789 This platform driver is for KSZ8841(1-port) / KS8842(2-port) 1789 This platform driver is for KSZ8841(1-port) / KS8842(2-port)
1790 ethernet switch chip (managed, VLAN, QoS) from Micrel or 1790 ethernet switch chip (managed, VLAN, QoS) from Micrel or
1791 Timberdale(FPGA). 1791 Timberdale(FPGA).
1792 1792
1793config KS8851 1793config KS8851
1794 tristate "Micrel KS8851 SPI" 1794 tristate "Micrel KS8851 SPI"
1795 depends on SPI 1795 depends on SPI
1796 select MII 1796 select MII
1797 select CRC32 1797 select CRC32
1798 help 1798 help
1799 SPI driver for Micrel KS8851 SPI attached network chip. 1799 SPI driver for Micrel KS8851 SPI attached network chip.
1800 1800
1801config KS8851_MLL 1801config KS8851_MLL
1802 tristate "Micrel KS8851 MLL" 1802 tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
2133 will be called ipg. This is recommended. 2133 will be called ipg. This is recommended.
2134 2134
2135config IGB 2135config IGB
2136 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" 2136 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
2137 depends on PCI 2137 depends on PCI
2138 ---help--- 2138 ---help---
2139 This driver supports Intel(R) 82575/82576 gigabit ethernet family of 2139 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
2140 adapters. For more information on how to identify your adapter, go 2140 adapters. For more information on how to identify your adapter, go
2141 to the Adapter & Driver ID Guide at: 2141 to the Adapter & Driver ID Guide at:
2142 2142
2143 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 2143 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2144 2144
2145 For general information and support, go to the Intel support 2145 For general information and support, go to the Intel support
2146 website at: 2146 website at:
2147 2147
2148 <http://support.intel.com> 2148 <http://support.intel.com>
2149 2149
2150 More specific information on configuring the driver is in 2150 More specific information on configuring the driver is in
2151 <file:Documentation/networking/e1000.txt>. 2151 <file:Documentation/networking/e1000.txt>.
2152 2152
2153 To compile this driver as a module, choose M here. The module 2153 To compile this driver as a module, choose M here. The module
2154 will be called igb. 2154 will be called igb.
2155 2155
2156config IGB_DCA 2156config IGB_DCA
2157 bool "Direct Cache Access (DCA) Support" 2157 bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
2163 is used, with the intent of lessening the impact of cache misses. 2163 is used, with the intent of lessening the impact of cache misses.
2164 2164
2165config IGBVF 2165config IGBVF
2166 tristate "Intel(R) 82576 Virtual Function Ethernet support" 2166 tristate "Intel(R) 82576 Virtual Function Ethernet support"
2167 depends on PCI 2167 depends on PCI
2168 ---help--- 2168 ---help---
2169 This driver supports Intel(R) 82576 virtual functions. For more 2169 This driver supports Intel(R) 82576 virtual functions. For more
2170 information on how to identify your adapter, go to the Adapter & 2170 information on how to identify your adapter, go to the Adapter &
2171 Driver ID Guide at: 2171 Driver ID Guide at:
2172 2172
2173 <http://support.intel.com/support/network/adapter/pro100/21397.htm> 2173 <http://support.intel.com/support/network/adapter/pro100/21397.htm>
2174 2174
2175 For general information and support, go to the Intel support 2175 For general information and support, go to the Intel support
2176 website at: 2176 website at:
2177 2177
2178 <http://support.intel.com> 2178 <http://support.intel.com>
2179 2179
2180 More specific information on configuring the driver is in 2180 More specific information on configuring the driver is in
2181 <file:Documentation/networking/e1000.txt>. 2181 <file:Documentation/networking/e1000.txt>.
2182 2182
2183 To compile this driver as a module, choose M here. The module 2183 To compile this driver as a module, choose M here. The module
2184 will be called igbvf. 2184 will be called igbvf.
2185 2185
2186source "drivers/net/ixp2000/Kconfig" 2186source "drivers/net/ixp2000/Kconfig"
2187 2187
@@ -2300,14 +2300,14 @@ config SKGE
2300 will be called skge. This is recommended. 2300 will be called skge. This is recommended.
2301 2301
2302config SKGE_DEBUG 2302config SKGE_DEBUG
2303 bool "Debugging interface" 2303 bool "Debugging interface"
2304 depends on SKGE && DEBUG_FS 2304 depends on SKGE && DEBUG_FS
2305 help 2305 help
2306 This option adds the ability to dump driver state for debugging. 2306 This option adds the ability to dump driver state for debugging.
2307 The file /sys/kernel/debug/skge/ethX displays the state of the internal 2307 The file /sys/kernel/debug/skge/ethX displays the state of the internal
2308 transmit and receive rings. 2308 transmit and receive rings.
2309 2309
2310 If unsure, say N. 2310 If unsure, say N.
2311 2311
2312config SKY2 2312config SKY2
2313 tristate "SysKonnect Yukon2 support" 2313 tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
 	  will be called sky2. This is recommended.
 
 config SKY2_DEBUG
 	bool "Debugging interface"
 	depends on SKY2 && DEBUG_FS
 	help
 	  This option adds the ability to dump driver state for debugging.
 	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 	  transmit and receive rings.
 
 	  If unsure, say N.
 
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@ config SPIDER_NET
 	  Cell Processor-Based Blades from IBM.
 
 config TSI108_ETH
 	tristate "Tundra TSI108 gigabit Ethernet support"
 	depends on TSI108_BRIDGE
 	help
 	  This driver supports Tundra TSI108 gigabit Ethernet ports.
 	  To compile this driver as a module, choose M here: the module
 	  will be called tsi108_eth.
 
 config GELIC_NET
 	tristate "PS3 Gigabit Ethernet driver"
@@ -2543,10 +2543,10 @@ config PCH_GBE
 	depends on PCI
 	select MII
 	---help---
-	  This is a gigabit ethernet driver for Topcliff PCH.
-	  Topcliff PCH is the platform controller hub that is used in Intel's
+	  This is a gigabit ethernet driver for EG20T PCH.
+	  EG20T PCH is the platform controller hub that is used in Intel's
 	  general embedded platform.
-	  Topcliff PCH has Gigabit Ethernet interface.
+	  EG20T PCH has Gigabit Ethernet interface.
 	  Using this interface, it is able to access system devices connected
 	  to Gigabit Ethernet.
 	  This driver enables Gigabit Ethernet function.
@@ -2573,32 +2573,32 @@ config MDIO
 	tristate
 
 config CHELSIO_T1
 	tristate "Chelsio 10Gb Ethernet support"
 	depends on PCI
 	select CRC32
 	select MDIO
 	help
 	  This driver supports Chelsio gigabit and 10-gigabit
 	  Ethernet cards. More information about adapter features and
 	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
 	  For general information about Chelsio and our products, visit
 	  our website at <http://www.chelsio.com>.
 
 	  For customer support, please visit our customer support page at
 	  <http://www.chelsio.com/support.html>.
 
 	  Please send feedback to <linux-bugs@chelsio.com>.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb.
 
 config CHELSIO_T1_1G
 	bool "Chelsio gigabit Ethernet support"
 	depends on CHELSIO_T1
 	help
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
 	tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
 	  If unsure, say N.
 
 config IXGBEVF
 	tristate "Intel(R) 82599 Virtual Function Ethernet support"
 	depends on PCI_MSI
 	---help---
 	  This driver supports Intel(R) 82599 virtual functions. For more
 	  information on how to identify your adapter, go to the Adapter &
 	  Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/sb/CS-008441.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/ixgbevf.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbevf. MSI-X interrupt support is required
 	  for this driver to work correctly.
 
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
 	  will be called ixgb.
 
 config S2IO
-	tristate "S2IO 10Gbe XFrame NIC"
+	tristate "Exar Xframe 10Gb Ethernet Adapter"
 	depends on PCI
 	---help---
-	  This driver supports the 10Gbe XFrame NIC of S2IO.
+	  This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/s2io.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called s2io.
+
 config VXGE
-	tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+	tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
 	depends on PCI && INET
 	---help---
-	  This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+	  This driver supports Exar Corp's X3100 Series 10 GbE PCIe
 	  I/O Virtualized Server Adapter.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/vxge.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
 	bool "Enabling All Debug trace statments in driver"
 	default n
 	depends on VXGE
 	---help---
 	  Say Y here if you want to enabling all the debug trace statements in
-	  driver. By default only few debug trace statements are enabled.
+	  the vxge driver. By default only few debug trace statements are
+	  enabled.
 
 config MYRI10GE
 	tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
 	  will be called qlge.
 
 config BNA
 	tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 	depends on PCI
 	---help---
 	  This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 	  cards.
 	  To compile this driver as a module, choose M here: the module
 	  will be called bna.
 
 	  For general information and support, go to the Brocade support
 	  website at:
 
 	  <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
 	  modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
 	tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
 	depends on PPP && EXPERIMENTAL
 	select CRYPTO
 	select CRYPTO_SHA1
 	select CRYPTO_ARC4
 	select CRYPTO_ECB
 	---help---
 	  Support for the MPPE Encryption protocol, as employed by the
 	  Microsoft Point-to-Point Tunneling Protocol.
 
 	  See http://pptpclient.sourceforge.net/ for information on
 	  configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
 	tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
 	depends on EXPERIMENTAL && VIRTIO
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
 	depends on PCI && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
 endif # NETDEVICES
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f21106efec..0c9217f48b72 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
 	return 0;
 }
 
-/*
- * Get the current statistics.
- */
-static struct net_device_stats *am79c961_getstats (struct net_device *dev)
-{
-	return &dev->stats;
-}
-
 static void am79c961_mc_hash(char *addr, unsigned short *hash)
 {
 	if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
 	.ndo_open		= am79c961_open,
 	.ndo_stop		= am79c961_close,
 	.ndo_start_xmit		= am79c961_sendpacket,
-	.ndo_get_stats		= am79c961_getstats,
 	.ndo_set_multicast_list	= am79c961_setmulticastlist,
 	.ndo_tx_timeout		= am79c961_timeout,
 	.ndo_validate_addr	= eth_validate_addr,
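
Note: the deleted am79c961_getstats() was a pure pass-through to
&dev->stats, which is already what the networking core uses when the
.ndo_get_stats hook is left NULL. A minimal sketch of that fallback,
assuming the 2.6.3x-era dev_get_stats() behaviour (simplified, not the
exact kernel code):

	#include <linux/netdevice.h>

	static struct net_device_stats *example_get_stats(struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_get_stats)
			return ops->ndo_get_stats(dev);	/* driver override */
		return &dev->stats;	/* core-maintained default */
	}
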
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
 #define TX_DESC_SIZE	10
 #define MAX_RBUFF_SZ	0x600
 #define MAX_TBUFF_SZ	0x600
-#define TX_TIMEOUT	50
+#define TX_TIMEOUT	(HZ/2)
 #define DELAY		1000
 #define CAM0		0x0
 
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..871b1633f543 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
 #define PORT_OFFSET(o) (o)
 
 
-#define TX_TIMEOUT		10
+#define TX_TIMEOUT		(HZ/10)
 
 
 /* Index to functions, as function prototypes. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
 #define RX_RING_LEN_BITS	(RX_LOG_RING_SIZE << 5)
 #define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
 
-#define TX_TIMEOUT	20
+#define TX_TIMEOUT	(HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
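
Note: the three TX_TIMEOUT conversions above (w90p910, at1700,
atarilance) share one motivation: netdev->watchdog_timeo is measured in
jiffies, so a bare constant shrinks or grows with CONFIG_HZ, while an
HZ-based expression keeps the same wall-clock timeout. A hypothetical
fragment showing the pattern (EXAMPLE_TX_TIMEOUT is illustrative, not
from any of these drivers):

	#include <linux/jiffies.h>	/* HZ */
	#include <linux/netdevice.h>

	/* 200 ms on HZ=100, HZ=250 and HZ=1000 builds alike; a bare "20"
	 * would have meant 200 ms, 80 ms or 20 ms respectively. */
	#define EXAMPLE_TX_TIMEOUT	(HZ / 5)

	static void example_setup_watchdog(struct net_device *dev)
	{
		dev->watchdog_timeo = EXAMPLE_TX_TIMEOUT;	/* jiffies */
	}
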
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 	addr[0] = addr[1] = 0;
 	AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
 	if (atl1c_check_eeprom_exist(hw)) {
-		if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+		if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
 			/* Enable OTP CLK */
 			if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
 				otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3b24ac..53363108994e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	atl1_pcie_patch(adapter);
 	/* assume we have no link for now */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 		    (unsigned long)adapter);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 35b14bec1207..4e6f4e95a5a0 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1504,8 +1504,8 @@ static void __devexit atl2_remove(struct pci_dev *pdev)
 
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_config_timer);
-
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->link_chg_task);
 
 	unregister_netdev(netdev);
 
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..b9debcfb61a0 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -106,8 +106,6 @@ MODULE_VERSION(DRV_VERSION);
  * complete immediately.
  */
 
-struct au1000_private *au_macs[NUM_ETH_INTERFACES];
-
 /*
  * board-specific configurations
  *
@@ -155,10 +153,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
 	spin_lock_irqsave(&aup->lock, flags);
 
 	if (force_reset || (!aup->mac_enabled)) {
-		writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 		au_sync_delay(2);
 		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
-			| MAC_EN_CLOCK_ENABLE), &aup->enable);
+			| MAC_EN_CLOCK_ENABLE), aup->enable);
 		au_sync_delay(2);
 
 		aup->mac_enabled = 1;
@@ -503,9 +501,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
 
 	au1000_hard_stop(dev);
 
-	writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 	au_sync_delay(2);
-	writel(0, &aup->enable);
+	writel(0, aup->enable);
 	au_sync_delay(2);
 
 	aup->tx_full = 0;
@@ -1119,7 +1117,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	/* set a random MAC now in case platform_data doesn't provide one */
 	random_ether_addr(dev->dev_addr);
 
-	writel(0, &aup->enable);
+	writel(0, aup->enable);
 	aup->mac_enabled = 0;
 
 	pd = pdev->dev.platform_data;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694b..4bebff3faeab 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
 static void
 ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
 	unsigned int memr;
 
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
 static unsigned int
 ax_phy_ei_inbits(struct net_device *dev, int no)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
 	unsigned int memr;
 	unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
 static int
 ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long flags;
 	unsigned int result;
 
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 static void
 ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
 {
-	struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei = netdev_priv(dev);
 	struct ax_device *ax = to_ax_dev(dev);
 	unsigned long flags;
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e86315b3f8..2e2b76258ab4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
 	__b44_set_flow_ctrl(bp, pause_enab);
 }
 
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
 static void b44_wap54g10_workaround(struct b44 *bp)
 {
-	const char *str;
+	char buf[20];
 	u32 val;
 	int err;
 
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
 	 * see https://dev.openwrt.org/ticket/146
 	 * check and reset bit "isolate"
 	 */
-	str = nvram_get("boardnum");
-	if (!str)
+	if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
 		return;
-	if (simple_strtoul(str, NULL, 0) == 2) {
+	if (simple_strtoul(buf, NULL, 0) == 2) {
 		err = __b44_readphy(bp, 0, MII_BMCR, &val);
 		if (err)
 			goto error;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ecfef240a303..e94a966af418 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1097,7 +1097,7 @@ static int bcm_enet_stop(struct net_device *dev)
 	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
 
 	/* make sure no mib update is scheduled */
-	flush_scheduled_work();
+	cancel_work_sync(&priv->mib_update_task);
 
 	/* disable dma & mac */
 	bcm_enet_disable_dma(priv, priv->tx_chan);
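
Note: the atl2 and bcm63xx changes above apply the same conversion:
cancel only the work items the driver itself queued instead of calling
flush_scheduled_work(), which waits on every item in the shared system
workqueue and can stall or deadlock on unrelated work. A hedged sketch
of the pattern, with hypothetical names:

	#include <linux/workqueue.h>

	struct example_priv {
		struct work_struct reset_task;	/* hypothetical work items */
		struct work_struct link_task;
	};

	static void example_teardown(struct example_priv *priv)
	{
		/* Waits only for these two items to finish (and blocks
		 * re-queueing), not for the whole system workqueue. */
		cancel_work_sync(&priv->reset_task);
		cancel_work_sync(&priv->link_task);
	}
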
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f66..9cab32328bba 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
 #define OC_NAME			"Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1		"Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE		OC_NAME	"(be3)"
+#define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID		0x19a2
+#define EMULEX_VENDOR_ID	0x10df
 #define BE_DEVICE_ID1		0x211
 #define BE_DEVICE_ID2		0x221
-#define OC_DEVICE_ID1		0x700
-#define OC_DEVICE_ID2		0x710
+#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
+#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
+#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
 	case OC_DEVICE_ID1:
 		return OC_NAME;
 	case OC_DEVICE_ID2:
-		return OC_NAME1;
+		return OC_NAME_BE;
+	case OC_DEVICE_ID3:
+		return OC_NAME_LANCER;
 	case BE_DEVICE_ID2:
 		return BE3_NAME;
 	default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
 	u16 min_eqd;		/* in usecs */
 	u16 max_eqd;		/* in usecs */
 	u16 cur_eqd;		/* in usecs */
+	u8  msix_vec_idx;
 
 	struct napi_struct napi;
 };
@@ -214,7 +220,9 @@ struct be_rx_obj {
 	struct be_rx_stats stats;
 	u8 rss_id;
 	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
-	u32 cache_line_barrier[16];
+	u16 last_frag_index;
+	u16 rsvd;
+	u32 cache_line_barrier[15];
 };
 
 struct be_vf_cfg {
@@ -260,6 +268,8 @@ struct be_adapter {
 	u32 num_rx_qs;
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
+	u8 msix_vec_next_idx;
+
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
 	u16 max_vlans;	/* Number of vlans supported */
@@ -299,8 +309,8 @@ struct be_adapter {
 
 	bool sriov_enabled;
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
-	u8 base_eq_id;
 	u8 is_virtfn;
+	u32 sli_family;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +319,8 @@ struct be_adapter {
 #define BE_GEN2 2
 #define BE_GEN3 3
 
+#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3)
+
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +428,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
 static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
 {
 	u8 data;
-
-	pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
-	pci_read_config_byte(adapter->pdev, 0xFE, &data);
-	adapter->is_virtfn = (data != 0xAA);
+	u32 sli_intf;
+
+	if (lancer_chip(adapter)) {
+		pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+								&sli_intf);
+		adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+	} else {
+		pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+		pci_read_config_byte(adapter->pdev, 0xFE, &data);
+		adapter->is_virtfn = (data != 0xAA);
+	}
 }
 
 static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
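
Note: on Lancer the VF/PF distinction comes from a function-type bit in
the SLI interface register rather than the BE2/BE3 config-space write
probe. An illustrative helper built only from this patch's constants
(the function name is hypothetical):

	static bool example_is_vf(u32 sli_intf)
	{
		return (sli_intf & SLI_INTF_FT_MASK) != 0;
	}
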
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..171a08caf2be 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
 
 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 {
-	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+	u32 sem;
+
+	if (lancer_chip(adapter))
+		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+	else
+		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 
 	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
 	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -680,16 +685,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->page_size = 1; /* 1 for 4K */
+		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+								no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+								ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+								ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+	} else {
+		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+								ctxt, no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+								ctxt, sol_evts);
+		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+	}
 
-	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
-	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
-	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
-						__ilog2_u32(cq->len/256));
-	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
-	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +762,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->cq_id = cpu_to_le16(cq->id);
+
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+								ctxt, cq->id);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+								ctxt, 1);
+
+	} else {
+		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+	}
 
-	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
-		be_encoded_q_len(mccq->len));
-	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-	req->async_event_bitmap[0] |= 0x00000022;
+	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1235,7 +1274,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
 
 		i = 0;
 		netdev_for_each_mc_addr(ha, netdev)
-			memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
 	} else {
 		req->promiscuous = 1;
 	}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f30..83d15c8a9fa3 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
 /******************** Create CQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
 	u8 cidx[11];		/* dword 0*/
 	u8 rsvd0;		/* dword 0*/
 	u8 coalescwm[2];	/* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
 	u8 rsvd5[32];		/* dword 3*/
 } __packed;
 
+struct amap_cq_context_lancer {
+	u8 rsvd0[12];		/* dword 0*/
+	u8 coalescwm[2];	/* dword 0*/
+	u8 nodelay;		/* dword 0*/
+	u8 rsvd1[12];		/* dword 0*/
+	u8 count[2];		/* dword 0*/
+	u8 valid;		/* dword 0*/
+	u8 rsvd2;		/* dword 0*/
+	u8 eventable;		/* dword 0*/
+	u8 eqid[16];		/* dword 1*/
+	u8 rsvd3[15];		/* dword 1*/
+	u8 armed;		/* dword 1*/
+	u8 rsvd4[32];		/* dword 2*/
+	u8 rsvd5[32];		/* dword 3*/
+} __packed;
+
 struct be_cmd_req_cq_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
-	u8 context[sizeof(struct amap_cq_context) / 8];
+	u8 page_size;
+	u8 rsvd0;
+	u8 context[sizeof(struct amap_cq_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
+
 struct be_cmd_resp_cq_create {
 	struct be_cmd_resp_hdr hdr;
 	u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
 /******************** Create MCCQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
 	u8 con_index[14];
 	u8 rsvd0[2];
 	u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
 	u8 rsvd2[32];
 } __packed;
 
+struct amap_mcc_context_lancer {
+	u8 async_cq_id[16];
+	u8 ring_size[4];
+	u8 rsvd0[12];
+	u8 rsvd1[31];
+	u8 valid;
+	u8 async_cq_valid[1];
+	u8 rsvd2[31];
+	u8 rsvd3[32];
+} __packed;
+
 struct be_cmd_req_mcc_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
+	u16 cq_id;
 	u32 async_event_bitmap[1];
-	u8 context[sizeof(struct amap_mcc_context) / 8];
+	u8 context[sizeof(struct amap_mcc_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
@@ -605,6 +634,7 @@ struct be_hw_stats {
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
+	u32 rsvd1[6];
 };
 
 struct be_cmd_req_get_stats {
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d733..4096d9778234 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
 #define MPU_EP_CONTROL 		0
 
 /********** MPU semphore ******************/
 #define MPU_EP_SEMAPHORE_OFFSET		0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET	0x400
 #define EP_SEMAPHORE_POST_STAGE_MASK		0x0000FFFF
 #define EP_SEMAPHORE_POST_ERR_MASK		0x1
 #define EP_SEMAPHORE_POST_ERR_SHIFT		31
+
 /* MPU semphore POST stage values */
 #define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting goahead from host */
 #define POST_STAGE_HOST_RDY 		0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
 #define PCICFG_UE_STATUS_LOW_MASK		0xA8
 #define PCICFG_UE_STATUS_HI_MASK		0xAC
 
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET			0x58
+#define SLI_INTF_VALID_MASK			0xE0000000
+#define SLI_INTF_VALID				0xC0000000
+#define SLI_INTF_HINT2_MASK			0x1F000000
+#define SLI_INTF_HINT2_SHIFT			24
+#define SLI_INTF_HINT1_MASK			0x00FF0000
+#define SLI_INTF_HINT1_SHIFT			16
+#define SLI_INTF_FAMILY_MASK			0x00000F00
+#define SLI_INTF_FAMILY_SHIFT			8
+#define SLI_INTF_IF_TYPE_MASK			0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT			12
+#define SLI_INTF_REV_MASK			0x000000F0
+#define SLI_INTF_REV_SHIFT			4
+#define SLI_INTF_FT_MASK			0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY		0x0
+#define LANCER_A0_SLI_FAMILY	0xA
+
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
 #define CEV_ISR_SIZE				4
@@ -73,6 +97,9 @@
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET		DB_CQ_OFFSET
 #define DB_EQ_RING_ID_MASK	0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK	0x3e00	/* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT	(2)	/* qid bits 9-13 placing at 11-15 */
+
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT		(9)	/* bit 9 */
 /* Must be 1 */
@@ -85,6 +112,10 @@
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET 		0x120
 #define DB_CQ_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK	0x7C00	/* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT	(1)	/* qid bits 10-14
+						placing at 11-15 */
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT	(16)	/* bits 16 - 28 */
 /* Rearm bit */
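
Note: the SLI_INTF_* masks added above are paired with shifts, so each
field decodes with one mask-and-shift. An illustrative decode of the
SLI family field, which be_main.c later compares against
LANCER_A0_SLI_FAMILY (the helper name is hypothetical):

	static u32 example_sli_family(u32 sli_intf)
	{
		return (sli_intf & SLI_INTF_FAMILY_MASK) >>
			SLI_INTF_FAMILY_SHIFT;
	}
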
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2ffbadc..0b35e4a8bf19 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 {
 	u32 val = 0;
 	val |= qid & DB_EQ_RING_ID_MASK;
+	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+			DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 {
 	u32 val = 0;
 	val |= qid & DB_CQ_RING_ID_MASK;
+	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+			DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
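
Note: both doorbell helpers above now encode queue ids wider than the
legacy field: bits 0-9 stay where they were, and the extended bits
10-14 are shifted up so they land in register bits 11-15. A hedged
sketch of the resulting CQ encoding, using only constants visible in
this patch (the helper itself is hypothetical):

	static u32 example_cq_doorbell_val(u16 qid, u16 num_popped)
	{
		u32 val = 0;

		val |= qid & DB_CQ_RING_ID_MASK;
		val |= (qid & DB_CQ_RING_ID_EXT_MASK) <<
				DB_CQ_RING_ID_EXT_MASK_SHIFT;
		val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
		return val;
	}
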
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
 }
 
 /* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+								bool *dummy)
 {
 	int cnt = (skb->len > skb->data_len);
 
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
 
 	/* to account for hdr wrb */
 	cnt++;
-	if (cnt & 1) {
+	if (lancer_chip(adapter) || !(cnt & 1)) {
+		*dummy = false;
+	} else {
 		/* add a dummy to make it an even num */
 		cnt++;
 		*dummy = true;
-	} else
-		*dummy = false;
+	}
 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
 	return cnt;
 }
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
 			hdr, skb_shinfo(skb)->gso_size);
-		if (skb_is_gso_v6(skb))
+		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+		if (lancer_chip(adapter) && adapter->sli_family ==
+							LANCER_A0_SLI_FAMILY) {
+			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+			if (is_tcp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								tcpcs, hdr, 1);
+			else if (is_udp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								udpcs, hdr, 1);
+		}
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (is_tcp_pkt(skb))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 	u32 start = txq->head;
 	bool dummy_wrb, stopped = false;
 
-	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
 	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
@@ -894,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
-	for (i = 0; i < num_rcvd; i++) {
-		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-		put_page(page_info->page);
-		memset(page_info, 0, sizeof(*page_info));
-		index_inc(&rxq_idx, rxq->len);
+	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
+
+		rxo->last_frag_index = rxq_idx;
+
+		for (i = 0; i < num_rcvd; i++) {
+			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+			put_page(page_info->page);
+			memset(page_info, 0, sizeof(*page_info));
+			index_inc(&rxq_idx, rxq->len);
+		}
 	}
 }
904 927
@@ -999,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
999 u8 vtm; 1022 u8 vtm;
1000 1023
1001 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1024 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1002 /* Is it a flush compl that has no data */
1003 if (unlikely(num_rcvd == 0))
1004 return;
1005 1025
1006 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 1026 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1007 if (unlikely(!skb)) { 1027 if (unlikely(!skb)) {
@@ -1035,7 +1055,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 			return;
 		}
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
 	} else {
 		netif_receive_skb(skb);
@@ -1057,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 	u8 pkt_type;
 
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-	/* Is it a flush compl that has no data */
-	if (unlikely(num_rcvd == 0))
-		return;
-
 	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
 	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1113,7 +1130,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 		napi_gro_frags(&eq_obj->napi);
 	} else {
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 
 		if (!adapter->vlan_grp || adapter->vlans_added == 0)
 			return;
@@ -1330,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
 	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
 		be_rx_compl_discard(adapter, rxo, rxcp);
 		be_rx_compl_reset(rxcp);
-		be_cq_notify(adapter, rx_cq->id, true, 1);
+		be_cq_notify(adapter, rx_cq->id, false, 1);
 	}
 
 	/* Then free posted rx buffer that were not used */
@@ -1381,7 +1399,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 		sent_skb = sent_skbs[txq->tail];
 		end_idx = txq->tail;
 		index_adv(&end_idx,
-			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+			txq->len);
 		be_tx_compl_process(adapter, end_idx);
 	}
 }
@@ -1476,7 +1495,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
 	/* Ask BE to create Tx Event queue */
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
 		goto tx_eq_free;
-	adapter->base_eq_id = adapter->tx_eq.q.id;
+
+	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 
 	/* Alloc TX eth compl queue */
 	cq = &adapter->tx_obj.cq;
@@ -1554,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	for_all_rx_queues(adapter, rxo, i) {
 		rxo->adapter = adapter;
+		/* Init last_frag_index so that the frag index in the first
+		 * completion will never match */
+		rxo->last_frag_index = 0xffff;
 		rxo->rx_eq.max_eqd = BE_MAX_EQD;
 		rxo->rx_eq.enable_aic = true;
 
@@ -1568,6 +1592,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		if (rc)
 			goto err;
 
+		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 		/* CQ */
 		cq = &rxo->cq;
 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1604,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
 		if (rc)
 			goto err;
-
 		/* Rx Q */
 		q = &rxo->q;
 		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1636,45 @@ err:
 	return -1;
 }
 
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
 {
-	return eq_id - adapter->base_eq_id;
+	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+	if (!eqe->evt)
+		return false;
+	else
+		return true;
 }
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
 	struct be_adapter *adapter = dev;
 	struct be_rx_obj *rxo;
-	int isr, i;
+	int isr, i, tx = 0 , rx = 0;
 
-	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
-		(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
-	if (!isr)
-		return IRQ_NONE;
+	if (lancer_chip(adapter)) {
+		if (event_peek(&adapter->tx_eq))
+			tx = event_handle(adapter, &adapter->tx_eq);
+		for_all_rx_queues(adapter, rxo, i) {
+			if (event_peek(&rxo->rx_eq))
+				rx |= event_handle(adapter, &rxo->rx_eq);
+		}
 
-	if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
-		event_handle(adapter, &adapter->tx_eq);
+		if (!(tx || rx))
+			return IRQ_NONE;
 
-	for_all_rx_queues(adapter, rxo, i) {
-		if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
-			event_handle(adapter, &rxo->rx_eq);
+	} else {
+		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+		if (!isr)
+			return IRQ_NONE;
+
+		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+			event_handle(adapter, &adapter->tx_eq);
+
+		for_all_rx_queues(adapter, rxo, i) {
+			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+				event_handle(adapter, &rxo->rx_eq);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -1658,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
-			struct be_eth_rx_compl *rxcp)
+static inline bool do_gro(struct be_rx_obj *rxo,
+			struct be_eth_rx_compl *rxcp, u8 err)
 {
-	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
 	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
 
 	if (err)
@@ -1678,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 	struct be_queue_info *rx_cq = &rxo->cq;
 	struct be_eth_rx_compl *rxcp;
 	u32 work_done;
+	u16 frag_index, num_rcvd;
+	u8 err;
 
 	rxo->stats.rx_polls++;
 	for (work_done = 0; work_done < budget; work_done++) {
@@ -1685,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 		if (!rxcp)
 			break;
 
-		if (do_gro(adapter, rxo, rxcp))
-			be_rx_compl_process_gro(adapter, rxo, rxcp);
-		else
-			be_rx_compl_process(adapter, rxo, rxcp);
+		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
+		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
+								rxcp);
+		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
+								rxcp);
+
+		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+		if (likely(frag_index != rxo->last_frag_index &&
+				num_rcvd != 0)) {
+			rxo->last_frag_index = frag_index;
+
+			if (do_gro(rxo, rxcp, err))
+				be_rx_compl_process_gro(adapter, rxo, rxcp);
+			else
+				be_rx_compl_process(adapter, rxo, rxcp);
+		}
 
 		be_rx_compl_reset(rxcp);
 	}
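
Note: the rx-poll rewrite above centralizes one predicate: a completion
is ignored when it repeats the previous fragment index (Lancer
out-of-buffer completions) or carries zero fragments (BE flush
completions). Expressed as a stand-alone sketch (hypothetical helper):

	static bool example_skip_rx_compl(u16 frag_index,
					  u16 last_frag_index, u16 num_rcvd)
	{
		return frag_index == last_frag_index || num_rcvd == 0;
	}
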
@@ -1830,8 +1884,7 @@ static void be_worker(struct work_struct *work)
 			be_post_rx_frags(rxo);
 		}
 	}
-
-	if (!adapter->ue_detected)
+	if (!adapter->ue_detected && !lancer_chip(adapter))
 		be_detect_dump_ue(adapter);
 
 reschedule:
@@ -1910,10 +1963,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
 #endif
 }
 
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+			struct be_eq_obj *eq_obj)
 {
-	return adapter->msix_entries[
-		be_evt_bit_get(adapter, eq_id)].vector;
+	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1977,14 @@ static int be_request_irq(struct be_adapter *adapter,
1924 int vec; 1977 int vec;
1925 1978
1926 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1979 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1927 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1980 vec = be_msix_vec_get(adapter, eq_obj);
1928 return request_irq(vec, handler, 0, eq_obj->desc, context); 1981 return request_irq(vec, handler, 0, eq_obj->desc, context);
1929} 1982}
1930 1983
1931static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, 1984static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1932 void *context) 1985 void *context)
1933{ 1986{
1934 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1987 int vec = be_msix_vec_get(adapter, eq_obj);
1935 free_irq(vec, context); 1988 free_irq(vec, context);
1936} 1989}
1937 1990
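
be_msix_vec_get() now takes the EQ object itself and reads the vector from
the MSI-X entry index stored on it, instead of recomputing that index from
the event-queue id on every request_irq()/free_irq(). The lookup pattern,
with simplified stand-in types:

    #include <stdint.h>

    struct msix_entry { uint32_t vector; };
    struct eq_obj { int msix_vec_idx; };    /* recorded at IRQ setup time */
    struct adapter { struct msix_entry msix_entries[8]; };

    /* The vector handed to request_irq()/free_irq() comes straight from
     * the entry the EQ was bound to. */
    static inline uint32_t msix_vec_get(const struct adapter *a,
                                        const struct eq_obj *eq)
    {
        return a->msix_entries[eq->msix_vec_idx].vector;
    }
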
@@ -2036,14 +2089,15 @@ static int be_close(struct net_device *netdev)
2036 netif_carrier_off(netdev); 2089 netif_carrier_off(netdev);
2037 adapter->link_up = false; 2090 adapter->link_up = false;
2038 2091
2039 be_intr_set(adapter, false); 2092 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false);
2040 2094
2041 if (adapter->msix_enabled) { 2095 if (adapter->msix_enabled) {
2042 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2096 vec = be_msix_vec_get(adapter, tx_eq);
2043 synchronize_irq(vec); 2097 synchronize_irq(vec);
2044 2098
2045 for_all_rx_queues(adapter, rxo, i) { 2099 for_all_rx_queues(adapter, rxo, i) {
2046 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id); 2100 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2047 synchronize_irq(vec); 2101 synchronize_irq(vec);
2048 } 2102 }
2049 } else { 2103 } else {
@@ -2082,7 +2136,8 @@ static int be_open(struct net_device *netdev)
2082 2136
2083 be_irq_register(adapter); 2137 be_irq_register(adapter);
2084 2138
2085 be_intr_set(adapter, true); 2139 if (!lancer_chip(adapter))
2140 be_intr_set(adapter, true);
2086 2141
2087 /* The evt queues are created in unarmed state; arm them */ 2142 /* The evt queues are created in unarmed state; arm them */
2088 for_all_rx_queues(adapter, rxo, i) { 2143 for_all_rx_queues(adapter, rxo, i) {
@@ -2458,6 +2513,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2458 int status, i = 0, num_imgs = 0; 2513 int status, i = 0, num_imgs = 0;
2459 const u8 *p; 2514 const u8 *p;
2460 2515
2516 if (!netif_running(adapter->netdev)) {
2517 dev_err(&adapter->pdev->dev,
2518 "Firmware load not allowed (interface is down)\n");
2519 return -EPERM;
2520 }
2521
2461 strcpy(fw_file, func); 2522 strcpy(fw_file, func);
2462 2523
2463 status = request_firmware(&fw, fw_file, &adapter->pdev->dev); 2524 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -2537,10 +2598,15 @@ static void be_netdev_init(struct net_device *netdev)
2537 int i; 2598 int i;
2538 2599
2539 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2600 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2540 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2601 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2541 NETIF_F_GRO | NETIF_F_TSO6; 2603 NETIF_F_GRO | NETIF_F_TSO6;
2542 2604
2543 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2605 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607
2608 if (lancer_chip(adapter))
2609 netdev->vlan_features |= NETIF_F_TSO6;
2544 2610
2545 netdev->flags |= IFF_MULTICAST; 2611 netdev->flags |= IFF_MULTICAST;
2546 2612
@@ -2581,6 +2647,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2581 u8 __iomem *addr; 2647 u8 __iomem *addr;
2582 int pcicfg_reg, db_reg; 2648 int pcicfg_reg, db_reg;
2583 2649
2650 if (lancer_chip(adapter)) {
2651 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652 pci_resource_len(adapter->pdev, 0));
2653 if (addr == NULL)
2654 return -ENOMEM;
2655 adapter->db = addr;
2656 return 0;
2657 }
2658
2584 if (be_physfn(adapter)) { 2659 if (be_physfn(adapter)) {
2585 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2660 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2586 pci_resource_len(adapter->pdev, 2)); 2661 pci_resource_len(adapter->pdev, 2));
@@ -2777,6 +2852,44 @@ static int be_get_config(struct be_adapter *adapter)
2777 return 0; 2852 return 0;
2778} 2853}
2779 2854
2855static int be_dev_family_check(struct be_adapter *adapter)
2856{
2857 struct pci_dev *pdev = adapter->pdev;
2858 u32 sli_intf = 0, if_type;
2859
2860 switch (pdev->device) {
2861 case BE_DEVICE_ID1:
2862 case OC_DEVICE_ID1:
2863 adapter->generation = BE_GEN2;
2864 break;
2865 case BE_DEVICE_ID2:
2866 case OC_DEVICE_ID2:
2867 adapter->generation = BE_GEN3;
2868 break;
2869 case OC_DEVICE_ID3:
2870 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872 SLI_INTF_IF_TYPE_SHIFT;
2873
2874 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875 if_type != 0x02) {
2876 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877 return -EINVAL;
2878 }
2879 if (num_vfs > 0) {
2880 dev_err(&pdev->dev, "VFs not supported\n");
2881 return -EINVAL;
2882 }
2883 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884 SLI_INTF_FAMILY_SHIFT);
2885 adapter->generation = BE_GEN3;
2886 break;
2887 default:
2888 adapter->generation = 0;
2889 }
2890 return 0;
2891}
2892
2780static int __devinit be_probe(struct pci_dev *pdev, 2893static int __devinit be_probe(struct pci_dev *pdev,
2781 const struct pci_device_id *pdev_id) 2894 const struct pci_device_id *pdev_id)
2782{ 2895{
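
be_dev_family_check() recognises the new device id by validating the SLI_INTF
register and pulling out the interface-type and family fields with mask/shift
pairs. The decode can be modelled in isolation; the register-layout constants
below are illustrative placeholders, not the driver's real definitions:

    #include <stdint.h>

    /* Illustrative field positions within the SLI_INTF register. */
    #define SLI_VALID_MASK    0xE0000000u
    #define SLI_VALID         0xC0000000u
    #define SLI_IF_TYPE_MASK  0x0000F000u
    #define SLI_IF_TYPE_SHIFT 12
    #define SLI_FAMILY_MASK   0x00000F00u
    #define SLI_FAMILY_SHIFT  8

    /* Returns 0 and fills *family if the register content is usable,
     * -1 otherwise (mirroring the driver's -EINVAL path). */
    static int decode_sli_intf(uint32_t sli_intf, uint32_t *family)
    {
        uint32_t if_type = (sli_intf & SLI_IF_TYPE_MASK) >> SLI_IF_TYPE_SHIFT;

        if ((sli_intf & SLI_VALID_MASK) != SLI_VALID || if_type != 0x02)
            return -1;

        *family = (sli_intf & SLI_FAMILY_MASK) >> SLI_FAMILY_SHIFT;
        return 0;
    }
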
@@ -2799,22 +2912,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2799 goto rel_reg; 2912 goto rel_reg;
2800 } 2913 }
2801 adapter = netdev_priv(netdev); 2914 adapter = netdev_priv(netdev);
2802
2803 switch (pdev->device) {
2804 case BE_DEVICE_ID1:
2805 case OC_DEVICE_ID1:
2806 adapter->generation = BE_GEN2;
2807 break;
2808 case BE_DEVICE_ID2:
2809 case OC_DEVICE_ID2:
2810 adapter->generation = BE_GEN3;
2811 break;
2812 default:
2813 adapter->generation = 0;
2814 }
2815
2816 adapter->pdev = pdev; 2915 adapter->pdev = pdev;
2817 pci_set_drvdata(pdev, adapter); 2916 pci_set_drvdata(pdev, adapter);
2917
2918 status = be_dev_family_check(adapter);
2919 if (status)
2920 goto free_netdev;
2921
2818 adapter->netdev = netdev; 2922 adapter->netdev = netdev;
2819 SET_NETDEV_DEV(netdev, &pdev->dev); 2923 SET_NETDEV_DEV(netdev, &pdev->dev);
2820 2924
@@ -2889,7 +2993,7 @@ ctrl_clean:
2889 be_ctrl_cleanup(adapter); 2993 be_ctrl_cleanup(adapter);
2890free_netdev: 2994free_netdev:
2891 be_sriov_disable(adapter); 2995 be_sriov_disable(adapter);
2892 free_netdev(adapter->netdev); 2996 free_netdev(netdev);
2893 pci_set_drvdata(pdev, NULL); 2997 pci_set_drvdata(pdev, NULL);
2894rel_reg: 2998rel_reg:
2895 pci_release_regions(pdev); 2999 pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073b..5c811f3fa11a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56#include "bnx2_fw.h" 56#include "bnx2_fw.h"
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define DRV_MODULE_VERSION "2.0.18" 59#define DRV_MODULE_VERSION "2.0.20"
60#define DRV_MODULE_RELDATE "Oct 7, 2010" 60#define DRV_MODULE_RELDATE "Nov 24, 2010"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
766 int j; 766 int j;
767 767
768 rxr->rx_buf_ring = 768 rxr->rx_buf_ring =
769 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); 769 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770 if (rxr->rx_buf_ring == NULL) 770 if (rxr->rx_buf_ring == NULL)
771 return -ENOMEM; 771 return -ENOMEM;
772 772
773 memset(rxr->rx_buf_ring, 0,
774 SW_RXBD_RING_SIZE * bp->rx_max_ring);
775
776 for (j = 0; j < bp->rx_max_ring; j++) { 773 for (j = 0; j < bp->rx_max_ring; j++) {
777 rxr->rx_desc_ring[j] = 774 rxr->rx_desc_ring[j] =
778 dma_alloc_coherent(&bp->pdev->dev, 775 dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
785 } 782 }
786 783
787 if (bp->rx_pg_ring_size) { 784 if (bp->rx_pg_ring_size) {
788 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE * 785 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
789 bp->rx_max_pg_ring); 786 bp->rx_max_pg_ring);
790 if (rxr->rx_pg_ring == NULL) 787 if (rxr->rx_pg_ring == NULL)
791 return -ENOMEM; 788 return -ENOMEM;
792 789
793 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794 bp->rx_max_pg_ring);
795 } 790 }
796 791
797 for (j = 0; j < bp->rx_max_pg_ring; j++) { 792 for (j = 0; j < bp->rx_max_pg_ring; j++) {
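
Both conversions above fold a vmalloc()+memset() pair into vzalloc(), which
returns the allocation already zeroed. A rough userspace model of the
equivalence, with malloc() standing in for vmalloc():

    #include <stdlib.h>
    #include <string.h>

    /* What vzalloc(size) provides over vmalloc(size): the same
     * allocation, pre-zeroed. */
    static void *vzalloc_model(size_t size)
    {
        void *p = malloc(size);   /* stands in for vmalloc(size) */

        if (p)
            memset(p, 0, size);
        return p;
    }
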
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4645 4640
4646 /* Wait for the current PCI transaction to complete before 4641 /* Wait for the current PCI transaction to complete before
4647 * issuing a reset. */ 4642 * issuing a reset. */
4648 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4643 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4649 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4644 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4650 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4645 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4651 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4646 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4652 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4647 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4653 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); 4648 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4654 udelay(5); 4649 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4650 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4651 udelay(5);
4652 } else { /* 5709 */
4653 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4654 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4655 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4656 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4657
4658 for (i = 0; i < 100; i++) {
4659 msleep(1);
4660 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4661 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4662 break;
4663 }
4664 }
4655 4665
4656 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4666 /* Wait for the firmware to tell us it is ok to issue a reset. */
4657 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4667 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4673 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4683 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4674 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4684 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4675 4685
4676 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); 4686 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4677 4687
4678 } else { 4688 } else {
4679 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4689 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -7914,15 +7924,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7914 goto err_out_release; 7924 goto err_out_release;
7915 } 7925 }
7916 7926
7927 bnx2_set_power_state(bp, PCI_D0);
7928
7917 /* Configure byte swap and enable write to the reg_window registers. 7929 /* Configure byte swap and enable write to the reg_window registers.
7918 * Rely on CPU to do target byte swapping on big endian systems 7930 * Rely on CPU to do target byte swapping on big endian systems
7919 * The chip's target access swapping will not swap all accesses 7931 * The chip's target access swapping will not swap all accesses
7920 */ 7932 */
7921 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, 7933 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7922 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 7934 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7923 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 7935 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7924
7925 bnx2_set_power_state(bp, PCI_D0);
7926 7936
7927 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 7937 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7928 7938
@@ -8383,7 +8393,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8383 struct net_device *dev = pci_get_drvdata(pdev); 8393 struct net_device *dev = pci_get_drvdata(pdev);
8384 struct bnx2 *bp = netdev_priv(dev); 8394 struct bnx2 *bp = netdev_priv(dev);
8385 8395
8386 flush_scheduled_work(); 8396 cancel_work_sync(&bp->reset_task);
8387 8397
8388 unregister_netdev(dev); 8398 unregister_netdev(dev);
8389 8399
@@ -8421,7 +8431,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8421 if (!netif_running(dev)) 8431 if (!netif_running(dev))
8422 return 0; 8432 return 0;
8423 8433
8424 flush_scheduled_work(); 8434 cancel_work_sync(&bp->reset_task);
8425 bnx2_netif_stop(bp, true); 8435 bnx2_netif_stop(bp, true);
8426 netif_device_detach(dev); 8436 netif_device_detach(dev);
8427 del_timer_sync(&bp->timer); 8437 del_timer_sync(&bp->timer);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067d..5488a2e82fe9 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@ struct l2_fhdr {
461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
463 463
464#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
465#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
464 466
465/* 467/*
466 * pci_reg definition 468 * pci_reg definition
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225decaf..7e4d682f0df1 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.60.00-3" 23#define DRV_MODULE_VERSION "1.60.00-7"
24#define DRV_MODULE_RELDATE "2010/10/19" 24#define DRV_MODULE_RELDATE "2010/12/08"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
@@ -671,6 +671,10 @@ enum {
671 CAM_ISCSI_ETH_LINE, 671 CAM_ISCSI_ETH_LINE,
672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE 672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
673}; 673};
674/* number of MACs per function in NIG memory - used for SI mode */
675#define NIG_LLH_FUNC_MEM_SIZE 16
676/* number of entries in NIG_REG_LLHX_FUNC_MEM */
677#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
674 678
675#define BNX2X_VF_ID_INVALID 0xFF 679#define BNX2X_VF_ID_INVALID 0xFF
676 680
@@ -967,6 +971,8 @@ struct bnx2x {
967 u16 mf_ov; 971 u16 mf_ov;
968 u8 mf_mode; 972 u8 mf_mode;
969#define IS_MF(bp) (bp->mf_mode != 0) 973#define IS_MF(bp) (bp->mf_mode != 0)
974#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
975#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
970 976
971 u8 wol; 977 u8 wol;
972 978
@@ -1010,6 +1016,7 @@ struct bnx2x {
1010#define BNX2X_ACCEPT_ALL_UNICAST 0x0004 1016#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
1011#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 1017#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
1012#define BNX2X_ACCEPT_BROADCAST 0x0010 1018#define BNX2X_ACCEPT_BROADCAST 0x0010
1019#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
1013#define BNX2X_PROMISCUOUS_MODE 0x10000 1020#define BNX2X_PROMISCUOUS_MODE 0x10000
1014 1021
1015 u32 rx_mode; 1022 u32 rx_mode;
@@ -1329,7 +1336,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1329 1336
1330#define BNX2X_ILT_ZALLOC(x, y, size) \ 1337#define BNX2X_ILT_ZALLOC(x, y, size) \
1331 do { \ 1338 do { \
1332 x = pci_alloc_consistent(bp->pdev, size, y); \ 1339 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
1333 if (x) \ 1340 if (x) \
1334 memset(x, 0, size); \ 1341 memset(x, 0, size); \
1335 } while (0) 1342 } while (0)
@@ -1337,7 +1344,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1337#define BNX2X_ILT_FREE(x, y, size) \ 1344#define BNX2X_ILT_FREE(x, y, size) \
1338 do { \ 1345 do { \
1339 if (x) { \ 1346 if (x) { \
1340 pci_free_consistent(bp->pdev, size, x, y); \ 1347 dma_free_coherent(&bp->pdev->dev, size, x, y); \
1341 x = NULL; \ 1348 x = NULL; \
1342 y = 0; \ 1349 y = 0; \
1343 } \ 1350 } \
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7bc..236c00c3f568 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
698 mutex_unlock(&bp->port.phy_mutex); 698 mutex_unlock(&bp->port.phy_mutex);
699} 699}
700 700
701/* calculates MF speed according to current linespeed and MF configuration */
702u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{
704 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
707 FUNC_MF_CFG_MAX_BW_MASK) >>
708 FUNC_MF_CFG_MAX_BW_SHIFT;
709 /* Calculate the current MAX line speed limit for the DCC
710 * capable devices
711 */
712 if (IS_MF_SD(bp)) {
713 u16 vn_max_rate = maxCfg * 100;
714
715 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */
718 line_speed = (line_speed * maxCfg) / 100;
719 }
720
721 return line_speed;
722}
723
701void bnx2x_link_report(struct bnx2x *bp) 724void bnx2x_link_report(struct bnx2x *bp)
702{ 725{
703 if (bp->flags & MF_FUNC_DIS) { 726 if (bp->flags & MF_FUNC_DIS) {
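
bnx2x_get_mf_speed() folds the per-function maximum bandwidth (maxCfg) into
the reported speed: in switch-dependent (SD) mode maxCfg is read as hundreds
of Mbps and used as a ceiling, while in switch-independent (SI) mode it
scales the line speed as a percentage. A worked userspace model:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* max_cfg caps (SD) or scales (SI) the raw line speed, in Mbps. */
    static uint16_t mf_speed(uint16_t line_speed, uint16_t max_cfg, bool sd)
    {
        if (sd) {
            uint16_t vn_max_rate = max_cfg * 100;  /* units of 100 Mbps */

            return vn_max_rate < line_speed ? vn_max_rate : line_speed;
        }
        return (line_speed * max_cfg) / 100;       /* percent of link */
    }

    int main(void)
    {
        /* 10G link, max_cfg = 25: both modes yield 2500 Mbps. */
        printf("SD: %u Mbps\n", (unsigned)mf_speed(10000, 25, true));
        printf("SI: %u Mbps\n", (unsigned)mf_speed(10000, 25, false));
        return 0;
    }

The two branches agree only at 10G; at other physical line speeds the SD
ceiling and the SI percentage give different results.
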
@@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
713 netif_carrier_on(bp->dev); 736 netif_carrier_on(bp->dev);
714 netdev_info(bp->dev, "NIC Link is Up, "); 737 netdev_info(bp->dev, "NIC Link is Up, ");
715 738
716 line_speed = bp->link_vars.line_speed; 739 line_speed = bnx2x_get_mf_speed(bp);
717 if (IS_MF(bp)) {
718 u16 vn_max_rate;
719 740
720 vn_max_rate =
721 ((bp->mf_config[BP_VN(bp)] &
722 FUNC_MF_CFG_MAX_BW_MASK) >>
723 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
724 if (vn_max_rate < line_speed)
725 line_speed = vn_max_rate;
726 }
727 pr_cont("%d Mbps ", line_speed); 741 pr_cont("%d Mbps ", line_speed);
728 742
729 if (bp->link_vars.duplex == DUPLEX_FULL) 743 if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -1680,7 +1694,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1680 rc = XMIT_PLAIN; 1694 rc = XMIT_PLAIN;
1681 1695
1682 else { 1696 else {
1683 if (skb->protocol == htons(ETH_P_IPV6)) { 1697 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1684 rc = XMIT_CSUM_V6; 1698 rc = XMIT_CSUM_V6;
1685 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1699 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1686 rc |= XMIT_CSUM_TCP; 1700 rc |= XMIT_CSUM_TCP;
@@ -1692,11 +1706,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1692 } 1706 }
1693 } 1707 }
1694 1708
1695 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 1709 if (skb_is_gso_v6(skb))
1696 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 1710 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1697 1711 else if (skb_is_gso(skb))
1698 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 1712 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1699 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1700 1713
1701 return rc; 1714 return rc;
1702} 1715}
@@ -1782,15 +1795,15 @@ exit_lbl:
1782} 1795}
1783#endif 1796#endif
1784 1797
1785static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, 1798static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1786 struct eth_tx_parse_bd_e2 *pbd, 1799 u32 xmit_type)
1787 u32 xmit_type)
1788{ 1800{
1789 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) << 1801 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1790 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT; 1802 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1803 ETH_TX_PARSE_BD_E2_LSO_MSS;
1791 if ((xmit_type & XMIT_GSO_V6) && 1804 if ((xmit_type & XMIT_GSO_V6) &&
1792 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 1805 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1793 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 1806 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1794} 1807}
1795 1808
1796/** 1809/**
@@ -1835,15 +1848,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1835 * @return header len 1848 * @return header len
1836 */ 1849 */
1837static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 1850static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1838 struct eth_tx_parse_bd_e2 *pbd, 1851 u32 *parsing_data, u32 xmit_type)
1839 u32 xmit_type)
1840{ 1852{
1841 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) << 1853 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1842 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT; 1854 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1855 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
1843 1856
1844 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) - 1857 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1845 skb->data) / 2) << 1858 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1846 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT; 1859 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
1847 1860
1848 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 1861 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1849} 1862}
@@ -1912,6 +1925,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1912 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1925 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1913 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 1926 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1914 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 1927 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1928 u32 pbd_e2_parsing_data = 0;
1915 u16 pkt_prod, bd_prod; 1929 u16 pkt_prod, bd_prod;
1916 int nbd, fp_index; 1930 int nbd, fp_index;
1917 dma_addr_t mapping; 1931 dma_addr_t mapping;
@@ -2033,8 +2047,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2033 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2047 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2034 /* Set PBD in checksum offload case */ 2048 /* Set PBD in checksum offload case */
2035 if (xmit_type & XMIT_CSUM) 2049 if (xmit_type & XMIT_CSUM)
2036 hlen = bnx2x_set_pbd_csum_e2(bp, 2050 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2037 skb, pbd_e2, xmit_type); 2051 &pbd_e2_parsing_data,
2052 xmit_type);
2038 } else { 2053 } else {
2039 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; 2054 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2040 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2055 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2091,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2076 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2091 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2077 hlen, bd_prod, ++nbd); 2092 hlen, bd_prod, ++nbd);
2078 if (CHIP_IS_E2(bp)) 2093 if (CHIP_IS_E2(bp))
2079 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type); 2094 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2095 xmit_type);
2080 else 2096 else
2081 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 2097 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2082 } 2098 }
2099
2100 /* Set the PBD's parsing_data field if not zero
2101 * (for chips newer than the 57711).
2102 */
2103 if (pbd_e2_parsing_data)
2104 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2105
2083 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2106 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2084 2107
2085 /* Handle fragmented skb */ 2108 /* Handle fragmented skb */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739c5302..cb8f2a040a18 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
73void bnx2x_link_report(struct bnx2x *bp); 73void bnx2x_link_report(struct bnx2x *bp);
74 74
75/** 75/**
76 * calculates MF speed according to current linespeed and MF
77 * configuration
78 *
79 * @param bp
80 *
81 * @return u16
82 */
83u16 bnx2x_get_mf_speed(struct bnx2x *bp);
84
85/**
76 * MSI-X slowpath interrupt handler 86 * MSI-X slowpath interrupt handler
77 * 87 *
78 * @param irq 88 * @param irq
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0e..bd94827e5e57 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -45,14 +45,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
45 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 45 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
46 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 46 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
47 } 47 }
48 if (IS_MF(bp)) {
49 u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
50 FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
51 100;
52 48
53 if (vn_max_rate < cmd->speed) 49 if (IS_MF(bp))
54 cmd->speed = vn_max_rate; 50 cmd->speed = bnx2x_get_mf_speed(bp);
55 }
56 51
57 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 52 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
58 cmd->port = PORT_TP; 53 cmd->port = PORT_TP;
@@ -87,18 +82,57 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
87{ 82{
88 struct bnx2x *bp = netdev_priv(dev); 83 struct bnx2x *bp = netdev_priv(dev);
89 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 84 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
85 u32 speed;
90 86
91 if (IS_MF(bp)) 87 if (IS_MF_SD(bp))
92 return 0; 88 return 0;
93 89
94 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 90 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
95 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 91 " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
96 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 92 " duplex %d port %d phy_address %d transceiver %d\n"
97 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 93 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
98 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 94 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
95 cmd->speed_hi,
99 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 96 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
100 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 97 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
101 98
99 speed = cmd->speed;
100 speed |= (cmd->speed_hi << 16);
101
102 if (IS_MF_SI(bp)) {
103 u32 param = 0;
104 u32 line_speed = bp->link_vars.line_speed;
105
106 /* use 10G if no link detected */
107 if (!line_speed)
108 line_speed = 10000;
109
110 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
111 BNX2X_DEV_INFO("To set speed BC %X or higher "
112 "is required, please upgrade BC\n",
113 REQ_BC_VER_4_SET_MF_BW);
114 return -EINVAL;
115 }
116 if (line_speed < speed) {
117 BNX2X_DEV_INFO("New speed should be less or equal "
118 "to actual line speed\n");
119 return -EINVAL;
120 }
121 /* load old values */
122 param = bp->mf_config[BP_VN(bp)];
123
124 /* leave only MIN value */
125 param &= FUNC_MF_CFG_MIN_BW_MASK;
126
127 /* set new MAX value */
128 param |= (((speed * 100) / line_speed)
129 << FUNC_MF_CFG_MAX_BW_SHIFT)
130 & FUNC_MF_CFG_MAX_BW_MASK;
131
132 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
133 return 0;
134 }
135
102 cfg_idx = bnx2x_get_link_cfg_idx(bp); 136 cfg_idx = bnx2x_get_link_cfg_idx(bp);
103 old_multi_phy_config = bp->link_params.multi_phy_config; 137 old_multi_phy_config = bp->link_params.multi_phy_config;
104 switch (cmd->port) { 138 switch (cmd->port) {
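
In SI mode the requested speed is encoded as a percentage of the current line
speed and merged into the function's MF configuration word, preserving the
MIN bandwidth field. A worked model; the mask and shift values below are
illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define CFG_MIN_BW_MASK  0x00FF0000u   /* illustrative positions */
    #define CFG_MAX_BW_MASK  0xFF000000u
    #define CFG_MAX_BW_SHIFT 24

    /* Keep MIN, replace MAX with (speed / line_speed) in percent. */
    static uint32_t set_mf_max_bw(uint32_t old_cfg, uint32_t speed,
                                  uint32_t line_speed)
    {
        uint32_t param = old_cfg & CFG_MIN_BW_MASK;

        param |= (((speed * 100) / line_speed) << CFG_MAX_BW_SHIFT)
                 & CFG_MAX_BW_MASK;
        return param;
    }

    int main(void)
    {
        /* Request 2.5G on a 10G link: MAX field holds 25 (percent). */
        printf("param = 0x%08x\n", (unsigned)set_mf_max_bw(0, 2500, 10000));
        return 0;
    }
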
@@ -168,8 +202,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
168 202
169 } else { /* forced speed */ 203 } else { /* forced speed */
170 /* advertise the requested speed and duplex if supported */ 204 /* advertise the requested speed and duplex if supported */
171 u32 speed = cmd->speed;
172 speed |= (cmd->speed_hi << 16);
173 switch (speed) { 205 switch (speed) {
174 case SPEED_10: 206 case SPEED_10:
175 if (cmd->duplex == DUPLEX_FULL) { 207 if (cmd->duplex == DUPLEX_FULL) {
@@ -1499,8 +1531,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1499 * updates that have been performed while interrupts were 1531 * updates that have been performed while interrupts were
1500 * disabled. 1532 * disabled.
1501 */ 1533 */
1502 if (bp->common.int_block == INT_BLOCK_IGU) 1534 if (bp->common.int_block == INT_BLOCK_IGU) {
1535 /* Disable local BHs to prevent a deadlock between
1536 * sch_direct_xmit() and bnx2x_run_loopback() (calling
1537 * bnx2x_tx_int()), as both are taking netif_tx_lock().
1538 */
1539 local_bh_disable();
1503 bnx2x_tx_int(fp_tx); 1540 bnx2x_tx_int(fp_tx);
1541 local_bh_enable();
1542 }
1504 1543
1505 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1544 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1506 if (rx_idx != rx_start_idx + num_pkts) 1545 if (rx_idx != rx_start_idx + num_pkts)
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..6555c477f893 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
244 244
245 u16 xgxs_config_tx[4]; /* 0x1A0 */ 245 u16 xgxs_config_tx[4]; /* 0x1A0 */
246 246
247 u32 Reserved1[57]; /* 0x1A8 */ 247 u32 Reserved1[56]; /* 0x1A8 */
248 u32 default_cfg; /* 0x288 */
249 /* Enable BAM on KR */
250#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
251#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
252#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
253#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
254
248 u32 speed_capability_mask2; /* 0x28C */ 255 u32 speed_capability_mask2; /* 0x28C */
249#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF 256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
250#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -427,7 +434,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
427#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 434#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
428#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 435#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
429 436
430#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100 437#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
438#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
439#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
440#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
441#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
442#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
431 443
432}; 444};
433 445
@@ -808,6 +820,9 @@ struct drv_func_mb {
808#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 820#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
809#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 821#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
810 822
823#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
824#define REQ_BC_VER_4_SET_MF_BW 0x00060202
825#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
811#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 826#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
812#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 827#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
813#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 828#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -881,6 +896,7 @@ struct drv_func_mb {
881 896
882 u32 drv_status; 897 u32 drv_status;
883#define DRV_STATUS_PMF 0x00000001 898#define DRV_STATUS_PMF 0x00000001
899#define DRV_STATUS_SET_MF_BW 0x00000004
884 900
885#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 901#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
886#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 902#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -981,12 +997,43 @@ struct func_mf_cfg {
981 997
982}; 998};
983 999
1000/* This structure is not applicable and should not be accessed on 57711 */
1001struct func_ext_cfg {
1002 u32 func_cfg;
1003#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
1004#define MACP_FUNC_CFG_FLAGS_SHIFT 0
1005#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1006#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1007#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1008#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1009
1010 u32 iscsi_mac_addr_upper;
1011 u32 iscsi_mac_addr_lower;
1012
1013 u32 fcoe_mac_addr_upper;
1014 u32 fcoe_mac_addr_lower;
1015
1016 u32 fcoe_wwn_port_name_upper;
1017 u32 fcoe_wwn_port_name_lower;
1018
1019 u32 fcoe_wwn_node_name_upper;
1020 u32 fcoe_wwn_node_name_lower;
1021
1022 u32 preserve_data;
1023#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
1024#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
1025#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
1026#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
1027#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
1028};
1029
984struct mf_cfg { 1030struct mf_cfg {
985 1031
986 struct shared_mf_cfg shared_mf_config; 1032 struct shared_mf_cfg shared_mf_config;
987 struct port_mf_cfg port_mf_config[PORT_MAX]; 1033 struct port_mf_cfg port_mf_config[PORT_MAX];
988 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; 1034 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
989 1035
1036 struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
990}; 1037};
991 1038
992 1039
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e46b61..66df29fcf751 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
838/**************************************************************************** 838/****************************************************************************
839* SRC initializations 839* SRC initializations
840****************************************************************************/ 840****************************************************************************/
841 841#ifdef BCM_CNIC
842/* called during init func stage */ 842/* called during init func stage */
843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
844 dma_addr_t t2_mapping, int src_cid_count) 844 dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
862 U64_HI((u64)t2_mapping + 862 U64_HI((u64)t2_mapping +
863 (src_cid_count-1) * sizeof(struct src_ent))); 863 (src_cid_count-1) * sizeof(struct src_ent)));
864} 864}
865 865#endif
866#endif /* BNX2X_INIT_OPS_H */ 866#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774df843..38aeffef2a83 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
610 /* reset and unreset the BigMac */ 610 /* reset and unreset the BigMac */
611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
612 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 612 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
613 udelay(10); 613 msleep(1);
614 614
615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
616 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 616 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3525 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 3525 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
3526 3526
3527 /* Enable CL37 BAM */ 3527 /* Enable CL37 BAM */
3528 bnx2x_cl45_read(bp, phy, 3528 if (REG_RD(bp, params->shmem_base +
3529 MDIO_AN_DEVAD, 3529 offsetof(struct shmem_region, dev_info.
3530 MDIO_AN_REG_8073_BAM, &val); 3530 port_hw_config[params->port].default_cfg)) &
3531 bnx2x_cl45_write(bp, phy, 3531 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3532 MDIO_AN_DEVAD,
3533 MDIO_AN_REG_8073_BAM, val | 1);
3534 3532
3533 bnx2x_cl45_read(bp, phy,
3534 MDIO_AN_DEVAD,
3535 MDIO_AN_REG_8073_BAM, &val);
3536 bnx2x_cl45_write(bp, phy,
3537 MDIO_AN_DEVAD,
3538 MDIO_AN_REG_8073_BAM, val | 1);
3539 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3540 }
3535 if (params->loopback_mode == LOOPBACK_EXT) { 3541 if (params->loopback_mode == LOOPBACK_EXT) {
3536 bnx2x_807x_force_10G(bp, phy); 3542 bnx2x_807x_force_10G(bp, phy);
3537 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); 3543 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -3898,7 +3904,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3898 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3904 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3899 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3905 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3900 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 3906 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3901 return 0;; 3907 return 0;
3902 msleep(1); 3908 msleep(1);
3903 } 3909 }
3904 return -EINVAL; 3910 return -EINVAL;
@@ -3982,7 +3988,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3982 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3988 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3983 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3989 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3984 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 3990 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3985 return 0;; 3991 return 0;
3986 msleep(1); 3992 msleep(1);
3987 } 3993 }
3988 3994
@@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5302{ 5308{
5303 struct bnx2x *bp = params->bp; 5309 struct bnx2x *bp = params->bp;
5304 u16 autoneg_val, an_1000_val, an_10_100_val; 5310 u16 autoneg_val, an_1000_val, an_10_100_val;
5305 bnx2x_wait_reset_complete(bp, phy); 5311
5306 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 5312 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5307 1 << NIG_LATCH_BC_ENABLE_MI_INT); 5313 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5308 5314
@@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
5431 5437
5432 /* HW reset */ 5438 /* HW reset */
5433 bnx2x_ext_phy_hw_reset(bp, params->port); 5439 bnx2x_ext_phy_hw_reset(bp, params->port);
5440 bnx2x_wait_reset_complete(bp, phy);
5434 5441
5435 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 5442 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5436 return bnx2x_848xx_cmn_config_init(phy, params, vars); 5443 return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5441 struct link_vars *vars) 5448 struct link_vars *vars)
5442{ 5449{
5443 struct bnx2x *bp = params->bp; 5450 struct bnx2x *bp = params->bp;
5444 u8 port = params->port, initialize = 1; 5451 u8 port, initialize = 1;
5445 u16 val; 5452 u16 val;
5446 u16 temp; 5453 u16 temp;
5447 u32 actual_phy_selection; 5454 u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5450 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ 5457 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
5451 5458
5452 msleep(1); 5459 msleep(1);
5460 if (CHIP_IS_E2(bp))
5461 port = BP_PATH(bp);
5462 else
5463 port = params->port;
5453 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5464 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5454 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 5465 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
5455 port); 5466 port);
5456 msleep(200); /* 100 is not enough */ 5467 bnx2x_wait_reset_complete(bp, phy);
5457 5468 /* Wait for GPHY to come out of reset */
5469 msleep(50);
5458 /* BCM84823 requires that XGXS links up first @ 10G for normal 5470 /* BCM84823 requires that XGXS links up first @ 10G for normal
5459 behavior */ 5471 behavior */
5460 temp = vars->line_speed; 5472 temp = vars->line_speed;
@@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
5625 struct link_params *params) 5637 struct link_params *params)
5626{ 5638{
5627 struct bnx2x *bp = params->bp; 5639 struct bnx2x *bp = params->bp;
5628 u8 port = params->port; 5640 u8 port;
5641 if (CHIP_IS_E2(bp))
5642 port = BP_PATH(bp);
5643 else
5644 port = params->port;
5629 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5645 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5630 MISC_REGISTERS_GPIO_OUTPUT_LOW, 5646 MISC_REGISTERS_GPIO_OUTPUT_LOW,
5631 port); 5647 port);
@@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6928 u8 reset_ext_phy) 6944 u8 reset_ext_phy)
6929{ 6945{
6930 struct bnx2x *bp = params->bp; 6946 struct bnx2x *bp = params->bp;
6931 u8 phy_index, port = params->port; 6947 u8 phy_index, port = params->port, clear_latch_ind = 0;
6932 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 6948 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6933 /* disable attentions */ 6949 /* disable attentions */
6934 vars->link_status = 0; 6950 vars->link_status = 0;
@@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6966 params->phy[phy_index].link_reset( 6982 params->phy[phy_index].link_reset(
6967 &params->phy[phy_index], 6983 &params->phy[phy_index],
6968 params); 6984 params);
6985 if (params->phy[phy_index].flags &
6986 FLAGS_REARM_LATCH_SIGNAL)
6987 clear_latch_ind = 1;
6969 } 6988 }
6970 } 6989 }
6971 6990
6991 if (clear_latch_ind) {
6992 /* Clear latching indication */
6993 bnx2x_rearm_latch_signal(bp, port, 0);
6994 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
6995 1 << NIG_LATCH_BC_ENABLE_MI_INT);
6996 }
6972 if (params->phy[INT_PHY].link_reset) 6997 if (params->phy[INT_PHY].link_reset)
6973 params->phy[INT_PHY].link_reset( 6998 params->phy[INT_PHY].link_reset(
6974 &params->phy[INT_PHY], params); 6999 &params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
6999 s8 port; 7024 s8 port;
7000 s8 port_of_path = 0; 7025 s8 port_of_path = 0;
7001 7026
7027 bnx2x_ext_phy_hw_reset(bp, 0);
7002 /* PART1 - Reset both phys */ 7028 /* PART1 - Reset both phys */
7003 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7029 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7004 u32 shmem_base, shmem2_base; 7030 u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7021 return -EINVAL; 7047 return -EINVAL;
7022 } 7048 }
7023 /* disable attentions */ 7049 /* disable attentions */
7024 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7050 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7051 port_of_path*4,
7025 (NIG_MASK_XGXS0_LINK_STATUS | 7052 (NIG_MASK_XGXS0_LINK_STATUS |
7026 NIG_MASK_XGXS0_LINK10G | 7053 NIG_MASK_XGXS0_LINK10G |
7027 NIG_MASK_SERDES0_LINK_STATUS | 7054 NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7132 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); 7159 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
7133 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 7160 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
7134 7161
7135 bnx2x_ext_phy_hw_reset(bp, 1); 7162 bnx2x_ext_phy_hw_reset(bp, 0);
7136 msleep(5); 7163 msleep(5);
7137 for (port = 0; port < PORT_MAX; port++) { 7164 for (port = 0; port < PORT_MAX; port++) {
7138 u32 shmem_base, shmem2_base; 7165 u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index e9ad16f00b56..0068a1dbc064 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2026,13 +2026,28 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2026 2026
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp) 2027static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{ 2028{
2029 int vn; 2029 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2030 2030
2031 if (BP_NOMCP(bp)) 2031 if (BP_NOMCP(bp))
2032 return; /* what should be the default value in this case */ 2032 return; /* what should be the default value in this case */
2033 2033
2034 /* For 2 port configuration the absolute function number formula
2035 * is:
2036 * abs_func = 2 * vn + BP_PORT + BP_PATH
2037 *
2038 * and there are 4 functions per port
2039 *
2040 * For 4 port configuration it is
2041 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2042 *
2043 * and there are 2 functions per port
2044 */
2034 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2045 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2035 int /*abs*/func = 2*vn + BP_PORT(bp); 2046 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2047
2048 if (func >= E1H_FUNC_MAX)
2049 break;
2050
2036 bp->mf_config[vn] = 2051 bp->mf_config[vn] =
2037 MF_CFG_RD(bp, func_mf_config[func].config); 2052 MF_CFG_RD(bp, func_mf_config[func].config);
2038 } 2053 }
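
The comment block above reduces to one formula: abs_func = n * (2 * vn +
port) + path, where n is 2 on 4-port chips and 1 on 2-port chips. A worked
check of both configurations:

    #include <stdio.h>

    /* n = 2 on 4-port chips (2 functions per port), n = 1 on 2-port
     * chips (4 functions per port). */
    static int abs_func(int vn, int port, int path, int four_port)
    {
        int n = four_port ? 2 : 1;

        return n * (2 * vn + port) + path;
    }

    int main(void)
    {
        /* vn=1, port=1, path=0: 2-port gives 3, 4-port gives 6. */
        printf("2-port: %d\n", abs_func(1, 1, 0, 0));
        printf("4-port: %d\n", abs_func(1, 1, 0, 1));
        return 0;
    }
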
@@ -2248,10 +2263,21 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2248 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2263 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2249 u8 unmatched_unicast = 0; 2264 u8 unmatched_unicast = 0;
2250 2265
2266 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2267 unmatched_unicast = 1;
2268
2251 if (filters & BNX2X_PROMISCUOUS_MODE) { 2269 if (filters & BNX2X_PROMISCUOUS_MODE) {
2252 /* promiscious - accept all, drop none */ 2270 /* promiscious - accept all, drop none */
2253 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; 2271 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2254 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; 2272 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2273 if (IS_MF_SI(bp)) {
2274 /*
2275 * SI mode defines to accept in promiscuos mode
2276 * only unmatched packets
2277 */
2278 unmatched_unicast = 1;
2279 accp_all_ucast = 0;
2280 }
2255 } 2281 }
2256 if (filters & BNX2X_ACCEPT_UNICAST) { 2282 if (filters & BNX2X_ACCEPT_UNICAST) {
2257 /* accept matched ucast */ 2283 /* accept matched ucast */
@@ -2260,6 +2286,11 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2260 if (filters & BNX2X_ACCEPT_MULTICAST) { 2286 if (filters & BNX2X_ACCEPT_MULTICAST) {
2261 /* accept matched mcast */ 2287 /* accept matched mcast */
2262 drop_all_mcast = 0; 2288 drop_all_mcast = 0;
2289 if (IS_MF_SI(bp))
2290 /* since mcast addresses won't arrive with ovlan,
2291 * fw needs to accept all of them in
2292 * switch-independent mode */
2293 accp_all_mcast = 1;
2263 } 2294 }
2264 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2295 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2265 /* accept all mcast */ 2296 /* accept all mcast */
@@ -2372,7 +2403,7 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2372 /* calculate queue flags */ 2403 /* calculate queue flags */
2373 flags |= QUEUE_FLG_CACHE_ALIGN; 2404 flags |= QUEUE_FLG_CACHE_ALIGN;
2374 flags |= QUEUE_FLG_HC; 2405 flags |= QUEUE_FLG_HC;
2375 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0; 2406 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2376 2407
2377 flags |= QUEUE_FLG_VLAN; 2408 flags |= QUEUE_FLG_VLAN;
2378 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 2409 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2573,6 +2604,26 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
2573 */ 2604 */
2574} 2605}
2575 2606
2607/* called due to MCP event (on pmf):
2608 * reread new bandwidth configuration
2609 * configure FW
2610 * notify other functions about the change
2611 */
2612static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2613{
2614 if (bp->link_vars.link_up) {
2615 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2616 bnx2x_link_sync_notify(bp);
2617 }
2618 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2619}
2620
2621static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2622{
2623 bnx2x_config_mf_bw(bp);
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2625}
2626
2576static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2627static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2577{ 2628{
2578 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2629 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2649,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2598 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 2649 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2599 } 2650 }
2600 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2651 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2601 2652 bnx2x_config_mf_bw(bp);
2602 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2603 bnx2x_link_sync_notify(bp);
2604 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2605 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2653 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2606 } 2654 }
2607 2655
@@ -3022,6 +3070,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3022 if (val & DRV_STATUS_DCC_EVENT_MASK) 3070 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 bnx2x_dcc_event(bp, 3071 bnx2x_dcc_event(bp,
3024 (val & DRV_STATUS_DCC_EVENT_MASK)); 3072 (val & DRV_STATUS_DCC_EVENT_MASK));
3073
3074 if (val & DRV_STATUS_SET_MF_BW)
3075 bnx2x_set_mf_bw(bp);
3076
3025 bnx2x__link_status_update(bp); 3077 bnx2x__link_status_update(bp);
3026 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3078 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3027 bnx2x_pmf_update(bp); 3079 bnx2x_pmf_update(bp);
@@ -4232,6 +4284,15 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
4232 bp->mf_mode); 4284 bp->mf_mode);
4233 } 4285 }
4234 4286
4287 if (IS_MF_SI(bp))
4288 /*
4289 * In switch independent mode, the TSTORM needs to accept
4290 * packets that failed classification, since approximate match
4291 * mac addresses aren't written to NIG LLH
4292 */
4293 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4294 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4295
4235 /* Zero this manually as its initialization is 4296 /* Zero this manually as its initialization is
4236 currently missing in the initTool */ 4297 currently missing in the initTool */
4237 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4298 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5048,12 +5109,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5048 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5109 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5049#endif 5110#endif
5050 if (!CHIP_IS_E1(bp)) 5111 if (!CHIP_IS_E1(bp))
5051 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5112 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5052 5113
5053 if (CHIP_IS_E2(bp)) { 5114 if (CHIP_IS_E2(bp)) {
5054 /* Bit-map indicating which L2 hdrs may appear after the 5115 /* Bit-map indicating which L2 hdrs may appear after the
5055 basic Ethernet header */ 5116 basic Ethernet header */
5056 int has_ovlan = IS_MF(bp); 5117 int has_ovlan = IS_MF_SD(bp);
5057 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5118 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5058 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5119 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5059 } 5120 }
@@ -5087,7 +5148,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5087 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5148 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5088 5149
5089 if (CHIP_IS_E2(bp)) { 5150 if (CHIP_IS_E2(bp)) {
5090 int has_ovlan = IS_MF(bp); 5151 int has_ovlan = IS_MF_SD(bp);
5091 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5152 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5092 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5153 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5093 } 5154 }
@@ -5164,12 +5225,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5164 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5225 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5165 if (!CHIP_IS_E1(bp)) { 5226 if (!CHIP_IS_E1(bp)) {
5166 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 5227 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5167 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp)); 5228 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5168 } 5229 }
5169 if (CHIP_IS_E2(bp)) { 5230 if (CHIP_IS_E2(bp)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the 5231 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */ 5232 basic Ethernet header */
5172 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6)); 5233 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5173 } 5234 }
5174 5235
5175 if (CHIP_REV_IS_SLOW(bp)) 5236 if (CHIP_REV_IS_SLOW(bp))
@@ -5386,7 +5447,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5386 if (!CHIP_IS_E1(bp)) { 5447 if (!CHIP_IS_E1(bp)) {
5387 /* 0x2 disable mf_ov, 0x1 enable */ 5448 /* 0x2 disable mf_ov, 0x1 enable */
5388 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5449 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5389 (IS_MF(bp) ? 0x1 : 0x2)); 5450 (IS_MF_SD(bp) ? 0x1 : 0x2));
5390 5451
5391 if (CHIP_IS_E2(bp)) { 5452 if (CHIP_IS_E2(bp)) {
5392 val = 0; 5453 val = 0;
@@ -6170,6 +6231,70 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6170 return BP_VN(bp) * 32 + rel_offset; 6231 return BP_VN(bp) * 32 + rel_offset;
6171} 6232}
6172 6233
6234/**
6235 * LLH CAM line allocations: currently only iSCSI and ETH MACs are
6236 * relevant. In addition, the current implementation is tuned for a
6237 * single ETH MAC.
6238 *
6239 * When a PF configuration with multiple unicast ETH MACs is required
6240 * in switch-independent mode (NetQ, multiple netdev MACs, etc.),
6241 * consider making better use of the 16 per-function MAC entries in
6242 * the LLH memory.
6243 */
6244enum {
6245 LLH_CAM_ISCSI_ETH_LINE = 0,
6246 LLH_CAM_ETH_LINE,
6247 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6248};
6249
6250static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6251 int set,
6252 unsigned char *dev_addr,
6253 int index)
6254{
6255 u32 wb_data[2];
6256 u32 mem_offset, ena_offset, mem_index;
6257 /**
6258 * index mapping:
6259 * 0..7 - goes to MEM
6260 * 8..15 - goes to MEM2
6261 */
6262
6263 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6264 return;
6265
6266 /* calculate memory start offset according to the mapping
6267 * and index in the memory */
6268 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6269 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6270 NIG_REG_LLH0_FUNC_MEM;
6271 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6272 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6273 mem_index = index;
6274 } else {
6275 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6276 NIG_REG_P0_LLH_FUNC_MEM2;
6277 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6278 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6279 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6280 }
6281
6282 if (set) {
6283 /* LLH_FUNC_MEM is a u64 WB register */
6284 mem_offset += 8*mem_index;
6285
6286 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6287 (dev_addr[4] << 8) | dev_addr[5]);
6288 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6289
6290 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6291 }
6292
6293 /* enable/disable the entry */
6294 REG_WR(bp, ena_offset + 4*mem_index, set);
6295
6296}
6297
6173void bnx2x_set_eth_mac(struct bnx2x *bp, int set) 6298void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6174{ 6299{
6175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : 6300 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
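
bnx2x_set_mac_in_nig() above writes the 6-byte MAC into a 64-bit wide-bus
register as two u32 words: the low word carries MAC bytes 2..5 and the high
word bytes 0..1. The packing in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a MAC into the two 32-bit halves of a wide-bus register,
     * matching the layout used for the LLH entry. */
    static void mac_to_wb(const uint8_t mac[6], uint32_t wb_data[2])
    {
        wb_data[0] = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                     ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        wb_data[1] = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
        uint32_t wb[2];

        mac_to_wb(mac, wb);
        /* prints wb[0]=0x18abcdef wb[1]=0x00000010 */
        printf("wb[0]=0x%08x wb[1]=0x%08x\n",
               (unsigned)wb[0], (unsigned)wb[1]);
        return 0;
    }
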
@@ -6179,6 +6304,8 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, 6304 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180 (1 << bp->fp->cl_id), cam_offset , 0); 6305 (1 << bp->fp->cl_id), cam_offset , 0);
6181 6306
6307 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6308
6182 if (CHIP_IS_E1(bp)) { 6309 if (CHIP_IS_E1(bp)) {
6183 /* broadcast MAC */ 6310 /* broadcast MAC */
6184 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 6311 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -6289,6 +6416,8 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6289 /* Send a SET_MAC ramrod */ 6416 /* Send a SET_MAC ramrod */
6290 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6417 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6291 cam_offset, 0); 6418 cam_offset, 0);
6419
6420 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6292 return 0; 6421 return 0;
6293} 6422}
6294#endif 6423#endif
@@ -8076,9 +8205,8 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8076static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8205static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8077{ 8206{
8078 int port = BP_PORT(bp); 8207 int port = BP_PORT(bp);
8079 u32 val, val2;
8080 u32 config; 8208 u32 config;
8081 u32 ext_phy_type, ext_phy_config;; 8209 u32 ext_phy_type, ext_phy_config;
8082 8210
8083 bp->link_params.bp = bp; 8211 bp->link_params.bp = bp;
8084 bp->link_params.port = port; 8212 bp->link_params.port = port;
@@ -8135,25 +8263,62 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8135 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8263 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8136 bp->mdio.prtad = 8264 bp->mdio.prtad =
8137 XGXS_EXT_PHY_ADDR(ext_phy_config); 8265 XGXS_EXT_PHY_ADDR(ext_phy_config);
8266}
8138 8267
8139 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 8268static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 8269{
8141 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8270 u32 val, val2;
8142 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8271 int func = BP_ABS_FUNC(bp);
8143 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8272 int port = BP_PORT(bp);
8273
8274 if (BP_NOMCP(bp)) {
8275 BNX2X_ERROR("warning: random MAC workaround active\n");
8276 random_ether_addr(bp->dev->dev_addr);
8277 } else if (IS_MF(bp)) {
8278 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8279 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8280 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8281 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8282 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8283
8284#ifdef BCM_CNIC
8285 /* iSCSI NPAR MAC */
8286 if (IS_MF_SI(bp)) {
8287 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8288 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8289 val2 = MF_CFG_RD(bp, func_ext_config[func].
8290 iscsi_mac_addr_upper);
8291 val = MF_CFG_RD(bp, func_ext_config[func].
8292 iscsi_mac_addr_lower);
8293 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8294 }
8295 }
8296#endif
8297 } else {
8298 /* in SF read MACs from port configuration */
8299 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8300 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8301 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8144 8302
8145#ifdef BCM_CNIC 8303#ifdef BCM_CNIC
8146 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); 8304 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8147 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); 8305 iscsi_mac_upper);
8148 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8306 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8307 iscsi_mac_lower);
8308 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8149#endif 8309#endif
8310 }
8311
8312 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8313 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8314
8150} 8315}
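
This hunk leans on bnx2x_set_mac_buf() to split the mac_upper/mac_lower
words into a 6-byte address. A standalone sketch of that helper's byte
layout, reconstructed from the open-coded assignment removed in the
bnx2x_get_hwinfo() hunk further below (the local name set_mac_buf is
illustrative):

#include <stdint.h>

/* Split the mac_upper (16 bits used) and mac_lower shmem words into a
 * 6-byte MAC, following the removed open-coded version byte for byte. */
static void set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	mac_buf[0] = (uint8_t)(mac_hi >> 8);	/* val2 >> 8 & 0xff  */
	mac_buf[1] = (uint8_t)(mac_hi & 0xff);	/* val2 & 0xff       */
	mac_buf[2] = (uint8_t)(mac_lo >> 24);	/* val >> 24 & 0xff  */
	mac_buf[3] = (uint8_t)(mac_lo >> 16);	/* val >> 16 & 0xff  */
	mac_buf[4] = (uint8_t)(mac_lo >> 8);	/* val >> 8 & 0xff   */
	mac_buf[5] = (uint8_t)(mac_lo);		/* val & 0xff        */
}
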
8151 8316
8152static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8317static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8153{ 8318{
8154 int func = BP_ABS_FUNC(bp); 8319 int /*abs*/func = BP_ABS_FUNC(bp);
8155 int vn; 8320 int vn, port;
8156 u32 val, val2; 8321 u32 val = 0;
8157 int rc = 0; 8322 int rc = 0;
8158 8323
8159 bnx2x_get_common_hwinfo(bp); 8324 bnx2x_get_common_hwinfo(bp);
@@ -8186,44 +8351,99 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8186 bp->mf_ov = 0; 8351 bp->mf_ov = 0;
8187 bp->mf_mode = 0; 8352 bp->mf_mode = 0;
8188 vn = BP_E1HVN(bp); 8353 vn = BP_E1HVN(bp);
8354 port = BP_PORT(bp);
8355
8189 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 8356 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8357 DP(NETIF_MSG_PROBE,
8358 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8359 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8360 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8190 if (SHMEM2_HAS(bp, mf_cfg_addr)) 8361 if (SHMEM2_HAS(bp, mf_cfg_addr))
8191 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 8362 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8192 else 8363 else
8193 bp->common.mf_cfg_base = bp->common.shmem_base + 8364 bp->common.mf_cfg_base = bp->common.shmem_base +
8194 offsetof(struct shmem_region, func_mb) + 8365 offsetof(struct shmem_region, func_mb) +
8195 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 8366 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8196 bp->mf_config[vn] = 8367 /*
8197 MF_CFG_RD(bp, func_mf_config[func].config); 8368 * get mf configuration:
 8369 * 1. existence of MF configuration
8370 * 2. MAC address must be legal (check only upper bytes)
8371 * for Switch-Independent mode;
8372 * OVLAN must be legal for Switch-Dependent mode
8373 * 3. SF_MODE configures specific MF mode
8374 */
8375 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8376 /* get mf configuration */
8377 val = SHMEM_RD(bp,
8378 dev_info.shared_feature_config.config);
8379 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8380
8381 switch (val) {
8382 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8383 val = MF_CFG_RD(bp, func_mf_config[func].
8384 mac_upper);
 8385 /* check for legal MAC (upper bytes) */
8386 if (val != 0xffff) {
8387 bp->mf_mode = MULTI_FUNCTION_SI;
8388 bp->mf_config[vn] = MF_CFG_RD(bp,
8389 func_mf_config[func].config);
8390 } else
8391 DP(NETIF_MSG_PROBE, "illegal MAC "
8392 "address for SI\n");
8393 break;
8394 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8395 /* get OV configuration */
8396 val = MF_CFG_RD(bp,
8397 func_mf_config[FUNC_0].e1hov_tag);
8398 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8399
8400 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8401 bp->mf_mode = MULTI_FUNCTION_SD;
8402 bp->mf_config[vn] = MF_CFG_RD(bp,
8403 func_mf_config[func].config);
8404 } else
8405 DP(NETIF_MSG_PROBE, "illegal OV for "
8406 "SD\n");
8407 break;
8408 default:
8409 /* Unknown configuration: reset mf_config */
8410 bp->mf_config[vn] = 0;
 8411 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8412 val);
8413 }
8414 }
8198 8415
8199 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8200 FUNC_MF_CFG_E1HOV_TAG_MASK);
8201 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8202 bp->mf_mode = 1;
8203 BNX2X_DEV_INFO("%s function mode\n", 8416 BNX2X_DEV_INFO("%s function mode\n",
8204 IS_MF(bp) ? "multi" : "single"); 8417 IS_MF(bp) ? "multi" : "single");
8205 8418
8206 if (IS_MF(bp)) { 8419 switch (bp->mf_mode) {
8207 val = (MF_CFG_RD(bp, func_mf_config[func]. 8420 case MULTI_FUNCTION_SD:
8208 e1hov_tag) & 8421 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8209 FUNC_MF_CFG_E1HOV_TAG_MASK); 8422 FUNC_MF_CFG_E1HOV_TAG_MASK;
8210 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8423 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8211 bp->mf_ov = val; 8424 bp->mf_ov = val;
8212 BNX2X_DEV_INFO("MF OV for func %d is %d " 8425 BNX2X_DEV_INFO("MF OV for func %d is %d"
8213 "(0x%04x)\n", 8426 " (0x%04x)\n", func,
8214 func, bp->mf_ov, bp->mf_ov); 8427 bp->mf_ov, bp->mf_ov);
8215 } else { 8428 } else {
8216 BNX2X_ERROR("No valid MF OV for func %d," 8429 BNX2X_ERR("No valid MF OV for func %d,"
8217 " aborting\n", func); 8430 " aborting\n", func);
8218 rc = -EPERM; 8431 rc = -EPERM;
8219 } 8432 }
8220 } else { 8433 break;
8221 if (BP_VN(bp)) { 8434 case MULTI_FUNCTION_SI:
8222 BNX2X_ERROR("VN %d in single function mode," 8435 BNX2X_DEV_INFO("func %d is in MF "
8223 " aborting\n", BP_E1HVN(bp)); 8436 "switch-independent mode\n", func);
8437 break;
8438 default:
8439 if (vn) {
8440 BNX2X_ERR("VN %d in single function mode,"
8441 " aborting\n", vn);
8224 rc = -EPERM; 8442 rc = -EPERM;
8225 } 8443 }
8444 break;
8226 } 8445 }
8446
8227 } 8447 }
8228 8448
8229 /* adjust igu_sb_cnt to MF for E1x */ 8449 /* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8468,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8468 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8249 } 8469 }
8250 8470
8251 if (IS_MF(bp)) { 8471 /* Get MAC addresses */
8252 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 8472 bnx2x_get_mac_hwinfo(bp);
8253 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8254 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8255 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8256 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8257 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8258 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8259 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8260 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8261 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8262 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8263 ETH_ALEN);
8264 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
8266 }
8267
8268 return rc;
8269 }
8270
8271 if (BP_NOMCP(bp)) {
8272 /* only supposed to happen on emulation/FPGA */
8273 BNX2X_ERROR("warning: random MAC workaround active\n");
8274 random_ether_addr(bp->dev->dev_addr);
8275 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8276 }
8277 8473
8278 return rc; 8474 return rc;
8279} 8475}
@@ -8761,7 +8957,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8761 dev->netdev_ops = &bnx2x_netdev_ops; 8957 dev->netdev_ops = &bnx2x_netdev_ops;
8762 bnx2x_set_ethtool_ops(dev); 8958 bnx2x_set_ethtool_ops(dev);
8763 dev->features |= NETIF_F_SG; 8959 dev->features |= NETIF_F_SG;
8764 dev->features |= NETIF_F_HW_CSUM; 8960 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8765 if (bp->flags & USING_DAC_FLAG) 8961 if (bp->flags & USING_DAC_FLAG)
8766 dev->features |= NETIF_F_HIGHDMA; 8962 dev->features |= NETIF_F_HIGHDMA;
8767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8963 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,7 +8965,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8769 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 8965 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8770 8966
8771 dev->vlan_features |= NETIF_F_SG; 8967 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM; 8968 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8773 if (bp->flags & USING_DAC_FLAG) 8969 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA; 8970 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8971 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -9064,7 +9260,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9064 default: 9260 default:
9065 pr_err("Unknown board_type (%ld), aborting\n", 9261 pr_err("Unknown board_type (%ld), aborting\n",
9066 ent->driver_data); 9262 ent->driver_data);
9067 return ENODEV; 9263 return -ENODEV;
9068 } 9264 }
9069 9265
9070 cid_count += CNIC_CONTEXT_USE; 9266 cid_count += CNIC_CONTEXT_USE;
@@ -9096,12 +9292,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9096 /* calc qm_cid_count */ 9292 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); 9293 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098 9294
9099 rc = register_netdev(dev);
9100 if (rc) {
9101 dev_err(&pdev->dev, "Cannot register net device\n");
9102 goto init_one_exit;
9103 }
9104
9105 /* Configure interrupt mode: try to enable MSI-X/MSI if 9295 /* Configure interrupt mode: try to enable MSI-X/MSI if
9106 * needed, set bp->num_queues appropriately. 9296 * needed, set bp->num_queues appropriately.
9107 */ 9297 */
@@ -9110,6 +9300,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9110 /* Add all NAPI objects */ 9300 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp); 9301 bnx2x_add_all_napi(bp);
9112 9302
9303 rc = register_netdev(dev);
9304 if (rc) {
9305 dev_err(&pdev->dev, "Cannot register net device\n");
9306 goto init_one_exit;
9307 }
9308
9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9309 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9114 9310
9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9311 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1cefe489a955..64bdda189e5a 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1774,6 +1774,8 @@
1774/* [RW 8] event id for llh0 */ 1774/* [RW 8] event id for llh0 */
1775#define NIG_REG_LLH0_EVENT_ID 0x10084 1775#define NIG_REG_LLH0_EVENT_ID 0x10084
1776#define NIG_REG_LLH0_FUNC_EN 0x160fc 1776#define NIG_REG_LLH0_FUNC_EN 0x160fc
1777#define NIG_REG_LLH0_FUNC_MEM 0x16180
1778#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
1777#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 1779#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1778/* [RW 1] Determine the IP version to look for in 1780/* [RW 1] Determine the IP version to look for in
1779 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ 1781 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1799,9 @@
1797#define NIG_REG_LLH1_ERROR_MASK 0x10090 1799#define NIG_REG_LLH1_ERROR_MASK 0x10090
1798/* [RW 8] event id for llh1 */ 1800/* [RW 8] event id for llh1 */
1799#define NIG_REG_LLH1_EVENT_ID 0x10088 1801#define NIG_REG_LLH1_EVENT_ID 0x10088
1802#define NIG_REG_LLH1_FUNC_MEM 0x161c0
1803#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
1804#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
1800/* [RW 8] init credit counter for port1 in LLH */ 1805/* [RW 8] init credit counter for port1 in LLH */
1801#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 1806#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1802#define NIG_REG_LLH1_XCM_MASK 0x10134 1807#define NIG_REG_LLH1_XCM_MASK 0x10134
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6faef24c..0e2737eac8b7 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_BONDING) += bonding.o 5obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
8 8
9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o 9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
10bonding-objs += $(ipv6-y) 10bonding-objs += $(ipv6-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914bc4e9c..48cf24ff4e6f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2474 goto out; 2474 goto out;
2475 2475
2476 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2477 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2477 slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
2478 orig_dev);
2479 if (!slave) 2478 if (!slave)
2480 goto out_unlock; 2479 goto out_unlock;
2481 2480
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
new file mode 100644
index 000000000000..ae1eb2fc3a47
--- /dev/null
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -0,0 +1,96 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/device.h>
4#include <linux/netdevice.h>
5
6#include "bonding.h"
7
8#ifdef CONFIG_DEBUG_FS
9
10#include <linux/debugfs.h>
11#include <linux/seq_file.h>
12
13static struct dentry *bonding_debug_root;
14
15void bond_debug_register(struct bonding *bond)
16{
17 if (!bonding_debug_root)
18 return;
19
20 bond->debug_dir =
21 debugfs_create_dir(bond->dev->name, bonding_debug_root);
22
23 if (!bond->debug_dir) {
24 pr_warning("%s: Warning: failed to register to debugfs\n",
25 bond->dev->name);
26 return;
27 }
28}
29
30void bond_debug_unregister(struct bonding *bond)
31{
32 if (!bonding_debug_root)
33 return;
34
35 debugfs_remove_recursive(bond->debug_dir);
36}
37
38void bond_debug_reregister(struct bonding *bond)
39{
40 struct dentry *d;
41
42 if (!bonding_debug_root)
43 return;
44
45 d = debugfs_rename(bonding_debug_root, bond->debug_dir,
46 bonding_debug_root, bond->dev->name);
47 if (d) {
48 bond->debug_dir = d;
49 } else {
50 pr_warning("%s: Warning: failed to reregister, "
51 "so just unregister old one\n",
52 bond->dev->name);
53 bond_debug_unregister(bond);
54 }
55}
56
57void bond_create_debugfs(void)
58{
59 bonding_debug_root = debugfs_create_dir("bonding", NULL);
60
61 if (!bonding_debug_root) {
62 pr_warning("Warning: Cannot create bonding directory"
63 " in debugfs\n");
64 }
65}
66
67void bond_destroy_debugfs(void)
68{
69 debugfs_remove_recursive(bonding_debug_root);
70 bonding_debug_root = NULL;
71}
72
73
74#else /* !CONFIG_DEBUG_FS */
75
76void bond_debug_register(struct bonding *bond)
77{
78}
79
80void bond_debug_unregister(struct bonding *bond)
81{
82}
83
84void bond_debug_reregister(struct bonding *bond)
85{
86}
87
88void bond_create_debugfs(void)
89{
90}
91
92void bond_destroy_debugfs(void)
93{
94}
95
96#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..07011e42cec7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -873,15 +873,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
873static void __bond_resend_igmp_join_requests(struct net_device *dev) 873static void __bond_resend_igmp_join_requests(struct net_device *dev)
874{ 874{
875 struct in_device *in_dev; 875 struct in_device *in_dev;
876 struct ip_mc_list *im;
877 876
878 rcu_read_lock(); 877 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(dev); 878 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 879 if (in_dev)
881 for (im = in_dev->mc_list; im; im = im->next) 880 ip_mc_rejoin_groups(in_dev);
882 ip_mc_rejoin_group(im);
883 }
884
885 rcu_read_unlock(); 881 rcu_read_unlock();
886} 882}
887 883
@@ -1574,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1574 1570
1575 /* If this is the first slave, then we need to set the master's hardware 1571 /* If this is the first slave, then we need to set the master's hardware
1576 * address to be the same as the slave's. */ 1572 * address to be the same as the slave's. */
1577 if (bond->slave_cnt == 0) 1573 if (is_zero_ether_addr(bond->dev->dev_addr))
1578 memcpy(bond->dev->dev_addr, slave_dev->dev_addr, 1574 memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
1579 slave_dev->addr_len); 1575 slave_dev->addr_len);
1580 1576
@@ -3209,7 +3205,7 @@ out:
3209#ifdef CONFIG_PROC_FS 3205#ifdef CONFIG_PROC_FS
3210 3206
3211static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 3207static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3212 __acquires(&dev_base_lock) 3208 __acquires(RCU)
3213 __acquires(&bond->lock) 3209 __acquires(&bond->lock)
3214{ 3210{
3215 struct bonding *bond = seq->private; 3211 struct bonding *bond = seq->private;
@@ -3218,7 +3214,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3218 int i; 3214 int i;
3219 3215
3220 /* make sure the bond won't be taken away */ 3216 /* make sure the bond won't be taken away */
3221 read_lock(&dev_base_lock); 3217 rcu_read_lock();
3222 read_lock(&bond->lock); 3218 read_lock(&bond->lock);
3223 3219
3224 if (*pos == 0) 3220 if (*pos == 0)
@@ -3248,12 +3244,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3248 3244
3249static void bond_info_seq_stop(struct seq_file *seq, void *v) 3245static void bond_info_seq_stop(struct seq_file *seq, void *v)
3250 __releases(&bond->lock) 3246 __releases(&bond->lock)
3251 __releases(&dev_base_lock) 3247 __releases(RCU)
3252{ 3248{
3253 struct bonding *bond = seq->private; 3249 struct bonding *bond = seq->private;
3254 3250
3255 read_unlock(&bond->lock); 3251 read_unlock(&bond->lock);
3256 read_unlock(&dev_base_lock); 3252 rcu_read_unlock();
3257} 3253}
3258 3254
3259static void bond_info_show_master(struct seq_file *seq) 3255static void bond_info_show_master(struct seq_file *seq)
@@ -3507,6 +3503,8 @@ static int bond_event_changename(struct bonding *bond)
3507 bond_remove_proc_entry(bond); 3503 bond_remove_proc_entry(bond);
3508 bond_create_proc_entry(bond); 3504 bond_create_proc_entry(bond);
3509 3505
3506 bond_debug_reregister(bond);
3507
3510 return NOTIFY_DONE; 3508 return NOTIFY_DONE;
3511} 3509}
3512 3510
@@ -4789,6 +4787,8 @@ static void bond_uninit(struct net_device *bond_dev)
4789 4787
4790 bond_remove_proc_entry(bond); 4788 bond_remove_proc_entry(bond);
4791 4789
4790 bond_debug_unregister(bond);
4791
4792 __hw_addr_flush(&bond->mc_list); 4792 __hw_addr_flush(&bond->mc_list);
4793 4793
4794 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { 4794 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
@@ -5191,6 +5191,8 @@ static int bond_init(struct net_device *bond_dev)
5191 5191
5192 bond_prepare_sysfs_group(bond); 5192 bond_prepare_sysfs_group(bond);
5193 5193
5194 bond_debug_register(bond);
5195
5194 __hw_addr_init(&bond->mc_list); 5196 __hw_addr_init(&bond->mc_list);
5195 return 0; 5197 return 0;
5196} 5198}
@@ -5312,6 +5314,8 @@ static int __init bonding_init(void)
5312 if (res) 5314 if (res)
5313 goto err_link; 5315 goto err_link;
5314 5316
5317 bond_create_debugfs();
5318
5315 for (i = 0; i < max_bonds; i++) { 5319 for (i = 0; i < max_bonds; i++) {
5316 res = bond_create(&init_net, NULL); 5320 res = bond_create(&init_net, NULL);
5317 if (res) 5321 if (res)
@@ -5322,7 +5326,6 @@ static int __init bonding_init(void)
5322 if (res) 5326 if (res)
5323 goto err; 5327 goto err;
5324 5328
5325
5326 register_netdevice_notifier(&bond_netdev_notifier); 5329 register_netdevice_notifier(&bond_netdev_notifier);
5327 register_inetaddr_notifier(&bond_inetaddr_notifier); 5330 register_inetaddr_notifier(&bond_inetaddr_notifier);
5328 bond_register_ipv6_notifier(); 5331 bond_register_ipv6_notifier();
@@ -5346,6 +5349,7 @@ static void __exit bonding_exit(void)
5346 bond_unregister_ipv6_notifier(); 5349 bond_unregister_ipv6_notifier();
5347 5350
5348 bond_destroy_sysfs(); 5351 bond_destroy_sysfs();
5352 bond_destroy_debugfs();
5349 5353
5350 rtnl_link_unregister(&bond_link_ops); 5354 rtnl_link_unregister(&bond_link_ops);
5351 unregister_pernet_subsys(&bond_net_ops); 5355 unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..03710f8f5c49 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -259,6 +259,10 @@ struct bonding {
259#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 259#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
260 struct in6_addr master_ipv6; 260 struct in6_addr master_ipv6;
261#endif 261#endif
262#ifdef CONFIG_DEBUG_FS
263 /* debugging support via debugfs */
264 struct dentry *debug_dir;
265#endif /* CONFIG_DEBUG_FS */
262}; 266};
263 267
264/** 268/**
@@ -286,7 +290,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
286 return NULL; 290 return NULL;
287 } 291 }
288 292
289 return (struct bonding *)netdev_priv(slave->dev->master); 293 return netdev_priv(slave->dev->master);
290} 294}
291 295
292static inline bool bond_is_lb(const struct bonding *bond) 296static inline bool bond_is_lb(const struct bonding *bond)
@@ -380,6 +384,11 @@ void bond_select_active_slave(struct bonding *bond);
380void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 384void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
381void bond_register_arp(struct bonding *); 385void bond_register_arp(struct bonding *);
382void bond_unregister_arp(struct bonding *); 386void bond_unregister_arp(struct bonding *);
387void bond_create_debugfs(void);
388void bond_destroy_debugfs(void);
389void bond_debug_register(struct bonding *bond);
390void bond_debug_unregister(struct bonding *bond);
391void bond_debug_reregister(struct bonding *bond);
383 392
384struct bond_net { 393struct bond_net {
385 struct net * net; /* Associated network namespace */ 394 struct net * net; /* Associated network namespace */
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da86f13..32b1c6fb2de1 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,7 +5,7 @@
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
9 9
10#include <linux/version.h> 10#include <linux/version.h>
11#include <linux/init.h> 11#include <linux/init.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c0656667..80511167f35b 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt 9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
10 10
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); 33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
34MODULE_DESCRIPTION("CAIF SPI driver"); 34MODULE_DESCRIPTION("CAIF SPI driver");
35 35
36/* Returns the number of padding bytes for alignment. */
37#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
38
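
PAD_POW2(x, pow) computes how many padding bytes bring x up to the next
multiple of pow, assuming pow is a power of 2 (see the warning added
below). A minimal self-contained check of that arithmetic:

#include <assert.h>

#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

int main(void)
{
	assert(PAD_POW2(5, 4) == 3);	/* 5 -> 8: 3 pad bytes  */
	assert(PAD_POW2(8, 4) == 0);	/* already aligned      */
	assert(PAD_POW2(9, 8) == 7);	/* 9 -> 16: 7 pad bytes */
	assert(PAD_POW2(0, 4) == 0);	/* zero stays aligned   */
	return 0;
}
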
36static int spi_loop; 39static int spi_loop;
37module_param(spi_loop, bool, S_IRUGO); 40module_param(spi_loop, bool, S_IRUGO);
38MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); 41MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
41module_param(spi_frm_align, int, S_IRUGO); 44module_param(spi_frm_align, int, S_IRUGO);
42MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); 45MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
43 46
44/* SPI padding options. */ 47/*
48 * SPI padding options.
 49 * Warning: must be a power of 2 (& operation used) and cannot be zero!
50 */
45module_param(spi_up_head_align, int, S_IRUGO); 51module_param(spi_up_head_align, int, S_IRUGO);
46MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); 52MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
47 53
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
240static const struct file_operations dbgfs_state_fops = { 246static const struct file_operations dbgfs_state_fops = {
241 .open = dbgfs_open, 247 .open = dbgfs_open,
242 .read = dbgfs_state, 248 .read = dbgfs_state,
243 .owner = THIS_MODULE, 249 .owner = THIS_MODULE
244 .llseek = default_llseek,
245}; 250};
246 251
247static const struct file_operations dbgfs_frame_fops = { 252static const struct file_operations dbgfs_frame_fops = {
248 .open = dbgfs_open, 253 .open = dbgfs_open,
249 .read = dbgfs_frame, 254 .read = dbgfs_frame,
250 .owner = THIS_MODULE, 255 .owner = THIS_MODULE
251 .llseek = default_llseek,
252}; 256};
253 257
254static inline void dev_debugfs_add(struct cfspi *cfspi) 258static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
337 u8 *dst = buf; 341 u8 *dst = buf;
338 caif_assert(buf); 342 caif_assert(buf);
339 343
344 if (cfspi->slave && !cfspi->slave_talked)
345 cfspi->slave_talked = true;
346
340 do { 347 do {
341 struct sk_buff *skb; 348 struct sk_buff *skb;
342 struct caif_payload_info *info; 349 struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
357 * Compute head offset i.e. number of bytes to add to 364 * Compute head offset i.e. number of bytes to add to
358 * get the start of the payload aligned. 365 * get the start of the payload aligned.
359 */ 366 */
360 if (spi_up_head_align) { 367 if (spi_up_head_align > 1) {
361 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 368 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
362 *dst = (u8)(spad - 1); 369 *dst = (u8)(spad - 1);
363 dst += spad; 370 dst += spad;
364 } 371 }
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
373 * Compute tail offset i.e. number of bytes to add to 380 * Compute tail offset i.e. number of bytes to add to
374 * get the complete CAIF frame aligned. 381 * get the complete CAIF frame aligned.
375 */ 382 */
376 epad = (skb->len + spad) & spi_up_tail_align; 383 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
377 dst += epad; 384 dst += epad;
378 385
379 dev_kfree_skb(skb); 386 dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
417 * Compute head offset i.e. number of bytes to add to 424 * Compute head offset i.e. number of bytes to add to
418 * get the start of the payload aligned. 425 * get the start of the payload aligned.
419 */ 426 */
420 if (spi_up_head_align) 427 if (spi_up_head_align > 1)
421 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 428 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
422 429
423 /* 430 /*
424 * Compute tail offset i.e. number of bytes to add to 431 * Compute tail offset i.e. number of bytes to add to
425 * get the complete CAIF frame aligned. 432 * get the complete CAIF frame aligned.
426 */ 433 */
427 epad = (skb->len + spad) & spi_up_tail_align; 434 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
428 435
429 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { 436 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
430 skb_queue_tail(&cfspi->chead, skb); 437 skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
433 } else { 440 } else {
434 /* Put back packet. */ 441 /* Put back packet. */
435 skb_queue_head(&cfspi->qhead, skb); 442 skb_queue_head(&cfspi->qhead, skb);
443 break;
436 } 444 }
437 } while (pkts <= CAIF_MAX_SPI_PKTS); 445 } while (pkts <= CAIF_MAX_SPI_PKTS);
438 446
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
453{ 461{
454 struct cfspi *cfspi = (struct cfspi *)ifc->priv; 462 struct cfspi *cfspi = (struct cfspi *)ifc->priv;
455 463
464 /*
465 * The slave device is the master on the link. Interrupts before the
466 * slave has transmitted are considered spurious.
467 */
468 if (cfspi->slave && !cfspi->slave_talked) {
469 printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
470 return;
471 }
472
456 if (!in_interrupt()) 473 if (!in_interrupt())
457 spin_lock(&cfspi->lock); 474 spin_lock(&cfspi->lock);
458 if (assert) { 475 if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
465 spin_unlock(&cfspi->lock); 482 spin_unlock(&cfspi->lock);
466 483
467 /* Wake up the xfer thread. */ 484 /* Wake up the xfer thread. */
468 wake_up_interruptible(&cfspi->wait); 485 if (assert)
486 wake_up_interruptible(&cfspi->wait);
469} 487}
470 488
471static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) 489static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
523 * Compute head offset i.e. number of bytes added to 541 * Compute head offset i.e. number of bytes added to
524 * get the start of the payload aligned. 542 * get the start of the payload aligned.
525 */ 543 */
526 if (spi_down_head_align) { 544 if (spi_down_head_align > 1) {
527 spad = 1 + *src; 545 spad = 1 + *src;
528 src += spad; 546 src += spad;
529 } 547 }
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
564 * Compute tail offset i.e. number of bytes added to 582 * Compute tail offset i.e. number of bytes added to
565 * get the complete CAIF frame aligned. 583 * get the complete CAIF frame aligned.
566 */ 584 */
567 epad = (pkt_len + spad) & spi_down_tail_align; 585 epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
568 src += epad; 586 src += epad;
569 } while ((src - buf) < len); 587 } while ((src - buf) < len);
570 588
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
617 635
618 ndev = alloc_netdev(sizeof(struct cfspi), 636 ndev = alloc_netdev(sizeof(struct cfspi),
619 "cfspi%d", cfspi_setup); 637 "cfspi%d", cfspi_setup);
620 if (!dev) 638 if (!ndev)
621 return -ENODEV; 639 return -ENOMEM;
622 640
623 cfspi = netdev_priv(ndev); 641 cfspi = netdev_priv(ndev);
624 netif_stop_queue(ndev); 642 netif_stop_queue(ndev);
625 cfspi->ndev = ndev; 643 cfspi->ndev = ndev;
626 cfspi->pdev = pdev; 644 cfspi->pdev = pdev;
627 645
628 /* Set flow info */ 646 /* Set flow info. */
629 cfspi->flow_off_sent = 0; 647 cfspi->flow_off_sent = 0;
630 cfspi->qd_low_mark = LOW_WATER_MARK; 648 cfspi->qd_low_mark = LOW_WATER_MARK;
631 cfspi->qd_high_mark = HIGH_WATER_MARK; 649 cfspi->qd_high_mark = HIGH_WATER_MARK;
632 650
651 /* Set slave info. */
652 if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
653 cfspi->slave = true;
654 cfspi->slave_talked = false;
655 } else {
656 cfspi->slave = false;
657 cfspi->slave_talked = false;
658 }
659
633 /* Assign the SPI device. */ 660 /* Assign the SPI device. */
634 cfspi->dev = dev; 661 cfspi->dev = dev;
635 /* Assign the device ifc to this SPI interface. */ 662 /* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
36#endif 36#endif
37 37
38int spi_frm_align = 2; 38int spi_frm_align = 2;
39int spi_up_head_align = 1; 39
40int spi_up_tail_align; 40/*
41int spi_down_head_align = 3; 41 * SPI padding options.
42int spi_down_tail_align = 1; 42 * Warning: must be a power of 2 (& operation used) and cannot be zero!
43 */
44int spi_up_head_align = 1 << 1;
45int spi_up_tail_align = 1 << 0;
46int spi_down_head_align = 1 << 2;
47int spi_down_tail_align = 1 << 1;
43 48
44#ifdef CONFIG_DEBUG_FS 49#ifdef CONFIG_DEBUG_FS
45static inline void debugfs_store_prev(struct cfspi *cfspi) 50static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b0fff0..d5a9db60ade9 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@ config CAN_VCAN
12 This driver can also be built as a module. If so, the module 12 This driver can also be built as a module. If so, the module
13 will be called vcan. 13 will be called vcan.
14 14
15config CAN_SLCAN
16 tristate "Serial / USB serial CAN Adaptors (slcan)"
17 depends on CAN
18 default n
19 ---help---
20 CAN driver for several 'low cost' CAN interfaces that are attached
21 via serial lines or via USB-to-serial adapters using the LAWICEL
22 ASCII protocol. The driver implements the tty line discipline N_SLCAN.
23
24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see
30 http://developer.berlios.de/projects/socketcan for details.
31
32 The slcan driver supports up to 10 CAN netdevices by default which
33 can be changed by the 'maxdev=xx' module option. This driver can
34 also be built as a module. If so, the module will be called slcan.
35
15config CAN_DEV 36config CAN_DEV
16 tristate "Platform CAN drivers with Netlink support" 37 tristate "Platform CAN drivers with Netlink support"
17 depends on CAN 38 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a4f106..07ca159ba3f9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_VCAN) += vcan.o 5obj-$(CONFIG_CAN_VCAN) += vcan.o
6obj-$(CONFIG_CAN_SLCAN) += slcan.o
6 7
7obj-$(CONFIG_CAN_DEV) += can-dev.o 8obj-$(CONFIG_CAN_DEV) += can-dev.o
8can-dev-y := dev.o 9can-dev-y := dev.o
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 6e533dcc36c0..b9a6d7a5a739 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1114,11 +1114,6 @@ static bool ican3_txok(struct ican3_dev *mod)
1114/* 1114/*
1115 * Receive one CAN frame from the hardware 1115 * Receive one CAN frame from the hardware
1116 * 1116 *
1117 * This works like the core of a NAPI function, but is intended to be called
1118 * from workqueue context instead. This driver already needs a workqueue to
1119 * process control messages, so we use the workqueue instead of using NAPI.
1120 * This was done to simplify locking.
1121 *
1122 * CONTEXT: must be called from user context 1117 * CONTEXT: must be called from user context
1123 */ 1118 */
1124static int ican3_recv_skb(struct ican3_dev *mod) 1119static int ican3_recv_skb(struct ican3_dev *mod)
@@ -1251,7 +1246,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
1251 * Reset an ICAN module to its power-on state 1246 * Reset an ICAN module to its power-on state
1252 * 1247 *
1253 * CONTEXT: no network device registered 1248 * CONTEXT: no network device registered
1254 * LOCKING: work function disabled
1255 */ 1249 */
1256static int ican3_reset_module(struct ican3_dev *mod) 1250static int ican3_reset_module(struct ican3_dev *mod)
1257{ 1251{
@@ -1262,9 +1256,6 @@ static int ican3_reset_module(struct ican3_dev *mod)
1262 /* disable interrupts so no more work is scheduled */ 1256 /* disable interrupts so no more work is scheduled */
1263 iowrite8(1 << mod->num, &mod->ctrl->int_disable); 1257 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1264 1258
1265 /* flush any pending work */
1266 flush_scheduled_work();
1267
1268 /* the first unallocated page in the DPM is #9 */ 1259 /* the first unallocated page in the DPM is #9 */
1269 mod->free_page = DPM_FREE_START; 1260 mod->free_page = DPM_FREE_START;
1270 1261
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
182 182
183 priv->can.state = CAN_STATE_ERROR_ACTIVE; 183 priv->can.state = CAN_STATE_ERROR_ACTIVE;
184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD), 184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
185 "bus-off state expected"); 185 "bus-off state expected\n");
186 out_8(&regs->canmisc, MSCAN_BOHOLD); 186 out_8(&regs->canmisc, MSCAN_BOHOLD);
187 /* Re-enable receive interrupts. */ 187 /* Re-enable receive interrupts. */
188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE); 188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c68..a9b6a6525a65 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -32,99 +32,91 @@
32#include <linux/can/dev.h> 32#include <linux/can/dev.h>
33#include <linux/can/error.h> 33#include <linux/can/error.h>
34 34
35#define MAX_MSG_OBJ 32 35#define PCH_ENABLE 1 /* The enable flag */
36#define MSG_OBJ_RX 0 /* The receive message object flag. */ 36#define PCH_DISABLE 0 /* The disable flag */
37#define MSG_OBJ_TX 1 /* The transmit message object flag. */ 37#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
38 38#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
39#define ENABLE 1 /* The enable flag */ 39#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
40#define DISABLE 0 /* The disable flag */ 40#define PCH_CTRL_CCE BIT(6)
41#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */ 41#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
42#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */ 42#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
43#define CAN_CTRL_IE_SIE_EIE 0x000e 43#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
44#define CAN_CTRL_CCE 0x0040 44
45#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */ 45#define PCH_CMASK_RX_TX_SET 0x00f3
46#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */ 46#define PCH_CMASK_RX_TX_GET 0x0073
47#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */ 47#define PCH_CMASK_ALL 0xff
48#define CAN_CMASK_RX_TX_SET 0x00f3 48#define PCH_CMASK_NEWDAT BIT(2)
49#define CAN_CMASK_RX_TX_GET 0x0073 49#define PCH_CMASK_CLRINTPND BIT(3)
50#define CAN_CMASK_ALL 0xff 50#define PCH_CMASK_CTRL BIT(4)
51#define CAN_CMASK_RDWR 0x80 51#define PCH_CMASK_ARB BIT(5)
52#define CAN_CMASK_ARB 0x20 52#define PCH_CMASK_MASK BIT(6)
53#define CAN_CMASK_CTRL 0x10 53#define PCH_CMASK_RDWR BIT(7)
54#define CAN_CMASK_MASK 0x40 54#define PCH_IF_MCONT_NEWDAT BIT(15)
55#define CAN_CMASK_NEWDAT 0x04 55#define PCH_IF_MCONT_MSGLOST BIT(14)
56#define CAN_CMASK_CLRINTPND 0x08 56#define PCH_IF_MCONT_INTPND BIT(13)
57 57#define PCH_IF_MCONT_UMASK BIT(12)
58#define CAN_IF_MCONT_NEWDAT 0x8000 58#define PCH_IF_MCONT_TXIE BIT(11)
59#define CAN_IF_MCONT_INTPND 0x2000 59#define PCH_IF_MCONT_RXIE BIT(10)
60#define CAN_IF_MCONT_UMASK 0x1000 60#define PCH_IF_MCONT_RMTEN BIT(9)
61#define CAN_IF_MCONT_TXIE 0x0800 61#define PCH_IF_MCONT_TXRQXT BIT(8)
62#define CAN_IF_MCONT_RXIE 0x0400 62#define PCH_IF_MCONT_EOB BIT(7)
63#define CAN_IF_MCONT_RMTEN 0x0200 63#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
64#define CAN_IF_MCONT_TXRQXT 0x0100 64#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
65#define CAN_IF_MCONT_EOB 0x0080 65#define PCH_ID2_DIR BIT(13)
66#define CAN_IF_MCONT_DLC 0x000f 66#define PCH_ID2_XTD BIT(14)
67#define CAN_IF_MCONT_MSGLOST 0x4000 67#define PCH_ID_MSGVAL BIT(15)
68#define CAN_MASK2_MDIR_MXTD 0xc000 68#define PCH_IF_CREQ_BUSY BIT(15)
69#define CAN_ID2_DIR 0x2000 69
70#define CAN_ID_MSGVAL 0x8000 70#define PCH_STATUS_INT 0x8000
71 71#define PCH_REC 0x00007f00
72#define CAN_STATUS_INT 0x8000 72#define PCH_TEC 0x000000ff
73#define CAN_IF_CREQ_BUSY 0x8000 73
74#define CAN_ID2_XTD 0x4000 74#define PCH_TX_OK BIT(3)
75 75#define PCH_RX_OK BIT(4)
76#define CAN_REC 0x00007f00 76#define PCH_EPASSIV BIT(5)
77#define CAN_TEC 0x000000ff 77#define PCH_EWARN BIT(6)
78 78#define PCH_BUS_OFF BIT(7)
79#define PCH_RX_OK 0x00000010
80#define PCH_TX_OK 0x00000008
81#define PCH_BUS_OFF 0x00000080
82#define PCH_EWARN 0x00000040
83#define PCH_EPASSIV 0x00000020
84#define PCH_LEC0 0x00000001
85#define PCH_LEC1 0x00000002
86#define PCH_LEC2 0x00000004
87#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
88#define PCH_STUF_ERR PCH_LEC0
89#define PCH_FORM_ERR PCH_LEC1
90#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
91#define PCH_BIT1_ERR PCH_LEC2
92#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
93#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
94 79
95/* bit position of certain controller bits. */ 80/* bit position of certain controller bits. */
96#define BIT_BITT_BRP 0 81#define PCH_BIT_BRP 0
97#define BIT_BITT_SJW 6 82#define PCH_BIT_SJW 6
98#define BIT_BITT_TSEG1 8 83#define PCH_BIT_TSEG1 8
99#define BIT_BITT_TSEG2 12 84#define PCH_BIT_TSEG2 12
100#define BIT_IF1_MCONT_RXIE 10 85#define PCH_BIT_BRPE_BRPE 6
101#define BIT_IF2_MCONT_TXIE 11 86#define PCH_MSK_BITT_BRP 0x3f
102#define BIT_BRPE_BRPE 6 87#define PCH_MSK_BRPE_BRPE 0x3c0
103#define BIT_ES_TXERRCNT 0 88#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
104#define BIT_ES_RXERRCNT 8 89#define PCH_COUNTER_LIMIT 10
105#define MSK_BITT_BRP 0x3f
106#define MSK_BITT_SJW 0xc0
107#define MSK_BITT_TSEG1 0xf00
108#define MSK_BITT_TSEG2 0x7000
109#define MSK_BRPE_BRPE 0x3c0
110#define MSK_BRPE_GET 0x0f
111#define MSK_CTRL_IE_SIE_EIE 0x07
112#define MSK_MCONT_TXIE 0x08
113#define MSK_MCONT_RXIE 0x10
114#define PCH_CAN_NO_TX_BUFF 1
115#define COUNTER_LIMIT 10
116 90
117#define PCH_CAN_CLK 50000000 /* 50MHz */ 91#define PCH_CAN_CLK 50000000 /* 50MHz */
118 92
119/* Define the number of message objects. 93/* Define the number of message objects.
120 * PCH CAN communications are done via Message RAM. 94 * PCH CAN communications are done via Message RAM.
121 * The Message RAM consists of 32 message objects. */ 95 * The Message RAM consists of 32 message objects. */
122#define PCH_RX_OBJ_NUM 26 /* 1~ PCH_RX_OBJ_NUM is Rx*/ 96#define PCH_RX_OBJ_NUM 26
123#define PCH_TX_OBJ_NUM 6 /* PCH_RX_OBJ_NUM is RX ~ Tx*/ 97#define PCH_TX_OBJ_NUM 6
124#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM) 98#define PCH_RX_OBJ_START 1
99#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
100#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
101#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
125 102
126#define PCH_FIFO_THRESH 16 103#define PCH_FIFO_THRESH 16
127 104
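
The defines above partition the 32 message objects into RX objects 1..26
and TX objects 27..32. A compile-time sketch of that partitioning
(userspace C with the defines copied from the hunk; the negative-array-size
typedef trick fails the build on a mismatch):

#define PCH_RX_OBJ_NUM		26
#define PCH_TX_OBJ_NUM		6
#define PCH_RX_OBJ_START	1
#define PCH_RX_OBJ_END		PCH_RX_OBJ_NUM
#define PCH_TX_OBJ_START	(PCH_RX_OBJ_END + 1)
#define PCH_TX_OBJ_END		(PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)

/* compile-time checks: array size goes negative if the ranges drift */
typedef char tx_starts_at_27[(PCH_TX_OBJ_START == 27) ? 1 : -1];
typedef char tx_ends_at_32[(PCH_TX_OBJ_END == 32) ? 1 : -1];
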
105enum pch_ifreg {
106 PCH_RX_IFREG,
107 PCH_TX_IFREG,
108};
109
110enum pch_can_err {
111 PCH_STUF_ERR = 1,
112 PCH_FORM_ERR,
113 PCH_ACK_ERR,
114 PCH_BIT1_ERR,
115 PCH_BIT0_ERR,
116 PCH_CRC_ERR,
117 PCH_LEC_ALL,
118};
119
128enum pch_can_mode { 120enum pch_can_mode {
129 PCH_CAN_ENABLE, 121 PCH_CAN_ENABLE,
130 PCH_CAN_DISABLE, 122 PCH_CAN_DISABLE,
@@ -134,6 +126,21 @@ enum pch_can_mode {
134 PCH_CAN_RUN 126 PCH_CAN_RUN
135}; 127};
136 128
129struct pch_can_if_regs {
130 u32 creq;
131 u32 cmask;
132 u32 mask1;
133 u32 mask2;
134 u32 id1;
135 u32 id2;
136 u32 mcont;
137 u32 dataa1;
138 u32 dataa2;
139 u32 datab1;
140 u32 datab2;
141 u32 rsv[13];
142};
143
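
The ifregs[2] array can replace the old if1_*/if2_* fields only if each
bank spans exactly the distance between if1_creq and if2_creq in the old
layout: 11 live registers plus the 13 reserved words (reserve2 plus
reserve3[12]) make 24 u32s, i.e. 0x60 bytes. A userspace sketch checking
that size assumption:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of struct pch_can_if_regs above, with kernel u32 as uint32_t. */
struct pch_can_if_regs {
	uint32_t creq, cmask, mask1, mask2, id1, id2, mcont;
	uint32_t dataa1, dataa2, datab1, datab2;
	uint32_t rsv[13];
};

int main(void)
{
	/* 11 live registers + 13 reserved words = 24 u32s per bank */
	assert(sizeof(struct pch_can_if_regs) == 24 * sizeof(uint32_t));
	/* mcont is the 7th register, as in the old if1_mcont offset */
	assert(offsetof(struct pch_can_if_regs, mcont) == 6 * sizeof(uint32_t));
	return 0;
}
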
137struct pch_can_regs { 144struct pch_can_regs {
138 u32 cont; 145 u32 cont;
139 u32 stat; 146 u32 stat;
@@ -142,38 +149,21 @@ struct pch_can_regs {
142 u32 intr; 149 u32 intr;
143 u32 opt; 150 u32 opt;
144 u32 brpe; 151 u32 brpe;
145 u32 reserve1; 152 u32 reserve;
146 u32 if1_creq; 153 struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
147 u32 if1_cmask; 154 u32 reserve1[8];
148 u32 if1_mask1;
149 u32 if1_mask2;
150 u32 if1_id1;
151 u32 if1_id2;
152 u32 if1_mcont;
153 u32 if1_dataa1;
154 u32 if1_dataa2;
155 u32 if1_datab1;
156 u32 if1_datab2;
157 u32 reserve2;
158 u32 reserve3[12];
159 u32 if2_creq;
160 u32 if2_cmask;
161 u32 if2_mask1;
162 u32 if2_mask2;
163 u32 if2_id1;
164 u32 if2_id2;
165 u32 if2_mcont;
166 u32 if2_dataa1;
167 u32 if2_dataa2;
168 u32 if2_datab1;
169 u32 if2_datab2;
170 u32 reserve4;
171 u32 reserve5[20];
172 u32 treq1; 155 u32 treq1;
173 u32 treq2; 156 u32 treq2;
174 u32 reserve6[2]; 157 u32 reserve2[6];
175 u32 reserve7[56]; 158 u32 data1;
176 u32 reserve8[3]; 159 u32 data2;
160 u32 reserve3[6];
161 u32 canipend1;
162 u32 canipend2;
163 u32 reserve4[6];
164 u32 canmval1;
165 u32 canmval2;
166 u32 reserve5[37];
177 u32 srst; 167 u32 srst;
178}; 168};
179 169
@@ -181,14 +171,13 @@ struct pch_can_priv {
181 struct can_priv can; 171 struct can_priv can;
182 unsigned int can_num; 172 unsigned int can_num;
183 struct pci_dev *dev; 173 struct pci_dev *dev;
184 unsigned int tx_enable[MAX_MSG_OBJ]; 174 int tx_enable[PCH_TX_OBJ_END];
185 unsigned int rx_enable[MAX_MSG_OBJ]; 175 int rx_enable[PCH_TX_OBJ_END];
186 unsigned int rx_link[MAX_MSG_OBJ]; 176 int rx_link[PCH_TX_OBJ_END];
187 unsigned int int_enables; 177 unsigned int int_enables;
188 unsigned int int_stat; 178 unsigned int int_stat;
189 struct net_device *ndev; 179 struct net_device *ndev;
190 spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/ 180 unsigned int msg_obj[PCH_TX_OBJ_END];
191 unsigned int msg_obj[MAX_MSG_OBJ];
192 struct pch_can_regs __iomem *regs; 181 struct pch_can_regs __iomem *regs;
193 struct napi_struct napi; 182 struct napi_struct napi;
194 unsigned int tx_obj; /* Point next Tx Obj index */ 183 unsigned int tx_obj; /* Point next Tx Obj index */
@@ -228,11 +217,11 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
228{ 217{
229 switch (mode) { 218 switch (mode) {
230 case PCH_CAN_RUN: 219 case PCH_CAN_RUN:
231 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT); 220 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
232 break; 221 break;
233 222
234 case PCH_CAN_STOP: 223 case PCH_CAN_STOP:
235 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT); 224 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
236 break; 225 break;
237 226
238 default: 227 default:
@@ -246,30 +235,30 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
246 u32 reg_val = ioread32(&priv->regs->opt); 235 u32 reg_val = ioread32(&priv->regs->opt);
247 236
248 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 237 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
249 reg_val |= CAN_OPT_SILENT; 238 reg_val |= PCH_OPT_SILENT;
250 239
251 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 240 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
252 reg_val |= CAN_OPT_LBACK; 241 reg_val |= PCH_OPT_LBACK;
253 242
254 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT); 243 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
255 iowrite32(reg_val, &priv->regs->opt); 244 iowrite32(reg_val, &priv->regs->opt);
256} 245}
257 246
258static void pch_can_set_int_custom(struct pch_can_priv *priv) 247static void pch_can_set_int_custom(struct pch_can_priv *priv)
259{ 248{
260 /* Clearing the IE, SIE and EIE bits of Can control register. */ 249 /* Clearing the IE, SIE and EIE bits of Can control register. */
261 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 250 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
262 251
263 /* Appropriately setting them. */ 252 /* Appropriately setting them. */
264 pch_can_bit_set(&priv->regs->cont, 253 pch_can_bit_set(&priv->regs->cont,
265 ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1)); 254 ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
266} 255}
267 256
268/* This function retrieves interrupt enabled for the CAN device. */ 257/* This function retrieves interrupt enabled for the CAN device. */
269static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables) 258static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
270{ 259{
271 /* Obtaining the status of IE, SIE and EIE interrupt bits. */ 260 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
272 *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1); 261 *enables = ((ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1);
273} 262}
274 263
275static void pch_can_set_int_enables(struct pch_can_priv *priv, 264static void pch_can_set_int_enables(struct pch_can_priv *priv,
@@ -277,19 +266,19 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
277{ 266{
278 switch (interrupt_no) { 267 switch (interrupt_no) {
279 case PCH_CAN_ENABLE: 268 case PCH_CAN_ENABLE:
280 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE); 269 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE);
281 break; 270 break;
282 271
283 case PCH_CAN_DISABLE: 272 case PCH_CAN_DISABLE:
284 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE); 273 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
285 break; 274 break;
286 275
287 case PCH_CAN_ALL: 276 case PCH_CAN_ALL:
288 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 277 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
289 break; 278 break;
290 279
291 case PCH_CAN_NONE: 280 case PCH_CAN_NONE:
292 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 281 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
293 break; 282 break;
294 283
295 default: 284 default:
@@ -300,12 +289,12 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
300 289
301static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num) 290static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
302{ 291{
303 u32 counter = COUNTER_LIMIT; 292 u32 counter = PCH_COUNTER_LIMIT;
304 u32 ifx_creq; 293 u32 ifx_creq;
305 294
306 iowrite32(num, creq_addr); 295 iowrite32(num, creq_addr);
307 while (counter) { 296 while (counter) {
308 ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY; 297 ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
309 if (!ifx_creq) 298 if (!ifx_creq)
310 break; 299 break;
311 counter--; 300 counter--;
@@ -315,143 +304,76 @@ static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
315 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__); 304 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
316} 305}
317 306
318static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num, 307static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
319 u32 set) 308 u32 set, enum pch_ifreg dir)
320{ 309{
321 unsigned long flags; 310 u32 ie;
311
312 if (dir)
313 ie = PCH_IF_MCONT_TXIE;
314 else
315 ie = PCH_IF_MCONT_RXIE;
322 316
323 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
324 /* Reading the receive buffer data from RAM to Interface1 registers */ 317 /* Reading the receive buffer data from RAM to Interface1 registers */
325 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 318 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
326 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num); 319 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
327 320
328 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */ 321 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
329 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL, 322 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
330 &priv->regs->if1_cmask); 323 &priv->regs->ifregs[dir].cmask);
331 324
332 if (set == ENABLE) { 325 if (set == PCH_ENABLE) {
333 /* Setting the MsgVal and RxIE bits */ 326 /* Setting the MsgVal and RxIE bits */
334 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE); 327 pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
335 pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL); 328 pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
336 329
337 } else if (set == DISABLE) { 330 } else if (set == PCH_DISABLE) {
338 /* Resetting the MsgVal and RxIE bits */ 331 /* Resetting the MsgVal and RxIE bits */
339 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE); 332 pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
340 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL); 333 pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
341 } 334 }
342 335
343 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num); 336 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
344 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
345} 337}
346 338
347static void pch_can_rx_enable_all(struct pch_can_priv *priv) 339static void pch_can_set_rx_all(struct pch_can_priv *priv, u32 set)
348{ 340{
349 int i; 341 int i;
350 342
351 /* Traversing to obtain the object configured as receivers. */ 343 /* Traversing to obtain the object configured as receivers. */
352 for (i = 0; i < PCH_OBJ_NUM; i++) { 344 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
353 if (priv->msg_obj[i] == MSG_OBJ_RX) 345 pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
354 pch_can_set_rx_enable(priv, i + 1, ENABLE);
355 }
356}
357
358static void pch_can_rx_disable_all(struct pch_can_priv *priv)
359{
360 int i;
361
362 /* Traversing to obtain the object configured as receivers. */
363 for (i = 0; i < PCH_OBJ_NUM; i++) {
364 if (priv->msg_obj[i] == MSG_OBJ_RX)
365 pch_can_set_rx_enable(priv, i + 1, DISABLE);
366 }
367}
368
369static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
370 u32 set)
371{
372 unsigned long flags;
373
374 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
375 /* Reading the Msg buffer from Message RAM to Interface2 registers. */
376 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
377 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
378
379 /* Setting the IF2CMASK register for accessing the
380 MsgVal and TxIE bits */
381 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
382 &priv->regs->if2_cmask);
383
384 if (set == ENABLE) {
385 /* Setting the MsgVal and TxIE bits */
386 pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
387 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
388 } else if (set == DISABLE) {
389 /* Resetting the MsgVal and TxIE bits. */
390 pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
391 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
392 }
393
394 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
395 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
396} 346}
397 347
398static void pch_can_tx_enable_all(struct pch_can_priv *priv) 348static void pch_can_set_tx_all(struct pch_can_priv *priv, u32 set)
399{ 349{
400 int i; 350 int i;
401 351
402 /* Traversing to obtain the object configured as transmit object. */ 352 /* Traversing to obtain the object configured as transmit object. */
403 for (i = 0; i < PCH_OBJ_NUM; i++) { 353 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
404 if (priv->msg_obj[i] == MSG_OBJ_TX) 354 pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
405 pch_can_set_tx_enable(priv, i + 1, ENABLE);
406 }
407}
408
409static void pch_can_tx_disable_all(struct pch_can_priv *priv)
410{
411 int i;
412
413 /* Traverse the objects configured as transmitters. */
414 for (i = 0; i < PCH_OBJ_NUM; i++) {
415 if (priv->msg_obj[i] == MSG_OBJ_TX)
416 pch_can_set_tx_enable(priv, i + 1, DISABLE);
417 }
418} 355}
419 356
420static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num, 357static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
421 u32 *enable) 358 enum pch_ifreg dir)
422{ 359{
423 unsigned long flags; 360 u32 ie, enable;
424 361
425 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 362 if (dir)
426 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 363 ie = PCH_IF_MCONT_RXIE;
427 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
428
429 if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
430 ((ioread32(&priv->regs->if1_mcont)) &
431 CAN_IF_MCONT_RXIE))
432 *enable = ENABLE;
433 else 364 else
434 *enable = DISABLE; 365 ie = PCH_IF_MCONT_TXIE;
435 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
436}
437 366
438static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num, 367 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
439 u32 *enable) 368 pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
444 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
445 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
446 369
447 if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) && 370 if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
448 ((ioread32(&priv->regs->if2_mcont)) & 371 ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie)) {
449 CAN_IF_MCONT_TXIE)) { 372 enable = 1;
450 *enable = ENABLE;
451 } else { 373 } else {
452 *enable = DISABLE; 374 enable = 0;
453 } 375 }
454 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 376 return enable;
455} 377}
456 378
457static int pch_can_int_pending(struct pch_can_priv *priv) 379static int pch_can_int_pending(struct pch_can_priv *priv)
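Editorial note: the hunk above collapses the separate if1/if2 enable, disable and query helpers into single functions that take an interface-register index. A minimal sketch of the pattern, with illustrative bit values and a simplified register block (neither is the driver's real definition):

    #include <stdint.h>

    #define MCONT_RXIE (1u << 10)   /* illustrative bit positions */
    #define MCONT_TXIE (1u << 11)

    struct ifreg { uint32_t cmask, id2, mcont, creq; };

    /* One helper serves both directions: the only per-direction choice is
     * which interrupt-enable bit to touch; every access indexes ifregs[dir]. */
    static void set_rxtx(struct ifreg ifregs[2], unsigned int dir, int enable)
    {
            uint32_t ie = dir ? MCONT_RXIE : MCONT_TXIE;

            if (enable)
                    ifregs[dir].mcont |= ie;
            else
                    ifregs[dir].mcont &= ~ie;
    }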
@@ -462,141 +384,131 @@ static int pch_can_int_pending(struct pch_can_priv *priv)
462static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv, 384static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
463 u32 buffer_num, u32 set) 385 u32 buffer_num, u32 set)
464{ 386{
465 unsigned long flags; 387 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
466 388 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
467 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 389 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
468 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 390 &priv->regs->ifregs[0].cmask);
469 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 391 if (set == PCH_ENABLE)
470 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask); 392 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
471 if (set == ENABLE) 393 PCH_IF_MCONT_EOB);
472 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
473 else 394 else
474 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB); 395 pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
475 396
476 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 397 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
477 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
478} 398}
479 399
480static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv, 400static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
481 u32 buffer_num, u32 *link) 401 u32 buffer_num, u32 *link)
482{ 402{
483 unsigned long flags; 403 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
484 404 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
485 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
486 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
487 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
488 405
489 if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB) 406 if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
490 *link = DISABLE; 407 *link = PCH_DISABLE;
491 else 408 else
492 *link = ENABLE; 409 *link = PCH_ENABLE;
493 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
494} 410}
495 411
496static void pch_can_clear_buffers(struct pch_can_priv *priv) 412static void pch_can_clear_buffers(struct pch_can_priv *priv)
497{ 413{
498 int i; 414 int i;
499 415
500 for (i = 0; i < PCH_RX_OBJ_NUM; i++) { 416 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
501 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask); 417 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
502 iowrite32(0xffff, &priv->regs->if1_mask1); 418 iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
503 iowrite32(0xffff, &priv->regs->if1_mask2); 419 iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
504 iowrite32(0x0, &priv->regs->if1_id1); 420 iowrite32(0x0, &priv->regs->ifregs[0].id1);
505 iowrite32(0x0, &priv->regs->if1_id2); 421 iowrite32(0x0, &priv->regs->ifregs[0].id2);
506 iowrite32(0x0, &priv->regs->if1_mcont); 422 iowrite32(0x0, &priv->regs->ifregs[0].mcont);
507 iowrite32(0x0, &priv->regs->if1_dataa1); 423 iowrite32(0x0, &priv->regs->ifregs[0].dataa1);
508 iowrite32(0x0, &priv->regs->if1_dataa2); 424 iowrite32(0x0, &priv->regs->ifregs[0].dataa2);
509 iowrite32(0x0, &priv->regs->if1_datab1); 425 iowrite32(0x0, &priv->regs->ifregs[0].datab1);
510 iowrite32(0x0, &priv->regs->if1_datab2); 426 iowrite32(0x0, &priv->regs->ifregs[0].datab2);
511 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 427 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
512 CAN_CMASK_ARB | CAN_CMASK_CTRL, 428 PCH_CMASK_ARB | PCH_CMASK_CTRL,
513 &priv->regs->if1_cmask); 429 &priv->regs->ifregs[0].cmask);
514 pch_can_check_if_busy(&priv->regs->if1_creq, i+1); 430 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
515 } 431 }
516 432
517 for (i = i; i < PCH_OBJ_NUM; i++) { 433 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
518 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask); 434 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[1].cmask);
519 iowrite32(0xffff, &priv->regs->if2_mask1); 435 iowrite32(0xffff, &priv->regs->ifregs[1].mask1);
520 iowrite32(0xffff, &priv->regs->if2_mask2); 436 iowrite32(0xffff, &priv->regs->ifregs[1].mask2);
521 iowrite32(0x0, &priv->regs->if2_id1); 437 iowrite32(0x0, &priv->regs->ifregs[1].id1);
522 iowrite32(0x0, &priv->regs->if2_id2); 438 iowrite32(0x0, &priv->regs->ifregs[1].id2);
523 iowrite32(0x0, &priv->regs->if2_mcont); 439 iowrite32(0x0, &priv->regs->ifregs[1].mcont);
524 iowrite32(0x0, &priv->regs->if2_dataa1); 440 iowrite32(0x0, &priv->regs->ifregs[1].dataa1);
525 iowrite32(0x0, &priv->regs->if2_dataa2); 441 iowrite32(0x0, &priv->regs->ifregs[1].dataa2);
526 iowrite32(0x0, &priv->regs->if2_datab1); 442 iowrite32(0x0, &priv->regs->ifregs[1].datab1);
527 iowrite32(0x0, &priv->regs->if2_datab2); 443 iowrite32(0x0, &priv->regs->ifregs[1].datab2);
528 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 444 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
529 CAN_CMASK_ARB | CAN_CMASK_CTRL, 445 PCH_CMASK_ARB | PCH_CMASK_CTRL,
530 &priv->regs->if2_cmask); 446 &priv->regs->ifregs[1].cmask);
531 pch_can_check_if_busy(&priv->regs->if2_creq, i+1); 447 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
532 } 448 }
533} 449}
534 450
535static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv) 451static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
536{ 452{
537 int i; 453 int i;
538 unsigned long flags;
539 454
540 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 455 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
456 iowrite32(PCH_CMASK_RX_TX_GET,
457 &priv->regs->ifregs[0].cmask);
458 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
541 459
542 for (i = 0; i < PCH_OBJ_NUM; i++) { 460 iowrite32(0x0, &priv->regs->ifregs[0].id1);
543 if (priv->msg_obj[i] == MSG_OBJ_RX) { 461 iowrite32(0x0, &priv->regs->ifregs[0].id2);
544 iowrite32(CAN_CMASK_RX_TX_GET,
545 &priv->regs->if1_cmask);
546 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
547 462
548 iowrite32(0x0, &priv->regs->if1_id1); 463 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
549 iowrite32(0x0, &priv->regs->if1_id2); 464 PCH_IF_MCONT_UMASK);
550 465
551 pch_can_bit_set(&priv->regs->if1_mcont, 466 /* Set FIFO mode to 0 except for the last Rx Obj */
552 CAN_IF_MCONT_UMASK); 467 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
468 PCH_IF_MCONT_EOB);
469 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
470 if (i == PCH_RX_OBJ_END)
471 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
472 PCH_IF_MCONT_EOB);
553 473
554 /* Set FIFO mode to 0 except for the last Rx Obj */ 474 iowrite32(0, &priv->regs->ifregs[0].mask1);
555 pch_can_bit_clear(&priv->regs->if1_mcont, 475 pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
556 CAN_IF_MCONT_EOB); 476 0x1fff | PCH_MASK2_MDIR_MXTD);
557 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
558 if (i == (PCH_RX_OBJ_NUM - 1))
559 pch_can_bit_set(&priv->regs->if1_mcont,
560 CAN_IF_MCONT_EOB);
561 477
562 iowrite32(0, &priv->regs->if1_mask1); 478 /* Setting CMASK for writing */
563 pch_can_bit_clear(&priv->regs->if1_mask2, 479 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
564 0x1fff | CAN_MASK2_MDIR_MXTD); 480 PCH_CMASK_ARB | PCH_CMASK_CTRL,
481 &priv->regs->ifregs[0].cmask);
565 482
566 /* Setting CMASK for writing */ 483 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
567 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 484 }
568 CAN_CMASK_ARB | CAN_CMASK_CTRL,
569 &priv->regs->if1_cmask);
570 485
571 pch_can_check_if_busy(&priv->regs->if1_creq, i+1); 486 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
572 } else if (priv->msg_obj[i] == MSG_OBJ_TX) { 487 iowrite32(PCH_CMASK_RX_TX_GET,
573 iowrite32(CAN_CMASK_RX_TX_GET, 488 &priv->regs->ifregs[1].cmask);
574 &priv->regs->if2_cmask); 489 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
575 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
576 490
577 /* Resetting DIR bit for reception */ 491 /* Resetting DIR bit for reception */
578 iowrite32(0x0, &priv->regs->if2_id1); 492 iowrite32(0x0, &priv->regs->ifregs[1].id1);
579 iowrite32(0x0, &priv->regs->if2_id2); 493 iowrite32(0x0, &priv->regs->ifregs[1].id2);
580 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR); 494 pch_can_bit_set(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
581 495
582 /* Setting EOB bit for transmitter */ 496 /* Setting EOB bit for transmitter */
583 iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont); 497 iowrite32(PCH_IF_MCONT_EOB, &priv->regs->ifregs[1].mcont);
584 498
585 pch_can_bit_set(&priv->regs->if2_mcont, 499 pch_can_bit_set(&priv->regs->ifregs[1].mcont,
586 CAN_IF_MCONT_UMASK); 500 PCH_IF_MCONT_UMASK);
587 501
588 iowrite32(0, &priv->regs->if2_mask1); 502 iowrite32(0, &priv->regs->ifregs[1].mask1);
589 pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff); 503 pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
590 504
591 /* Setting CMASK for writing */ 505 /* Setting CMASK for writing */
592 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 506 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
593 CAN_CMASK_ARB | CAN_CMASK_CTRL, 507 PCH_CMASK_ARB | PCH_CMASK_CTRL,
594 &priv->regs->if2_cmask); 508 &priv->regs->ifregs[1].cmask);
595 509
596 pch_can_check_if_busy(&priv->regs->if2_creq, i+1); 510 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
597 }
598 } 511 }
599 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
600} 512}
601 513
602static void pch_can_init(struct pch_can_priv *priv) 514static void pch_can_init(struct pch_can_priv *priv)
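Editorial note: the rewritten buffer setup makes the EOB handling explicit. Every receive object has EOB cleared so the hardware chains them into one FIFO, and only the last object gets EOB set to terminate the chain. A sketch of the rule over a plain array (MCONT_EOB is an illustrative bit, not the driver's value):

    #include <stdint.h>

    #define MCONT_EOB (1u << 7)     /* illustrative end-of-buffer bit */

    static void chain_rx_fifo(uint32_t *mcont, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    mcont[i] &= ~MCONT_EOB;        /* member of the FIFO chain */
                    if (i == n - 1)
                            mcont[i] |= MCONT_EOB; /* last obj ends the chain */
            }
    }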
@@ -623,50 +535,50 @@ static void pch_can_release(struct pch_can_priv *priv)
623 pch_can_set_int_enables(priv, PCH_CAN_NONE); 535 pch_can_set_int_enables(priv, PCH_CAN_NONE);
624 536
625 /* Disabling all the receive object. */ 537 /* Disabling all the receive object. */
626 pch_can_rx_disable_all(priv); 538 pch_can_set_rx_all(priv, 0);
627 539
628 /* Disabling all the transmit object. */ 540 /* Disabling all the transmit object. */
629 pch_can_tx_disable_all(priv); 541 pch_can_set_tx_all(priv, 0);
630} 542}
631 543
632/* This function clears interrupt(s) from the CAN device. */ 544/* This function clears interrupt(s) from the CAN device. */
633static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask) 545static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
634{ 546{
635 if (mask == CAN_STATUS_INT) { 547 if (mask == PCH_STATUS_INT) {
636 ioread32(&priv->regs->stat); 548 ioread32(&priv->regs->stat);
637 return; 549 return;
638 } 550 }
639 551
640 /* Clear interrupt for transmit object */ 552 /* Clear interrupt for transmit object */
641 if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) { 553 if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
642 /* Setting CMASK for clearing interrupts for
643 frame transmission. */
644 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
645 &priv->regs->if2_cmask);
646
647 /* Resetting the ID registers. */
648 pch_can_bit_set(&priv->regs->if2_id2,
649 CAN_ID2_DIR | (0x7ff << 2));
650 iowrite32(0x0, &priv->regs->if2_id1);
651
652 /* Clearing NewDat, TxRqst & IntPnd */
653 pch_can_bit_clear(&priv->regs->if2_mcont,
654 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
655 CAN_IF_MCONT_TXRQXT);
656 pch_can_check_if_busy(&priv->regs->if2_creq, mask);
657 } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
658 /* Setting CMASK for clearing the reception interrupts. */ 554 /* Setting CMASK for clearing the reception interrupts. */
659 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB, 555 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
660 &priv->regs->if1_cmask); 556 &priv->regs->ifregs[0].cmask);
661 557
662 /* Clearing the Dir bit. */ 558 /* Clearing the Dir bit. */
663 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR); 559 pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
664 560
665 /* Clearing NewDat & IntPnd */ 561 /* Clearing NewDat & IntPnd */
666 pch_can_bit_clear(&priv->regs->if1_mcont, 562 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
667 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND); 563 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
564
565 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, mask);
566 } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
567 /* Setting CMASK for clearing interrupts for
568 frame transmission. */
569 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
570 &priv->regs->ifregs[1].cmask);
571
572 /* Resetting the ID registers. */
573 pch_can_bit_set(&priv->regs->ifregs[1].id2,
574 PCH_ID2_DIR | (0x7ff << 2));
575 iowrite32(0x0, &priv->regs->ifregs[1].id1);
668 576
669 pch_can_check_if_busy(&priv->regs->if1_creq, mask); 577 /* Clearing NewDat, TxRqst & IntPnd */
578 pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
579 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
580 PCH_IF_MCONT_TXRQXT);
581 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, mask);
670 } 582 }
671} 583}
672 584
@@ -688,7 +600,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
688 struct sk_buff *skb; 600 struct sk_buff *skb;
689 struct pch_can_priv *priv = netdev_priv(ndev); 601 struct pch_can_priv *priv = netdev_priv(ndev);
690 struct can_frame *cf; 602 struct can_frame *cf;
691 u32 errc; 603 u32 errc, lec;
692 struct net_device_stats *stats = &(priv->ndev->stats); 604 struct net_device_stats *stats = &(priv->ndev->stats);
693 enum can_state state = priv->can.state; 605 enum can_state state = priv->can.state;
694 606
@@ -697,8 +609,8 @@ static void pch_can_error(struct net_device *ndev, u32 status)
697 return; 609 return;
698 610
699 if (status & PCH_BUS_OFF) { 611 if (status & PCH_BUS_OFF) {
700 pch_can_tx_disable_all(priv); 612 pch_can_set_tx_all(priv, 0);
701 pch_can_rx_disable_all(priv); 613 pch_can_set_rx_all(priv, 0);
702 state = CAN_STATE_BUS_OFF; 614 state = CAN_STATE_BUS_OFF;
703 cf->can_id |= CAN_ERR_BUSOFF; 615 cf->can_id |= CAN_ERR_BUSOFF;
704 can_bus_off(ndev); 616 can_bus_off(ndev);
@@ -712,9 +624,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
712 priv->can.can_stats.error_warning++; 624 priv->can.can_stats.error_warning++;
713 cf->can_id |= CAN_ERR_CRTL; 625 cf->can_id |= CAN_ERR_CRTL;
714 errc = ioread32(&priv->regs->errc); 626 errc = ioread32(&priv->regs->errc);
715 if (((errc & CAN_REC) >> 8) > 96) 627 if (((errc & PCH_REC) >> 8) > 96)
716 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; 628 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
717 if ((errc & CAN_TEC) > 96) 629 if ((errc & PCH_TEC) > 96)
718 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; 630 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
719 dev_warn(&ndev->dev, 631 dev_warn(&ndev->dev,
720 "%s -> Error Counter is more than 96.\n", __func__); 632 "%s -> Error Counter is more than 96.\n", __func__);
@@ -725,41 +637,45 @@ static void pch_can_error(struct net_device *ndev, u32 status)
725 state = CAN_STATE_ERROR_PASSIVE; 637 state = CAN_STATE_ERROR_PASSIVE;
726 cf->can_id |= CAN_ERR_CRTL; 638 cf->can_id |= CAN_ERR_CRTL;
727 errc = ioread32(&priv->regs->errc); 639 errc = ioread32(&priv->regs->errc);
728 if (((errc & CAN_REC) >> 8) > 127) 640 if (((errc & PCH_REC) >> 8) > 127)
729 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 641 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
730 if ((errc & CAN_TEC) > 127) 642 if ((errc & PCH_TEC) > 127)
731 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; 643 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
732 dev_err(&ndev->dev, 644 dev_err(&ndev->dev,
733 "%s -> CAN controller is ERROR PASSIVE .\n", __func__); 645 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
734 } 646 }
735 647
736 if (status & PCH_LEC_ALL) { 648 lec = status & PCH_LEC_ALL;
649 switch (lec) {
650 case PCH_STUF_ERR:
651 cf->data[2] |= CAN_ERR_PROT_STUFF;
737 priv->can.can_stats.bus_error++; 652 priv->can.can_stats.bus_error++;
738 stats->rx_errors++; 653 stats->rx_errors++;
739 switch (status & PCH_LEC_ALL) { 654 break;
740 case PCH_STUF_ERR: 655 case PCH_FORM_ERR:
741 cf->data[2] |= CAN_ERR_PROT_STUFF; 656 cf->data[2] |= CAN_ERR_PROT_FORM;
742 break; 657 priv->can.can_stats.bus_error++;
743 case PCH_FORM_ERR: 658 stats->rx_errors++;
744 cf->data[2] |= CAN_ERR_PROT_FORM; 659 break;
745 break; 660 case PCH_ACK_ERR:
746 case PCH_ACK_ERR: 661 cf->can_id |= CAN_ERR_ACK;
747 cf->data[2] |= CAN_ERR_PROT_LOC_ACK | 662 priv->can.can_stats.bus_error++;
748 CAN_ERR_PROT_LOC_ACK_DEL; 663 stats->rx_errors++;
749 break; 664 break;
750 case PCH_BIT1_ERR: 665 case PCH_BIT1_ERR:
751 case PCH_BIT0_ERR: 666 case PCH_BIT0_ERR:
752 cf->data[2] |= CAN_ERR_PROT_BIT; 667 cf->data[2] |= CAN_ERR_PROT_BIT;
753 break; 668 priv->can.can_stats.bus_error++;
754 case PCH_CRC_ERR: 669 stats->rx_errors++;
755 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 670 break;
756 CAN_ERR_PROT_LOC_CRC_DEL; 671 case PCH_CRC_ERR:
757 break; 672 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
758 default: 673 CAN_ERR_PROT_LOC_CRC_DEL;
759 iowrite32(status | PCH_LEC_ALL, &priv->regs->stat); 674 priv->can.can_stats.bus_error++;
760 break; 675 stats->rx_errors++;
761 } 676 break;
762 677 case PCH_LEC_ALL: /* Written by CPU. No error status */
678 break;
763 } 679 }
764 680
765 priv->can.state = state; 681 priv->can.state = state;
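Editorial note: the rewritten error handler reads the last-error-code field once and switches on it; the all-ones value is reserved as a "no new event" sentinel that the CPU itself parks in the register after consuming an error. A sketch of that convention, assuming a 3-bit LEC field as on C_CAN-style controllers:

    #include <stdint.h>

    #define LEC_MASK 0x07           /* last-error-code field (cf. PCH_LEC_ALL) */

    static int lec_event_pending(uint32_t status)
    {
            uint32_t lec = status & LEC_MASK;

            /* 0 means no error recorded; 7 (all ones) is the value the
             * CPU writes back, so only 1..6 are real bus-error codes. */
            return lec != 0 && lec != LEC_MASK;
    }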
@@ -795,22 +711,22 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
795 struct net_device_stats *stats = &(priv->ndev->stats); 711 struct net_device_stats *stats = &(priv->ndev->stats);
796 712
797 /* Reading the messsage object from the Message RAM */ 713 /* Reading the messsage object from the Message RAM */
798 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 714 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
799 pch_can_check_if_busy(&priv->regs->if1_creq, int_stat); 715 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, int_stat);
800 716
801 /* Reading the MCONT register. */ 717 /* Reading the MCONT register. */
802 reg = ioread32(&priv->regs->if1_mcont); 718 reg = ioread32(&priv->regs->ifregs[0].mcont);
803 reg &= 0xffff; 719 reg &= 0xffff;
804 720
805 for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) { 721 for (k = int_stat; !(reg & PCH_IF_MCONT_EOB); k++) {
806 /* If MsgLost bit set. */ 722 /* If MsgLost bit set. */
807 if (reg & CAN_IF_MCONT_MSGLOST) { 723 if (reg & PCH_IF_MCONT_MSGLOST) {
808 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n"); 724 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
809 pch_can_bit_clear(&priv->regs->if1_mcont, 725 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
810 CAN_IF_MCONT_MSGLOST); 726 PCH_IF_MCONT_MSGLOST);
811 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, 727 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
812 &priv->regs->if1_cmask); 728 &priv->regs->ifregs[0].cmask);
813 pch_can_check_if_busy(&priv->regs->if1_creq, k); 729 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
814 730
815 skb = alloc_can_err_skb(ndev, &cf); 731 skb = alloc_can_err_skb(ndev, &cf);
816 if (!skb) 732 if (!skb)
@@ -828,7 +744,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
828 rcv_pkts++; 744 rcv_pkts++;
829 goto RX_NEXT; 745 goto RX_NEXT;
830 } 746 }
831 if (!(reg & CAN_IF_MCONT_NEWDAT)) 747 if (!(reg & PCH_IF_MCONT_NEWDAT))
832 goto RX_NEXT; 748 goto RX_NEXT;
833 749
834 skb = alloc_can_skb(priv->ndev, &cf); 750 skb = alloc_can_skb(priv->ndev, &cf);
@@ -836,29 +752,30 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
836 return -ENOMEM; 752 return -ENOMEM;
837 753
838 /* Get Received data */ 754 /* Get Received data */
839 ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14; 755 ide = ((ioread32(&priv->regs->ifregs[0].id2)) & PCH_ID2_XTD) >>
756 14;
840 if (ide) { 757 if (ide) {
841 id = (ioread32(&priv->regs->if1_id1) & 0xffff); 758 id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
842 id |= (((ioread32(&priv->regs->if1_id2)) & 759 id |= (((ioread32(&priv->regs->ifregs[0].id2)) &
843 0x1fff) << 16); 760 0x1fff) << 16);
844 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; 761 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
845 } else { 762 } else {
846 id = (((ioread32(&priv->regs->if1_id2)) & 763 id = (((ioread32(&priv->regs->ifregs[0].id2)) &
847 (CAN_SFF_MASK << 2)) >> 2); 764 (CAN_SFF_MASK << 2)) >> 2);
848 cf->can_id = (id & CAN_SFF_MASK); 765 cf->can_id = (id & CAN_SFF_MASK);
849 } 766 }
850 767
851 rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR); 768 rtr = (ioread32(&priv->regs->ifregs[0].id2) & PCH_ID2_DIR);
852 if (rtr) { 769 if (rtr) {
853 cf->can_dlc = 0; 770 cf->can_dlc = 0;
854 cf->can_id |= CAN_RTR_FLAG; 771 cf->can_id |= CAN_RTR_FLAG;
855 } else { 772 } else {
856 cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) & 773 cf->can_dlc =
857 0x0f); 774 ((ioread32(&priv->regs->ifregs[0].mcont)) & 0x0f);
858 } 775 }
859 776
860 for (i = 0, j = 0; i < cf->can_dlc; j++) { 777 for (i = 0, j = 0; i < cf->can_dlc; j++) {
861 reg = ioread32(&priv->regs->if1_dataa1 + j*4); 778 reg = ioread32(&priv->regs->ifregs[0].dataa1 + j*4);
862 cf->data[i++] = cpu_to_le32(reg & 0xff); 779 cf->data[i++] = cpu_to_le32(reg & 0xff);
863 if (i == cf->can_dlc) 780 if (i == cf->can_dlc)
864 break; 781 break;
@@ -871,16 +788,17 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
871 stats->rx_bytes += cf->can_dlc; 788 stats->rx_bytes += cf->can_dlc;
872 789
873 if (k < PCH_FIFO_THRESH) { 790 if (k < PCH_FIFO_THRESH) {
874 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | 791 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
875 CAN_CMASK_ARB, &priv->regs->if1_cmask); 792 PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
876 793
877 /* Clearing the Dir bit. */ 794 /* Clearing the Dir bit. */
878 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR); 795 pch_can_bit_clear(&priv->regs->ifregs[0].id2,
796 PCH_ID2_DIR);
879 797
880 /* Clearing NewDat & IntPnd */ 798 /* Clearing NewDat & IntPnd */
881 pch_can_bit_clear(&priv->regs->if1_mcont, 799 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
882 CAN_IF_MCONT_INTPND); 800 PCH_IF_MCONT_INTPND);
883 pch_can_check_if_busy(&priv->regs->if1_creq, k); 801 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
884 } else if (k > PCH_FIFO_THRESH) { 802 } else if (k > PCH_FIFO_THRESH) {
885 pch_can_int_clr(priv, k); 803 pch_can_int_clr(priv, k);
886 } else if (k == PCH_FIFO_THRESH) { 804 } else if (k == PCH_FIFO_THRESH) {
@@ -890,9 +808,9 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
890 } 808 }
891RX_NEXT: 809RX_NEXT:
892 /* Reading the messsage object from the Message RAM */ 810 /* Reading the messsage object from the Message RAM */
893 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 811 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
894 pch_can_check_if_busy(&priv->regs->if1_creq, k + 1); 812 pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
895 reg = ioread32(&priv->regs->if1_mcont); 813 reg = ioread32(&priv->regs->ifregs[0].mcont);
896 } 814 }
897 815
898 return rcv_pkts; 816 return rcv_pkts;
@@ -906,14 +824,13 @@ static int pch_can_rx_poll(struct napi_struct *napi, int quota)
906 u32 int_stat; 824 u32 int_stat;
907 int rcv_pkts = 0; 825 int rcv_pkts = 0;
908 u32 reg_stat; 826 u32 reg_stat;
909 unsigned long flags;
910 827
911 int_stat = pch_can_int_pending(priv); 828 int_stat = pch_can_int_pending(priv);
912 if (!int_stat) 829 if (!int_stat)
913 return 0; 830 return 0;
914 831
915INT_STAT: 832INT_STAT:
916 if (int_stat == CAN_STATUS_INT) { 833 if (int_stat == PCH_STATUS_INT) {
917 reg_stat = ioread32(&priv->regs->stat); 834 reg_stat = ioread32(&priv->regs->stat);
918 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) { 835 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
919 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL) 836 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
@@ -921,11 +838,10 @@ INT_STAT:
921 } 838 }
922 839
923 if (reg_stat & PCH_TX_OK) { 840 if (reg_stat & PCH_TX_OK) {
924 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 841 iowrite32(PCH_CMASK_RX_TX_GET,
925 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 842 &priv->regs->ifregs[1].cmask);
926 pch_can_check_if_busy(&priv->regs->if2_creq, 843 pch_can_check_if_busy(&priv->regs->ifregs[1].creq,
927 ioread32(&priv->regs->intr)); 844 ioread32(&priv->regs->intr));
928 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
929 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK); 845 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
930 } 846 }
931 847
@@ -933,37 +849,32 @@ INT_STAT:
933 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK); 849 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
934 850
935 int_stat = pch_can_int_pending(priv); 851 int_stat = pch_can_int_pending(priv);
936 if (int_stat == CAN_STATUS_INT) 852 if (int_stat == PCH_STATUS_INT)
937 goto INT_STAT; 853 goto INT_STAT;
938 } 854 }
939 855
940MSG_OBJ: 856MSG_OBJ:
941 if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) { 857 if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
942 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
943 rcv_pkts = pch_can_rx_normal(ndev, int_stat); 858 rcv_pkts = pch_can_rx_normal(ndev, int_stat);
944 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
945 if (rcv_pkts < 0) 859 if (rcv_pkts < 0)
946 return 0; 860 return 0;
947 } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) { 861 } else if ((int_stat >= PCH_TX_OBJ_START) &&
948 if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) { 862 (int_stat <= PCH_TX_OBJ_END)) {
949 /* Handle transmission interrupt */ 863 /* Handle transmission interrupt */
950 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1); 864 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
951 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 865 iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
952 iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND, 866 &priv->regs->ifregs[1].cmask);
953 &priv->regs->if2_cmask); 867 dlc = ioread32(&priv->regs->ifregs[1].mcont) &
954 dlc = ioread32(&priv->regs->if2_mcont) & 868 PCH_IF_MCONT_DLC;
955 CAN_IF_MCONT_DLC; 869 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, int_stat);
956 pch_can_check_if_busy(&priv->regs->if2_creq, int_stat); 870 if (dlc > 8)
957 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 871 dlc = 8;
958 if (dlc > 8) 872 stats->tx_bytes += dlc;
959 dlc = 8; 873 stats->tx_packets++;
960 stats->tx_bytes += dlc;
961 stats->tx_packets++;
962 }
963 } 874 }
964 875
965 int_stat = pch_can_int_pending(priv); 876 int_stat = pch_can_int_pending(priv);
966 if (int_stat == CAN_STATUS_INT) 877 if (int_stat == PCH_STATUS_INT)
967 goto INT_STAT; 878 goto INT_STAT;
968 else if (int_stat >= 1 && int_stat <= 32) 879 else if (int_stat >= 1 && int_stat <= 32)
969 goto MSG_OBJ; 880 goto MSG_OBJ;
@@ -983,17 +894,17 @@ static int pch_set_bittiming(struct net_device *ndev)
983 u32 brp; 894 u32 brp;
984 895
985 /* Setting the CCE bit for accessing the Can Timing register. */ 896 /* Setting the CCE bit for accessing the Can Timing register. */
986 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE); 897 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
987 898
988 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1; 899 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
989 canbit = brp & MSK_BITT_BRP; 900 canbit = brp & PCH_MSK_BITT_BRP;
990 canbit |= (bt->sjw - 1) << BIT_BITT_SJW; 901 canbit |= (bt->sjw - 1) << PCH_BIT_SJW;
991 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1; 902 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1;
992 canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2; 903 canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2;
993 bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE; 904 bepe = (brp & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE;
994 iowrite32(canbit, &priv->regs->bitt); 905 iowrite32(canbit, &priv->regs->bitt);
995 iowrite32(bepe, &priv->regs->brpe); 906 iowrite32(bepe, &priv->regs->brpe);
996 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE); 907 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
997 908
998 return 0; 909 return 0;
999} 910}
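Editorial note: pch_set_bittiming() packs the CAN bit-timing parameters into the BITT register: a 1-based prescaler stored minus one, plus sjw/tseg1/tseg2 fields, with the prescaler's upper bits spilling into BRPE. A hedged sketch of the packing with illustrative shift values (the driver's real ones are the PCH_BIT_*/PCH_MSK_* constants):

    #include <stdint.h>

    #define BITT_BRP_MASK    0x3f   /* low prescaler bits in BITT */
    #define BITT_SJW_SHIFT   6      /* illustrative field offsets */
    #define BITT_TSEG1_SHIFT 8
    #define BITT_TSEG2_SHIFT 12

    static uint32_t pack_bitt(uint32_t tq_ns, uint32_t can_clk_hz,
                              uint32_t sjw, uint32_t prop_seg,
                              uint32_t phase_seg1, uint32_t phase_seg2)
    {
            /* One time quantum lasts (brp + 1) controller clocks, hence
             * the divide by the clock period and the minus one. */
            uint32_t brp = tq_ns / (1000000000u / can_clk_hz) - 1;
            uint32_t v = brp & BITT_BRP_MASK;

            v |= (sjw - 1) << BITT_SJW_SHIFT;
            v |= (phase_seg1 + prop_seg - 1) << BITT_TSEG1_SHIFT;
            v |= (phase_seg2 - 1) << BITT_TSEG2_SHIFT;
            return v;
    }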
@@ -1008,8 +919,8 @@ static void pch_can_start(struct net_device *ndev)
1008 pch_set_bittiming(ndev); 919 pch_set_bittiming(ndev);
1009 pch_can_set_optmode(priv); 920 pch_can_set_optmode(priv);
1010 921
1011 pch_can_tx_enable_all(priv); 922 pch_can_set_tx_all(priv, 1);
1012 pch_can_rx_enable_all(priv); 923 pch_can_set_rx_all(priv, 1);
1013 924
1014 /* Setting the CAN to run mode. */ 925 /* Setting the CAN to run mode. */
1015 pch_can_set_run_mode(priv, PCH_CAN_RUN); 926 pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1113,7 +1024,6 @@ static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
1113static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) 1024static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1114{ 1025{
1115 int i, j; 1026 int i, j;
1116 unsigned long flags;
1117 struct pch_can_priv *priv = netdev_priv(ndev); 1027 struct pch_can_priv *priv = netdev_priv(ndev);
1118 struct can_frame *cf = (struct can_frame *)skb->data; 1028 struct can_frame *cf = (struct can_frame *)skb->data;
1119 int tx_buffer_avail = 0; 1029 int tx_buffer_avail = 0;
@@ -1121,72 +1031,68 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1121 if (can_dropped_invalid_skb(ndev, skb)) 1031 if (can_dropped_invalid_skb(ndev, skb))
1122 return NETDEV_TX_OK; 1032 return NETDEV_TX_OK;
1123 1033
1124 if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */ 1034 if (priv->tx_obj == PCH_TX_OBJ_END) { /* Point tail Obj */
1125 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) << 1035 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
1126 PCH_RX_OBJ_NUM))) 1036 PCH_RX_OBJ_NUM)))
1127 udelay(500); 1037 udelay(500);
1128 1038
1129 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */ 1039 priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj ID */
1130 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */ 1040 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
1131 } else { 1041 } else {
1132 tx_buffer_avail = priv->tx_obj; 1042 tx_buffer_avail = priv->tx_obj;
1133 } 1043 }
1134 priv->tx_obj++; 1044 priv->tx_obj++;
1135 1045
1136 /* Attaining the lock. */
1137 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
1138
1139 /* Reading the Msg Obj from the Msg RAM to the Interface register. */ 1046 /* Reading the Msg Obj from the Msg RAM to the Interface register. */
1140 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 1047 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
1141 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail); 1048 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
1142 1049
1143 /* Setting the CMASK register. */ 1050 /* Setting the CMASK register. */
1144 pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL); 1051 pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
1145 1052
1146 /* If ID extended is set. */ 1053 /* If ID extended is set. */
1147 pch_can_bit_clear(&priv->regs->if2_id1, 0xffff); 1054 pch_can_bit_clear(&priv->regs->ifregs[1].id1, 0xffff);
1148 pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD); 1055 pch_can_bit_clear(&priv->regs->ifregs[1].id2, 0x1fff | PCH_ID2_XTD);
1149 if (cf->can_id & CAN_EFF_FLAG) { 1056 if (cf->can_id & CAN_EFF_FLAG) {
1150 pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff); 1057 pch_can_bit_set(&priv->regs->ifregs[1].id1,
1151 pch_can_bit_set(&priv->regs->if2_id2, 1058 cf->can_id & 0xffff);
1152 ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD); 1059 pch_can_bit_set(&priv->regs->ifregs[1].id2,
1060 ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD);
1153 } else { 1061 } else {
1154 pch_can_bit_set(&priv->regs->if2_id1, 0); 1062 pch_can_bit_set(&priv->regs->ifregs[1].id1, 0);
1155 pch_can_bit_set(&priv->regs->if2_id2, 1063 pch_can_bit_set(&priv->regs->ifregs[1].id2,
1156 (cf->can_id & CAN_SFF_MASK) << 2); 1064 (cf->can_id & CAN_SFF_MASK) << 2);
1157 } 1065 }
1158 1066
1159 /* If a remote frame has to be transmitted. */ 1067
1160 if (cf->can_id & CAN_RTR_FLAG) 1068 if (cf->can_id & CAN_RTR_FLAG)
1161 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR); 1069 pch_can_bit_clear(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
1162 1070
1163 for (i = 0, j = 0; i < cf->can_dlc; j++) { 1071 for (i = 0, j = 0; i < cf->can_dlc; j++) {
1164 iowrite32(le32_to_cpu(cf->data[i++]), 1072 iowrite32(le32_to_cpu(cf->data[i++]),
1165 (&priv->regs->if2_dataa1) + j*4); 1073 (&priv->regs->ifregs[1].dataa1) + j*4);
1166 if (i == cf->can_dlc) 1074 if (i == cf->can_dlc)
1167 break; 1075 break;
1168 iowrite32(le32_to_cpu(cf->data[i++] << 8), 1076 iowrite32(le32_to_cpu(cf->data[i++] << 8),
1169 (&priv->regs->if2_dataa1) + j*4); 1077 (&priv->regs->ifregs[1].dataa1) + j*4);
1170 } 1078 }
1171 1079
1172 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1); 1080 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_END - 1);
1173 1081
1174 /* Updating the size of the data. */ 1082 /* Updating the size of the data. */
1175 pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f); 1083 pch_can_bit_clear(&priv->regs->ifregs[1].mcont, 0x0f);
1176 pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc); 1084 pch_can_bit_set(&priv->regs->ifregs[1].mcont, cf->can_dlc);
1177 1085
1178 /* Clearing IntPend, NewDat & TxRqst */ 1086 /* Clearing IntPend, NewDat & TxRqst */
1179 pch_can_bit_clear(&priv->regs->if2_mcont, 1087 pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
1180 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND | 1088 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
1181 CAN_IF_MCONT_TXRQXT); 1089 PCH_IF_MCONT_TXRQXT);
1182 1090
1183 /* Setting NewDat, TxRqst bits */ 1091 /* Setting NewDat, TxRqst bits */
1184 pch_can_bit_set(&priv->regs->if2_mcont, 1092 pch_can_bit_set(&priv->regs->ifregs[1].mcont,
1185 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT); 1093 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT);
1186
1187 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1188 1094
1189 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 1095 pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
1190 1096
1191 return NETDEV_TX_OK; 1097 return NETDEV_TX_OK;
1192} 1098}
@@ -1244,27 +1150,20 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1244 pch_can_set_int_enables(priv, PCH_CAN_DISABLE); 1150 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1245 1151
1246 /* Save Tx buffer enable state */ 1152 /* Save Tx buffer enable state */
1247 for (i = 0; i < PCH_OBJ_NUM; i++) { 1153 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1248 if (priv->msg_obj[i] == MSG_OBJ_TX) 1154 priv->tx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG);
1249 pch_can_get_tx_enable(priv, i + 1,
1250 &(priv->tx_enable[i]));
1251 }
1252 1155
1253 /* Disable all Transmit buffers */ 1156 /* Disable all Transmit buffers */
1254 pch_can_tx_disable_all(priv); 1157 pch_can_set_tx_all(priv, 0);
1255 1158
1256 /* Save Rx buffer enable state */ 1159 /* Save Rx buffer enable state */
1257 for (i = 0; i < PCH_OBJ_NUM; i++) { 1160 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1258 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1161 priv->rx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG);
1259 pch_can_get_rx_enable(priv, i + 1, 1162 pch_can_get_rx_buffer_link(priv, i, &priv->rx_link[i]);
1260 &(priv->rx_enable[i]));
1261 pch_can_get_rx_buffer_link(priv, i + 1,
1262 &(priv->rx_link[i]));
1263 }
1264 } 1163 }
1265 1164
1266 /* Disable all Receive buffers */ 1165 /* Disable all Receive buffers */
1267 pch_can_rx_disable_all(priv); 1166 pch_can_set_rx_all(priv, 0);
1268 retval = pci_save_state(pdev); 1167 retval = pci_save_state(pdev);
1269 if (retval) { 1168 if (retval) {
1270 dev_err(&pdev->dev, "pci_save_state failed.\n"); 1169 dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1312,23 +1211,16 @@ static int pch_can_resume(struct pci_dev *pdev)
1312 pch_can_set_optmode(priv); 1211 pch_can_set_optmode(priv);
1313 1212
1314 /* Enabling the transmit buffer. */ 1213 /* Enabling the transmit buffer. */
1315 for (i = 0; i < PCH_OBJ_NUM; i++) { 1214 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1316 if (priv->msg_obj[i] == MSG_OBJ_TX) { 1215 pch_can_set_rxtx(priv, i, priv->tx_enable[i], PCH_TX_IFREG);
1317 pch_can_set_tx_enable(priv, i + 1,
1318 priv->tx_enable[i]);
1319 }
1320 }
1321 1216
1322 /* Configuring the receive buffer and enabling them. */ 1217 /* Configuring the receive buffer and enabling them. */
1323 for (i = 0; i < PCH_OBJ_NUM; i++) { 1218 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1324 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1219 /* Restore buffer link */
1325 /* Restore buffer link */ 1220 pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i]);
1326 pch_can_set_rx_buffer_link(priv, i + 1, 1221
1327 priv->rx_link[i]); 1222 /* Restore buffer enables */
1328 1223 pch_can_set_rxtx(priv, i, priv->rx_enable[i], PCH_RX_IFREG);
1329 /* Restore buffer enables */
1330 pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
1331 }
1332 } 1224 }
1333 1225
1334 /* Enable CAN Interrupts */ 1226 /* Enable CAN Interrupts */
@@ -1349,8 +1241,8 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
1349{ 1241{
1350 struct pch_can_priv *priv = netdev_priv(dev); 1242 struct pch_can_priv *priv = netdev_priv(dev);
1351 1243
1352 bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC; 1244 bec->txerr = ioread32(&priv->regs->errc) & PCH_TEC;
1353 bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8; 1245 bec->rxerr = (ioread32(&priv->regs->errc) & PCH_REC) >> 8;
1354 1246
1355 return 0; 1247 return 0;
1356} 1248}
@@ -1361,7 +1253,6 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1361 struct net_device *ndev; 1253 struct net_device *ndev;
1362 struct pch_can_priv *priv; 1254 struct pch_can_priv *priv;
1363 int rc; 1255 int rc;
1364 int index;
1365 void __iomem *addr; 1256 void __iomem *addr;
1366 1257
1367 rc = pci_enable_device(pdev); 1258 rc = pci_enable_device(pdev);
@@ -1383,7 +1274,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1383 goto probe_exit_ipmap; 1274 goto probe_exit_ipmap;
1384 } 1275 }
1385 1276
1386 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM); 1277 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
1387 if (!ndev) { 1278 if (!ndev) {
1388 rc = -ENOMEM; 1279 rc = -ENOMEM;
1389 dev_err(&pdev->dev, "Failed alloc_candev\n"); 1280 dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1290,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1399 priv->can.do_get_berr_counter = pch_can_get_berr_counter; 1290 priv->can.do_get_berr_counter = pch_can_get_berr_counter;
1400 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | 1291 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1401 CAN_CTRLMODE_LOOPBACK; 1292 CAN_CTRLMODE_LOOPBACK;
1402 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */ 1293 priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
1403 1294
1404 ndev->irq = pdev->irq; 1295 ndev->irq = pdev->irq;
1405 ndev->flags |= IFF_ECHO; 1296 ndev->flags |= IFF_ECHO;
@@ -1407,15 +1298,9 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1407 pci_set_drvdata(pdev, ndev); 1298 pci_set_drvdata(pdev, ndev);
1408 SET_NETDEV_DEV(ndev, &pdev->dev); 1299 SET_NETDEV_DEV(ndev, &pdev->dev);
1409 ndev->netdev_ops = &pch_can_netdev_ops; 1300 ndev->netdev_ops = &pch_can_netdev_ops;
1410
1411 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ 1301 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
1412 for (index = 0; index < PCH_RX_OBJ_NUM;)
1413 priv->msg_obj[index++] = MSG_OBJ_RX;
1414
1415 for (index = index; index < PCH_OBJ_NUM;)
1416 priv->msg_obj[index++] = MSG_OBJ_TX;
1417 1302
1418 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM); 1303 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_END);
1419 1304
1420 rc = register_candev(ndev); 1305 rc = register_candev(ndev);
1421 if (rc) { 1306 if (rc) {
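Editorial note: the thread running through the whole pch_can.c diff is that the duplicated if1_*/if2_* register fields become a two-element ifregs[] array, indexed 0 for the receive path and 1 for the transmit path, so both paths can share one code path. A sketch of a layout consistent with the accesses above (field order here is inferred from this diff, not taken from the driver's header; u32 is the kernel's fixed-width type):

    struct pch_can_if_regs {
            u32 creq;
            u32 cmask;
            u32 mask1;
            u32 mask2;
            u32 id1;
            u32 id2;
            u32 mcont;
            u32 dataa1;
            u32 dataa2;
            u32 datab1;
            u32 datab2;
    };

    /* Embedded twice in the controller's register block:
     * struct pch_can_if_regs ifregs[2];  -- ifregs[0] = Rx, ifregs[1] = Tx */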
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bbb..09c3e9db9316 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
107 res_size = resource_size(&res); 107 res_size = resource_size(&res);
108 108
109 if (!request_mem_region(res.start, res_size, DRV_NAME)) { 109 if (!request_mem_region(res.start, res_size, DRV_NAME)) {
110 dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n", 110 dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
111 (unsigned long long)res.start,
112 (unsigned long long)res.end);
113 return -EBUSY; 111 return -EBUSY;
114 } 112 }
115 113
116 base = ioremap_nocache(res.start, res_size); 114 base = ioremap_nocache(res.start, res_size);
117 if (!base) { 115 if (!base) {
118 dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n", 116 dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
119 (unsigned long long)res.start,
120 (unsigned long long)res.end);
121 err = -ENOMEM; 117 err = -ENOMEM;
122 goto exit_release_mem; 118 goto exit_release_mem;
123 } 119 }
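Editorial note: this small change replaces hand-formatted resource ranges with the kernel's %pR printk extension, which takes a struct resource pointer and prints the range (plus the resource type and flags on newer kernels) without any casts. Before and after, as a sketch:

    /* before: two unsigned long long casts per message */
    dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
            (unsigned long long)res.start, (unsigned long long)res.end);

    /* after: one argument, formatting handled by vsprintf's %pR */
    dev_err(&ofdev->dev, "couldn't request %pR\n", &res);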
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 000000000000..b423965a78d1
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,756 @@
1/*
2 * slcan.c - serial line CAN interface driver (using tty line discipline)
3 *
4 * This file is derived from linux/drivers/net/slip.c
5 *
6 * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
7 * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
8 * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
23 * at http://www.gnu.org/licenses/gpl.html
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
36 * DAMAGE.
37 *
38 * Send feedback to <socketcan-users@lists.berlios.de>
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44
45#include <asm/system.h>
46#include <linux/uaccess.h>
47#include <linux/bitops.h>
48#include <linux/string.h>
49#include <linux/tty.h>
50#include <linux/errno.h>
51#include <linux/netdevice.h>
52#include <linux/skbuff.h>
53#include <linux/rtnetlink.h>
54#include <linux/if_arp.h>
55#include <linux/if_ether.h>
56#include <linux/sched.h>
57#include <linux/delay.h>
58#include <linux/init.h>
59#include <linux/can.h>
60
61static __initdata const char banner[] =
62 KERN_INFO "slcan: serial line CAN interface driver\n";
63
64MODULE_ALIAS_LDISC(N_SLCAN);
65MODULE_DESCRIPTION("serial line CAN interface");
66MODULE_LICENSE("GPL");
67MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
68
69#define SLCAN_MAGIC 0x53CA
70
71static int maxdev = 10; /* MAX number of SLCAN channels;
72 This can be overridden with
73 insmod slcan.ko maxdev=nnn */
74module_param(maxdev, int, 0);
75MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76
77/* maximum rx buffer len: extended CAN frame with timestamp */
78#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
79
80struct slcan {
81 int magic;
82
83 /* Various fields. */
84 struct tty_struct *tty; /* ptr to TTY structure */
85 struct net_device *dev; /* easy for intr handling */
86 spinlock_t lock;
87
88 /* These are pointers to the malloc()ed frame buffers. */
89 unsigned char rbuff[SLC_MTU]; /* receiver buffer */
90 int rcount; /* received chars counter */
91 unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
92 unsigned char *xhead; /* pointer to next XMIT byte */
93 int xleft; /* bytes left in XMIT queue */
94
95 unsigned long flags; /* Flag values/ mode etc */
96#define SLF_INUSE 0 /* Channel in use */
97#define SLF_ERROR 1 /* Parity, etc. error */
98
99 unsigned char leased;
100 dev_t line;
101 pid_t pid;
102};
103
104static struct net_device **slcan_devs;
105
106 /************************************************************************
107 * SLCAN ENCAPSULATION FORMAT *
108 ************************************************************************/
109
110/*
111 * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
112 * frame format) a data length code (can_dlc) which can be from 0 to 8
113 * and up to <can_dlc> data bytes as payload.
114 * Additionally a CAN frame may become a remote transmission frame if the
115 * RTR-bit is set. This causes another ECU to send a CAN frame with the
116 * given can_id.
117 *
118 * The SLCAN ASCII representation of these different frame types is:
119 * <type> <id> <dlc> <data>*
120 *
121 * Extended frames (29 bit) are defined by capital characters in the type.
122 * RTR frames are defined as 'r' types - normal frames have 't' type:
123 * t => 11 bit data frame
124 * r => 11 bit RTR frame
125 * T => 29 bit data frame
126 * R => 29 bit RTR frame
127 *
128 * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
129 * The <dlc> is a one byte ASCII number ('0' - '8')
130 * The <data> section has as many ASCII Hex bytes as defined by the <dlc>.
131 *
132 * Examples:
133 *
134 * t1230 : can_id 0x123, can_dlc 0, no data
135 * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
136 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
137 * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
138 *
139 */
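/* A worked decode of one of the strings above (editorial sketch, not part
 * of the driver source): "t4563112233"
 *   't'      -> standard 11-bit data frame
 *   "456"    -> can_id = 0x456
 *   '3'      -> can_dlc = 3
 *   "112233" -> data[] = { 0x11, 0x22, 0x33 }, two hex nibbles per byte,
 * which is exactly what slc_bump() below reassembles via asc2nibble().
 */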
140
141 /************************************************************************
142 * STANDARD SLCAN DECAPSULATION *
143 ************************************************************************/
144
145static int asc2nibble(char c)
146{
147
148 if ((c >= '0') && (c <= '9'))
149 return c - '0';
150
151 if ((c >= 'A') && (c <= 'F'))
152 return c - 'A' + 10;
153
154 if ((c >= 'a') && (c <= 'f'))
155 return c - 'a' + 10;
156
157 return 16; /* error */
158}
159
160/* Send one completely decapsulated can_frame to the network layer */
161static void slc_bump(struct slcan *sl)
162{
163 struct sk_buff *skb;
164 struct can_frame cf;
165 int i, dlc_pos, tmp;
166 unsigned long ultmp;
167 char cmd = sl->rbuff[0];
168
169 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
170 return;
171
172 if (cmd & 0x20) /* lowercase chars 'r' 't' => standard frame format */
173 dlc_pos = 4; /* dlc position tiiid */
174 else
175 dlc_pos = 9; /* dlc position Tiiiiiiiid */
176
177 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
178 return;
179
180 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
181
182 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
183
184 if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
185 return;
186
187 cf.can_id = ultmp;
188
189 if (!(cmd & 0x20)) /* NO lowercase chars => extended frame format */
190 cf.can_id |= CAN_EFF_FLAG;
191
192 if ((cmd | 0x20) == 'r') /* RTR frame */
193 cf.can_id |= CAN_RTR_FLAG;
194
195 *(u64 *) (&cf.data) = 0; /* clear payload */
196
197 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
198
199 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
200 if (tmp > 0x0F)
201 return;
202 cf.data[i] = (tmp << 4);
203 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
204 if (tmp > 0x0F)
205 return;
206 cf.data[i] |= tmp;
207 }
208
209
210 skb = dev_alloc_skb(sizeof(struct can_frame));
211 if (!skb)
212 return;
213
214 skb->dev = sl->dev;
215 skb->protocol = htons(ETH_P_CAN);
216 skb->pkt_type = PACKET_BROADCAST;
217 skb->ip_summed = CHECKSUM_UNNECESSARY;
218 memcpy(skb_put(skb, sizeof(struct can_frame)),
219 &cf, sizeof(struct can_frame));
220 netif_rx(skb);
221
222 sl->dev->stats.rx_packets++;
223 sl->dev->stats.rx_bytes += cf.can_dlc;
224}
225
226/* parse tty input stream */
227static void slcan_unesc(struct slcan *sl, unsigned char s)
228{
229
230 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
231 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
232 (sl->rcount > 4)) {
233 slc_bump(sl);
234 }
235 sl->rcount = 0;
236 } else {
237 if (!test_bit(SLF_ERROR, &sl->flags)) {
238 if (sl->rcount < SLC_MTU) {
239 sl->rbuff[sl->rcount++] = s;
240 return;
241 } else {
242 sl->dev->stats.rx_over_errors++;
243 set_bit(SLF_ERROR, &sl->flags);
244 }
245 }
246 }
247}
248
249 /************************************************************************
250 * STANDARD SLCAN ENCAPSULATION *
251 ************************************************************************/
252
253/* Encapsulate one can_frame and stuff into a TTY queue. */
254static void slc_encaps(struct slcan *sl, struct can_frame *cf)
255{
256 int actual, idx, i;
257 char cmd;
258
259 if (cf->can_id & CAN_RTR_FLAG)
260 cmd = 'R'; /* becomes 'r' in standard frame format */
261 else
262 cmd = 'T'; /* becomes 't' in standard frame format */
263
264 if (cf->can_id & CAN_EFF_FLAG)
265 sprintf(sl->xbuff, "%c%08X%d", cmd,
266 cf->can_id & CAN_EFF_MASK, cf->can_dlc);
267 else
268 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
269 cf->can_id & CAN_SFF_MASK, cf->can_dlc);
270
271 idx = strlen(sl->xbuff);
272
273 for (i = 0; i < cf->can_dlc; i++)
274 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
275
276 strcat(sl->xbuff, "\r"); /* add terminating character */
277
278 /* Order of next two lines is *very* important.
279 * When we are sending a small amount of data,
280 * the transfer may be completed inside the ops->write()
281 * routine, because it's running with interrupts enabled.
282 * In this case we would *never* get a WRITE_WAKEUP event
283 * if we did not request it before the write operation.
284 * 14 Oct 1994 Dmitry Gorodchanin.
285 */
286 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
287 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
288 sl->xleft = strlen(sl->xbuff) - actual;
289 sl->xhead = sl->xbuff + actual;
290 sl->dev->stats.tx_bytes += cf->can_dlc;
291}
292
293/*
294 * Called by the driver when there's room for more data. If we have
295 * more packets to send, we send them here.
296 */
297static void slcan_write_wakeup(struct tty_struct *tty)
298{
299 int actual;
300 struct slcan *sl = (struct slcan *) tty->disc_data;
301
302 /* First make sure we're connected. */
303 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
304 return;
305
306 if (sl->xleft <= 0) {
307 /* Now serial buffer is almost free & we can start
308 * transmission of another packet */
309 sl->dev->stats.tx_packets++;
310 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
311 netif_wake_queue(sl->dev);
312 return;
313 }
314
315 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
316 sl->xleft -= actual;
317 sl->xhead += actual;
318}
319
320/* Send a can_frame to a TTY queue. */
321static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
322{
323 struct slcan *sl = netdev_priv(dev);
324
325 if (skb->len != sizeof(struct can_frame))
326 goto out;
327
328 spin_lock(&sl->lock);
329 if (!netif_running(dev)) {
330 spin_unlock(&sl->lock);
331 printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
332 goto out;
333 }
334 if (sl->tty == NULL) {
335 spin_unlock(&sl->lock);
336 goto out;
337 }
338
339 netif_stop_queue(sl->dev);
340 slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
341 spin_unlock(&sl->lock);
342
343out:
344 kfree_skb(skb);
345 return NETDEV_TX_OK;
346}
347
348
349/******************************************
350 * Routines looking at netdevice side.
351 ******************************************/
352
353/* Netdevice UP -> DOWN routine */
354static int slc_close(struct net_device *dev)
355{
356 struct slcan *sl = netdev_priv(dev);
357
358 spin_lock_bh(&sl->lock);
359 if (sl->tty) {
360 /* TTY discipline is running. */
361 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
362 }
363 netif_stop_queue(dev);
364 sl->rcount = 0;
365 sl->xleft = 0;
366 spin_unlock_bh(&sl->lock);
367
368 return 0;
369}
370
371/* Netdevice DOWN -> UP routine */
372static int slc_open(struct net_device *dev)
373{
374 struct slcan *sl = netdev_priv(dev);
375
376 if (sl->tty == NULL)
377 return -ENODEV;
378
379 sl->flags &= (1 << SLF_INUSE);
380 netif_start_queue(dev);
381 return 0;
382}
383
384/* Hook the destructor so we can free slcan devs at the right point in time */
385static void slc_free_netdev(struct net_device *dev)
386{
387 int i = dev->base_addr;
388 free_netdev(dev);
389 slcan_devs[i] = NULL;
390}
391
392static const struct net_device_ops slc_netdev_ops = {
393 .ndo_open = slc_open,
394 .ndo_stop = slc_close,
395 .ndo_start_xmit = slc_xmit,
396};
397
398static void slc_setup(struct net_device *dev)
399{
400 dev->netdev_ops = &slc_netdev_ops;
401 dev->destructor = slc_free_netdev;
402
403 dev->hard_header_len = 0;
404 dev->addr_len = 0;
405 dev->tx_queue_len = 10;
406
407 dev->mtu = sizeof(struct can_frame);
408 dev->type = ARPHRD_CAN;
409
410 /* New-style flags. */
411 dev->flags = IFF_NOARP;
412 dev->features = NETIF_F_NO_CSUM;
413}
414
415/******************************************
416 Routines looking at TTY side.
417 ******************************************/
418
419/*
420 * Handle the 'receiver data ready' interrupt.
421 * This function is called by the 'tty_io' module in the kernel when
422 * a block of SLCAN data has been received, which can now be decapsulated
423 * and sent on to some IP layer for further processing. This will not
424 * be re-entered while running but other ldisc functions may be called
425 * in parallel
426 */
427
428static void slcan_receive_buf(struct tty_struct *tty,
429 const unsigned char *cp, char *fp, int count)
430{
431 struct slcan *sl = (struct slcan *) tty->disc_data;
432
433 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
434 return;
435
436 /* Read the characters out of the buffer */
437 while (count--) {
438 if (fp && *fp++) {
439 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
440 sl->dev->stats.rx_errors++;
441 cp++;
442 continue;
443 }
444 slcan_unesc(sl, *cp++);
445 }
446}
447
448/************************************
449 * slcan_open helper routines.
450 ************************************/
451
452/* Collect hung-up channels */
453static void slc_sync(void)
454{
455 int i;
456 struct net_device *dev;
457 struct slcan *sl;
458
459 for (i = 0; i < maxdev; i++) {
460 dev = slcan_devs[i];
461 if (dev == NULL)
462 break;
463
464 sl = netdev_priv(dev);
465 if (sl->tty || sl->leased)
466 continue;
467 if (dev->flags & IFF_UP)
468 dev_close(dev);
469 }
470}
471
472/* Find a free SLCAN channel, and link in this `tty' line. */
473static struct slcan *slc_alloc(dev_t line)
474{
475 int i;
476 struct net_device *dev = NULL;
477 struct slcan *sl;
478
479 if (slcan_devs == NULL)
480		return NULL;	/* Master array missing! */
481
482 for (i = 0; i < maxdev; i++) {
483 dev = slcan_devs[i];
484 if (dev == NULL)
485 break;
486
487 }
488
489 /* Sorry, too many, all slots in use */
490 if (i >= maxdev)
491 return NULL;
492
493 if (dev) {
494 sl = netdev_priv(dev);
495 if (test_bit(SLF_INUSE, &sl->flags)) {
496 unregister_netdevice(dev);
497 dev = NULL;
498 slcan_devs[i] = NULL;
499 }
500 }
501
502 if (!dev) {
503 char name[IFNAMSIZ];
504 sprintf(name, "slcan%d", i);
505
506 dev = alloc_netdev(sizeof(*sl), name, slc_setup);
507 if (!dev)
508 return NULL;
509 dev->base_addr = i;
510 }
511
512 sl = netdev_priv(dev);
513
514 /* Initialize channel control data */
515 sl->magic = SLCAN_MAGIC;
516 sl->dev = dev;
517 spin_lock_init(&sl->lock);
518 slcan_devs[i] = dev;
519
520 return sl;
521}
522
523/*
524 * Open the high-level part of the SLCAN channel.
525 * This function is called by the TTY module when the
526 * SLCAN line discipline is called for. Because we are
527 * sure the tty line exists, we only have to link it to
528 * a free SLCAN channel...
529 *
530 * Called in process context serialized from other ldisc calls.
531 */
532
533static int slcan_open(struct tty_struct *tty)
534{
535 struct slcan *sl;
536 int err;
537
538 if (!capable(CAP_NET_ADMIN))
539 return -EPERM;
540
541 if (tty->ops->write == NULL)
542 return -EOPNOTSUPP;
543
544 /* RTnetlink lock is misused here to serialize concurrent
545 opens of slcan channels. There are better ways, but it is
546 the simplest one.
547 */
548 rtnl_lock();
549
550	/* Collect hung-up channels. */
551 slc_sync();
552
553 sl = tty->disc_data;
554
555 err = -EEXIST;
556 /* First make sure we're not already connected. */
557 if (sl && sl->magic == SLCAN_MAGIC)
558 goto err_exit;
559
560 /* OK. Find a free SLCAN channel to use. */
561 err = -ENFILE;
562 sl = slc_alloc(tty_devnum(tty));
563 if (sl == NULL)
564 goto err_exit;
565
566 sl->tty = tty;
567 tty->disc_data = sl;
568 sl->line = tty_devnum(tty);
569 sl->pid = current->pid;
570
571 if (!test_bit(SLF_INUSE, &sl->flags)) {
572 /* Perform the low-level SLCAN initialization. */
573 sl->rcount = 0;
574 sl->xleft = 0;
575
576 set_bit(SLF_INUSE, &sl->flags);
577
578 err = register_netdevice(sl->dev);
579 if (err)
580 goto err_free_chan;
581 }
582
583 /* Done. We have linked the TTY line to a channel. */
584 rtnl_unlock();
585 tty->receive_room = 65536; /* We don't flow control */
586 return sl->dev->base_addr;
587
588err_free_chan:
589 sl->tty = NULL;
590 tty->disc_data = NULL;
591 clear_bit(SLF_INUSE, &sl->flags);
592
593err_exit:
594 rtnl_unlock();
595
596	/* Return the error code to the TTY module. */
597 return err;
598}
599
600/*
601 * Close down a SLCAN channel.
602 * This means flushing out any pending queues, and then returning. This
603 * call is serialized against other ldisc functions.
604 *
605 * We also use this method for a hangup event.
606 */
607
608static void slcan_close(struct tty_struct *tty)
609{
610 struct slcan *sl = (struct slcan *) tty->disc_data;
611
612 /* First make sure we're connected. */
613 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
614 return;
615
616 tty->disc_data = NULL;
617 sl->tty = NULL;
618 if (!sl->leased)
619 sl->line = 0;
620
621 /* Flush network side */
622 unregister_netdev(sl->dev);
623	/* This will complete via slc_free_netdev */
624}
625
626static int slcan_hangup(struct tty_struct *tty)
627{
628 slcan_close(tty);
629 return 0;
630}
631
632/* Perform I/O control on an active SLCAN channel. */
633static int slcan_ioctl(struct tty_struct *tty, struct file *file,
634 unsigned int cmd, unsigned long arg)
635{
636 struct slcan *sl = (struct slcan *) tty->disc_data;
637 unsigned int tmp;
638
639 /* First make sure we're connected. */
640 if (!sl || sl->magic != SLCAN_MAGIC)
641 return -EINVAL;
642
643 switch (cmd) {
644 case SIOCGIFNAME:
645 tmp = strlen(sl->dev->name) + 1;
646 if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
647 return -EFAULT;
648 return 0;
649
650 case SIOCSIFHWADDR:
651 return -EINVAL;
652
653 default:
654 return tty_mode_ioctl(tty, file, cmd, arg);
655 }
656}
657
658static struct tty_ldisc_ops slc_ldisc = {
659 .owner = THIS_MODULE,
660 .magic = TTY_LDISC_MAGIC,
661 .name = "slcan",
662 .open = slcan_open,
663 .close = slcan_close,
664 .hangup = slcan_hangup,
665 .ioctl = slcan_ioctl,
666 .receive_buf = slcan_receive_buf,
667 .write_wakeup = slcan_write_wakeup,
668};
669
670static int __init slcan_init(void)
671{
672 int status;
673
674 if (maxdev < 4)
675 maxdev = 4; /* Sanity */
676
677 printk(banner);
678 printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
679
680 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
681 if (!slcan_devs) {
682 printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
683 return -ENOMEM;
684 }
685
686 /* Fill in our line protocol discipline, and register it */
687 status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
688 if (status) {
689 printk(KERN_ERR "slcan: can't register line discipline\n");
690 kfree(slcan_devs);
691 }
692 return status;
693}
694
695static void __exit slcan_exit(void)
696{
697 int i;
698 struct net_device *dev;
699 struct slcan *sl;
700 unsigned long timeout = jiffies + HZ;
701 int busy = 0;
702
703 if (slcan_devs == NULL)
704 return;
705
706	/* First of all: check for active disciplines and hang them up.
707 */
708 do {
709 if (busy)
710 msleep_interruptible(100);
711
712 busy = 0;
713 for (i = 0; i < maxdev; i++) {
714 dev = slcan_devs[i];
715 if (!dev)
716 continue;
717 sl = netdev_priv(dev);
718 spin_lock_bh(&sl->lock);
719 if (sl->tty) {
720 busy++;
721 tty_hangup(sl->tty);
722 }
723 spin_unlock_bh(&sl->lock);
724 }
725 } while (busy && time_before(jiffies, timeout));
726
727	/* FIXME: hangup is async, so we should wait for it to complete
728	   before doing this second phase */
729
730 for (i = 0; i < maxdev; i++) {
731 dev = slcan_devs[i];
732 if (!dev)
733 continue;
734 slcan_devs[i] = NULL;
735
736 sl = netdev_priv(dev);
737 if (sl->tty) {
738 printk(KERN_ERR "%s: tty discipline still running\n",
739 dev->name);
740 /* Intentionally leak the control block. */
741 dev->destructor = NULL;
742 }
743
744 unregister_netdev(dev);
745 }
746
747 kfree(slcan_devs);
748 slcan_devs = NULL;
749
750 i = tty_unregister_ldisc(N_SLCAN);
751 if (i)
752 printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
753}
754
755module_init(slcan_init);
756module_exit(slcan_exit);
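
The transmit path above hands each can_frame to slc_encaps(), which is defined earlier in the file and not shown in this hunk. For orientation, here is a stand-alone user-space sketch of the framing it produces for a standard data frame, assuming the conventional SLCAN/Lawicel ASCII syntax ('t', three hex identifier digits, one length digit, two hex digits per data byte, CR); the demo_* names are hypothetical, and the real driver additionally handles extended and RTR frames:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the fields of struct can_frame we need. */
struct demo_can_frame {
	uint32_t can_id;	/* 11-bit standard identifier */
	uint8_t  can_dlc;	/* data length code, 0..8 */
	uint8_t  data[8];
};

/*
 * Encode a standard data frame in SLCAN ASCII form: 't', three hex ID
 * digits, one DLC digit, two hex digits per data byte, then CR.
 */
static int demo_encaps(const struct demo_can_frame *cf, char *buf, size_t len)
{
	int n = snprintf(buf, len, "t%03X%d",
			 (unsigned int)(cf->can_id & 0x7FF), cf->can_dlc);

	for (int i = 0; i < cf->can_dlc; i++)
		n += snprintf(buf + n, len - n, "%02X", cf->data[i]);
	n += snprintf(buf + n, len - n, "\r");
	return n;	/* byte count handed to tty->ops->write() */
}

int main(void)
{
	struct demo_can_frame cf = { .can_id = 0x123, .can_dlc = 2,
				     .data = { 0xAB, 0xCD } };
	char buf[32];

	demo_encaps(&cf, buf, sizeof(buf));
	printf("%s\n", buf);	/* "t1232ABCD" followed by CR */
	return 0;
}

slcan_write_wakeup() then drains whatever tty->ops->write() could not accept in one call, using xhead/xleft exactly as shown at the top of this excerpt.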
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d6b6d6aa565a..a8a32bc9aae6 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3880,7 +3880,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
3880 schedule_work(&cp->reset_task); 3880 schedule_work(&cp->reset_task);
3881#endif 3881#endif
3882 3882
3883 flush_scheduled_work(); 3883 flush_work_sync(&cp->reset_task);
3884 return 0; 3884 return 0;
3885} 3885}
3886 3886
@@ -5177,7 +5177,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5177 vfree(cp->fw_data); 5177 vfree(cp->fw_data);
5178 5178
5179 mutex_lock(&cp->pm_mutex); 5179 mutex_lock(&cp->pm_mutex);
5180 flush_scheduled_work(); 5180 cancel_work_sync(&cp->reset_task);
5181 if (cp->hw_running) 5181 if (cp->hw_running)
5182 cas_shutdown(cp); 5182 cas_shutdown(cp);
5183 mutex_unlock(&cp->pm_mutex); 5183 mutex_unlock(&cp->pm_mutex);
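
This cassini conversion is representative of a tree-wide move away from flush_scheduled_work(), which flushes the entire shared workqueue and can block on, or deadlock against, unrelated work items. The replacements name the one work item the driver actually owns. A kernel-style sketch of when each primitive applies, assuming a driver with a single reset_task (demo_* names are hypothetical; this only builds inside a kernel tree):

#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct reset_task;
};

static void demo_quiesce(struct demo_priv *p, bool removing)
{
	if (removing)
		/* Must never run again: wait and clear any pending state. */
		cancel_work_sync(&p->reset_task);
	else
		/* May be re-queued later: just wait for the current run. */
		flush_work_sync(&p->reset_task);
}

That is the split the patch makes: cas_change_mtu() only needs to wait for an in-flight reset (flush_work_sync), while cas_remove_one() must guarantee the task cannot fire after teardown (cancel_work_sync).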
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60a..594ca9c2c10a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1695,7 +1695,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1695 *work = num; 1695 *work = num;
1696 return -EINVAL; 1696 return -EINVAL;
1697 } 1697 }
1698 *work = 2 + req2->num_additional_wqes;; 1698 *work = 2 + req2->num_additional_wqes;
1699 1699
1700 l5_cid = req1->iscsi_conn_id; 1700 l5_cid = req1->iscsi_conn_id;
1701 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1701 if (l5_cid >= MAX_ISCSI_TBL_SZ)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1c..80c2feeefec5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
59 59
60/* Information that needs to be kept for each board. */ 60/* Information that needs to be kept for each board. */
61struct net_local { 61struct net_local {
62 struct net_device_stats stats;
63 struct mii_if_info mii_if; 62 struct mii_if_info mii_if;
64 63
65 /* Tx control lock. This protects the transmit buffer ring 64 /* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
1059 1058
1060 /* remember we got an error */ 1059 /* remember we got an error */
1061 1060
1062 np->stats.tx_errors++; 1061 dev->stats.tx_errors++;
1063 1062
1064 /* reset the TX DMA in case it has hung on something */ 1063 /* reset the TX DMA in case it has hung on something */
1065 1064
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
1157 * allocate a new buffer to put a packet in. 1156 * allocate a new buffer to put a packet in.
1158 */ 1157 */
1159 e100_rx(dev); 1158 e100_rx(dev);
1160 np->stats.rx_packets++; 1159 dev->stats.rx_packets++;
1161 /* restart/continue on the channel, for safety */ 1160 /* restart/continue on the channel, for safety */
1162 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); 1161 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1163 /* clear dma channel 1 eop/descr irq bits */ 1162 /* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
1173 /* Report any packets that have been sent */ 1172 /* Report any packets that have been sent */
1174 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && 1173 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1175 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { 1174 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1176 np->stats.tx_bytes += myFirstTxDesc->skb->len; 1175 dev->stats.tx_bytes += myFirstTxDesc->skb->len;
1177 np->stats.tx_packets++; 1176 dev->stats.tx_packets++;
1178 1177
1179 /* dma is ready with the transmission of the data in tx_skb, so now 1178 /* dma is ready with the transmission of the data in tx_skb, so now
1180 we can release the skb memory */ 1179 we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
1197e100nw_interrupt(int irq, void *dev_id) 1196e100nw_interrupt(int irq, void *dev_id)
1198{ 1197{
1199 struct net_device *dev = (struct net_device *)dev_id; 1198 struct net_device *dev = (struct net_device *)dev_id;
1200 struct net_local *np = netdev_priv(dev);
1201 unsigned long irqbits = *R_IRQ_MASK0_RD; 1199 unsigned long irqbits = *R_IRQ_MASK0_RD;
1202 1200
1203 /* check for underrun irq */ 1201 /* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1203 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1206 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1204 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1207 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1208 np->stats.tx_errors++; 1206 dev->stats.tx_errors++;
1209 D(printk("ethernet receiver underrun!\n")); 1207 D(printk("ethernet receiver underrun!\n"));
1210 } 1208 }
1211 1209
1212 /* check for overrun irq */ 1210 /* check for overrun irq */
1213 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { 1211 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
1214 update_rx_stats(&np->stats); /* this will ack the irq */ 1212 update_rx_stats(&dev->stats); /* this will ack the irq */
1215 D(printk("ethernet receiver overrun!\n")); 1213 D(printk("ethernet receiver overrun!\n"));
1216 } 1214 }
1217 /* check for excessive collision irq */ 1215 /* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1217 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1220 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1218 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1221 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1222 np->stats.tx_errors++; 1220 dev->stats.tx_errors++;
1223 D(printk("ethernet excessive collisions!\n")); 1221 D(printk("ethernet excessive collisions!\n"));
1224 } 1222 }
1225 return IRQ_HANDLED; 1223 return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
1250 spin_unlock(&np->led_lock); 1248 spin_unlock(&np->led_lock);
1251 1249
1252 length = myNextRxDesc->descr.hw_len - 4; 1250 length = myNextRxDesc->descr.hw_len - 4;
1253 np->stats.rx_bytes += length; 1251 dev->stats.rx_bytes += length;
1254 1252
1255#ifdef ETHDEBUG 1253#ifdef ETHDEBUG
1256 printk("Got a packet of length %d:\n", length); 1254 printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
1268 /* Small packet, copy data */ 1266 /* Small packet, copy data */
1269 skb = dev_alloc_skb(length - ETHER_HEAD_LEN); 1267 skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
1270 if (!skb) { 1268 if (!skb) {
1271 np->stats.rx_errors++; 1269 dev->stats.rx_errors++;
1272 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1270 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1273 goto update_nextrxdesc; 1271 goto update_nextrxdesc;
1274 } 1272 }
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
1294 int align; 1292 int align;
1295 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 1293 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1296 if (!new_skb) { 1294 if (!new_skb) {
1297 np->stats.rx_errors++; 1295 dev->stats.rx_errors++;
1298 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1296 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1299 goto update_nextrxdesc; 1297 goto update_nextrxdesc;
1300 } 1298 }
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
1333static int 1331static int
1334e100_close(struct net_device *dev) 1332e100_close(struct net_device *dev)
1335{ 1333{
1336 struct net_local *np = netdev_priv(dev);
1337
1338 printk(KERN_INFO "Closing %s.\n", dev->name); 1334 printk(KERN_INFO "Closing %s.\n", dev->name);
1339 1335
1340 netif_stop_queue(dev); 1336 netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
1366 1362
1367 /* Update the statistics here. */ 1363 /* Update the statistics here. */
1368 1364
1369 update_rx_stats(&np->stats); 1365 update_rx_stats(&dev->stats);
1370 update_tx_stats(&np->stats); 1366 update_tx_stats(&dev->stats);
1371 1367
1372 /* Stop speed/duplex timers */ 1368 /* Stop speed/duplex timers */
1373 del_timer(&speed_timer); 1369 del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
1545 1541
1546 spin_lock_irqsave(&lp->lock, flags); 1542 spin_lock_irqsave(&lp->lock, flags);
1547 1543
1548 update_rx_stats(&lp->stats); 1544 update_rx_stats(&dev->stats);
1549 update_tx_stats(&lp->stats); 1545 update_tx_stats(&dev->stats);
1550 1546
1551 spin_unlock_irqrestore(&lp->lock, flags); 1547 spin_unlock_irqrestore(&lp->lock, flags);
1552 return &lp->stats; 1548 return &dev->stats;
1553} 1549}
1554 1550
1555/* 1551/*
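
The cris/eth_v10 hunks are mechanical: every counter moves from the driver-private struct net_local into the net_device's built-in stats, so the private struct sheds a field and several paths drop their netdev_priv() lookup. Once the counters live in dev->stats, a trivial get_stats handler reduces to the following sketch (demo_get_stats is a hypothetical name):

#include <linux/netdevice.h>

static struct net_device_stats *demo_get_stats(struct net_device *dev)
{
	/* Hot paths increment dev->stats.rx_packets etc. directly. */
	return &dev->stats;
}

A driver that keeps everything in dev->stats can usually drop its ndo_get_stats implementation altogether, since dev_get_stats() falls back to dev->stats when no handler is set.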
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e272075..4d538a4e9d55 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1359,6 +1359,7 @@ out:
1359static int offload_close(struct t3cdev *tdev) 1359static int offload_close(struct t3cdev *tdev)
1360{ 1360{
1361 struct adapter *adapter = tdev2adap(tdev); 1361 struct adapter *adapter = tdev2adap(tdev);
1362 struct t3c_data *td = T3C_DATA(tdev);
1362 1363
1363 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 1364 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1364 return 0; 1365 return 0;
@@ -1369,7 +1370,7 @@ static int offload_close(struct t3cdev *tdev)
1369 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); 1370 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1370 1371
1371 /* Flush work scheduled while releasing TIDs */ 1372 /* Flush work scheduled while releasing TIDs */
1372 flush_scheduled_work(); 1373 flush_work_sync(&td->tid_release_task);
1373 1374
1374 tdev->lldev = NULL; 1375 tdev->lldev = NULL;
1375 cxgb3_set_dummy_ops(tdev); 1376 cxgb3_set_dummy_ops(tdev);
@@ -3006,12 +3007,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3006 pci_channel_state_t state) 3007 pci_channel_state_t state)
3007{ 3008{
3008 struct adapter *adapter = pci_get_drvdata(pdev); 3009 struct adapter *adapter = pci_get_drvdata(pdev);
3009 int ret;
3010 3010
3011 if (state == pci_channel_io_perm_failure) 3011 if (state == pci_channel_io_perm_failure)
3012 return PCI_ERS_RESULT_DISCONNECT; 3012 return PCI_ERS_RESULT_DISCONNECT;
3013 3013
3014 ret = t3_adapter_error(adapter, 0, 0); 3014 t3_adapter_error(adapter, 0, 0);
3015 3015
3016 /* Request a slot reset. */ 3016 /* Request a slot reset. */
3017 return PCI_ERS_RESULT_NEED_RESET; 3017 return PCI_ERS_RESULT_NEED_RESET;
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3341 adapter->name = adapter->port[i]->name; 3341 adapter->name = adapter->port[i]->name;
3342 3342
3343 __set_bit(i, &adapter->registered_device_map); 3343 __set_bit(i, &adapter->registered_device_map);
3344 netif_tx_stop_all_queues(adapter->port[i]);
3345 } 3344 }
3346 } 3345 }
3347 if (!adapter->registered_device_map) { 3346 if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953d..ef02aa68c926 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1164 */ 1164 */
1165void *cxgb_alloc_mem(unsigned long size) 1165void *cxgb_alloc_mem(unsigned long size)
1166{ 1166{
1167 void *p = kmalloc(size, GFP_KERNEL); 1167 void *p = kzalloc(size, GFP_KERNEL);
1168 1168
1169 if (!p) 1169 if (!p)
1170 p = vmalloc(size); 1170 p = vzalloc(size);
1171 if (p)
1172 memset(p, 0, size);
1173 return p; 1171 return p;
1174} 1172}
1175 1173
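
This hunk, and the identical cxgb4 one just below, folds the allocate-then-memset sequence into the zeroing allocators kzalloc() and vzalloc(). Since the memory may come from either allocator, the matching free helper has to dispatch on the address; a sketch of the usual pairing, with is_vmalloc_addr() deciding which free to call (demo_* names are hypothetical):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate zeroed memory, falling back to vmalloc for large requests. */
static void *demo_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/* Free must match whichever allocator satisfied the request. */
static void demo_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

vzalloc() is new at this point in the tree; it exists precisely so this fallback pattern no longer needs the trailing memset().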
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f410b3..848f89d19fb7 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -868,12 +868,10 @@ out: release_firmware(fw);
868 */ 868 */
869void *t4_alloc_mem(size_t size) 869void *t4_alloc_mem(size_t size)
870{ 870{
871 void *p = kmalloc(size, GFP_KERNEL); 871 void *p = kzalloc(size, GFP_KERNEL);
872 872
873 if (!p) 873 if (!p)
874 p = vmalloc(size); 874 p = vzalloc(size);
875 if (p)
876 memset(p, 0, size);
877 return p; 875 return p;
878} 876}
879 877
@@ -3736,7 +3734,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3736 3734
3737 __set_bit(i, &adapter->registered_device_map); 3735 __set_bit(i, &adapter->registered_device_map);
3738 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; 3736 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3739 netif_tx_stop_all_queues(adapter->port[i]);
3740 } 3737 }
3741 } 3738 }
3742 if (!adapter->registered_device_map) { 3739 if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d94aea8..e97521c801ea 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -2408,7 +2408,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2408 if (index < NEXACT_MAC) 2408 if (index < NEXACT_MAC)
2409 ret++; 2409 ret++;
2410 else if (hash) 2410 else if (hash)
2411 *hash |= (1 << hash_mac_addr(addr[i])); 2411 *hash |= (1ULL << hash_mac_addr(addr[i]));
2412 } 2412 }
2413 return ret; 2413 return ret;
2414} 2414}
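
The one-character t4_hw.c fix deserves a note: *hash is a u64, but `1 << hash_mac_addr(addr[i])` is evaluated as a 32-bit int shift, so for hash values of 32 or more the shift is undefined behaviour and the upper 32 bits of the hash could never be set. Promoting the constant with 1ULL makes the shift 64-bit. A minimal user-space illustration (the value 40 is just an example of a hash index above 31):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int n = 40;	/* a hash index in the upper half */
	uint64_t hash = 0;

	/* hash |= 1 << n;  would shift a 32-bit int: undefined for n >= 32 */
	hash |= 1ULL << n;	/* correct: the shift is done in 64 bits */

	printf("bit %u -> 0x%016llx\n", n, (unsigned long long)hash);
	return 0;
}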
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
60 * MSI-X interrupt index usage. 60 * MSI-X interrupt index usage.
61 */ 61 */
62 MSIX_FW = 0, /* MSI-X index for firmware Q */ 62 MSIX_FW = 0, /* MSI-X index for firmware Q */
63 MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */ 63 MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
64 MSIX_EXTRAS = 1, 64 MSIX_EXTRAS = 1,
65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, 65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
66 66
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..f54af48edb93 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
280 const struct port_info *pi = netdev_priv(dev); 280 const struct port_info *pi = netdev_priv(dev);
281 int qs, msi; 281 int qs, msi;
282 282
283 for (qs = 0, msi = MSIX_NIQFLINT; 283 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
284 qs < pi->nqsets;
285 qs++, msi++) {
286 snprintf(adapter->msix_info[msi].desc, namelen, 284 snprintf(adapter->msix_info[msi].desc, namelen,
287 "%s-%d", dev->name, qs); 285 "%s-%d", dev->name, qs);
288 adapter->msix_info[msi].desc[namelen] = 0; 286 adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
309 /* 307 /*
310 * Ethernet queues. 308 * Ethernet queues.
311 */ 309 */
312 msi = MSIX_NIQFLINT; 310 msi = MSIX_IQFLINT;
313 for_each_ethrxq(s, rxq) { 311 for_each_ethrxq(s, rxq) {
314 err = request_irq(adapter->msix_info[msi].vec, 312 err = request_irq(adapter->msix_info[msi].vec,
315 t4vf_sge_intr_msix, 0, 313 t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
337 int rxq, msi; 335 int rxq, msi;
338 336
339 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); 337 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
340 msi = MSIX_NIQFLINT; 338 msi = MSIX_IQFLINT;
341 for_each_ethrxq(s, rxq) 339 for_each_ethrxq(s, rxq)
342 free_irq(adapter->msix_info[msi++].vec, 340 free_irq(adapter->msix_info[msi++].vec,
343 &s->ethrxq[rxq].rspq); 341 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
527 * brought up at which point lots of things get nailed down 525 * brought up at which point lots of things get nailed down
528 * permanently ... 526 * permanently ...
529 */ 527 */
530 msix = MSIX_NIQFLINT; 528 msix = MSIX_IQFLINT;
531 for_each_port(adapter, pidx) { 529 for_each_port(adapter, pidx) {
532 struct net_device *dev = adapter->port[pidx]; 530 struct net_device *dev = adapter->port[pidx];
533 struct port_info *pi = netdev_priv(dev); 531 struct port_info *pi = netdev_priv(dev);
@@ -753,7 +751,9 @@ static int cxgb4vf_open(struct net_device *dev)
753 if (err) 751 if (err)
754 return err; 752 return err;
755 set_bit(pi->port_id, &adapter->open_device_map); 753 set_bit(pi->port_id, &adapter->open_device_map);
756 link_start(dev); 754 err = link_start(dev);
755 if (err)
756 return err;
757 netif_tx_start_all_queues(dev); 757 netif_tx_start_all_queues(dev);
758 return 0; 758 return 0;
759} 759}
@@ -814,40 +814,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
814} 814}
815 815
816/* 816/*
817 * Collect up to maxaddrs worth of a netdevice's unicast addresses into an 817 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
818 * array of address pointers and return the number collected. 818 at a specified offset within the list, into an array of address pointers and
819 * return the number collected.
819 */ 820 */
820static inline int collect_netdev_uc_list_addrs(const struct net_device *dev, 821static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
821 const u8 **addr, 822 const u8 **addr,
822 unsigned int maxaddrs) 823 unsigned int offset,
824 unsigned int maxaddrs)
823{ 825{
826 unsigned int index = 0;
824 unsigned int naddr = 0; 827 unsigned int naddr = 0;
825 const struct netdev_hw_addr *ha; 828 const struct netdev_hw_addr *ha;
826 829
827 for_each_dev_addr(dev, ha) { 830 for_each_dev_addr(dev, ha)
828 addr[naddr++] = ha->addr; 831 if (index++ >= offset) {
829 if (naddr >= maxaddrs) 832 addr[naddr++] = ha->addr;
830 break; 833 if (naddr >= maxaddrs)
831 } 834 break;
835 }
832 return naddr; 836 return naddr;
833} 837}
834 838
835/* 839/*
836 * Collect up to maxaddrs worth of a netdevice's multicast addresses into an 840 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
837 * array of address pointers and return the number collected. 841 at a specified offset within the list, into an array of address pointers and
842 * return the number collected.
838 */ 843 */
839static inline int collect_netdev_mc_list_addrs(const struct net_device *dev, 844static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
840 const u8 **addr, 845 const u8 **addr,
841 unsigned int maxaddrs) 846 unsigned int offset,
847 unsigned int maxaddrs)
842{ 848{
849 unsigned int index = 0;
843 unsigned int naddr = 0; 850 unsigned int naddr = 0;
844 const struct netdev_hw_addr *ha; 851 const struct netdev_hw_addr *ha;
845 852
846 netdev_for_each_mc_addr(ha, dev) { 853 netdev_for_each_mc_addr(ha, dev)
847 addr[naddr++] = ha->addr; 854 if (index++ >= offset) {
848 if (naddr >= maxaddrs) 855 addr[naddr++] = ha->addr;
849 break; 856 if (naddr >= maxaddrs)
850 } 857 break;
858 }
851 return naddr; 859 return naddr;
852} 860}
853 861
@@ -860,16 +868,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
860 u64 mhash = 0; 868 u64 mhash = 0;
861 u64 uhash = 0; 869 u64 uhash = 0;
862 bool free = true; 870 bool free = true;
863 u16 filt_idx[7]; 871 unsigned int offset, naddr;
864 const u8 *addr[7]; 872 const u8 *addr[7];
865 int ret, naddr = 0; 873 int ret;
866 const struct port_info *pi = netdev_priv(dev); 874 const struct port_info *pi = netdev_priv(dev);
867 875
868 /* first do the secondary unicast addresses */ 876 /* first do the secondary unicast addresses */
869 naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 877 for (offset = 0; ; offset += naddr) {
870 if (naddr > 0) { 878 naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
879 ARRAY_SIZE(addr));
880 if (naddr == 0)
881 break;
882
871 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 883 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
872 naddr, addr, filt_idx, &uhash, sleep); 884 naddr, addr, NULL, &uhash, sleep);
873 if (ret < 0) 885 if (ret < 0)
874 return ret; 886 return ret;
875 887
@@ -877,12 +889,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
877 } 889 }
878 890
879 /* next set up the multicast addresses */ 891 /* next set up the multicast addresses */
880 naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 892 for (offset = 0; ; offset += naddr) {
881 if (naddr > 0) { 893 naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
894 ARRAY_SIZE(addr));
895 if (naddr == 0)
896 break;
897
882 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 898 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
883 naddr, addr, filt_idx, &mhash, sleep); 899 naddr, addr, NULL, &mhash, sleep);
884 if (ret < 0) 900 if (ret < 0)
885 return ret; 901 return ret;
902 free = false;
886 } 903 }
887 904
888 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, 905 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1120,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1103 return 0; 1120 return 0;
1104} 1121}
1105 1122
1106/*
1107 * Return a TX Queue on which to send the specified skb.
1108 */
1109static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
1110{
1111 /*
1112 * XXX For now just use the default hash but we probably want to
1113 * XXX look at other possibilities ...
1114 */
1115 return skb_tx_hash(dev, skb);
1116}
1117
1118#ifdef CONFIG_NET_POLL_CONTROLLER 1123#ifdef CONFIG_NET_POLL_CONTROLLER
1119/* 1124/*
1120 * Poll all of our receive queues. This is called outside of normal interrupt 1125 * Poll all of our receive queues. This is called outside of normal interrupt
@@ -1358,6 +1363,8 @@ struct queue_port_stats {
1358 u64 rx_csum; 1363 u64 rx_csum;
1359 u64 vlan_ex; 1364 u64 vlan_ex;
1360 u64 vlan_ins; 1365 u64 vlan_ins;
1366 u64 lro_pkts;
1367 u64 lro_merged;
1361}; 1368};
1362 1369
1363/* 1370/*
@@ -1395,6 +1402,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
1395 "RxCsumGood ", 1402 "RxCsumGood ",
1396 "VLANextractions ", 1403 "VLANextractions ",
1397 "VLANinsertions ", 1404 "VLANinsertions ",
1405 "GROPackets ",
1406 "GROMerged ",
1398}; 1407};
1399 1408
1400/* 1409/*
@@ -1444,6 +1453,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
1444 stats->rx_csum += rxq->stats.rx_cso; 1453 stats->rx_csum += rxq->stats.rx_cso;
1445 stats->vlan_ex += rxq->stats.vlan_ex; 1454 stats->vlan_ex += rxq->stats.vlan_ex;
1446 stats->vlan_ins += txq->vlan_ins; 1455 stats->vlan_ins += txq->vlan_ins;
1456 stats->lro_pkts += rxq->stats.lro_pkts;
1457 stats->lro_merged += rxq->stats.lro_merged;
1447 } 1458 }
1448} 1459}
1449 1460
@@ -1540,14 +1551,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
1540} 1551}
1541 1552
1542/* 1553/*
1554 * TCP Segmentation Offload flags which we support.
1555 */
1556#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1557
1558/*
1543 * Set TCP Segmentation Offloading feature capabilities. 1559 * Set TCP Segmentation Offloading feature capabilities.
1544 */ 1560 */
1545static int cxgb4vf_set_tso(struct net_device *dev, u32 tso) 1561static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
1546{ 1562{
1547 if (tso) 1563 if (tso)
1548 dev->features |= NETIF_F_TSO | NETIF_F_TSO6; 1564 dev->features |= TSO_FLAGS;
1549 else 1565 else
1550 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 1566 dev->features &= ~TSO_FLAGS;
1551 return 0; 1567 return 0;
1552} 1568}
1553 1569
@@ -2038,7 +2054,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2038 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave 2054 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2039 * it to our caller to tear down the directory (debugfs_root). 2055 * it to our caller to tear down the directory (debugfs_root).
2040 */ 2056 */
2041static void __devexit cleanup_debugfs(struct adapter *adapter) 2057static void cleanup_debugfs(struct adapter *adapter)
2042{ 2058{
2043 BUG_ON(adapter->debugfs_root == NULL); 2059 BUG_ON(adapter->debugfs_root == NULL);
2044 2060
@@ -2056,7 +2072,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
2056 * adapter parameters we're going to be using and initialize basic adapter 2072 * adapter parameters we're going to be using and initialize basic adapter
2057 * hardware support. 2073 * hardware support.
2058 */ 2074 */
2059static int adap_init0(struct adapter *adapter) 2075static int __devinit adap_init0(struct adapter *adapter)
2060{ 2076{
2061 struct vf_resources *vfres = &adapter->params.vfres; 2077 struct vf_resources *vfres = &adapter->params.vfres;
2062 struct sge_params *sge_params = &adapter->params.sge; 2078 struct sge_params *sge_params = &adapter->params.sge;
@@ -2075,6 +2091,22 @@ static int adap_init0(struct adapter *adapter)
2075 } 2091 }
2076 2092
2077 /* 2093 /*
2094 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2095 * 2.6.31 and later we can't call pci_reset_function() in order to
2096 * issue an FLR because of a self-deadlock on the device semaphore.
2097 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2098 * cases where they're needed -- for instance, some versions of KVM
2099 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2100 * use the firmware based reset in order to reset any per function
2101 * state.
2102 */
2103 err = t4vf_fw_reset(adapter);
2104 if (err < 0) {
2105 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2106 return err;
2107 }
2108
2109 /*
2078 * Grab basic operational parameters. These will predominantly have 2110 * Grab basic operational parameters. These will predominantly have
2079 * been set up by the Physical Function Driver or will be hard coded 2111 * been set up by the Physical Function Driver or will be hard coded
2080 * into the adapter. We just have to live with them ... Note that 2112 * into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2449,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
2417 .ndo_get_stats = cxgb4vf_get_stats, 2449 .ndo_get_stats = cxgb4vf_get_stats,
2418 .ndo_set_rx_mode = cxgb4vf_set_rxmode, 2450 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2419 .ndo_set_mac_address = cxgb4vf_set_mac_addr, 2451 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2420 .ndo_select_queue = cxgb4vf_select_queue,
2421 .ndo_validate_addr = eth_validate_addr, 2452 .ndo_validate_addr = eth_validate_addr,
2422 .ndo_do_ioctl = cxgb4vf_do_ioctl, 2453 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2423 .ndo_change_mtu = cxgb4vf_change_mtu, 2454 .ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2465,7 +2496,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2465 version_printed = 1; 2496 version_printed = 1;
2466 } 2497 }
2467 2498
2468
2469 /* 2499 /*
2470 * Initialize generic PCI device state. 2500 * Initialize generic PCI device state.
2471 */ 2501 */
@@ -2600,10 +2630,9 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2600 pi->xact_addr_filt = -1; 2630 pi->xact_addr_filt = -1;
2601 pi->rx_offload = RX_CSO; 2631 pi->rx_offload = RX_CSO;
2602 netif_carrier_off(netdev); 2632 netif_carrier_off(netdev);
2603 netif_tx_stop_all_queues(netdev);
2604 netdev->irq = pdev->irq; 2633 netdev->irq = pdev->irq;
2605 2634
2606 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2635 netdev->features = (NETIF_F_SG | TSO_FLAGS |
2607 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2636 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2608 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2637 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2609 NETIF_F_GRO); 2638 NETIF_F_GRO);
@@ -2625,7 +2654,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2625 netdev->do_ioctl = cxgb4vf_do_ioctl; 2654 netdev->do_ioctl = cxgb4vf_do_ioctl;
2626 netdev->change_mtu = cxgb4vf_change_mtu; 2655 netdev->change_mtu = cxgb4vf_change_mtu;
2627 netdev->set_mac_address = cxgb4vf_set_mac_addr; 2656 netdev->set_mac_address = cxgb4vf_set_mac_addr;
2628 netdev->select_queue = cxgb4vf_select_queue;
2629#ifdef CONFIG_NET_POLL_CONTROLLER 2657#ifdef CONFIG_NET_POLL_CONTROLLER
2630 netdev->poll_controller = cxgb4vf_poll_controller; 2658 netdev->poll_controller = cxgb4vf_poll_controller;
2631#endif 2659#endif
@@ -2844,6 +2872,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
2844 CH_DEVICE(0x4800, 0), /* T440-dbg */ 2872 CH_DEVICE(0x4800, 0), /* T440-dbg */
2845 CH_DEVICE(0x4801, 0), /* T420-cr */ 2873 CH_DEVICE(0x4801, 0), /* T420-cr */
2846 CH_DEVICE(0x4802, 0), /* T422-cr */ 2874 CH_DEVICE(0x4802, 0), /* T422-cr */
2875 CH_DEVICE(0x4803, 0), /* T440-cr */
2876 CH_DEVICE(0x4804, 0), /* T420-bch */
2877 CH_DEVICE(0x4805, 0), /* T440-bch */
2878 CH_DEVICE(0x4806, 0), /* T460-ch */
2879 CH_DEVICE(0x4807, 0), /* T420-so */
2880 CH_DEVICE(0x4808, 0), /* T420-cx */
2881 CH_DEVICE(0x4809, 0), /* T420-bt */
2882 CH_DEVICE(0x480a, 0), /* T404-bt */
2847 { 0, } 2883 { 0, }
2848}; 2884};
2849 2885
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
154 */ 154 */
155 RX_COPY_THRES = 256, 155 RX_COPY_THRES = 256,
156 RX_PULL_LEN = 128, 156 RX_PULL_LEN = 128,
157};
158 157
159/* 158 /*
160 * Can't define this in the above enum because PKTSHIFT isn't a constant in 159 * Main body length for sk_buffs used for RX Ethernet packets with
161 * the VF Driver ... 160 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
162 */ 161 * pskb_may_pull() some room.
163#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) 162 */
163 RX_SKB_LEN = 512,
164};
164 165
165/* 166/*
166 * Software state per TX descriptor. 167 * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
1355} 1356}
1356 1357
1357/** 1358/**
1359 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1360 * @gl: the gather list
1361 * @skb_len: size of sk_buff main body if it carries fragments
1362 * @pull_len: amount of data to move to the sk_buff's main body
1363 *
1364 * Builds an sk_buff from the given packet gather list. Returns the
1365 * sk_buff or %NULL if sk_buff allocation failed.
1366 */
1367struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1368 unsigned int skb_len, unsigned int pull_len)
1369{
1370 struct sk_buff *skb;
1371 struct skb_shared_info *ssi;
1372
1373 /*
1374 * If the ingress packet is small enough, allocate an skb large enough
1375 * for all of the data and copy it inline. Otherwise, allocate an skb
1376 * with enough room to pull in the header and reference the rest of
1377 * the data via the skb fragment list.
1378 *
1379 * Below we rely on RX_COPY_THRES being less than the smallest Rx
1380 * buffer size, which is expected since buffers are at least
1381 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
1382 * fragment.
1383 */
1384 if (gl->tot_len <= RX_COPY_THRES) {
1385 /* small packets have only one fragment */
1386 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1387 if (unlikely(!skb))
1388 goto out;
1389 __skb_put(skb, gl->tot_len);
1390 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1391 } else {
1392 skb = alloc_skb(skb_len, GFP_ATOMIC);
1393 if (unlikely(!skb))
1394 goto out;
1395 __skb_put(skb, pull_len);
1396 skb_copy_to_linear_data(skb, gl->va, pull_len);
1397
1398 ssi = skb_shinfo(skb);
1399 ssi->frags[0].page = gl->frags[0].page;
1400 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
1401 ssi->frags[0].size = gl->frags[0].size - pull_len;
1402 if (gl->nfrags > 1)
1403 memcpy(&ssi->frags[1], &gl->frags[1],
1404 (gl->nfrags-1) * sizeof(skb_frag_t));
1405 ssi->nr_frags = gl->nfrags;
1406
1407 skb->len = gl->tot_len;
1408 skb->data_len = skb->len - pull_len;
1409 skb->truesize += skb->data_len;
1410
1411 /* Get a reference for the last page, we don't own it */
1412 get_page(gl->frags[gl->nfrags - 1].page);
1413 }
1414
1415out:
1416 return skb;
1417}
1418
1419/**
1358 * t4vf_pktgl_free - free a packet gather list 1420 * t4vf_pktgl_free - free a packet gather list
1359 * @gl: the gather list 1421 * @gl: the gather list
1360 * 1422 *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1463{ 1525{
1464 struct sk_buff *skb; 1526 struct sk_buff *skb;
1465 struct port_info *pi; 1527 struct port_info *pi;
1466 struct skb_shared_info *ssi;
1467 const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; 1528 const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
1468 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1529 bool csum_ok = pkt->csum_calc && !pkt->err_vec;
1469 unsigned int len = be16_to_cpu(pkt->len);
1470 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1530 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1471 1531
1472 /* 1532 /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1481 } 1541 }
1482 1542
1483 /* 1543 /*
1484 * If the ingress packet is small enough, allocate an skb large enough 1544 * Convert the Packet Gather List into an skb.
1485 * for all of the data and copy it inline. Otherwise, allocate an skb
1486 * with enough room to pull in the header and reference the rest of
1487 * the data via the skb fragment list.
1488 */ 1545 */
1489 if (len <= RX_COPY_THRES) { 1546 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1490 /* small packets have only one fragment */ 1547 if (unlikely(!skb)) {
1491 skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); 1548 t4vf_pktgl_free(gl);
1492 if (!skb) 1549 rxq->stats.rx_drops++;
1493 goto nomem; 1550 return 0;
1494 __skb_put(skb, gl->frags[0].size);
1495 skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
1496 } else {
1497 skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
1498 if (!skb)
1499 goto nomem;
1500 __skb_put(skb, RX_PKT_PULL_LEN);
1501 skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
1502
1503 ssi = skb_shinfo(skb);
1504 ssi->frags[0].page = gl->frags[0].page;
1505 ssi->frags[0].page_offset = (gl->frags[0].page_offset +
1506 RX_PKT_PULL_LEN);
1507 ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
1508 if (gl->nfrags > 1)
1509 memcpy(&ssi->frags[1], &gl->frags[1],
1510 (gl->nfrags-1) * sizeof(skb_frag_t));
1511 ssi->nr_frags = gl->nfrags;
1512 skb->len = len + PKTSHIFT;
1513 skb->data_len = skb->len - RX_PKT_PULL_LEN;
1514 skb->truesize += skb->data_len;
1515
1516 /* Get a reference for the last page, we don't own it */
1517 get_page(gl->frags[gl->nfrags - 1].page);
1518 } 1551 }
1519
1520 __skb_pull(skb, PKTSHIFT); 1552 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1553 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1554 skb_record_rx_queue(skb, rspq->idx);
@@ -1536,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1536 } else 1568 } else
1537 skb_checksum_none_assert(skb); 1569 skb_checksum_none_assert(skb);
1538 1570
1571 /*
1572 * Deliver the packet to the stack.
1573 */
1539 if (unlikely(pkt->vlan_ex)) { 1574 if (unlikely(pkt->vlan_ex)) {
1540 struct vlan_group *grp = pi->vlan_grp; 1575 struct vlan_group *grp = pi->vlan_grp;
1541 1576
@@ -1549,11 +1584,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1549 netif_receive_skb(skb); 1584 netif_receive_skb(skb);
1550 1585
1551 return 0; 1586 return 0;
1552
1553nomem:
1554 t4vf_pktgl_free(gl);
1555 rxq->stats.rx_drops++;
1556 return 0;
1557} 1587}
1558 1588
1559/** 1589/**
@@ -1679,6 +1709,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
1679 } 1709 }
1680 len = RSPD_LEN(len); 1710 len = RSPD_LEN(len);
1681 } 1711 }
1712 gl.tot_len = len;
1682 1713
1683 /* 1714 /*
1684 * Gather packet fragments. 1715 * Gather packet fragments.
@@ -2115,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2115 2146
2116 /* 2147 /*
2117 * Calculate the size of the hardware free list ring plus 2148 * Calculate the size of the hardware free list ring plus
2118 * status page (which the SGE will place at the end of the 2149 * Status Page (which the SGE will place after the end of the
2119 * free list ring) in Egress Queue Units. 2150 * free list ring) in Egress Queue Units.
2120 */ 2151 */
2121 flsz = (fl->size / FL_PER_EQ_UNIT + 2152 flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2212,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2212 struct port_info *pi = netdev_priv(dev); 2243 struct port_info *pi = netdev_priv(dev);
2213 2244
2214 /* 2245 /*
2215 * Calculate the size of the hardware TX Queue (including the 2246 * Calculate the size of the hardware TX Queue (including the Status
2216 * status age on the end) in units of TX Descriptors. 2247 * Page on the end of the TX Queue) in units of TX Descriptors.
2217 */ 2248 */
2218 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2249 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2219 2250
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
235int __devinit t4vf_wait_dev_ready(struct adapter *); 235int __devinit t4vf_wait_dev_ready(struct adapter *);
236int __devinit t4vf_port_init(struct adapter *, int); 236int __devinit t4vf_port_init(struct adapter *, int);
237 237
238int t4vf_fw_reset(struct adapter *);
238int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); 239int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
239int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); 240int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
240 241
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..35fc803a6a04 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
326} 326}
327 327
328/** 328/**
329 * t4vf_fw_reset - issue a reset to FW
330 * @adapter: the adapter
331 *
332 * Issues a reset command to FW. For a Physical Function this would
333 * result in the Firmware resetting all of its state. For a Virtual
334 * Function this just resets the state associated with the VF.
335 */
336int t4vf_fw_reset(struct adapter *adapter)
337{
338 struct fw_reset_cmd cmd;
339
340 memset(&cmd, 0, sizeof(cmd));
341 cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
342 FW_CMD_WRITE);
343 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
344 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
345}
346
347/**
329 * t4vf_query_params - query FW or device parameters 348 * t4vf_query_params - query FW or device parameters
330 * @adapter: the adapter 349 * @adapter: the adapter
331 * @nparams: the number of parameters 350 * @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
995 unsigned int naddr, const u8 **addr, u16 *idx, 1014 unsigned int naddr, const u8 **addr, u16 *idx,
996 u64 *hash, bool sleep_ok) 1015 u64 *hash, bool sleep_ok)
997{ 1016{
998 int i, ret; 1017 int offset, ret = 0;
1018 unsigned nfilters = 0;
1019 unsigned int rem = naddr;
999 struct fw_vi_mac_cmd cmd, rpl; 1020 struct fw_vi_mac_cmd cmd, rpl;
1000 struct fw_vi_mac_exact *p;
1001 size_t len16;
1002 1021
1003 if (naddr > ARRAY_SIZE(cmd.u.exact)) 1022 if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
1004 return -EINVAL; 1023 return -EINVAL;
1005 len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1006 u.exact[naddr]), 16);
1007 1024
1008 memset(&cmd, 0, sizeof(cmd)); 1025 for (offset = 0; offset < naddr; /**/) {
1009 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | 1026 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1010 FW_CMD_REQUEST | 1027 ? rem
1011 FW_CMD_WRITE | 1028 : ARRAY_SIZE(cmd.u.exact));
1012 (free ? FW_CMD_EXEC : 0) | 1029 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1013 FW_VI_MAC_CMD_VIID(viid)); 1030 u.exact[fw_naddr]), 16);
1014 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | 1031 struct fw_vi_mac_exact *p;
1015 FW_CMD_LEN16(len16)); 1032 int i;
1033
1034 memset(&cmd, 0, sizeof(cmd));
1035 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1036 FW_CMD_REQUEST |
1037 FW_CMD_WRITE |
1038 (free ? FW_CMD_EXEC : 0) |
1039 FW_VI_MAC_CMD_VIID(viid));
1040 cmd.freemacs_to_len16 =
1041 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
1042 FW_CMD_LEN16(len16));
1043
1044 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1045 p->valid_to_idx = cpu_to_be16(
1046 FW_VI_MAC_CMD_VALID |
1047 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
1048 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1049 }
1016 1050
1017 for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
1018 p->valid_to_idx =
1019 cpu_to_be16(FW_VI_MAC_CMD_VALID |
1020 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
1021 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
1022 }
1023 1051
1024 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); 1052 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1025 if (ret) 1053 sleep_ok);
1026 return ret; 1054 if (ret && ret != -ENOMEM)
1027 1055 break;
1028 for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { 1056
1029 u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); 1057 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1030 1058 u16 index = FW_VI_MAC_CMD_IDX_GET(
1031 if (idx) 1059 be16_to_cpu(p->valid_to_idx));
1032 idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES 1060
1033 ? 0xffff 1061 if (idx)
1034 : index); 1062 idx[offset+i] =
1035 if (index < FW_CLS_TCAM_NUM_ENTRIES) 1063 (index >= FW_CLS_TCAM_NUM_ENTRIES
1036 ret++; 1064 ? 0xffff
1037 else if (hash) 1065 : index);
1038 *hash |= (1 << hash_mac_addr(addr[i])); 1066 if (index < FW_CLS_TCAM_NUM_ENTRIES)
1067 nfilters++;
1068 else if (hash)
1069 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1070 }
1071
1072 free = false;
1073 offset += fw_naddr;
1074 rem -= fw_naddr;
1039 } 1075 }
1076
1077 /*
1078 * If there were no errors or we merely ran out of room in our MAC
1079 * address arena, return the number of filters actually written.
1080 */
1081 if (ret == 0 || ret == -ENOMEM)
1082 ret = nfilters;
1040 return ret; 1083 return ret;
1041} 1084}
1042 1085
@@ -1257,7 +1300,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1257 */ 1300 */
1258int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) 1301int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1259{ 1302{
1260 struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl; 1303 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
1261 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); 1304 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
1262 1305
1263 switch (opcode) { 1306 switch (opcode) {
@@ -1265,7 +1308,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1265 /* 1308 /*
1266 * Link/module state change message. 1309 * Link/module state change message.
1267 */ 1310 */
1268 const struct fw_port_cmd *port_cmd = (void *)rpl; 1311 const struct fw_port_cmd *port_cmd =
1312 (const struct fw_port_cmd *)rpl;
1269 u32 word; 1313 u32 word;
1270 int action, port_id, link_ok, speed, fc, pidx; 1314 int action, port_id, link_ok, speed, fc, pidx;
1271 1315
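
t4vf_alloc_mac_filt() above applies the same batching inside the mailbox path and adds a deliberate return-value contract: -ENOMEM from a chunk means the exact-match TCAM filled up and the overflow addresses were folded into *hash, so the function still returns the number of filters actually written. A kernel-context sketch of a caller consuming that contract (demo_install_addrs is hypothetical and assumes the driver's own headers; compare set_addr_filters() in cxgb4vf_main.c above):

/* Kernel-context sketch; not a function from this patch. */
static int demo_install_addrs(struct port_info *pi, const u8 **addr,
			      unsigned int naddr, u64 *uhash, bool sleep)
{
	int ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, false,
				      naddr, addr, NULL, uhash, sleep);

	if (ret < 0)	/* hard failure: -ENOMEM was absorbed above */
		return ret;
	/*
	 * ret exact-match filters were written; anything beyond that is
	 * now represented in *uhash and must still be pushed with
	 * t4vf_set_addr_hash(), as set_addr_filters() does.
	 */
	return 0;
}

Note also that both batching loops clear `free` after the first chunk: only the first command may free the previously installed filters, otherwise every later chunk would wipe the ones written just before it.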
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06bf..2d4c4fc1d900 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
1675 platform_set_drvdata(pdev, NULL); 1675 platform_set_drvdata(pdev, NULL);
1676 1676
1677 unregister_netdev(ndev); 1677 unregister_netdev(ndev);
1678 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev)); 1678 dm9000_release_board(pdev, netdev_priv(ndev));
1679 free_netdev(ndev); /* free device structure */ 1679 free_netdev(ndev); /* free device structure */
1680 1680
1681 dev_dbg(&pdev->dev, "released and freed device\n"); 1681 dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index c7e242b69a18..77d08e697b74 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4892,11 +4892,11 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
4892 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 4892 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
4893 u16 cur_agc_value; 4893 u16 cur_agc_value;
4894 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 4894 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
4895 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 4895 static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
4896 { IGP01E1000_PHY_AGC_A, 4896 IGP01E1000_PHY_AGC_A,
4897 IGP01E1000_PHY_AGC_B, 4897 IGP01E1000_PHY_AGC_B,
4898 IGP01E1000_PHY_AGC_C, 4898 IGP01E1000_PHY_AGC_C,
4899 IGP01E1000_PHY_AGC_D 4899 IGP01E1000_PHY_AGC_D
4900 }; 4900 };
4901 /* Read the AGC registers for all channels */ 4901 /* Read the AGC registers for all channels */
4902 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { 4902 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5071,11 +5071,11 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5071{ 5071{
5072 s32 ret_val; 5072 s32 ret_val;
5073 u16 phy_data, phy_saved_data, speed, duplex, i; 5073 u16 phy_data, phy_saved_data, speed, duplex, i;
5074 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 5074 static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
5075 { IGP01E1000_PHY_AGC_PARAM_A, 5075 IGP01E1000_PHY_AGC_PARAM_A,
5076 IGP01E1000_PHY_AGC_PARAM_B, 5076 IGP01E1000_PHY_AGC_PARAM_B,
5077 IGP01E1000_PHY_AGC_PARAM_C, 5077 IGP01E1000_PHY_AGC_PARAM_C,
5078 IGP01E1000_PHY_AGC_PARAM_D 5078 IGP01E1000_PHY_AGC_PARAM_D
5079 }; 5079 };
5080 u16 min_length, max_length; 5080 u16 min_length, max_length;
5081 5081
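
The e1000_hw.c change (and the e1000_param.c one below) marks per-function lookup tables static const, so they live in .rodata instead of being rebuilt on the stack at every call. The effect in miniature, as a runnable user-space sketch (register values are illustrative, demo_* names hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a PHY register read; the values are made up. */
static uint16_t demo_read_phy(uint16_t reg)
{
	return reg & 0xFF;
}

static uint16_t demo_min_agc(void)
{
	/*
	 * static const: emitted once into .rodata and shared by every
	 * call, unlike the automatic arrays the old code re-initialized
	 * on each entry.
	 */
	static const uint16_t agc_regs[4] = { 0x12, 0x22, 0x32, 0x42 };
	uint16_t min = 0xFFFF;

	for (int i = 0; i < 4; i++) {
		uint16_t v = demo_read_phy(agc_regs[i]);
		if (v < min)
			min = v;
	}
	return min;
}

int main(void)
{
	printf("min AGC reading: %u\n", demo_min_agc());
	return 0;
}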
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c3983fc3..491bf2a1babd 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k6-NAPI" 34#define DRV_VERSION "7.3.21-k8-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
485 struct net_device *netdev = adapter->netdev; 485 struct net_device *netdev = adapter->netdev;
486 u32 rctl, tctl; 486 u32 rctl, tctl;
487 487
488 /* signal that we're down so the interrupt handler does not
489 * reschedule our watchdog timer */
490 set_bit(__E1000_DOWN, &adapter->flags);
491 488
492 /* disable receives in the hardware */ 489 /* disable receives in the hardware */
493 rctl = er32(RCTL); 490 rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
508 505
509 e1000_irq_disable(adapter); 506 e1000_irq_disable(adapter);
510 507
508 /*
509 * Setting DOWN must be after irq_disable to prevent
510 * a screaming interrupt. Setting DOWN also prevents
511 * timers and tasks from rescheduling.
512 */
513 set_bit(__E1000_DOWN, &adapter->flags);
514
511 del_timer_sync(&adapter->tx_fifo_stall_timer); 515 del_timer_sync(&adapter->tx_fifo_stall_timer);
512 del_timer_sync(&adapter->watchdog_timer); 516 del_timer_sync(&adapter->watchdog_timer);
513 del_timer_sync(&adapter->phy_info_timer); 517 del_timer_sync(&adapter->phy_info_timer);
@@ -967,11 +971,13 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
967 */ 971 */
968 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 972 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
969 pci_using_dac = 1; 973 pci_using_dac = 1;
970 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
971 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
972 } else { 974 } else {
973 pr_err("No usable DMA config, aborting\n"); 975 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
974 goto err_dma; 976 if (err) {
977 pr_err("No usable DMA config, aborting\n");
978 goto err_dma;
979 }
980 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
975 } 981 }
976 982
977 netdev->netdev_ops = &e1000_netdev_ops; 983 netdev->netdev_ops = &e1000_netdev_ops;
@@ -1425,13 +1431,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1425 int size; 1431 int size;
1426 1432
1427 size = sizeof(struct e1000_buffer) * txdr->count; 1433 size = sizeof(struct e1000_buffer) * txdr->count;
1428 txdr->buffer_info = vmalloc(size); 1434 txdr->buffer_info = vzalloc(size);
1429 if (!txdr->buffer_info) { 1435 if (!txdr->buffer_info) {
1430 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1436 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1431 "ring\n"); 1437 "ring\n");
1432 return -ENOMEM; 1438 return -ENOMEM;
1433 } 1439 }
1434 memset(txdr->buffer_info, 0, size);
1435 1440
1436 /* round up to nearest 4K */ 1441 /* round up to nearest 4K */
1437 1442
@@ -1621,13 +1626,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1621 int size, desc_len; 1626 int size, desc_len;
1622 1627
1623 size = sizeof(struct e1000_buffer) * rxdr->count; 1628 size = sizeof(struct e1000_buffer) * rxdr->count;
1624 rxdr->buffer_info = vmalloc(size); 1629 rxdr->buffer_info = vzalloc(size);
1625 if (!rxdr->buffer_info) { 1630 if (!rxdr->buffer_info) {
1626 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1631 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1627 "ring\n"); 1632 "ring\n");
1628 return -ENOMEM; 1633 return -ENOMEM;
1629 } 1634 }
1630 memset(rxdr->buffer_info, 0, size);
1631 1635
1632 desc_len = sizeof(struct e1000_rx_desc); 1636 desc_len = sizeof(struct e1000_rx_desc);
1633 1637
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 10d8d98bb797..1301eba8b57a 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -352,12 +352,13 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
352 } 352 }
353 { /* Flow Control */ 353 { /* Flow Control */
354 354
355 struct e1000_opt_list fc_list[] = 355 static const struct e1000_opt_list fc_list[] = {
356 {{ E1000_FC_NONE, "Flow Control Disabled" }, 356 { E1000_FC_NONE, "Flow Control Disabled" },
357 { E1000_FC_RX_PAUSE,"Flow Control Receive Only" }, 357 { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
358 { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" }, 358 { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
359 { E1000_FC_FULL, "Flow Control Enabled" }, 359 { E1000_FC_FULL, "Flow Control Enabled" },
360 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; 360 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
361 };
361 362
362 opt = (struct e1000_option) { 363 opt = (struct e1000_option) {
363 .type = list_option, 364 .type = list_option,
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba0..e57e4097ef1b 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,7 @@
52 (ID_LED_DEF1_DEF2)) 52 (ID_LED_DEF1_DEF2))
53 53
54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
55#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
55#define E1000_BASE1000T_STATUS 10 56#define E1000_BASE1000T_STATUS 10
56#define E1000_IDLE_ERROR_COUNT_MASK 0xFF 57#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
57#define E1000_RECEIVE_ERROR_COUNTER 21 58#define E1000_RECEIVE_ERROR_COUNTER 21
@@ -74,6 +75,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
74static s32 e1000_led_on_82574(struct e1000_hw *hw); 75static s32 e1000_led_on_82574(struct e1000_hw *hw);
75static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); 76static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
76static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); 77static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
78static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
79static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
80static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
77 81
78/** 82/**
79 * e1000_init_phy_params_82571 - Init PHY func ptrs. 83 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +111,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
107 case e1000_82574: 111 case e1000_82574:
108 case e1000_82583: 112 case e1000_82583:
109 phy->type = e1000_phy_bm; 113 phy->type = e1000_phy_bm;
114 phy->ops.acquire = e1000_get_hw_semaphore_82574;
115 phy->ops.release = e1000_put_hw_semaphore_82574;
110 break; 116 break;
111 default: 117 default:
112 return -E1000_ERR_PHY; 118 return -E1000_ERR_PHY;
@@ -200,6 +206,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
200 break; 206 break;
201 } 207 }
202 208
209 /* Function Pointers */
210 switch (hw->mac.type) {
211 case e1000_82574:
212 case e1000_82583:
213 nvm->ops.acquire = e1000_get_hw_semaphore_82574;
214 nvm->ops.release = e1000_put_hw_semaphore_82574;
215 break;
216 default:
217 break;
218 }
219
203 return 0; 220 return 0;
204} 221}
205 222
@@ -542,6 +559,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
542 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 559 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
543 ew32(SWSM, swsm); 560 ew32(SWSM, swsm);
544} 561}
562/**
563 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
564 * @hw: pointer to the HW structure
565 *
566 * Acquire the HW semaphore during reset.
567 *
568 **/
569static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
570{
571 u32 extcnf_ctrl;
572 s32 ret_val = 0;
573 s32 i = 0;
574
575 extcnf_ctrl = er32(EXTCNF_CTRL);
576 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
577 do {
578 ew32(EXTCNF_CTRL, extcnf_ctrl);
579 extcnf_ctrl = er32(EXTCNF_CTRL);
580
581 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
582 break;
583
584 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
585
586 msleep(2);
587 i++;
588 } while (i < MDIO_OWNERSHIP_TIMEOUT);
589
590 if (i == MDIO_OWNERSHIP_TIMEOUT) {
591 /* Release semaphores */
592 e1000_put_hw_semaphore_82573(hw);
593 e_dbg("Driver can't access the PHY\n");
594 ret_val = -E1000_ERR_PHY;
595 goto out;
596 }
597
598out:
599 return ret_val;
600}
601
602/**
603 * e1000_put_hw_semaphore_82573 - Release hardware semaphore
604 * @hw: pointer to the HW structure
605 *
606 * Release hardware semaphore used during reset.
607 *
608 **/
609static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
610{
611 u32 extcnf_ctrl;
612
613 extcnf_ctrl = er32(EXTCNF_CTRL);
614 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
615 ew32(EXTCNF_CTRL, extcnf_ctrl);
616}
617
618static DEFINE_MUTEX(swflag_mutex);
619
620/**
621 * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
622 * @hw: pointer to the HW structure
623 *
624 * Acquire the HW semaphore to access the PHY or NVM.
625 *
626 **/
627static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
628{
629 s32 ret_val;
630
631 mutex_lock(&swflag_mutex);
632 ret_val = e1000_get_hw_semaphore_82573(hw);
633 if (ret_val)
634 mutex_unlock(&swflag_mutex);
635 return ret_val;
636}
637
638/**
639 * e1000_put_hw_semaphore_82574 - Release hardware semaphore
640 * @hw: pointer to the HW structure
641 *
642 * Release hardware semaphore used to access the PHY or NVM
643 *
644 **/
645static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
646{
647 e1000_put_hw_semaphore_82573(hw);
648 mutex_unlock(&swflag_mutex);
649}
545 650
546/** 651/**
547 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 652 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
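The helpers added in the hunk above layer two locks for 82574/82583, where PHY and NVM accesses share one hardware ownership bit: a driver-wide mutex serializes software callers first, and only the mutex holder polls for EXTCNF_CTRL ownership. Release runs in reverse order, and the mutex is dropped on acquisition failure so no caller can leak it. A sketch of the pairing, where hw_take_ownership()/hw_drop_ownership() are hypothetical stand-ins for the EXTCNF_CTRL polling loop:

	static DEFINE_MUTEX(swflag_mutex);

	static int acquire_dev(struct e1000_hw *hw)
	{
		int ret;

		mutex_lock(&swflag_mutex);	/* serialize software contexts */
		ret = hw_take_ownership(hw);	/* then arbitrate with firmware */
		if (ret)
			mutex_unlock(&swflag_mutex); /* never hold on failure */
		return ret;
	}

	static void release_dev(struct e1000_hw *hw)
	{
		hw_drop_ownership(hw);		/* reverse order on release */
		mutex_unlock(&swflag_mutex);
	}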
@@ -562,8 +667,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
562 667
563 switch (hw->mac.type) { 668 switch (hw->mac.type) {
564 case e1000_82573: 669 case e1000_82573:
565 case e1000_82574:
566 case e1000_82583:
567 break; 670 break;
568 default: 671 default:
569 ret_val = e1000e_acquire_nvm(hw); 672 ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +956,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
853 **/ 956 **/
854static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 957static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
855{ 958{
856 u32 ctrl, extcnf_ctrl, ctrl_ext, icr; 959 u32 ctrl, ctrl_ext, icr;
857 s32 ret_val; 960 s32 ret_val;
858 u16 i = 0;
859 961
860 /* 962 /*
861 * Prevent the PCI-E bus from sticking if there is no TLP connection 963 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +982,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
880 */ 982 */
881 switch (hw->mac.type) { 983 switch (hw->mac.type) {
882 case e1000_82573: 984 case e1000_82573:
985 ret_val = e1000_get_hw_semaphore_82573(hw);
986 break;
883 case e1000_82574: 987 case e1000_82574:
884 case e1000_82583: 988 case e1000_82583:
885 extcnf_ctrl = er32(EXTCNF_CTRL); 989 ret_val = e1000_get_hw_semaphore_82574(hw);
886 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
887
888 do {
889 ew32(EXTCNF_CTRL, extcnf_ctrl);
890 extcnf_ctrl = er32(EXTCNF_CTRL);
891
892 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
893 break;
894
895 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
896
897 msleep(2);
898 i++;
899 } while (i < MDIO_OWNERSHIP_TIMEOUT);
900 break; 990 break;
901 default: 991 default:
902 break; 992 break;
903 } 993 }
994 if (ret_val)
995 e_dbg("Cannot acquire MDIO ownership\n");
904 996
905 ctrl = er32(CTRL); 997 ctrl = er32(CTRL);
906 998
907 e_dbg("Issuing a global reset to MAC\n"); 999 e_dbg("Issuing a global reset to MAC\n");
908 ew32(CTRL, ctrl | E1000_CTRL_RST); 1000 ew32(CTRL, ctrl | E1000_CTRL_RST);
909 1001
1002 /* Must release MDIO ownership and mutex after MAC reset. */
1003 switch (hw->mac.type) {
1004 case e1000_82574:
1005 case e1000_82583:
1006 e1000_put_hw_semaphore_82574(hw);
1007 break;
1008 default:
1009 break;
1010 }
1011
910 if (hw->nvm.type == e1000_nvm_flash_hw) { 1012 if (hw->nvm.type == e1000_nvm_flash_hw) {
911 udelay(10); 1013 udelay(10);
912 ctrl_ext = er32(CTRL_EXT); 1014 ctrl_ext = er32(CTRL_EXT);
@@ -1402,6 +1504,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1402 u32 rxcw; 1504 u32 rxcw;
1403 u32 ctrl; 1505 u32 ctrl;
1404 u32 status; 1506 u32 status;
1507 u32 txcw;
1508 u32 i;
1405 s32 ret_val = 0; 1509 s32 ret_val = 0;
1406 1510
1407 ctrl = er32(CTRL); 1511 ctrl = er32(CTRL);
@@ -1422,8 +1526,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1422 e1000_serdes_link_autoneg_progress; 1526 e1000_serdes_link_autoneg_progress;
1423 mac->serdes_has_link = false; 1527 mac->serdes_has_link = false;
1424 e_dbg("AN_UP -> AN_PROG\n"); 1528 e_dbg("AN_UP -> AN_PROG\n");
1529 } else {
1530 mac->serdes_has_link = true;
1425 } 1531 }
1426 break; 1532 break;
1427 1533
1428 case e1000_serdes_link_forced_up: 1534 case e1000_serdes_link_forced_up:
1429 /* 1535 /*
@@ -1431,8 +1537,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1431 * auto-negotiation in the TXCW register and disable 1537 * auto-negotiation in the TXCW register and disable
1432 * forced link in the Device Control register in an 1538 * forced link in the Device Control register in an
1433 * attempt to auto-negotiate with our link partner. 1539 * attempt to auto-negotiate with our link partner.
1540 * If the partner code word is null, stop forcing
1541 * and restart auto negotiation.
1434 */ 1542 */
1435 if (rxcw & E1000_RXCW_C) { 1543 if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1436 /* Enable autoneg, and unforce link up */ 1544 /* Enable autoneg, and unforce link up */
1437 ew32(TXCW, mac->txcw); 1545 ew32(TXCW, mac->txcw);
1438 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1546 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1440,6 +1548,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1440 e1000_serdes_link_autoneg_progress; 1548 e1000_serdes_link_autoneg_progress;
1441 mac->serdes_has_link = false; 1549 mac->serdes_has_link = false;
1442 e_dbg("FORCED_UP -> AN_PROG\n"); 1550 e_dbg("FORCED_UP -> AN_PROG\n");
1551 } else {
1552 mac->serdes_has_link = true;
1443 } 1553 }
1444 break; 1554 break;
1445 1555
@@ -1495,6 +1605,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1495 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1605 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1496 mac->serdes_link_state = 1606 mac->serdes_link_state =
1497 e1000_serdes_link_autoneg_progress; 1607 e1000_serdes_link_autoneg_progress;
1608 mac->serdes_has_link = false;
1498 e_dbg("DOWN -> AN_PROG\n"); 1609 e_dbg("DOWN -> AN_PROG\n");
1499 break; 1610 break;
1500 } 1611 }
@@ -1505,16 +1616,32 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1505 e_dbg("ANYSTATE -> DOWN\n"); 1616 e_dbg("ANYSTATE -> DOWN\n");
1506 } else { 1617 } else {
1507 /* 1618 /*
1508 * We have sync, and can tolerate one invalid (IV) 1619 * Check several times, if Sync and Config
1509 * codeword before declaring link down, so reread 1620 * both are consistently 1 then simply ignore
1510 * to look again. 1621 * the Invalid bit and restart Autoneg
1511 */ 1622 */
1512 udelay(10); 1623 for (i = 0; i < AN_RETRY_COUNT; i++) {
1513 rxcw = er32(RXCW); 1624 udelay(10);
1514 if (rxcw & E1000_RXCW_IV) { 1625 rxcw = er32(RXCW);
1515 mac->serdes_link_state = e1000_serdes_link_down; 1626 if ((rxcw & E1000_RXCW_IV) &&
1627 !((rxcw & E1000_RXCW_SYNCH) &&
1628 (rxcw & E1000_RXCW_C))) {
1629 mac->serdes_has_link = false;
1630 mac->serdes_link_state =
1631 e1000_serdes_link_down;
1632 e_dbg("ANYSTATE -> DOWN\n");
1633 break;
1634 }
1635 }
1636
1637 if (i == AN_RETRY_COUNT) {
1638 txcw = er32(TXCW);
1639 txcw |= E1000_TXCW_ANE;
1640 ew32(TXCW, txcw);
1641 mac->serdes_link_state =
1642 e1000_serdes_link_autoneg_progress;
1516 mac->serdes_has_link = false; 1643 mac->serdes_has_link = false;
1517 e_dbg("ANYSTATE -> DOWN\n"); 1644 e_dbg("ANYSTATE -> AN_PROG\n");
1518 } 1645 }
1519 } 1646 }
1520 } 1647 }
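The reworked serdes state machine no longer drops the link on a single Invalid codeword: it re-reads RXCW up to AN_RETRY_COUNT times, declares link down only when an Invalid bit appears without SYNCH and C both set, and if every retry passes it restarts autonegotiation via TXCW instead. A stripped-down sketch of that retry loop, with read_rxcw(), restart_autoneg() and mark_link_down() as hypothetical stand-ins for the register accesses:

	u32 rxcw;
	int i;

	for (i = 0; i < AN_RETRY_COUNT; i++) {
		udelay(10);
		rxcw = read_rxcw(hw);		/* stand-in for er32(RXCW) */
		if ((rxcw & E1000_RXCW_IV) &&
		    !((rxcw & E1000_RXCW_SYNCH) && (rxcw & E1000_RXCW_C)))
			break;			/* genuinely bad codeword */
	}
	if (i == AN_RETRY_COUNT)
		restart_autoneg(hw);	/* IV was transient: set E1000_TXCW_ANE */
	else
		mark_link_down(hw);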
@@ -1897,7 +2024,7 @@ struct e1000_info e1000_82574_info = {
1897 | FLAG_HAS_AMT 2024 | FLAG_HAS_AMT
1898 | FLAG_HAS_CTRLEXT_ON_LOAD, 2025 | FLAG_HAS_CTRLEXT_ON_LOAD,
1899 .flags2 = FLAG2_CHECK_PHY_HANG, 2026 .flags2 = FLAG2_CHECK_PHY_HANG,
1900 .pba = 36, 2027 .pba = 32,
1901 .max_hw_frame_size = DEFAULT_JUMBO, 2028 .max_hw_frame_size = DEFAULT_JUMBO,
1902 .get_variants = e1000_get_variants_82571, 2029 .get_variants = e1000_get_variants_82571,
1903 .mac_ops = &e82571_mac_ops, 2030 .mac_ops = &e82571_mac_ops,
@@ -1914,7 +2041,7 @@ struct e1000_info e1000_82583_info = {
1914 | FLAG_HAS_SMART_POWER_DOWN 2041 | FLAG_HAS_SMART_POWER_DOWN
1915 | FLAG_HAS_AMT 2042 | FLAG_HAS_AMT
1916 | FLAG_HAS_CTRLEXT_ON_LOAD, 2043 | FLAG_HAS_CTRLEXT_ON_LOAD,
1917 .pba = 36, 2044 .pba = 32,
1918 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 2045 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1919 .get_variants = e1000_get_variants_82571, 2046 .get_variants = e1000_get_variants_82571,
1920 .mac_ops = &e82571_mac_ops, 2047 .mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..7245dc2e0b7c 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -488,6 +488,9 @@
488#define E1000_BLK_PHY_RESET 12 488#define E1000_BLK_PHY_RESET 12
489#define E1000_ERR_SWFW_SYNC 13 489#define E1000_ERR_SWFW_SYNC 13
490#define E1000_NOT_IMPLEMENTED 14 490#define E1000_NOT_IMPLEMENTED 14
491#define E1000_ERR_INVALID_ARGUMENT 16
492#define E1000_ERR_NO_SPACE 17
493#define E1000_ERR_NVM_PBA_SECTION 18
491 494
492/* Loop limit on how long we wait for auto-negotiation to complete */ 495/* Loop limit on how long we wait for auto-negotiation to complete */
493#define FIBER_LINK_UP_LIMIT 50 496#define FIBER_LINK_UP_LIMIT 50
@@ -516,6 +519,7 @@
516#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ 519#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
517 520
518/* Receive Configuration Word */ 521/* Receive Configuration Word */
522#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
519#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ 523#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
520#define E1000_RXCW_C 0x20000000 /* Receive config */ 524#define E1000_RXCW_C 0x20000000 /* Receive config */
521#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ 525#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
@@ -649,13 +653,16 @@
649/* Mask bits for fields in Word 0x03 of the EEPROM */ 653/* Mask bits for fields in Word 0x03 of the EEPROM */
650#define NVM_COMPAT_LOM 0x0800 654#define NVM_COMPAT_LOM 0x0800
651 655
656/* length of string needed to store PBA number */
657#define E1000_PBANUM_LENGTH 11
658
652/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ 659/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
653#define NVM_SUM 0xBABA 660#define NVM_SUM 0xBABA
654 661
655/* PBA (printed board assembly) number words */ 662/* PBA (printed board assembly) number words */
656#define NVM_PBA_OFFSET_0 8 663#define NVM_PBA_OFFSET_0 8
657#define NVM_PBA_OFFSET_1 9 664#define NVM_PBA_OFFSET_1 9
658 665#define NVM_PBA_PTR_GUARD 0xFAFA
659#define NVM_WORD_SIZE_BASE_SHIFT 6 666#define NVM_WORD_SIZE_BASE_SHIFT 6
660 667
661/* NVM Commands - SPI */ 668/* NVM Commands - SPI */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index fdc67fead4ea..2c913b8e9116 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -482,6 +482,7 @@ extern const char e1000e_driver_version[];
482 482
483extern void e1000e_check_options(struct e1000_adapter *adapter); 483extern void e1000e_check_options(struct e1000_adapter *adapter);
484extern void e1000e_set_ethtool_ops(struct net_device *netdev); 484extern void e1000e_set_ethtool_ops(struct net_device *netdev);
485extern void e1000e_led_blink_task(struct work_struct *work);
485 486
486extern int e1000e_up(struct e1000_adapter *adapter); 487extern int e1000e_up(struct e1000_adapter *adapter);
487extern void e1000e_down(struct e1000_adapter *adapter); 488extern void e1000e_down(struct e1000_adapter *adapter);
@@ -513,7 +514,8 @@ extern struct e1000_info e1000_pch_info;
513extern struct e1000_info e1000_pch2_info; 514extern struct e1000_info e1000_pch2_info;
514extern struct e1000_info e1000_es2_info; 515extern struct e1000_info e1000_es2_info;
515 516
516extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 517extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
518 u32 pba_num_size);
517 519
518extern s32 e1000e_commit_phy(struct e1000_hw *hw); 520extern s32 e1000e_commit_phy(struct e1000_hw *hw);
519 521
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 8984d165a39b..39349d6dcd0b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -194,20 +194,6 @@ static int e1000_get_settings(struct net_device *netdev,
194 return 0; 194 return 0;
195} 195}
196 196
197static u32 e1000_get_link(struct net_device *netdev)
198{
199 struct e1000_adapter *adapter = netdev_priv(netdev);
200 struct e1000_hw *hw = &adapter->hw;
201
202 /*
203 * Avoid touching hardware registers when possible, otherwise
204 * link negotiation can get messed up when user-level scripts
205 * are rapidly polling the driver to see if link is up.
206 */
207 return netif_running(netdev) ? netif_carrier_ok(netdev) :
208 !!(er32(STATUS) & E1000_STATUS_LU);
209}
210
211static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 197static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
212{ 198{
213 struct e1000_mac_info *mac = &adapter->hw.mac; 199 struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -1263,6 +1249,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1263 u32 ctrl_reg = 0; 1249 u32 ctrl_reg = 0;
1264 u32 stat_reg = 0; 1250 u32 stat_reg = 0;
1265 u16 phy_reg = 0; 1251 u16 phy_reg = 0;
1252 s32 ret_val = 0;
1266 1253
1267 hw->mac.autoneg = 0; 1254 hw->mac.autoneg = 0;
1268 1255
@@ -1322,7 +1309,13 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1322 case e1000_phy_82577: 1309 case e1000_phy_82577:
1323 case e1000_phy_82578: 1310 case e1000_phy_82578:
1324 /* Workaround: K1 must be disabled for stable 1Gbps operation */ 1311 /* Workaround: K1 must be disabled for stable 1Gbps operation */
1312 ret_val = hw->phy.ops.acquire(hw);
1313 if (ret_val) {
1314 e_err("Cannot setup 1Gbps loopback.\n");
1315 return ret_val;
1316 }
1325 e1000_configure_k1_ich8lan(hw, false); 1317 e1000_configure_k1_ich8lan(hw, false);
1318 hw->phy.ops.release(hw);
1326 break; 1319 break;
1327 case e1000_phy_82579: 1320 case e1000_phy_82579:
1328 /* Disable PHY energy detect power down */ 1321 /* Disable PHY energy detect power down */
@@ -1860,7 +1853,7 @@ static int e1000_set_wol(struct net_device *netdev,
1860/* bit defines for adapter->led_status */ 1853/* bit defines for adapter->led_status */
1861#define E1000_LED_ON 0 1854#define E1000_LED_ON 0
1862 1855
1863static void e1000e_led_blink_task(struct work_struct *work) 1856void e1000e_led_blink_task(struct work_struct *work)
1864{ 1857{
1865 struct e1000_adapter *adapter = container_of(work, 1858 struct e1000_adapter *adapter = container_of(work,
1866 struct e1000_adapter, led_blink_task); 1859 struct e1000_adapter, led_blink_task);
@@ -1892,7 +1885,6 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1892 (hw->mac.type == e1000_pch2lan) || 1885 (hw->mac.type == e1000_pch2lan) ||
1893 (hw->mac.type == e1000_82583) || 1886 (hw->mac.type == e1000_82583) ||
1894 (hw->mac.type == e1000_82574)) { 1887 (hw->mac.type == e1000_82574)) {
1895 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
1896 if (!adapter->blink_timer.function) { 1888 if (!adapter->blink_timer.function) {
1897 init_timer(&adapter->blink_timer); 1889 init_timer(&adapter->blink_timer);
1898 adapter->blink_timer.function = 1890 adapter->blink_timer.function =
@@ -1986,6 +1978,9 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1986 p = (char *) adapter + 1978 p = (char *) adapter +
1987 e1000_gstrings_stats[i].stat_offset; 1979 e1000_gstrings_stats[i].stat_offset;
1988 break; 1980 break;
1981 default:
1982 data[i] = 0;
1983 continue;
1989 } 1984 }
1990 1985
1991 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1986 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
@@ -2024,7 +2019,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
2024 .get_msglevel = e1000_get_msglevel, 2019 .get_msglevel = e1000_get_msglevel,
2025 .set_msglevel = e1000_set_msglevel, 2020 .set_msglevel = e1000_set_msglevel,
2026 .nway_reset = e1000_nway_reset, 2021 .nway_reset = e1000_nway_reset,
2027 .get_link = e1000_get_link, 2022 .get_link = ethtool_op_get_link,
2028 .get_eeprom_len = e1000_get_eeprom_len, 2023 .get_eeprom_len = e1000_get_eeprom_len,
2029 .get_eeprom = e1000_get_eeprom, 2024 .get_eeprom = e1000_get_eeprom,
2030 .set_eeprom = e1000_set_eeprom, 2025 .set_eeprom = e1000_set_eeprom,
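Dropping the private get_link in favor of ethtool_op_get_link keeps the no-register-touch behaviour the removed comment asked for: the generic op simply reports the carrier state that the driver's watchdog already maintains. Paraphrased, the core helper amounts to:

	u32 ethtool_op_get_link(struct net_device *dev)
	{
		/* carrier is kept current by the driver; no MMIO when polled */
		return netif_carrier_ok(dev) ? 1 : 0;
	}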
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e3374d9a2472..5080372b0fd7 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,12 +338,17 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
338 } 338 }
339 339
340 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
341 ret_val = e1000e_get_phy_id(hw); 341 switch (hw->mac.type) {
342 if (ret_val) 342 default:
343 goto out; 343 ret_val = e1000e_get_phy_id(hw);
344 if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) { 344 if (ret_val)
345 goto out;
346 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
347 break;
348 /* fall-through */
349 case e1000_pch2lan:
345 /* 350 /*
346 * In case the PHY needs to be in mdio slow mode (eg. 82577), 351 * In case the PHY needs to be in mdio slow mode,
347 * set slow mode and try to get the PHY id again. 352 * set slow mode and try to get the PHY id again.
348 */ 353 */
349 ret_val = e1000_set_mdio_slow_mode_hv(hw); 354 ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -352,6 +357,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
352 ret_val = e1000e_get_phy_id(hw); 357 ret_val = e1000e_get_phy_id(hw);
353 if (ret_val) 358 if (ret_val)
354 goto out; 359 goto out;
360 break;
355 } 361 }
356 phy->type = e1000e_get_phy_type_from_id(phy->id); 362 phy->type = e1000e_get_phy_type_from_id(phy->id);
357 363
@@ -3591,7 +3597,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3591 ew32(PHY_CTRL, phy_ctrl); 3597 ew32(PHY_CTRL, phy_ctrl);
3592 3598
3593 if (hw->mac.type >= e1000_pchlan) { 3599 if (hw->mac.type >= e1000_pchlan) {
3594 e1000_oem_bits_config_ich8lan(hw, true); 3600 e1000_oem_bits_config_ich8lan(hw, false);
3595 ret_val = hw->phy.ops.acquire(hw); 3601 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val) 3602 if (ret_val)
3597 return; 3603 return;
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 0fd4eb5ac5fb..8377523c054a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2139,6 +2139,119 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2139} 2139}
2140 2140
2141/** 2141/**
2142 * e1000_read_pba_string_generic - Read device part number
2143 * @hw: pointer to the HW structure
2144 * @pba_num: pointer to device part number
2145 * @pba_num_size: size of part number buffer
2146 *
2147 * Reads the product board assembly (PBA) number from the EEPROM and stores
2148 * the value in pba_num.
2149 **/
2150s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
2151 u32 pba_num_size)
2152{
2153 s32 ret_val;
2154 u16 nvm_data;
2155 u16 pba_ptr;
2156 u16 offset;
2157 u16 length;
2158
2159 if (pba_num == NULL) {
2160 e_dbg("PBA string buffer was null\n");
2161 ret_val = E1000_ERR_INVALID_ARGUMENT;
2162 goto out;
2163 }
2164
2165 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2166 if (ret_val) {
2167 e_dbg("NVM Read Error\n");
2168 goto out;
2169 }
2170
2171 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
2172 if (ret_val) {
2173 e_dbg("NVM Read Error\n");
2174 goto out;
2175 }
2176
2177 /*
2178 * if nvm_data is not ptr guard the PBA must be in legacy format which
2179 * means pba_ptr is actually our second data word for the PBA number
2180 * and we can decode it into an ascii string
2181 */
2182 if (nvm_data != NVM_PBA_PTR_GUARD) {
2183 e_dbg("NVM PBA number is not stored as string\n");
2184
2185 /* we will need 11 characters to store the PBA */
2186 if (pba_num_size < 11) {
2187 e_dbg("PBA string buffer too small\n");
2188 return E1000_ERR_NO_SPACE;
2189 }
2190
2191 /* extract hex string from data and pba_ptr */
2192 pba_num[0] = (nvm_data >> 12) & 0xF;
2193 pba_num[1] = (nvm_data >> 8) & 0xF;
2194 pba_num[2] = (nvm_data >> 4) & 0xF;
2195 pba_num[3] = nvm_data & 0xF;
2196 pba_num[4] = (pba_ptr >> 12) & 0xF;
2197 pba_num[5] = (pba_ptr >> 8) & 0xF;
2198 pba_num[6] = '-';
2199 pba_num[7] = 0;
2200 pba_num[8] = (pba_ptr >> 4) & 0xF;
2201 pba_num[9] = pba_ptr & 0xF;
2202
2203 /* put a null character on the end of our string */
2204 pba_num[10] = '\0';
2205
2206 /* switch all the data but the '-' to hex char */
2207 for (offset = 0; offset < 10; offset++) {
2208 if (pba_num[offset] < 0xA)
2209 pba_num[offset] += '0';
2210 else if (pba_num[offset] < 0x10)
2211 pba_num[offset] += 'A' - 0xA;
2212 }
2213
2214 goto out;
2215 }
2216
2217 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
2218 if (ret_val) {
2219 e_dbg("NVM Read Error\n");
2220 goto out;
2221 }
2222
2223 if (length == 0xFFFF || length == 0) {
2224 e_dbg("NVM PBA number section invalid length\n");
2225 ret_val = E1000_ERR_NVM_PBA_SECTION;
2226 goto out;
2227 }
2228 /* check if pba_num buffer is big enough */
2229 if (pba_num_size < (((u32)length * 2) - 1)) {
2230 e_dbg("PBA string buffer too small\n");
2231 ret_val = E1000_ERR_NO_SPACE;
2232 goto out;
2233 }
2234
2235 /* trim pba length from start of string */
2236 pba_ptr++;
2237 length--;
2238
2239 for (offset = 0; offset < length; offset++) {
2240 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
2241 if (ret_val) {
2242 e_dbg("NVM Read Error\n");
2243 goto out;
2244 }
2245 pba_num[offset * 2] = (u8)(nvm_data >> 8);
2246 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
2247 }
2248 pba_num[offset * 2] = '\0';
2249
2250out:
2251 return ret_val;
2252}
2253
2254/**
2142 * e1000_read_mac_addr_generic - Read device MAC address 2255 * e1000_read_mac_addr_generic - Read device MAC address
2143 * @hw: pointer to the HW structure 2256 * @hw: pointer to the HW structure
2144 * 2257 *
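e1000_read_pba_string_generic() above handles both NVM layouts: when word 0x8 is not the 0xFAFA pointer guard, the two words are the legacy raw PBA and are expanded nibble-by-nibble into hex characters (for example, words 0x1234 and 0x56A9 decode to "123456-0A9"), which is why E1000_PBANUM_LENGTH is 11 — ten characters plus the NUL; with the guard present, word 0x9 points at a length-prefixed string section copied out two characters per word. A caller-side sketch:

	u8 pba[E1000_PBANUM_LENGTH];

	if (e1000_read_pba_string_generic(hw, pba, sizeof(pba)))
		strcpy(pba, "Unknown");	/* same fallback netdev.c adopts below */
	pr_info("PBA No: %s\n", pba);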
@@ -2579,25 +2692,3 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2579out: 2692out:
2580 return ret_val; 2693 return ret_val;
2581} 2694}
2582
2583s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2584{
2585 s32 ret_val;
2586 u16 nvm_data;
2587
2588 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2589 if (ret_val) {
2590 e_dbg("NVM Read Error\n");
2591 return ret_val;
2592 }
2593 *pba_num = (u32)(nvm_data << 16);
2594
2595 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2596 if (ret_val) {
2597 e_dbg("NVM Read Error\n");
2598 return ret_val;
2599 }
2600 *pba_num |= nvm_data;
2601
2602 return 0;
2603}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..02d093d1dd5c 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
54 54
55#define DRV_EXTRAVERSION "-k2" 55#define DRV_EXTRAVERSION "-k2"
56 56
57#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION 57#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
58char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
59const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
60 60
@@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2059 int err = -ENOMEM, size; 2059 int err = -ENOMEM, size;
2060 2060
2061 size = sizeof(struct e1000_buffer) * tx_ring->count; 2061 size = sizeof(struct e1000_buffer) * tx_ring->count;
2062 tx_ring->buffer_info = vmalloc(size); 2062 tx_ring->buffer_info = vzalloc(size);
2063 if (!tx_ring->buffer_info) 2063 if (!tx_ring->buffer_info)
2064 goto err; 2064 goto err;
2065 memset(tx_ring->buffer_info, 0, size);
2066 2065
2067 /* round up to nearest 4K */ 2066 /* round up to nearest 4K */
2068 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2067 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2095 int i, size, desc_len, err = -ENOMEM; 2094 int i, size, desc_len, err = -ENOMEM;
2096 2095
2097 size = sizeof(struct e1000_buffer) * rx_ring->count; 2096 size = sizeof(struct e1000_buffer) * rx_ring->count;
2098 rx_ring->buffer_info = vmalloc(size); 2097 rx_ring->buffer_info = vzalloc(size);
2099 if (!rx_ring->buffer_info) 2098 if (!rx_ring->buffer_info)
2100 goto err; 2099 goto err;
2101 memset(rx_ring->buffer_info, 0, size);
2102 2100
2103 for (i = 0; i < rx_ring->count; i++) { 2101 for (i = 0; i < rx_ring->count; i++) {
2104 buffer_info = &rx_ring->buffer_info[i]; 2102 buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2130,7 @@ err_pages:
2132 } 2130 }
2133err: 2131err:
2134 vfree(rx_ring->buffer_info); 2132 vfree(rx_ring->buffer_info);
2135 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2133 e_err("Unable to allocate memory for the receive descriptor ring\n");
2136 return err; 2134 return err;
2137} 2135}
2138 2136
@@ -4595,7 +4593,7 @@ dma_error:
4595 i += tx_ring->count; 4593 i += tx_ring->count;
4596 i--; 4594 i--;
4597 buffer_info = &tx_ring->buffer_info[i]; 4595 buffer_info = &tx_ring->buffer_info[i];
4598 e1000_put_txbuf(adapter, buffer_info);; 4596 e1000_put_txbuf(adapter, buffer_info);
4599 } 4597 }
4600 4598
4601 return 0; 4599 return 0;
@@ -4631,7 +4629,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4631 4629
4632 i = tx_ring->next_to_use; 4630 i = tx_ring->next_to_use;
4633 4631
4634 while (count--) { 4632 do {
4635 buffer_info = &tx_ring->buffer_info[i]; 4633 buffer_info = &tx_ring->buffer_info[i];
4636 tx_desc = E1000_TX_DESC(*tx_ring, i); 4634 tx_desc = E1000_TX_DESC(*tx_ring, i);
4637 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4635 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4640,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4642 i++; 4640 i++;
4643 if (i == tx_ring->count) 4641 if (i == tx_ring->count)
4644 i = 0; 4642 i = 0;
4645 } 4643 } while (--count > 0);
4646 4644
4647 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4645 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4648 4646
@@ -5465,6 +5463,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
5465} 5463}
5466 5464
5467#ifdef CONFIG_NET_POLL_CONTROLLER 5465#ifdef CONFIG_NET_POLL_CONTROLLER
5466
5467static irqreturn_t e1000_intr_msix(int irq, void *data)
5468{
5469 struct net_device *netdev = data;
5470 struct e1000_adapter *adapter = netdev_priv(netdev);
5471 int vector, msix_irq;
5472
5473 if (adapter->msix_entries) {
5474 vector = 0;
5475 msix_irq = adapter->msix_entries[vector].vector;
5476 disable_irq(msix_irq);
5477 e1000_intr_msix_rx(msix_irq, netdev);
5478 enable_irq(msix_irq);
5479
5480 vector++;
5481 msix_irq = adapter->msix_entries[vector].vector;
5482 disable_irq(msix_irq);
5483 e1000_intr_msix_tx(msix_irq, netdev);
5484 enable_irq(msix_irq);
5485
5486 vector++;
5487 msix_irq = adapter->msix_entries[vector].vector;
5488 disable_irq(msix_irq);
5489 e1000_msix_other(msix_irq, netdev);
5490 enable_irq(msix_irq);
5491 }
5492
5493 return IRQ_HANDLED;
5494}
5495
5468/* 5496/*
5469 * Polling 'interrupt' - used by things like netconsole to send skbs 5497 * Polling 'interrupt' - used by things like netconsole to send skbs
5470 * without having to re-enable interrupts. It's not called while 5498 * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5502,21 @@ static void e1000_netpoll(struct net_device *netdev)
5474{ 5502{
5475 struct e1000_adapter *adapter = netdev_priv(netdev); 5503 struct e1000_adapter *adapter = netdev_priv(netdev);
5476 5504
5477 disable_irq(adapter->pdev->irq); 5505 switch (adapter->int_mode) {
5478 e1000_intr(adapter->pdev->irq, netdev); 5506 case E1000E_INT_MODE_MSIX:
5479 5507 e1000_intr_msix(adapter->pdev->irq, netdev);
5480 enable_irq(adapter->pdev->irq); 5508 break;
5509 case E1000E_INT_MODE_MSI:
5510 disable_irq(adapter->pdev->irq);
5511 e1000_intr_msi(adapter->pdev->irq, netdev);
5512 enable_irq(adapter->pdev->irq);
5513 break;
5514 default: /* E1000E_INT_MODE_LEGACY */
5515 disable_irq(adapter->pdev->irq);
5516 e1000_intr(adapter->pdev->irq, netdev);
5517 enable_irq(adapter->pdev->irq);
5518 break;
5519 }
5481} 5520}
5482#endif 5521#endif
5483 5522
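e1000_netpoll() is the driver's ndo_poll_controller hook: netconsole uses it to push skbs when normal interrupt delivery may be unavailable, and the rework above makes it honour whichever of MSI-X, MSI or legacy interrupt mode the adapter actually negotiated instead of always faking a legacy IRQ. The hook is wired through net_device_ops in the standard way (other ops elided):

	static const struct net_device_ops nic_netdev_ops = {
		/* .ndo_open / .ndo_stop / .ndo_start_xmit ... */
	#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller	= e1000_netpoll,
	#endif
	};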
@@ -5587,7 +5626,8 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5587{ 5626{
5588 struct e1000_hw *hw = &adapter->hw; 5627 struct e1000_hw *hw = &adapter->hw;
5589 struct net_device *netdev = adapter->netdev; 5628 struct net_device *netdev = adapter->netdev;
5590 u32 pba_num; 5629 u32 ret_val;
5630 u8 pba_str[E1000_PBANUM_LENGTH];
5591 5631
5592 /* print bus type/speed/width info */ 5632 /* print bus type/speed/width info */
5593 e_info("(PCI Express:2.5GB/s:%s) %pM\n", 5633 e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5638,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5598 netdev->dev_addr); 5638 netdev->dev_addr);
5599 e_info("Intel(R) PRO/%s Network Connection\n", 5639 e_info("Intel(R) PRO/%s Network Connection\n",
5600 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); 5640 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5601 e1000e_read_pba_num(hw, &pba_num); 5641 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5602 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 5642 E1000_PBANUM_LENGTH);
5603 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); 5643 if (ret_val)
5644 strcpy(pba_str, "Unknown");
5645 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5646 hw->mac.type, hw->phy.type, pba_str);
5604} 5647}
5605 5648
5606static void e1000_eeprom_checks(struct e1000_adapter *adapter) 5649static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5864,6 +5907,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5864 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 5907 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5865 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 5908 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5866 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 5909 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5910 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
5867 5911
5868 /* Initialize link parameters. User can change them with ethtool */ 5912 /* Initialize link parameters. User can change them with ethtool */
5869 adapter->hw.mac.autoneg = 1; 5913 adapter->hw.mac.autoneg = 1;
@@ -5984,8 +6028,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5984 bool down = test_bit(__E1000_DOWN, &adapter->state); 6028 bool down = test_bit(__E1000_DOWN, &adapter->state);
5985 6029
5986 /* 6030 /*
5987 * flush_scheduled work may reschedule our watchdog task, so 6031 * The timers may be rescheduled, so explicitly disable them
5988 * explicitly disable watchdog tasks from being rescheduled 6032 * from being rescheduled.
5989 */ 6033 */
5990 if (!down) 6034 if (!down)
5991 set_bit(__E1000_DOWN, &adapter->state); 6035 set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6040,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5996 cancel_work_sync(&adapter->watchdog_task); 6040 cancel_work_sync(&adapter->watchdog_task);
5997 cancel_work_sync(&adapter->downshift_task); 6041 cancel_work_sync(&adapter->downshift_task);
5998 cancel_work_sync(&adapter->update_phy_task); 6042 cancel_work_sync(&adapter->update_phy_task);
6043 cancel_work_sync(&adapter->led_blink_task);
5999 cancel_work_sync(&adapter->print_hang_task); 6044 cancel_work_sync(&adapter->print_hang_task);
6000 flush_scheduled_work();
6001 6045
6002 if (!(netdev->flags & IFF_UP)) 6046 if (!(netdev->flags & IFF_UP))
6003 e1000_power_down_phy(adapter); 6047 e1000_power_down_phy(adapter);
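Replacing flush_scheduled_work() with per-item cancel_work_sync() calls is both safer and more precise: flushing the shared system workqueue waits on every queued item in the system and can deadlock if the caller holds a lock one of those items needs, whereas cancelling names exactly the work this driver owns. Condensed sketch of the remove-path ordering:

	set_bit(__E1000_DOWN, &adapter->state);	/* stop self-rearming paths */
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->led_blink_task); /* pairs with the INIT_WORK
						     * moved into probe above */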
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 3d36911f77f3..a9612b0e4bca 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -421,7 +421,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
421 static const struct e1000_option opt = { 421 static const struct e1000_option opt = {
422 .type = enable_option, 422 .type = enable_option,
423 .name = "CRC Stripping", 423 .name = "CRC Stripping",
424 .err = "defaulting to enabled", 424 .err = "defaulting to Enabled",
425 .def = OPTION_ENABLED 425 .def = OPTION_ENABLED
426 }; 426 };
427 427
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 3d3dc0c82355..95da38693b77 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -226,6 +226,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
226 } 226 }
227 *data = (u16) mdic; 227 *data = (u16) mdic;
228 228
229 /*
230 * Allow some time after each MDIC transaction to avoid
231 * reading duplicate data in the next MDIC transaction.
232 */
233 if (hw->mac.type == e1000_pch2lan)
234 udelay(100);
235
229 return 0; 236 return 0;
230} 237}
231 238
@@ -279,6 +286,13 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
279 return -E1000_ERR_PHY; 286 return -E1000_ERR_PHY;
280 } 287 }
281 288
289 /*
290 * Allow some time after each MDIC transaction to avoid
291 * reading duplicate data in the next MDIC transaction.
292 */
293 if (hw->mac.type == e1000_pch2lan)
294 udelay(100);
295
282 return 0; 296 return 0;
283} 297}
284 298
@@ -1840,11 +1854,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1840 u16 phy_data, i, agc_value = 0; 1854 u16 phy_data, i, agc_value = 0;
1841 u16 cur_agc_index, max_agc_index = 0; 1855 u16 cur_agc_index, max_agc_index = 0;
1842 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1856 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1843 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 1857 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1844 {IGP02E1000_PHY_AGC_A, 1858 IGP02E1000_PHY_AGC_A,
1845 IGP02E1000_PHY_AGC_B, 1859 IGP02E1000_PHY_AGC_B,
1846 IGP02E1000_PHY_AGC_C, 1860 IGP02E1000_PHY_AGC_C,
1847 IGP02E1000_PHY_AGC_D}; 1861 IGP02E1000_PHY_AGC_D
1862 };
1848 1863
1849 /* Read the AGC registers for all channels */ 1864 /* Read the AGC registers for all channels */
1850 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1865 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5a..9e19fbc2f176 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ 302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
303#define ee_id_eepro10p1 0x31 303#define ee_id_eepro10p1 0x31
304 304
305#define TX_TIMEOUT 40 305#define TX_TIMEOUT ((4*HZ)/10)
306 306
307/* Index to functions, as function prototypes. */ 307/* Index to functions, as function prototypes. */
308 308
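The eepro change corrects a unit bug: the netdev watchdog timeout is measured in jiffies, so a literal 40 means 0.4 s only when HZ=100 and shrinks to 40 ms at HZ=1000. Scaling by HZ keeps the intended 400 ms at any tick rate:

	#define TX_TIMEOUT	((4 * HZ) / 10)	/* 400 ms regardless of HZ */

	netdev->watchdog_timeo = TX_TIMEOUT;	/* hypothetical usage site */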
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e74828d..a724a2d14506 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -130,19 +130,6 @@
130 130
131/* utility functions */ 131/* utility functions */
132 132
133#define ehea_info(fmt, args...) \
134 printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
135
136#define ehea_error(fmt, args...) \
137 printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
138
139#ifdef DEBUG
140#define ehea_debug(fmt, args...) \
141 printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
142#else
143#define ehea_debug(fmt, args...) do {} while (0)
144#endif
145
146void ehea_dump(void *adr, int len, char *msg); 133void ehea_dump(void *adr, int len, char *msg);
147 134
148#define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) 135#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
@@ -515,6 +502,4 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
515int ehea_sense_port_attr(struct ehea_port *port); 502int ehea_sense_port_attr(struct ehea_port *port);
516int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); 503int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
517 504
518extern struct work_struct ehea_rereg_mr_task;
519
520#endif /* __EHEA_H__ */ 505#endif /* __EHEA_H__ */
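The ehea patches that follow replace the driver-private ehea_info/ehea_error/ehea_debug macros with the kernel's standard logging helpers: pr_fmt() gives every pr_*() call a module-name prefix, while the netdev_*/netif_* variants additionally tag messages with the interface and, for netif_*, respect the per-port message-level mask. The idiom, sketched:

	/* pr_fmt must be defined before any include that pulls in printk.h */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/netdevice.h>

	static void report(struct net_device *dev, struct ehea_port *port)
	{
		pr_err("adapter-level fault\n");	  /* "ehea: ..." */
		netdev_err(dev, "interface fault\n");	  /* prefixed with dev name */
		netif_info(port, link, dev, "link up\n"); /* gated on msg_enable */
	}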
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..afebf2075779 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ehea.h" 31#include "ehea.h"
30#include "ehea_phyp.h" 32#include "ehea_phyp.h"
31 33
@@ -118,10 +120,10 @@ doit:
118 ret = ehea_set_portspeed(port, sp); 120 ret = ehea_set_portspeed(port, sp);
119 121
120 if (!ret) 122 if (!ret)
121 ehea_info("%s: Port speed successfully set: %dMbps " 123 netdev_info(dev,
122 "%s Duplex", 124 "Port speed successfully set: %dMbps %s Duplex\n",
123 port->netdev->name, port->port_speed, 125 port->port_speed,
124 port->full_duplex == 1 ? "Full" : "Half"); 126 port->full_duplex == 1 ? "Full" : "Half");
125out: 127out:
126 return ret; 128 return ret;
127} 129}
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev)
134 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); 136 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
135 137
136 if (!ret) 138 if (!ret)
137 ehea_info("%s: Port speed successfully set: %dMbps " 139 netdev_info(port->netdev,
138 "%s Duplex", 140 "Port speed successfully set: %dMbps %s Duplex\n",
139 port->netdev->name, port->port_speed, 141 port->port_speed,
140 port->full_duplex == 1 ? "Full" : "Half"); 142 port->full_duplex == 1 ? "Full" : "Half");
141 return ret; 143 return ret;
142} 144}
143 145
@@ -261,6 +263,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
261 263
262} 264}
263 265
266static int ehea_set_flags(struct net_device *dev, u32 data)
267{
268 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
269 | ETH_FLAG_TXVLAN
270 | ETH_FLAG_RXVLAN);
271}
272
264const struct ethtool_ops ehea_ethtool_ops = { 273const struct ethtool_ops ehea_ethtool_ops = {
265 .get_settings = ehea_get_settings, 274 .get_settings = ehea_get_settings,
266 .get_drvinfo = ehea_get_drvinfo, 275 .get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +282,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
273 .get_ethtool_stats = ehea_get_ethtool_stats, 282 .get_ethtool_stats = ehea_get_ethtool_stats,
274 .get_rx_csum = ehea_get_rx_csum, 283 .get_rx_csum = ehea_get_rx_csum,
275 .set_settings = ehea_set_settings, 284 .set_settings = ehea_set_settings,
285 .get_flags = ethtool_op_get_flags,
286 .set_flags = ehea_set_flags,
276 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ 287 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
277}; 288};
278 289
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..0dfef6d76445 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/in.h> 31#include <linux/in.h>
30#include <linux/ip.h> 32#include <linux/ip.h>
31#include <linux/tcp.h> 33#include <linux/tcp.h>
@@ -101,7 +103,6 @@ MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
101static int port_name_cnt; 103static int port_name_cnt;
102static LIST_HEAD(adapter_list); 104static LIST_HEAD(adapter_list);
103static unsigned long ehea_driver_flags; 105static unsigned long ehea_driver_flags;
104struct work_struct ehea_rereg_mr_task;
105static DEFINE_MUTEX(dlpar_mem_lock); 106static DEFINE_MUTEX(dlpar_mem_lock);
106struct ehea_fw_handle_array ehea_fw_handles; 107struct ehea_fw_handle_array ehea_fw_handles;
107struct ehea_bcmc_reg_array ehea_bcmc_regs; 108struct ehea_bcmc_reg_array ehea_bcmc_regs;
@@ -136,8 +137,8 @@ void ehea_dump(void *adr, int len, char *msg)
136 int x; 137 int x;
137 unsigned char *deb = adr; 138 unsigned char *deb = adr;
138 for (x = 0; x < len; x += 16) { 139 for (x = 0; x < len; x += 16) {
139 printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg, 140 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
140 deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 141 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
141 deb += 16; 142 deb += 16;
142 } 143 }
143} 144}
@@ -337,7 +338,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
337 338
338 cb2 = (void *)get_zeroed_page(GFP_KERNEL); 339 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
339 if (!cb2) { 340 if (!cb2) {
340 ehea_error("no mem for cb2"); 341 netdev_err(dev, "no mem for cb2\n");
341 goto out; 342 goto out;
342 } 343 }
343 344
@@ -345,7 +346,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
345 port->logical_port_id, 346 port->logical_port_id,
346 H_PORT_CB2, H_PORT_CB2_ALL, cb2); 347 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
347 if (hret != H_SUCCESS) { 348 if (hret != H_SUCCESS) {
348 ehea_error("query_ehea_port failed"); 349 netdev_err(dev, "query_ehea_port failed\n");
349 goto out_herr; 350 goto out_herr;
350 } 351 }
351 352
@@ -400,6 +401,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
400 skb_arr_rq1[index] = netdev_alloc_skb(dev, 401 skb_arr_rq1[index] = netdev_alloc_skb(dev,
401 EHEA_L_PKT_SIZE); 402 EHEA_L_PKT_SIZE);
402 if (!skb_arr_rq1[index]) { 403 if (!skb_arr_rq1[index]) {
404 netdev_info(dev, "Unable to allocate enough skb in the array\n");
403 pr->rq1_skba.os_skbs = fill_wqes - i; 405 pr->rq1_skba.os_skbs = fill_wqes - i;
404 break; 406 break;
405 } 407 }
@@ -422,13 +424,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
422 struct net_device *dev = pr->port->netdev; 424 struct net_device *dev = pr->port->netdev;
423 int i; 425 int i;
424 426
425 for (i = 0; i < pr->rq1_skba.len; i++) { 427 if (nr_rq1a > pr->rq1_skba.len) {
428 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
429 return;
430 }
431
432 for (i = 0; i < nr_rq1a; i++) {
426 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 433 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
427 if (!skb_arr_rq1[i]) 434 if (!skb_arr_rq1[i]) {
435 netdev_info(dev, "Not enough memory to allocate skb array\n");
428 break; 436 break;
437 }
429 } 438 }
430 /* Ring doorbell */ 439 /* Ring doorbell */
431 ehea_update_rq1a(pr->qp, nr_rq1a); 440 ehea_update_rq1a(pr->qp, i);
432} 441}
433 442
434static int ehea_refill_rq_def(struct ehea_port_res *pr, 443static int ehea_refill_rq_def(struct ehea_port_res *pr,
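The ehea_init_fill_rq1() fix above closes a partial-allocation hole: the loop can stop early when netdev_alloc_skb() fails, so the RQ1 doorbell must report i, the number of buffers actually placed, rather than the requested nr_rq1a, or the hardware would consume descriptors that point at nothing. The shape of the fix:

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;			/* partial fill stops here */
	}
	ehea_update_rq1a(pr->qp, i);		/* advertise only what exists */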
@@ -461,8 +470,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
461 if (!skb) { 470 if (!skb) {
462 q_skba->os_skbs = fill_wqes - i; 471 q_skba->os_skbs = fill_wqes - i;
463 if (q_skba->os_skbs == q_skba->len - 2) { 472 if (q_skba->os_skbs == q_skba->len - 2) {
464 ehea_info("%s: rq%i ran dry - no mem for skb", 473 netdev_info(pr->port->netdev,
465 pr->port->netdev->name, rq_nr); 474 "rq%i ran dry - no mem for skb\n",
475 rq_nr);
466 ret = -ENOMEM; 476 ret = -ENOMEM;
467 } 477 }
468 break; 478 break;
@@ -627,8 +637,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
627 637
628 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { 638 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
629 if (netif_msg_rx_err(pr->port)) { 639 if (netif_msg_rx_err(pr->port)) {
630 ehea_error("Critical receive error for QP %d. " 640 pr_err("Critical receive error for QP %d. Resetting port.\n",
631 "Resetting port.", pr->qp->init_attr.qp_nr); 641 pr->qp->init_attr.qp_nr);
632 ehea_dump(cqe, sizeof(*cqe), "CQE"); 642 ehea_dump(cqe, sizeof(*cqe), "CQE");
633 } 643 }
634 ehea_schedule_port_reset(pr->port); 644 ehea_schedule_port_reset(pr->port);
@@ -675,7 +685,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
675 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && 685 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
676 pr->port->vgrp); 686 pr->port->vgrp);
677 687
678 if (use_lro) { 688 if (skb->dev->features & NETIF_F_LRO) {
679 if (vlan_extracted) 689 if (vlan_extracted)
680 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, 690 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
681 pr->port->vgrp, 691 pr->port->vgrp,
@@ -730,13 +740,15 @@ static int ehea_proc_rwqes(struct net_device *dev,
730 skb_arr_rq1_len, 740 skb_arr_rq1_len,
731 wqe_index); 741 wqe_index);
732 if (unlikely(!skb)) { 742 if (unlikely(!skb)) {
733 if (netif_msg_rx_err(port)) 743 netif_err(port, rx_err, dev,
734 ehea_error("LL rq1: skb=NULL"); 744 "LL rq1: skb=NULL\n");
735 745
736 skb = netdev_alloc_skb(dev, 746 skb = netdev_alloc_skb(dev,
737 EHEA_L_PKT_SIZE); 747 EHEA_L_PKT_SIZE);
738 if (!skb) 748 if (!skb) {
749 netdev_info(dev, "Not enough memory to allocate skb\n");
739 break; 750 break;
751 }
740 } 752 }
741 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 753 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
742 cqe->num_bytes_transfered - 4); 754 cqe->num_bytes_transfered - 4);
@@ -746,8 +758,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
746 skb = get_skb_by_index(skb_arr_rq2, 758 skb = get_skb_by_index(skb_arr_rq2,
747 skb_arr_rq2_len, cqe); 759 skb_arr_rq2_len, cqe);
748 if (unlikely(!skb)) { 760 if (unlikely(!skb)) {
749 if (netif_msg_rx_err(port)) 761 netif_err(port, rx_err, dev,
750 ehea_error("rq2: skb=NULL"); 762 "rq2: skb=NULL\n");
751 break; 763 break;
752 } 764 }
753 ehea_fill_skb(dev, skb, cqe); 765 ehea_fill_skb(dev, skb, cqe);
@@ -757,8 +769,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
757 skb = get_skb_by_index(skb_arr_rq3, 769 skb = get_skb_by_index(skb_arr_rq3,
758 skb_arr_rq3_len, cqe); 770 skb_arr_rq3_len, cqe);
759 if (unlikely(!skb)) { 771 if (unlikely(!skb)) {
760 if (netif_msg_rx_err(port)) 772 netif_err(port, rx_err, dev,
761 ehea_error("rq3: skb=NULL"); 773 "rq3: skb=NULL\n");
762 break; 774 break;
763 } 775 }
764 ehea_fill_skb(dev, skb, cqe); 776 ehea_fill_skb(dev, skb, cqe);
@@ -777,7 +789,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
777 } 789 }
778 cqe = ehea_poll_rq1(qp, &wqe_index); 790 cqe = ehea_poll_rq1(qp, &wqe_index);
779 } 791 }
780 if (use_lro) 792 if (dev->features & NETIF_F_LRO)
781 lro_flush_all(&pr->lro_mgr); 793 lro_flush_all(&pr->lro_mgr);
782 794
783 pr->rx_packets += processed; 795 pr->rx_packets += processed;
@@ -830,7 +842,7 @@ static void check_sqs(struct ehea_port *port)
830 msecs_to_jiffies(100)); 842 msecs_to_jiffies(100));
831 843
832 if (!ret) { 844 if (!ret) {
833 ehea_error("HW/SW queues out of sync"); 845 pr_err("HW/SW queues out of sync\n");
834 ehea_schedule_port_reset(pr->port); 846 ehea_schedule_port_reset(pr->port);
835 return; 847 return;
836 } 848 }
@@ -863,14 +875,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
863 } 875 }
864 876
865 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 877 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
866 ehea_error("Bad send completion status=0x%04X", 878 pr_err("Bad send completion status=0x%04X\n",
867 cqe->status); 879 cqe->status);
868 880
869 if (netif_msg_tx_err(pr->port)) 881 if (netif_msg_tx_err(pr->port))
870 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 882 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
871 883
872 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { 884 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
873 ehea_error("Resetting port"); 885 pr_err("Resetting port\n");
874 ehea_schedule_port_reset(pr->port); 886 ehea_schedule_port_reset(pr->port);
875 break; 887 break;
876 } 888 }
@@ -988,8 +1000,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
988 1000
989 while (eqe) { 1001 while (eqe) {
990 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); 1002 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
991 ehea_error("QP aff_err: entry=0x%llx, token=0x%x", 1003 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
992 eqe->entry, qp_token); 1004 eqe->entry, qp_token);
993 1005
994 qp = port->port_res[qp_token].qp; 1006 qp = port->port_res[qp_token].qp;
995 1007
@@ -1007,7 +1019,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
1007 } 1019 }
1008 1020
1009 if (reset_port) { 1021 if (reset_port) {
1010 ehea_error("Resetting port"); 1022 pr_err("Resetting port\n");
1011 ehea_schedule_port_reset(port); 1023 ehea_schedule_port_reset(port);
1012 } 1024 }
1013 1025
@@ -1035,7 +1047,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
1035 /* may be called via ehea_neq_tasklet() */ 1047 /* may be called via ehea_neq_tasklet() */
1036 cb0 = (void *)get_zeroed_page(GFP_ATOMIC); 1048 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
1037 if (!cb0) { 1049 if (!cb0) {
1038 ehea_error("no mem for cb0"); 1050 pr_err("no mem for cb0\n");
1039 ret = -ENOMEM; 1051 ret = -ENOMEM;
1040 goto out; 1052 goto out;
1041 } 1053 }
@@ -1127,7 +1139,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1127 1139
1128 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 1140 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1129 if (!cb4) { 1141 if (!cb4) {
1130 ehea_error("no mem for cb4"); 1142 pr_err("no mem for cb4\n");
1131 ret = -ENOMEM; 1143 ret = -ENOMEM;
1132 goto out; 1144 goto out;
1133 } 1145 }
@@ -1178,16 +1190,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1178 break; 1190 break;
1179 } 1191 }
1180 } else { 1192 } else {
1181 ehea_error("Failed sensing port speed"); 1193 pr_err("Failed sensing port speed\n");
1182 ret = -EIO; 1194 ret = -EIO;
1183 } 1195 }
1184 } else { 1196 } else {
1185 if (hret == H_AUTHORITY) { 1197 if (hret == H_AUTHORITY) {
1186 ehea_info("Hypervisor denied setting port speed"); 1198 pr_info("Hypervisor denied setting port speed\n");
1187 ret = -EPERM; 1199 ret = -EPERM;
1188 } else { 1200 } else {
1189 ret = -EIO; 1201 ret = -EIO;
1190 ehea_error("Failed setting port speed"); 1202 pr_err("Failed setting port speed\n");
1191 } 1203 }
1192 } 1204 }
1193 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1205 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1204,80 +1216,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1204 u8 ec; 1216 u8 ec;
1205 u8 portnum; 1217 u8 portnum;
1206 struct ehea_port *port; 1218 struct ehea_port *port;
1219 struct net_device *dev;
1207 1220
1208 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); 1221 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1209 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); 1222 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1210 port = ehea_get_port(adapter, portnum); 1223 port = ehea_get_port(adapter, portnum);
1224 dev = port->netdev;
1211 1225
1212 switch (ec) { 1226 switch (ec) {
1213 case EHEA_EC_PORTSTATE_CHG: /* port state change */ 1227 case EHEA_EC_PORTSTATE_CHG: /* port state change */
1214 1228
1215 if (!port) { 1229 if (!port) {
1216 ehea_error("unknown portnum %x", portnum); 1230 netdev_err(dev, "unknown portnum %x\n", portnum);
1217 break; 1231 break;
1218 } 1232 }
1219 1233
1220 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { 1234 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1221 if (!netif_carrier_ok(port->netdev)) { 1235 if (!netif_carrier_ok(dev)) {
1222 ret = ehea_sense_port_attr(port); 1236 ret = ehea_sense_port_attr(port);
1223 if (ret) { 1237 if (ret) {
1224 ehea_error("failed resensing port " 1238 netdev_err(dev, "failed resensing port attributes\n");
1225 "attributes");
1226 break; 1239 break;
1227 } 1240 }
1228 1241
1229 if (netif_msg_link(port)) 1242 netif_info(port, link, dev,
1230 ehea_info("%s: Logical port up: %dMbps " 1243 "Logical port up: %dMbps %s Duplex\n",
1231 "%s Duplex", 1244 port->port_speed,
1232 port->netdev->name, 1245 port->full_duplex == 1 ?
1233 port->port_speed, 1246 "Full" : "Half");
1234 port->full_duplex ==
1235 1 ? "Full" : "Half");
1236 1247
1237 netif_carrier_on(port->netdev); 1248 netif_carrier_on(dev);
1238 netif_wake_queue(port->netdev); 1249 netif_wake_queue(dev);
1239 } 1250 }
1240 } else 1251 } else
1241 if (netif_carrier_ok(port->netdev)) { 1252 if (netif_carrier_ok(dev)) {
1242 if (netif_msg_link(port)) 1253 netif_info(port, link, dev,
1243 ehea_info("%s: Logical port down", 1254 "Logical port down\n");
1244 port->netdev->name); 1255 netif_carrier_off(dev);
1245 netif_carrier_off(port->netdev); 1256 netif_stop_queue(dev);
1246 netif_stop_queue(port->netdev);
1247 } 1257 }
1248 1258
1249 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { 1259 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1250 port->phy_link = EHEA_PHY_LINK_UP; 1260 port->phy_link = EHEA_PHY_LINK_UP;
1251 if (netif_msg_link(port)) 1261 netif_info(port, link, dev,
1252 ehea_info("%s: Physical port up", 1262 "Physical port up\n");
1253 port->netdev->name);
1254 if (prop_carrier_state) 1263 if (prop_carrier_state)
1255 netif_carrier_on(port->netdev); 1264 netif_carrier_on(dev);
1256 } else { 1265 } else {
1257 port->phy_link = EHEA_PHY_LINK_DOWN; 1266 port->phy_link = EHEA_PHY_LINK_DOWN;
1258 if (netif_msg_link(port)) 1267 netif_info(port, link, dev,
1259 ehea_info("%s: Physical port down", 1268 "Physical port down\n");
1260 port->netdev->name);
1261 if (prop_carrier_state) 1269 if (prop_carrier_state)
1262 netif_carrier_off(port->netdev); 1270 netif_carrier_off(dev);
1263 } 1271 }
1264 1272
1265 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) 1273 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1266 ehea_info("External switch port is primary port"); 1274 netdev_info(dev,
1275 "External switch port is primary port\n");
1267 else 1276 else
1268 ehea_info("External switch port is backup port"); 1277 netdev_info(dev,
1278 "External switch port is backup port\n");
1269 1279
1270 break; 1280 break;
1271 case EHEA_EC_ADAPTER_MALFUNC: 1281 case EHEA_EC_ADAPTER_MALFUNC:
1272 ehea_error("Adapter malfunction"); 1282 netdev_err(dev, "Adapter malfunction\n");
1273 break; 1283 break;
1274 case EHEA_EC_PORT_MALFUNC: 1284 case EHEA_EC_PORT_MALFUNC:
1275 ehea_info("Port malfunction: Device: %s", port->netdev->name); 1285 netdev_info(dev, "Port malfunction\n");
1276 netif_carrier_off(port->netdev); 1286 netif_carrier_off(dev);
1277 netif_stop_queue(port->netdev); 1287 netif_stop_queue(dev);
1278 break; 1288 break;
1279 default: 1289 default:
1280 ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe); 1290 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
1281 break; 1291 break;
1282 } 1292 }
1283} 1293}
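
This hunk switches to the netdev_*() and netif_*() families: netdev_err()/netdev_info() prefix each message with the driver and interface name, which is why the explicit port->netdev->name arguments disappear, and netif_info(priv, type, dev, ...) additionally folds the open-coded "if (netif_msg_<type>(priv))" test into the macro. One caveat visible in the new code: dev = port->netdev is now assigned before the !port test in the port-state-change case, so that check can no longer guard against a NULL port. A minimal sketch of the two families, with illustrative names (example_priv is not from the driver):

	#include <linux/netdevice.h>

	struct example_priv {
		struct net_device *netdev;
		u32 msg_enable;		/* consulted by the netif_*() macros */
	};

	static void example_link_up(struct example_priv *priv, int speed)
	{
		/* emitted only when the "link" bit is set in msg_enable */
		netif_info(priv, link, priv->netdev,
			   "Logical port up: %dMbps\n", speed);

		/* unconditional, but still prefixed with the interface name */
		netdev_info(priv->netdev,
			    "External switch port is primary port\n");
	}
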
@@ -1289,13 +1299,13 @@ static void ehea_neq_tasklet(unsigned long data)
1289 u64 event_mask; 1299 u64 event_mask;
1290 1300
1291 eqe = ehea_poll_eq(adapter->neq); 1301 eqe = ehea_poll_eq(adapter->neq);
1292 ehea_debug("eqe=%p", eqe); 1302 pr_debug("eqe=%p\n", eqe);
1293 1303
1294 while (eqe) { 1304 while (eqe) {
1295 ehea_debug("*eqe=%lx", eqe->entry); 1305 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
1296 ehea_parse_eqe(adapter, eqe->entry); 1306 ehea_parse_eqe(adapter, eqe->entry);
1297 eqe = ehea_poll_eq(adapter->neq); 1307 eqe = ehea_poll_eq(adapter->neq);
1298 ehea_debug("next eqe=%p", eqe); 1308 pr_debug("next eqe=%p\n", eqe);
1299 } 1309 }
1300 1310
1301 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) 1311 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
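
ehea_debug() becomes pr_debug(), which compiles away entirely unless DEBUG or dynamic debug is enabled. The added (unsigned long) cast makes eqe->entry (a u64, i.e. unsigned long long) match the %lx specifier and silences the printk format warning. A sketch:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void trace_eq_entry(u64 entry)
	{
		/* u64 is unsigned long long; cast so %lx matches the argument */
		pr_debug("*eqe=%lx\n", (unsigned long)entry);
	}
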
@@ -1344,14 +1354,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
1344 ehea_qp_aff_irq_handler, 1354 ehea_qp_aff_irq_handler,
1345 IRQF_DISABLED, port->int_aff_name, port); 1355 IRQF_DISABLED, port->int_aff_name, port);
1346 if (ret) { 1356 if (ret) {
1347 ehea_error("failed registering irq for qp_aff_irq_handler:" 1357 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1348 "ist=%X", port->qp_eq->attr.ist1); 1358 port->qp_eq->attr.ist1);
1349 goto out_free_qpeq; 1359 goto out_free_qpeq;
1350 } 1360 }
1351 1361
1352 if (netif_msg_ifup(port)) 1362 netif_info(port, ifup, dev,
1353 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " 1363 "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
1354 "registered", port->qp_eq->attr.ist1); 1364 port->qp_eq->attr.ist1);
1355 1365
1356 1366
1357 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1367 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1363,14 +1373,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
1363 IRQF_DISABLED, pr->int_send_name, 1373 IRQF_DISABLED, pr->int_send_name,
1364 pr); 1374 pr);
1365 if (ret) { 1375 if (ret) {
1366 ehea_error("failed registering irq for ehea_queue " 1376 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1367 "port_res_nr:%d, ist=%X", i, 1377 i, pr->eq->attr.ist1);
1368 pr->eq->attr.ist1);
1369 goto out_free_req; 1378 goto out_free_req;
1370 } 1379 }
1371 if (netif_msg_ifup(port)) 1380 netif_info(port, ifup, dev,
1372 ehea_info("irq_handle 0x%X for function ehea_queue_int " 1381 "irq_handle 0x%X for function ehea_queue_int %d registered\n",
1373 "%d registered", pr->eq->attr.ist1, i); 1382 pr->eq->attr.ist1, i);
1374 } 1383 }
1375out: 1384out:
1376 return ret; 1385 return ret;
@@ -1401,16 +1410,16 @@ static void ehea_free_interrupts(struct net_device *dev)
1401 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1410 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1402 pr = &port->port_res[i]; 1411 pr = &port->port_res[i];
1403 ibmebus_free_irq(pr->eq->attr.ist1, pr); 1412 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1404 if (netif_msg_intr(port)) 1413 netif_info(port, intr, dev,
1405 ehea_info("free send irq for res %d with handle 0x%X", 1414 "free send irq for res %d with handle 0x%X\n",
1406 i, pr->eq->attr.ist1); 1415 i, pr->eq->attr.ist1);
1407 } 1416 }
1408 1417
1409 /* associated events */ 1418 /* associated events */
1410 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1419 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1411 if (netif_msg_intr(port)) 1420 netif_info(port, intr, dev,
1412 ehea_info("associated event interrupt for handle 0x%X freed", 1421 "associated event interrupt for handle 0x%X freed\n",
1413 port->qp_eq->attr.ist1); 1422 port->qp_eq->attr.ist1);
1414} 1423}
1415 1424
1416static int ehea_configure_port(struct ehea_port *port) 1425static int ehea_configure_port(struct ehea_port *port)
@@ -1479,7 +1488,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
1479out_free: 1488out_free:
1480 ehea_rem_mr(&pr->send_mr); 1489 ehea_rem_mr(&pr->send_mr);
1481out: 1490out:
1482 ehea_error("Generating SMRS failed\n"); 1491 pr_err("Generating SMRS failed\n");
1483 return -EIO; 1492 return -EIO;
1484} 1493}
1485 1494
@@ -1496,12 +1505,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1496{ 1505{
1497 int arr_size = sizeof(void *) * max_q_entries; 1506 int arr_size = sizeof(void *) * max_q_entries;
1498 1507
1499 q_skba->arr = vmalloc(arr_size); 1508 q_skba->arr = vzalloc(arr_size);
1500 if (!q_skba->arr) 1509 if (!q_skba->arr)
1501 return -ENOMEM; 1510 return -ENOMEM;
1502 1511
1503 memset(q_skba->arr, 0, arr_size);
1504
1505 q_skba->len = max_q_entries; 1512 q_skba->len = max_q_entries;
1506 q_skba->index = 0; 1513 q_skba->index = 0;
1507 q_skba->os_skbs = 0; 1514 q_skba->os_skbs = 0;
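
The vmalloc()+memset() pair collapses into vzalloc(), which returns zeroed, virtually contiguous memory in one call. A sketch of the change (assuming the caller frees the array with vfree()):

	#include <linux/vmalloc.h>

	static void **alloc_skb_ptr_array(int max_q_entries)
	{
		/* zeroed on return; the explicit memset() is no longer needed */
		return vzalloc(max_q_entries * sizeof(void *));
	}
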
@@ -1536,7 +1543,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1536 1543
1537 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1544 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1538 if (!pr->eq) { 1545 if (!pr->eq) {
1539 ehea_error("create_eq failed (eq)"); 1546 pr_err("create_eq failed (eq)\n");
1540 goto out_free; 1547 goto out_free;
1541 } 1548 }
1542 1549
@@ -1544,7 +1551,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1544 pr->eq->fw_handle, 1551 pr->eq->fw_handle,
1545 port->logical_port_id); 1552 port->logical_port_id);
1546 if (!pr->recv_cq) { 1553 if (!pr->recv_cq) {
1547 ehea_error("create_cq failed (cq_recv)"); 1554 pr_err("create_cq failed (cq_recv)\n");
1548 goto out_free; 1555 goto out_free;
1549 } 1556 }
1550 1557
@@ -1552,19 +1559,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1552 pr->eq->fw_handle, 1559 pr->eq->fw_handle,
1553 port->logical_port_id); 1560 port->logical_port_id);
1554 if (!pr->send_cq) { 1561 if (!pr->send_cq) {
1555 ehea_error("create_cq failed (cq_send)"); 1562 pr_err("create_cq failed (cq_send)\n");
1556 goto out_free; 1563 goto out_free;
1557 } 1564 }
1558 1565
1559 if (netif_msg_ifup(port)) 1566 if (netif_msg_ifup(port))
1560 ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", 1567 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1561 pr->send_cq->attr.act_nr_of_cqes, 1568 pr->send_cq->attr.act_nr_of_cqes,
1562 pr->recv_cq->attr.act_nr_of_cqes); 1569 pr->recv_cq->attr.act_nr_of_cqes);
1563 1570
1564 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 1571 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1565 if (!init_attr) { 1572 if (!init_attr) {
1566 ret = -ENOMEM; 1573 ret = -ENOMEM;
1567 ehea_error("no mem for ehea_qp_init_attr"); 1574 pr_err("no mem for ehea_qp_init_attr\n");
1568 goto out_free; 1575 goto out_free;
1569 } 1576 }
1570 1577
@@ -1589,18 +1596,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1589 1596
1590 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); 1597 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1591 if (!pr->qp) { 1598 if (!pr->qp) {
1592 ehea_error("create_qp failed"); 1599 pr_err("create_qp failed\n");
1593 ret = -EIO; 1600 ret = -EIO;
1594 goto out_free; 1601 goto out_free;
1595 } 1602 }
1596 1603
1597 if (netif_msg_ifup(port)) 1604 if (netif_msg_ifup(port))
1598 ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " 1605 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1599 "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, 1606 init_attr->qp_nr,
1600 init_attr->act_nr_send_wqes, 1607 init_attr->act_nr_send_wqes,
1601 init_attr->act_nr_rwqes_rq1, 1608 init_attr->act_nr_rwqes_rq1,
1602 init_attr->act_nr_rwqes_rq2, 1609 init_attr->act_nr_rwqes_rq2,
1603 init_attr->act_nr_rwqes_rq3); 1610 init_attr->act_nr_rwqes_rq3);
1604 1611
1605 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; 1612 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1606 1613
@@ -1751,7 +1758,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1751 swqe->descriptors++; 1758 swqe->descriptors++;
1752 } 1759 }
1753 } else 1760 } else
1754 ehea_error("cannot handle fragmented headers"); 1761 pr_err("cannot handle fragmented headers\n");
1755} 1762}
1756 1763
1757static void write_swqe2_nonTSO(struct sk_buff *skb, 1764static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1847,8 +1854,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1847 port->logical_port_id, 1854 port->logical_port_id,
1848 reg_type, port->mac_addr, 0, hcallid); 1855 reg_type, port->mac_addr, 0, hcallid);
1849 if (hret != H_SUCCESS) { 1856 if (hret != H_SUCCESS) {
1850 ehea_error("%sregistering bc address failed (tagged)", 1857 pr_err("%sregistering bc address failed (tagged)\n",
1851 hcallid == H_REG_BCMC ? "" : "de"); 1858 hcallid == H_REG_BCMC ? "" : "de");
1852 ret = -EIO; 1859 ret = -EIO;
1853 goto out_herr; 1860 goto out_herr;
1854 } 1861 }
@@ -1859,8 +1866,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1859 port->logical_port_id, 1866 port->logical_port_id,
1860 reg_type, port->mac_addr, 0, hcallid); 1867 reg_type, port->mac_addr, 0, hcallid);
1861 if (hret != H_SUCCESS) { 1868 if (hret != H_SUCCESS) {
1862 ehea_error("%sregistering bc address failed (vlan)", 1869 pr_err("%sregistering bc address failed (vlan)\n",
1863 hcallid == H_REG_BCMC ? "" : "de"); 1870 hcallid == H_REG_BCMC ? "" : "de");
1864 ret = -EIO; 1871 ret = -EIO;
1865 } 1872 }
1866out_herr: 1873out_herr:
@@ -1882,7 +1889,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1882 1889
1883 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1890 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1884 if (!cb0) { 1891 if (!cb0) {
1885 ehea_error("no mem for cb0"); 1892 pr_err("no mem for cb0\n");
1886 ret = -ENOMEM; 1893 ret = -ENOMEM;
1887 goto out; 1894 goto out;
1888 } 1895 }
@@ -1930,11 +1937,11 @@ out:
1930static void ehea_promiscuous_error(u64 hret, int enable) 1937static void ehea_promiscuous_error(u64 hret, int enable)
1931{ 1938{
1932 if (hret == H_AUTHORITY) 1939 if (hret == H_AUTHORITY)
1933 ehea_info("Hypervisor denied %sabling promiscuous mode", 1940 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1934 enable == 1 ? "en" : "dis"); 1941 enable == 1 ? "en" : "dis");
1935 else 1942 else
1936 ehea_error("failed %sabling promiscuous mode", 1943 pr_err("failed %sabling promiscuous mode\n",
1937 enable == 1 ? "en" : "dis"); 1944 enable == 1 ? "en" : "dis");
1938} 1945}
1939 1946
1940static void ehea_promiscuous(struct net_device *dev, int enable) 1947static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1948,7 +1955,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1948 1955
1949 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1956 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1950 if (!cb7) { 1957 if (!cb7) {
1951 ehea_error("no mem for cb7"); 1958 pr_err("no mem for cb7\n");
1952 goto out; 1959 goto out;
1953 } 1960 }
1954 1961
@@ -2008,7 +2015,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
2008 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, 2015 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
2009 H_DEREG_BCMC); 2016 H_DEREG_BCMC);
2010 if (hret) { 2017 if (hret) {
2011 ehea_error("failed deregistering mcast MAC"); 2018 pr_err("failed deregistering mcast MAC\n");
2012 ret = -EIO; 2019 ret = -EIO;
2013 } 2020 }
2014 2021
@@ -2031,7 +2038,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2031 if (!hret) 2038 if (!hret)
2032 port->allmulti = 1; 2039 port->allmulti = 1;
2033 else 2040 else
2034 ehea_error("failed enabling IFF_ALLMULTI"); 2041 netdev_err(dev,
2042 "failed enabling IFF_ALLMULTI\n");
2035 } 2043 }
2036 } else 2044 } else
2037 if (!enable) { 2045 if (!enable) {
@@ -2040,7 +2048,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2040 if (!hret) 2048 if (!hret)
2041 port->allmulti = 0; 2049 port->allmulti = 0;
2042 else 2050 else
2043 ehea_error("failed disabling IFF_ALLMULTI"); 2051 netdev_err(dev,
2052 "failed disabling IFF_ALLMULTI\n");
2044 } 2053 }
2045} 2054}
2046 2055
@@ -2051,7 +2060,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2051 2060
2052 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); 2061 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
2053 if (!ehea_mcl_entry) { 2062 if (!ehea_mcl_entry) {
2054 ehea_error("no mem for mcl_entry"); 2063 pr_err("no mem for mcl_entry\n");
2055 return; 2064 return;
2056 } 2065 }
2057 2066
@@ -2064,7 +2073,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2064 if (!hret) 2073 if (!hret)
2065 list_add(&ehea_mcl_entry->list, &port->mc_list->list); 2074 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
2066 else { 2075 else {
2067 ehea_error("failed registering mcast MAC"); 2076 pr_err("failed registering mcast MAC\n");
2068 kfree(ehea_mcl_entry); 2077 kfree(ehea_mcl_entry);
2069 } 2078 }
2070} 2079}
@@ -2097,9 +2106,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
2097 } 2106 }
2098 2107
2099 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { 2108 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
2100 ehea_info("Mcast registration limit reached (0x%llx). " 2109 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
2101 "Use ALLMULTI!", 2110 port->adapter->max_mc_mac);
2102 port->adapter->max_mc_mac);
2103 goto out; 2111 goto out;
2104 } 2112 }
2105 2113
@@ -2305,10 +2313,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2305 } 2313 }
2306 pr->swqe_id_counter += 1; 2314 pr->swqe_id_counter += 1;
2307 2315
2308 if (netif_msg_tx_queued(port)) { 2316 netif_info(port, tx_queued, dev,
2309 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); 2317 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2318 if (netif_msg_tx_queued(port))
2310 ehea_dump(swqe, 512, "swqe"); 2319 ehea_dump(swqe, 512, "swqe");
2311 }
2312 2320
2313 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 2321 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2314 netif_stop_queue(dev); 2322 netif_stop_queue(dev);
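
In the transmit path, one netif_msg_tx_queued() test used to guard both the message and the hex dump; after conversion, netif_info() performs its own level check, so only the dump keeps the explicit test. A sketch using print_hex_dump_bytes() as a stand-in for the driver's ehea_dump() helper, with illustrative names:

	#include <linux/netdevice.h>
	#include <linux/printk.h>

	struct demo_priv {
		u32 msg_enable;
	};

	static void demo_trace_tx(struct demo_priv *priv, struct net_device *dev,
				  const void *wqe, size_t len)
	{
		/* the level test now lives inside the macro */
		netif_info(priv, tx_queued, dev, "post swqe\n");

		/* side effects (here: a hex dump) still need an explicit test */
		if (netif_msg_tx_queued(priv))
			print_hex_dump_bytes("swqe: ", DUMP_PREFIX_OFFSET,
					     wqe, len);
	}
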
@@ -2344,14 +2352,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2344 2352
2345 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2353 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2346 if (!cb1) { 2354 if (!cb1) {
2347 ehea_error("no mem for cb1"); 2355 pr_err("no mem for cb1\n");
2348 goto out; 2356 goto out;
2349 } 2357 }
2350 2358
2351 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2359 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2352 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2360 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2353 if (hret != H_SUCCESS) 2361 if (hret != H_SUCCESS)
2354 ehea_error("modify_ehea_port failed"); 2362 pr_err("modify_ehea_port failed\n");
2355 2363
2356 free_page((unsigned long)cb1); 2364 free_page((unsigned long)cb1);
2357out: 2365out:
@@ -2368,14 +2376,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2368 2376
2369 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2377 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2370 if (!cb1) { 2378 if (!cb1) {
2371 ehea_error("no mem for cb1"); 2379 pr_err("no mem for cb1\n");
2372 goto out; 2380 goto out;
2373 } 2381 }
2374 2382
2375 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2383 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2376 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2384 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2377 if (hret != H_SUCCESS) { 2385 if (hret != H_SUCCESS) {
2378 ehea_error("query_ehea_port failed"); 2386 pr_err("query_ehea_port failed\n");
2379 goto out; 2387 goto out;
2380 } 2388 }
2381 2389
@@ -2385,7 +2393,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2385 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2393 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2386 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2394 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2387 if (hret != H_SUCCESS) 2395 if (hret != H_SUCCESS)
2388 ehea_error("modify_ehea_port failed"); 2396 pr_err("modify_ehea_port failed\n");
2389out: 2397out:
2390 free_page((unsigned long)cb1); 2398 free_page((unsigned long)cb1);
2391 return; 2399 return;
@@ -2403,14 +2411,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2403 2411
2404 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2412 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2405 if (!cb1) { 2413 if (!cb1) {
2406 ehea_error("no mem for cb1"); 2414 pr_err("no mem for cb1\n");
2407 goto out; 2415 goto out;
2408 } 2416 }
2409 2417
2410 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2418 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2411 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2419 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2412 if (hret != H_SUCCESS) { 2420 if (hret != H_SUCCESS) {
2413 ehea_error("query_ehea_port failed"); 2421 pr_err("query_ehea_port failed\n");
2414 goto out; 2422 goto out;
2415 } 2423 }
2416 2424
@@ -2420,7 +2428,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2420 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2428 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2421 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2429 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2422 if (hret != H_SUCCESS) 2430 if (hret != H_SUCCESS)
2423 ehea_error("modify_ehea_port failed"); 2431 pr_err("modify_ehea_port failed\n");
2424out: 2432out:
2425 free_page((unsigned long)cb1); 2433 free_page((unsigned long)cb1);
2426} 2434}
@@ -2442,7 +2450,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2442 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2450 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2443 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2451 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2444 if (hret != H_SUCCESS) { 2452 if (hret != H_SUCCESS) {
2445 ehea_error("query_ehea_qp failed (1)"); 2453 pr_err("query_ehea_qp failed (1)\n");
2446 goto out; 2454 goto out;
2447 } 2455 }
2448 2456
@@ -2451,14 +2459,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2451 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2459 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2452 &dummy64, &dummy64, &dummy16, &dummy16); 2460 &dummy64, &dummy64, &dummy16, &dummy16);
2453 if (hret != H_SUCCESS) { 2461 if (hret != H_SUCCESS) {
2454 ehea_error("modify_ehea_qp failed (1)"); 2462 pr_err("modify_ehea_qp failed (1)\n");
2455 goto out; 2463 goto out;
2456 } 2464 }
2457 2465
2458 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2466 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2459 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2467 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2460 if (hret != H_SUCCESS) { 2468 if (hret != H_SUCCESS) {
2461 ehea_error("query_ehea_qp failed (2)"); 2469 pr_err("query_ehea_qp failed (2)\n");
2462 goto out; 2470 goto out;
2463 } 2471 }
2464 2472
@@ -2467,14 +2475,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2467 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2475 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2468 &dummy64, &dummy64, &dummy16, &dummy16); 2476 &dummy64, &dummy64, &dummy16, &dummy16);
2469 if (hret != H_SUCCESS) { 2477 if (hret != H_SUCCESS) {
2470 ehea_error("modify_ehea_qp failed (2)"); 2478 pr_err("modify_ehea_qp failed (2)\n");
2471 goto out; 2479 goto out;
2472 } 2480 }
2473 2481
2474 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2482 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2475 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2483 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2476 if (hret != H_SUCCESS) { 2484 if (hret != H_SUCCESS) {
2477 ehea_error("query_ehea_qp failed (3)"); 2485 pr_err("query_ehea_qp failed (3)\n");
2478 goto out; 2486 goto out;
2479 } 2487 }
2480 2488
@@ -2483,14 +2491,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2483 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2491 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2484 &dummy64, &dummy64, &dummy16, &dummy16); 2492 &dummy64, &dummy64, &dummy16, &dummy16);
2485 if (hret != H_SUCCESS) { 2493 if (hret != H_SUCCESS) {
2486 ehea_error("modify_ehea_qp failed (3)"); 2494 pr_err("modify_ehea_qp failed (3)\n");
2487 goto out; 2495 goto out;
2488 } 2496 }
2489 2497
2490 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2498 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2491 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2499 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2492 if (hret != H_SUCCESS) { 2500 if (hret != H_SUCCESS) {
2493 ehea_error("query_ehea_qp failed (4)"); 2501 pr_err("query_ehea_qp failed (4)\n");
2494 goto out; 2502 goto out;
2495 } 2503 }
2496 2504
@@ -2511,7 +2519,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2511 EHEA_MAX_ENTRIES_EQ, 1); 2519 EHEA_MAX_ENTRIES_EQ, 1);
2512 if (!port->qp_eq) { 2520 if (!port->qp_eq) {
2513 ret = -EINVAL; 2521 ret = -EINVAL;
2514 ehea_error("ehea_create_eq failed (qp_eq)"); 2522 pr_err("ehea_create_eq failed (qp_eq)\n");
2515 goto out_kill_eq; 2523 goto out_kill_eq;
2516 } 2524 }
2517 2525
@@ -2592,27 +2600,27 @@ static int ehea_up(struct net_device *dev)
2592 ret = ehea_port_res_setup(port, port->num_def_qps, 2600 ret = ehea_port_res_setup(port, port->num_def_qps,
2593 port->num_add_tx_qps); 2601 port->num_add_tx_qps);
2594 if (ret) { 2602 if (ret) {
2595 ehea_error("port_res_failed"); 2603 netdev_err(dev, "port_res_failed\n");
2596 goto out; 2604 goto out;
2597 } 2605 }
2598 2606
2599 /* Set default QP for this port */ 2607 /* Set default QP for this port */
2600 ret = ehea_configure_port(port); 2608 ret = ehea_configure_port(port);
2601 if (ret) { 2609 if (ret) {
2602 ehea_error("ehea_configure_port failed. ret:%d", ret); 2610 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2603 goto out_clean_pr; 2611 goto out_clean_pr;
2604 } 2612 }
2605 2613
2606 ret = ehea_reg_interrupts(dev); 2614 ret = ehea_reg_interrupts(dev);
2607 if (ret) { 2615 if (ret) {
2608 ehea_error("reg_interrupts failed. ret:%d", ret); 2616 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2609 goto out_clean_pr; 2617 goto out_clean_pr;
2610 } 2618 }
2611 2619
2612 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2620 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2613 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); 2621 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2614 if (ret) { 2622 if (ret) {
2615 ehea_error("activate_qp failed"); 2623 netdev_err(dev, "activate_qp failed\n");
2616 goto out_free_irqs; 2624 goto out_free_irqs;
2617 } 2625 }
2618 } 2626 }
@@ -2620,7 +2628,7 @@ static int ehea_up(struct net_device *dev)
2620 for (i = 0; i < port->num_def_qps; i++) { 2628 for (i = 0; i < port->num_def_qps; i++) {
2621 ret = ehea_fill_port_res(&port->port_res[i]); 2629 ret = ehea_fill_port_res(&port->port_res[i]);
2622 if (ret) { 2630 if (ret) {
2623 ehea_error("out_free_irqs"); 2631 netdev_err(dev, "out_free_irqs\n");
2624 goto out_free_irqs; 2632 goto out_free_irqs;
2625 } 2633 }
2626 } 2634 }
@@ -2643,7 +2651,7 @@ out_clean_pr:
2643 ehea_clean_all_portres(port); 2651 ehea_clean_all_portres(port);
2644out: 2652out:
2645 if (ret) 2653 if (ret)
2646 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2654 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2647 2655
2648 ehea_update_bcmc_registrations(); 2656 ehea_update_bcmc_registrations();
2649 ehea_update_firmware_handles(); 2657 ehea_update_firmware_handles();
@@ -2674,8 +2682,7 @@ static int ehea_open(struct net_device *dev)
2674 2682
2675 mutex_lock(&port->port_lock); 2683 mutex_lock(&port->port_lock);
2676 2684
2677 if (netif_msg_ifup(port)) 2685 netif_info(port, ifup, dev, "enabling port\n");
2678 ehea_info("enabling port %s", dev->name);
2679 2686
2680 ret = ehea_up(dev); 2687 ret = ehea_up(dev);
2681 if (!ret) { 2688 if (!ret) {
@@ -2710,8 +2717,7 @@ static int ehea_down(struct net_device *dev)
2710 2717
2711 ret = ehea_clean_all_portres(port); 2718 ret = ehea_clean_all_portres(port);
2712 if (ret) 2719 if (ret)
2713 ehea_info("Failed freeing resources for %s. ret=%i", 2720 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2714 dev->name, ret);
2715 2721
2716 ehea_update_firmware_handles(); 2722 ehea_update_firmware_handles();
2717 2723
@@ -2723,8 +2729,7 @@ static int ehea_stop(struct net_device *dev)
2723 int ret; 2729 int ret;
2724 struct ehea_port *port = netdev_priv(dev); 2730 struct ehea_port *port = netdev_priv(dev);
2725 2731
2726 if (netif_msg_ifdown(port)) 2732 netif_info(port, ifdown, dev, "disabling port\n");
2727 ehea_info("disabling port %s", dev->name);
2728 2733
2729 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2734 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2730 cancel_work_sync(&port->reset_task); 2735 cancel_work_sync(&port->reset_task);
@@ -2765,7 +2770,7 @@ static void ehea_flush_sq(struct ehea_port *port)
2765 msecs_to_jiffies(100)); 2770 msecs_to_jiffies(100));
2766 2771
2767 if (!ret) { 2772 if (!ret) {
2768 ehea_error("WARNING: sq not flushed completely"); 2773 pr_err("WARNING: sq not flushed completely\n");
2769 break; 2774 break;
2770 } 2775 }
2771 } 2776 }
@@ -2801,7 +2806,7 @@ int ehea_stop_qps(struct net_device *dev)
2801 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2806 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2802 cb0); 2807 cb0);
2803 if (hret != H_SUCCESS) { 2808 if (hret != H_SUCCESS) {
2804 ehea_error("query_ehea_qp failed (1)"); 2809 pr_err("query_ehea_qp failed (1)\n");
2805 goto out; 2810 goto out;
2806 } 2811 }
2807 2812
@@ -2813,7 +2818,7 @@ int ehea_stop_qps(struct net_device *dev)
2813 1), cb0, &dummy64, 2818 1), cb0, &dummy64,
2814 &dummy64, &dummy16, &dummy16); 2819 &dummy64, &dummy16, &dummy16);
2815 if (hret != H_SUCCESS) { 2820 if (hret != H_SUCCESS) {
2816 ehea_error("modify_ehea_qp failed (1)"); 2821 pr_err("modify_ehea_qp failed (1)\n");
2817 goto out; 2822 goto out;
2818 } 2823 }
2819 2824
@@ -2821,14 +2826,14 @@ int ehea_stop_qps(struct net_device *dev)
2821 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2826 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2822 cb0); 2827 cb0);
2823 if (hret != H_SUCCESS) { 2828 if (hret != H_SUCCESS) {
2824 ehea_error("query_ehea_qp failed (2)"); 2829 pr_err("query_ehea_qp failed (2)\n");
2825 goto out; 2830 goto out;
2826 } 2831 }
2827 2832
2828 /* deregister shared memory regions */ 2833 /* deregister shared memory regions */
2829 dret = ehea_rem_smrs(pr); 2834 dret = ehea_rem_smrs(pr);
2830 if (dret) { 2835 if (dret) {
2831 ehea_error("unreg shared memory region failed"); 2836 pr_err("unreg shared memory region failed\n");
2832 goto out; 2837 goto out;
2833 } 2838 }
2834 } 2839 }
@@ -2897,7 +2902,7 @@ int ehea_restart_qps(struct net_device *dev)
2897 2902
2898 ret = ehea_gen_smrs(pr); 2903 ret = ehea_gen_smrs(pr);
2899 if (ret) { 2904 if (ret) {
2900 ehea_error("creation of shared memory regions failed"); 2905 netdev_err(dev, "creation of shared memory regions failed\n");
2901 goto out; 2906 goto out;
2902 } 2907 }
2903 2908
@@ -2908,7 +2913,7 @@ int ehea_restart_qps(struct net_device *dev)
2908 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2913 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2909 cb0); 2914 cb0);
2910 if (hret != H_SUCCESS) { 2915 if (hret != H_SUCCESS) {
2911 ehea_error("query_ehea_qp failed (1)"); 2916 netdev_err(dev, "query_ehea_qp failed (1)\n");
2912 goto out; 2917 goto out;
2913 } 2918 }
2914 2919
@@ -2920,7 +2925,7 @@ int ehea_restart_qps(struct net_device *dev)
2920 1), cb0, &dummy64, 2925 1), cb0, &dummy64,
2921 &dummy64, &dummy16, &dummy16); 2926 &dummy64, &dummy16, &dummy16);
2922 if (hret != H_SUCCESS) { 2927 if (hret != H_SUCCESS) {
2923 ehea_error("modify_ehea_qp failed (1)"); 2928 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2924 goto out; 2929 goto out;
2925 } 2930 }
2926 2931
@@ -2928,7 +2933,7 @@ int ehea_restart_qps(struct net_device *dev)
2928 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2933 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2929 cb0); 2934 cb0);
2930 if (hret != H_SUCCESS) { 2935 if (hret != H_SUCCESS) {
2931 ehea_error("query_ehea_qp failed (2)"); 2936 netdev_err(dev, "query_ehea_qp failed (2)\n");
2932 goto out; 2937 goto out;
2933 } 2938 }
2934 2939
@@ -2965,8 +2970,7 @@ static void ehea_reset_port(struct work_struct *work)
2965 2970
2966 ehea_set_multicast_list(dev); 2971 ehea_set_multicast_list(dev);
2967 2972
2968 if (netif_msg_timer(port)) 2973 netif_info(port, timer, dev, "reset successful\n");
2969 ehea_info("Device %s resetted successfully", dev->name);
2970 2974
2971 port_napi_enable(port); 2975 port_napi_enable(port);
2972 2976
@@ -2976,12 +2980,12 @@ out:
2976 mutex_unlock(&dlpar_mem_lock); 2980 mutex_unlock(&dlpar_mem_lock);
2977} 2981}
2978 2982
2979static void ehea_rereg_mrs(struct work_struct *work) 2983static void ehea_rereg_mrs(void)
2980{ 2984{
2981 int ret, i; 2985 int ret, i;
2982 struct ehea_adapter *adapter; 2986 struct ehea_adapter *adapter;
2983 2987
2984 ehea_info("LPAR memory changed - re-initializing driver"); 2988 pr_info("LPAR memory changed - re-initializing driver\n");
2985 2989
2986 list_for_each_entry(adapter, &adapter_list, list) 2990 list_for_each_entry(adapter, &adapter_list, list)
2987 if (adapter->active_ports) { 2991 if (adapter->active_ports) {
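
ehea_rereg_mrs() loses its work_struct parameter: it is now called synchronously from the memory notifier (see the ehea_mem_notifier hunk below, where ehea_rereg_mrs() is invoked with no argument), so the ehea_rereg_mr_task work item, its INIT_WORK() in ehea_module_init() and the matching flush_scheduled_work() calls all go away. A sketch of the two shapes, with illustrative names:

	#include <linux/workqueue.h>

	/* before: the callback signature is dictated by the workqueue API */
	static void rereg_work_fn(struct work_struct *work)
	{
		/* ... re-register memory regions ... */
	}
	static DECLARE_WORK(rereg_task, rereg_work_fn);
	/* callers: schedule_work(&rereg_task); teardown: flush_scheduled_work(); */

	/* after: a plain function, invoked directly by the notifier */
	static void rereg_mrs(void)
	{
		/* ... same body, no work_struct plumbing ... */
	}
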
@@ -3013,8 +3017,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3013 /* Unregister old memory region */ 3017 /* Unregister old memory region */
3014 ret = ehea_rem_mr(&adapter->mr); 3018 ret = ehea_rem_mr(&adapter->mr);
3015 if (ret) { 3019 if (ret) {
3016 ehea_error("unregister MR failed - driver" 3020 pr_err("unregister MR failed - driver inoperable!\n");
3017 " inoperable!");
3018 goto out; 3021 goto out;
3019 } 3022 }
3020 } 3023 }
@@ -3026,8 +3029,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3026 /* Register new memory region */ 3029 /* Register new memory region */
3027 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 3030 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
3028 if (ret) { 3031 if (ret) {
3029 ehea_error("register MR failed - driver" 3032 pr_err("register MR failed - driver inoperable!\n");
3030 " inoperable!");
3031 goto out; 3033 goto out;
3032 } 3034 }
3033 3035
@@ -3050,7 +3052,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3050 } 3052 }
3051 } 3053 }
3052 } 3054 }
3053 ehea_info("re-initializing driver complete"); 3055 pr_info("re-initializing driver complete\n");
3054out: 3056out:
3055 return; 3057 return;
3056} 3058}
@@ -3103,7 +3105,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3103 /* (Try to) enable *jumbo frames */ 3105 /* (Try to) enable *jumbo frames */
3104 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 3106 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
3105 if (!cb4) { 3107 if (!cb4) {
3106 ehea_error("no mem for cb4"); 3108 pr_err("no mem for cb4\n");
3107 ret = -ENOMEM; 3109 ret = -ENOMEM;
3108 goto out; 3110 goto out;
3109 } else { 3111 } else {
@@ -3165,13 +3167,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
3165 3167
3166 ret = of_device_register(&port->ofdev); 3168 ret = of_device_register(&port->ofdev);
3167 if (ret) { 3169 if (ret) {
3168 ehea_error("failed to register device. ret=%d", ret); 3170 pr_err("failed to register device. ret=%d\n", ret);
3169 goto out; 3171 goto out;
3170 } 3172 }
3171 3173
3172 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 3174 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3173 if (ret) { 3175 if (ret) {
3174 ehea_error("failed to register attributes, ret=%d", ret); 3176 pr_err("failed to register attributes, ret=%d\n", ret);
3175 goto out_unreg_of_dev; 3177 goto out_unreg_of_dev;
3176 } 3178 }
3177 3179
@@ -3221,7 +3223,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3221 dev = alloc_etherdev(sizeof(struct ehea_port)); 3223 dev = alloc_etherdev(sizeof(struct ehea_port));
3222 3224
3223 if (!dev) { 3225 if (!dev) {
3224 ehea_error("no mem for net_device"); 3226 pr_err("no mem for net_device\n");
3225 ret = -ENOMEM; 3227 ret = -ENOMEM;
3226 goto out_err; 3228 goto out_err;
3227 } 3229 }
@@ -3268,11 +3270,14 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3268 | NETIF_F_LLTX; 3270 | NETIF_F_LLTX;
3269 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3271 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3270 3272
3273 if (use_lro)
3274 dev->features |= NETIF_F_LRO;
3275
3271 INIT_WORK(&port->reset_task, ehea_reset_port); 3276 INIT_WORK(&port->reset_task, ehea_reset_port);
3272 3277
3273 ret = register_netdev(dev); 3278 ret = register_netdev(dev);
3274 if (ret) { 3279 if (ret) {
3275 ehea_error("register_netdev failed. ret=%d", ret); 3280 pr_err("register_netdev failed. ret=%d\n", ret);
3276 goto out_unreg_port; 3281 goto out_unreg_port;
3277 } 3282 }
3278 3283
@@ -3280,11 +3285,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3280 3285
3281 ret = ehea_get_jumboframe_status(port, &jumbo); 3286 ret = ehea_get_jumboframe_status(port, &jumbo);
3282 if (ret) 3287 if (ret)
3283 ehea_error("failed determining jumbo frame status for %s", 3288 netdev_err(dev, "failed determining jumbo frame status\n");
3284 port->netdev->name);
3285 3289
3286 ehea_info("%s: Jumbo frames are %sabled", dev->name, 3290 netdev_info(dev, "Jumbo frames are %sabled\n",
3287 jumbo == 1 ? "en" : "dis"); 3291 jumbo == 1 ? "en" : "dis");
3288 3292
3289 adapter->active_ports++; 3293 adapter->active_ports++;
3290 3294
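
Besides the logging changes, this hunk exposes the driver's existing use_lro module parameter (defined elsewhere in the file) through the netdev feature flags: with LRO enabled, NETIF_F_LRO is now advertised in dev->features, so the offload is visible to userspace tools. A minimal sketch of the pattern, with an illustrative parameter:

	#include <linux/module.h>
	#include <linux/netdevice.h>

	static int use_lro;			/* illustrative module parameter */
	module_param(use_lro, int, 0444);

	static void setup_port_features(struct net_device *dev)
	{
		if (use_lro)
			dev->features |= NETIF_F_LRO;
	}
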
@@ -3300,14 +3304,16 @@ out_free_ethdev:
3300 free_netdev(dev); 3304 free_netdev(dev);
3301 3305
3302out_err: 3306out_err:
3303 ehea_error("setting up logical port with id=%d failed, ret=%d", 3307 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3304 logical_port_id, ret); 3308 logical_port_id, ret);
3305 return NULL; 3309 return NULL;
3306} 3310}
3307 3311
3308static void ehea_shutdown_single_port(struct ehea_port *port) 3312static void ehea_shutdown_single_port(struct ehea_port *port)
3309{ 3313{
3310 struct ehea_adapter *adapter = port->adapter; 3314 struct ehea_adapter *adapter = port->adapter;
3315
3316 cancel_work_sync(&port->reset_task);
3311 unregister_netdev(port->netdev); 3317 unregister_netdev(port->netdev);
3312 ehea_unregister_port(port); 3318 ehea_unregister_port(port);
3313 kfree(port->mc_list); 3319 kfree(port->mc_list);
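
ehea_shutdown_single_port() now cancels the port's own reset_task with cancel_work_sync() before unregister_netdev(), guaranteeing the work item is neither running nor pending when the netdev goes away. This per-object cancellation is what allows the global flush_scheduled_work() calls to be dropped from ehea_remove() and ehea_module_exit() further down. Sketch with illustrative names:

	#include <linux/workqueue.h>

	struct demo_port {
		struct work_struct reset_task;
		/* ... */
	};

	static void demo_shutdown_port(struct demo_port *port)
	{
		/* waits for a running instance and blocks re-queueing races */
		cancel_work_sync(&port->reset_task);
		/* now safe to unregister/free what the work might touch */
	}
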
@@ -3329,13 +3335,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3329 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3335 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3330 NULL); 3336 NULL);
3331 if (!dn_log_port_id) { 3337 if (!dn_log_port_id) {
3332 ehea_error("bad device node: eth_dn name=%s", 3338 pr_err("bad device node: eth_dn name=%s\n",
3333 eth_dn->full_name); 3339 eth_dn->full_name);
3334 continue; 3340 continue;
3335 } 3341 }
3336 3342
3337 if (ehea_add_adapter_mr(adapter)) { 3343 if (ehea_add_adapter_mr(adapter)) {
3338 ehea_error("creating MR failed"); 3344 pr_err("creating MR failed\n");
3339 of_node_put(eth_dn); 3345 of_node_put(eth_dn);
3340 return -EIO; 3346 return -EIO;
3341 } 3347 }
@@ -3344,9 +3350,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3344 *dn_log_port_id, 3350 *dn_log_port_id,
3345 eth_dn); 3351 eth_dn);
3346 if (adapter->port[i]) 3352 if (adapter->port[i])
3347 ehea_info("%s -> logical port id #%d", 3353 netdev_info(adapter->port[i]->netdev,
3348 adapter->port[i]->netdev->name, 3354 "logical port id #%d\n", *dn_log_port_id);
3349 *dn_log_port_id);
3350 else 3355 else
3351 ehea_remove_adapter_mr(adapter); 3356 ehea_remove_adapter_mr(adapter);
3352 3357
@@ -3391,21 +3396,20 @@ static ssize_t ehea_probe_port(struct device *dev,
3391 port = ehea_get_port(adapter, logical_port_id); 3396 port = ehea_get_port(adapter, logical_port_id);
3392 3397
3393 if (port) { 3398 if (port) {
3394 ehea_info("adding port with logical port id=%d failed. port " 3399 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3395 "already configured as %s.", logical_port_id, 3400 logical_port_id);
3396 port->netdev->name);
3397 return -EINVAL; 3401 return -EINVAL;
3398 } 3402 }
3399 3403
3400 eth_dn = ehea_get_eth_dn(adapter, logical_port_id); 3404 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3401 3405
3402 if (!eth_dn) { 3406 if (!eth_dn) {
3403 ehea_info("no logical port with id %d found", logical_port_id); 3407 pr_info("no logical port with id %d found\n", logical_port_id);
3404 return -EINVAL; 3408 return -EINVAL;
3405 } 3409 }
3406 3410
3407 if (ehea_add_adapter_mr(adapter)) { 3411 if (ehea_add_adapter_mr(adapter)) {
3408 ehea_error("creating MR failed"); 3412 pr_err("creating MR failed\n");
3409 return -EIO; 3413 return -EIO;
3410 } 3414 }
3411 3415
@@ -3420,8 +3424,8 @@ static ssize_t ehea_probe_port(struct device *dev,
3420 break; 3424 break;
3421 } 3425 }
3422 3426
3423 ehea_info("added %s (logical port id=%d)", port->netdev->name, 3427 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3424 logical_port_id); 3428 logical_port_id);
3425 } else { 3429 } else {
3426 ehea_remove_adapter_mr(adapter); 3430 ehea_remove_adapter_mr(adapter);
3427 return -EIO; 3431 return -EIO;
@@ -3444,8 +3448,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3444 port = ehea_get_port(adapter, logical_port_id); 3448 port = ehea_get_port(adapter, logical_port_id);
3445 3449
3446 if (port) { 3450 if (port) {
3447 ehea_info("removed %s (logical port id=%d)", port->netdev->name, 3451 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3448 logical_port_id); 3452 logical_port_id);
3449 3453
3450 ehea_shutdown_single_port(port); 3454 ehea_shutdown_single_port(port);
3451 3455
@@ -3455,8 +3459,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3455 break; 3459 break;
3456 } 3460 }
3457 } else { 3461 } else {
3458 ehea_error("removing port with logical port id=%d failed. port " 3462 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3459 "not configured.", logical_port_id); 3463 logical_port_id);
3460 return -EINVAL; 3464 return -EINVAL;
3461 } 3465 }
3462 3466
@@ -3493,7 +3497,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3493 int ret; 3497 int ret;
3494 3498
3495 if (!dev || !dev->dev.of_node) { 3499 if (!dev || !dev->dev.of_node) {
3496 ehea_error("Invalid ibmebus device probed"); 3500 pr_err("Invalid ibmebus device probed\n");
3497 return -EINVAL; 3501 return -EINVAL;
3498 } 3502 }
3499 3503
@@ -3597,8 +3601,6 @@ static int __devexit ehea_remove(struct platform_device *dev)
3597 3601
3598 ehea_remove_device_sysfs(dev); 3602 ehea_remove_device_sysfs(dev);
3599 3603
3600 flush_scheduled_work();
3601
3602 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3604 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3603 tasklet_kill(&adapter->neq_tasklet); 3605 tasklet_kill(&adapter->neq_tasklet);
3604 3606
@@ -3641,21 +3643,21 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3641 3643
3642 switch (action) { 3644 switch (action) {
3643 case MEM_CANCEL_OFFLINE: 3645 case MEM_CANCEL_OFFLINE:
3644 ehea_info("memory offlining canceled"); 3646 pr_info("memory offlining canceled\n");
3645 /* Readd canceled memory block */ 3647 /* Readd canceled memory block */
3646 case MEM_ONLINE: 3648 case MEM_ONLINE:
3647 ehea_info("memory is going online"); 3649 pr_info("memory is going online\n");
3648 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3650 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3649 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3651 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3650 goto out_unlock; 3652 goto out_unlock;
3651 ehea_rereg_mrs(NULL); 3653 ehea_rereg_mrs();
3652 break; 3654 break;
3653 case MEM_GOING_OFFLINE: 3655 case MEM_GOING_OFFLINE:
3654 ehea_info("memory is going offline"); 3656 pr_info("memory is going offline\n");
3655 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3657 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3656 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3658 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3657 goto out_unlock; 3659 goto out_unlock;
3658 ehea_rereg_mrs(NULL); 3660 ehea_rereg_mrs();
3659 break; 3661 break;
3660 default: 3662 default:
3661 break; 3663 break;
@@ -3677,7 +3679,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
3677 unsigned long action, void *unused) 3679 unsigned long action, void *unused)
3678{ 3680{
3679 if (action == SYS_RESTART) { 3681 if (action == SYS_RESTART) {
3680 ehea_info("Reboot: freeing all eHEA resources"); 3682 pr_info("Reboot: freeing all eHEA resources\n");
3681 ibmebus_unregister_driver(&ehea_driver); 3683 ibmebus_unregister_driver(&ehea_driver);
3682 } 3684 }
3683 return NOTIFY_DONE; 3685 return NOTIFY_DONE;
@@ -3693,22 +3695,22 @@ static int check_module_parm(void)
3693 3695
3694 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || 3696 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3695 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { 3697 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3696 ehea_info("Bad parameter: rq1_entries"); 3698 pr_info("Bad parameter: rq1_entries\n");
3697 ret = -EINVAL; 3699 ret = -EINVAL;
3698 } 3700 }
3699 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || 3701 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3700 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { 3702 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3701 ehea_info("Bad parameter: rq2_entries"); 3703 pr_info("Bad parameter: rq2_entries\n");
3702 ret = -EINVAL; 3704 ret = -EINVAL;
3703 } 3705 }
3704 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || 3706 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3705 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { 3707 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3706 ehea_info("Bad parameter: rq3_entries"); 3708 pr_info("Bad parameter: rq3_entries\n");
3707 ret = -EINVAL; 3709 ret = -EINVAL;
3708 } 3710 }
3709 if ((sq_entries < EHEA_MIN_ENTRIES_QP) || 3711 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3710 (sq_entries > EHEA_MAX_ENTRIES_SQ)) { 3712 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3711 ehea_info("Bad parameter: sq_entries"); 3713 pr_info("Bad parameter: sq_entries\n");
3712 ret = -EINVAL; 3714 ret = -EINVAL;
3713 } 3715 }
3714 3716
@@ -3728,11 +3730,8 @@ int __init ehea_module_init(void)
3728{ 3730{
3729 int ret; 3731 int ret;
3730 3732
3731 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", 3733 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3732 DRV_VERSION);
3733
3734 3734
3735 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3736 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); 3735 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3737 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); 3736 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3738 3737
@@ -3749,27 +3748,27 @@ int __init ehea_module_init(void)
3749 3748
3750 ret = register_reboot_notifier(&ehea_reboot_nb); 3749 ret = register_reboot_notifier(&ehea_reboot_nb);
3751 if (ret) 3750 if (ret)
3752 ehea_info("failed registering reboot notifier"); 3751 pr_info("failed registering reboot notifier\n");
3753 3752
3754 ret = register_memory_notifier(&ehea_mem_nb); 3753 ret = register_memory_notifier(&ehea_mem_nb);
3755 if (ret) 3754 if (ret)
3756 ehea_info("failed registering memory remove notifier"); 3755 pr_info("failed registering memory remove notifier\n");
3757 3756
3758 ret = crash_shutdown_register(ehea_crash_handler); 3757 ret = crash_shutdown_register(ehea_crash_handler);
3759 if (ret) 3758 if (ret)
3760 ehea_info("failed registering crash handler"); 3759 pr_info("failed registering crash handler\n");
3761 3760
3762 ret = ibmebus_register_driver(&ehea_driver); 3761 ret = ibmebus_register_driver(&ehea_driver);
3763 if (ret) { 3762 if (ret) {
3764 ehea_error("failed registering eHEA device driver on ebus"); 3763 pr_err("failed registering eHEA device driver on ebus\n");
3765 goto out2; 3764 goto out2;
3766 } 3765 }
3767 3766
3768 ret = driver_create_file(&ehea_driver.driver, 3767 ret = driver_create_file(&ehea_driver.driver,
3769 &driver_attr_capabilities); 3768 &driver_attr_capabilities);
3770 if (ret) { 3769 if (ret) {
3771 ehea_error("failed to register capabilities attribute, ret=%d", 3770 pr_err("failed to register capabilities attribute, ret=%d\n",
3772 ret); 3771 ret);
3773 goto out3; 3772 goto out3;
3774 } 3773 }
3775 3774
@@ -3789,13 +3788,12 @@ static void __exit ehea_module_exit(void)
3789{ 3788{
3790 int ret; 3789 int ret;
3791 3790
3792 flush_scheduled_work();
3793 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3791 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3794 ibmebus_unregister_driver(&ehea_driver); 3792 ibmebus_unregister_driver(&ehea_driver);
3795 unregister_reboot_notifier(&ehea_reboot_nb); 3793 unregister_reboot_notifier(&ehea_reboot_nb);
3796 ret = crash_shutdown_unregister(ehea_crash_handler); 3794 ret = crash_shutdown_unregister(ehea_crash_handler);
3797 if (ret) 3795 if (ret)
3798 ehea_info("failed unregistering crash handler"); 3796 pr_info("failed unregistering crash handler\n");
3799 unregister_memory_notifier(&ehea_mem_nb); 3797 unregister_memory_notifier(&ehea_mem_nb);
3800 kfree(ehea_fw_handles.arr); 3798 kfree(ehea_fw_handles.arr);
3801 kfree(ehea_bcmc_regs.arr); 3799 kfree(ehea_bcmc_regs.arr);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 8fe9dcaa7538..0506967b9044 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ehea_phyp.h" 31#include "ehea_phyp.h"
30 32
31 33
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode,
67 } 69 }
68 70
69 if (ret < H_SUCCESS) 71 if (ret < H_SUCCESS)
70 ehea_error("opcode=%lx ret=%lx" 72 pr_err("opcode=%lx ret=%lx"
71 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 73 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
72 " arg5=%lx arg6=%lx arg7=%lx ", 74 " arg5=%lx arg6=%lx arg7=%lx\n",
73 opcode, ret, 75 opcode, ret,
74 arg1, arg2, arg3, arg4, arg5, 76 arg1, arg2, arg3, arg4, arg5, arg6, arg7);
75 arg6, arg7);
76 77
77 return ret; 78 return ret;
78 } 79 }
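
The long hcall error report relies on C's adjacent-string-literal concatenation: the format stays split across source lines yet prints as one message, with the "\n" carried by the final fragment. A sketch with a reduced argument list:

	#include <linux/printk.h>

	static void report_hcall_failure(unsigned long opcode, long ret,
					 unsigned long arg1, unsigned long arg2)
	{
		/* adjacent literals concatenate into a single format string */
		pr_err("opcode=%lx ret=%lx"
		       " arg1=%lx arg2=%lx\n",
		       opcode, ret, arg1, arg2);
	}
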
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode,
114 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) 115 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
115 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) 116 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
116 && (arg3 == H_PORT_CB7_DUCQPN))))) 117 && (arg3 == H_PORT_CB7_DUCQPN)))))
117 ehea_error("opcode=%lx ret=%lx" 118 pr_err("opcode=%lx ret=%lx"
118 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 119 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
119 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" 120 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
120 " arg9=%lx" 121 " arg9=%lx"
121 " out1=%lx out2=%lx out3=%lx out4=%lx" 122 " out1=%lx out2=%lx out3=%lx out4=%lx"
122 " out5=%lx out6=%lx out7=%lx out8=%lx" 123 " out5=%lx out6=%lx out7=%lx out8=%lx"
123 " out9=%lx", 124 " out9=%lx\n",
124 opcode, ret, 125 opcode, ret,
125 arg1, arg2, arg3, arg4, arg5, 126 arg1, arg2, arg3, arg4, arg5,
126 arg6, arg7, arg8, arg9, 127 arg6, arg7, arg8, arg9,
127 outs[0], outs[1], outs[2], outs[3], 128 outs[0], outs[1], outs[2], outs[3], outs[4],
128 outs[4], outs[5], outs[6], outs[7], 129 outs[5], outs[6], outs[7], outs[8]);
129 outs[8]);
130 return ret; 130 return ret;
131 } 131 }
132 132
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
515 const u64 log_pageaddr, const u64 count) 515 const u64 log_pageaddr, const u64 count)
516{ 516{
517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { 517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
518 ehea_error("not on pageboundary"); 518 pr_err("not on pageboundary\n");
519 return H_PARAMETER; 519 return H_PARAMETER;
520 } 520 }
521 521
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 89128b6373e3..cd44bb8017d9 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/mm.h> 31#include <linux/mm.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include "ehea.h" 33#include "ehea.h"
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue)
45 queue->current_q_offset -= queue->pagesize; 47 queue->current_q_offset -= queue->pagesize;
46 retvalue = NULL; 48 retvalue = NULL;
47 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { 49 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
48 ehea_error("not on pageboundary"); 50 pr_err("not on pageboundary\n");
49 retvalue = NULL; 51 retvalue = NULL;
50 } 52 }
51 return retvalue; 53 return retvalue;
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
58 int i, k; 60 int i, k;
59 61
60 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { 62 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
61 ehea_error("pagesize conflict! kernel pagesize=%d, " 63 pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
62 "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize); 64 (int)PAGE_SIZE, (int)pagesize);
63 return -EINVAL; 65 return -EINVAL;
64 } 66 }
65 67
66 queue->queue_length = nr_of_pages * pagesize; 68 queue->queue_length = nr_of_pages * pagesize;
67 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); 69 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
68 if (!queue->queue_pages) { 70 if (!queue->queue_pages) {
69 ehea_error("no mem for queue_pages"); 71 pr_err("no mem for queue_pages\n");
70 return -ENOMEM; 72 return -ENOMEM;
71 } 73 }
72 74
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
130 132
131 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 133 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
132 if (!cq) { 134 if (!cq) {
133 ehea_error("no mem for cq"); 135 pr_err("no mem for cq\n");
134 goto out_nomem; 136 goto out_nomem;
135 } 137 }
136 138
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
147 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, 149 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
148 &cq->fw_handle, &cq->epas); 150 &cq->fw_handle, &cq->epas);
149 if (hret != H_SUCCESS) { 151 if (hret != H_SUCCESS) {
150 ehea_error("alloc_resource_cq failed"); 152 pr_err("alloc_resource_cq failed\n");
151 goto out_freemem; 153 goto out_freemem;
152 } 154 }
153 155
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
159 for (counter = 0; counter < cq->attr.nr_pages; counter++) { 161 for (counter = 0; counter < cq->attr.nr_pages; counter++) {
160 vpage = hw_qpageit_get_inc(&cq->hw_queue); 162 vpage = hw_qpageit_get_inc(&cq->hw_queue);
161 if (!vpage) { 163 if (!vpage) {
162 ehea_error("hw_qpageit_get_inc failed"); 164 pr_err("hw_qpageit_get_inc failed\n");
163 goto out_kill_hwq; 165 goto out_kill_hwq;
164 } 166 }
165 167
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
168 0, EHEA_CQ_REGISTER_ORIG, 170 0, EHEA_CQ_REGISTER_ORIG,
169 cq->fw_handle, rpage, 1); 171 cq->fw_handle, rpage, 1);
170 if (hret < H_SUCCESS) { 172 if (hret < H_SUCCESS) {
171 ehea_error("register_rpage_cq failed ehea_cq=%p " 173 pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
172 "hret=%llx counter=%i act_pages=%i", 174 cq, hret, counter, cq->attr.nr_pages);
173 cq, hret, counter, cq->attr.nr_pages);
174 goto out_kill_hwq; 175 goto out_kill_hwq;
175 } 176 }
176 177
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
178 vpage = hw_qpageit_get_inc(&cq->hw_queue); 179 vpage = hw_qpageit_get_inc(&cq->hw_queue);
179 180
180 if ((hret != H_SUCCESS) || (vpage)) { 181 if ((hret != H_SUCCESS) || (vpage)) {
181 ehea_error("registration of pages not " 182 pr_err("registration of pages not complete hret=%llx\n",
182 "complete hret=%llx\n", hret); 183 hret);
183 goto out_kill_hwq; 184 goto out_kill_hwq;
184 } 185 }
185 } else { 186 } else {
186 if (hret != H_PAGE_REGISTERED) { 187 if (hret != H_PAGE_REGISTERED) {
187 ehea_error("CQ: registration of page failed " 188 pr_err("CQ: registration of page failed hret=%llx\n",
188 "hret=%llx\n", hret); 189 hret);
189 goto out_kill_hwq; 190 goto out_kill_hwq;
190 } 191 }
191 } 192 }
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq)
241 } 242 }
242 243
243 if (hret != H_SUCCESS) { 244 if (hret != H_SUCCESS) {
244 ehea_error("destroy CQ failed"); 245 pr_err("destroy CQ failed\n");
245 return -EIO; 246 return -EIO;
246 } 247 }
247 248
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
259 260
260 eq = kzalloc(sizeof(*eq), GFP_KERNEL); 261 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
261 if (!eq) { 262 if (!eq) {
262 ehea_error("no mem for eq"); 263 pr_err("no mem for eq\n");
263 return NULL; 264 return NULL;
264 } 265 }
265 266
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
272 hret = ehea_h_alloc_resource_eq(adapter->handle, 273 hret = ehea_h_alloc_resource_eq(adapter->handle,
273 &eq->attr, &eq->fw_handle); 274 &eq->attr, &eq->fw_handle);
274 if (hret != H_SUCCESS) { 275 if (hret != H_SUCCESS) {
275 ehea_error("alloc_resource_eq failed"); 276 pr_err("alloc_resource_eq failed\n");
276 goto out_freemem; 277 goto out_freemem;
277 } 278 }
278 279
279 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, 280 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
280 EHEA_PAGESIZE, sizeof(struct ehea_eqe)); 281 EHEA_PAGESIZE, sizeof(struct ehea_eqe));
281 if (ret) { 282 if (ret) {
282 ehea_error("can't allocate eq pages"); 283 pr_err("can't allocate eq pages\n");
283 goto out_freeres; 284 goto out_freeres;
284 } 285 }
285 286
286 for (i = 0; i < eq->attr.nr_pages; i++) { 287 for (i = 0; i < eq->attr.nr_pages; i++) {
287 vpage = hw_qpageit_get_inc(&eq->hw_queue); 288 vpage = hw_qpageit_get_inc(&eq->hw_queue);
288 if (!vpage) { 289 if (!vpage) {
289 ehea_error("hw_qpageit_get_inc failed"); 290 pr_err("hw_qpageit_get_inc failed\n");
290 hret = H_RESOURCE; 291 hret = H_RESOURCE;
291 goto out_kill_hwq; 292 goto out_kill_hwq;
292 } 293 }
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
370 } 371 }
371 372
372 if (hret != H_SUCCESS) { 373 if (hret != H_SUCCESS) {
373 ehea_error("destroy EQ failed"); 374 pr_err("destroy EQ failed\n");
374 return -EIO; 375 return -EIO;
375 } 376 }
376 377
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
395 for (cnt = 0; cnt < nr_pages; cnt++) { 396 for (cnt = 0; cnt < nr_pages; cnt++) {
396 vpage = hw_qpageit_get_inc(hw_queue); 397 vpage = hw_qpageit_get_inc(hw_queue);
397 if (!vpage) { 398 if (!vpage) {
398 ehea_error("hw_qpageit_get_inc failed"); 399 pr_err("hw_qpageit_get_inc failed\n");
399 goto out_kill_hwq; 400 goto out_kill_hwq;
400 } 401 }
401 rpage = virt_to_abs(vpage); 402 rpage = virt_to_abs(vpage);
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
403 0, h_call_q_selector, 404 0, h_call_q_selector,
404 qp->fw_handle, rpage, 1); 405 qp->fw_handle, rpage, 1);
405 if (hret < H_SUCCESS) { 406 if (hret < H_SUCCESS) {
406 ehea_error("register_rpage_qp failed"); 407 pr_err("register_rpage_qp failed\n");
407 goto out_kill_hwq; 408 goto out_kill_hwq;
408 } 409 }
409 } 410 }
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
432 433
433 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 434 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
434 if (!qp) { 435 if (!qp) {
435 ehea_error("no mem for qp"); 436 pr_err("no mem for qp\n");
436 return NULL; 437 return NULL;
437 } 438 }
438 439
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
441 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, 442 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
442 &qp->fw_handle, &qp->epas); 443 &qp->fw_handle, &qp->epas);
443 if (hret != H_SUCCESS) { 444 if (hret != H_SUCCESS) {
444 ehea_error("ehea_h_alloc_resource_qp failed"); 445 pr_err("ehea_h_alloc_resource_qp failed\n");
445 goto out_freemem; 446 goto out_freemem;
446 } 447 }
447 448
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
455 init_attr->act_wqe_size_enc_sq, adapter, 456 init_attr->act_wqe_size_enc_sq, adapter,
456 0); 457 0);
457 if (ret) { 458 if (ret) {
458 ehea_error("can't register for sq ret=%x", ret); 459 pr_err("can't register for sq ret=%x\n", ret);
459 goto out_freeres; 460 goto out_freeres;
460 } 461 }
461 462
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
465 init_attr->act_wqe_size_enc_rq1, 466 init_attr->act_wqe_size_enc_rq1,
466 adapter, 1); 467 adapter, 1);
467 if (ret) { 468 if (ret) {
468 ehea_error("can't register for rq1 ret=%x", ret); 469 pr_err("can't register for rq1 ret=%x\n", ret);
469 goto out_kill_hwsq; 470 goto out_kill_hwsq;
470 } 471 }
471 472
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
476 init_attr->act_wqe_size_enc_rq2, 477 init_attr->act_wqe_size_enc_rq2,
477 adapter, 2); 478 adapter, 2);
478 if (ret) { 479 if (ret) {
479 ehea_error("can't register for rq2 ret=%x", ret); 480 pr_err("can't register for rq2 ret=%x\n", ret);
480 goto out_kill_hwr1q; 481 goto out_kill_hwr1q;
481 } 482 }
482 } 483 }
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
488 init_attr->act_wqe_size_enc_rq3, 489 init_attr->act_wqe_size_enc_rq3,
489 adapter, 3); 490 adapter, 3);
490 if (ret) { 491 if (ret) {
491 ehea_error("can't register for rq3 ret=%x", ret); 492 pr_err("can't register for rq3 ret=%x\n", ret);
492 goto out_kill_hwr2q; 493 goto out_kill_hwr2q;
493 } 494 }
494 } 495 }
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
553 } 554 }
554 555
555 if (hret != H_SUCCESS) { 556 if (hret != H_SUCCESS) {
556 ehea_error("destroy QP failed"); 557 pr_err("destroy QP failed\n");
557 return -EIO; 558 return -EIO;
558 } 559 }
559 560
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
842 (hret != H_PAGE_REGISTERED)) { 843 (hret != H_PAGE_REGISTERED)) {
843 ehea_h_free_resource(adapter->handle, mr->handle, 844 ehea_h_free_resource(adapter->handle, mr->handle,
844 FORCE_FREE); 845 FORCE_FREE);
845 ehea_error("register_rpage_mr failed"); 846 pr_err("register_rpage_mr failed\n");
846 return hret; 847 return hret;
847 } 848 }
848 } 849 }
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
896 897
897 pt = (void *)get_zeroed_page(GFP_KERNEL); 898 pt = (void *)get_zeroed_page(GFP_KERNEL);
898 if (!pt) { 899 if (!pt) {
899 ehea_error("no mem"); 900 pr_err("no mem\n");
900 ret = -ENOMEM; 901 ret = -ENOMEM;
901 goto out; 902 goto out;
902 } 903 }
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
906 &mr->handle, &mr->lkey); 907 &mr->handle, &mr->lkey);
907 908
908 if (hret != H_SUCCESS) { 909 if (hret != H_SUCCESS) {
909 ehea_error("alloc_resource_mr failed"); 910 pr_err("alloc_resource_mr failed\n");
910 ret = -EIO; 911 ret = -EIO;
911 goto out; 912 goto out;
912 } 913 }
913 914
914 if (!ehea_bmap) { 915 if (!ehea_bmap) {
915 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 916 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
916 ehea_error("no busmap available"); 917 pr_err("no busmap available\n");
917 ret = -EIO; 918 ret = -EIO;
918 goto out; 919 goto out;
919 } 920 }
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
929 930
930 if (hret != H_SUCCESS) { 931 if (hret != H_SUCCESS) {
931 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 932 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
932 ehea_error("registering mr failed"); 933 pr_err("registering mr failed\n");
933 ret = -EIO; 934 ret = -EIO;
934 goto out; 935 goto out;
935 } 936 }
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr)
952 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, 953 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
953 FORCE_FREE); 954 FORCE_FREE);
954 if (hret != H_SUCCESS) { 955 if (hret != H_SUCCESS) {
955 ehea_error("destroy MR failed"); 956 pr_err("destroy MR failed\n");
956 return -EIO; 957 return -EIO;
957 } 958 }
958 959
@@ -987,14 +988,14 @@ void print_error_data(u64 *data)
987 length = EHEA_PAGESIZE; 988 length = EHEA_PAGESIZE;
988 989
989 if (type == EHEA_AER_RESTYPE_QP) 990 if (type == EHEA_AER_RESTYPE_QP)
990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " 991 pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
991 "port=%llX", resource, data[6], data[12], data[22]); 992 resource, data[6], data[12], data[22]);
992 else if (type == EHEA_AER_RESTYPE_CQ) 993 else if (type == EHEA_AER_RESTYPE_CQ)
993 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, 994 pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
994 data[6]); 995 resource, data[6]);
995 else if (type == EHEA_AER_RESTYPE_EQ) 996 else if (type == EHEA_AER_RESTYPE_EQ)
996 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, 997 pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
997 data[6]); 998 resource, data[6]);
998 999
999 ehea_dump(data, length, "error data"); 1000 ehea_dump(data, length, "error data");
1000} 1001}
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1008 1009
1009 rblock = (void *)get_zeroed_page(GFP_KERNEL); 1010 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1010 if (!rblock) { 1011 if (!rblock) {
1011 ehea_error("Cannot allocate rblock memory."); 1012 pr_err("Cannot allocate rblock memory\n");
1012 goto out; 1013 goto out;
1013 } 1014 }
1014 1015
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1020 *aerr = rblock[12]; 1021 *aerr = rblock[12];
1021 print_error_data(rblock); 1022 print_error_data(rblock);
1022 } else if (ret == H_R_STATE) { 1023 } else if (ret == H_R_STATE) {
1023 ehea_error("No error data available: %llX.", res_handle); 1024 pr_err("No error data available: %llX\n", res_handle);
1024 } else 1025 } else
1025 ehea_error("Error data could not be fetched: %llX", res_handle); 1026 pr_err("Error data could not be fetched: %llX\n", res_handle);
1026 1027
1027 free_page((unsigned long)rblock); 1028 free_page((unsigned long)rblock);
1028out: 1029out:
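
The ehea hunks above replace the driver-private ehea_error() macro with the kernel's pr_err(), joining the previously split format strings into single greppable lines and adding the explicit trailing newline the generic helpers expect. A minimal sketch of the pattern, assuming (as the full ehea patch does outside this excerpt) that pr_fmt() is defined before the first include so every message carries the module-name prefix:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/mm.h>              /* PAGE_SIZE */

    static int check_pagesize(unsigned long pagesize) /* hypothetical helper */
    {
        if (pagesize > PAGE_SIZE) {
            /* one unsplit format string, explicit '\n' */
            pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
                   (int)PAGE_SIZE, (int)pagesize);
            return -EINVAL;
        }
        return 0;
    }
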
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c5527..a937f49d9db7 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.6" 35#define DRV_VERSION "1.4.1.10"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
@@ -61,6 +61,8 @@ struct enic_port_profile {
61 char name[PORT_PROFILE_MAX]; 61 char name[PORT_PROFILE_MAX];
62 u8 instance_uuid[PORT_UUID_MAX]; 62 u8 instance_uuid[PORT_UUID_MAX];
63 u8 host_uuid[PORT_UUID_MAX]; 63 u8 host_uuid[PORT_UUID_MAX];
64 u8 vf_mac[ETH_ALEN];
65 u8 mac_addr[ETH_ALEN];
64}; 66};
65 67
66/* Per-instance private data structure */ 68/* Per-instance private data structure */
@@ -78,8 +80,10 @@ struct enic {
78 spinlock_t devcmd_lock; 80 spinlock_t devcmd_lock;
79 u8 mac_addr[ETH_ALEN]; 81 u8 mac_addr[ETH_ALEN];
80 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 82 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
83 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
81 unsigned int flags; 84 unsigned int flags;
82 unsigned int mc_count; 85 unsigned int mc_count;
86 unsigned int uc_count;
83 int csum_rx_enabled; 87 int csum_rx_enabled;
84 u32 port_mtu; 88 u32 port_mtu;
85 u32 rx_coalesce_usecs; 89 u32 rx_coalesce_usecs;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..77d91381a74d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1002,7 +1002,7 @@ static int enic_dev_packet_filter(struct enic *enic, int directed,
1002 return err; 1002 return err;
1003} 1003}
1004 1004
1005static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr) 1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{ 1006{
1007 int err; 1007 int err;
1008 1008
@@ -1013,7 +1013,7 @@ static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
1013 return err; 1013 return err;
1014} 1014}
1015 1015
1016static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr) 1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{ 1017{
1018 int err; 1018 int err;
1019 1019
@@ -1024,29 +1024,19 @@ static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
1024 return err; 1024 return err;
1025} 1025}
1026 1026
1027/* netif_tx_lock held, BHs disabled */ 1027static void enic_add_multicast_addr_list(struct enic *enic)
1028static void enic_set_multicast_list(struct net_device *netdev)
1029{ 1028{
1030 struct enic *enic = netdev_priv(netdev); 1029 struct net_device *netdev = enic->netdev;
1031 struct netdev_hw_addr *ha; 1030 struct netdev_hw_addr *ha;
1032 int directed = 1;
1033 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
1034 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
1035 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
1036 unsigned int mc_count = netdev_mc_count(netdev); 1031 unsigned int mc_count = netdev_mc_count(netdev);
1037 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
1038 mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
1039 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
1040 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 1032 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
1041 unsigned int i, j; 1033 unsigned int i, j;
1042 1034
1043 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) 1035 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
1036 netdev_warn(netdev, "Registering only %d out of %d "
1037 "multicast addresses\n",
1038 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
1044 mc_count = ENIC_MULTICAST_PERFECT_FILTERS; 1039 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
1045
1046 if (enic->flags != flags) {
1047 enic->flags = flags;
1048 enic_dev_packet_filter(enic, directed,
1049 multicast, broadcast, promisc, allmulti);
1050 } 1040 }
1051 1041
1052 /* Is there an easier way? Trying to minimize to 1042 /* Is there an easier way? Trying to minimize to
@@ -1068,7 +1058,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
1068 mc_addr[j]) == 0) 1058 mc_addr[j]) == 0)
1069 break; 1059 break;
1070 if (j == mc_count) 1060 if (j == mc_count)
1071 enic_dev_del_multicast_addr(enic, enic->mc_addr[i]); 1061 enic_dev_del_addr(enic, enic->mc_addr[i]);
1072 } 1062 }
1073 1063
1074 for (i = 0; i < mc_count; i++) { 1064 for (i = 0; i < mc_count; i++) {
@@ -1077,7 +1067,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
1077 enic->mc_addr[j]) == 0) 1067 enic->mc_addr[j]) == 0)
1078 break; 1068 break;
1079 if (j == enic->mc_count) 1069 if (j == enic->mc_count)
1080 enic_dev_add_multicast_addr(enic, mc_addr[i]); 1070 enic_dev_add_addr(enic, mc_addr[i]);
1081 } 1071 }
1082 1072
1083 /* Save the list to compare against next time 1073 /* Save the list to compare against next time
@@ -1089,6 +1079,89 @@ static void enic_set_multicast_list(struct net_device *netdev)
1089 enic->mc_count = mc_count; 1079 enic->mc_count = mc_count;
1090} 1080}
1091 1081
1082static void enic_add_unicast_addr_list(struct enic *enic)
1083{
1084 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha;
1086 unsigned int uc_count = netdev_uc_count(netdev);
1087 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
1088 unsigned int i, j;
1089
1090 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
1091 netdev_warn(netdev, "Registering only %d out of %d "
1092 "unicast addresses\n",
1093 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
1094 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
1095 }
1096
1097 /* Is there an easier way? Trying to minimize to
1098 * calls to add/del unicast addrs. We keep the
1099 * addrs from the last call in enic->uc_addr and
1100 * look for changes to add/del.
1101 */
1102
1103 i = 0;
1104 netdev_for_each_uc_addr(ha, netdev) {
1105 if (i == uc_count)
1106 break;
1107 memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
1108 }
1109
1110 for (i = 0; i < enic->uc_count; i++) {
1111 for (j = 0; j < uc_count; j++)
1112 if (compare_ether_addr(enic->uc_addr[i],
1113 uc_addr[j]) == 0)
1114 break;
1115 if (j == uc_count)
1116 enic_dev_del_addr(enic, enic->uc_addr[i]);
1117 }
1118
1119 for (i = 0; i < uc_count; i++) {
1120 for (j = 0; j < enic->uc_count; j++)
1121 if (compare_ether_addr(uc_addr[i],
1122 enic->uc_addr[j]) == 0)
1123 break;
1124 if (j == enic->uc_count)
1125 enic_dev_add_addr(enic, uc_addr[i]);
1126 }
1127
1128 /* Save the list to compare against next time
1129 */
1130
1131 for (i = 0; i < uc_count; i++)
1132 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
1133
1134 enic->uc_count = uc_count;
1135}
1136
1137/* netif_tx_lock held, BHs disabled */
1138static void enic_set_rx_mode(struct net_device *netdev)
1139{
1140 struct enic *enic = netdev_priv(netdev);
1141 int directed = 1;
1142 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
1143 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
1144 int promisc = (netdev->flags & IFF_PROMISC) ||
1145 netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
1146 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
1147 netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
1148 unsigned int flags = netdev->flags |
1149 (allmulti ? IFF_ALLMULTI : 0) |
1150 (promisc ? IFF_PROMISC : 0);
1151
1152 if (enic->flags != flags) {
1153 enic->flags = flags;
1154 enic_dev_packet_filter(enic, directed,
1155 multicast, broadcast, promisc, allmulti);
1156 }
1157
1158 if (!promisc) {
1159 enic_add_unicast_addr_list(enic);
1160 if (!allmulti)
1161 enic_add_multicast_addr_list(enic);
1162 }
1163}
1164
1092/* rtnl lock is held */ 1165/* rtnl lock is held */
1093static void enic_vlan_rx_register(struct net_device *netdev, 1166static void enic_vlan_rx_register(struct net_device *netdev,
1094 struct vlan_group *vlan_group) 1167 struct vlan_group *vlan_group)
@@ -1158,11 +1231,31 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1158 return err; 1231 return err;
1159} 1232}
1160 1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{
1236 struct enic *enic = netdev_priv(netdev);
1237
1238 if (vf != PORT_SELF_VF)
1239 return -EOPNOTSUPP;
1240
1241 /* Ignore the vf argument for now. We can assume the request
1242 * is coming on a vf.
1243 */
1244 if (is_valid_ether_addr(mac)) {
1245 memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
1246 return 0;
1247 } else
1248 return -EINVAL;
1249}
1250
1161static int enic_set_port_profile(struct enic *enic, u8 *mac) 1251static int enic_set_port_profile(struct enic *enic, u8 *mac)
1162{ 1252{
1163 struct vic_provinfo *vp; 1253 struct vic_provinfo *vp;
1164 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1254 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1255 u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
1165 char uuid_str[38]; 1256 char uuid_str[38];
1257 char client_mac_str[18];
1258 u8 *client_mac;
1166 int err; 1259 int err;
1167 1260
1168 err = enic_vnic_dev_deinit(enic); 1261 err = enic_vnic_dev_deinit(enic);
@@ -1180,46 +1273,63 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1180 return -EADDRNOTAVAIL; 1273 return -EADDRNOTAVAIL;
1181 1274
1182 vp = vic_provinfo_alloc(GFP_KERNEL, oui, 1275 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
1183 VIC_PROVINFO_LINUX_TYPE); 1276 VIC_PROVINFO_GENERIC_TYPE);
1184 if (!vp) 1277 if (!vp)
1185 return -ENOMEM; 1278 return -ENOMEM;
1186 1279
1187 vic_provinfo_add_tlv(vp, 1280 vic_provinfo_add_tlv(vp,
1188 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, 1281 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
1189 strlen(enic->pp.name) + 1, enic->pp.name); 1282 strlen(enic->pp.name) + 1, enic->pp.name);
1190 1283
1284 if (!is_zero_ether_addr(enic->pp.mac_addr))
1285 client_mac = enic->pp.mac_addr;
1286 else
1287 client_mac = mac;
1288
1289 vic_provinfo_add_tlv(vp,
1290 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
1291 ETH_ALEN, client_mac);
1292
1293 sprintf(client_mac_str, "%pM", client_mac);
1191 vic_provinfo_add_tlv(vp, 1294 vic_provinfo_add_tlv(vp,
1192 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, 1295 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
1193 ETH_ALEN, mac); 1296 sizeof(client_mac_str), client_mac_str);
1194 1297
1195 if (enic->pp.set & ENIC_SET_INSTANCE) { 1298 if (enic->pp.set & ENIC_SET_INSTANCE) {
1196 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); 1299 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
1197 vic_provinfo_add_tlv(vp, 1300 vic_provinfo_add_tlv(vp,
1198 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1301 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
1199 sizeof(uuid_str), uuid_str); 1302 sizeof(uuid_str), uuid_str);
1200 } 1303 }
1201 1304
1202 if (enic->pp.set & ENIC_SET_HOST) { 1305 if (enic->pp.set & ENIC_SET_HOST) {
1203 sprintf(uuid_str, "%pUB", enic->pp.host_uuid); 1306 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
1204 vic_provinfo_add_tlv(vp, 1307 vic_provinfo_add_tlv(vp,
1205 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1308 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
1206 sizeof(uuid_str), uuid_str); 1309 sizeof(uuid_str), uuid_str);
1207 } 1310 }
1208 1311
1312 os_type = htons(os_type);
1313 vic_provinfo_add_tlv(vp,
1314 VIC_GENERIC_PROV_TLV_OS_TYPE,
1315 sizeof(os_type), &os_type);
1316
1209 err = enic_dev_init_prov(enic, vp); 1317 err = enic_dev_init_prov(enic, vp);
1210 vic_provinfo_free(vp); 1318 vic_provinfo_free(vp);
1211 if (err) 1319 if (err)
1212 return err; 1320 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1213 break; 1323 break;
1214 1324
1215 case PORT_REQUEST_DISASSOCIATE: 1325 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1216 break; 1327 break;
1217 1328
1218 default: 1329 default:
1219 return -EINVAL; 1330 return -EINVAL;
1220 } 1331 }
1221 1332
1222 enic->pp.set |= ENIC_SET_APPLIED;
1223 return 0; 1333 return 0;
1224} 1334}
1225 1335
@@ -1227,29 +1337,31 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1227 struct nlattr *port[]) 1337 struct nlattr *port[])
1228{ 1338{
1229 struct enic *enic = netdev_priv(netdev); 1339 struct enic *enic = netdev_priv(netdev);
1340 struct enic_port_profile new_pp;
1341 int err = 0;
1230 1342
1231 memset(&enic->pp, 0, sizeof(enic->pp)); 1343 memset(&new_pp, 0, sizeof(new_pp));
1232 1344
1233 if (port[IFLA_PORT_REQUEST]) { 1345 if (port[IFLA_PORT_REQUEST]) {
1234 enic->pp.set |= ENIC_SET_REQUEST; 1346 new_pp.set |= ENIC_SET_REQUEST;
1235 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1347 new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1236 } 1348 }
1237 1349
1238 if (port[IFLA_PORT_PROFILE]) { 1350 if (port[IFLA_PORT_PROFILE]) {
1239 enic->pp.set |= ENIC_SET_NAME; 1351 new_pp.set |= ENIC_SET_NAME;
1240 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]), 1352 memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1241 PORT_PROFILE_MAX); 1353 PORT_PROFILE_MAX);
1242 } 1354 }
1243 1355
1244 if (port[IFLA_PORT_INSTANCE_UUID]) { 1356 if (port[IFLA_PORT_INSTANCE_UUID]) {
1245 enic->pp.set |= ENIC_SET_INSTANCE; 1357 new_pp.set |= ENIC_SET_INSTANCE;
1246 memcpy(enic->pp.instance_uuid, 1358 memcpy(new_pp.instance_uuid,
1247 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1359 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1248 } 1360 }
1249 1361
1250 if (port[IFLA_PORT_HOST_UUID]) { 1362 if (port[IFLA_PORT_HOST_UUID]) {
1251 enic->pp.set |= ENIC_SET_HOST; 1363 new_pp.set |= ENIC_SET_HOST;
1252 memcpy(enic->pp.host_uuid, 1364 memcpy(new_pp.host_uuid,
1253 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1365 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1254 } 1366 }
1255 1367
@@ -1257,21 +1369,39 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1257 if (vf != PORT_SELF_VF) 1369 if (vf != PORT_SELF_VF)
1258 return -EOPNOTSUPP; 1370 return -EOPNOTSUPP;
1259 1371
1260 if (!(enic->pp.set & ENIC_SET_REQUEST)) 1372 if (!(new_pp.set & ENIC_SET_REQUEST))
1261 return -EOPNOTSUPP; 1373 return -EOPNOTSUPP;
1262 1374
1263 if (enic->pp.request == PORT_REQUEST_ASSOCIATE) { 1375 if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
1264 1376 /* Special case handling */
1265 /* If the interface mac addr hasn't been assigned, 1377 if (!is_zero_ether_addr(enic->pp.vf_mac))
1266 * assign a random mac addr before setting port- 1378 memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
1267 * profile.
1268 */
1269 1379
1270 if (is_zero_ether_addr(netdev->dev_addr)) 1380 if (is_zero_ether_addr(netdev->dev_addr))
1271 random_ether_addr(netdev->dev_addr); 1381 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1272 } 1385 }
1273 1386
1274 return enic_set_port_profile(enic, netdev->dev_addr); 1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
1388
1389 err = enic_set_port_profile(enic, netdev->dev_addr);
1390 if (err)
1391 goto set_port_profile_cleanup;
1392
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398
1399 if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1400 memset(netdev->dev_addr, 0, ETH_ALEN);
1401 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1402 }
1403
1404 return err;
1275} 1405}
1276 1406
1277static int enic_get_vf_port(struct net_device *netdev, int vf, 1407static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1851,8 +1981,11 @@ static int enic_open(struct net_device *netdev)
1851 for (i = 0; i < enic->rq_count; i++) 1981 for (i = 0; i < enic->rq_count; i++)
1852 vnic_rq_enable(&enic->rq[i]); 1982 vnic_rq_enable(&enic->rq[i]);
1853 1983
1854 enic_dev_add_station_addr(enic); 1984 if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1855 enic_set_multicast_list(netdev); 1985 enic_dev_add_addr(enic, enic->pp.mac_addr);
1986 else
1987 enic_dev_add_station_addr(enic);
1988 enic_set_rx_mode(netdev);
1856 1989
1857 netif_wake_queue(netdev); 1990 netif_wake_queue(netdev);
1858 1991
@@ -1899,7 +2032,10 @@ static int enic_stop(struct net_device *netdev)
1899 2032
1900 netif_carrier_off(netdev); 2033 netif_carrier_off(netdev);
1901 netif_tx_disable(netdev); 2034 netif_tx_disable(netdev);
1902 enic_dev_del_station_addr(enic); 2035 if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
2036 enic_dev_del_addr(enic, enic->pp.mac_addr);
2037 else
2038 enic_dev_del_station_addr(enic);
1903 2039
1904 for (i = 0; i < enic->wq_count; i++) { 2040 for (i = 0; i < enic->wq_count; i++) {
1905 err = vnic_wq_disable(&enic->wq[i]); 2041 err = vnic_wq_disable(&enic->wq[i]);
@@ -2042,7 +2178,7 @@ static int enic_dev_hang_reset(struct enic *enic)
2042 2178
2043static int enic_set_rsskey(struct enic *enic) 2179static int enic_set_rsskey(struct enic *enic)
2044{ 2180{
2045 u64 rss_key_buf_pa; 2181 dma_addr_t rss_key_buf_pa;
2046 union vnic_rss_key *rss_key_buf_va = NULL; 2182 union vnic_rss_key *rss_key_buf_va = NULL;
2047 union vnic_rss_key rss_key = { 2183 union vnic_rss_key rss_key = {
2048 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, 2184 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2209,7 @@ static int enic_set_rsskey(struct enic *enic)
2073 2209
2074static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 2210static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2075{ 2211{
2076 u64 rss_cpu_buf_pa; 2212 dma_addr_t rss_cpu_buf_pa;
2077 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 2213 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2078 unsigned int i; 2214 unsigned int i;
2079 int err; 2215 int err;
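
The two one-line type changes above (u64 to dma_addr_t) deserve a note: bus addresses produced by the consistent-DMA allocators have type dma_addr_t, which may be 32 or 64 bits wide depending on the platform, so storing them in a u64 only works by accident and provokes warnings where the widths differ. A sketch of the corrected usage, with the enic field names assumed from context:

    static int sketch_alloc_rss_key(struct enic *enic)
    {
        dma_addr_t pa;                 /* not u64: width is platform-defined */
        union vnic_rss_key *va;

        va = pci_alloc_consistent(enic->pdev, sizeof(*va), &pa);
        if (!va)
            return -ENOMEM;
        /* ... fill the key, hand pa to the device ... */
        pci_free_consistent(enic->pdev, sizeof(*va), va, pa);
        return 0;
    }
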
@@ -2328,7 +2464,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2328 .ndo_start_xmit = enic_hard_start_xmit, 2464 .ndo_start_xmit = enic_hard_start_xmit,
2329 .ndo_get_stats = enic_get_stats, 2465 .ndo_get_stats = enic_get_stats,
2330 .ndo_validate_addr = eth_validate_addr, 2466 .ndo_validate_addr = eth_validate_addr,
2331 .ndo_set_multicast_list = enic_set_multicast_list, 2467 .ndo_set_rx_mode = enic_set_rx_mode,
2468 .ndo_set_multicast_list = enic_set_rx_mode,
2332 .ndo_set_mac_address = enic_set_mac_address_dynamic, 2469 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2333 .ndo_change_mtu = enic_change_mtu, 2470 .ndo_change_mtu = enic_change_mtu,
2334 .ndo_vlan_rx_register = enic_vlan_rx_register, 2471 .ndo_vlan_rx_register = enic_vlan_rx_register,
@@ -2337,6 +2474,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2337 .ndo_tx_timeout = enic_tx_timeout, 2474 .ndo_tx_timeout = enic_tx_timeout,
2338 .ndo_set_vf_port = enic_set_vf_port, 2475 .ndo_set_vf_port = enic_set_vf_port,
2339 .ndo_get_vf_port = enic_get_vf_port, 2476 .ndo_get_vf_port = enic_get_vf_port,
2477#ifdef IFLA_VF_MAX
2478 .ndo_set_vf_mac = enic_set_vf_mac,
2479#endif
2340#ifdef CONFIG_NET_POLL_CONTROLLER 2480#ifdef CONFIG_NET_POLL_CONTROLLER
2341 .ndo_poll_controller = enic_poll_controller, 2481 .ndo_poll_controller = enic_poll_controller,
2342#endif 2482#endif
@@ -2349,7 +2489,8 @@ static const struct net_device_ops enic_netdev_ops = {
2349 .ndo_get_stats = enic_get_stats, 2489 .ndo_get_stats = enic_get_stats,
2350 .ndo_validate_addr = eth_validate_addr, 2490 .ndo_validate_addr = eth_validate_addr,
2351 .ndo_set_mac_address = enic_set_mac_address, 2491 .ndo_set_mac_address = enic_set_mac_address,
2352 .ndo_set_multicast_list = enic_set_multicast_list, 2492 .ndo_set_rx_mode = enic_set_rx_mode,
2493 .ndo_set_multicast_list = enic_set_rx_mode,
2353 .ndo_change_mtu = enic_change_mtu, 2494 .ndo_change_mtu = enic_change_mtu,
2354 .ndo_vlan_rx_register = enic_vlan_rx_register, 2495 .ndo_vlan_rx_register = enic_vlan_rx_register,
2355 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2496 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2693,7 +2834,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
2693 if (netdev) { 2834 if (netdev) {
2694 struct enic *enic = netdev_priv(netdev); 2835 struct enic *enic = netdev_priv(netdev);
2695 2836
2696 flush_scheduled_work(); 2837 cancel_work_sync(&enic->reset);
2697 unregister_netdev(netdev); 2838 unregister_netdev(netdev);
2698 enic_dev_deinit(enic); 2839 enic_dev_deinit(enic);
2699 vnic_dev_close(enic->vdev); 2840 vnic_dev_close(enic->vdev);
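
enic_main.c above splits the old enic_set_multicast_list() into enic_set_rx_mode() plus per-type helpers, adding a unicast perfect-filter table alongside the multicast one. Both helpers use the same two-pass reconciliation: first delete every address that disappeared from the new list, then add every address that was not in the old one, so unchanged entries never touch firmware. A generic sketch of that algorithm (names hypothetical; memcmp() stands in for the driver's compare_ether_addr()):

    #include <linux/if_ether.h>        /* ETH_ALEN */
    #include <linux/string.h>          /* memcmp */

    static void sync_addr_list(u8 prev[][ETH_ALEN], unsigned int prev_cnt,
                               u8 next[][ETH_ALEN], unsigned int next_cnt,
                               void (*del)(u8 *), void (*add)(u8 *))
    {
        unsigned int i, j;

        for (i = 0; i < prev_cnt; i++) {       /* pass 1: deletions */
            for (j = 0; j < next_cnt; j++)
                if (!memcmp(prev[i], next[j], ETH_ALEN))
                    break;
            if (j == next_cnt)                 /* vanished from new list */
                del(prev[i]);
        }

        for (i = 0; i < next_cnt; i++) {       /* pass 2: additions */
            for (j = 0; j < prev_cnt; j++)
                if (!memcmp(next[i], prev[j], ETH_ALEN))
                    break;
            if (j == prev_cnt)                 /* new to the list */
                add(next[i]);
        }
    }

Quadratic, but bounded by the 32-entry perfect-filter tables, so it stays cheap.
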
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 9a103d9ef9e2..25be2734c3fe 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -34,6 +34,7 @@
34#define ENIC_MAX_MTU 9000 34#define ENIC_MAX_MTU 9000
35 35
36#define ENIC_MULTICAST_PERFECT_FILTERS 32 36#define ENIC_MULTICAST_PERFECT_FILTERS 32
37#define ENIC_UNICAST_PERFECT_FILTERS 32
37 38
38#define ENIC_NON_TSO_MAX_DESC 16 39#define ENIC_NON_TSO_MAX_DESC 16
39 40
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 7e46e5e8600f..f700f5d9e81d 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -24,14 +24,29 @@
24/* Note: String field lengths include null char */ 24/* Note: String field lengths include null char */
25 25
26#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c } 26#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
27#define VIC_PROVINFO_LINUX_TYPE 0x2 27#define VIC_PROVINFO_GENERIC_TYPE 0x4
28 28
29enum vic_linux_prov_tlv_type { 29enum vic_generic_prov_tlv_type {
30 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0, 30 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
31 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */ 31 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
32 VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2, 32 VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
33 VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8, 33 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
34 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9, 34 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
35 VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
36 VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
37 VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
38 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
39 VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
40 VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
41 VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
42 VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
43};
44
45enum vic_generic_prov_os_type {
46 VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
35}; 50};
36 51
37struct vic_provinfo { 52struct vic_provinfo {
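
vnic_vic.h moves the provisioning TLVs from the Linux-private namespace (type 0x2) to the generic one (type 0x4) shared across operating systems, which is why an OS-type TLV now exists at all. TLV payloads are copied verbatim into the buffer handed to firmware, so multi-byte integers must be serialized in network byte order by the caller, exactly as the enic_main.c hunk above does:

    u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;

    /* vic_provinfo_add_tlv() copies raw bytes and does no
     * byte-swapping, so convert before adding the TLV */
    os_type = htons(os_type);
    vic_provinfo_add_tlv(vp, VIC_GENERIC_PROV_TLV_OS_TYPE,
                         sizeof(os_type), &os_type);
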
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8d..b79d7e1555d5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22#include <net/ethoc.h> 23#include <net/ethoc.h>
23 24
24static int buffer_size = 0x8000; /* 32 KBytes */ 25static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
184 * @netdev: pointer to network device structure 185 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 186 * @napi: NAPI structure
186 * @msg_enable: device state flags 187 * @msg_enable: device state flags
187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
189 * @phy: attached PHY 189 * @phy: attached PHY
190 * @mdio: MDIO bus for PHY access 190 * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
209 struct napi_struct napi; 209 struct napi_struct napi;
210 u32 msg_enable; 210 u32 msg_enable;
211 211
212 spinlock_t rx_lock;
213 spinlock_t lock; 212 spinlock_t lock;
214 213
215 struct phy_device *phy; 214 struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 unsigned int entry; 412 unsigned int entry;
414 struct ethoc_bd bd; 413 struct ethoc_bd bd;
415 414
416 entry = priv->num_tx + (priv->cur_rx % priv->num_rx); 415 entry = priv->num_tx + priv->cur_rx;
417 ethoc_read_bd(priv, entry, &bd); 416 ethoc_read_bd(priv, entry, &bd);
418 if (bd.stat & RX_BD_EMPTY) 417 if (bd.stat & RX_BD_EMPTY) {
419 break; 418 ethoc_ack_irq(priv, INT_MASK_RX);
419 /* If packet (interrupt) came in between checking
 420 * BD_EMPTY and clearing the interrupt source, then we
421 * risk missing the packet as the RX interrupt won't
422 * trigger right away when we reenable it; hence, check
 423 * BD_EMPTY here again to make sure there isn't such a
424 * packet waiting for us...
425 */
426 ethoc_read_bd(priv, entry, &bd);
427 if (bd.stat & RX_BD_EMPTY)
428 break;
429 }
420 430
421 if (ethoc_update_rx_stats(priv, &bd) == 0) { 431 if (ethoc_update_rx_stats(priv, &bd) == 0) {
422 int size = bd.stat >> 16; 432 int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
446 bd.stat &= ~RX_BD_STATS; 456 bd.stat &= ~RX_BD_STATS;
447 bd.stat |= RX_BD_EMPTY; 457 bd.stat |= RX_BD_EMPTY;
448 ethoc_write_bd(priv, entry, &bd); 458 ethoc_write_bd(priv, entry, &bd);
449 priv->cur_rx++; 459 if (++priv->cur_rx == priv->num_rx)
460 priv->cur_rx = 0;
450 } 461 }
451 462
452 return count; 463 return count;
453} 464}
454 465
455static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) 466static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
456{ 467{
457 struct net_device *netdev = dev->netdev; 468 struct net_device *netdev = dev->netdev;
458 469
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
482 netdev->stats.collisions += (bd->stat >> 4) & 0xf; 493 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
483 netdev->stats.tx_bytes += bd->stat >> 16; 494 netdev->stats.tx_bytes += bd->stat >> 16;
484 netdev->stats.tx_packets++; 495 netdev->stats.tx_packets++;
485 return 0;
486} 496}
487 497
488static void ethoc_tx(struct net_device *dev) 498static int ethoc_tx(struct net_device *dev, int limit)
489{ 499{
490 struct ethoc *priv = netdev_priv(dev); 500 struct ethoc *priv = netdev_priv(dev);
501 int count;
502 struct ethoc_bd bd;
491 503
492 spin_lock(&priv->lock); 504 for (count = 0; count < limit; ++count) {
505 unsigned int entry;
493 506
494 while (priv->dty_tx != priv->cur_tx) { 507 entry = priv->dty_tx & (priv->num_tx-1);
495 unsigned int entry = priv->dty_tx % priv->num_tx;
496 struct ethoc_bd bd;
497 508
498 ethoc_read_bd(priv, entry, &bd); 509 ethoc_read_bd(priv, entry, &bd);
499 if (bd.stat & TX_BD_READY)
500 break;
501 510
502 entry = (++priv->dty_tx) % priv->num_tx; 511 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
503 (void)ethoc_update_tx_stats(priv, &bd); 512 ethoc_ack_irq(priv, INT_MASK_TX);
513 /* If interrupt came in between reading in the BD
514 * and clearing the interrupt source, then we risk
515 * missing the event as the TX interrupt won't trigger
516 * right away when we reenable it; hence, check
 517 * TX_BD_READY here again to make sure there isn't such an
518 * event pending...
519 */
520 ethoc_read_bd(priv, entry, &bd);
521 if (bd.stat & TX_BD_READY ||
522 (priv->dty_tx == priv->cur_tx))
523 break;
524 }
525
526 ethoc_update_tx_stats(priv, &bd);
527 priv->dty_tx++;
504 } 528 }
505 529
506 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 530 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
507 netif_wake_queue(dev); 531 netif_wake_queue(dev);
508 532
509 ethoc_ack_irq(priv, INT_MASK_TX); 533 return count;
510 spin_unlock(&priv->lock);
511} 534}
512 535
513static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 536static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
515 struct net_device *dev = dev_id; 538 struct net_device *dev = dev_id;
516 struct ethoc *priv = netdev_priv(dev); 539 struct ethoc *priv = netdev_priv(dev);
517 u32 pending; 540 u32 pending;
518 541 u32 mask;
519 ethoc_disable_irq(priv, INT_MASK_ALL); 542
543 /* Figure out what triggered the interrupt...
544 * The tricky bit here is that the interrupt source bits get
 545 * set in INT_SOURCE for an event regardless of whether that
546 * event is masked or not. Thus, in order to figure out what
547 * triggered the interrupt, we need to remove the sources
548 * for all events that are currently masked. This behaviour
549 * is not particularly well documented but reasonable...
550 */
551 mask = ethoc_read(priv, INT_MASK);
520 pending = ethoc_read(priv, INT_SOURCE); 552 pending = ethoc_read(priv, INT_SOURCE);
553 pending &= mask;
554
521 if (unlikely(pending == 0)) { 555 if (unlikely(pending == 0)) {
522 ethoc_enable_irq(priv, INT_MASK_ALL);
523 return IRQ_NONE; 556 return IRQ_NONE;
524 } 557 }
525 558
526 ethoc_ack_irq(priv, pending); 559 ethoc_ack_irq(priv, pending);
527 560
561 /* We always handle the dropped packet interrupt */
528 if (pending & INT_MASK_BUSY) { 562 if (pending & INT_MASK_BUSY) {
529 dev_err(&dev->dev, "packet dropped\n"); 563 dev_err(&dev->dev, "packet dropped\n");
530 dev->stats.rx_dropped++; 564 dev->stats.rx_dropped++;
531 } 565 }
532 566
533 if (pending & INT_MASK_RX) { 567 /* Handle receive/transmit event by switching to polling */
534 if (napi_schedule_prep(&priv->napi)) 568 if (pending & (INT_MASK_TX | INT_MASK_RX)) {
535 __napi_schedule(&priv->napi); 569 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
536 } else { 570 napi_schedule(&priv->napi);
537 ethoc_enable_irq(priv, INT_MASK_RX);
538 } 571 }
539 572
540 if (pending & INT_MASK_TX)
541 ethoc_tx(dev);
542
543 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
544 return IRQ_HANDLED; 573 return IRQ_HANDLED;
545} 574}
546 575
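
Condensing the reworked ethoc_interrupt() above: INT_SOURCE latches events even while they are masked, so the raw pending word has to be filtered through the current INT_MASK before deciding whether this (possibly shared) interrupt line was really ours; TX and RX are then masked and deferred to NAPI, which re-enables them from ethoc_poll() once both directions are drained. A sketch of the flow, using the driver's own accessors (function name hypothetical):

    static irqreturn_t sketch_interrupt(struct ethoc *priv)
    {
        u32 mask = ethoc_read(priv, INT_MASK);
        u32 pending = ethoc_read(priv, INT_SOURCE) & mask;

        if (!pending)
            return IRQ_NONE;           /* shared line, not our event */

        ethoc_ack_irq(priv, pending);

        if (pending & (INT_MASK_TX | INT_MASK_RX)) {
            /* mask both directions; ethoc_poll() re-enables them */
            ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
            napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
    }
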
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
566static int ethoc_poll(struct napi_struct *napi, int budget) 595static int ethoc_poll(struct napi_struct *napi, int budget)
567{ 596{
568 struct ethoc *priv = container_of(napi, struct ethoc, napi); 597 struct ethoc *priv = container_of(napi, struct ethoc, napi);
569 int work_done = 0; 598 int rx_work_done = 0;
599 int tx_work_done = 0;
600
601 rx_work_done = ethoc_rx(priv->netdev, budget);
602 tx_work_done = ethoc_tx(priv->netdev, budget);
570 603
571 work_done = ethoc_rx(priv->netdev, budget); 604 if (rx_work_done < budget && tx_work_done < budget) {
572 if (work_done < budget) {
573 ethoc_enable_irq(priv, INT_MASK_RX);
574 napi_complete(napi); 605 napi_complete(napi);
606 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
575 } 607 }
576 608
577 return work_done; 609 return rx_work_done;
578} 610}
579 611
580static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) 612static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
581{ 613{
582 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
583 struct ethoc *priv = bus->priv; 614 struct ethoc *priv = bus->priv;
615 int i;
584 616
585 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 617 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); 618 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
587 619
588 while (time_before(jiffies, timeout)) { 620 for (i=0; i < 5; i++) {
589 u32 status = ethoc_read(priv, MIISTATUS); 621 u32 status = ethoc_read(priv, MIISTATUS);
590 if (!(status & MIISTATUS_BUSY)) { 622 if (!(status & MIISTATUS_BUSY)) {
591 u32 data = ethoc_read(priv, MIIRX_DATA); 623 u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
593 ethoc_write(priv, MIICOMMAND, 0); 625 ethoc_write(priv, MIICOMMAND, 0);
594 return data; 626 return data;
595 } 627 }
596 628 usleep_range(100,200);
597 schedule();
598 } 629 }
599 630
 600 return -EBUSY; 631 return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
602 633
603static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) 634static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
604{ 635{
605 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
606 struct ethoc *priv = bus->priv; 636 struct ethoc *priv = bus->priv;
637 int i;
607 638
608 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 639 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
609 ethoc_write(priv, MIITX_DATA, val); 640 ethoc_write(priv, MIITX_DATA, val);
610 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); 641 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
611 642
612 while (time_before(jiffies, timeout)) { 643 for (i=0; i < 5; i++) {
613 u32 stat = ethoc_read(priv, MIISTATUS); 644 u32 stat = ethoc_read(priv, MIISTATUS);
614 if (!(stat & MIISTATUS_BUSY)) { 645 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */ 646 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0); 647 ethoc_write(priv, MIICOMMAND, 0);
617 return 0; 648 return 0;
618 } 649 }
619 650 usleep_range(100,200);
620 schedule();
621 } 651 }
622 652
623 return -EBUSY; 653 return -EBUSY;
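
The two MDIO hunks above trade the open-ended jiffies/schedule() loop for a fixed five-attempt poll with usleep_range(): worst-case latency is bounded at roughly a millisecond, and sleeping beats calling schedule(), which can spin without yielding when the caller is the only runnable task. A sketch of the idiom, with a hypothetical helper name and the driver's accessors assumed from the file above:

    #include <linux/delay.h>           /* usleep_range */

    static int ethoc_mdio_wait(struct ethoc *priv)
    {
        int i;

        for (i = 0; i < 5; i++) {
            if (!(ethoc_read(priv, MIISTATUS) & MIISTATUS_BUSY))
                return 0;
            usleep_range(100, 200);    /* sleep, don't busy-spin */
        }
        return -EBUSY;                 /* PHY never went idle */
    }
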
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
971 /* calculate the number of TX/RX buffers, maximum 128 supported */ 1001 /* calculate the number of TX/RX buffers, maximum 128 supported */
972 num_bd = min_t(unsigned int, 1002 num_bd = min_t(unsigned int,
973 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); 1003 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
974 priv->num_tx = max(2, num_bd / 4); 1004 if (num_bd < 4) {
1005 ret = -ENODEV;
1006 goto error;
1007 }
1008 /* num_tx must be a power of two */
1009 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
975 priv->num_rx = num_bd - priv->num_tx; 1010 priv->num_rx = num_bd - priv->num_tx;
976 1011
1012 dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1013 priv->num_tx, priv->num_rx);
1014
977 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); 1015 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
978 if (!priv->vma) { 1016 if (!priv->vma) {
979 ret = -ENOMEM; 1017 ret = -ENOMEM;
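
The probe change above rounds num_tx down to a power of two because ethoc_tx() now derives the ring slot by masking a free-running counter (priv->dty_tx & (priv->num_tx - 1)). With 2^n entries, masking replaces the modulo and the head/tail difference stays correct across counter wrap-around; a self-contained sketch:

    /* valid only when size is a power of two */
    static inline unsigned int ring_slot(unsigned int idx, unsigned int size)
    {
        return idx & (size - 1);       /* same as idx % size, one AND */
    }

    static inline unsigned int ring_fill(unsigned int head, unsigned int tail)
    {
        return head - tail;            /* unsigned wrap keeps this correct */
    }
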
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
982 1020
983 /* Allow the platform setup code to pass in a MAC address. */ 1021 /* Allow the platform setup code to pass in a MAC address. */
984 if (pdev->dev.platform_data) { 1022 if (pdev->dev.platform_data) {
985 struct ethoc_platform_data *pdata = 1023 struct ethoc_platform_data *pdata = pdev->dev.platform_data;
986 (struct ethoc_platform_data *)pdev->dev.platform_data;
987 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1024 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
988 priv->phy_id = pdata->phy_id; 1025 priv->phy_id = pdata->phy_id;
1026 } else {
1027 priv->phy_id = -1;
1028
1029#ifdef CONFIG_OF
1030 {
1031 const uint8_t* mac;
1032
1033 mac = of_get_property(pdev->dev.of_node,
1034 "local-mac-address",
1035 NULL);
1036 if (mac)
1037 memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1038 }
1039#endif
989 } 1040 }
990 1041
991 /* Check that the given MAC address is valid. If it isn't, read the 1042 /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1046 /* setup NAPI */ 1097 /* setup NAPI */
1047 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1098 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1048 1099
1049 spin_lock_init(&priv->rx_lock);
1050 spin_lock_init(&priv->lock); 1100 spin_lock_init(&priv->lock);
1051 1101
1052 ret = register_netdev(netdev); 1102 ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
1113# define ethoc_resume NULL 1163# define ethoc_resume NULL
1114#endif 1164#endif
1115 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = {
1168 {
1169 .compatible = "opencores,ethoc",
1170 },
1171 {},
1172};
1173MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175
1116static struct platform_driver ethoc_driver = { 1176static struct platform_driver ethoc_driver = {
1117 .probe = ethoc_probe, 1177 .probe = ethoc_probe,
1118 .remove = __devexit_p(ethoc_remove), 1178 .remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
1120 .resume = ethoc_resume, 1180 .resume = ethoc_resume,
1121 .driver = { 1181 .driver = {
1122 .name = "ethoc", 1182 .name = "ethoc",
1183 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match,
1186#endif
1123 }, 1187 },
1124}; 1188};
1125 1189
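
The tail of the ethoc diff wires the driver into the device-tree world: an of_device_id table published via .of_match_table lets the platform bus bind "opencores,ethoc" nodes, and when no platform_data supplies a MAC, the standard "local-mac-address" property is consulted instead. A sketch of that lookup, with a hypothetical wrapper name:

    #include <linux/of.h>
    #include <linux/platform_device.h>

    static const u8 *ethoc_of_mac(struct platform_device *pdev)
    {
        /* returns NULL if the node or property is absent; a real
         * caller should also verify the property length is 6 */
        return of_get_property(pdev->dev.of_node,
                               "local-mac-address", NULL);
    }
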
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc26..50c1213f61fe 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
366{ 366{
367 struct net_device *dev = dev_id; 367 struct net_device *dev = dev_id;
368 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 368 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
369 unsigned long flags;
370 369
371 spin_lock_irqsave(&priv->lock, flags); 370 spin_lock(&priv->lock);
372 while (bcom_buffer_done(priv->tx_dmatsk)) { 371 while (bcom_buffer_done(priv->tx_dmatsk)) {
373 struct sk_buff *skb; 372 struct sk_buff *skb;
374 struct bcom_fec_bd *bd; 373 struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
379 378
380 dev_kfree_skb_irq(skb); 379 dev_kfree_skb_irq(skb);
381 } 380 }
382 spin_unlock_irqrestore(&priv->lock, flags); 381 spin_unlock(&priv->lock);
383 382
384 netif_wake_queue(dev); 383 netif_wake_queue(dev);
385 384
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
395 struct bcom_fec_bd *bd; 394 struct bcom_fec_bd *bd;
396 u32 status, physaddr; 395 u32 status, physaddr;
397 int length; 396 int length;
398 unsigned long flags;
399 397
400 spin_lock_irqsave(&priv->lock, flags); 398 spin_lock(&priv->lock);
401 399
402 while (bcom_buffer_done(priv->rx_dmatsk)) { 400 while (bcom_buffer_done(priv->rx_dmatsk)) {
403 401
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
429 427
430 /* Process the received skb - Drop the spin lock while 428 /* Process the received skb - Drop the spin lock while
431 * calling into the network stack */ 429 * calling into the network stack */
432 spin_unlock_irqrestore(&priv->lock, flags); 430 spin_unlock(&priv->lock);
433 431
434 dma_unmap_single(dev->dev.parent, physaddr, rskb->len, 432 dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
435 DMA_FROM_DEVICE); 433 DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
438 rskb->protocol = eth_type_trans(rskb, dev); 436 rskb->protocol = eth_type_trans(rskb, dev);
439 netif_rx(rskb); 437 netif_rx(rskb);
440 438
441 spin_lock_irqsave(&priv->lock, flags); 439 spin_lock(&priv->lock);
442 } 440 }
443 441
444 spin_unlock_irqrestore(&priv->lock, flags); 442 spin_unlock(&priv->lock);
445 443
446 return IRQ_HANDLED; 444 return IRQ_HANDLED;
447} 445}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
452 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 450 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
453 struct mpc52xx_fec __iomem *fec = priv->fec; 451 struct mpc52xx_fec __iomem *fec = priv->fec;
454 u32 ievent; 452 u32 ievent;
455 unsigned long flags;
456 453
457 ievent = in_be32(&fec->ievent); 454 ievent = in_be32(&fec->ievent);
458 455
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
470 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) 467 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
471 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); 468 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
472 469
473 spin_lock_irqsave(&priv->lock, flags); 470 spin_lock(&priv->lock);
474 mpc52xx_fec_reset(dev); 471 mpc52xx_fec_reset(dev);
475 spin_unlock_irqrestore(&priv->lock, flags); 472 spin_unlock(&priv->lock);
476 473
477 return IRQ_HANDLED; 474 return IRQ_HANDLED;
478 } 475 }
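
The fec_mpc52xx change above is purely a locking simplification: these handlers run only in hard-irq context, where local interrupts are already disabled, so the _irqsave/_irqrestore variants merely add the cost of saving and restoring flags. Plain spin_lock() is sufficient here, provided every process-context user of the same lock keeps using the irq-disabling variants. A sketch of the shape, with hypothetical names:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct example_priv {              /* hypothetical private struct */
        spinlock_t lock;
    };

    static irqreturn_t example_tx_interrupt(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        struct example_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);        /* hardirq: irqs already off */
        /* ... reclaim completed TX buffers under the lock ... */
        spin_unlock(&priv->lock);

        netif_wake_queue(dev);
        return IRQ_HANDLED;
    }
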
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..cd2d72d825df 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
41 */ 41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
42#define FORCEDETH_VERSION "0.64" 45#define FORCEDETH_VERSION "0.64"
43#define DRV_NAME "forcedeth" 46#define DRV_NAME "forcedeth"
44 47
@@ -60,18 +63,12 @@
60#include <linux/if_vlan.h> 63#include <linux/if_vlan.h>
61#include <linux/dma-mapping.h> 64#include <linux/dma-mapping.h>
62#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/uaccess.h>
67#include <linux/io.h>
63 68
64#include <asm/irq.h> 69#include <asm/irq.h>
65#include <asm/io.h>
66#include <asm/uaccess.h>
67#include <asm/system.h> 70#include <asm/system.h>
68 71
69#if 0
70#define dprintk printk
71#else
72#define dprintk(x...) do { } while (0)
73#endif
74
75#define TX_WORK_PER_LOOP 64 72#define TX_WORK_PER_LOOP 64
76#define RX_WORK_PER_LOOP 64 73#define RX_WORK_PER_LOOP 64
77 74
@@ -186,9 +183,9 @@ enum {
186 NvRegSlotTime = 0x9c, 183 NvRegSlotTime = 0x9c,
187#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 184#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
188#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 185#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
189#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 186#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
190#define NVREG_SLOTTIME_HALF 0x0000ff00 187#define NVREG_SLOTTIME_HALF 0x0000ff00
191#define NVREG_SLOTTIME_DEFAULT 0x00007f00 188#define NVREG_SLOTTIME_DEFAULT 0x00007f00
192#define NVREG_SLOTTIME_MASK 0x000000ff 189#define NVREG_SLOTTIME_MASK 0x000000ff
193 190
194 NvRegTxDeferral = 0xA0, 191 NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@ enum {
297#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 294#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
298 295
299 NvRegMgmtUnitGetVersion = 0x204, 296 NvRegMgmtUnitGetVersion = 0x204,
300#define NVREG_MGMTUNITGETVERSION 0x01 297#define NVREG_MGMTUNITGETVERSION 0x01
301 NvRegMgmtUnitVersion = 0x208, 298 NvRegMgmtUnitVersion = 0x208,
302#define NVREG_MGMTUNITVERSION 0x08 299#define NVREG_MGMTUNITVERSION 0x08
303 NvRegPowerCap = 0x268, 300 NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@ struct ring_desc_ex {
368}; 365};
369 366
370union ring_type { 367union ring_type {
371 struct ring_desc* orig; 368 struct ring_desc *orig;
372 struct ring_desc_ex* ex; 369 struct ring_desc_ex *ex;
373}; 370};
374 371
375#define FLAG_MASK_V1 0xffff0000 372#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@ union ring_type {
444#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 441#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
445 442
 446/* Miscelaneous hardware related defines: */ 443/* Miscelaneous hardware related defines: */
447#define NV_PCI_REGSZ_VER1 0x270 444#define NV_PCI_REGSZ_VER1 0x270
448#define NV_PCI_REGSZ_VER2 0x2d4 445#define NV_PCI_REGSZ_VER2 0x2d4
449#define NV_PCI_REGSZ_VER3 0x604 446#define NV_PCI_REGSZ_VER3 0x604
450#define NV_PCI_REGSZ_MAX 0x604 447#define NV_PCI_REGSZ_MAX 0x604
451 448
452/* various timeout delays: all in usec */ 449/* various timeout delays: all in usec */
453#define NV_TXRX_RESET_DELAY 4 450#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +714,7 @@ static const struct register_test nv_registers_test[] = {
717 { NvRegMulticastAddrA, 0xffffffff }, 714 { NvRegMulticastAddrA, 0xffffffff },
718 { NvRegTxWatermark, 0x0ff }, 715 { NvRegTxWatermark, 0x0ff },
719 { NvRegWakeUpFlags, 0x07777 }, 716 { NvRegWakeUpFlags, 0x07777 },
720 { 0,0 } 717 { 0, 0 }
721}; 718};
722 719
723struct nv_skb_map { 720struct nv_skb_map {
@@ -911,7 +908,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
911 * Power down phy when interface is down (persists through reboot; 908 * Power down phy when interface is down (persists through reboot;
912 * older Linux and other OSes may not power it up again) 909 * older Linux and other OSes may not power it up again)
913 */ 910 */
914static int phy_power_down = 0; 911static int phy_power_down;
915 912
916static inline struct fe_priv *get_nvpriv(struct net_device *dev) 913static inline struct fe_priv *get_nvpriv(struct net_device *dev)
917{ 914{
@@ -948,7 +945,7 @@ static bool nv_optimized(struct fe_priv *np)
948} 945}
949 946
950static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 947static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
951 int delay, int delaymax, const char *msg) 948 int delay, int delaymax)
952{ 949{
953 u8 __iomem *base = get_hwbase(dev); 950 u8 __iomem *base = get_hwbase(dev);
954 951
@@ -956,11 +953,8 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
956 do { 953 do {
957 udelay(delay); 954 udelay(delay);
958 delaymax -= delay; 955 delaymax -= delay;
959 if (delaymax < 0) { 956 if (delaymax < 0)
960 if (msg)
961 printk("%s", msg);
962 return 1; 957 return 1;
963 }
964 } while ((readl(base + offset) & mask) != target); 958 } while ((readl(base + offset) & mask) != target);
965 return 0; 959 return 0;
966} 960}
@@ -984,12 +978,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
984 u8 __iomem *base = get_hwbase(dev); 978 u8 __iomem *base = get_hwbase(dev);
985 979
986 if (!nv_optimized(np)) { 980 if (!nv_optimized(np)) {
987 if (rxtx_flags & NV_SETUP_RX_RING) { 981 if (rxtx_flags & NV_SETUP_RX_RING)
988 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 982 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
989 } 983 if (rxtx_flags & NV_SETUP_TX_RING)
990 if (rxtx_flags & NV_SETUP_TX_RING) {
991 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 984 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
992 }
993 } else { 985 } else {
994 if (rxtx_flags & NV_SETUP_RX_RING) { 986 if (rxtx_flags & NV_SETUP_RX_RING) {
995 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 987 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@ static void free_rings(struct net_device *dev)
1015 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 1007 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1016 np->rx_ring.ex, np->ring_addr); 1008 np->rx_ring.ex, np->ring_addr);
1017 } 1009 }
1018 if (np->rx_skb) 1010 kfree(np->rx_skb);
1019 kfree(np->rx_skb); 1011 kfree(np->tx_skb);
1020 if (np->tx_skb)
1021 kfree(np->tx_skb);
1022} 1012}
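The dropped guards were dead code: kfree(NULL) is defined to be a no-op, so checking the pointer first only adds a branch. Sketch:

        #include <linux/slab.h>

        /* Illustrative only: kfree() already tolerates NULL, so no
         * pointer check is needed before either call.
         */
        static void free_ring_maps(void *rx_skb, void *tx_skb)
        {
                kfree(rx_skb);  /* safe even when rx_skb == NULL */
                kfree(tx_skb);
        }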
1023 1013
1024static int using_multi_irqs(struct net_device *dev) 1014static int using_multi_irqs(struct net_device *dev)
@@ -1145,23 +1135,15 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1145 writel(reg, base + NvRegMIIControl); 1135 writel(reg, base + NvRegMIIControl);
1146 1136
1147 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1137 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1148 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1138 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1149 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1150 dev->name, miireg, addr);
1151 retval = -1; 1139 retval = -1;
1152 } else if (value != MII_READ) { 1140 } else if (value != MII_READ) {
1153 /* it was a write operation - fewer failures are detectable */ 1141 /* it was a write operation - fewer failures are detectable */
1154 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1155 dev->name, value, miireg, addr);
1156 retval = 0; 1142 retval = 0;
1157 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1143 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1158 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1159 dev->name, miireg, addr);
1160 retval = -1; 1144 retval = -1;
1161 } else { 1145 } else {
1162 retval = readl(base + NvRegMIIData); 1146 retval = readl(base + NvRegMIIData);
1163 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1164 dev->name, miireg, addr, retval);
1165 } 1147 }
1166 1148
1167 return retval; 1149 return retval;
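The deleted dprintk() calls belonged to a driver-local debug macro. If equivalent tracing were wanted today, netdev_dbg() would be the idiomatic replacement: it is compiled out unless DEBUG or dynamic debug is enabled, and it prefixes the device name automatically. A sketch, not part of this patch:

        #include <linux/netdevice.h>

        /* Hypothetical helper: log an MII access timeout through the
         * dynamic-debug machinery instead of an always-on printk().
         */
        static void report_mii_timeout(struct net_device *dev,
                                       int miireg, int addr)
        {
                netdev_dbg(dev, "mii_rw of reg %d at PHY %d timed out\n",
                           miireg, addr);
        }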
@@ -1174,16 +1156,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1174 unsigned int tries = 0; 1156 unsigned int tries = 0;
1175 1157
1176 miicontrol = BMCR_RESET | bmcr_setup; 1158 miicontrol = BMCR_RESET | bmcr_setup;
1177 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1159 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1178 return -1; 1160 return -1;
1179 }
1180 1161
1181 /* wait for 500ms */ 1162 /* wait for 500ms */
1182 msleep(500); 1163 msleep(500);
1183 1164
1184 /* must wait till reset is deasserted */ 1165 /* must wait till reset is deasserted */
1185 while (miicontrol & BMCR_RESET) { 1166 while (miicontrol & BMCR_RESET) {
1186 msleep(10); 1167 usleep_range(10000, 20000);
1187 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1168 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1188 /* FIXME: 100 tries seem excessive */ 1169 /* FIXME: 100 tries seem excessive */
1189 if (tries++ > 100) 1170 if (tries++ > 100)
@@ -1192,106 +1173,239 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1192 return 0; 1173 return 0;
1193} 1174}
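The msleep(10) -> usleep_range(10000, 20000) switch follows the kernel's timer documentation: msleep() rounds up to jiffies, so a nominal 10 ms sleep can last 20 ms on a HZ=100 kernel, while usleep_range() is hrtimer-based and the explicit window lets the scheduler coalesce the wakeup. The substitution in isolation:

        #include <linux/delay.h>

        /* Poll interval between PHY reset-bit checks: at least 10 ms,
         * at most 20 ms (arguments are in microseconds).
         */
        static void phy_poll_interval(void)
        {
                usleep_range(10000, 20000);
        }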
1194 1175
1176static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1177{
1178 static const struct {
1179 int reg;
1180 int init;
1181 } ri[] = {
1182 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1183 { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1184 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1185 { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1186 { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1187 { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1188 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1189 };
1190 int i;
1191
1192 for (i = 0; i < ARRAY_SIZE(ri); i++) {
1193 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1194 return PHY_ERROR;
1195 }
1196
1197 return 0;
1198}
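init_realtek_8211b() collapses seven copy-pasted mii_rw()/printk() pairs into a const table walked by one loop, so adding or reordering a step becomes a one-line edit and the error path exists exactly once. The pattern generalizes; a sketch reusing this driver's mii_rw() and PHY_ERROR:

        struct phy_init_step {
                int reg;
                int val;
        };

        /* Apply @count register writes in order, stopping at the first
         * failure.
         */
        static int apply_phy_init(struct net_device *dev, struct fe_priv *np,
                                  const struct phy_init_step *seq, int count)
        {
                int i;

                for (i = 0; i < count; i++)
                        if (mii_rw(dev, np->phyaddr, seq[i].reg, seq[i].val))
                                return PHY_ERROR;
                return 0;
        }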
1199
1200static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1201{
1202 u32 reg;
1203 u8 __iomem *base = get_hwbase(dev);
1204 u32 powerstate = readl(base + NvRegPowerState2);
1205
1206 /* need to perform hw phy reset */
1207 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1208 writel(powerstate, base + NvRegPowerState2);
1209 msleep(25);
1210
1211 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1212 writel(powerstate, base + NvRegPowerState2);
1213 msleep(25);
1214
1215 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1216 reg |= PHY_REALTEK_INIT9;
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1218 return PHY_ERROR;
1219 if (mii_rw(dev, np->phyaddr,
1220 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1221 return PHY_ERROR;
1222 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1223 if (!(reg & PHY_REALTEK_INIT11)) {
1224 reg |= PHY_REALTEK_INIT11;
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1226 return PHY_ERROR;
1227 }
1228 if (mii_rw(dev, np->phyaddr,
1229 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1230 return PHY_ERROR;
1231
1232 return 0;
1233}
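Unlike the 8211B, the 8211C needs a hardware PHY reset before any register writes: the reset bit in NvRegPowerState2 is asserted, held for 25 ms, then released with another 25 ms settle. The pulse, condensed (same registers as above):

        /* Sketch of the hardware reset pulse performed by
         * init_realtek_8211c().
         */
        static void pulse_phy_reset(u8 __iomem *base)
        {
                u32 ps = readl(base + NvRegPowerState2);

                writel(ps | NVREG_POWERSTATE2_PHY_RESET,
                       base + NvRegPowerState2);
                msleep(25);             /* hold reset asserted */
                writel(ps & ~NVREG_POWERSTATE2_PHY_RESET,
                       base + NvRegPowerState2);
                msleep(25);             /* post-release settle */
        }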
1234
1235static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1236{
1237 u32 phy_reserved;
1238
1239 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1240 phy_reserved = mii_rw(dev, np->phyaddr,
1241 PHY_REALTEK_INIT_REG6, MII_READ);
1242 phy_reserved |= PHY_REALTEK_INIT7;
1243 if (mii_rw(dev, np->phyaddr,
1244 PHY_REALTEK_INIT_REG6, phy_reserved))
1245 return PHY_ERROR;
1246 }
1247
1248 return 0;
1249}
1250
1251static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1252{
1253 u32 phy_reserved;
1254
1255 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1256 if (mii_rw(dev, np->phyaddr,
1257 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1258 return PHY_ERROR;
1259 phy_reserved = mii_rw(dev, np->phyaddr,
1260 PHY_REALTEK_INIT_REG2, MII_READ);
1261 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1262 phy_reserved |= PHY_REALTEK_INIT3;
1263 if (mii_rw(dev, np->phyaddr,
1264 PHY_REALTEK_INIT_REG2, phy_reserved))
1265 return PHY_ERROR;
1266 if (mii_rw(dev, np->phyaddr,
1267 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1268 return PHY_ERROR;
1269 }
1270
1271 return 0;
1272}
1273
1274static int init_cicada(struct net_device *dev, struct fe_priv *np,
1275 u32 phyinterface)
1276{
1277 u32 phy_reserved;
1278
1279 if (phyinterface & PHY_RGMII) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1281 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1282 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1283 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1284 return PHY_ERROR;
1285 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1286 phy_reserved |= PHY_CICADA_INIT5;
1287 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1288 return PHY_ERROR;
1289 }
1290 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1291 phy_reserved |= PHY_CICADA_INIT6;
1292 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1293 return PHY_ERROR;
1294
1295 return 0;
1296}
1297
1298static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1299{
1300 u32 phy_reserved;
1301
1302 if (mii_rw(dev, np->phyaddr,
1303 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1304 return PHY_ERROR;
1305 if (mii_rw(dev, np->phyaddr,
1306 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1307 return PHY_ERROR;
1308 phy_reserved = mii_rw(dev, np->phyaddr,
1309 PHY_VITESSE_INIT_REG4, MII_READ);
1310 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1311 return PHY_ERROR;
1312 phy_reserved = mii_rw(dev, np->phyaddr,
1313 PHY_VITESSE_INIT_REG3, MII_READ);
1314 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1315 phy_reserved |= PHY_VITESSE_INIT3;
1316 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1317 return PHY_ERROR;
1318 if (mii_rw(dev, np->phyaddr,
1319 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1320 return PHY_ERROR;
1321 if (mii_rw(dev, np->phyaddr,
1322 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1323 return PHY_ERROR;
1324 phy_reserved = mii_rw(dev, np->phyaddr,
1325 PHY_VITESSE_INIT_REG4, MII_READ);
1326 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1327 phy_reserved |= PHY_VITESSE_INIT3;
1328 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1329 return PHY_ERROR;
1330 phy_reserved = mii_rw(dev, np->phyaddr,
1331 PHY_VITESSE_INIT_REG3, MII_READ);
1332 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1333 return PHY_ERROR;
1334 if (mii_rw(dev, np->phyaddr,
1335 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1336 return PHY_ERROR;
1337 if (mii_rw(dev, np->phyaddr,
1338 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1339 return PHY_ERROR;
1340 phy_reserved = mii_rw(dev, np->phyaddr,
1341 PHY_VITESSE_INIT_REG4, MII_READ);
1342 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1343 return PHY_ERROR;
1344 phy_reserved = mii_rw(dev, np->phyaddr,
1345 PHY_VITESSE_INIT_REG3, MII_READ);
1346 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1347 phy_reserved |= PHY_VITESSE_INIT8;
1348 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1349 return PHY_ERROR;
1350 if (mii_rw(dev, np->phyaddr,
1351 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1352 return PHY_ERROR;
1353 if (mii_rw(dev, np->phyaddr,
1354 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1355 return PHY_ERROR;
1356
1357 return 0;
1358}
1359
1195static int phy_init(struct net_device *dev) 1360static int phy_init(struct net_device *dev)
1196{ 1361{
1197 struct fe_priv *np = get_nvpriv(dev); 1362 struct fe_priv *np = get_nvpriv(dev);
1198 u8 __iomem *base = get_hwbase(dev); 1363 u8 __iomem *base = get_hwbase(dev);
1199 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1364 u32 phyinterface;
1365 u32 mii_status, mii_control, mii_control_1000, reg;
1200 1366
1201 /* phy errata for E3016 phy */ 1367 /* phy errata for E3016 phy */
1202 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1368 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1203 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1369 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1204 reg &= ~PHY_MARVELL_E3016_INITMASK; 1370 reg &= ~PHY_MARVELL_E3016_INITMASK;
1205 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1371 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1206 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1372 netdev_info(dev, "%s: phy write to errata reg failed\n",
1373 pci_name(np->pci_dev));
1207 return PHY_ERROR; 1374 return PHY_ERROR;
1208 } 1375 }
1209 } 1376 }
1210 if (np->phy_oui == PHY_OUI_REALTEK) { 1377 if (np->phy_oui == PHY_OUI_REALTEK) {
1211 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1378 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1212 np->phy_rev == PHY_REV_REALTEK_8211B) { 1379 np->phy_rev == PHY_REV_REALTEK_8211B) {
1213 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1380 if (init_realtek_8211b(dev, np)) {
1214 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1381 netdev_info(dev, "%s: phy init failed\n",
1215 return PHY_ERROR; 1382 pci_name(np->pci_dev));
1216 }
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1218 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1219 return PHY_ERROR;
1220 }
1221 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1222 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1223 return PHY_ERROR; 1383 return PHY_ERROR;
1224 } 1384 }
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1385 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1226 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1386 np->phy_rev == PHY_REV_REALTEK_8211C) {
1387 if (init_realtek_8211c(dev, np)) {
1388 netdev_info(dev, "%s: phy init failed\n",
1389 pci_name(np->pci_dev));
1227 return PHY_ERROR; 1390 return PHY_ERROR;
1228 } 1391 }
1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1392 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1230 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1393 if (init_realtek_8201(dev, np)) {
1394 netdev_info(dev, "%s: phy init failed\n",
1395 pci_name(np->pci_dev));
1231 return PHY_ERROR; 1396 return PHY_ERROR;
1232 } 1397 }
1233 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1234 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1235 return PHY_ERROR;
1236 }
1237 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1238 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1239 return PHY_ERROR;
1240 }
1241 }
1242 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1243 np->phy_rev == PHY_REV_REALTEK_8211C) {
1244 u32 powerstate = readl(base + NvRegPowerState2);
1245
1246 /* need to perform hw phy reset */
1247 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1248 writel(powerstate, base + NvRegPowerState2);
1249 msleep(25);
1250
1251 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1252 writel(powerstate, base + NvRegPowerState2);
1253 msleep(25);
1254
1255 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1256 reg |= PHY_REALTEK_INIT9;
1257 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1258 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1259 return PHY_ERROR;
1260 }
1261 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1262 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1263 return PHY_ERROR;
1264 }
1265 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1266 if (!(reg & PHY_REALTEK_INIT11)) {
1267 reg |= PHY_REALTEK_INIT11;
1268 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1269 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1270 return PHY_ERROR;
1271 }
1272 }
1273 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1274 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1275 return PHY_ERROR;
1276 }
1277 }
1278 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1279 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1281 phy_reserved |= PHY_REALTEK_INIT7;
1282 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1283 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1284 return PHY_ERROR;
1285 }
1286 }
1287 } 1398 }
1288 } 1399 }
1289 1400
1290 /* set advertise register */ 1401 /* set advertise register */
1291 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1402 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1292 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1403 reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1404 ADVERTISE_100HALF | ADVERTISE_100FULL |
1405 ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1293 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1406 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1294 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1407 netdev_info(dev, "%s: phy write to advertise failed\n",
1408 pci_name(np->pci_dev));
1295 return PHY_ERROR; 1409 return PHY_ERROR;
1296 } 1410 }
1297 1411
@@ -1302,7 +1416,8 @@ static int phy_init(struct net_device *dev)
1302 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1416 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1303 if (mii_status & PHY_GIGABIT) { 1417 if (mii_status & PHY_GIGABIT) {
1304 np->gigabit = PHY_GIGABIT; 1418 np->gigabit = PHY_GIGABIT;
1305 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1419 mii_control_1000 = mii_rw(dev, np->phyaddr,
1420 MII_CTRL1000, MII_READ);
1306 mii_control_1000 &= ~ADVERTISE_1000HALF; 1421 mii_control_1000 &= ~ADVERTISE_1000HALF;
1307 if (phyinterface & PHY_RGMII) 1422 if (phyinterface & PHY_RGMII)
1308 mii_control_1000 |= ADVERTISE_1000FULL; 1423 mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@ static int phy_init(struct net_device *dev)
1310 mii_control_1000 &= ~ADVERTISE_1000FULL; 1425 mii_control_1000 &= ~ADVERTISE_1000FULL;
1311 1426
1312 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1427 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1428 netdev_info(dev, "%s: phy init failed\n",
1429 pci_name(np->pci_dev));
1314 return PHY_ERROR; 1430 return PHY_ERROR;
1315 } 1431 }
1316 } 1432 } else
1317 else
1318 np->gigabit = 0; 1433 np->gigabit = 0;
1319 1434
1320 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1435 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@ static int phy_init(struct net_device *dev)
1326 /* start autoneg since we already performed hw reset above */ 1441 /* start autoneg since we already performed hw reset above */
1327 mii_control |= BMCR_ANRESTART; 1442 mii_control |= BMCR_ANRESTART;
1328 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1443 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1329 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1444 netdev_info(dev, "%s: phy init failed\n",
1445 pci_name(np->pci_dev));
1330 return PHY_ERROR; 1446 return PHY_ERROR;
1331 } 1447 }
1332 } else { 1448 } else {
@@ -1334,165 +1450,42 @@ static int phy_init(struct net_device *dev)
1334 * (certain phys need bmcr to be setup with reset) 1450 * (certain phys need bmcr to be setup with reset)
1335 */ 1451 */
1336 if (phy_reset(dev, mii_control)) { 1452 if (phy_reset(dev, mii_control)) {
1337 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1453 netdev_info(dev, "%s: phy reset failed\n",
1454 pci_name(np->pci_dev));
1338 return PHY_ERROR; 1455 return PHY_ERROR;
1339 } 1456 }
1340 } 1457 }
1341 1458
1342 /* phy vendor specific configuration */ 1459 /* phy vendor specific configuration */
1343 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1460 if ((np->phy_oui == PHY_OUI_CICADA)) {
1344 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1461 if (init_cicada(dev, np, phyinterface)) {
1345 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1462 netdev_info(dev, "%s: phy init failed\n",
1346 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1463 pci_name(np->pci_dev));
1347 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1348 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1349 return PHY_ERROR;
1350 }
1351 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1352 phy_reserved |= PHY_CICADA_INIT5;
1353 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1355 return PHY_ERROR;
1356 }
1357 }
1358 if (np->phy_oui == PHY_OUI_CICADA) {
1359 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1360 phy_reserved |= PHY_CICADA_INIT6;
1361 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1362 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1363 return PHY_ERROR;
1364 }
1365 }
1366 if (np->phy_oui == PHY_OUI_VITESSE) {
1367 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1368 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1369 return PHY_ERROR;
1370 }
1371 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1372 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1373 return PHY_ERROR;
1374 }
1375 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1376 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1377 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1378 return PHY_ERROR;
1379 }
1380 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1381 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1382 phy_reserved |= PHY_VITESSE_INIT3;
1383 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1384 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1385 return PHY_ERROR;
1386 }
1387 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1388 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1389 return PHY_ERROR;
1390 }
1391 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1392 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1393 return PHY_ERROR;
1394 }
1395 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1396 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1397 phy_reserved |= PHY_VITESSE_INIT3;
1398 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1399 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1400 return PHY_ERROR;
1401 }
1402 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1403 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1404 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1405 return PHY_ERROR; 1464 return PHY_ERROR;
1406 } 1465 }
1407 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1466 } else if (np->phy_oui == PHY_OUI_VITESSE) {
1408 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1467 if (init_vitesse(dev, np)) {
1468 netdev_info(dev, "%s: phy init failed\n",
1469 pci_name(np->pci_dev));
1409 return PHY_ERROR; 1470 return PHY_ERROR;
1410 } 1471 }
1411 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1472 } else if (np->phy_oui == PHY_OUI_REALTEK) {
1412 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1413 return PHY_ERROR;
1414 }
1415 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1416 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1417 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1418 return PHY_ERROR;
1419 }
1420 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1421 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1422 phy_reserved |= PHY_VITESSE_INIT8;
1423 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1424 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1425 return PHY_ERROR;
1426 }
1427 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1428 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1429 return PHY_ERROR;
1430 }
1431 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1432 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1433 return PHY_ERROR;
1434 }
1435 }
1436 if (np->phy_oui == PHY_OUI_REALTEK) {
1437 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1473 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1438 np->phy_rev == PHY_REV_REALTEK_8211B) { 1474 np->phy_rev == PHY_REV_REALTEK_8211B) {
1439 /* reset could have cleared these out, set them back */ 1475 /* reset could have cleared these out, set them back */
1440 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1476 if (init_realtek_8211b(dev, np)) {
1441 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1477 netdev_info(dev, "%s: phy init failed\n",
1442 return PHY_ERROR; 1478 pci_name(np->pci_dev));
1443 }
1444 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1445 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1446 return PHY_ERROR;
1447 }
1448 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1449 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1450 return PHY_ERROR;
1451 }
1452 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1453 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1454 return PHY_ERROR;
1455 }
1456 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1457 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1458 return PHY_ERROR;
1459 }
1460 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1461 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1462 return PHY_ERROR; 1479 return PHY_ERROR;
1463 } 1480 }
1464 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1481 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1465 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1482 if (init_realtek_8201(dev, np) ||
1483 init_realtek_8201_cross(dev, np)) {
1484 netdev_info(dev, "%s: phy init failed\n",
1485 pci_name(np->pci_dev));
1466 return PHY_ERROR; 1486 return PHY_ERROR;
1467 } 1487 }
1468 } 1488 }
1469 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1470 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1471 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1472 phy_reserved |= PHY_REALTEK_INIT7;
1473 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1474 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1475 return PHY_ERROR;
1476 }
1477 }
1478 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1479 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1480 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1481 return PHY_ERROR;
1482 }
1483 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
1484 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1485 phy_reserved |= PHY_REALTEK_INIT3;
1486 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
1487 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1488 return PHY_ERROR;
1489 }
1490 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1491 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1492 return PHY_ERROR;
1493 }
1494 }
1495 }
1496 } 1489 }
1497 1490
1498 /* some phys clear out pause advertisement on reset, set it back */ 1491 /* some phys clear out pause advertisement on reset, set it back */
@@ -1501,12 +1494,10 @@ static int phy_init(struct net_device *dev)
1501 /* restart auto negotiation, power down phy */ 1494 /* restart auto negotiation, power down phy */
1502 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1495 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1503 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1496 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1504 if (phy_power_down) { 1497 if (phy_power_down)
1505 mii_control |= BMCR_PDOWN; 1498 mii_control |= BMCR_PDOWN;
1506 } 1499 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1508 return PHY_ERROR; 1500 return PHY_ERROR;
1509 }
1510 1501
1511 return 0; 1502 return 0;
1512} 1503}
@@ -1517,7 +1508,6 @@ static void nv_start_rx(struct net_device *dev)
1517 u8 __iomem *base = get_hwbase(dev); 1508 u8 __iomem *base = get_hwbase(dev);
1518 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1509 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1519 1510
1520 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1521 /* Already running? Stop it. */ 1511 /* Already running? Stop it. */
1522 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1512 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1523 rx_ctrl &= ~NVREG_RCVCTL_START; 1513 rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@ static void nv_start_rx(struct net_device *dev)
1526 } 1516 }
1527 writel(np->linkspeed, base + NvRegLinkSpeed); 1517 writel(np->linkspeed, base + NvRegLinkSpeed);
1528 pci_push(base); 1518 pci_push(base);
1529 rx_ctrl |= NVREG_RCVCTL_START; 1519 rx_ctrl |= NVREG_RCVCTL_START;
1530 if (np->mac_in_use) 1520 if (np->mac_in_use)
1531 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1521 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1532 writel(rx_ctrl, base + NvRegReceiverControl); 1522 writel(rx_ctrl, base + NvRegReceiverControl);
1533 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1534 dev->name, np->duplex, np->linkspeed);
1535 pci_push(base); 1523 pci_push(base);
1536} 1524}
1537 1525
@@ -1541,15 +1529,15 @@ static void nv_stop_rx(struct net_device *dev)
1541 u8 __iomem *base = get_hwbase(dev); 1529 u8 __iomem *base = get_hwbase(dev);
1542 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1530 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1543 1531
1544 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1545 if (!np->mac_in_use) 1532 if (!np->mac_in_use)
1546 rx_ctrl &= ~NVREG_RCVCTL_START; 1533 rx_ctrl &= ~NVREG_RCVCTL_START;
1547 else 1534 else
1548 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1535 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1549 writel(rx_ctrl, base + NvRegReceiverControl); 1536 writel(rx_ctrl, base + NvRegReceiverControl);
1550 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1537 if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1551 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1538 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1552 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1539 netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1540 __func__);
1553 1541
1554 udelay(NV_RXSTOP_DELAY2); 1542 udelay(NV_RXSTOP_DELAY2);
1555 if (!np->mac_in_use) 1543 if (!np->mac_in_use)
@@ -1562,7 +1550,6 @@ static void nv_start_tx(struct net_device *dev)
1562 u8 __iomem *base = get_hwbase(dev); 1550 u8 __iomem *base = get_hwbase(dev);
1563 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1551 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1564 1552
1565 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1566 tx_ctrl |= NVREG_XMITCTL_START; 1553 tx_ctrl |= NVREG_XMITCTL_START;
1567 if (np->mac_in_use) 1554 if (np->mac_in_use)
1568 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1555 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@ static void nv_stop_tx(struct net_device *dev)
1576 u8 __iomem *base = get_hwbase(dev); 1563 u8 __iomem *base = get_hwbase(dev);
1577 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1564 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578 1565
1579 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1580 if (!np->mac_in_use) 1566 if (!np->mac_in_use)
1581 tx_ctrl &= ~NVREG_XMITCTL_START; 1567 tx_ctrl &= ~NVREG_XMITCTL_START;
1582 else 1568 else
1583 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1569 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1584 writel(tx_ctrl, base + NvRegTransmitterControl); 1570 writel(tx_ctrl, base + NvRegTransmitterControl);
1585 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1571 if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1586 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1572 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1587 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1573 netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1574 __func__);
1588 1575
1589 udelay(NV_TXSTOP_DELAY2); 1576 udelay(NV_TXSTOP_DELAY2);
1590 if (!np->mac_in_use) 1577 if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@ static void nv_txrx_reset(struct net_device *dev)
1609 struct fe_priv *np = netdev_priv(dev); 1596 struct fe_priv *np = netdev_priv(dev);
1610 u8 __iomem *base = get_hwbase(dev); 1597 u8 __iomem *base = get_hwbase(dev);
1611 1598
1612 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1613 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1599 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1614 pci_push(base); 1600 pci_push(base);
1615 udelay(NV_TXRX_RESET_DELAY); 1601 udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@ static void nv_mac_reset(struct net_device *dev)
1623 u8 __iomem *base = get_hwbase(dev); 1609 u8 __iomem *base = get_hwbase(dev);
1624 u32 temp1, temp2, temp3; 1610 u32 temp1, temp2, temp3;
1625 1611
1626 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1627
1628 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1612 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1629 pci_push(base); 1613 pci_push(base);
1630 1614
@@ -1745,7 +1729,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1745static int nv_alloc_rx(struct net_device *dev) 1729static int nv_alloc_rx(struct net_device *dev)
1746{ 1730{
1747 struct fe_priv *np = netdev_priv(dev); 1731 struct fe_priv *np = netdev_priv(dev);
1748 struct ring_desc* less_rx; 1732 struct ring_desc *less_rx;
1749 1733
1750 less_rx = np->get_rx.orig; 1734 less_rx = np->get_rx.orig;
1751 if (less_rx-- == np->first_rx.orig) 1735 if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@ static int nv_alloc_rx(struct net_device *dev)
1767 np->put_rx.orig = np->first_rx.orig; 1751 np->put_rx.orig = np->first_rx.orig;
1768 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1752 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1769 np->put_rx_ctx = np->first_rx_ctx; 1753 np->put_rx_ctx = np->first_rx_ctx;
1770 } else { 1754 } else
1771 return 1; 1755 return 1;
1772 }
1773 } 1756 }
1774 return 0; 1757 return 0;
1775} 1758}
@@ -1777,7 +1760,7 @@ static int nv_alloc_rx(struct net_device *dev)
1777static int nv_alloc_rx_optimized(struct net_device *dev) 1760static int nv_alloc_rx_optimized(struct net_device *dev)
1778{ 1761{
1779 struct fe_priv *np = netdev_priv(dev); 1762 struct fe_priv *np = netdev_priv(dev);
1780 struct ring_desc_ex* less_rx; 1763 struct ring_desc_ex *less_rx;
1781 1764
1782 less_rx = np->get_rx.ex; 1765 less_rx = np->get_rx.ex;
1783 if (less_rx-- == np->first_rx.ex) 1766 if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1800 np->put_rx.ex = np->first_rx.ex; 1783 np->put_rx.ex = np->first_rx.ex;
1801 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1784 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1802 np->put_rx_ctx = np->first_rx_ctx; 1785 np->put_rx_ctx = np->first_rx_ctx;
1803 } else { 1786 } else
1804 return 1; 1787 return 1;
1805 }
1806 } 1788 }
1807 return 0; 1789 return 0;
1808} 1790}
@@ -2018,24 +2000,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
2018 2000
2019/* Known Good seed sets */ 2001/* Known Good seed sets */
2020static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2002static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2003 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2022 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2004 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2023 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2005 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2024 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2006 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2025 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2007 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2026 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2008 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2027 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2009 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2028 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2010 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2029 2011
2030static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2012static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2031 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2013 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2032 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2014 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2033 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2015 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2034 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2016 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2035 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2017 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2036 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2018 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2037 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2019 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2038 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2020 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2039 2021
2040static void nv_gear_backoff_reseed(struct net_device *dev) 2022static void nv_gear_backoff_reseed(struct net_device *dev)
2041{ 2023{
@@ -2083,13 +2065,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
2083 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2065 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2084 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2066 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2085 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2067 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2086 writel(temp,base + NvRegBackOffControl); 2068 writel(temp, base + NvRegBackOffControl);
2087 2069
2088 /* Setup seeds for all gear LFSRs. */ 2070 /* Setup seeds for all gear LFSRs. */
2089 get_random_bytes(&seedset, sizeof(seedset)); 2071 get_random_bytes(&seedset, sizeof(seedset));
2090 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2072 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2091 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2073 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2092 {
2093 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2074 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2094 temp |= main_seedset[seedset][i-1] & 0x3ff; 2075 temp |= main_seedset[seedset][i-1] & 0x3ff;
2095 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2076 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
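nv_gear_backoff_reseed() picks one of the eight known-good rows at random and then programs every gear LFSR from it; the row selection reduces to a random draw modulo the table height. In isolation (sketch):

        #include <linux/random.h>
        #include <linux/types.h>

        /* Pick a random row index into main_seedset[]/gear_seedset[]. */
        static u32 pick_seedset_row(void)
        {
                u32 row;

                get_random_bytes(&row, sizeof(row));
                return row % BACKOFF_SEEDSET_ROWS;      /* 8 rows here */
        }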
@@ -2113,10 +2094,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 u32 size = skb_headlen(skb); 2094 u32 size = skb_headlen(skb);
2114 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2095 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2115 u32 empty_slots; 2096 u32 empty_slots;
2116 struct ring_desc* put_tx; 2097 struct ring_desc *put_tx;
2117 struct ring_desc* start_tx; 2098 struct ring_desc *start_tx;
2118 struct ring_desc* prev_tx; 2099 struct ring_desc *prev_tx;
2119 struct nv_skb_map* prev_tx_ctx; 2100 struct nv_skb_map *prev_tx_ctx;
2120 unsigned long flags; 2101 unsigned long flags;
2121 2102
2122 /* add fragments to entries count */ 2103 /* add fragments to entries count */
@@ -2204,18 +2185,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2204 2185
2205 spin_unlock_irqrestore(&np->lock, flags); 2186 spin_unlock_irqrestore(&np->lock, flags);
2206 2187
2207 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2208 dev->name, entries, tx_flags_extra);
2209 {
2210 int j;
2211 for (j=0; j<64; j++) {
2212 if ((j%16) == 0)
2213 dprintk("\n%03x:", j);
2214 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2215 }
2216 dprintk("\n");
2217 }
2218
2219 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2188 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2220 return NETDEV_TX_OK; 2189 return NETDEV_TX_OK;
2221} 2190}
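The deleted block hand-rolled a 16-bytes-per-row hex dump of the first 64 bytes of each transmitted skb. Should such a dump ever be needed again, print_hex_dump() produces the same layout in a single call; a sketch, not part of this patch, that also guards against skbs shorter than 64 bytes:

        #include <linux/kernel.h>
        #include <linux/printk.h>
        #include <linux/skbuff.h>

        static void dump_skb_head(const struct sk_buff *skb)
        {
                print_hex_dump(KERN_DEBUG, "xmit: ", DUMP_PREFIX_OFFSET,
                               16, 1, skb->data,
                               min_t(unsigned int, 64, skb->len), false);
        }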
@@ -2233,11 +2202,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2233 u32 size = skb_headlen(skb); 2202 u32 size = skb_headlen(skb);
2234 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2203 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2235 u32 empty_slots; 2204 u32 empty_slots;
2236 struct ring_desc_ex* put_tx; 2205 struct ring_desc_ex *put_tx;
2237 struct ring_desc_ex* start_tx; 2206 struct ring_desc_ex *start_tx;
2238 struct ring_desc_ex* prev_tx; 2207 struct ring_desc_ex *prev_tx;
2239 struct nv_skb_map* prev_tx_ctx; 2208 struct nv_skb_map *prev_tx_ctx;
2240 struct nv_skb_map* start_tx_ctx; 2209 struct nv_skb_map *start_tx_ctx;
2241 unsigned long flags; 2210 unsigned long flags;
2242 2211
2243 /* add fragments to entries count */ 2212 /* add fragments to entries count */
@@ -2355,18 +2324,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2355 2324
2356 spin_unlock_irqrestore(&np->lock, flags); 2325 spin_unlock_irqrestore(&np->lock, flags);
2357 2326
2358 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2359 dev->name, entries, tx_flags_extra);
2360 {
2361 int j;
2362 for (j=0; j<64; j++) {
2363 if ((j%16) == 0)
2364 dprintk("\n%03x:", j);
2365 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2366 }
2367 dprintk("\n");
2368 }
2369
2370 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2327 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2371 return NETDEV_TX_OK; 2328 return NETDEV_TX_OK;
2372} 2329}
@@ -2399,15 +2356,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
2399 struct fe_priv *np = netdev_priv(dev); 2356 struct fe_priv *np = netdev_priv(dev);
2400 u32 flags; 2357 u32 flags;
2401 int tx_work = 0; 2358 int tx_work = 0;
2402 struct ring_desc* orig_get_tx = np->get_tx.orig; 2359 struct ring_desc *orig_get_tx = np->get_tx.orig;
2403 2360
2404 while ((np->get_tx.orig != np->put_tx.orig) && 2361 while ((np->get_tx.orig != np->put_tx.orig) &&
2405 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2362 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2406 (tx_work < limit)) { 2363 (tx_work < limit)) {
2407 2364
2408 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2409 dev->name, flags);
2410
2411 nv_unmap_txskb(np, np->get_tx_ctx); 2365 nv_unmap_txskb(np, np->get_tx_ctx);
2412 2366
2413 if (np->desc_ver == DESC_VER_1) { 2367 if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2464 struct fe_priv *np = netdev_priv(dev); 2418 struct fe_priv *np = netdev_priv(dev);
2465 u32 flags; 2419 u32 flags;
2466 int tx_work = 0; 2420 int tx_work = 0;
2467 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2421 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2468 2422
2469 while ((np->get_tx.ex != np->put_tx.ex) && 2423 while ((np->get_tx.ex != np->put_tx.ex) &&
2470 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2424 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2471 (tx_work < limit)) { 2425 (tx_work < limit)) {
2472 2426
2473 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2474 dev->name, flags);
2475
2476 nv_unmap_txskb(np, np->get_tx_ctx); 2427 nv_unmap_txskb(np, np->get_tx_ctx);
2477 2428
2478 if (flags & NV_TX2_LASTPACKET) { 2429 if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2491 np->get_tx_ctx->skb = NULL; 2442 np->get_tx_ctx->skb = NULL;
2492 tx_work++; 2443 tx_work++;
2493 2444
2494 if (np->tx_limit) { 2445 if (np->tx_limit)
2495 nv_tx_flip_ownership(dev); 2446 nv_tx_flip_ownership(dev);
2496 }
2497 } 2447 }
2498 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2448 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2499 np->get_tx.ex = np->first_tx.ex; 2449 np->get_tx.ex = np->first_tx.ex;
@@ -2518,57 +2468,56 @@ static void nv_tx_timeout(struct net_device *dev)
2518 u32 status; 2468 u32 status;
2519 union ring_type put_tx; 2469 union ring_type put_tx;
2520 int saved_tx_limit; 2470 int saved_tx_limit;
2471 int i;
2521 2472
2522 if (np->msi_flags & NV_MSI_X_ENABLED) 2473 if (np->msi_flags & NV_MSI_X_ENABLED)
2523 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2474 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2524 else 2475 else
2525 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2476 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2526 2477
2527 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2478 netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2528 2479
2529 { 2480 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2530 int i; 2481 netdev_info(dev, "Dumping tx registers\n");
2531 2482 for (i = 0; i <= np->register_size; i += 32) {
2532 printk(KERN_INFO "%s: Ring at %lx\n", 2483 netdev_info(dev,
2533 dev->name, (unsigned long)np->ring_addr); 2484 "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2534 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2485 i,
2535 for (i=0;i<=np->register_size;i+= 32) { 2486 readl(base + i + 0), readl(base + i + 4),
2536 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2487 readl(base + i + 8), readl(base + i + 12),
2537 i, 2488 readl(base + i + 16), readl(base + i + 20),
2538 readl(base + i + 0), readl(base + i + 4), 2489 readl(base + i + 24), readl(base + i + 28));
2539 readl(base + i + 8), readl(base + i + 12), 2490 }
2540 readl(base + i + 16), readl(base + i + 20), 2491 netdev_info(dev, "Dumping tx ring\n");
2541 readl(base + i + 24), readl(base + i + 28)); 2492 for (i = 0; i < np->tx_ring_size; i += 4) {
2542 } 2493 if (!nv_optimized(np)) {
2543 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2494 netdev_info(dev,
2544 for (i=0;i<np->tx_ring_size;i+= 4) { 2495 "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2545 if (!nv_optimized(np)) { 2496 i,
2546 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2497 le32_to_cpu(np->tx_ring.orig[i].buf),
2547 i, 2498 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2548 le32_to_cpu(np->tx_ring.orig[i].buf), 2499 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2549 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2500 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2550 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2501 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2551 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2502 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2552 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2503 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2553 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2504 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2554 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2505 } else {
2555 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2506 netdev_info(dev,
2556 } else { 2507 "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2557 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2508 i,
2558 i, 2509 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2559 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2510 le32_to_cpu(np->tx_ring.ex[i].buflow),
2560 le32_to_cpu(np->tx_ring.ex[i].buflow), 2511 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2561 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2512 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2562 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2513 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2563 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2514 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2564 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2515 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2565 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2516 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2566 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2517 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2567 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2518 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2568 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2519 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2569 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2520 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2570 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2571 }
2572 } 2521 }
2573 } 2522 }
2574 2523
@@ -2616,15 +2565,13 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2616 int protolen; /* length as stored in the proto field */ 2565 int protolen; /* length as stored in the proto field */
2617 2566
2618 /* 1) calculate len according to header */ 2567 /* 1) calculate len according to header */
2619 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2568 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2620 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2569 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2621 hdrlen = VLAN_HLEN; 2570 hdrlen = VLAN_HLEN;
2622 } else { 2571 } else {
2623 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2572 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2624 hdrlen = ETH_HLEN; 2573 hdrlen = ETH_HLEN;
2625 } 2574 }
2626 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2627 dev->name, datalen, protolen, hdrlen);
2628 if (protolen > ETH_DATA_LEN) 2575 if (protolen > ETH_DATA_LEN)
2629 return datalen; /* Value in proto field not a len, no checks possible */ 2576 return datalen; /* Value in proto field not a len, no checks possible */
2630 2577
@@ -2635,26 +2582,18 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2635 /* more data on wire than in 802 header, trim off 2582 /* more data on wire than in 802 header, trim off
2636 * additional data. 2583 * additional data.
2637 */ 2584 */
2638 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2639 dev->name, protolen);
2640 return protolen; 2585 return protolen;
2641 } else { 2586 } else {
2642 /* less data on wire than mentioned in header. 2587 /* less data on wire than mentioned in header.
2643 * Discard the packet. 2588 * Discard the packet.
2644 */ 2589 */
2645 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2646 dev->name);
2647 return -1; 2590 return -1;
2648 } 2591 }
2649 } else { 2592 } else {
2650 /* short packet. Accept only if 802 values are also short */ 2593 /* short packet. Accept only if 802 values are also short */
2651 if (protolen > ETH_ZLEN) { 2594 if (protolen > ETH_ZLEN) {
2652 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2653 dev->name);
2654 return -1; 2595 return -1;
2655 } 2596 }
2656 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2657 dev->name, datalen);
2658 return datalen; 2597 return datalen;
2659 } 2598 }
2660} 2599}
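nv_getlen() only distrusts the on-wire length when the proto field can actually be an 802.3 length: values above ETH_DATA_LEN (1500) are EtherTypes, so no sanity check is possible and datalen is accepted as-is. The discriminator in isolation (sketch):

        #include <asm/byteorder.h>
        #include <linux/if_ether.h>
        #include <linux/types.h>

        /* True when h_proto encodes an 802.3 length rather than an
         * EtherType, i.e. when a length check against the wire data
         * makes sense.
         */
        static bool proto_field_is_length(__be16 proto)
        {
                return be16_to_cpu(proto) <= ETH_DATA_LEN;
        }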
@@ -2667,13 +2606,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
2667 struct sk_buff *skb; 2606 struct sk_buff *skb;
2668 int len; 2607 int len;
2669 2608
2670 while((np->get_rx.orig != np->put_rx.orig) && 2609 while ((np->get_rx.orig != np->put_rx.orig) &&
2671 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2610 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2672 (rx_work < limit)) { 2611 (rx_work < limit)) {
2673 2612
2674 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2675 dev->name, flags);
2676
2677 /* 2613 /*
2678 * the packet is for us - immediately tear down the pci mapping. 2614 * the packet is for us - immediately tear down the pci mapping.
2679 * TODO: check if a prefetch of the first cacheline improves 2615 * TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2685 skb = np->get_rx_ctx->skb; 2621 skb = np->get_rx_ctx->skb;
2686 np->get_rx_ctx->skb = NULL; 2622 np->get_rx_ctx->skb = NULL;
2687 2623
2688 {
2689 int j;
2690 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2691 for (j=0; j<64; j++) {
2692 if ((j%16) == 0)
2693 dprintk("\n%03x:", j);
2694 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2695 }
2696 dprintk("\n");
2697 }
2698 /* look at what we actually got: */ 2624 /* look at what we actually got: */
2699 if (np->desc_ver == DESC_VER_1) { 2625 if (np->desc_ver == DESC_VER_1) {
2700 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2626 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2710 } 2636 }
2711 /* framing errors are soft errors */ 2637 /* framing errors are soft errors */
2712 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2638 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2713 if (flags & NV_RX_SUBSTRACT1) { 2639 if (flags & NV_RX_SUBSTRACT1)
2714 len--; 2640 len--;
2715 }
2716 } 2641 }
2717 /* the rest are hard errors */ 2642 /* the rest are hard errors */
2718 else { 2643 else {
@@ -2745,9 +2670,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2745 } 2670 }
2746 /* framing errors are soft errors */ 2671 /* framing errors are soft errors */
2747 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2672 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2748 if (flags & NV_RX2_SUBSTRACT1) { 2673 if (flags & NV_RX2_SUBSTRACT1)
2749 len--; 2674 len--;
2750 }
2751 } 2675 }
2752 /* the rest are hard errors */ 2676 /* the rest are hard errors */
2753 else { 2677 else {
@@ -2771,8 +2695,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2771 /* got a valid packet - forward it to the network core */ 2695 /* got a valid packet - forward it to the network core */
2772 skb_put(skb, len); 2696 skb_put(skb, len);
2773 skb->protocol = eth_type_trans(skb, dev); 2697 skb->protocol = eth_type_trans(skb, dev);
2774 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2775 dev->name, len, skb->protocol);
2776 napi_gro_receive(&np->napi, skb); 2698 napi_gro_receive(&np->napi, skb);
2777 dev->stats.rx_packets++; 2699 dev->stats.rx_packets++;
2778 dev->stats.rx_bytes += len; 2700 dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2797 struct sk_buff *skb; 2719 struct sk_buff *skb;
2798 int len; 2720 int len;
2799 2721
2800 while((np->get_rx.ex != np->put_rx.ex) && 2722 while ((np->get_rx.ex != np->put_rx.ex) &&
2801 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2723 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2802 (rx_work < limit)) { 2724 (rx_work < limit)) {
2803 2725
2804 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2805 dev->name, flags);
2806
2807 /* 2726 /*
2808 * the packet is for us - immediately tear down the pci mapping. 2727 * the packet is for us - immediately tear down the pci mapping.
2809 * TODO: check if a prefetch of the first cacheline improves 2728 * TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2815 skb = np->get_rx_ctx->skb; 2734 skb = np->get_rx_ctx->skb;
2816 np->get_rx_ctx->skb = NULL; 2735 np->get_rx_ctx->skb = NULL;
2817 2736
2818 {
2819 int j;
2820 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2821 for (j=0; j<64; j++) {
2822 if ((j%16) == 0)
2823 dprintk("\n%03x:", j);
2824 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2825 }
2826 dprintk("\n");
2827 }
2828 /* look at what we actually got: */ 2737 /* look at what we actually got: */
2829 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2738 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2830 len = flags & LEN_MASK_V2; 2739 len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2838 } 2747 }
2839 /* framing errors are soft errors */ 2748 /* framing errors are soft errors */
2840 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2749 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2841 if (flags & NV_RX2_SUBSTRACT1) { 2750 if (flags & NV_RX2_SUBSTRACT1)
2842 len--; 2751 len--;
2843 }
2844 } 2752 }
2845 /* the rest are hard errors */ 2753 /* the rest are hard errors */
2846 else { 2754 else {
@@ -2858,9 +2766,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2858 skb->protocol = eth_type_trans(skb, dev); 2766 skb->protocol = eth_type_trans(skb, dev);
2859 prefetch(skb->data); 2767 prefetch(skb->data);
2860 2768
2861 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2862 dev->name, len, skb->protocol);
2863
2864 if (likely(!np->vlangrp)) { 2769 if (likely(!np->vlangrp)) {
2865 napi_gro_receive(&np->napi, skb); 2770 napi_gro_receive(&np->napi, skb);
2866 } else { 2771 } else {
@@ -2949,7 +2854,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2949 /* reinit nic view of the rx queue */ 2854 /* reinit nic view of the rx queue */
2950 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2855 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2951 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2856 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2952 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2857 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2953 base + NvRegRingSizes); 2858 base + NvRegRingSizes);
2954 pci_push(base); 2859 pci_push(base);
2955 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2860 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
2986static int nv_set_mac_address(struct net_device *dev, void *addr) 2891static int nv_set_mac_address(struct net_device *dev, void *addr)
2987{ 2892{
2988 struct fe_priv *np = netdev_priv(dev); 2893 struct fe_priv *np = netdev_priv(dev);
2989 struct sockaddr *macaddr = (struct sockaddr*)addr; 2894 struct sockaddr *macaddr = (struct sockaddr *)addr;
2990 2895
2991 if (!is_valid_ether_addr(macaddr->sa_data)) 2896 if (!is_valid_ether_addr(macaddr->sa_data))
2992 return -EADDRNOTAVAIL; 2897 return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@ static void nv_set_multicast(struct net_device *dev)
3076 writel(mask[0], base + NvRegMulticastMaskA); 2981 writel(mask[0], base + NvRegMulticastMaskA);
3077 writel(mask[1], base + NvRegMulticastMaskB); 2982 writel(mask[1], base + NvRegMulticastMaskB);
3078 writel(pff, base + NvRegPacketFilterFlags); 2983 writel(pff, base + NvRegPacketFilterFlags);
3079 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3080 dev->name);
3081 nv_start_rx(dev); 2984 nv_start_rx(dev);
3082 spin_unlock_irq(&np->lock); 2985 spin_unlock_irq(&np->lock);
3083} 2986}
@@ -3152,8 +3055,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3152 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3055 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3153 3056
3154 if (!(mii_status & BMSR_LSTATUS)) { 3057 if (!(mii_status & BMSR_LSTATUS)) {
3155 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3156 dev->name);
3157 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3058 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3158 newdup = 0; 3059 newdup = 0;
3159 retval = 0; 3060 retval = 0;
@@ -3161,8 +3062,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3161 } 3062 }
3162 3063
3163 if (np->autoneg == 0) { 3064 if (np->autoneg == 0) {
3164 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3165 dev->name, np->fixed_mode);
3166 if (np->fixed_mode & LPA_100FULL) { 3065 if (np->fixed_mode & LPA_100FULL) {
3167 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3066 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3168 newdup = 1; 3067 newdup = 1;
@@ -3185,14 +3084,11 @@ static int nv_update_linkspeed(struct net_device *dev)
3185 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3084 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3186 newdup = 0; 3085 newdup = 0;
3187 retval = 0; 3086 retval = 0;
3188 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3189 goto set_speed; 3087 goto set_speed;
3190 } 3088 }
3191 3089
3192 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3090 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3193 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3091 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3194 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3195 dev->name, adv, lpa);
3196 3092
3197 retval = 1; 3093 retval = 1;
3198 if (np->gigabit == PHY_GIGABIT) { 3094 if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3201 3097
3202 if ((control_1000 & ADVERTISE_1000FULL) && 3098 if ((control_1000 & ADVERTISE_1000FULL) &&
3203 (status_1000 & LPA_1000FULL)) { 3099 (status_1000 & LPA_1000FULL)) {
3204 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3205 dev->name);
3206 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3100 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3207 newdup = 1; 3101 newdup = 1;
3208 goto set_speed; 3102 goto set_speed;
@@ -3224,7 +3118,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3224 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3118 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3225 newdup = 0; 3119 newdup = 0;
3226 } else { 3120 } else {
3227 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3228 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3121 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3229 newdup = 0; 3122 newdup = 0;
3230 } 3123 }
@@ -3233,9 +3126,6 @@ set_speed:
3233 if (np->duplex == newdup && np->linkspeed == newls) 3126 if (np->duplex == newdup && np->linkspeed == newls)
3234 return retval; 3127 return retval;
3235 3128
3236 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3237 dev->name, np->linkspeed, np->duplex, newls, newdup);
3238
3239 np->duplex = newdup; 3129 np->duplex = newdup;
3240 np->linkspeed = newls; 3130 np->linkspeed = newls;
3241 3131
@@ -3302,7 +3192,7 @@ set_speed:
3302 } 3192 }
3303 writel(txreg, base + NvRegTxWatermark); 3193 writel(txreg, base + NvRegTxWatermark);
3304 3194
3305 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3195 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3306 base + NvRegMisc1); 3196 base + NvRegMisc1);
3307 pci_push(base); 3197 pci_push(base);
3308 writel(np->linkspeed, base + NvRegLinkSpeed); 3198 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@ set_speed:
3312 /* setup pause frame */ 3202 /* setup pause frame */
3313 if (np->duplex != 0) { 3203 if (np->duplex != 0) {
3314 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3204 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3315 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3205 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3316 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3206 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3317 3207
3318 switch (adv_pause) { 3208 switch (adv_pause) {
3319 case ADVERTISE_PAUSE_CAP: 3209 case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@ set_speed:
3324 } 3214 }
3325 break; 3215 break;
3326 case ADVERTISE_PAUSE_ASYM: 3216 case ADVERTISE_PAUSE_ASYM:
3327 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3217 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3328 {
3329 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3218 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3330 }
3331 break; 3219 break;
3332 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3220 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3333 if (lpa_pause & LPA_PAUSE_CAP) 3221 if (lpa_pause & LPA_PAUSE_CAP) {
3334 {
3335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3222 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3336 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3223 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3337 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3224 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3338 } 3225 }
3339 if (lpa_pause == LPA_PAUSE_ASYM) 3226 if (lpa_pause == LPA_PAUSE_ASYM)
3340 {
3341 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3227 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3342 }
3343 break; 3228 break;
3344 } 3229 }
3345 } else { 3230 } else {
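
The switch above is the standard 802.3 (Annex 28B) flow-control resolution: the outcome depends on which of the symmetric (PAUSE_CAP) and asymmetric (PAUSE_ASYM) bits each side advertised. A condensed sketch of the same truth table, where tx_requested is an assumption standing in for the NV_PAUSEFRAME_TX_REQ policy bit:

        static u32 resolve_pause(u16 adv, u16 lpa, bool tx_requested)
        {
                u32 flags = 0;

                if ((adv & ADVERTISE_PAUSE_CAP) && (lpa & LPA_PAUSE_CAP)) {
                        /* both symmetric-capable: RX on, TX if requested */
                        flags |= NV_PAUSEFRAME_RX_ENABLE;
                        if (tx_requested)
                                flags |= NV_PAUSEFRAME_TX_ENABLE;
                } else if ((adv & ADVERTISE_PAUSE_ASYM) &&
                           (lpa & LPA_PAUSE_ASYM) &&
                           ((adv & ADVERTISE_PAUSE_CAP) ||
                            (lpa & LPA_PAUSE_CAP))) {
                        /* asymmetric match: direction follows the CAP bit */
                        if (adv & ADVERTISE_PAUSE_CAP)
                                flags |= NV_PAUSEFRAME_RX_ENABLE;
                        else
                                flags |= NV_PAUSEFRAME_TX_ENABLE;
                }
                return flags;
        }
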
@@ -3361,14 +3246,14 @@ static void nv_linkchange(struct net_device *dev)
3361 if (nv_update_linkspeed(dev)) { 3246 if (nv_update_linkspeed(dev)) {
3362 if (!netif_carrier_ok(dev)) { 3247 if (!netif_carrier_ok(dev)) {
3363 netif_carrier_on(dev); 3248 netif_carrier_on(dev);
3364 printk(KERN_INFO "%s: link up.\n", dev->name); 3249 netdev_info(dev, "link up\n");
3365 nv_txrx_gate(dev, false); 3250 nv_txrx_gate(dev, false);
3366 nv_start_rx(dev); 3251 nv_start_rx(dev);
3367 } 3252 }
3368 } else { 3253 } else {
3369 if (netif_carrier_ok(dev)) { 3254 if (netif_carrier_ok(dev)) {
3370 netif_carrier_off(dev); 3255 netif_carrier_off(dev);
3371 printk(KERN_INFO "%s: link down.\n", dev->name); 3256 netdev_info(dev, "link down\n");
3372 nv_txrx_gate(dev, true); 3257 nv_txrx_gate(dev, true);
3373 nv_stop_rx(dev); 3258 nv_stop_rx(dev);
3374 } 3259 }
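
The printk-to-netdev_info conversion here is the core of the patch: the netdev_* helpers from <linux/netdevice.h> build the device and interface prefix themselves, so the explicit "%s" / dev->name pair disappears from every call site and the prefix format stays uniform. The two forms are equivalent:

        /* old: caller spells out the interface name */
        printk(KERN_INFO "%s: link up.\n", dev->name);

        /* new: the helper supplies the driver/interface prefix itself */
        netdev_info(dev, "link up\n");
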
@@ -3382,11 +3267,9 @@ static void nv_link_irq(struct net_device *dev)
3382 3267
3383 miistat = readl(base + NvRegMIIStatus); 3268 miistat = readl(base + NvRegMIIStatus);
3384 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3269 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3385 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3386 3270
3387 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3271 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3388 nv_linkchange(dev); 3272 nv_linkchange(dev);
3389 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3390} 3273}
3391 3274
3392static void nv_msi_workaround(struct fe_priv *np) 3275static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3437 struct fe_priv *np = netdev_priv(dev); 3320 struct fe_priv *np = netdev_priv(dev);
3438 u8 __iomem *base = get_hwbase(dev); 3321 u8 __iomem *base = get_hwbase(dev);
3439 3322
3440 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3441
3442 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3323 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3443 np->events = readl(base + NvRegIrqStatus); 3324 np->events = readl(base + NvRegIrqStatus);
3444 writel(np->events, base + NvRegIrqStatus); 3325 writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3446 np->events = readl(base + NvRegMSIXIrqStatus); 3327 np->events = readl(base + NvRegMSIXIrqStatus);
3447 writel(np->events, base + NvRegMSIXIrqStatus); 3328 writel(np->events, base + NvRegMSIXIrqStatus);
3448 } 3329 }
3449 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3450 if (!(np->events & np->irqmask)) 3330 if (!(np->events & np->irqmask))
3451 return IRQ_NONE; 3331 return IRQ_NONE;
3452 3332
@@ -3460,8 +3340,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3460 __napi_schedule(&np->napi); 3340 __napi_schedule(&np->napi);
3461 } 3341 }
3462 3342
3463 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3464
3465 return IRQ_HANDLED; 3343 return IRQ_HANDLED;
3466} 3344}
3467 3345
@@ -3476,8 +3354,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3476 struct fe_priv *np = netdev_priv(dev); 3354 struct fe_priv *np = netdev_priv(dev);
3477 u8 __iomem *base = get_hwbase(dev); 3355 u8 __iomem *base = get_hwbase(dev);
3478 3356
3479 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3480
3481 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3357 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3482 np->events = readl(base + NvRegIrqStatus); 3358 np->events = readl(base + NvRegIrqStatus);
3483 writel(np->events, base + NvRegIrqStatus); 3359 writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3485 np->events = readl(base + NvRegMSIXIrqStatus); 3361 np->events = readl(base + NvRegMSIXIrqStatus);
3486 writel(np->events, base + NvRegMSIXIrqStatus); 3362 writel(np->events, base + NvRegMSIXIrqStatus);
3487 } 3363 }
3488 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3489 if (!(np->events & np->irqmask)) 3364 if (!(np->events & np->irqmask))
3490 return IRQ_NONE; 3365 return IRQ_NONE;
3491 3366
@@ -3498,7 +3373,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3498 writel(0, base + NvRegIrqMask); 3373 writel(0, base + NvRegIrqMask);
3499 __napi_schedule(&np->napi); 3374 __napi_schedule(&np->napi);
3500 } 3375 }
3501 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3502 3376
3503 return IRQ_HANDLED; 3377 return IRQ_HANDLED;
3504} 3378}
@@ -3512,12 +3386,9 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3512 int i; 3386 int i;
3513 unsigned long flags; 3387 unsigned long flags;
3514 3388
3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3389 for (i = 0;; i++) {
3516
3517 for (i=0; ; i++) {
3518 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3390 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3519 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3391 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3520 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3521 if (!(events & np->irqmask)) 3392 if (!(events & np->irqmask))
3522 break; 3393 break;
3523 3394
@@ -3536,12 +3407,12 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3536 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3407 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3537 } 3408 }
3538 spin_unlock_irqrestore(&np->lock, flags); 3409 spin_unlock_irqrestore(&np->lock, flags);
3539 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3410 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3411 __func__, i);
3540 break; 3412 break;
3541 } 3413 }
3542 3414
3543 } 3415 }
3544 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3545 3416
3546 return IRQ_RETVAL(i); 3417 return IRQ_RETVAL(i);
3547} 3418}
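
The loop above also shows the driver's interrupt-storm guard: TX events are serviced at most max_interrupt_work times per invocation, after which the handler backs off to a timer rather than spinning in hard-IRQ context. A stripped-down sketch, with locking and the actual mask update elided:

        for (i = 0;; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
                writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); /* ack */
                if (!(events & np->irqmask))
                        break;                  /* nothing left to service */

                /* ... reap TX completions ... */

                if (unlikely(i > max_interrupt_work)) {
                        /* storm: mask the source, let nic_poll re-enable it */
                        mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
                        netdev_dbg(dev, "%s: too many iterations (%d)\n",
                                   __func__, i);
                        break;
                }
        }
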
@@ -3553,7 +3424,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3553 u8 __iomem *base = get_hwbase(dev); 3424 u8 __iomem *base = get_hwbase(dev);
3554 unsigned long flags; 3425 unsigned long flags;
3555 int retcode; 3426 int retcode;
3556 int rx_count, tx_work=0, rx_work=0; 3427 int rx_count, tx_work = 0, rx_work = 0;
3557 3428
3558 do { 3429 do {
3559 if (!nv_optimized(np)) { 3430 if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3626 int i; 3497 int i;
3627 unsigned long flags; 3498 unsigned long flags;
3628 3499
3629 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3500 for (i = 0;; i++) {
3630
3631 for (i=0; ; i++) {
3632 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3501 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3633 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3502 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3634 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3635 if (!(events & np->irqmask)) 3503 if (!(events & np->irqmask))
3636 break; 3504 break;
3637 3505
@@ -3655,11 +3523,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3655 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3523 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3656 } 3524 }
3657 spin_unlock_irqrestore(&np->lock, flags); 3525 spin_unlock_irqrestore(&np->lock, flags);
3658 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3526 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3527 __func__, i);
3659 break; 3528 break;
3660 } 3529 }
3661 } 3530 }
3662 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3663 3531
3664 return IRQ_RETVAL(i); 3532 return IRQ_RETVAL(i);
3665} 3533}
@@ -3673,12 +3541,9 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3673 int i; 3541 int i;
3674 unsigned long flags; 3542 unsigned long flags;
3675 3543
3676 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3544 for (i = 0;; i++) {
3677
3678 for (i=0; ; i++) {
3679 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3545 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3680 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3546 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3681 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3682 if (!(events & np->irqmask)) 3547 if (!(events & np->irqmask))
3683 break; 3548 break;
3684 3549
@@ -3723,12 +3588,12 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3723 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3588 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3724 } 3589 }
3725 spin_unlock_irqrestore(&np->lock, flags); 3590 spin_unlock_irqrestore(&np->lock, flags);
3726 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3591 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3592 __func__, i);
3727 break; 3593 break;
3728 } 3594 }
3729 3595
3730 } 3596 }
3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3732 3597
3733 return IRQ_RETVAL(i); 3598 return IRQ_RETVAL(i);
3734} 3599}
@@ -3740,8 +3605,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3740 u8 __iomem *base = get_hwbase(dev); 3605 u8 __iomem *base = get_hwbase(dev);
3741 u32 events; 3606 u32 events;
3742 3607
3743 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3744
3745 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3608 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3746 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3609 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3747 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3610 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3750 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3613 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3751 } 3614 }
3752 pci_push(base); 3615 pci_push(base);
3753 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3754 if (!(events & NVREG_IRQ_TIMER)) 3616 if (!(events & NVREG_IRQ_TIMER))
3755 return IRQ_RETVAL(0); 3617 return IRQ_RETVAL(0);
3756 3618
@@ -3760,8 +3622,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3760 np->intr_test = 1; 3622 np->intr_test = 1;
3761 spin_unlock(&np->lock); 3623 spin_unlock(&np->lock);
3762 3624
3763 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3764
3765 return IRQ_RETVAL(1); 3625 return IRQ_RETVAL(1);
3766} 3626}
3767 3627
@@ -3776,17 +3636,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3776 * the remaining 8 interrupts. 3636 * the remaining 8 interrupts.
3777 */ 3637 */
3778 for (i = 0; i < 8; i++) { 3638 for (i = 0; i < 8; i++) {
3779 if ((irqmask >> i) & 0x1) { 3639 if ((irqmask >> i) & 0x1)
3780 msixmap |= vector << (i << 2); 3640 msixmap |= vector << (i << 2);
3781 }
3782 } 3641 }
3783 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3642 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3784 3643
3785 msixmap = 0; 3644 msixmap = 0;
3786 for (i = 0; i < 8; i++) { 3645 for (i = 0; i < 8; i++) {
3787 if ((irqmask >> (i + 8)) & 0x1) { 3646 if ((irqmask >> (i + 8)) & 0x1)
3788 msixmap |= vector << (i << 2); 3647 msixmap |= vector << (i << 2);
3789 }
3790 } 3648 }
3791 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3649 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3792} 3650}
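
set_msix_vector_map() packs a 4-bit vector index per interrupt source into the two 32-bit map registers (sources 0-7 in NvRegMSIXMap0, 8-15 in NvRegMSIXMap1), so vector << (i << 2) drops the index into nibble i. A worked example:

        /* Route sources 0 and 3 (irqmask 0x09) to vector 2:
         * nibble 0 = 2, nibble 3 = 2  ->  msixmap == 0x00002002 */
        u32 msixmap = 0, vector = 2, irqmask = 0x09;
        int i;

        for (i = 0; i < 8; i++)
                if ((irqmask >> i) & 0x1)
                        msixmap |= vector << (i << 2);
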
@@ -3809,17 +3667,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3809 } 3667 }
3810 3668
3811 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3669 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3812 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3670 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3813 np->msi_x_entry[i].entry = i; 3671 np->msi_x_entry[i].entry = i;
3814 } 3672 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3815 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3673 if (ret == 0) {
3816 np->msi_flags |= NV_MSI_X_ENABLED; 3674 np->msi_flags |= NV_MSI_X_ENABLED;
3817 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3675 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3818 /* Request irq for rx handling */ 3676 /* Request irq for rx handling */
3819 sprintf(np->name_rx, "%s-rx", dev->name); 3677 sprintf(np->name_rx, "%s-rx", dev->name);
3820 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3678 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3821 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3679 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3822 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3680 netdev_info(dev,
3681 "request_irq failed for rx %d\n",
3682 ret);
3823 pci_disable_msix(np->pci_dev); 3683 pci_disable_msix(np->pci_dev);
3824 np->msi_flags &= ~NV_MSI_X_ENABLED; 3684 np->msi_flags &= ~NV_MSI_X_ENABLED;
3825 goto out_err; 3685 goto out_err;
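
Another checkpatch-driven change sits at the top of this hunk: assignments buried inside if-conditions are hoisted out. The transformation is purely mechanical (nvec stands in for the vector-count expression):

        /* before */
        if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, nvec)) == 0)
                np->msi_flags |= NV_MSI_X_ENABLED;

        /* after */
        ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, nvec);
        if (ret == 0)
                np->msi_flags |= NV_MSI_X_ENABLED;
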
@@ -3828,7 +3688,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3828 sprintf(np->name_tx, "%s-tx", dev->name); 3688 sprintf(np->name_tx, "%s-tx", dev->name);
3829 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3689 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3830 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3690 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3831 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3691 netdev_info(dev,
3692 "request_irq failed for tx %d\n",
3693 ret);
3832 pci_disable_msix(np->pci_dev); 3694 pci_disable_msix(np->pci_dev);
3833 np->msi_flags &= ~NV_MSI_X_ENABLED; 3695 np->msi_flags &= ~NV_MSI_X_ENABLED;
3834 goto out_free_rx; 3696 goto out_free_rx;
@@ -3837,7 +3699,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3837 sprintf(np->name_other, "%s-other", dev->name); 3699 sprintf(np->name_other, "%s-other", dev->name);
3838 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3700 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3839 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3701 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3840 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3702 netdev_info(dev,
3703 "request_irq failed for link %d\n",
3704 ret);
3841 pci_disable_msix(np->pci_dev); 3705 pci_disable_msix(np->pci_dev);
3842 np->msi_flags &= ~NV_MSI_X_ENABLED; 3706 np->msi_flags &= ~NV_MSI_X_ENABLED;
3843 goto out_free_tx; 3707 goto out_free_tx;
@@ -3851,7 +3715,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3851 } else { 3715 } else {
3852 /* Request irq for all interrupts */ 3716 /* Request irq for all interrupts */
3853 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3717 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3854 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3718 netdev_info(dev,
3719 "request_irq failed %d\n",
3720 ret);
3855 pci_disable_msix(np->pci_dev); 3721 pci_disable_msix(np->pci_dev);
3856 np->msi_flags &= ~NV_MSI_X_ENABLED; 3722 np->msi_flags &= ~NV_MSI_X_ENABLED;
3857 goto out_err; 3723 goto out_err;
@@ -3864,11 +3730,13 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3864 } 3730 }
3865 } 3731 }
3866 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3732 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3867 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3733 ret = pci_enable_msi(np->pci_dev);
3734 if (ret == 0) {
3868 np->msi_flags |= NV_MSI_ENABLED; 3735 np->msi_flags |= NV_MSI_ENABLED;
3869 dev->irq = np->pci_dev->irq; 3736 dev->irq = np->pci_dev->irq;
3870 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3737 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3871 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3738 netdev_info(dev, "request_irq failed %d\n",
3739 ret);
3872 pci_disable_msi(np->pci_dev); 3740 pci_disable_msi(np->pci_dev);
3873 np->msi_flags &= ~NV_MSI_ENABLED; 3741 np->msi_flags &= ~NV_MSI_ENABLED;
3874 dev->irq = np->pci_dev->irq; 3742 dev->irq = np->pci_dev->irq;
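
Taken together, nv_request_irq() walks a three-step ladder: try MSI-X (per-cause vectors or one shared vector), fall back to MSI, and finally to the legacy INTx line. A compressed sketch using the PCI API of this era (pci_enable_msix()/pci_enable_msi(); later kernels fold this into pci_alloc_irq_vectors()), with all error unwinding omitted:

        static int request_irq_sketch(struct net_device *dev,
                                      irq_handler_t handler)
        {
                struct fe_priv *np = netdev_priv(dev);
                int i, nvec = np->msi_flags & NV_MSI_X_VECTORS_MASK;

                for (i = 0; i < nvec; i++)
                        np->msi_x_entry[i].entry = i;

                if ((np->msi_flags & NV_MSI_X_CAPABLE) &&
                    pci_enable_msix(np->pci_dev, np->msi_x_entry, nvec) == 0)
                        return request_irq(np->msi_x_entry[0].vector, handler,
                                           IRQF_SHARED, dev->name, dev);

                if ((np->msi_flags & NV_MSI_CAPABLE) &&
                    pci_enable_msi(np->pci_dev) == 0)
                        dev->irq = np->pci_dev->irq; /* MSI rewrites the irq */

                return request_irq(dev->irq, handler, IRQF_SHARED,
                                   dev->name, dev);
        }
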
@@ -3903,9 +3771,8 @@ static void nv_free_irq(struct net_device *dev)
3903 int i; 3771 int i;
3904 3772
3905 if (np->msi_flags & NV_MSI_X_ENABLED) { 3773 if (np->msi_flags & NV_MSI_X_ENABLED) {
3906 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3774 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3907 free_irq(np->msi_x_entry[i].vector, dev); 3775 free_irq(np->msi_x_entry[i].vector, dev);
3908 }
3909 pci_disable_msix(np->pci_dev); 3776 pci_disable_msix(np->pci_dev);
3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3777 np->msi_flags &= ~NV_MSI_X_ENABLED;
3911 } else { 3778 } else {
@@ -3954,7 +3821,7 @@ static void nv_do_nic_poll(unsigned long data)
3954 3821
3955 if (np->recover_error) { 3822 if (np->recover_error) {
3956 np->recover_error = 0; 3823 np->recover_error = 0;
3957 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 3824 netdev_info(dev, "MAC in recoverable error state\n");
3958 if (netif_running(dev)) { 3825 if (netif_running(dev)) {
3959 netif_tx_lock_bh(dev); 3826 netif_tx_lock_bh(dev);
3960 netif_addr_lock(dev); 3827 netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@ static void nv_do_nic_poll(unsigned long data)
3975 /* reinit nic view of the rx queue */ 3842 /* reinit nic view of the rx queue */
3976 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3843 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3977 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3844 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3978 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3845 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3979 base + NvRegRingSizes); 3846 base + NvRegRingSizes);
3980 pci_push(base); 3847 pci_push(base);
3981 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3848 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +3972,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4105 } 3972 }
4106 3973
4107 if (netif_carrier_ok(dev)) { 3974 if (netif_carrier_ok(dev)) {
4108 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3975 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4109 case NVREG_LINKSPEED_10: 3976 case NVREG_LINKSPEED_10:
4110 ecmd->speed = SPEED_10; 3977 ecmd->speed = SPEED_10;
4111 break; 3978 break;
@@ -4250,14 +4117,14 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4250 } 4117 }
4251 4118
4252 if (netif_running(dev)) 4119 if (netif_running(dev))
4253 printk(KERN_INFO "%s: link down.\n", dev->name); 4120 netdev_info(dev, "link down\n");
4254 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4121 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4255 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4122 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4256 bmcr |= BMCR_ANENABLE; 4123 bmcr |= BMCR_ANENABLE;
4257 /* reset the phy in order for settings to stick, 4124 /* reset the phy in order for settings to stick,
4258 * and cause autoneg to start */ 4125 * and cause autoneg to start */
4259 if (phy_reset(dev, bmcr)) { 4126 if (phy_reset(dev, bmcr)) {
4260 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4127 netdev_info(dev, "phy reset failed\n");
4261 return -EINVAL; 4128 return -EINVAL;
4262 } 4129 }
4263 } else { 4130 } else {
@@ -4306,7 +4173,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4306 if (np->phy_oui == PHY_OUI_MARVELL) { 4173 if (np->phy_oui == PHY_OUI_MARVELL) {
4307 /* reset the phy in order for forced mode settings to stick */ 4174 /* reset the phy in order for forced mode settings to stick */
4308 if (phy_reset(dev, bmcr)) { 4175 if (phy_reset(dev, bmcr)) {
4309 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4176 netdev_info(dev, "phy reset failed\n");
4310 return -EINVAL; 4177 return -EINVAL;
4311 } 4178 }
4312 } else { 4179 } else {
@@ -4344,7 +4211,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4344 4211
4345 regs->version = FORCEDETH_REGS_VER; 4212 regs->version = FORCEDETH_REGS_VER;
4346 spin_lock_irq(&np->lock); 4213 spin_lock_irq(&np->lock);
4347 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4214 for (i = 0; i <= np->register_size/sizeof(u32); i++)
4348 rbuf[i] = readl(base + i*sizeof(u32)); 4215 rbuf[i] = readl(base + i*sizeof(u32));
4349 spin_unlock_irq(&np->lock); 4216 spin_unlock_irq(&np->lock);
4350} 4217}
@@ -4368,7 +4235,7 @@ static int nv_nway_reset(struct net_device *dev)
4368 spin_unlock(&np->lock); 4235 spin_unlock(&np->lock);
4369 netif_addr_unlock(dev); 4236 netif_addr_unlock(dev);
4370 netif_tx_unlock_bh(dev); 4237 netif_tx_unlock_bh(dev);
4371 printk(KERN_INFO "%s: link down.\n", dev->name); 4238 netdev_info(dev, "link down\n");
4372 } 4239 }
4373 4240
4374 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4241 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4243,7 @@ static int nv_nway_reset(struct net_device *dev)
4376 bmcr |= BMCR_ANENABLE; 4243 bmcr |= BMCR_ANENABLE;
4377 /* reset the phy in order for settings to stick*/ 4244 /* reset the phy in order for settings to stick*/
4378 if (phy_reset(dev, bmcr)) { 4245 if (phy_reset(dev, bmcr)) {
4379 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4246 netdev_info(dev, "phy reset failed\n");
4380 return -EINVAL; 4247 return -EINVAL;
4381 } 4248 }
4382 } else { 4249 } else {
@@ -4464,10 +4331,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4464 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4331 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4465 rxtx_ring, ring_addr); 4332 rxtx_ring, ring_addr);
4466 } 4333 }
4467 if (rx_skbuff) 4334
4468 kfree(rx_skbuff); 4335 kfree(rx_skbuff);
4469 if (tx_skbuff) 4336 kfree(tx_skbuff);
4470 kfree(tx_skbuff);
4471 goto exit; 4337 goto exit;
4472 } 4338 }
4473 4339
@@ -4491,14 +4357,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4491 np->tx_ring_size = ring->tx_pending; 4357 np->tx_ring_size = ring->tx_pending;
4492 4358
4493 if (!nv_optimized(np)) { 4359 if (!nv_optimized(np)) {
4494 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4360 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4495 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4361 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4496 } else { 4362 } else {
4497 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4363 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4498 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4364 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4499 } 4365 }
4500 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4366 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4501 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4367 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4502 np->ring_addr = ring_addr; 4368 np->ring_addr = ring_addr;
4503 4369
4504 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4370 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4381,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4515 /* reinit nic view of the queues */ 4381 /* reinit nic view of the queues */
4516 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4382 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4517 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4383 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4518 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4384 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4519 base + NvRegRingSizes); 4385 base + NvRegRingSizes);
4520 pci_push(base); 4386 pci_push(base);
4521 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4387 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4416,11 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4550 4416
4551 if ((!np->autoneg && np->duplex == 0) || 4417 if ((!np->autoneg && np->duplex == 0) ||
4552 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4418 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4553 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4419 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4554 dev->name);
4555 return -EINVAL; 4420 return -EINVAL;
4556 } 4421 }
4557 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4422 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4558 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4423 netdev_info(dev, "hardware does not support tx pause frames\n");
4559 return -EINVAL; 4424 return -EINVAL;
4560 } 4425 }
4561 4426
@@ -4590,7 +4455,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4590 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4455 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4591 4456
4592 if (netif_running(dev)) 4457 if (netif_running(dev))
4593 printk(KERN_INFO "%s: link down.\n", dev->name); 4458 netdev_info(dev, "link down\n");
4594 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4459 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4595 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4460 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4596 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4461 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4706,7 @@ static int nv_loopback_test(struct net_device *dev)
4841 /* reinit nic view of the rx queue */ 4706 /* reinit nic view of the rx queue */
4842 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4707 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4843 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4708 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4844 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4709 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4845 base + NvRegRingSizes); 4710 base + NvRegRingSizes);
4846 pci_push(base); 4711 pci_push(base);
4847 4712
@@ -4852,8 +4717,7 @@ static int nv_loopback_test(struct net_device *dev)
4852 pkt_len = ETH_DATA_LEN; 4717 pkt_len = ETH_DATA_LEN;
4853 tx_skb = dev_alloc_skb(pkt_len); 4718 tx_skb = dev_alloc_skb(pkt_len);
4854 if (!tx_skb) { 4719 if (!tx_skb) {
4855 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4720 netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4856 " of %s\n", dev->name);
4857 ret = 0; 4721 ret = 0;
4858 goto out; 4722 goto out;
4859 } 4723 }
@@ -4893,29 +4757,22 @@ static int nv_loopback_test(struct net_device *dev)
4893 if (flags & NV_RX_ERROR) 4757 if (flags & NV_RX_ERROR)
4894 ret = 0; 4758 ret = 0;
4895 } else { 4759 } else {
4896 if (flags & NV_RX2_ERROR) { 4760 if (flags & NV_RX2_ERROR)
4897 ret = 0; 4761 ret = 0;
4898 }
4899 } 4762 }
4900 4763
4901 if (ret) { 4764 if (ret) {
4902 if (len != pkt_len) { 4765 if (len != pkt_len) {
4903 ret = 0; 4766 ret = 0;
4904 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4905 dev->name, len, pkt_len);
4906 } else { 4767 } else {
4907 rx_skb = np->rx_skb[0].skb; 4768 rx_skb = np->rx_skb[0].skb;
4908 for (i = 0; i < pkt_len; i++) { 4769 for (i = 0; i < pkt_len; i++) {
4909 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4770 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4910 ret = 0; 4771 ret = 0;
4911 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4912 dev->name, i);
4913 break; 4772 break;
4914 } 4773 }
4915 } 4774 }
4916 } 4775 }
4917 } else {
4918 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4919 } 4776 }
4920 4777
4921 pci_unmap_single(np->pci_dev, test_dma_addr, 4778 pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4815,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4958 netif_addr_lock(dev); 4815 netif_addr_lock(dev);
4959 spin_lock_irq(&np->lock); 4816 spin_lock_irq(&np->lock);
4960 nv_disable_hw_interrupts(dev, np->irqmask); 4817 nv_disable_hw_interrupts(dev, np->irqmask);
4961 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4818 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4962 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4819 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4963 } else { 4820 else
4964 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4821 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4965 }
4966 /* stop engines */ 4822 /* stop engines */
4967 nv_stop_rxtx(dev); 4823 nv_stop_rxtx(dev);
4968 nv_txrx_reset(dev); 4824 nv_txrx_reset(dev);
@@ -5003,7 +4859,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5003 /* reinit nic view of the rx queue */ 4859 /* reinit nic view of the rx queue */
5004 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4860 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5005 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4861 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5006 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4862 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5007 base + NvRegRingSizes); 4863 base + NvRegRingSizes);
5008 pci_push(base); 4864 pci_push(base);
5009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4865 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4962,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5106 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 4962 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5107 np->mgmt_sema = 1; 4963 np->mgmt_sema = 1;
5108 return 1; 4964 return 1;
5109 } 4965 } else
5110 else
5111 udelay(50); 4966 udelay(50);
5112 } 4967 }
5113 4968
@@ -5167,8 +5022,6 @@ static int nv_open(struct net_device *dev)
5167 int oom, i; 5022 int oom, i;
5168 u32 low; 5023 u32 low;
5169 5024
5170 dprintk(KERN_DEBUG "nv_open: begin\n");
5171
5172 /* power up phy */ 5025 /* power up phy */
5173 mii_rw(dev, np->phyaddr, MII_BMCR, 5026 mii_rw(dev, np->phyaddr, MII_BMCR,
5174 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5027 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5057,7 @@ static int nv_open(struct net_device *dev)
5204 5057
5205 /* give hw rings */ 5058 /* give hw rings */
5206 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5059 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5207 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5060 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5208 base + NvRegRingSizes); 5061 base + NvRegRingSizes);
5209 5062
5210 writel(np->linkspeed, base + NvRegLinkSpeed); 5063 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5069,11 @@ static int nv_open(struct net_device *dev)
5216 writel(np->vlanctl_bits, base + NvRegVlanControl); 5069 writel(np->vlanctl_bits, base + NvRegVlanControl);
5217 pci_push(base); 5070 pci_push(base);
5218 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5071 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5219 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5072 if (reg_delay(dev, NvRegUnknownSetupReg5,
5220 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5073 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5221 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5074 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5075 netdev_info(dev,
5076 "%s: SetupReg5, Bit 31 remained off\n", __func__);
5222 5077
5223 writel(0, base + NvRegMIIMask); 5078 writel(0, base + NvRegMIIMask);
5224 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5079 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
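
reg_delay() used to take the KERN_INFO message as a parameter and print it on timeout; it now just returns nonzero and the caller owns the message, with __func__ supplying the function name so the text cannot go stale when code moves. The resulting call shape:

        if (reg_delay(dev, NvRegUnknownSetupReg5,
                      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
                      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
                netdev_info(dev, "%s: SetupReg5, Bit 31 remained off\n",
                            __func__);
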
@@ -5251,8 +5106,7 @@ static int nv_open(struct net_device *dev)
5251 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5106 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5252 else 5107 else
5253 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5108 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5254 } 5109 } else
5255 else
5256 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5110 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5257 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5111 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5258 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5112 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5117,7 @@ static int nv_open(struct net_device *dev)
5263 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5117 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5264 5118
5265 i = readl(base + NvRegPowerState); 5119 i = readl(base + NvRegPowerState);
5266 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5120 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5267 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5121 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5268 5122
5269 pci_push(base); 5123 pci_push(base);
@@ -5276,9 +5130,8 @@ static int nv_open(struct net_device *dev)
5276 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5130 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5277 pci_push(base); 5131 pci_push(base);
5278 5132
5279 if (nv_request_irq(dev, 0)) { 5133 if (nv_request_irq(dev, 0))
5280 goto out_drain; 5134 goto out_drain;
5281 }
5282 5135
5283 /* ask for interrupts */ 5136 /* ask for interrupts */
5284 nv_enable_hw_interrupts(dev, np->irqmask); 5137 nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5149,6 @@ static int nv_open(struct net_device *dev)
5296 u32 miistat; 5149 u32 miistat;
5297 miistat = readl(base + NvRegMIIStatus); 5150 miistat = readl(base + NvRegMIIStatus);
5298 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5151 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5299 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5300 } 5152 }
5301 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5153 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5302 * to init hw */ 5154 * to init hw */
@@ -5309,7 +5161,7 @@ static int nv_open(struct net_device *dev)
5309 if (ret) { 5161 if (ret) {
5310 netif_carrier_on(dev); 5162 netif_carrier_on(dev);
5311 } else { 5163 } else {
5312 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5164 netdev_info(dev, "no link during initialization\n");
5313 netif_carrier_off(dev); 5165 netif_carrier_off(dev);
5314 } 5166 }
5315 if (oom) 5167 if (oom)
@@ -5352,7 +5204,6 @@ static int nv_close(struct net_device *dev)
5352 base = get_hwbase(dev); 5204 base = get_hwbase(dev);
5353 nv_disable_hw_interrupts(dev, np->irqmask); 5205 nv_disable_hw_interrupts(dev, np->irqmask);
5354 pci_push(base); 5206 pci_push(base);
5355 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5356 5207
5357 spin_unlock_irq(&np->lock); 5208 spin_unlock_irq(&np->lock);
5358 5209
@@ -5421,8 +5272,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5421 static int printed_version; 5272 static int printed_version;
5422 5273
5423 if (!printed_version++) 5274 if (!printed_version++)
5424 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5275 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5425 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5276 FORCEDETH_VERSION);
5426 5277
5427 dev = alloc_etherdev(sizeof(struct fe_priv)); 5278 dev = alloc_etherdev(sizeof(struct fe_priv));
5428 err = -ENOMEM; 5279 err = -ENOMEM;
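
The probe banner drops the hand-written "%s"/DRV_NAME prefix in favor of pr_info(). Conversions like this normally pair with a pr_fmt definition at the top of the file so the module-name prefix is applied in one place; that companion change is assumed here, as it sits outside this hunk:

        /* Assumed to appear before the first #include in forcedeth.c: */
        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

        /* pr_info(fmt, ...) expands to printk(KERN_INFO pr_fmt(fmt), ...),
         * so the message below gets a "forcedeth: " prefix automatically. */
        pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
                FORCEDETH_VERSION);
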
@@ -5465,10 +5316,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5465 err = -EINVAL; 5316 err = -EINVAL;
5466 addr = 0; 5317 addr = 0;
5467 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5318 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5468 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5469 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5470 pci_resource_len(pci_dev, i),
5471 pci_resource_flags(pci_dev, i));
5472 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5319 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5473 pci_resource_len(pci_dev, i) >= np->register_size) { 5320 pci_resource_len(pci_dev, i) >= np->register_size) {
5474 addr = pci_resource_start(pci_dev, i); 5321 addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5323,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5476 } 5323 }
5477 } 5324 }
5478 if (i == DEVICE_COUNT_RESOURCE) { 5325 if (i == DEVICE_COUNT_RESOURCE) {
5479 dev_printk(KERN_INFO, &pci_dev->dev, 5326 dev_info(&pci_dev->dev, "Couldn't find register window\n");
5480 "Couldn't find register window\n");
5481 goto out_relreg; 5327 goto out_relreg;
5482 } 5328 }
5483 5329
@@ -5493,13 +5339,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5493 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5339 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5494 if (dma_64bit) { 5340 if (dma_64bit) {
5495 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) 5341 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5496 dev_printk(KERN_INFO, &pci_dev->dev, 5342 dev_info(&pci_dev->dev,
5497 "64-bit DMA failed, using 32-bit addressing\n"); 5343 "64-bit DMA failed, using 32-bit addressing\n");
5498 else 5344 else
5499 dev->features |= NETIF_F_HIGHDMA; 5345 dev->features |= NETIF_F_HIGHDMA;
5500 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { 5346 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5501 dev_printk(KERN_INFO, &pci_dev->dev, 5347 dev_info(&pci_dev->dev,
5502 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5348 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5503 } 5349 }
5504 } 5350 }
5505 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5351 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5466,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5620 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5466 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5621 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5467 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5622 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5468 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5623 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n"); 5469 dev_dbg(&pci_dev->dev,
5470 "%s: set workaround bit for reversed mac addr\n",
5471 __func__);
5624 } 5472 }
5625 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5473 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5626 5474
@@ -5629,17 +5477,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5629 * Bad mac address. At least one bios sets the mac address 5477 * Bad mac address. At least one bios sets the mac address
5630 * to 01:23:45:67:89:ab 5478 * to 01:23:45:67:89:ab
5631 */ 5479 */
5632 dev_printk(KERN_ERR, &pci_dev->dev, 5480 dev_err(&pci_dev->dev,
5633 "Invalid Mac address detected: %pM\n", 5481 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5634 dev->dev_addr); 5482 dev->dev_addr);
5635 dev_printk(KERN_ERR, &pci_dev->dev,
5636 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5637 random_ether_addr(dev->dev_addr); 5483 random_ether_addr(dev->dev_addr);
5484 dev_err(&pci_dev->dev,
5485 "Using random MAC address: %pM\n", dev->dev_addr);
5638 } 5486 }
5639 5487
5640 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5641 pci_name(pci_dev), dev->dev_addr);
5642
5643 /* set mac address */ 5488 /* set mac address */
5644 nv_copy_mac_to_hw(dev); 5489 nv_copy_mac_to_hw(dev);
5645 5490
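
When the BIOS hands over a bogus MAC, the driver now logs one error, substitutes a random address, and logs the replacement. random_ether_addr() (renamed eth_random_addr() in later kernels) always yields a usable address; its effect, sketched by hand:

        /* Hand-rolled equivalent of random_ether_addr(), for illustration. */
        static void random_mac_sketch(u8 *addr)
        {
                get_random_bytes(addr, ETH_ALEN);
                addr[0] &= 0xfe;        /* clear the multicast bit */
                addr[0] |= 0x02;        /* set the locally-administered bit */
        }
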
@@ -5663,16 +5508,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5663 writel(powerstate, base + NvRegPowerState2); 5508 writel(powerstate, base + NvRegPowerState2);
5664 } 5509 }
5665 5510
5666 if (np->desc_ver == DESC_VER_1) { 5511 if (np->desc_ver == DESC_VER_1)
5667 np->tx_flags = NV_TX_VALID; 5512 np->tx_flags = NV_TX_VALID;
5668 } else { 5513 else
5669 np->tx_flags = NV_TX2_VALID; 5514 np->tx_flags = NV_TX2_VALID;
5670 }
5671 5515
5672 np->msi_flags = 0; 5516 np->msi_flags = 0;
5673 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5517 if ((id->driver_data & DEV_HAS_MSI) && msi)
5674 np->msi_flags |= NV_MSI_CAPABLE; 5518 np->msi_flags |= NV_MSI_CAPABLE;
5675 } 5519
5676 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5520 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5677 /* msix has had reported issues when modifying irqmask 5521 /* msix has had reported issues when modifying irqmask
5678 as in the case of napi, therefore, disable for now 5522 as in the case of napi, therefore, disable for now
@@ -5702,11 +5546,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5702 if (id->driver_data & DEV_NEED_TIMERIRQ) 5546 if (id->driver_data & DEV_NEED_TIMERIRQ)
5703 np->irqmask |= NVREG_IRQ_TIMER; 5547 np->irqmask |= NVREG_IRQ_TIMER;
5704 if (id->driver_data & DEV_NEED_LINKTIMER) { 5548 if (id->driver_data & DEV_NEED_LINKTIMER) {
5705 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5706 np->need_linktimer = 1; 5549 np->need_linktimer = 1;
5707 np->link_timeout = jiffies + LINK_TIMEOUT; 5550 np->link_timeout = jiffies + LINK_TIMEOUT;
5708 } else { 5551 } else {
5709 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5710 np->need_linktimer = 0; 5552 np->need_linktimer = 0;
5711 } 5553 }
5712 5554
@@ -5735,19 +5577,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5735 nv_mgmt_acquire_sema(dev) && 5577 nv_mgmt_acquire_sema(dev) &&
5736 nv_mgmt_get_version(dev)) { 5578 nv_mgmt_get_version(dev)) {
5737 np->mac_in_use = 1; 5579 np->mac_in_use = 1;
5738 if (np->mgmt_version > 0) { 5580 if (np->mgmt_version > 0)
5739 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; 5581 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5740 }
5741 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5742 pci_name(pci_dev), np->mac_in_use);
5743 /* management unit setup the phy already? */ 5582 /* management unit setup the phy already? */
5744 if (np->mac_in_use && 5583 if (np->mac_in_use &&
5745 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5584 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5746 NVREG_XMITCTL_SYNC_PHY_INIT)) { 5585 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5747 /* phy is inited by mgmt unit */ 5586 /* phy is inited by mgmt unit */
5748 phyinitialized = 1; 5587 phyinitialized = 1;
5749 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5750 pci_name(pci_dev));
5751 } else { 5588 } else {
5752 /* we need to init the phy */ 5589 /* we need to init the phy */
5753 } 5590 }
@@ -5773,8 +5610,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5773 np->phy_model = id2 & PHYID2_MODEL_MASK; 5610 np->phy_model = id2 & PHYID2_MODEL_MASK;
5774 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5611 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5775 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5612 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5776 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5777 pci_name(pci_dev), id1, id2, phyaddr);
5778 np->phyaddr = phyaddr; 5613 np->phyaddr = phyaddr;
5779 np->phy_oui = id1 | id2; 5614 np->phy_oui = id1 | id2;
5780 5615
@@ -5788,8 +5623,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5788 break; 5623 break;
5789 } 5624 }
5790 if (i == 33) { 5625 if (i == 33) {
5791 dev_printk(KERN_INFO, &pci_dev->dev, 5626 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5792 "open: Could not find a valid PHY.\n");
5793 goto out_error; 5627 goto out_error;
5794 } 5628 }
5795 5629
@@ -5799,9 +5633,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5799 } else { 5633 } else {
5800 /* see if it is a gigabit phy */ 5634 /* see if it is a gigabit phy */
5801 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5635 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5802 if (mii_status & PHY_GIGABIT) { 5636 if (mii_status & PHY_GIGABIT)
5803 np->gigabit = PHY_GIGABIT; 5637 np->gigabit = PHY_GIGABIT;
5804 }
5805 } 5638 }
5806 5639
5807 /* set default link speed settings */ 5640 /* set default link speed settings */
@@ -5811,37 +5644,27 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5811 5644
5812 err = register_netdev(dev); 5645 err = register_netdev(dev);
5813 if (err) { 5646 if (err) {
5814 dev_printk(KERN_INFO, &pci_dev->dev, 5647 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5815 "unable to register netdev: %d\n", err);
5816 goto out_error; 5648 goto out_error;
5817 } 5649 }
5818 5650
5819 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5651 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5820 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5652 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5821 dev->name, 5653
5822 np->phy_oui, 5654 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5823 np->phyaddr, 5655 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5824 dev->dev_addr[0], 5656 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5825 dev->dev_addr[1], 5657 "csum " : "",
5826 dev->dev_addr[2], 5658 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5827 dev->dev_addr[3], 5659 "vlan " : "",
5828 dev->dev_addr[4], 5660 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5829 dev->dev_addr[5]); 5661 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5830 5662 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5831 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5663 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5832 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5664 np->need_linktimer ? "lnktim " : "",
5833 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5665 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5834 "csum " : "", 5666 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5835 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5667 np->desc_ver);
5836 "vlan " : "",
5837 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5838 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5839 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5840 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5841 np->need_linktimer ? "lnktim " : "",
5842 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5843 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5844 np->desc_ver);
5845 5668
5846 return 0; 5669 return 0;
5847 5670
@@ -5931,13 +5754,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5931 int i; 5754 int i;
5932 5755
5933 if (netif_running(dev)) { 5756 if (netif_running(dev)) {
5934 // Gross. 5757 /* Gross. */
5935 nv_close(dev); 5758 nv_close(dev);
5936 } 5759 }
5937 netif_device_detach(dev); 5760 netif_device_detach(dev);
5938 5761
5939 /* save non-pci configuration space */ 5762 /* save non-pci configuration space */
5940 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5763 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5941 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5764 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5942 5765
5943 pci_save_state(pdev); 5766 pci_save_state(pdev);
@@ -5960,7 +5783,7 @@ static int nv_resume(struct pci_dev *pdev)
5960 pci_enable_wake(pdev, PCI_D0, 0); 5783 pci_enable_wake(pdev, PCI_D0, 0);
5961 5784
5962 /* restore non-pci configuration space */ 5785 /* restore non-pci configuration space */
5963 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5786 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5964 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5787 writel(np->saved_config_space[i], base+i*sizeof(u32));
5965 5788
5966 if (np->driver_data & DEV_NEED_MSI_FIX) 5789 if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5813,8 @@ static void nv_shutdown(struct pci_dev *pdev)
5990 * If we really go for poweroff, we must not restore the MAC, 5813 * If we really go for poweroff, we must not restore the MAC,
5991 * otherwise the MAC for WOL will be reversed at least on some boards. 5814 * otherwise the MAC for WOL will be reversed at least on some boards.
5992 */ 5815 */
5993 if (system_state != SYSTEM_POWER_OFF) { 5816 if (system_state != SYSTEM_POWER_OFF)
5994 nv_restore_mac_addr(pdev); 5817 nv_restore_mac_addr(pdev);
5995 }
5996 5818
5997 pci_disable_device(pdev); 5819 pci_disable_device(pdev);
5998 /* 5820 /*
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1246a7..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
577 irq_of_parse_and_map(np, 1); 577 irq_of_parse_and_map(np, 1);
578 priv->gfargrp[priv->num_grps].interruptError = 578 priv->gfargrp[priv->num_grps].interruptError =
579 irq_of_parse_and_map(np,2); 579 irq_of_parse_and_map(np,2);
580 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || 580 if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
581 priv->gfargrp[priv->num_grps].interruptReceive < 0 || 581 priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
582 priv->gfargrp[priv->num_grps].interruptError < 0) { 582 priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
583 return -EINVAL; 583 return -EINVAL;
584 }
585 } 584 }
586 585
587 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 586 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
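Note: irq_of_parse_and_map() returns an unsigned int, so the old "< 0" checks above could never fire; on failure the function returns NO_IRQ, which is what the corrected code compares against. A sketch of the pattern:

    unsigned int virq = irq_of_parse_and_map(np, 0);
    if (virq == NO_IRQ)     /* unsigned return value: "virq < 0" is always false */
            return -EINVAL;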
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
635 if (wol->wolopts & ~WAKE_MAGIC) 635 if (wol->wolopts & ~WAKE_MAGIC)
636 return -EINVAL; 636 return -EINVAL;
637 637
638 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
639
638 spin_lock_irqsave(&priv->bflock, flags); 640 spin_lock_irqsave(&priv->bflock, flags);
639 priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; 641 priv->wol_en = !!device_may_wakeup(&dev->dev);
640 device_set_wakeup_enable(&dev->dev, priv->wol_en);
641 spin_unlock_irqrestore(&priv->bflock, flags); 642 spin_unlock_irqrestore(&priv->bflock, flags);
642 643
643 return 0; 644 return 0;
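Note: the WoL hunk moves device_set_wakeup_enable(), which may sleep under the reworked PM-core wakeup handling, out of the bflock spinlock section, and then derives wol_en from device_may_wakeup() rather than recomputing the mask. A sketch of the corrected ordering, using the same field names:

    device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

    spin_lock_irqsave(&priv->bflock, flags);
    priv->wol_en = !!device_may_wakeup(&dev->dev);  /* no sleeping under the lock */
    spin_unlock_irqrestore(&priv->bflock, flags);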
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..8f11d29a5828 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2872 2872
2873 netif_carrier_off(ndev); 2873 netif_carrier_off(ndev);
2874 netif_stop_queue(ndev);
2875 2874
2876 err = register_netdev(ndev); 2875 err = register_netdev(ndev);
2877 if (err) { 2876 if (err) {
@@ -2951,7 +2950,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2951 2950
2952 unregister_netdev(dev->ndev); 2951 unregister_netdev(dev->ndev);
2953 2952
2954 flush_scheduled_work(); 2953 cancel_work_sync(&dev->reset_work);
2955 2954
2956 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) 2955 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2957 tah_detach(dev->tah_dev, dev->tah_port); 2956 tah_detach(dev->tah_dev, dev->tah_port);
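Note: several drivers in this series drop flush_scheduled_work(), which waits for every item on the shared system workqueue and can deadlock if the caller holds a lock that some unrelated work item needs. Cancelling only the driver's own work is both safer and more precise:

    /* Wait for (and cancel) just this driver's pending work instead of
     * draining the entire shared workqueue.
     */
    cancel_work_sync(&dev->reset_work);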
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c454b45ca7ec..5522d459654c 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -729,11 +729,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
729 sizeof(info->version) - 1); 729 sizeof(info->version) - 1);
730} 730}
731 731
732static u32 netdev_get_link(struct net_device *dev)
733{
734 return 1;
735}
736
737static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) 732static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
738{ 733{
739 struct ibmveth_adapter *adapter = netdev_priv(dev); 734 struct ibmveth_adapter *adapter = netdev_priv(dev);
@@ -918,7 +913,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
918static const struct ethtool_ops netdev_ethtool_ops = { 913static const struct ethtool_ops netdev_ethtool_ops = {
919 .get_drvinfo = netdev_get_drvinfo, 914 .get_drvinfo = netdev_get_drvinfo,
920 .get_settings = netdev_get_settings, 915 .get_settings = netdev_get_settings,
921 .get_link = netdev_get_link, 916 .get_link = ethtool_op_get_link,
922 .set_tx_csum = ibmveth_set_tx_csum, 917 .set_tx_csum = ibmveth_set_tx_csum,
923 .get_rx_csum = ibmveth_get_rx_csum, 918 .get_rx_csum = ibmveth_get_rx_csum,
924 .set_rx_csum = ibmveth_set_rx_csum, 919 .set_rx_csum = ibmveth_set_rx_csum,
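Note: ibmveth's private netdev_get_link() hard-coded a link-up answer; the generic ethtool_op_get_link() reports the carrier state instead, so any driver that keeps netif_carrier up to date can delete its private copy. Roughly what the helper provides:

    /* approximately the stock implementation of ethtool_op_get_link() */
    u32 ethtool_op_get_link(struct net_device *dev)
    {
            return netif_carrier_ok(dev) ? 1 : 0;
    }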
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675c5b8b..bfa03db66691 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -36,22 +36,10 @@
36#include <net/pkt_sched.h> 36#include <net/pkt_sched.h>
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38 38
39#define TX_TIMEOUT (2*HZ)
40
41#define TX_Q_LIMIT 32 39#define TX_Q_LIMIT 32
42struct ifb_private { 40struct ifb_private {
43 struct tasklet_struct ifb_tasklet; 41 struct tasklet_struct ifb_tasklet;
44 int tasklet_pending; 42 int tasklet_pending;
45 /* mostly debug stats leave in for now */
46 unsigned long st_task_enter; /* tasklet entered */
47 unsigned long st_txq_refl_try; /* transmit queue refill attempt */
48 unsigned long st_rxq_enter; /* receive queue entered */
49 unsigned long st_rx2tx_tran; /* receive to trasmit transfers */
50 unsigned long st_rxq_notenter; /*receiveQ not entered, resched */
51 unsigned long st_rx_frm_egr; /* received from egress path */
52 unsigned long st_rx_frm_ing; /* received from ingress path */
53 unsigned long st_rxq_check;
54 unsigned long st_rxq_rsch;
55 struct sk_buff_head rq; 43 struct sk_buff_head rq;
56 struct sk_buff_head tq; 44 struct sk_buff_head tq;
57}; 45};
@@ -73,19 +61,12 @@ static void ri_tasklet(unsigned long dev)
73 struct sk_buff *skb; 61 struct sk_buff *skb;
74 62
75 txq = netdev_get_tx_queue(_dev, 0); 63 txq = netdev_get_tx_queue(_dev, 0);
76 dp->st_task_enter++;
77 if ((skb = skb_peek(&dp->tq)) == NULL) { 64 if ((skb = skb_peek(&dp->tq)) == NULL) {
78 dp->st_txq_refl_try++;
79 if (__netif_tx_trylock(txq)) { 65 if (__netif_tx_trylock(txq)) {
80 dp->st_rxq_enter++; 66 skb_queue_splice_tail_init(&dp->rq, &dp->tq);
81 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
82 skb_queue_tail(&dp->tq, skb);
83 dp->st_rx2tx_tran++;
84 }
85 __netif_tx_unlock(txq); 67 __netif_tx_unlock(txq);
86 } else { 68 } else {
87 /* reschedule */ 69 /* reschedule */
88 dp->st_rxq_notenter++;
89 goto resched; 70 goto resched;
90 } 71 }
91 } 72 }
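Note: the open-coded dequeue/enqueue loop above is replaced by skb_queue_splice_tail_init(), which relinks the whole rq list onto the tail of tq in O(1) and reinitialises rq, instead of touching every skb. Before/after, with the ifb field names:

    /* before: O(n), one queue operation per packet */
    while ((skb = skb_dequeue(&dp->rq)) != NULL)
            skb_queue_tail(&dp->tq, skb);

    /* after: O(1), splice the list and leave rq empty */
    skb_queue_splice_tail_init(&dp->rq, &dp->tq);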
@@ -104,16 +85,16 @@ static void ri_tasklet(unsigned long dev)
104 rcu_read_unlock(); 85 rcu_read_unlock();
105 dev_kfree_skb(skb); 86 dev_kfree_skb(skb);
106 stats->tx_dropped++; 87 stats->tx_dropped++;
88 if (skb_queue_len(&dp->tq) != 0)
89 goto resched;
107 break; 90 break;
108 } 91 }
109 rcu_read_unlock(); 92 rcu_read_unlock();
110 skb->skb_iif = _dev->ifindex; 93 skb->skb_iif = _dev->ifindex;
111 94
112 if (from & AT_EGRESS) { 95 if (from & AT_EGRESS) {
113 dp->st_rx_frm_egr++;
114 dev_queue_xmit(skb); 96 dev_queue_xmit(skb);
115 } else if (from & AT_INGRESS) { 97 } else if (from & AT_INGRESS) {
116 dp->st_rx_frm_ing++;
117 skb_pull(skb, skb->dev->hard_header_len); 98 skb_pull(skb, skb->dev->hard_header_len);
118 netif_rx(skb); 99 netif_rx(skb);
119 } else 100 } else
@@ -121,13 +102,11 @@ static void ri_tasklet(unsigned long dev)
121 } 102 }
122 103
123 if (__netif_tx_trylock(txq)) { 104 if (__netif_tx_trylock(txq)) {
124 dp->st_rxq_check++;
125 if ((skb = skb_peek(&dp->rq)) == NULL) { 105 if ((skb = skb_peek(&dp->rq)) == NULL) {
126 dp->tasklet_pending = 0; 106 dp->tasklet_pending = 0;
127 if (netif_queue_stopped(_dev)) 107 if (netif_queue_stopped(_dev))
128 netif_wake_queue(_dev); 108 netif_wake_queue(_dev);
129 } else { 109 } else {
130 dp->st_rxq_rsch++;
131 __netif_tx_unlock(txq); 110 __netif_tx_unlock(txq);
132 goto resched; 111 goto resched;
133 } 112 }
@@ -182,7 +161,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
182 netif_stop_queue(dev); 161 netif_stop_queue(dev);
183 } 162 }
184 163
185 skb_queue_tail(&dp->rq, skb); 164 __skb_queue_tail(&dp->rq, skb);
186 if (!dp->tasklet_pending) { 165 if (!dp->tasklet_pending) {
187 dp->tasklet_pending = 1; 166 dp->tasklet_pending = 1;
188 tasklet_schedule(&dp->ifb_tasklet); 167 tasklet_schedule(&dp->ifb_tasklet);
@@ -197,8 +176,8 @@ static int ifb_close(struct net_device *dev)
197 176
198 tasklet_kill(&dp->ifb_tasklet); 177 tasklet_kill(&dp->ifb_tasklet);
199 netif_stop_queue(dev); 178 netif_stop_queue(dev);
200 skb_queue_purge(&dp->rq); 179 __skb_queue_purge(&dp->rq);
201 skb_queue_purge(&dp->tq); 180 __skb_queue_purge(&dp->tq);
202 return 0; 181 return 0;
203} 182}
204 183
@@ -207,8 +186,8 @@ static int ifb_open(struct net_device *dev)
207 struct ifb_private *dp = netdev_priv(dev); 186 struct ifb_private *dp = netdev_priv(dev);
208 187
209 tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); 188 tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
210 skb_queue_head_init(&dp->rq); 189 __skb_queue_head_init(&dp->rq);
211 skb_queue_head_init(&dp->tq); 190 __skb_queue_head_init(&dp->tq);
212 netif_start_queue(dev); 191 netif_start_queue(dev);
213 192
214 return 0; 193 return 0;
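Note: ifb also switches to the double-underscore skb queue helpers, which skip the per-queue spinlock. That is only valid because every access here is already serialized by the tx queue lock or runs after tasklet_kill(); the trade-off in miniature:

    __skb_queue_tail(&dp->rq, skb);  /* caller already holds the tx queue lock */
    /* versus */
    skb_queue_tail(&dp->rq, skb);    /* takes queue->lock internally */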
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 62222796a8b3..6319ed902bc0 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -419,6 +419,9 @@
419#define E1000_ERR_SWFW_SYNC 13 419#define E1000_ERR_SWFW_SYNC 13
420#define E1000_NOT_IMPLEMENTED 14 420#define E1000_NOT_IMPLEMENTED 14
421#define E1000_ERR_MBX 15 421#define E1000_ERR_MBX 15
422#define E1000_ERR_INVALID_ARGUMENT 16
423#define E1000_ERR_NO_SPACE 17
424#define E1000_ERR_NVM_PBA_SECTION 18
422 425
423/* Loop limit on how long we wait for auto-negotiation to complete */ 426/* Loop limit on how long we wait for auto-negotiation to complete */
424#define COPPER_LINK_UP_LIMIT 10 427#define COPPER_LINK_UP_LIMIT 10
@@ -580,11 +583,15 @@
580 583
581/* Mask bits for fields in Word 0x1a of the NVM */ 584/* Mask bits for fields in Word 0x1a of the NVM */
582 585
586/* length of string needed to store part num */
587#define E1000_PBANUM_LENGTH 11
588
583/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ 589/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
584#define NVM_SUM 0xBABA 590#define NVM_SUM 0xBABA
585 591
586#define NVM_PBA_OFFSET_0 8 592#define NVM_PBA_OFFSET_0 8
587#define NVM_PBA_OFFSET_1 9 593#define NVM_PBA_OFFSET_1 9
594#define NVM_PBA_PTR_GUARD 0xFAFA
588#define NVM_WORD_SIZE_BASE_SHIFT 6 595#define NVM_WORD_SIZE_BASE_SHIFT 6
589 596
590/* NVM Commands - Microwire */ 597/* NVM Commands - Microwire */
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index d83b77fa4038..6b5cc2cc453d 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -445,31 +445,112 @@ out:
445} 445}
446 446
447/** 447/**
448 * igb_read_part_num - Read device part number 448 * igb_read_part_string - Read device part number
449 * @hw: pointer to the HW structure 449 * @hw: pointer to the HW structure
450 * @part_num: pointer to device part number 450 * @part_num: pointer to device part number
451 * @part_num_size: size of part number buffer
451 * 452 *
452 * Reads the product board assembly (PBA) number from the EEPROM and stores 453 * Reads the product board assembly (PBA) number from the EEPROM and stores
453 * the value in part_num. 454 * the value in part_num.
454 **/ 455 **/
455s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num) 456s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
456{ 457{
457 s32 ret_val; 458 s32 ret_val;
458 u16 nvm_data; 459 u16 nvm_data;
460 u16 pointer;
461 u16 offset;
462 u16 length;
463
464 if (part_num == NULL) {
465 hw_dbg("PBA string buffer was null\n");
466 ret_val = E1000_ERR_INVALID_ARGUMENT;
467 goto out;
468 }
459 469
460 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 470 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
461 if (ret_val) { 471 if (ret_val) {
462 hw_dbg("NVM Read Error\n"); 472 hw_dbg("NVM Read Error\n");
463 goto out; 473 goto out;
464 } 474 }
465 *part_num = (u32)(nvm_data << 16);
466 475
467 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 476 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
477 if (ret_val) {
478 hw_dbg("NVM Read Error\n");
479 goto out;
480 }
481
482 /*
483 * if nvm_data is not ptr guard the PBA must be in legacy format which
484 * means pointer is actually our second data word for the PBA number
485 * and we can decode it into an ascii string
486 */
487 if (nvm_data != NVM_PBA_PTR_GUARD) {
488 hw_dbg("NVM PBA number is not stored as string\n");
489
490 /* we will need 11 characters to store the PBA */
491 if (part_num_size < 11) {
492 hw_dbg("PBA string buffer too small\n");
493 return E1000_ERR_NO_SPACE;
494 }
495
496 /* extract hex string from data and pointer */
497 part_num[0] = (nvm_data >> 12) & 0xF;
498 part_num[1] = (nvm_data >> 8) & 0xF;
499 part_num[2] = (nvm_data >> 4) & 0xF;
500 part_num[3] = nvm_data & 0xF;
501 part_num[4] = (pointer >> 12) & 0xF;
502 part_num[5] = (pointer >> 8) & 0xF;
503 part_num[6] = '-';
504 part_num[7] = 0;
505 part_num[8] = (pointer >> 4) & 0xF;
506 part_num[9] = pointer & 0xF;
507
508 /* put a null character on the end of our string */
509 part_num[10] = '\0';
510
511 /* switch all the data but the '-' to hex char */
512 for (offset = 0; offset < 10; offset++) {
513 if (part_num[offset] < 0xA)
514 part_num[offset] += '0';
515 else if (part_num[offset] < 0x10)
516 part_num[offset] += 'A' - 0xA;
517 }
518
519 goto out;
520 }
521
522 ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
468 if (ret_val) { 523 if (ret_val) {
469 hw_dbg("NVM Read Error\n"); 524 hw_dbg("NVM Read Error\n");
470 goto out; 525 goto out;
471 } 526 }
472 *part_num |= nvm_data; 527
528 if (length == 0xFFFF || length == 0) {
529 hw_dbg("NVM PBA number section invalid length\n");
530 ret_val = E1000_ERR_NVM_PBA_SECTION;
531 goto out;
532 }
533 /* check if part_num buffer is big enough */
534 if (part_num_size < (((u32)length * 2) - 1)) {
535 hw_dbg("PBA string buffer too small\n");
536 ret_val = E1000_ERR_NO_SPACE;
537 goto out;
538 }
539
540 /* trim pba length from start of string */
541 pointer++;
542 length--;
543
544 for (offset = 0; offset < length; offset++) {
545 ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
546 if (ret_val) {
547 hw_dbg("NVM Read Error\n");
548 goto out;
549 }
550 part_num[offset * 2] = (u8)(nvm_data >> 8);
551 part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
552 }
553 part_num[offset * 2] = '\0';
473 554
474out: 555out:
475 return ret_val; 556 return ret_val;
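Note: newer igb NVM images store the PBA as a length-prefixed string behind the 0xFAFA guard word, while legacy images pack a hex part number into the two PBA words; igb_read_part_string() handles both, decoding the legacy form into an 11-character string (hence E1000_PBANUM_LENGTH). A usage sketch matching the igb_main hunk further down:

    u8 part_str[E1000_PBANUM_LENGTH];   /* 11 bytes, including the NUL */

    if (igb_read_part_string(hw, part_str, sizeof(part_str)))
            strcpy(part_str, "Unknown"); /* NVM read error or buffer too small */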
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 1041c34dcbe1..29c956a84bd0 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -32,6 +32,8 @@ s32 igb_acquire_nvm(struct e1000_hw *hw);
32void igb_release_nvm(struct e1000_hw *hw); 32void igb_release_nvm(struct e1000_hw *hw);
33s32 igb_read_mac_addr(struct e1000_hw *hw); 33s32 igb_read_mac_addr(struct e1000_hw *hw);
34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); 34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
35s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
36 u32 part_num_size);
35s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 37s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
36s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 38s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
37s32 igb_validate_nvm_checksum(struct e1000_hw *hw); 39s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ddd036a78999..6694bf3e5ad9 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1757,11 +1757,12 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1757 u16 phy_data, i, agc_value = 0; 1757 u16 phy_data, i, agc_value = 0;
1758 u16 cur_agc_index, max_agc_index = 0; 1758 u16 cur_agc_index, max_agc_index = 0;
1759 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1759 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1760 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 1760 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1761 {IGP02E1000_PHY_AGC_A, 1761 IGP02E1000_PHY_AGC_A,
1762 IGP02E1000_PHY_AGC_B, 1762 IGP02E1000_PHY_AGC_B,
1763 IGP02E1000_PHY_AGC_C, 1763 IGP02E1000_PHY_AGC_C,
1764 IGP02E1000_PHY_AGC_D}; 1764 IGP02E1000_PHY_AGC_D
1765 };
1765 1766
1766 /* Read the AGC registers for all channels */ 1767 /* Read the AGC registers for all channels */
1767 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1768 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
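Note: marking the AGC register table static const moves it into read-only data, so the function no longer rebuilds the four-entry array on its stack at every call; ixgb_param.c gets the same treatment further down. The idea in miniature (REG_A/REG_B are placeholders):

    u16 regs[2] = { REG_A, REG_B };               /* copied onto the stack per call */
    static const u16 regs2[2] = { REG_A, REG_B }; /* emitted once into .rodata */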
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17ac..62348fc60e53 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1729,12 +1729,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1729 struct igb_adapter *adapter; 1729 struct igb_adapter *adapter;
1730 struct e1000_hw *hw; 1730 struct e1000_hw *hw;
1731 u16 eeprom_data = 0; 1731 u16 eeprom_data = 0;
1732 s32 ret_val;
1732 static int global_quad_port_a; /* global quad port a indication */ 1733 static int global_quad_port_a; /* global quad port a indication */
1733 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1734 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1734 unsigned long mmio_start, mmio_len; 1735 unsigned long mmio_start, mmio_len;
1735 int err, pci_using_dac; 1736 int err, pci_using_dac;
1736 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1737 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1737 u32 part_num; 1738 u8 part_str[E1000_PBANUM_LENGTH];
1738 1739
1739 /* Catch broken hardware that put the wrong VF device ID in 1740 /* Catch broken hardware that put the wrong VF device ID in
1740 * the PCIe SR-IOV capability. 1741 * the PCIe SR-IOV capability.
@@ -2000,10 +2001,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2000 "unknown"), 2001 "unknown"),
2001 netdev->dev_addr); 2002 netdev->dev_addr);
2002 2003
2003 igb_read_part_num(hw, &part_num); 2004 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2004 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name, 2005 if (ret_val)
2005 (part_num >> 8), (part_num & 0xff)); 2006 strcpy(part_str, "Unknown");
2006 2007 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2007 dev_info(&pdev->dev, 2008 dev_info(&pdev->dev,
2008 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", 2009 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2009 adapter->msix_entries ? "MSI-X" : 2010 adapter->msix_entries ? "MSI-X" :
@@ -2049,13 +2050,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2049 struct igb_adapter *adapter = netdev_priv(netdev); 2050 struct igb_adapter *adapter = netdev_priv(netdev);
2050 struct e1000_hw *hw = &adapter->hw; 2051 struct e1000_hw *hw = &adapter->hw;
2051 2052
2052 /* flush_scheduled work may reschedule our watchdog task, so 2053 /*
2053 * explicitly disable watchdog tasks from being rescheduled */ 2054 * The watchdog timer may be rescheduled, so explicitly
2055 * disable watchdog from being rescheduled.
2056 */
2054 set_bit(__IGB_DOWN, &adapter->state); 2057 set_bit(__IGB_DOWN, &adapter->state);
2055 del_timer_sync(&adapter->watchdog_timer); 2058 del_timer_sync(&adapter->watchdog_timer);
2056 del_timer_sync(&adapter->phy_info_timer); 2059 del_timer_sync(&adapter->phy_info_timer);
2057 2060
2058 flush_scheduled_work(); 2061 cancel_work_sync(&adapter->reset_task);
2062 cancel_work_sync(&adapter->watchdog_task);
2059 2063
2060#ifdef CONFIG_IGB_DCA 2064#ifdef CONFIG_IGB_DCA
2061 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 2065 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2436,10 +2440,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2436 int size; 2440 int size;
2437 2441
2438 size = sizeof(struct igb_buffer) * tx_ring->count; 2442 size = sizeof(struct igb_buffer) * tx_ring->count;
2439 tx_ring->buffer_info = vmalloc(size); 2443 tx_ring->buffer_info = vzalloc(size);
2440 if (!tx_ring->buffer_info) 2444 if (!tx_ring->buffer_info)
2441 goto err; 2445 goto err;
2442 memset(tx_ring->buffer_info, 0, size);
2443 2446
2444 /* round up to nearest 4K */ 2447 /* round up to nearest 4K */
2445 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2448 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2590,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2587 int size, desc_len; 2590 int size, desc_len;
2588 2591
2589 size = sizeof(struct igb_buffer) * rx_ring->count; 2592 size = sizeof(struct igb_buffer) * rx_ring->count;
2590 rx_ring->buffer_info = vmalloc(size); 2593 rx_ring->buffer_info = vzalloc(size);
2591 if (!rx_ring->buffer_info) 2594 if (!rx_ring->buffer_info)
2592 goto err; 2595 goto err;
2593 memset(rx_ring->buffer_info, 0, size);
2594 2596
2595 desc_len = sizeof(union e1000_adv_rx_desc); 2597 desc_len = sizeof(union e1000_adv_rx_desc);
2596 2598
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel(R) 82576 Virtual Function Linux driver 3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 Intel Corporation. 4# Copyright(c) 2009 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..ed6e3d910247 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -110,11 +110,6 @@ static int igbvf_get_settings(struct net_device *netdev,
110 return 0; 110 return 0;
111} 111}
112 112
113static u32 igbvf_get_link(struct net_device *netdev)
114{
115 return netif_carrier_ok(netdev);
116}
117
118static int igbvf_set_settings(struct net_device *netdev, 113static int igbvf_set_settings(struct net_device *netdev,
119 struct ethtool_cmd *ecmd) 114 struct ethtool_cmd *ecmd)
120{ 115{
@@ -515,7 +510,7 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
515 .get_msglevel = igbvf_get_msglevel, 510 .get_msglevel = igbvf_get_msglevel,
516 .set_msglevel = igbvf_set_msglevel, 511 .set_msglevel = igbvf_set_msglevel,
517 .nway_reset = igbvf_nway_reset, 512 .nway_reset = igbvf_nway_reset,
518 .get_link = igbvf_get_link, 513 .get_link = ethtool_op_get_link,
519 .get_eeprom_len = igbvf_get_eeprom_len, 514 .get_eeprom_len = igbvf_get_eeprom_len,
520 .get_eeprom = igbvf_get_eeprom, 515 .get_eeprom = igbvf_get_eeprom,
521 .set_eeprom = igbvf_set_eeprom, 516 .set_eeprom = igbvf_set_eeprom,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..9d4d63e536d4 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
126 unsigned int page_offset; 126 unsigned int page_offset;
127 }; 127 };
128 }; 128 };
129 struct page *page;
130}; 129};
131 130
132union igbvf_desc { 131union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..4fb023bce785 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
44 44
45#include "igbvf.h" 45#include "igbvf.h"
46 46
47#define DRV_VERSION "1.0.0-k0" 47#define DRV_VERSION "1.0.8-k0"
48char igbvf_driver_name[] = "igbvf"; 48char igbvf_driver_name[] = "igbvf";
49const char igbvf_driver_version[] = DRV_VERSION; 49const char igbvf_driver_version[] = DRV_VERSION;
50static const char igbvf_driver_string[] = 50static const char igbvf_driver_string[] =
51 "Intel(R) Virtual Function Network Driver"; 51 "Intel(R) Virtual Function Network Driver";
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 52static const char igbvf_copyright[] =
53 "Copyright (c) 2009 - 2010 Intel Corporation.";
53 54
54static int igbvf_poll(struct napi_struct *napi, int budget); 55static int igbvf_poll(struct napi_struct *napi, int budget);
55static void igbvf_reset(struct igbvf_adapter *); 56static void igbvf_reset(struct igbvf_adapter *);
@@ -429,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
429 int size; 430 int size;
430 431
431 size = sizeof(struct igbvf_buffer) * tx_ring->count; 432 size = sizeof(struct igbvf_buffer) * tx_ring->count;
432 tx_ring->buffer_info = vmalloc(size); 433 tx_ring->buffer_info = vzalloc(size);
433 if (!tx_ring->buffer_info) 434 if (!tx_ring->buffer_info)
434 goto err; 435 goto err;
435 memset(tx_ring->buffer_info, 0, size);
436 436
437 /* round up to nearest 4K */ 437 /* round up to nearest 4K */
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
469 int size, desc_len; 469 int size, desc_len;
470 470
471 size = sizeof(struct igbvf_buffer) * rx_ring->count; 471 size = sizeof(struct igbvf_buffer) * rx_ring->count;
472 rx_ring->buffer_info = vmalloc(size); 472 rx_ring->buffer_info = vzalloc(size);
473 if (!rx_ring->buffer_info) 473 if (!rx_ring->buffer_info)
474 goto err; 474 goto err;
475 memset(rx_ring->buffer_info, 0, size);
476 475
477 desc_len = sizeof(union e1000_adv_rx_desc); 476 desc_len = sizeof(union e1000_adv_rx_desc);
478 477
@@ -1851,8 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1851 1850
1852 if (link) { 1851 if (link) {
1853 if (!netif_carrier_ok(netdev)) { 1852 if (!netif_carrier_ok(netdev)) {
1854 bool txb2b = 1;
1855
1856 mac->ops.get_link_up_info(&adapter->hw, 1853 mac->ops.get_link_up_info(&adapter->hw,
1857 &adapter->link_speed, 1854 &adapter->link_speed,
1858 &adapter->link_duplex); 1855 &adapter->link_duplex);
@@ -1862,11 +1859,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
1862 adapter->tx_timeout_factor = 1; 1859 adapter->tx_timeout_factor = 1;
1863 switch (adapter->link_speed) { 1860 switch (adapter->link_speed) {
1864 case SPEED_10: 1861 case SPEED_10:
1865 txb2b = 0;
1866 adapter->tx_timeout_factor = 16; 1862 adapter->tx_timeout_factor = 16;
1867 break; 1863 break;
1868 case SPEED_100: 1864 case SPEED_100:
1869 txb2b = 0;
1870 /* maybe add some timeout factor ? */ 1865 /* maybe add some timeout factor ? */
1871 break; 1866 break;
1872 } 1867 }
@@ -2830,13 +2825,14 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
2830 struct e1000_hw *hw = &adapter->hw; 2825 struct e1000_hw *hw = &adapter->hw;
2831 2826
2832 /* 2827 /*
2833 * flush_scheduled work may reschedule our watchdog task, so 2828 * The watchdog timer may be rescheduled, so explicitly
2834 * explicitly disable watchdog tasks from being rescheduled 2829 * disable it from being rescheduled.
2835 */ 2830 */
2836 set_bit(__IGBVF_DOWN, &adapter->state); 2831 set_bit(__IGBVF_DOWN, &adapter->state);
2837 del_timer_sync(&adapter->watchdog_timer); 2832 del_timer_sync(&adapter->watchdog_timer);
2838 2833
2839 flush_scheduled_work(); 2834 cancel_work_sync(&adapter->reset_task);
2835 cancel_work_sync(&adapter->watchdog_task);
2840 2836
2841 unregister_netdev(netdev); 2837 unregister_netdev(netdev);
2842 2838
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..0cc13c6ed418 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..c36ea21f17fa 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
88 "IC PLUS IP1000 1000/100/10 based NIC", 88 "IC PLUS IP1000 1000/100/10 based NIC",
89 "Sundance Technology ST2021 based NIC", 89 "Sundance Technology ST2021 based NIC",
90 "Tamarack Microelectronics TC9020/9021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC",
91 "Tamarack Microelectronics TC9020/9021 based NIC",
92 "D-Link NIC IP1000A" 91 "D-Link NIC IP1000A"
93}; 92};
94 93
95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { 94static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 95 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 96 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 97 { PCI_VDEVICE(DLINK, 0x9021), 2 },
99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 98 { PCI_VDEVICE(DLINK, 0x4020), 3 },
100 { PCI_VDEVICE(DLINK, 0x4020), 4 },
101 { 0, } 99 { 0, }
102}; 100};
103 101
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 74b20f179cea..cc821de2c966 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -959,7 +959,7 @@ static void mcs_disconnect(struct usb_interface *intf)
959 if (!mcs) 959 if (!mcs)
960 return; 960 return;
961 961
962 flush_scheduled_work(); 962 cancel_work_sync(&mcs->work);
963 963
964 unregister_netdev(mcs->netdev); 964 unregister_netdev(mcs->netdev);
965 free_netdev(mcs->netdev); 965 free_netdev(mcs->netdev);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
258 258
259 /* Baud Rate Error Correction x 10000 */ 259 /* Baud Rate Error Correction x 10000 */
260 u32 rate_err_array[] = { 260 u32 rate_err_array[] = {
261 0000, 0625, 1250, 1875, 261 0, 625, 1250, 1875,
262 2500, 3125, 3750, 4375, 262 2500, 3125, 3750, 4375,
263 5000, 5625, 6250, 6875, 263 5000, 5625, 6250, 6875,
264 7500, 8125, 8750, 9375, 264 7500, 8125, 8750, 9375,
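Note: the sh_sir change is a genuine bug fix, not cosmetics: a leading zero makes a C integer literal octal, so 0625 evaluated to 405 decimal, corrupting the baud-rate error-correction table (which is scaled by 10000 and meant to be decimal throughout).

    int a = 0625;   /* octal: 6*64 + 2*8 + 5 = 405, not 625 */
    int b = 625;    /* decimal, as the table intends */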
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2e..9ece1fd9889d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -885,17 +885,8 @@ static void veth_stop_connection(struct veth_lpar_connection *cnx)
885 veth_kick_statemachine(cnx); 885 veth_kick_statemachine(cnx);
886 spin_unlock_irq(&cnx->lock); 886 spin_unlock_irq(&cnx->lock);
887 887
888 /* There's a slim chance the reset code has just queued the 888 /* ensure the statemachine runs now and waits for its completion */
889 * statemachine to run in five seconds. If so we need to cancel 889 flush_delayed_work_sync(&cnx->statemachine_wq);
890 * that and requeue the work to run now. */
891 if (cancel_delayed_work(&cnx->statemachine_wq)) {
892 spin_lock_irq(&cnx->lock);
893 veth_kick_statemachine(cnx);
894 spin_unlock_irq(&cnx->lock);
895 }
896
897 /* Wait for the state machine to run. */
898 flush_scheduled_work();
899} 890}
900 891
901static void veth_destroy_connection(struct veth_lpar_connection *cnx) 892static void veth_destroy_connection(struct veth_lpar_connection *cnx)
@@ -1009,15 +1000,10 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1009 return 0; 1000 return 0;
1010} 1001}
1011 1002
1012static u32 veth_get_link(struct net_device *dev)
1013{
1014 return 1;
1015}
1016
1017static const struct ethtool_ops ops = { 1003static const struct ethtool_ops ops = {
1018 .get_drvinfo = veth_get_drvinfo, 1004 .get_drvinfo = veth_get_drvinfo,
1019 .get_settings = veth_get_settings, 1005 .get_settings = veth_get_settings,
1020 .get_link = veth_get_link, 1006 .get_link = ethtool_op_get_link,
1021}; 1007};
1022 1008
1023static const struct net_device_ops veth_netdev_ops = { 1009static const struct net_device_ops veth_netdev_ops = {
@@ -1605,7 +1591,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1605 } 1591 }
1606 veth_dev[i] = dev; 1592 veth_dev[i] = dev;
1607 1593
1608 port = (struct veth_port*)netdev_priv(dev); 1594 port = netdev_priv(dev);
1609 1595
1610 /* Start the state machine on each connection on this vlan. If we're 1596 /* Start the state machine on each connection on this vlan. If we're
1611 * the first dev to do so this will commence link negotiation */ 1597 * the first dev to do so this will commence link negotiation */
@@ -1658,15 +1644,14 @@ static void __exit veth_module_cleanup(void)
1658 /* Disconnect our "irq" to stop events coming from the Hypervisor. */ 1644 /* Disconnect our "irq" to stop events coming from the Hypervisor. */
1659 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan); 1645 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
1660 1646
1661 /* Make sure any work queued from Hypervisor callbacks is finished. */
1662 flush_scheduled_work();
1663
1664 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { 1647 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1665 cnx = veth_cnx[i]; 1648 cnx = veth_cnx[i];
1666 1649
1667 if (!cnx) 1650 if (!cnx)
1668 continue; 1651 continue;
1669 1652
1653 /* Cancel work queued from Hypervisor callbacks */
1654 cancel_delayed_work_sync(&cnx->statemachine_wq);
1670 /* Remove the connection from sysfs */ 1655 /* Remove the connection from sysfs */
1671 kobject_del(&cnx->kobject); 1656 kobject_del(&cnx->kobject);
1672 /* Drop the driver's reference to the connection */ 1657 /* Drop the driver's reference to the connection */
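Note: iseries_veth replaces the cancel-then-requeue dance with flush_delayed_work_sync(), which pulls a pending delayed work forward and waits for it to finish, and cancels per-connection work at module exit instead of flushing the whole system workqueue. The dropped (struct veth_port *) cast is fine because netdev_priv() returns void *. Sketch:

    /* run the state machine now (even if queued for later) and wait for it */
    flush_delayed_work_sync(&cnx->statemachine_wq);

    /* at teardown: stop this connection's work without draining unrelated work */
    cancel_delayed_work_sync(&cnx->statemachine_wq);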
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2a..b021798ef49f 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -98,6 +98,8 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
98static void ixgb_tx_timeout(struct net_device *dev); 98static void ixgb_tx_timeout(struct net_device *dev);
99static void ixgb_tx_timeout_task(struct work_struct *work); 99static void ixgb_tx_timeout_task(struct work_struct *work);
100 100
101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
101static void ixgb_vlan_rx_register(struct net_device *netdev, 103static void ixgb_vlan_rx_register(struct net_device *netdev,
102 struct vlan_group *grp); 104 struct vlan_group *grp);
103static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 105static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
@@ -525,7 +527,7 @@ ixgb_remove(struct pci_dev *pdev)
525 struct net_device *netdev = pci_get_drvdata(pdev); 527 struct net_device *netdev = pci_get_drvdata(pdev);
526 struct ixgb_adapter *adapter = netdev_priv(netdev); 528 struct ixgb_adapter *adapter = netdev_priv(netdev);
527 529
528 flush_scheduled_work(); 530 cancel_work_sync(&adapter->tx_timeout_task);
529 531
530 unregister_netdev(netdev); 532 unregister_netdev(netdev);
531 533
@@ -669,13 +671,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
669 int size; 671 int size;
670 672
671 size = sizeof(struct ixgb_buffer) * txdr->count; 673 size = sizeof(struct ixgb_buffer) * txdr->count;
672 txdr->buffer_info = vmalloc(size); 674 txdr->buffer_info = vzalloc(size);
673 if (!txdr->buffer_info) { 675 if (!txdr->buffer_info) {
674 netif_err(adapter, probe, adapter->netdev, 676 netif_err(adapter, probe, adapter->netdev,
675 "Unable to allocate transmit descriptor ring memory\n"); 677 "Unable to allocate transmit descriptor ring memory\n");
676 return -ENOMEM; 678 return -ENOMEM;
677 } 679 }
678 memset(txdr->buffer_info, 0, size);
679 680
680 /* round up to nearest 4K */ 681 /* round up to nearest 4K */
681 682
@@ -759,13 +760,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
759 int size; 760 int size;
760 761
761 size = sizeof(struct ixgb_buffer) * rxdr->count; 762 size = sizeof(struct ixgb_buffer) * rxdr->count;
762 rxdr->buffer_info = vmalloc(size); 763 rxdr->buffer_info = vzalloc(size);
763 if (!rxdr->buffer_info) { 764 if (!rxdr->buffer_info) {
764 netif_err(adapter, probe, adapter->netdev, 765 netif_err(adapter, probe, adapter->netdev,
765 "Unable to allocate receive descriptor ring\n"); 766 "Unable to allocate receive descriptor ring\n");
766 return -ENOMEM; 767 return -ENOMEM;
767 } 768 }
768 memset(rxdr->buffer_info, 0, size);
769 769
770 /* Round up to nearest 4K */ 770 /* Round up to nearest 4K */
771 771
@@ -1078,6 +1078,8 @@ ixgb_set_multi(struct net_device *netdev)
1078 1078
1079 if (netdev->flags & IFF_PROMISC) { 1079 if (netdev->flags & IFF_PROMISC) {
1080 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1080 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1081 /* disable VLAN filtering */
1082 rctl &= ~IXGB_RCTL_CFIEN;
1081 rctl &= ~IXGB_RCTL_VFE; 1083 rctl &= ~IXGB_RCTL_VFE;
1082 } else { 1084 } else {
1083 if (netdev->flags & IFF_ALLMULTI) { 1085 if (netdev->flags & IFF_ALLMULTI) {
@@ -1086,7 +1088,9 @@ ixgb_set_multi(struct net_device *netdev)
1086 } else { 1088 } else {
1087 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1089 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1088 } 1090 }
1091 /* enable VLAN filtering */
1089 rctl |= IXGB_RCTL_VFE; 1092 rctl |= IXGB_RCTL_VFE;
1093 rctl &= ~IXGB_RCTL_CFIEN;
1090 } 1094 }
1091 1095
1092 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { 1096 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
@@ -1105,6 +1109,12 @@ ixgb_set_multi(struct net_device *netdev)
1105 1109
1106 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); 1110 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1107 } 1111 }
1112
1113 if (netdev->features & NETIF_F_HW_VLAN_RX)
1114 ixgb_vlan_strip_enable(adapter);
1115 else
1116 ixgb_vlan_strip_disable(adapter);
1117
1108} 1118}
1109 1119
1110/** 1120/**
@@ -2152,33 +2162,30 @@ static void
2152ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2162ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2153{ 2163{
2154 struct ixgb_adapter *adapter = netdev_priv(netdev); 2164 struct ixgb_adapter *adapter = netdev_priv(netdev);
2155 u32 ctrl, rctl;
2156 2165
2157 ixgb_irq_disable(adapter);
2158 adapter->vlgrp = grp; 2166 adapter->vlgrp = grp;
2167}
2159 2168
2160 if (grp) { 2169static void
2161 /* enable VLAN tag insert/strip */ 2170ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2162 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2171{
2163 ctrl |= IXGB_CTRL0_VME; 2172 u32 ctrl;
2164 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2165
2166 /* enable VLAN receive filtering */
2167 2173
2168 rctl = IXGB_READ_REG(&adapter->hw, RCTL); 2174 /* enable VLAN tag insert/strip */
2169 rctl &= ~IXGB_RCTL_CFIEN; 2175 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2170 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); 2176 ctrl |= IXGB_CTRL0_VME;
2171 } else { 2177 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2172 /* disable VLAN tag insert/strip */ 2178}
2173 2179
2174 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2180static void
2175 ctrl &= ~IXGB_CTRL0_VME; 2181ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2176 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); 2182{
2177 } 2183 u32 ctrl;
2178 2184
2179 /* don't enable interrupts unless we are UP */ 2185 /* disable VLAN tag insert/strip */
2180 if (adapter->netdev->flags & IFF_UP) 2186 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2181 ixgb_irq_enable(adapter); 2187 ctrl &= ~IXGB_CTRL0_VME;
2188 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2182} 2189}
2183 2190
2184static void 2191static void
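Note: ixgb_vlan_rx_register() used to toggle both tag stripping (CTRL0.VME) and receive filtering (RCTL.VFE/CFIEN) inside an irq-disable window; the refactor reduces it to recording the vlan group and moves the CTRL0 toggling into dedicated helpers, driven from ixgb_set_multi() by the NETIF_F_HW_VLAN_RX feature flag (see the earlier ixgb_set_multi hunk). The call site, as added above:

    if (netdev->features & NETIF_F_HW_VLAN_RX)
            ixgb_vlan_strip_enable(adapter);   /* set CTRL0.VME */
    else
            ixgb_vlan_strip_disable(adapter);  /* clear CTRL0.VME */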
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 88a08f056241..dd7fbeb1f7d1 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -191,9 +191,9 @@ struct ixgb_option {
191 } r; 191 } r;
192 struct { /* list_option info */ 192 struct { /* list_option info */
193 int nr; 193 int nr;
194 struct ixgb_opt_list { 194 const struct ixgb_opt_list {
195 int i; 195 int i;
196 char *str; 196 const char *str;
197 } *p; 197 } *p;
198 } l; 198 } l;
199 } arg; 199 } arg;
@@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
226 break; 226 break;
227 case list_option: { 227 case list_option: {
228 int i; 228 int i;
229 struct ixgb_opt_list *ent; 229 const struct ixgb_opt_list *ent;
230 230
231 for (i = 0; i < opt->arg.l.nr; i++) { 231 for (i = 0; i < opt->arg.l.nr; i++) {
232 ent = &opt->arg.l.p[i]; 232 ent = &opt->arg.l.p[i];
@@ -322,14 +322,15 @@ ixgb_check_options(struct ixgb_adapter *adapter)
322 } 322 }
323 { /* Flow Control */ 323 { /* Flow Control */
324 324
325 struct ixgb_opt_list fc_list[] = 325 static const struct ixgb_opt_list fc_list[] = {
326 {{ ixgb_fc_none, "Flow Control Disabled" }, 326 { ixgb_fc_none, "Flow Control Disabled" },
327 { ixgb_fc_rx_pause,"Flow Control Receive Only" }, 327 { ixgb_fc_rx_pause, "Flow Control Receive Only" },
328 { ixgb_fc_tx_pause,"Flow Control Transmit Only" }, 328 { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
329 { ixgb_fc_full, "Flow Control Enabled" }, 329 { ixgb_fc_full, "Flow Control Enabled" },
330 { ixgb_fc_default, "Flow Control Hardware Default" }}; 330 { ixgb_fc_default, "Flow Control Hardware Default" }
331 };
331 332
332 const struct ixgb_option opt = { 333 static const struct ixgb_option opt = {
333 .type = list_option, 334 .type = list_option,
334 .name = "Flow Control", 335 .name = "Flow Control",
335 .err = "reading default settings from EEPROM", 336 .err = "reading default settings from EEPROM",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o 37 ixgbe_mbx.o ixgbe_x540.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..3ae30b8cb7d6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
61#define IXGBE_MIN_RXD 64 61#define IXGBE_MIN_RXD 64
62 62
63/* flow control */ 63/* flow control */
64#define IXGBE_DEFAULT_FCRTL 0x10000
65#define IXGBE_MIN_FCRTL 0x40 64#define IXGBE_MIN_FCRTL 0x40
66#define IXGBE_MAX_FCRTL 0x7FF80 65#define IXGBE_MAX_FCRTL 0x7FF80
67#define IXGBE_DEFAULT_FCRTH 0x20000
68#define IXGBE_MIN_FCRTH 0x600 66#define IXGBE_MIN_FCRTH 0x600
69#define IXGBE_MAX_FCRTH 0x7FFF0 67#define IXGBE_MAX_FCRTH 0x7FFF0
70#define IXGBE_DEFAULT_FCPAUSE 0xFFFF 68#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
130 unsigned long time_stamp; 128 unsigned long time_stamp;
131 u16 length; 129 u16 length;
132 u16 next_to_watch; 130 u16 next_to_watch;
133 u16 mapped_as_page; 131 unsigned int bytecount;
132 u16 gso_segs;
133 u8 mapped_as_page;
134}; 134};
135 135
136struct ixgbe_rx_buffer { 136struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
146 u64 bytes; 146 u64 bytes;
147}; 147};
148 148
149struct ixgbe_tx_queue_stats {
150 u64 restart_queue;
151 u64 tx_busy;
152 u64 completed;
153 u64 tx_done_old;
154};
155
156struct ixgbe_rx_queue_stats {
157 u64 rsc_count;
158 u64 rsc_flush;
159 u64 non_eop_descs;
160 u64 alloc_rx_page_failed;
161 u64 alloc_rx_buff_failed;
162};
163
164enum ixbge_ring_state_t {
165 __IXGBE_TX_FDIR_INIT_DONE,
166 __IXGBE_TX_DETECT_HANG,
167 __IXGBE_HANG_CHECK_ARMED,
168 __IXGBE_RX_PS_ENABLED,
169 __IXGBE_RX_RSC_ENABLED,
170};
171
172#define ring_is_ps_enabled(ring) \
173 test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
174#define set_ring_ps_enabled(ring) \
175 set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
176#define clear_ring_ps_enabled(ring) \
177 clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
178#define check_for_tx_hang(ring) \
179 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
180#define set_check_for_tx_hang(ring) \
181 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
182#define clear_check_for_tx_hang(ring) \
183 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
184#define ring_is_rsc_enabled(ring) \
185 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
186#define set_ring_rsc_enabled(ring) \
187 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
188#define clear_ring_rsc_enabled(ring) \
189 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
149struct ixgbe_ring { 190struct ixgbe_ring {
150 void *desc; /* descriptor ring memory */ 191 void *desc; /* descriptor ring memory */
192 struct device *dev; /* device for DMA mapping */
193 struct net_device *netdev; /* netdev ring belongs to */
151 union { 194 union {
152 struct ixgbe_tx_buffer *tx_buffer_info; 195 struct ixgbe_tx_buffer *tx_buffer_info;
153 struct ixgbe_rx_buffer *rx_buffer_info; 196 struct ixgbe_rx_buffer *rx_buffer_info;
154 }; 197 };
198 unsigned long state;
155 u8 atr_sample_rate; 199 u8 atr_sample_rate;
156 u8 atr_count; 200 u8 atr_count;
157 u16 count; /* amount of descriptors */ 201 u16 count; /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
160 u16 next_to_clean; 204 u16 next_to_clean;
161 205
162 u8 queue_index; /* needed for multiqueue queue management */ 206 u8 queue_index; /* needed for multiqueue queue management */
163 207 u8 reg_idx; /* holds the special value that gets
164#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
165 u8 flags; /* per ring feature flags */
166 u16 head;
167 u16 tail;
168
169 unsigned int total_bytes;
170 unsigned int total_packets;
171
172#ifdef CONFIG_IXGBE_DCA
173 /* cpu for tx queue */
174 int cpu;
175#endif
176
177 u16 work_limit; /* max work per interrupt */
178 u16 reg_idx; /* holds the special value that gets
179 * the hardware register offset 208 * the hardware register offset
180 * associated with this ring, which is 209 * associated with this ring, which is
181 * different for DCB and RSS modes 210 * different for DCB and RSS modes
182 */ 211 */
183 212
213 u16 work_limit; /* max work per interrupt */
214
215 u8 __iomem *tail;
216
217 unsigned int total_bytes;
218 unsigned int total_packets;
219
184 struct ixgbe_queue_stats stats; 220 struct ixgbe_queue_stats stats;
185 struct u64_stats_sync syncp; 221 struct u64_stats_sync syncp;
222 union {
223 struct ixgbe_tx_queue_stats tx_stats;
224 struct ixgbe_rx_queue_stats rx_stats;
225 };
186 int numa_node; 226 int numa_node;
187 unsigned long reinit_state;
188 u64 rsc_count; /* stat for coalesced packets */
189 u64 rsc_flush; /* stats for flushed packets */
190 u32 restart_queue; /* track tx queue restarts */
191 u32 non_eop_descs; /* track hardware descriptor chaining */
192
193 unsigned int size; /* length in bytes */ 227 unsigned int size; /* length in bytes */
194 dma_addr_t dma; /* phys. address of descriptor ring */ 228 dma_addr_t dma; /* phys. address of descriptor ring */
229 struct rcu_head rcu;
230 struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
195} ____cacheline_internodealigned_in_smp; 231} ____cacheline_internodealigned_in_smp;
196 232
197enum ixgbe_ring_f_enum { 233enum ixgbe_ring_f_enum {
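Note: ixgbe replaces the per-ring flags byte and the adapter-wide __IXGBE_FDIR_INIT_DONE bit with a per-ring state bitmap, manipulated atomically through the set/clear/test_bit wrappers defined above. Usage sketch for a configured rx ring (setup_rsc() is a placeholder):

    set_ring_ps_enabled(rx_ring);       /* set_bit(__IXGBE_RX_PS_ENABLED, &ring->state) */
    if (ring_is_rsc_enabled(rx_ring))
            setup_rsc(rx_ring);         /* hypothetical per-ring RSC setup */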
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
237 unsigned int v_idx; /* index of q_vector within array, also used for 273 unsigned int v_idx; /* index of q_vector within array, also used for
238 * finding the bit in EICR and friends that 274 * finding the bit in EICR and friends that
239 * represents the vector for this ring */ 275 * represents the vector for this ring */
276#ifdef CONFIG_IXGBE_DCA
277 int cpu; /* CPU for DCA */
278#endif
240 struct napi_struct napi; 279 struct napi_struct napi;
241 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 280 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
242 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 281 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
246 u8 rx_itr; 285 u8 rx_itr;
247 u32 eitr; 286 u32 eitr;
248 cpumask_var_t affinity_mask; 287 cpumask_var_t affinity_mask;
288 char name[IFNAMSIZ + 9];
249}; 289};
250 290
251/* Helper macros to switch between ints/sec and what the register uses. 291/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
294 u16 bd_number; 334 u16 bd_number;
295 struct work_struct reset_task; 335 struct work_struct reset_task;
296 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
297 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
298 struct ixgbe_dcb_config dcb_cfg; 337 struct ixgbe_dcb_config dcb_cfg;
299 struct ixgbe_dcb_config temp_dcb_cfg; 338 struct ixgbe_dcb_config temp_dcb_cfg;
300 u8 dcb_set_bitmap; 339 u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
417 int node; 456 int node;
418 struct work_struct check_overtemp_task; 457 struct work_struct check_overtemp_task;
419 u32 interrupt_event; 458 u32 interrupt_event;
459 char lsc_int_name[IFNAMSIZ + 9];
420 460
421 /* SR-IOV */ 461 /* SR-IOV */
422 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 462 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
428 __IXGBE_TESTING, 468 __IXGBE_TESTING,
429 __IXGBE_RESETTING, 469 __IXGBE_RESETTING,
430 __IXGBE_DOWN, 470 __IXGBE_DOWN,
431 __IXGBE_FDIR_INIT_DONE,
432 __IXGBE_SFP_MODULE_NOT_FOUND 471 __IXGBE_SFP_MODULE_NOT_FOUND
433}; 472};
434 473
474struct ixgbe_rsc_cb {
475 dma_addr_t dma;
476 u16 skb_cnt;
477 bool delay_unmap;
478};
479#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
480
435enum ixgbe_boards { 481enum ixgbe_boards {
436 board_82598, 482 board_82598,
437 board_82599, 483 board_82599,
484 board_X540,
438}; 485};
439 486
440extern struct ixgbe_info ixgbe_82598_info; 487extern struct ixgbe_info ixgbe_82598_info;
441extern struct ixgbe_info ixgbe_82599_info; 488extern struct ixgbe_info ixgbe_82599_info;
489extern struct ixgbe_info ixgbe_X540_info;
442#ifdef CONFIG_IXGBE_DCB 490#ifdef CONFIG_IXGBE_DCB
443extern const struct dcbnl_rtnl_ops dcbnl_ops; 491extern const struct dcbnl_rtnl_ops dcbnl_ops;
444extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 492extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
454extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 502extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
455extern void ixgbe_reset(struct ixgbe_adapter *adapter); 503extern void ixgbe_reset(struct ixgbe_adapter *adapter);
456extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 504extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
457extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 505extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
458extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 506extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
459extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 507extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
460extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 508extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
461extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 509extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
462extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
463extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 511extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
464extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 512extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
465extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 513extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
466extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 514extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
467 struct net_device *,
468 struct ixgbe_adapter *, 515 struct ixgbe_adapter *,
469 struct ixgbe_ring *); 516 struct ixgbe_ring *);
470extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *, 517extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
471 struct ixgbe_tx_buffer *); 518 struct ixgbe_tx_buffer *);
472extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 519extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
473 struct ixgbe_ring *rx_ring,
474 int cleaned_count);
475extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 520extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
476extern int ethtool_ioctl(struct ifreq *ifr); 521extern int ethtool_ioctl(struct ifreq *ifr);
522extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
477extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 523extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
478extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 524extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
479extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 525extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
498 u16 flex_byte); 544 u16 flex_byte);
499extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 545extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
500 u8 l4type); 546 u8 l4type);
547extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
548 struct ixgbe_ring *ring);
549extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
550 struct ixgbe_ring *ring);
501extern void ixgbe_set_rx_mode(struct net_device *netdev); 551extern void ixgbe_set_rx_mode(struct net_device *netdev);
502#ifdef IXGBE_FCOE 552#ifdef IXGBE_FCOE
503extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 553extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
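
The struct reshuffle above is the core of this series: the state the hot path needs (the tail doorbell, now cached as a mapped u8 __iomem pointer instead of a u16 index, the per-ring work_limit, and a tx/rx stats union) moves into struct ixgbe_ring itself, which is what lets the setup/free/alloc prototypes later in the header drop their struct ixgbe_adapter argument. A minimal userspace sketch of the idea, with a plain pointer standing in for the __iomem one and all demo_* names hypothetical:

#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	volatile uint32_t *tail;	/* cached doorbell register */
	uint16_t next_to_use;
	union {				/* one ring is either Tx or Rx */
		struct { uint64_t restarts; } tx_stats;
		struct { uint64_t rsc_count; } rx_stats;
	};
};

static void demo_ring_bump_tail(struct demo_ring *ring, uint16_t value)
{
	ring->next_to_use = value;
	*ring->tail = value;	/* one MMIO-style write, no adapter deref */
}

int main(void)
{
	uint32_t fake_reg = 0;
	struct demo_ring ring = { .tail = &fake_reg };

	demo_ring_bump_tail(&ring, 42);
	printf("doorbell now %u\n", (unsigned)fake_reg);
	return 0;
}
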
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..d0f1d9d2c416 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
38#define IXGBE_82598_MC_TBL_SIZE 128 38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128 39#define IXGBE_82598_VFT_TBL_SIZE 128
40 40
41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
46 bool autoneg, 43 bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
156 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 153 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
157 mac->ops.setup_link = &ixgbe_setup_copper_link_82598; 154 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
158 mac->ops.get_link_capabilities = 155 mac->ops.get_link_capabilities =
159 &ixgbe_get_copper_link_capabilities_82598; 156 &ixgbe_get_copper_link_capabilities_generic;
160 } 157 }
161 158
162 switch (hw->phy.type) { 159 switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
274} 271}
275 272
276/** 273/**
277 * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
278 * @hw: pointer to hardware structure
279 * @speed: pointer to link speed
280 * @autoneg: boolean auto-negotiation value
281 *
282 * Determines the link capabilities by reading the AUTOC register.
283 **/
284static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
285 ixgbe_link_speed *speed,
286 bool *autoneg)
287{
288 s32 status = IXGBE_ERR_LINK_SETUP;
289 u16 speed_ability;
290
291 *speed = 0;
292 *autoneg = true;
293
294 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
295 &speed_ability);
296
297 if (status == 0) {
298 if (speed_ability & MDIO_SPEED_10G)
299 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
300 if (speed_ability & MDIO_PMA_SPEED_1000)
301 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
302 }
303
304 return status;
305}
306
307/**
308 * ixgbe_get_media_type_82598 - Determines media type 274 * ixgbe_get_media_type_82598 - Determines media type
309 * @hw: pointer to hardware structure 275 * @hw: pointer to hardware structure
310 * 276 *
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 323 u32 fctrl_reg;
358 u32 rmcs_reg; 324 u32 rmcs_reg;
359 u32 reg; 325 u32 reg;
326 u32 rx_pba_size;
360 u32 link_speed = 0; 327 u32 link_speed = 0;
361 bool link_up; 328 bool link_up;
362 329
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
459 426
460 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 427 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
461 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 428 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
462 if (hw->fc.send_xon) { 429 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
463 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 430 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
464 (hw->fc.low_water | IXGBE_FCRTL_XONE)); 431
465 } else { 432 reg = (rx_pba_size - hw->fc.low_water) << 6;
466 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 433 if (hw->fc.send_xon)
467 hw->fc.low_water); 434 reg |= IXGBE_FCRTL_XONE;
468 } 435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436
437 reg = (rx_pba_size - hw->fc.high_water) << 10;
438 reg |= IXGBE_FCRTH_FCEN;
469 439
470 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
471 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
472 } 441 }
473 442
474 /* Configure pause time (2 TCs per register) */ 443 /* Configure pause time (2 TCs per register) */
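
The rewritten block replaces watermarks taken verbatim from hw->fc with values derived from the packet buffer actually programmed into RXPBSIZE: each threshold is an offset below the top of the buffer, shifted into register position (6 for FCRTL, 10 for FCRTH, per the hunk). A compilable sketch of that calculation; the DEMO_* bit masks are placeholders, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FCRTL_XONE  0x80000000u	/* illustrative bit positions only */
#define DEMO_FCRTH_FCEN  0x80000000u

/* Compute low/high watermark register values as offsets below the top of
 * the Rx packet buffer, the way the hunk above does. */
static void demo_fc_watermarks(uint32_t rx_pba_size, uint32_t low_water,
			       uint32_t high_water, int send_xon,
			       uint32_t *fcrtl, uint32_t *fcrth)
{
	*fcrtl = (rx_pba_size - low_water) << 6;
	if (send_xon)
		*fcrtl |= DEMO_FCRTL_XONE;

	*fcrth = (rx_pba_size - high_water) << 10;
	*fcrth |= DEMO_FCRTH_FCEN;
}

int main(void)
{
	uint32_t fcrtl, fcrth;

	demo_fc_watermarks(512, 64, 32, 1, &fcrtl, &fcrth);
	printf("FCRTL=0x%08x FCRTH=0x%08x\n", fcrtl, fcrth);
	return 0;
}
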
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1222static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1223 .init_params = &ixgbe_init_eeprom_params_generic, 1192 .init_params = &ixgbe_init_eeprom_params_generic,
1224 .read = &ixgbe_read_eerd_generic, 1193 .read = &ixgbe_read_eerd_generic,
1194 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1225 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 1195 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1226 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1196 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1227}; 1197};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd0..6827dddc383e 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
56 ixgbe_link_speed speed, 56 ixgbe_link_speed speed,
57 bool autoneg, 57 bool autoneg,
58 bool autoneg_wait_to_complete); 58 bool autoneg_wait_to_complete);
59static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed *speed,
61 bool *autoneg);
62static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 59static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
63 ixgbe_link_speed speed, 60 ixgbe_link_speed speed,
64 bool autoneg, 61 bool autoneg,
@@ -68,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
68static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 65static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69{ 66{
70 struct ixgbe_mac_info *mac = &hw->mac; 67 struct ixgbe_mac_info *mac = &hw->mac;
71 if (hw->phy.multispeed_fiber) { 68
72 /* Set up dual speed SFP+ support */ 69 /* enable the laser control functions for SFP+ fiber */
73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 70 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
74 mac->ops.disable_tx_laser = 71 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber; 72 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser = 73 mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
80 mac->ops.disable_tx_laser = NULL; 77 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL; 78 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL; 79 mac->ops.flap_tx_laser = NULL;
80 }
81
82 if (hw->phy.multispeed_fiber) {
83 /* Set up dual speed SFP+ support */
84 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
85 } else {
83 if ((mac->ops.get_media_type(hw) == 86 if ((mac->ops.get_media_type(hw) ==
84 ixgbe_media_type_backplane) && 87 ixgbe_media_type_backplane) &&
85 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 88 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
93static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 96static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
94{ 97{
95 s32 ret_val = 0; 98 s32 ret_val = 0;
99 u32 reg_anlp1 = 0;
100 u32 i = 0;
96 u16 list_offset, data_offset, data_value; 101 u16 list_offset, data_offset, data_value;
97 102
98 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 103 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -119,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
119 IXGBE_WRITE_FLUSH(hw); 124 IXGBE_WRITE_FLUSH(hw);
120 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 125 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
121 } 126 }
122 /* Now restart DSP by setting Restart_AN */
123 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
124 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
125 127
126 /* Release the semaphore */ 128 /* Release the semaphore */
127 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 129 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
128 /* Delay obtaining semaphore again to allow FW access */ 130 /* Delay obtaining semaphore again to allow FW access */
129 msleep(hw->eeprom.semaphore_delay); 131 msleep(hw->eeprom.semaphore_delay);
132
133 /* Now restart DSP by setting Restart_AN and clearing LMS */
134 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
135 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
136 IXGBE_AUTOC_AN_RESTART));
137
138 /* Wait for AN to leave state 0 */
139 for (i = 0; i < 10; i++) {
140 msleep(4);
141 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
142 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
143 break;
144 }
145 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
146 hw_dbg(hw, "sfp module setup not complete\n");
147 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
148 goto setup_sfp_out;
149 }
150
151 /* Restart DSP by setting Restart_AN and return to SFI mode */
152 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
153 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
154 IXGBE_AUTOC_AN_RESTART));
130 } 155 }
131 156
132setup_sfp_out: 157setup_sfp_out:
@@ -174,7 +199,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
174 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 199 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
175 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 200 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
176 mac->ops.get_link_capabilities = 201 mac->ops.get_link_capabilities =
177 &ixgbe_get_copper_link_capabilities_82599; 202 &ixgbe_get_copper_link_capabilities_generic;
178 } 203 }
179 204
180 /* Set necessary function pointers based on phy type */ 205 /* Set necessary function pointers based on phy type */
@@ -184,6 +209,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
184 phy->ops.get_firmware_version = 209 phy->ops.get_firmware_version =
185 &ixgbe_get_phy_firmware_version_tnx; 210 &ixgbe_get_phy_firmware_version_tnx;
186 break; 211 break;
212 case ixgbe_phy_aq:
213 phy->ops.get_firmware_version =
214 &ixgbe_get_phy_firmware_version_generic;
215 break;
187 default: 216 default:
188 break; 217 break;
189 } 218 }
@@ -290,37 +319,6 @@ out:
290} 319}
291 320
292/** 321/**
293 * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
294 * @hw: pointer to hardware structure
295 * @speed: pointer to link speed
296 * @autoneg: boolean auto-negotiation value
297 *
298 * Determines the link capabilities by reading the AUTOC register.
299 **/
300static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
301 ixgbe_link_speed *speed,
302 bool *autoneg)
303{
304 s32 status = IXGBE_ERR_LINK_SETUP;
305 u16 speed_ability;
306
307 *speed = 0;
308 *autoneg = true;
309
310 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
311 &speed_ability);
312
313 if (status == 0) {
314 if (speed_ability & MDIO_SPEED_10G)
315 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
316 if (speed_ability & MDIO_PMA_SPEED_1000)
317 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
318 }
319
320 return status;
321}
322
323/**
324 * ixgbe_get_media_type_82599 - Get media type 322 * ixgbe_get_media_type_82599 - Get media type
325 * @hw: pointer to hardware structure 323 * @hw: pointer to hardware structure
326 * 324 *
@@ -332,7 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
332 330
333 /* Detect if there is a copper PHY attached. */ 331 /* Detect if there is a copper PHY attached. */
334 if (hw->phy.type == ixgbe_phy_cu_unknown || 332 if (hw->phy.type == ixgbe_phy_cu_unknown ||
335 hw->phy.type == ixgbe_phy_tn) { 333 hw->phy.type == ixgbe_phy_tn ||
334 hw->phy.type == ixgbe_phy_aq) {
336 media_type = ixgbe_media_type_copper; 335 media_type = ixgbe_media_type_copper;
337 goto out; 336 goto out;
338 } 337 }
@@ -342,11 +341,13 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
342 case IXGBE_DEV_ID_82599_KX4_MEZZ: 341 case IXGBE_DEV_ID_82599_KX4_MEZZ:
343 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 342 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
344 case IXGBE_DEV_ID_82599_KR: 343 case IXGBE_DEV_ID_82599_KR:
344 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
345 case IXGBE_DEV_ID_82599_XAUI_LOM: 345 case IXGBE_DEV_ID_82599_XAUI_LOM:
346 /* Default device ID is mezzanine card KX/KX4 */ 346 /* Default device ID is mezzanine card KX/KX4 */
347 media_type = ixgbe_media_type_backplane; 347 media_type = ixgbe_media_type_backplane;
348 break; 348 break;
349 case IXGBE_DEV_ID_82599_SFP: 349 case IXGBE_DEV_ID_82599_SFP:
350 case IXGBE_DEV_ID_82599_SFP_FCOE:
350 case IXGBE_DEV_ID_82599_SFP_EM: 351 case IXGBE_DEV_ID_82599_SFP_EM:
351 media_type = ixgbe_media_type_fiber; 352 media_type = ixgbe_media_type_fiber;
352 break; 353 break;
@@ -1924,6 +1925,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1924 hw->phy.ops.identify(hw); 1925 hw->phy.ops.identify(hw);
1925 1926
1926 if (hw->phy.type == ixgbe_phy_tn || 1927 if (hw->phy.type == ixgbe_phy_tn ||
1928 hw->phy.type == ixgbe_phy_aq ||
1927 hw->phy.type == ixgbe_phy_cu_unknown) { 1929 hw->phy.type == ixgbe_phy_cu_unknown) {
1928 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1930 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1929 &ext_ability); 1931 &ext_ability);
@@ -2125,51 +2127,6 @@ fw_version_out:
2125 return status; 2127 return status;
2126} 2128}
2127 2129
2128/**
2129 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2130 * the EEPROM
2131 * @hw: pointer to hardware structure
2132 * @wwnn_prefix: the alternative WWNN prefix
2133 * @wwpn_prefix: the alternative WWPN prefix
2134 *
2135 * This function will read the EEPROM from the alternative SAN MAC address
2136 * block to check the support for the alternative WWNN/WWPN prefix support.
2137 **/
2138static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2139 u16 *wwpn_prefix)
2140{
2141 u16 offset, caps;
2142 u16 alt_san_mac_blk_offset;
2143
2144 /* clear output first */
2145 *wwnn_prefix = 0xFFFF;
2146 *wwpn_prefix = 0xFFFF;
2147
2148 /* check if alternative SAN MAC is supported */
2149 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2150 &alt_san_mac_blk_offset);
2151
2152 if ((alt_san_mac_blk_offset == 0) ||
2153 (alt_san_mac_blk_offset == 0xFFFF))
2154 goto wwn_prefix_out;
2155
2156 /* check capability in alternative san mac address block */
2157 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2158 hw->eeprom.ops.read(hw, offset, &caps);
2159 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2160 goto wwn_prefix_out;
2161
2162 /* get the corresponding prefix for WWNN/WWPN */
2163 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2164 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2165
2166 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2167 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2168
2169wwn_prefix_out:
2170 return 0;
2171}
2172
2173static struct ixgbe_mac_operations mac_ops_82599 = { 2130static struct ixgbe_mac_operations mac_ops_82599 = {
2174 .init_hw = &ixgbe_init_hw_generic, 2131 .init_hw = &ixgbe_init_hw_generic,
2175 .reset_hw = &ixgbe_reset_hw_82599, 2132 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2181 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2138 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2182 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 2139 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2183 .get_device_caps = &ixgbe_get_device_caps_82599, 2140 .get_device_caps = &ixgbe_get_device_caps_82599,
2184 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, 2141 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2185 .stop_adapter = &ixgbe_stop_adapter_generic, 2142 .stop_adapter = &ixgbe_stop_adapter_generic,
2186 .get_bus_info = &ixgbe_get_bus_info_generic, 2143 .get_bus_info = &ixgbe_get_bus_info_generic,
2187 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2144 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2171,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2214 .init_params = &ixgbe_init_eeprom_params_generic, 2171 .init_params = &ixgbe_init_eeprom_params_generic,
2215 .read = &ixgbe_read_eerd_generic, 2172 .read = &ixgbe_read_eerd_generic,
2216 .write = &ixgbe_write_eeprom_generic, 2173 .write = &ixgbe_write_eeprom_generic,
2174 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2217 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2175 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2218 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2176 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2219}; 2177};
@@ -2240,5 +2198,5 @@ struct ixgbe_info ixgbe_82599_info = {
2240 .mac_ops = &mac_ops_82599, 2198 .mac_ops = &mac_ops_82599,
2241 .eeprom_ops = &eeprom_ops_82599, 2199 .eeprom_ops = &eeprom_ops_82599,
2242 .phy_ops = &phy_ops_82599, 2200 .phy_ops = &phy_ops_82599,
2243 .mbx_ops = &mbx_ops_82599, 2201 .mbx_ops = &mbx_ops_generic,
2244}; 2202};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca1316389..cc11e422ce9b 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
49 48
50static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); 49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); 50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
55static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
56 54
57/** 55/**
58 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 56 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -198,30 +196,110 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
198} 196}
199 197
200/** 198/**
201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 199 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
202 * @hw: pointer to hardware structure 200 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM 201 * @pba_num: stores the part number string from the EEPROM
202 * @pba_num_size: part number string buffer length
204 * 203 *
205 * Reads the part number from the EEPROM. 204 * Reads the part number string from the EEPROM.
206 **/ 205 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 206s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
207 u32 pba_num_size)
208{ 208{
209 s32 ret_val; 209 s32 ret_val;
210 u16 data; 210 u16 data;
211 u16 pba_ptr;
212 u16 offset;
213 u16 length;
214
215 if (pba_num == NULL) {
216 hw_dbg(hw, "PBA string buffer was null\n");
217 return IXGBE_ERR_INVALID_ARGUMENT;
218 }
211 219
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 220 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) { 221 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n"); 222 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val; 223 return ret_val;
216 } 224 }
217 *pba_num = (u32)(data << 16);
218 225
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 226 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
220 if (ret_val) { 227 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n"); 228 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val; 229 return ret_val;
223 } 230 }
224 *pba_num |= data; 231
232 /*
233	 * if data is not the pointer guard, the PBA must be in legacy format,
234	 * which means pba_ptr is actually our second data word for the PBA
235	 * number and we can decode it into an ASCII string
236 */
237 if (data != IXGBE_PBANUM_PTR_GUARD) {
238 hw_dbg(hw, "NVM PBA number is not stored as string\n");
239
240 /* we will need 11 characters to store the PBA */
241 if (pba_num_size < 11) {
242 hw_dbg(hw, "PBA string buffer too small\n");
243 return IXGBE_ERR_NO_SPACE;
244 }
245
246 /* extract hex string from data and pba_ptr */
247 pba_num[0] = (data >> 12) & 0xF;
248 pba_num[1] = (data >> 8) & 0xF;
249 pba_num[2] = (data >> 4) & 0xF;
250 pba_num[3] = data & 0xF;
251 pba_num[4] = (pba_ptr >> 12) & 0xF;
252 pba_num[5] = (pba_ptr >> 8) & 0xF;
253 pba_num[6] = '-';
254 pba_num[7] = 0;
255 pba_num[8] = (pba_ptr >> 4) & 0xF;
256 pba_num[9] = pba_ptr & 0xF;
257
258 /* put a null character on the end of our string */
259 pba_num[10] = '\0';
260
261 /* switch all the data but the '-' to hex char */
262 for (offset = 0; offset < 10; offset++) {
263 if (pba_num[offset] < 0xA)
264 pba_num[offset] += '0';
265 else if (pba_num[offset] < 0x10)
266 pba_num[offset] += 'A' - 0xA;
267 }
268
269 return 0;
270 }
271
272 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
273 if (ret_val) {
274 hw_dbg(hw, "NVM Read Error\n");
275 return ret_val;
276 }
277
278 if (length == 0xFFFF || length == 0) {
279 hw_dbg(hw, "NVM PBA number section invalid length\n");
280 return IXGBE_ERR_PBA_SECTION;
281 }
282
283 /* check if pba_num buffer is big enough */
284 if (pba_num_size < (((u32)length * 2) - 1)) {
285 hw_dbg(hw, "PBA string buffer too small\n");
286 return IXGBE_ERR_NO_SPACE;
287 }
288
289 /* trim pba length from start of string */
290 pba_ptr++;
291 length--;
292
293 for (offset = 0; offset < length; offset++) {
294 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
295 if (ret_val) {
296 hw_dbg(hw, "NVM Read Error\n");
297 return ret_val;
298 }
299 pba_num[offset * 2] = (u8)(data >> 8);
300 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
301 }
302 pba_num[offset * 2] = '\0';
225 303
226 return 0; 304 return 0;
227} 305}
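
When the second EEPROM word is not IXGBE_PBANUM_PTR_GUARD, the two words hold the part number directly and the code above unpacks eight nibbles into the 11-byte "XXXXXX-0XX" form, converting each nibble to a hex character in a final pass (the literal dash fails both range tests and passes through untouched). A standalone sketch of that decode, with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

/* Decode two 16-bit EEPROM words into the legacy 11-byte PBA string,
 * mirroring the nibble-to-hex-char walk in the hunk above. */
static void demo_decode_legacy_pba(uint16_t w0, uint16_t w1, char pba[11])
{
	int i;

	pba[0] = (w0 >> 12) & 0xF;
	pba[1] = (w0 >> 8) & 0xF;
	pba[2] = (w0 >> 4) & 0xF;
	pba[3] = w0 & 0xF;
	pba[4] = (w1 >> 12) & 0xF;
	pba[5] = (w1 >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;			/* becomes '0' in the loop below */
	pba[8] = (w1 >> 4) & 0xF;
	pba[9] = w1 & 0xF;
	pba[10] = '\0';

	for (i = 0; i < 10; i++) {
		if (pba[i] < 0xA)
			pba[i] += '0';
		else if (pba[i] < 0x10)
			pba[i] += 'A' - 0xA;
		/* '-' (0x2D) matches neither test and is left alone */
	}
}

int main(void)
{
	char pba[11];

	demo_decode_legacy_pba(0xE665, 0x6012, pba);
	printf("PBA: %s\n", pba);	/* E66560-012 */
	return 0;
}
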
@@ -638,7 +716,7 @@ out:
638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 716 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
639 * read or write is done respectively. 717 * read or write is done respectively.
640 **/ 718 **/
641static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 719s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
642{ 720{
643 u32 i; 721 u32 i;
644 u32 reg; 722 u32 reg;
@@ -1009,7 +1087,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1009 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
1010 * @hw: pointer to hardware structure 1088 * @hw: pointer to hardware structure
1011 **/ 1089 **/
1012static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) 1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1013{ 1091{
1014 u16 i; 1092 u16 i;
1015 u16 j; 1093 u16 j;
@@ -1072,7 +1150,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1072 status = hw->eeprom.ops.read(hw, 0, &checksum); 1150 status = hw->eeprom.ops.read(hw, 0, &checksum);
1073 1151
1074 if (status == 0) { 1152 if (status == 0) {
1075 checksum = ixgbe_calc_eeprom_checksum(hw); 1153 checksum = hw->eeprom.ops.calc_checksum(hw);
1076 1154
1077 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1155 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1078 1156
@@ -1110,7 +1188,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1110 status = hw->eeprom.ops.read(hw, 0, &checksum); 1188 status = hw->eeprom.ops.read(hw, 0, &checksum);
1111 1189
1112 if (status == 0) { 1190 if (status == 0) {
1113 checksum = ixgbe_calc_eeprom_checksum(hw); 1191 checksum = hw->eeprom.ops.calc_checksum(hw);
1114 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1115 checksum); 1193 checksum);
1116 } else { 1194 } else {
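
Dropping the static and routing callers through hw->eeprom.ops.calc_checksum turns the checksum calculation into an overridable op: validate and update stay generic while a new part (the X540 added in this series) can supply its own implementation. A small sketch of the indirection, with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

struct demo_eeprom_ops {
	uint16_t (*calc_checksum)(void);
};

static uint16_t demo_calc_checksum_generic(void)
{
	return 0xBABA;	/* placeholder sum */
}

static uint16_t demo_calc_checksum_x540(void)
{
	return 0xBEEF;	/* a new MAC overrides without touching callers */
}

/* generic caller never names the implementation directly */
static int demo_validate_checksum(const struct demo_eeprom_ops *ops,
				  uint16_t stored)
{
	return ops->calc_checksum() == stored ? 0 : -1;
}

int main(void)
{
	struct demo_eeprom_ops generic = { demo_calc_checksum_generic };
	struct demo_eeprom_ops x540 = { demo_calc_checksum_x540 };

	printf("generic: %d, x540: %d\n",
	       demo_validate_checksum(&generic, 0xBABA),
	       demo_validate_checksum(&x540, 0xBABA));
	return 0;
}
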
@@ -1595,6 +1673,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1595 u32 mflcn_reg, fccfg_reg; 1673 u32 mflcn_reg, fccfg_reg;
1596 u32 reg; 1674 u32 reg;
1597 u32 rx_pba_size; 1675 u32 rx_pba_size;
1676 u32 fcrtl, fcrth;
1598 1677
1599#ifdef CONFIG_DCB 1678#ifdef CONFIG_DCB
1600 if (hw->fc.requested_mode == ixgbe_fc_pfc) 1679 if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1750,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1671 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 1750 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1672 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 1751 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1673 1752
1674 reg = IXGBE_READ_REG(hw, IXGBE_MTQC); 1753 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1675 /* Thresholds are different for link flow control when in DCB mode */ 1754 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
1676 if (reg & IXGBE_MTQC_RT_ENA) {
1677 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1678 1755
1679 /* Always disable XON for LFC when in DCB mode */ 1756 fcrth = (rx_pba_size - hw->fc.high_water) << 10;
1680 reg = (rx_pba_size >> 5) & 0xFFE0; 1757 fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
1681 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1682 1758
1683 reg = (rx_pba_size >> 2) & 0xFFE0; 1759 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1684 if (hw->fc.current_mode & ixgbe_fc_tx_pause) 1760 fcrth |= IXGBE_FCRTH_FCEN;
1685 reg |= IXGBE_FCRTH_FCEN; 1761 if (hw->fc.send_xon)
1686 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); 1762 fcrtl |= IXGBE_FCRTL_XONE;
1687 } else {
1688 /*
1689 * Set up and enable Rx high/low water mark thresholds,
1690 * enable XON.
1691 */
1692 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1693 if (hw->fc.send_xon) {
1694 IXGBE_WRITE_REG(hw,
1695 IXGBE_FCRTL_82599(packetbuf_num),
1696 (hw->fc.low_water |
1697 IXGBE_FCRTL_XONE));
1698 } else {
1699 IXGBE_WRITE_REG(hw,
1700 IXGBE_FCRTL_82599(packetbuf_num),
1701 hw->fc.low_water);
1702 }
1703
1704 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
1705 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
1706 }
1707 } 1763 }
1708 1764
1765 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
1766 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
1767
1709 /* Configure pause time (2 TCs per register) */ 1768 /* Configure pause time (2 TCs per register) */
1710 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 1769 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1711 if ((packetbuf_num & 1) == 0) 1770 if ((packetbuf_num & 1) == 0)
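
The pause-time write here packs two traffic classes into each 32-bit FCTTV register, selecting the half by the low bit of packetbuf_num. The update itself is cut off in this hunk, so the sketch below only assumes the usual read-modify-write of one 16-bit half:

#include <stdint.h>
#include <stdio.h>

/* Two 16-bit pause times share one 32-bit register; the low bit of the
 * traffic-class index picks the half to replace. */
static uint32_t demo_pack_pause_time(uint32_t reg, int tc, uint16_t pause)
{
	if ((tc & 1) == 0)
		reg = (reg & 0xFFFF0000u) | pause;
	else
		reg = (reg & 0x0000FFFFu) | ((uint32_t)pause << 16);
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = demo_pack_pause_time(reg, 0, 0x1234);	/* low half  */
	reg = demo_pack_pause_time(reg, 1, 0x5678);	/* high half */
	printf("FCTTV-style register: 0x%08x\n", reg);
	return 0;
}
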
@@ -2705,3 +2764,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2705 2764
2706 return 0; 2765 return 0;
2707} 2766}
2767
2768/**
2769 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
2770 * the EEPROM
2771 * @hw: pointer to hardware structure
2772 * @wwnn_prefix: the alternative WWNN prefix
2773 * @wwpn_prefix: the alternative WWPN prefix
2774 *
2775 * This function will read the EEPROM from the alternative SAN MAC address
2776 * block to check for alternative WWNN/WWPN prefix support.
2777 **/
2778s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2779 u16 *wwpn_prefix)
2780{
2781 u16 offset, caps;
2782 u16 alt_san_mac_blk_offset;
2783
2784 /* clear output first */
2785 *wwnn_prefix = 0xFFFF;
2786 *wwpn_prefix = 0xFFFF;
2787
2788 /* check if alternative SAN MAC is supported */
2789 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2790 &alt_san_mac_blk_offset);
2791
2792 if ((alt_san_mac_blk_offset == 0) ||
2793 (alt_san_mac_blk_offset == 0xFFFF))
2794 goto wwn_prefix_out;
2795
2796 /* check capability in alternative san mac address block */
2797 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2798 hw->eeprom.ops.read(hw, offset, &caps);
2799 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2800 goto wwn_prefix_out;
2801
2802 /* get the corresponding prefix for WWNN/WWPN */
2803 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2804 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2805
2806 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2807 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2808
2809wwn_prefix_out:
2810 return 0;
2811}
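
ixgbe_get_wwn_prefix_generic is a typical EEPROM block walk: default the outputs, read a block pointer word, treat 0 and 0xFFFF as "block absent" sentinels, and gate on a capability bit before reading the real data. The same shape against a fake EEPROM image (the offsets and demo_* names here are made up):

#include <stdint.h>
#include <stdio.h>

#define DEMO_BLK_PTR_WORD 0x27	/* hypothetical pointer location */
#define DEMO_CAP_ALTWWN   0x1

/* fake EEPROM: word 0x27 points at a block whose caps word sets bit 0 */
static uint16_t demo_eeprom_read(uint16_t offset)
{
	static const uint16_t image[64] = {
		[0x27] = 0x30, [0x30] = DEMO_CAP_ALTWWN, [0x31] = 0x2000,
	};
	return offset < 64 ? image[offset] : 0xFFFF;
}

/* Same shape as the function above: clear the output first, bail on the
 * 0/0xFFFF sentinels, check a capability bit, then read the prefix. */
static void demo_get_prefix(uint16_t *prefix)
{
	uint16_t blk = demo_eeprom_read(DEMO_BLK_PTR_WORD);

	*prefix = 0xFFFF;
	if (blk == 0 || blk == 0xFFFF)
		return;
	if (!(demo_eeprom_read(blk) & DEMO_CAP_ALTWWN))
		return;
	*prefix = demo_eeprom_read(blk + 1);
}

int main(void)
{
	uint16_t prefix;

	demo_get_prefix(&prefix);
	printf("prefix: 0x%04x\n", prefix);
	return 0;
}
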
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437dc..e1f980a8a09d 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,7 +35,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); 38s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
39 u32 pba_num_size);
39s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 40s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
40s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 41s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
41void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); 42void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +50,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
49s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 50s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
50s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 51s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
51 u16 *data); 52 u16 *data);
53u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 54s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
53 u16 *checksum_val); 55 u16 *checksum_val);
54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 56s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
57s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
55 58
56s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 59s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
57 u32 enable_addr); 60 u32 enable_addr);
@@ -81,7 +84,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
81s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 84s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
82 ixgbe_link_speed *speed, 85 ixgbe_link_speed *speed,
83 bool *link_up, bool link_up_wait_to_complete); 86 bool *link_up, bool link_up_wait_to_complete);
84 87s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
88 u16 *wwpn_prefix);
85s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 89s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
86s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 90s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
87 91
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca3..d16c260c1f50 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
42 * It should be called only after the rules are checked by 42 * It should be called only after the rules are checked by
43 * ixgbe_dcb_check_config(). 43 * ixgbe_dcb_check_config().
44 */ 44 */
45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, 45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
46 struct ixgbe_dcb_config *dcb_config,
46 int max_frame, u8 direction) 47 int max_frame, u8 direction)
47{ 48{
48 struct tc_bw_alloc *p; 49 struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
124 * credit may not be enough to send out a TSO 125 * credit may not be enough to send out a TSO
125 * packet in descriptor plane arbitration. 126 * packet in descriptor plane arbitration.
126 */ 127 */
127 if (credit_max && 128 if ((hw->mac.type == ixgbe_mac_82598EB) &&
129 credit_max &&
128 (credit_max < MINIMUM_CREDIT_FOR_TSO)) 130 (credit_max < MINIMUM_CREDIT_FOR_TSO))
129 credit_max = MINIMUM_CREDIT_FOR_TSO; 131 credit_max = MINIMUM_CREDIT_FOR_TSO;
130 132
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
150 struct ixgbe_dcb_config *dcb_config) 152 struct ixgbe_dcb_config *dcb_config)
151{ 153{
152 s32 ret = 0; 154 s32 ret = 0;
153 if (hw->mac.type == ixgbe_mac_82598EB) 155 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB:
154 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 157 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
155 else if (hw->mac.type == ixgbe_mac_82599EB) 158 break;
159 case ixgbe_mac_82599EB:
160 case ixgbe_mac_X540:
156 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 161 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
162 break;
163 default:
164 break;
165 }
157 return ret; 166 return ret;
158} 167}
159 168
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129e..1cfe38ee1644 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
150/* DCB driver APIs */ 150/* DCB driver APIs */
151 151
152/* DCB credits calculation */ 152/* DCB credits calculation */
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); 153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8);
154 155
155/* DCB hw initialization */ 156/* DCB hw initialization */
156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc59..9a5e89c12e05 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
256 * for each traffic class. 256 * for each traffic class.
257 */ 257 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
259 if (dcb_config->rx_pba_cfg == pba_equal) { 259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size = IXGBE_RXPBSIZE_64KB; 260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 } else { 261 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
263 : IXGBE_RXPBSIZE_48KB;
264 }
265 262
266 reg = ((rx_pba_size >> 5) & 0xFFF0);
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
269 reg |= IXGBE_FCRTL_XONE; 265 reg |= IXGBE_FCRTL_XONE;
270 266
271 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
272 268
273 reg = ((rx_pba_size >> 2) & 0xFFF0); 269 reg = (rx_pba_size - hw->fc.high_water) << 10;
274 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
275 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
276 reg |= IXGBE_FCRTH_FCEN; 272 reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f224715073..374e1f74d0f5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
251 251
252 /* Configure PFC Tx thresholds per TC */ 252 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
254 if (dcb_config->rx_pba_cfg == pba_equal) 254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size = IXGBE_RXPBSIZE_64KB; 255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 else 256
257 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB 257 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 : IXGBE_RXPBSIZE_48KB;
259 258
260 reg = ((rx_pba_size >> 5) & 0xFFE0);
261 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
262 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
263 reg |= IXGBE_FCRTL_XONE; 261 reg |= IXGBE_FCRTL_XONE;
264 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
265 263
266 reg = ((rx_pba_size >> 2) & 0xFFE0); 264 reg = (rx_pba_size - hw->fc.high_water) << 10;
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
269 reg |= IXGBE_FCRTH_FCEN; 267 reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..bf566e8a455e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
130 netdev->netdev_ops->ndo_stop(netdev); 130 netdev->netdev_ops->ndo_stop(netdev);
131 ixgbe_clear_interrupt_scheme(adapter); 131 ixgbe_clear_interrupt_scheme(adapter);
132 132
133 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 133 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
134 switch (adapter->hw.mac.type) {
135 case ixgbe_mac_82598EB:
134 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 136 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
135 adapter->hw.fc.requested_mode = ixgbe_fc_none; 137 adapter->hw.fc.requested_mode = ixgbe_fc_none;
136 } 138 break;
137 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 139 case ixgbe_mac_82599EB:
138 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 140 case ixgbe_mac_X540:
139 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 141 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
140 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 142 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
143 break;
144 default:
145 break;
141 } 146 }
147
142 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 148 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
143 ixgbe_init_interrupt_scheme(adapter); 149 ixgbe_init_interrupt_scheme(adapter);
144 if (netif_running(netdev)) 150 if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
155 adapter->dcb_cfg.pfc_mode_enable = false; 161 adapter->dcb_cfg.pfc_mode_enable = false;
156 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 162 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
157 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 163 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
158 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 164 switch (adapter->hw.mac.type) {
165 case ixgbe_mac_82599EB:
166 case ixgbe_mac_X540:
159 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 167 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
168 break;
169 default:
170 break;
171 }
160 172
161 ixgbe_init_interrupt_scheme(adapter); 173 ixgbe_init_interrupt_scheme(adapter);
162 if (netif_running(netdev)) 174 if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
178 for (i = 0; i < netdev->addr_len; i++) 190 for (i = 0; i < netdev->addr_len; i++)
179 perm_addr[i] = adapter->hw.mac.perm_addr[i]; 191 perm_addr[i] = adapter->hw.mac.perm_addr[i];
180 192
181 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 193 switch (adapter->hw.mac.type) {
194 case ixgbe_mac_82599EB:
195 case ixgbe_mac_X540:
182 for (j = 0; j < netdev->addr_len; j++, i++) 196 for (j = 0; j < netdev->addr_len; j++, i++)
183 perm_addr[i] = adapter->hw.mac.san_addr[j]; 197 perm_addr[i] = adapter->hw.mac.san_addr[j];
198 break;
199 default:
200 break;
184 } 201 }
185} 202}
186 203
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
366 } 383 }
367 384
368 if (adapter->dcb_cfg.pfc_mode_enable) { 385 if (adapter->dcb_cfg.pfc_mode_enable) {
369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 386 switch (adapter->hw.mac.type) {
370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 387 case ixgbe_mac_82599EB:
371 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 388 case ixgbe_mac_X540:
389 if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
390 adapter->last_lfc_mode =
391 adapter->hw.fc.current_mode;
392 break;
393 default:
394 break;
395 }
372 adapter->hw.fc.requested_mode = ixgbe_fc_pfc; 396 adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
373 } else { 397 } else {
374 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 398 switch (adapter->hw.mac.type) {
375 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 399 case ixgbe_mac_82598EB:
376 else
377 adapter->hw.fc.requested_mode = ixgbe_fc_none; 400 adapter->hw.fc.requested_mode = ixgbe_fc_none;
401 break;
402 case ixgbe_mac_82599EB:
403 case ixgbe_mac_X540:
404 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
405 break;
406 default:
407 break;
408 }
378 } 409 }
379 410
380 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
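
Both branches here preserve the pre-PFC link flow control mode: enabling PFC stashes current_mode in last_lfc_mode, but only when PFC is not already active so repeated enables cannot clobber the saved value, and disabling restores it on the newer MACs. The save/restore in isolation, with hypothetical demo_* names:

#include <stdio.h>

enum demo_fc_mode { DEMO_FC_NONE, DEMO_FC_FULL, DEMO_FC_PFC };

struct demo_hw {
	enum demo_fc_mode current_mode;
	enum demo_fc_mode requested_mode;
	enum demo_fc_mode last_lfc_mode;
};

/* Save the link-FC mode only when actually leaving it, so that disabling
 * PFC later can restore whatever flow control was active before. */
static void demo_enable_pfc(struct demo_hw *hw)
{
	if (hw->current_mode != DEMO_FC_PFC)
		hw->last_lfc_mode = hw->current_mode;
	hw->requested_mode = DEMO_FC_PFC;
}

static void demo_disable_pfc(struct demo_hw *hw)
{
	hw->requested_mode = hw->last_lfc_mode;
}

int main(void)
{
	struct demo_hw hw = { .current_mode = DEMO_FC_FULL };

	demo_enable_pfc(&hw);
	demo_disable_pfc(&hw);
	printf("restored mode: %d\n", hw.requested_mode); /* DEMO_FC_FULL */
	return 0;
}
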
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff2..23ff23e8b393 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
185 ADVERTISED_FIBRE); 185 ADVERTISED_FIBRE);
186 ecmd->port = PORT_FIBRE; 186 ecmd->port = PORT_FIBRE;
187 ecmd->autoneg = AUTONEG_DISABLE; 187 ecmd->autoneg = AUTONEG_DISABLE;
188 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
189 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
190 ecmd->supported |= (SUPPORTED_1000baseT_Full |
191 SUPPORTED_Autoneg |
192 SUPPORTED_FIBRE);
193 ecmd->advertising = (ADVERTISED_10000baseT_Full |
194 ADVERTISED_1000baseT_Full |
195 ADVERTISED_Autoneg |
196 ADVERTISED_FIBRE);
197 ecmd->port = PORT_FIBRE;
188 } else { 198 } else {
189 ecmd->supported |= (SUPPORTED_1000baseT_Full | 199 ecmd->supported |= (SUPPORTED_1000baseT_Full |
190 SUPPORTED_FIBRE); 200 SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
204 /* Get PHY type */ 214 /* Get PHY type */
205 switch (adapter->hw.phy.type) { 215 switch (adapter->hw.phy.type) {
206 case ixgbe_phy_tn: 216 case ixgbe_phy_tn:
217 case ixgbe_phy_aq:
207 case ixgbe_phy_cu_unknown: 218 case ixgbe_phy_cu_unknown:
208 /* Copper 10G-BASET */ 219 /* Copper 10G-BASET */
209 ecmd->port = PORT_TP; 220 ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
332 else 343 else
333 pause->autoneg = 1; 344 pause->autoneg = 1;
334 345
335#ifdef CONFIG_DCB
336 if (hw->fc.current_mode == ixgbe_fc_pfc) {
337 pause->rx_pause = 0;
338 pause->tx_pause = 0;
339 }
340
341#endif
342 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 346 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
343 pause->rx_pause = 1; 347 pause->rx_pause = 1;
344 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 348 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
346 } else if (hw->fc.current_mode == ixgbe_fc_full) { 350 } else if (hw->fc.current_mode == ixgbe_fc_full) {
347 pause->rx_pause = 1; 351 pause->rx_pause = 1;
348 pause->tx_pause = 1; 352 pause->tx_pause = 1;
353#ifdef CONFIG_DCB
354 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
355 pause->rx_pause = 0;
356 pause->tx_pause = 0;
357#endif
349 } 358 }
350} 359}
351 360
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
363 return -EINVAL; 372 return -EINVAL;
364 373
365#endif 374#endif
366
367 fc = hw->fc; 375 fc = hw->fc;
368 376
369 if (pause->autoneg != AUTONEG_ENABLE) 377 if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
412 else 420 else
413 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 421 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
414 422
415 if (netif_running(netdev))
416 ixgbe_reinit_locked(adapter);
417 else
418 ixgbe_reset(adapter);
419
420 return 0; 423 return 0;
421} 424}
422 425
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
428static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 431static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
429{ 432{
430 struct ixgbe_adapter *adapter = netdev_priv(netdev); 433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
434 u32 feature_list;
431 435
432 if (data) { 436 feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
433 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 437 switch (adapter->hw.mac.type) {
434 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 438 case ixgbe_mac_82599EB:
435 netdev->features |= NETIF_F_SCTP_CSUM; 439 case ixgbe_mac_X540:
436 } else { 440 feature_list |= NETIF_F_SCTP_CSUM;
437 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 441 break;
438 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 442 default:
439 netdev->features &= ~NETIF_F_SCTP_CSUM; 443 break;
440 } 444 }
445 if (data)
446 netdev->features |= feature_list;
447 else
448 netdev->features &= ~feature_list;
441 449
442 return 0; 450 return 0;
443} 451}
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
530 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 538 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
531 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 539 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
532 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 540 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
533 for (i = 0; i < 8; i++) 541 for (i = 0; i < 8; i++) {
534 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 542 switch (hw->mac.type) {
535 for (i = 0; i < 8; i++) 543 case ixgbe_mac_82598EB:
536 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 544 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
545 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
546 break;
547 case ixgbe_mac_82599EB:
548 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
549 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
550 break;
551 default:
552 break;
553 }
554 }
537 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 555 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
538 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 556 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
539 557
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
615 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 633 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
616 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 634 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
617 635
636 /* DCB */
618 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 637 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
619 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 638 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
620 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 639 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -820,9 +839,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 839 struct ixgbe_adapter *adapter = netdev_priv(netdev);
821 char firmware_version[32]; 840 char firmware_version[32];
822 841
823 strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 842 strncpy(drvinfo->driver, ixgbe_driver_name,
843 sizeof(drvinfo->driver) - 1);
824 strncpy(drvinfo->version, ixgbe_driver_version, 844 strncpy(drvinfo->version, ixgbe_driver_version,
825 sizeof(drvinfo->version)); 845 sizeof(drvinfo->version) - 1);
826 846
827 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", 847 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
828 (adapter->eeprom_version & 0xF000) >> 12, 848 (adapter->eeprom_version & 0xF000) >> 12,
@@ -905,13 +925,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
905 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 925 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
906 sizeof(struct ixgbe_ring)); 926 sizeof(struct ixgbe_ring));
907 temp_tx_ring[i].count = new_tx_count; 927 temp_tx_ring[i].count = new_tx_count;
908 err = ixgbe_setup_tx_resources(adapter, 928 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
909 &temp_tx_ring[i]);
910 if (err) { 929 if (err) {
911 while (i) { 930 while (i) {
912 i--; 931 i--;
913 ixgbe_free_tx_resources(adapter, 932 ixgbe_free_tx_resources(&temp_tx_ring[i]);
914 &temp_tx_ring[i]);
915 } 933 }
916 goto clear_reset; 934 goto clear_reset;
917 } 935 }
@@ -930,13 +948,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
930 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 948 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
931 sizeof(struct ixgbe_ring)); 949 sizeof(struct ixgbe_ring));
932 temp_rx_ring[i].count = new_rx_count; 950 temp_rx_ring[i].count = new_rx_count;
933 err = ixgbe_setup_rx_resources(adapter, 951 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
934 &temp_rx_ring[i]);
935 if (err) { 952 if (err) {
936 while (i) { 953 while (i) {
937 i--; 954 i--;
938 ixgbe_free_rx_resources(adapter, 955 ixgbe_free_rx_resources(&temp_rx_ring[i]);
939 &temp_rx_ring[i]);
940 } 956 }
941 goto err_setup; 957 goto err_setup;
942 } 958 }
@@ -951,8 +967,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
951 /* tx */ 967 /* tx */
952 if (new_tx_count != adapter->tx_ring_count) { 968 if (new_tx_count != adapter->tx_ring_count) {
953 for (i = 0; i < adapter->num_tx_queues; i++) { 969 for (i = 0; i < adapter->num_tx_queues; i++) {
954 ixgbe_free_tx_resources(adapter, 970 ixgbe_free_tx_resources(adapter->tx_ring[i]);
955 adapter->tx_ring[i]);
956 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 971 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
957 sizeof(struct ixgbe_ring)); 972 sizeof(struct ixgbe_ring));
958 } 973 }
@@ -962,8 +977,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
962 /* rx */ 977 /* rx */
963 if (new_rx_count != adapter->rx_ring_count) { 978 if (new_rx_count != adapter->rx_ring_count) {
964 for (i = 0; i < adapter->num_rx_queues; i++) { 979 for (i = 0; i < adapter->num_rx_queues; i++) {
965 ixgbe_free_rx_resources(adapter, 980 ixgbe_free_rx_resources(adapter->rx_ring[i]);
966 adapter->rx_ring[i]);
967 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 981 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
968 sizeof(struct ixgbe_ring)); 982 sizeof(struct ixgbe_ring));
969 } 983 }
@@ -1144,7 +1158,7 @@ struct ixgbe_reg_test {
1144#define TABLE64_TEST_HI 6 1158#define TABLE64_TEST_HI 6
1145 1159
1146/* default 82599 register test */ 1160/* default 82599 register test */
1147static struct ixgbe_reg_test reg_test_82599[] = { 1161static const struct ixgbe_reg_test reg_test_82599[] = {
1148 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1162 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1149 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1163 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1150 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1164 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1168,7 +1182,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
1168}; 1182};
1169 1183
1170/* default 82598 register test */ 1184/* default 82598 register test */
1171static struct ixgbe_reg_test reg_test_82598[] = { 1185static const struct ixgbe_reg_test reg_test_82598[] = {
1172 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1186 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1173 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1187 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1174 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1188 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1195,18 +1209,22 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1195 { 0, 0, 0, 0 } 1209 { 0, 0, 0, 0 }
1196}; 1210};
1197 1211
1212static const u32 register_test_patterns[] = {
1213 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
1214};
1215
1198#define REG_PATTERN_TEST(R, M, W) \ 1216#define REG_PATTERN_TEST(R, M, W) \
1199{ \ 1217{ \
1200 u32 pat, val, before; \ 1218 u32 pat, val, before; \
1201 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 1219 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
1202 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
1203 before = readl(adapter->hw.hw_addr + R); \ 1220 before = readl(adapter->hw.hw_addr + R); \
1204 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1221 writel((register_test_patterns[pat] & W), \
1222 (adapter->hw.hw_addr + R)); \
1205 val = readl(adapter->hw.hw_addr + R); \ 1223 val = readl(adapter->hw.hw_addr + R); \
1206 if (val != (_test[pat] & W & M)) { \ 1224 if (val != (register_test_patterns[pat] & W & M)) { \
1207 e_err(drv, "pattern test reg %04X failed: got " \ 1225 e_err(drv, "pattern test reg %04X failed: got " \
1208 "0x%08X expected 0x%08X\n", \ 1226 "0x%08X expected 0x%08X\n", \
1209 R, val, (_test[pat] & W & M)); \ 1227 R, val, (register_test_patterns[pat] & W & M)); \
1210 *data = R; \ 1228 *data = R; \
1211 writel(before, adapter->hw.hw_addr + R); \ 1229 writel(before, adapter->hw.hw_addr + R); \
1212 return 1; \ 1230 return 1; \
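The refactor above hoists the pattern array out of REG_PATTERN_TEST so every expansion shares one const table. The test itself writes each pattern through the write mask W and compares the read-back against the read mask M. A rough user-space sketch of the same check, with a simulated register standing in for readl()/writel():

    #include <stdio.h>
    #include <stdint.h>

    static const uint32_t test_patterns[] = {
    	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
    };

    static uint32_t fake_reg; /* stands in for the MMIO register at hw_addr + R */

    static int pattern_test(uint32_t read_mask, uint32_t write_mask)
    {
    	size_t pat;

    	for (pat = 0; pat < sizeof(test_patterns) / sizeof(test_patterns[0]); pat++) {
    		uint32_t before = fake_reg;
    		uint32_t val;

    		fake_reg = test_patterns[pat] & write_mask; /* writel(pat & W) */
    		val = fake_reg;                             /* readl() */
    		fake_reg = before;                          /* restore old value */
    		if ((val & read_mask) !=
    		    (test_patterns[pat] & write_mask & read_mask)) {
    			fprintf(stderr, "reg test failed for 0x%08X\n",
    				test_patterns[pat]);
    			return 1;
    		}
    	}
    	return 0;
    }

    int main(void)
    {
    	return pattern_test(0x8007FFF0, 0x8007FFF0);
    }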
@@ -1233,16 +1251,24 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1233 1251
1234static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1252static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1235{ 1253{
1236 struct ixgbe_reg_test *test; 1254 const struct ixgbe_reg_test *test;
1237 u32 value, before, after; 1255 u32 value, before, after;
1238 u32 i, toggle; 1256 u32 i, toggle;
1239 1257
1240 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1258 switch (adapter->hw.mac.type) {
1241 toggle = 0x7FFFF30F; 1259 case ixgbe_mac_82598EB:
1242 test = reg_test_82599;
1243 } else {
1244 toggle = 0x7FFFF3FF; 1260 toggle = 0x7FFFF3FF;
1245 test = reg_test_82598; 1261 test = reg_test_82598;
1262 break;
1263 case ixgbe_mac_82599EB:
1264 case ixgbe_mac_X540:
1265 toggle = 0x7FFFF30F;
1266 test = reg_test_82599;
1267 break;
1268 default:
1269 *data = 1;
1270 return 1;
1271 break;
1246 } 1272 }
1247 1273
1248 /* 1274 /*
@@ -1460,16 +1486,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1460 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1486 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1461 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1487 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1462 1488
1463 if (hw->mac.type == ixgbe_mac_82599EB) { 1489 switch (hw->mac.type) {
1490 case ixgbe_mac_82599EB:
1491 case ixgbe_mac_X540:
1464 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1492 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1465 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1493 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1466 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1494 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1495 break;
1496 default:
1497 break;
1467 } 1498 }
1468 1499
1469 ixgbe_reset(adapter); 1500 ixgbe_reset(adapter);
1470 1501
1471 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring); 1502 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1472 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring); 1503 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1473} 1504}
1474 1505
1475static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1506static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1514,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1483 /* Setup Tx descriptor ring and Tx buffers */ 1514 /* Setup Tx descriptor ring and Tx buffers */
1484 tx_ring->count = IXGBE_DEFAULT_TXD; 1515 tx_ring->count = IXGBE_DEFAULT_TXD;
1485 tx_ring->queue_index = 0; 1516 tx_ring->queue_index = 0;
1517 tx_ring->dev = &adapter->pdev->dev;
1518 tx_ring->netdev = adapter->netdev;
1486 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1519 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1487 tx_ring->numa_node = adapter->node; 1520 tx_ring->numa_node = adapter->node;
1488 1521
1489 err = ixgbe_setup_tx_resources(adapter, tx_ring); 1522 err = ixgbe_setup_tx_resources(tx_ring);
1490 if (err) 1523 if (err)
1491 return 1; 1524 return 1;
1492 1525
1493 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1526 switch (adapter->hw.mac.type) {
1527 case ixgbe_mac_82599EB:
1528 case ixgbe_mac_X540:
1494 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1529 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1495 reg_data |= IXGBE_DMATXCTL_TE; 1530 reg_data |= IXGBE_DMATXCTL_TE;
1496 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1531 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1532 break;
1533 default:
1534 break;
1497 } 1535 }
1498 1536
1499 ixgbe_configure_tx_ring(adapter, tx_ring); 1537 ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1539,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1501 /* Setup Rx Descriptor ring and Rx buffers */ 1539 /* Setup Rx Descriptor ring and Rx buffers */
1502 rx_ring->count = IXGBE_DEFAULT_RXD; 1540 rx_ring->count = IXGBE_DEFAULT_RXD;
1503 rx_ring->queue_index = 0; 1541 rx_ring->queue_index = 0;
1542 rx_ring->dev = &adapter->pdev->dev;
1543 rx_ring->netdev = adapter->netdev;
1504 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1544 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1505 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; 1545 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1506 rx_ring->numa_node = adapter->node; 1546 rx_ring->numa_node = adapter->node;
1507 1547
1508 err = ixgbe_setup_rx_resources(adapter, rx_ring); 1548 err = ixgbe_setup_rx_resources(rx_ring);
1509 if (err) { 1549 if (err) {
1510 ret_val = 4; 1550 ret_val = 4;
1511 goto err_nomem; 1551 goto err_nomem;
@@ -1604,8 +1644,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1604 return 13; 1644 return 13;
1605} 1645}
1606 1646
1607static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, 1647static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1608 struct ixgbe_ring *rx_ring,
1609 struct ixgbe_ring *tx_ring, 1648 struct ixgbe_ring *tx_ring,
1610 unsigned int size) 1649 unsigned int size)
1611{ 1650{
@@ -1627,7 +1666,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1627 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1666 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1628 1667
1629 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ 1668 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1630 dma_unmap_single(&adapter->pdev->dev, 1669 dma_unmap_single(rx_ring->dev,
1631 rx_buffer_info->dma, 1670 rx_buffer_info->dma,
1632 bufsz, 1671 bufsz,
1633 DMA_FROM_DEVICE); 1672 DMA_FROM_DEVICE);
@@ -1639,7 +1678,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1639 1678
1640 /* unmap buffer on Tx side */ 1679 /* unmap buffer on Tx side */
1641 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1680 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1642 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1681 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1643 1682
1644 /* increment Rx/Tx next to clean counters */ 1683 /* increment Rx/Tx next to clean counters */
1645 rx_ntc++; 1684 rx_ntc++;
@@ -1655,7 +1694,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1655 } 1694 }
1656 1695
1657 /* re-map buffers to ring, store next to clean values */ 1696 /* re-map buffers to ring, store next to clean values */
1658 ixgbe_alloc_rx_buffers(adapter, rx_ring, count); 1697 ixgbe_alloc_rx_buffers(rx_ring, count);
1659 rx_ring->next_to_clean = rx_ntc; 1698 rx_ring->next_to_clean = rx_ntc;
1660 tx_ring->next_to_clean = tx_ntc; 1699 tx_ring->next_to_clean = tx_ntc;
1661 1700
@@ -1699,7 +1738,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1699 for (i = 0; i < 64; i++) { 1738 for (i = 0; i < 64; i++) {
1700 skb_get(skb); 1739 skb_get(skb);
1701 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1740 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1702 adapter->netdev,
1703 adapter, 1741 adapter,
1704 tx_ring); 1742 tx_ring);
1705 if (tx_ret_val == NETDEV_TX_OK) 1743 if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1752,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1714 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1752 /* allow 200 milliseconds for packets to go from Tx to Rx */
1715 msleep(200); 1753 msleep(200);
1716 1754
1717 good_cnt = ixgbe_clean_test_rings(adapter, rx_ring, 1755 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1718 tx_ring, size);
1719 if (good_cnt != 64) { 1756 if (good_cnt != 64) {
1720 ret_val = 13; 1757 ret_val = 13;
1721 break; 1758 break;
@@ -1847,7 +1884,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1847 struct ixgbe_hw *hw = &adapter->hw; 1884 struct ixgbe_hw *hw = &adapter->hw;
1848 int retval = 1; 1885 int retval = 1;
1849 1886
1887 /* WOL not supported except for the following */
1850 switch(hw->device_id) { 1888 switch(hw->device_id) {
1889 case IXGBE_DEV_ID_82599_SFP:
1890 /* Only this subdevice supports WOL */
1891 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1892 wol->supported = 0;
1893 break;
1894 }
1895 retval = 0;
1896 break;
1897 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1898 /* All except this subdevice support WOL */
1899 if (hw->subsystem_device_id ==
1900 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1901 wol->supported = 0;
1902 break;
1903 }
1904 retval = 0;
1905 break;
1851 case IXGBE_DEV_ID_82599_KX4: 1906 case IXGBE_DEV_ID_82599_KX4:
1852 retval = 0; 1907 retval = 0;
1853 break; 1908 break;
@@ -1985,6 +2040,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1985 return 0; 2040 return 0;
1986} 2041}
1987 2042
2043/*
2044 * this function must be called before setting the new value of
2045 * rx_itr_setting
2046 */
2047static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2048 struct ethtool_coalesce *ec)
2049{
2050 struct net_device *netdev = adapter->netdev;
2051
2052 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2053 return false;
2054
2055 /* if interrupt rate is too high then disable RSC */
2056 if (ec->rx_coalesce_usecs != 1 &&
2057 ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2058 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2059 e_info(probe, "rx-usecs set too low, "
2060 "disabling RSC\n");
2061 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2062 return true;
2063 }
2064 } else {
2065 /* check the feature flag value and enable RSC if necessary */
2066 if ((netdev->features & NETIF_F_LRO) &&
2067 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2068 e_info(probe, "rx-usecs set to %d, "
2069 "re-enabling RSC\n",
2070 ec->rx_coalesce_usecs);
2071 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2072 return true;
2073 }
2074 }
2075 return false;
2076}
2077
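ixgbe_update_rsc() above works in interrupt rates: an rx-usecs value maps to 1000000/usecs interrupts per second, and RSC is dropped once that rate exceeds the RSC limit. A worked example of the cutoff; the rate constant below is assumed for illustration only:

    #include <stdio.h>

    #define MAX_RSC_INT_RATE 162760 /* assumed ints/s limit, for illustration */

    int main(void)
    {
    	unsigned int usecs;

    	for (usecs = 2; usecs <= 10; usecs++)
    		printf("rx-usecs=%2u -> %6u ints/s -> RSC %s\n",
    		       usecs, 1000000 / usecs,
    		       usecs > 1000000 / MAX_RSC_INT_RATE ?
    		       "stays on" : "disabled");
    	return 0;
    }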
1988static int ixgbe_set_coalesce(struct net_device *netdev, 2078static int ixgbe_set_coalesce(struct net_device *netdev,
1989 struct ethtool_coalesce *ec) 2079 struct ethtool_coalesce *ec)
1990{ 2080{
@@ -2002,17 +2092,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2002 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2092 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2003 2093
2004 if (ec->rx_coalesce_usecs > 1) { 2094 if (ec->rx_coalesce_usecs > 1) {
2005 u32 max_int;
2006 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2007 max_int = IXGBE_MAX_RSC_INT_RATE;
2008 else
2009 max_int = IXGBE_MAX_INT_RATE;
2010
2011 /* check the limits */ 2095 /* check the limits */
2012 if ((1000000/ec->rx_coalesce_usecs > max_int) || 2096 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2013 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2097 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2014 return -EINVAL; 2098 return -EINVAL;
2015 2099
2100 /* check the old value and enable RSC if necessary */
2101 need_reset = ixgbe_update_rsc(adapter, ec);
2102
2016 /* store the value in ints/second */ 2103 /* store the value in ints/second */
2017 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2104 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2018 2105
@@ -2021,32 +2108,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2021 /* clear the lower bit as it's used for dynamic state */ 2108 /* clear the lower bit as it's used for dynamic state */
2022 adapter->rx_itr_setting &= ~1; 2109 adapter->rx_itr_setting &= ~1;
2023 } else if (ec->rx_coalesce_usecs == 1) { 2110 } else if (ec->rx_coalesce_usecs == 1) {
2111 /* check the old value and enable RSC if necessary */
2112 need_reset = ixgbe_update_rsc(adapter, ec);
2113
2024 /* 1 means dynamic mode */ 2114 /* 1 means dynamic mode */
2025 adapter->rx_eitr_param = 20000; 2115 adapter->rx_eitr_param = 20000;
2026 adapter->rx_itr_setting = 1; 2116 adapter->rx_itr_setting = 1;
2027 } else { 2117 } else {
2118 /* check the old value and enable RSC if necessary */
2119 need_reset = ixgbe_update_rsc(adapter, ec);
2028 /* 2120 /*
2029 * any other value means disable eitr, which is best 2121 * any other value means disable eitr, which is best
2030 * served by setting the interrupt rate very high 2122 * served by setting the interrupt rate very high
2031 */ 2123 */
2032 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; 2124 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2033 adapter->rx_itr_setting = 0; 2125 adapter->rx_itr_setting = 0;
2034
2035 /*
2036 * if hardware RSC is enabled, disable it when
2037 * setting low latency mode, to avoid errata, assuming
2038 * that when the user set low latency mode they want
2039 * it at the cost of anything else
2040 */
2041 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2042 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2043 if (netdev->features & NETIF_F_LRO) {
2044 netdev->features &= ~NETIF_F_LRO;
2045 e_info(probe, "rx-usecs set to 0, "
2046 "disabling RSC\n");
2047 }
2048 need_reset = true;
2049 }
2050 } 2126 }
2051 2127
2052 if (ec->tx_coalesce_usecs > 1) { 2128 if (ec->tx_coalesce_usecs > 1) {
@@ -2127,34 +2203,45 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2127 need_reset = (data & ETH_FLAG_RXVLAN) != 2203 need_reset = (data & ETH_FLAG_RXVLAN) !=
2128 (netdev->features & NETIF_F_HW_VLAN_RX); 2204 (netdev->features & NETIF_F_HW_VLAN_RX);
2129 2205
2130 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | 2206 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
2131 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); 2207 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
2132 if (rc) 2208 if (rc)
2133 return rc; 2209 return rc;
2134 2210
2135 /* if state changes we need to update adapter->flags and reset */ 2211 /* if state changes we need to update adapter->flags and reset */
2136 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { 2212 if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2137 /* 2213 (!!(data & ETH_FLAG_LRO) !=
2138 * cast both to bool and verify if they are set the same 2214 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2139 * but only enable RSC if itr is non-zero, as 2215 if ((data & ETH_FLAG_LRO) &&
2140 * itr=0 and RSC are mutually exclusive 2216 (!adapter->rx_itr_setting ||
2141 */ 2217 (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
2142 if (((!!(data & ETH_FLAG_LRO)) != 2218 e_info(probe, "rx-usecs set too low, "
2143 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && 2219 "not enabling RSC.\n");
2144 adapter->rx_itr_setting) { 2220 } else {
2145 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2221 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2146 switch (adapter->hw.mac.type) { 2222 switch (adapter->hw.mac.type) {
2147 case ixgbe_mac_82599EB: 2223 case ixgbe_mac_82599EB:
2148 need_reset = true; 2224 need_reset = true;
2149 break; 2225 break;
2226 case ixgbe_mac_X540: {
2227 int i;
2228 for (i = 0; i < adapter->num_rx_queues; i++) {
2229 struct ixgbe_ring *ring =
2230 adapter->rx_ring[i];
2231 if (adapter->flags2 &
2232 IXGBE_FLAG2_RSC_ENABLED) {
2233 ixgbe_configure_rscctl(adapter,
2234 ring);
2235 } else {
2236 ixgbe_clear_rscctl(adapter,
2237 ring);
2238 }
2239 }
2240 }
2241 break;
2150 default: 2242 default:
2151 break; 2243 break;
2152 } 2244 }
2153 } else if (!adapter->rx_itr_setting) {
2154 netdev->features &= ~NETIF_F_LRO;
2155 if (data & ETH_FLAG_LRO)
2156 e_info(probe, "rx-usecs set to 0, "
2157 "LRO/RSC cannot be enabled.\n");
2158 } 2245 }
2159 } 2246 }
2160 2247
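The rewritten condition above normalizes both flag tests with !! so two bits that live at different positions can be compared as booleans, then flips the RSC bit with XOR only when they disagree. The same logic in isolation (bit values made up for the demo):

    #include <stdio.h>

    #define ETH_FLAG_LRO      0x8000u /* illustrative bit values */
    #define FLAG2_RSC_ENABLED 0x0040u

    int main(void)
    {
    	unsigned int flags2 = 0;          /* RSC currently off */
    	unsigned int data = ETH_FLAG_LRO; /* user requested LRO */

    	/* !! collapses any nonzero test to exactly 1, so flags at
    	 * different bit positions can be compared directly */
    	if (!!(data & ETH_FLAG_LRO) != !!(flags2 & FLAG2_RSC_ENABLED)) {
    		flags2 ^= FLAG2_RSC_ENABLED; /* toggle to match the request */
    		printf("RSC %s\n",
    		       flags2 & FLAG2_RSC_ENABLED ? "enabled" : "disabled");
    	}
    	return 0;
    }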
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8e..6342d4859790 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
69{ 69{
70 ddp->len = 0; 70 ddp->len = 0;
71 ddp->err = 0; 71 ddp->err = 1;
72 ddp->udl = NULL; 72 ddp->udl = NULL;
73 ddp->udp = 0UL; 73 ddp->udp = 0UL;
74 ddp->sgl = NULL; 74 ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
92 struct ixgbe_fcoe *fcoe; 92 struct ixgbe_fcoe *fcoe;
93 struct ixgbe_adapter *adapter; 93 struct ixgbe_adapter *adapter;
94 struct ixgbe_fcoe_ddp *ddp; 94 struct ixgbe_fcoe_ddp *ddp;
95 u32 fcbuff;
95 96
96 if (!netdev) 97 if (!netdev)
97 goto out_ddp_put; 98 goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
115 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
117 (xid | IXGBE_FCDMARW_WE)); 118 (xid | IXGBE_FCDMARW_WE));
119
120 /* guaranteed to be invalidated after 100us */
121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
122 (xid | IXGBE_FCDMARW_RE));
123 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
118 spin_unlock_bh(&fcoe->lock); 124 spin_unlock_bh(&fcoe->lock);
125 if (fcbuff & IXGBE_FCBUFF_VALID)
126 udelay(100);
119 } 127 }
120 if (ddp->sgl) 128 if (ddp->sgl)
121 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, 129 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
168 return 0; 176 return 0;
169 } 177 }
170 178
179 /* no DDP if we are already down or resetting */
180 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
181 test_bit(__IXGBE_RESETTING, &adapter->state))
182 return 0;
183
171 fcoe = &adapter->fcoe; 184 fcoe = &adapter->fcoe;
172 if (!fcoe->pool) { 185 if (!fcoe->pool) {
173 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 186 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
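The ddp_put hunk above pairs the write-to-invalidate with a read-back of FCBUFF and a single udelay(100) when the context is still marked valid, relying on the stated 100us completion guarantee. A simulated sketch of that handshake; register indices and bit values here are illustrative, not the real layout:

    #include <stdio.h>
    #include <stdint.h>

    enum { FCDMARW, FCBUFF, NREGS };
    static uint32_t regs[NREGS] = { 0, 0x1 }; /* FCBUFF starts "valid" */

    #define FCDMARW_WE   0x4000u /* illustrative bit positions */
    #define FCDMARW_RE   0x8000u
    #define FCBUFF_VALID 0x0001u

    static void ddp_invalidate(uint32_t xid)
    {
    	/* ask hardware to tear down the DDP context for this exchange */
    	regs[FCDMARW] = xid | FCDMARW_WE;

    	/* read the context back: is it still marked valid? */
    	regs[FCDMARW] = xid | FCDMARW_RE;
    	if (regs[FCBUFF] & FCBUFF_VALID) {
    		/* invalidation finishes within 100us, so one fixed
    		 * delay is enough (the udelay(100) in the hunk) */
    		puts("still valid, delaying 100us once");
    		regs[FCBUFF] &= ~FCBUFF_VALID; /* simulate completion */
    	}
    }

    int main(void)
    {
    	ddp_invalidate(0x42);
    	return 0;
    }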
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1..ca9036de49f9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "2.0.84-k2" 55#define DRV_VERSION "3.0.12-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 59static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 60 [board_82598] = &ixgbe_82598_info,
61 [board_82599] = &ixgbe_82599_info, 61 [board_82599] = &ixgbe_82599_info,
62 [board_X540] = &ixgbe_X540_info,
62}; 63};
63 64
64/* ixgbe_pci_tbl - PCI Device ID Table 65/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +109,16 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
108 board_82599 }, 109 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 }, 111 board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
113 board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
115 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
112 board_82599 }, 117 board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
114 board_82599 }, 119 board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
121 board_X540 },
115 122
116 /* required last entry */ 123 /* required last entry */
117 {0, } 124 {0, }
@@ -560,6 +567,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
560 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 567 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
561 break; 568 break;
562 case ixgbe_mac_82599EB: 569 case ixgbe_mac_82599EB:
570 case ixgbe_mac_X540:
563 if (direction == -1) { 571 if (direction == -1) {
564 /* other causes */ 572 /* other causes */
565 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 573 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +597,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
589{ 597{
590 u32 mask; 598 u32 mask;
591 599
592 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 600 switch (adapter->hw.mac.type) {
601 case ixgbe_mac_82598EB:
593 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 602 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
594 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 603 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
595 } else { 604 break;
605 case ixgbe_mac_82599EB:
606 case ixgbe_mac_X540:
596 mask = (qmask & 0xFFFFFFFF); 607 mask = (qmask & 0xFFFFFFFF);
597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 608 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
598 mask = (qmask >> 32); 609 mask = (qmask >> 32);
599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 610 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
611 break;
612 default:
613 break;
600 } 614 }
601} 615}
602 616
603void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 617void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
604 struct ixgbe_tx_buffer 618 struct ixgbe_tx_buffer *tx_buffer_info)
605 *tx_buffer_info)
606{ 619{
607 if (tx_buffer_info->dma) { 620 if (tx_buffer_info->dma) {
608 if (tx_buffer_info->mapped_as_page) 621 if (tx_buffer_info->mapped_as_page)
609 dma_unmap_page(&adapter->pdev->dev, 622 dma_unmap_page(tx_ring->dev,
610 tx_buffer_info->dma, 623 tx_buffer_info->dma,
611 tx_buffer_info->length, 624 tx_buffer_info->length,
612 DMA_TO_DEVICE); 625 DMA_TO_DEVICE);
613 else 626 else
614 dma_unmap_single(&adapter->pdev->dev, 627 dma_unmap_single(tx_ring->dev,
615 tx_buffer_info->dma, 628 tx_buffer_info->dma,
616 tx_buffer_info->length, 629 tx_buffer_info->length,
617 DMA_TO_DEVICE); 630 DMA_TO_DEVICE);
@@ -626,92 +639,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
626} 639}
627 640
628/** 641/**
629 * ixgbe_tx_xon_state - check the tx ring xon state 642 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
630 * @adapter: the ixgbe adapter 643 * @adapter: driver private struct
631 * @tx_ring: the corresponding tx_ring 644 * @reg_idx: reg idx of queue to query (0-127)
632 * 645 *
633 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the 646 * Helper function to determine the traffic class for a particular
634 * corresponding TC of this tx_ring when checking TFCS. 647 * register index.
635 * 648 *
636 * Returns : true if in xon state (currently not paused) 649 * Returns : a TC index in the range 0-7 (8 TCs) or 0-3 (4 TCs)
637 */ 650 */
638static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, 651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
639 struct ixgbe_ring *tx_ring)
640{ 652{
641 u32 txoff = IXGBE_TFCS_TXOFF; 653 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
642 655
643#ifdef CONFIG_IXGBE_DCB 656 /* if DCB is not enabled the queues have no TC */
644 if (adapter->dcb_cfg.pfc_mode_enable) { 657 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
645 int tc; 658 return tc;
646 int reg_idx = tx_ring->reg_idx; 659
647 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 660 /* check valid range */
661 if (reg_idx >= adapter->hw.mac.max_tx_queues)
662 return tc;
663
664 switch (adapter->hw.mac.type) {
665 case ixgbe_mac_82598EB:
666 tc = reg_idx >> 2;
667 break;
668 default:
669 if (dcb_i != 4 && dcb_i != 8)
670 break;
671
672 /* if VMDq is enabled the lowest order bits determine TC */
673 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
674 IXGBE_FLAG_VMDQ_ENABLED)) {
675 tc = reg_idx & (dcb_i - 1);
676 break;
677 }
678
679 /*
680 * Convert the reg_idx into the correct TC. This bitmask
681 * targets the last full 32 ring traffic class and assigns
682 * it a value of 1. From there the rest of the rings are
683 * based on shifting the mask further up to include the
684 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
685 * will only ever be 8 or 4 and that reg_idx will never
686 * be greater than 128. The code without the power of 2
687 * optimizations would be:
688 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
689 */
690 tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
691 tc >>= 9 - (reg_idx >> 5);
692 }
693
694 return tc;
695}
696
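The bit trick documented above can be sanity-checked numerically; the snippet below evaluates the new tc computation for a few ring indices in both the 8-TC and 4-TC layouts:

    #include <stdio.h>

    static int txq_to_tc(int reg_idx, int dcb_i)
    {
    	int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;

    	return tc >> (9 - (reg_idx >> 5));
    }

    int main(void)
    {
    	const int idx[] = { 0, 31, 32, 64, 80, 96, 104, 120 };
    	size_t i;

    	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
    		printf("reg_idx %3d -> TC %d (dcb_i=8) / TC %d (dcb_i=4)\n",
    		       idx[i], txq_to_tc(idx[i], 8), txq_to_tc(idx[i], 4));
    	return 0;
    }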
697static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
698{
699 struct ixgbe_hw *hw = &adapter->hw;
700 struct ixgbe_hw_stats *hwstats = &adapter->stats;
701 u32 data = 0;
702 u32 xoff[8] = {0};
703 int i;
648 704
649 switch (adapter->hw.mac.type) { 705 if ((hw->fc.current_mode == ixgbe_fc_full) ||
706 (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
707 switch (hw->mac.type) {
650 case ixgbe_mac_82598EB: 708 case ixgbe_mac_82598EB:
651 tc = reg_idx >> 2; 709 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
652 txoff = IXGBE_TFCS_TXOFF0;
653 break; 710 break;
654 case ixgbe_mac_82599EB: 711 default:
655 tc = 0; 712 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
656 txoff = IXGBE_TFCS_TXOFF; 713 }
657 if (dcb_i == 8) { 714 hwstats->lxoffrxc += data;
658 /* TC0, TC1 */ 715
659 tc = reg_idx >> 5; 716 /* refill credits (no tx hang) if we received xoff */
660 if (tc == 2) /* TC2, TC3 */ 717 if (!data)
661 tc += (reg_idx - 64) >> 4; 718 return;
662 else if (tc == 3) /* TC4, TC5, TC6, TC7 */ 719
663 tc += 1 + ((reg_idx - 96) >> 3); 720 for (i = 0; i < adapter->num_tx_queues; i++)
664 } else if (dcb_i == 4) { 721 clear_bit(__IXGBE_HANG_CHECK_ARMED,
665 /* TC0, TC1 */ 722 &adapter->tx_ring[i]->state);
666 tc = reg_idx >> 6; 723 return;
667 if (tc == 1) { 724 } else if (!(adapter->dcb_cfg.pfc_mode_enable))
668 tc += (reg_idx - 64) >> 5; 725 return;
669 if (tc == 2) /* TC2, TC3 */ 726
670 tc += (reg_idx - 96) >> 4; 727 /* update stats for each tc, only valid with PFC enabled */
671 } 728 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
672 } 729 switch (hw->mac.type) {
730 case ixgbe_mac_82598EB:
731 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
673 break; 732 break;
674 default: 733 default:
675 tc = 0; 734 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
676 } 735 }
677 txoff <<= tc; 736 hwstats->pxoffrxc[i] += xoff[i];
737 }
738
739 /* disarm tx queues that have received xoff frames */
740 for (i = 0; i < adapter->num_tx_queues; i++) {
741 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
742 u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
743
744 if (xoff[tc])
745 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
678 } 746 }
679#endif
680 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
681} 747}
682 748
683static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 749static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
684 struct ixgbe_ring *tx_ring,
685 unsigned int eop)
686{ 750{
751 return ring->tx_stats.completed;
752}
753
754static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
755{
756 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
687 struct ixgbe_hw *hw = &adapter->hw; 757 struct ixgbe_hw *hw = &adapter->hw;
688 758
689 /* Detect a transmit hang in hardware, this serializes the 759 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
690 * check with the clearing of time_stamp and movement of eop */ 760 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
691 adapter->detect_tx_hung = false; 761
692 if (tx_ring->tx_buffer_info[eop].time_stamp && 762 if (head != tail)
693 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 763 return (head < tail) ?
694 ixgbe_tx_xon_state(adapter, tx_ring)) { 764 tail - head : (tail + ring->count - head);
695 /* detected Tx unit hang */ 765
696 union ixgbe_adv_tx_desc *tx_desc; 766 return 0;
697 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); 767}
698 e_err(drv, "Detected Tx Unit Hang\n" 768
699 " Tx Queue <%d>\n" 769static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
700 " TDH, TDT <%x>, <%x>\n" 770{
701 " next_to_use <%x>\n" 771 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
702 " next_to_clean <%x>\n" 772 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
703 "tx_buffer_info[next_to_clean]\n" 773 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
704 " time_stamp <%lx>\n" 774 bool ret = false;
705 " jiffies <%lx>\n", 775
706 tx_ring->queue_index, 776 clear_check_for_tx_hang(tx_ring);
707 IXGBE_READ_REG(hw, tx_ring->head), 777
708 IXGBE_READ_REG(hw, tx_ring->tail), 778 /*
709 tx_ring->next_to_use, eop, 779 * Check for a hung queue, but be thorough. This verifies
710 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 780 * that a transmit has been completed since the previous
711 return true; 781 * check AND there is at least one packet pending. The
782 * ARMED bit is set to indicate a potential hang. The
783 * bit is cleared if a pause frame is received to remove
784 * false hang detection due to PFC or 802.3x frames. By
785 * requiring this to fail twice we avoid races with
786 * pfc clearing the ARMED bit and conditions where we
787 * run the check_tx_hang logic with a transmit completion
788 * pending but without time to complete it yet.
789 */
790 if ((tx_done_old == tx_done) && tx_pending) {
791 /* make sure it is true for two checks in a row */
792 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
793 &tx_ring->state);
794 } else {
795 /* update completed stats and continue */
796 tx_ring->tx_stats.tx_done_old = tx_done;
797 /* reset the countdown */
798 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
712 } 799 }
713 800
714 return false; 801 return ret;
715} 802}
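The new hang detection above combines two pieces: ring occupancy derived from the head/tail registers (with wraparound), and a two-strike ARMED bit so a single stalled sample never reports a hang by itself. A condensed model of both, with illustrative field names:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
    	uint32_t head, tail, count;   /* hardware consumer/producer view */
    	uint64_t completed, done_old; /* completion counters */
    	bool armed;                   /* set on the first suspicious sample */
    };

    static uint32_t pending(const struct ring *r)
    {
    	if (r->head == r->tail)
    		return 0;
    	return (r->head < r->tail) ? r->tail - r->head
    				   : r->tail + r->count - r->head;
    }

    static bool check_hang(struct ring *r)
    {
    	if (r->done_old == r->completed && pending(r)) {
    		bool was_armed = r->armed;

    		r->armed = true;    /* first strike arms the check... */
    		return was_armed;   /* ...second strike reports the hang */
    	}
    	r->done_old = r->completed; /* progress was made: reset countdown */
    	r->armed = false;
    	return false;
    }

    int main(void)
    {
    	struct ring r = { .head = 10, .tail = 4, .count = 512 };
    	bool first, second;

    	first = check_hang(&r);     /* arms, does not report */
    	second = check_hang(&r);    /* same stall twice: reports */
    	printf("pending=%u first=%d second=%d\n", pending(&r), first, second);
    	return 0;
    }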
716 803
717#define IXGBE_MAX_TXD_PWR 14 804#define IXGBE_MAX_TXD_PWR 14
@@ -734,11 +821,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
734 struct ixgbe_ring *tx_ring) 821 struct ixgbe_ring *tx_ring)
735{ 822{
736 struct ixgbe_adapter *adapter = q_vector->adapter; 823 struct ixgbe_adapter *adapter = q_vector->adapter;
737 struct net_device *netdev = adapter->netdev;
738 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 824 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
739 struct ixgbe_tx_buffer *tx_buffer_info; 825 struct ixgbe_tx_buffer *tx_buffer_info;
740 unsigned int i, eop, count = 0;
741 unsigned int total_bytes = 0, total_packets = 0; 826 unsigned int total_bytes = 0, total_packets = 0;
827 u16 i, eop, count = 0;
742 828
743 i = tx_ring->next_to_clean; 829 i = tx_ring->next_to_clean;
744 eop = tx_ring->tx_buffer_info[i].next_to_watch; 830 eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,147 +835,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
749 bool cleaned = false; 835 bool cleaned = false;
750 rmb(); /* read buffer_info after eop_desc */ 836 rmb(); /* read buffer_info after eop_desc */
751 for ( ; !cleaned; count++) { 837 for ( ; !cleaned; count++) {
752 struct sk_buff *skb;
753 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); 838 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
754 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 839 tx_buffer_info = &tx_ring->tx_buffer_info[i];
755 cleaned = (i == eop);
756 skb = tx_buffer_info->skb;
757
758 if (cleaned && skb) {
759 unsigned int segs, bytecount;
760 unsigned int hlen = skb_headlen(skb);
761
762 /* gso_segs is currently only valid for tcp */
763 segs = skb_shinfo(skb)->gso_segs ?: 1;
764#ifdef IXGBE_FCOE
765 /* adjust for FCoE Sequence Offload */
766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
767 && (skb->protocol == htons(ETH_P_FCOE)) &&
768 skb_is_gso(skb)) {
769 hlen = skb_transport_offset(skb) +
770 sizeof(struct fc_frame_header) +
771 sizeof(struct fcoe_crc_eof);
772 segs = DIV_ROUND_UP(skb->len - hlen,
773 skb_shinfo(skb)->gso_size);
774 }
775#endif /* IXGBE_FCOE */
776 /* multiply data chunks by size of headers */
777 bytecount = ((segs - 1) * hlen) + skb->len;
778 total_packets += segs;
779 total_bytes += bytecount;
780 }
781
782 ixgbe_unmap_and_free_tx_resource(adapter,
783 tx_buffer_info);
784 840
785 tx_desc->wb.status = 0; 841 tx_desc->wb.status = 0;
842 cleaned = (i == eop);
786 843
787 i++; 844 i++;
788 if (i == tx_ring->count) 845 if (i == tx_ring->count)
789 i = 0; 846 i = 0;
847
848 if (cleaned && tx_buffer_info->skb) {
849 total_bytes += tx_buffer_info->bytecount;
850 total_packets += tx_buffer_info->gso_segs;
851 }
852
853 ixgbe_unmap_and_free_tx_resource(tx_ring,
854 tx_buffer_info);
790 } 855 }
791 856
857 tx_ring->tx_stats.completed++;
792 eop = tx_ring->tx_buffer_info[i].next_to_watch; 858 eop = tx_ring->tx_buffer_info[i].next_to_watch;
793 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); 859 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
794 } 860 }
795 861
796 tx_ring->next_to_clean = i; 862 tx_ring->next_to_clean = i;
863 tx_ring->total_bytes += total_bytes;
864 tx_ring->total_packets += total_packets;
865 u64_stats_update_begin(&tx_ring->syncp);
866 tx_ring->stats.packets += total_packets;
867 tx_ring->stats.bytes += total_bytes;
868 u64_stats_update_end(&tx_ring->syncp);
869
870 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
871 /* schedule immediate reset if we believe we hung */
872 struct ixgbe_hw *hw = &adapter->hw;
873 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
874 e_err(drv, "Detected Tx Unit Hang\n"
875 " Tx Queue <%d>\n"
876 " TDH, TDT <%x>, <%x>\n"
877 " next_to_use <%x>\n"
878 " next_to_clean <%x>\n"
879 "tx_buffer_info[next_to_clean]\n"
880 " time_stamp <%lx>\n"
881 " jiffies <%lx>\n",
882 tx_ring->queue_index,
883 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
884 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
885 tx_ring->next_to_use, eop,
886 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
887
888 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
889
890 e_info(probe,
891 "tx hang %d detected on queue %d, resetting adapter\n",
892 adapter->tx_timeout_count + 1, tx_ring->queue_index);
893
894 /* schedule immediate reset if we believe we hung */
895 ixgbe_tx_timeout(adapter->netdev);
896
897 /* the adapter is about to reset, no point in enabling stuff */
898 return true;
899 }
797 900
798#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 901#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
799 if (unlikely(count && netif_carrier_ok(netdev) && 902 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
800 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 903 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
801 /* Make sure that anybody stopping the queue after this 904 /* Make sure that anybody stopping the queue after this
802 * sees the new next_to_clean. 905 * sees the new next_to_clean.
803 */ 906 */
804 smp_mb(); 907 smp_mb();
805 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 908 if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
806 !test_bit(__IXGBE_DOWN, &adapter->state)) { 909 !test_bit(__IXGBE_DOWN, &adapter->state)) {
807 netif_wake_subqueue(netdev, tx_ring->queue_index); 910 netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
808 ++tx_ring->restart_queue; 911 ++tx_ring->tx_stats.restart_queue;
809 }
810 }
811
812 if (adapter->detect_tx_hung) {
813 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
814 /* schedule immediate reset if we believe we hung */
815 e_info(probe, "tx hang %d detected, resetting "
816 "adapter\n", adapter->tx_timeout_count + 1);
817 ixgbe_tx_timeout(adapter->netdev);
818 } 912 }
819 } 913 }
820 914
821 /* re-arm the interrupt */
822 if (count >= tx_ring->work_limit)
823 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
824
825 tx_ring->total_bytes += total_bytes;
826 tx_ring->total_packets += total_packets;
827 u64_stats_update_begin(&tx_ring->syncp);
828 tx_ring->stats.packets += total_packets;
829 tx_ring->stats.bytes += total_bytes;
830 u64_stats_update_end(&tx_ring->syncp);
831 return count < tx_ring->work_limit; 915 return count < tx_ring->work_limit;
832} 916}
833 917
834#ifdef CONFIG_IXGBE_DCA 918#ifdef CONFIG_IXGBE_DCA
835static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 919static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
836 struct ixgbe_ring *rx_ring) 920 struct ixgbe_ring *rx_ring,
921 int cpu)
837{ 922{
923 struct ixgbe_hw *hw = &adapter->hw;
838 u32 rxctrl; 924 u32 rxctrl;
839 int cpu = get_cpu(); 925 u8 reg_idx = rx_ring->reg_idx;
840 int q = rx_ring->reg_idx; 926
841 927 rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
842 if (rx_ring->cpu != cpu) { 928 switch (hw->mac.type) {
843 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 929 case ixgbe_mac_82598EB:
844 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 930 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
845 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 931 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
846 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 932 break;
847 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 933 case ixgbe_mac_82599EB:
848 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; 934 case ixgbe_mac_X540:
849 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 935 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
850 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); 936 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
851 } 937 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
852 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 938 break;
853 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 939 default:
854 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 940 break;
855 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
856 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
857 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
858 rx_ring->cpu = cpu;
859 } 941 }
860 put_cpu(); 942 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
943 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
944 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
945 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
946 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
947 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
861} 948}
862 949
863static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 950static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
864 struct ixgbe_ring *tx_ring) 951 struct ixgbe_ring *tx_ring,
952 int cpu)
865{ 953{
954 struct ixgbe_hw *hw = &adapter->hw;
866 u32 txctrl; 955 u32 txctrl;
956 u8 reg_idx = tx_ring->reg_idx;
957
958 switch (hw->mac.type) {
959 case ixgbe_mac_82598EB:
960 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
961 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
962 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
963 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
964 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
965 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
966 break;
967 case ixgbe_mac_82599EB:
968 case ixgbe_mac_X540:
969 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
970 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
971 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
972 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
973 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
974 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
975 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
976 break;
977 default:
978 break;
979 }
980}
981
982static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
983{
984 struct ixgbe_adapter *adapter = q_vector->adapter;
867 int cpu = get_cpu(); 985 int cpu = get_cpu();
868 int q = tx_ring->reg_idx; 986 long r_idx;
869 struct ixgbe_hw *hw = &adapter->hw; 987 int i;
870 988
871 if (tx_ring->cpu != cpu) { 989 if (q_vector->cpu == cpu)
872 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 990 goto out_no_update;
873 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); 991
874 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 992 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
875 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 993 for (i = 0; i < q_vector->txr_count; i++) {
876 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 994 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
877 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); 995 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
878 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 996 r_idx + 1);
879 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
880 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
881 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
882 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
883 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
884 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
885 }
886 tx_ring->cpu = cpu;
887 } 997 }
998
999 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1000 for (i = 0; i < q_vector->rxr_count; i++) {
1001 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
1002 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1003 r_idx + 1);
1004 }
1005
1006 q_vector->cpu = cpu;
1007out_no_update:
888 put_cpu(); 1008 put_cpu();
889} 1009}
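ixgbe_update_dca() above walks only the rings named in each q_vector's index bitmaps instead of touching every ring in the adapter. A user-space stand-in for that find_first_bit()/find_next_bit() walk:

    #include <stdio.h>

    /* simplified stand-in for find_first_bit()/find_next_bit() */
    static int next_bit(unsigned long map, int from, int max)
    {
    	int i;

    	for (i = from; i < max; i++)
    		if (map & (1UL << i))
    			return i;
    	return max;
    }

    int main(void)
    {
    	unsigned long txr_idx = 0x0A; /* this vector owns tx rings 1 and 3 */
    	int nrings = 8, i;

    	for (i = next_bit(txr_idx, 0, nrings); i < nrings;
    	     i = next_bit(txr_idx, i + 1, nrings))
    		printf("program DCA tag for tx ring %d\n", i);
    	return 0;
    }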
890 1010
891static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 1011static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
892{ 1012{
1013 int num_q_vectors;
893 int i; 1014 int i;
894 1015
895 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 1016 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -898,22 +1019,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
898 /* always use CB2 mode, difference is masked in the CB driver */ 1019 /* always use CB2 mode, difference is masked in the CB driver */
899 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 1020 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
900 1021
901 for (i = 0; i < adapter->num_tx_queues; i++) { 1022 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
902 adapter->tx_ring[i]->cpu = -1; 1023 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
903 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); 1024 else
904 } 1025 num_q_vectors = 1;
905 for (i = 0; i < adapter->num_rx_queues; i++) { 1026
906 adapter->rx_ring[i]->cpu = -1; 1027 for (i = 0; i < num_q_vectors; i++) {
907 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); 1028 adapter->q_vector[i]->cpu = -1;
1029 ixgbe_update_dca(adapter->q_vector[i]);
908 } 1030 }
909} 1031}
910 1032
911static int __ixgbe_notify_dca(struct device *dev, void *data) 1033static int __ixgbe_notify_dca(struct device *dev, void *data)
912{ 1034{
913 struct net_device *netdev = dev_get_drvdata(dev); 1035 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
914 struct ixgbe_adapter *adapter = netdev_priv(netdev);
915 unsigned long event = *(unsigned long *)data; 1036 unsigned long event = *(unsigned long *)data;
916 1037
1038 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1039 return 0;
1040
917 switch (event) { 1041 switch (event) {
918 case DCA_PROVIDER_ADD: 1042 case DCA_PROVIDER_ADD:
919 /* if we're already enabled, don't do it again */ 1043 /* if we're already enabled, don't do it again */
@@ -1012,8 +1136,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1012 skb->ip_summed = CHECKSUM_UNNECESSARY; 1136 skb->ip_summed = CHECKSUM_UNNECESSARY;
1013} 1137}
1014 1138
1015static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, 1139static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1016 struct ixgbe_ring *rx_ring, u32 val)
1017{ 1140{
1018 /* 1141 /*
1019 * Force memory writes to complete before letting h/w 1142 * Force memory writes to complete before letting h/w
@@ -1022,72 +1145,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
1022 * such as IA-64). 1145 * such as IA-64).
1023 */ 1146 */
1024 wmb(); 1147 wmb();
1025 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); 1148 writel(val, rx_ring->tail);
1026} 1149}
1027 1150
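ixgbe_release_rx_desc() above shrinks to a bare writel() on the cached tail pointer, but the wmb() stays: descriptor fills must be globally visible before the producer index that publishes them to hardware. A compiler-level sketch of that ordering contract:

    #include <stdio.h>
    #include <stdint.h>

    struct rx_desc { uint64_t pkt_addr; };

    static struct rx_desc ring[8];
    static volatile uint32_t tail;  /* stands in for the MMIO tail register */

    static void publish(uint32_t idx, uint64_t dma)
    {
    	ring[idx].pkt_addr = dma;   /* fill the descriptor(s) first */
    	__sync_synchronize();       /* wmb(): fills ordered before the tail write */
    	tail = idx;                 /* hardware may now fetch up to idx */
    }

    int main(void)
    {
    	publish(3, 0x1000);
    	printf("tail=%u pkt_addr=0x%llx\n", tail,
    	       (unsigned long long)ring[3].pkt_addr);
    	return 0;
    }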
1028/** 1151/**
1029 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 1152 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1030 * @adapter: address of board private structure 1153 * @rx_ring: ring to place buffers on
1154 * @cleaned_count: number of buffers to replace
1031 **/ 1155 **/
1032void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 1156void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1033 struct ixgbe_ring *rx_ring,
1034 int cleaned_count)
1035{ 1157{
1036 struct net_device *netdev = adapter->netdev;
1037 struct pci_dev *pdev = adapter->pdev;
1038 union ixgbe_adv_rx_desc *rx_desc; 1158 union ixgbe_adv_rx_desc *rx_desc;
1039 struct ixgbe_rx_buffer *bi; 1159 struct ixgbe_rx_buffer *bi;
1040 unsigned int i; 1160 struct sk_buff *skb;
1041 unsigned int bufsz = rx_ring->rx_buf_len; 1161 u16 i = rx_ring->next_to_use;
1042 1162
1043 i = rx_ring->next_to_use; 1163 /* do nothing if no valid netdev defined */
1044 bi = &rx_ring->rx_buffer_info[i]; 1164 if (!rx_ring->netdev)
1165 return;
1045 1166
1046 while (cleaned_count--) { 1167 while (cleaned_count--) {
1047 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1168 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1169 bi = &rx_ring->rx_buffer_info[i];
1170 skb = bi->skb;
1048 1171
1049 if (!bi->page_dma && 1172 if (!skb) {
1050 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { 1173 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1051 if (!bi->page) { 1174 rx_ring->rx_buf_len);
1052 bi->page = netdev_alloc_page(netdev);
1053 if (!bi->page) {
1054 adapter->alloc_rx_page_failed++;
1055 goto no_buffers;
1056 }
1057 bi->page_offset = 0;
1058 } else {
1059 /* use a half page if we're re-using */
1060 bi->page_offset ^= (PAGE_SIZE / 2);
1061 }
1062
1063 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
1064 bi->page_offset,
1065 (PAGE_SIZE / 2),
1066 DMA_FROM_DEVICE);
1067 }
1068
1069 if (!bi->skb) {
1070 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
1071 bufsz);
1072 bi->skb = skb;
1073
1074 if (!skb) { 1175 if (!skb) {
1075 adapter->alloc_rx_buff_failed++; 1176 rx_ring->rx_stats.alloc_rx_buff_failed++;
1076 goto no_buffers; 1177 goto no_buffers;
1077 } 1178 }
1078 /* initialize queue mapping */ 1179 /* initialize queue mapping */
1079 skb_record_rx_queue(skb, rx_ring->queue_index); 1180 skb_record_rx_queue(skb, rx_ring->queue_index);
1181 bi->skb = skb;
1080 } 1182 }
1081 1183
1082 if (!bi->dma) { 1184 if (!bi->dma) {
1083 bi->dma = dma_map_single(&pdev->dev, 1185 bi->dma = dma_map_single(rx_ring->dev,
1084 bi->skb->data, 1186 skb->data,
1085 rx_ring->rx_buf_len, 1187 rx_ring->rx_buf_len,
1086 DMA_FROM_DEVICE); 1188 DMA_FROM_DEVICE);
1189 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1190 rx_ring->rx_stats.alloc_rx_buff_failed++;
1191 bi->dma = 0;
1192 goto no_buffers;
1193 }
1087 } 1194 }
1088 /* Refresh the desc even if buffer_addrs didn't change because 1195
1089 * each write-back erases this info. */ 1196 if (ring_is_ps_enabled(rx_ring)) {
1090 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 1197 if (!bi->page) {
1198 bi->page = netdev_alloc_page(rx_ring->netdev);
1199 if (!bi->page) {
1200 rx_ring->rx_stats.alloc_rx_page_failed++;
1201 goto no_buffers;
1202 }
1203 }
1204
1205 if (!bi->page_dma) {
1206 /* use a half page if we're re-using */
1207 bi->page_offset ^= PAGE_SIZE / 2;
1208 bi->page_dma = dma_map_page(rx_ring->dev,
1209 bi->page,
1210 bi->page_offset,
1211 PAGE_SIZE / 2,
1212 DMA_FROM_DEVICE);
1213 if (dma_mapping_error(rx_ring->dev,
1214 bi->page_dma)) {
1215 rx_ring->rx_stats.alloc_rx_page_failed++;
1216 bi->page_dma = 0;
1217 goto no_buffers;
1218 }
1219 }
1220
1221 /* Refresh the desc even if buffer_addrs didn't change
1222 * because each write-back erases this info. */
1091 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 1223 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1092 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 1224 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1093 } else { 1225 } else {
@@ -1098,56 +1230,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1098 i++; 1230 i++;
1099 if (i == rx_ring->count) 1231 if (i == rx_ring->count)
1100 i = 0; 1232 i = 0;
1101 bi = &rx_ring->rx_buffer_info[i];
1102 } 1233 }
1103 1234
1104no_buffers: 1235no_buffers:
1105 if (rx_ring->next_to_use != i) { 1236 if (rx_ring->next_to_use != i) {
1106 rx_ring->next_to_use = i; 1237 rx_ring->next_to_use = i;
1107 if (i-- == 0) 1238 ixgbe_release_rx_desc(rx_ring, i);
1108 i = (rx_ring->count - 1);
1109
1110 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
1111 } 1239 }
1112} 1240}
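The reworked refill loop above allocates the skb, maps it, and checks dma_mapping_error(), zeroing the stored handle on failure so the slot stays retryable on the next pass. Condensed shape of the loop, with allocation and mapping simulated:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct rx_buf { void *skb; uint64_t dma; };

    static uint64_t fake_map(void *p)   /* stand-in for dma_map_single() */
    {
    	return (uint64_t)(uintptr_t)p;  /* 0 would mean "mapping failed" */
    }

    int main(void)
    {
    	struct rx_buf ring[4] = { { 0 } };
    	unsigned int i = 0, cleaned_count = 4;

    	while (cleaned_count--) {
    		struct rx_buf *bi = &ring[i];

    		if (!bi->skb && !(bi->skb = malloc(2048)))
    			break;          /* alloc failed: bail, count it */
    		if (!bi->dma) {
    			bi->dma = fake_map(bi->skb);
    			if (!bi->dma) { /* dma_mapping_error() in the kernel */
    				bi->dma = 0; /* leave the slot retryable */
    				break;
    			}
    		}
    		i = (i + 1) % 4;        /* advance ring index with wraparound */
    	}
    	printf("next_to_use=%u\n", i);
    	for (i = 0; i < 4; i++)
    		free(ring[i].skb);
    	return 0;
    }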
1113 1241
1114static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) 1242static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
1115{ 1243{
1116 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; 1244 /* HW will not DMA in data larger than the given buffer, even if it
1117} 1245 * parses the (NFS, of course) header to be larger. In that case, it
1118 1246 * fills the header buffer and spills the rest into the page.
1119static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) 1247 */
1120{ 1248 u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
1121 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1249 u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1122} 1250 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1123 1251 if (hlen > IXGBE_RX_HDR_SIZE)
1124static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) 1252 hlen = IXGBE_RX_HDR_SIZE;
1125{ 1253 return hlen;
1126 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1127 IXGBE_RXDADV_RSCCNT_MASK) >>
1128 IXGBE_RXDADV_RSCCNT_SHIFT;
1129} 1254}
1130 1255
1131/** 1256/**
1132 * ixgbe_transform_rsc_queue - change rsc queue into a full packet 1257 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
1133 * @skb: pointer to the last skb in the rsc queue 1258 * @skb: pointer to the last skb in the rsc queue
1134 * @count: pointer to number of packets coalesced in this context
1135 * 1259 *
1136 * This function changes a queue full of hw rsc buffers into a completed 1260 * This function changes a queue full of hw rsc buffers into a completed
1137 * packet. It uses the ->prev pointers to find the first packet and then 1261 * packet. It uses the ->prev pointers to find the first packet and then
1138 * turns it into the frag list owner. 1262 * turns it into the frag list owner.
1139 **/ 1263 **/
1140static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, 1264static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
1141 u64 *count)
1142{ 1265{
1143 unsigned int frag_list_size = 0; 1266 unsigned int frag_list_size = 0;
1267 unsigned int skb_cnt = 1;
1144 1268
1145 while (skb->prev) { 1269 while (skb->prev) {
1146 struct sk_buff *prev = skb->prev; 1270 struct sk_buff *prev = skb->prev;
1147 frag_list_size += skb->len; 1271 frag_list_size += skb->len;
1148 skb->prev = NULL; 1272 skb->prev = NULL;
1149 skb = prev; 1273 skb = prev;
1150 *count += 1; 1274 skb_cnt++;
1151 } 1275 }
1152 1276
1153 skb_shinfo(skb)->frag_list = skb->next; 1277 skb_shinfo(skb)->frag_list = skb->next;
@@ -1155,68 +1279,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
1155 skb->len += frag_list_size; 1279 skb->len += frag_list_size;
1156 skb->data_len += frag_list_size; 1280 skb->data_len += frag_list_size;
1157 skb->truesize += frag_list_size; 1281 skb->truesize += frag_list_size;
1282 IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
1283
1158 return skb; 1284 return skb;
1159} 1285}
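ixgbe_transform_rsc_queue() above now counts the coalesced buffers itself while walking the ->prev chain back to the head of the packet. The same bookkeeping on a toy three-buffer chain:

    #include <stdio.h>

    struct buf { struct buf *prev; unsigned int len; };

    int main(void)
    {
    	struct buf a = { 0, 100 }, b = { &a, 200 }, c = { &b, 300 };
    	struct buf *skb = &c;               /* last buffer of the chain */
    	unsigned int frag_size = 0, cnt = 1;

    	while (skb->prev) {
    		struct buf *prev = skb->prev;

    		frag_size += skb->len;      /* bytes hanging off the head */
    		skb->prev = NULL;
    		skb = prev;
    		cnt++;
    	}
    	printf("head len=%u, frag bytes=%u, %u buffers\n",
    	       skb->len, frag_size, cnt);
    	return 0;
    }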
1160 1286
1161struct ixgbe_rsc_cb { 1287static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
1162 dma_addr_t dma; 1288{
1163 bool delay_unmap; 1289 return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1164}; 1290 IXGBE_RXDADV_RSCCNT_MASK);
1165 1291}
1166#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
1167 1292
1168static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1293static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1169 struct ixgbe_ring *rx_ring, 1294 struct ixgbe_ring *rx_ring,
1170 int *work_done, int work_to_do) 1295 int *work_done, int work_to_do)
1171{ 1296{
1172 struct ixgbe_adapter *adapter = q_vector->adapter; 1297 struct ixgbe_adapter *adapter = q_vector->adapter;
1173 struct pci_dev *pdev = adapter->pdev;
1174 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 1298 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1175 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 1299 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1176 struct sk_buff *skb; 1300 struct sk_buff *skb;
1177 unsigned int i, rsc_count = 0;
1178 u32 len, staterr;
1179 u16 hdr_info;
1180 bool cleaned = false;
1181 int cleaned_count = 0;
1182 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1301 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1302 const int current_node = numa_node_id();
1183#ifdef IXGBE_FCOE 1303#ifdef IXGBE_FCOE
1184 int ddp_bytes = 0; 1304 int ddp_bytes = 0;
1185#endif /* IXGBE_FCOE */ 1305#endif /* IXGBE_FCOE */
1306 u32 staterr;
1307 u16 i;
1308 u16 cleaned_count = 0;
1309 bool pkt_is_rsc = false;
1186 1310
1187 i = rx_ring->next_to_clean; 1311 i = rx_ring->next_to_clean;
1188 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1312 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1189 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1313 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1190 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1191 1314
1192 while (staterr & IXGBE_RXD_STAT_DD) { 1315 while (staterr & IXGBE_RXD_STAT_DD) {
1193 u32 upper_len = 0; 1316 u32 upper_len = 0;
1194 if (*work_done >= work_to_do)
1195 break;
1196 (*work_done)++;
1197 1317
1198 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1318 rmb(); /* read descriptor and rx_buffer_info after status DD */
1199 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1200 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
1201 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1202 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1203 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1204 if ((len > IXGBE_RX_HDR_SIZE) ||
1205 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1206 len = IXGBE_RX_HDR_SIZE;
1207 } else {
1208 len = le16_to_cpu(rx_desc->wb.upper.length);
1209 }
1210 1319
1211 cleaned = true; 1320 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1321
1212 skb = rx_buffer_info->skb; 1322 skb = rx_buffer_info->skb;
1213 prefetch(skb->data);
1214 rx_buffer_info->skb = NULL; 1323 rx_buffer_info->skb = NULL;
1324 prefetch(skb->data);
1325
1326 if (ring_is_rsc_enabled(rx_ring))
1327 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1215 1328
                                                              1329         /* if this is an skb from a previous receive, DMA will be 0 */
1216 if (rx_buffer_info->dma) { 1330 if (rx_buffer_info->dma) {
1217 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1331 u16 hlen;
1218 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1332 if (pkt_is_rsc &&
1219 (!(skb->prev))) { 1333 !(staterr & IXGBE_RXD_STAT_EOP) &&
1334 !skb->prev) {
1220 /* 1335 /*
1221 * When HWRSC is enabled, delay unmapping 1336 * When HWRSC is enabled, delay unmapping
1222 * of the first packet. It carries the 1337 * of the first packet. It carries the
@@ -1227,29 +1342,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1227 IXGBE_RSC_CB(skb)->delay_unmap = true; 1342 IXGBE_RSC_CB(skb)->delay_unmap = true;
1228 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1343 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1229 } else { 1344 } else {
1230 dma_unmap_single(&pdev->dev, 1345 dma_unmap_single(rx_ring->dev,
1231 rx_buffer_info->dma, 1346 rx_buffer_info->dma,
1232 rx_ring->rx_buf_len, 1347 rx_ring->rx_buf_len,
1233 DMA_FROM_DEVICE); 1348 DMA_FROM_DEVICE);
1234 } 1349 }
1235 rx_buffer_info->dma = 0; 1350 rx_buffer_info->dma = 0;
1236 skb_put(skb, len); 1351
1352 if (ring_is_ps_enabled(rx_ring)) {
1353 hlen = ixgbe_get_hlen(rx_desc);
1354 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1355 } else {
1356 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1357 }
1358
1359 skb_put(skb, hlen);
1360 } else {
1361 /* assume packet split since header is unmapped */
1362 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1237 } 1363 }
1238 1364
1239 if (upper_len) { 1365 if (upper_len) {
1240 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 1366 dma_unmap_page(rx_ring->dev,
1241 PAGE_SIZE / 2, DMA_FROM_DEVICE); 1367 rx_buffer_info->page_dma,
1368 PAGE_SIZE / 2,
1369 DMA_FROM_DEVICE);
1242 rx_buffer_info->page_dma = 0; 1370 rx_buffer_info->page_dma = 0;
1243 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1371 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1244 rx_buffer_info->page, 1372 rx_buffer_info->page,
1245 rx_buffer_info->page_offset, 1373 rx_buffer_info->page_offset,
1246 upper_len); 1374 upper_len);
1247 1375
1248 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 1376 if ((page_count(rx_buffer_info->page) == 1) &&
1249 (page_count(rx_buffer_info->page) != 1)) 1377 (page_to_nid(rx_buffer_info->page) == current_node))
1250 rx_buffer_info->page = NULL;
1251 else
1252 get_page(rx_buffer_info->page); 1378 get_page(rx_buffer_info->page);
1379 else
1380 rx_buffer_info->page = NULL;
1253 1381
1254 skb->len += upper_len; 1382 skb->len += upper_len;
1255 skb->data_len += upper_len; 1383 skb->data_len += upper_len;
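The recycling test above is also rewritten: a half page is reused only when the driver holds the sole reference (page_count(page) == 1) and the page is local to the NUMA node doing the cleanup. A small model of that decision, with a hypothetical page struct in place of struct page:

#include <stdio.h>
#include <stdbool.h>

struct page {
	int refcount;
	int nid;	/* NUMA node the page lives on */
};

/* recycle only when we hold the sole reference and the page is
 * local to the CPU doing the cleanup; otherwise give it up */
static bool can_recycle(const struct page *p, int current_node)
{
	return p->refcount == 1 && p->nid == current_node;
}

int main(void)
{
	struct page local  = { 1, 0 };
	struct page shared = { 2, 0 };
	struct page remote = { 1, 1 };

	printf("local:  %d\n", can_recycle(&local, 0));  /* 1 */
	printf("shared: %d\n", can_recycle(&shared, 0)); /* 0 */
	printf("remote: %d\n", can_recycle(&remote, 0)); /* 0 */
	return 0;
}
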
@@ -1264,10 +1392,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1264 prefetch(next_rxd); 1392 prefetch(next_rxd);
1265 cleaned_count++; 1393 cleaned_count++;
1266 1394
1267 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 1395 if (pkt_is_rsc) {
1268 rsc_count = ixgbe_get_rsc_count(rx_desc);
1269
1270 if (rsc_count) {
1271 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> 1396 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1272 IXGBE_RXDADV_NEXTP_SHIFT; 1397 IXGBE_RXDADV_NEXTP_SHIFT;
1273 next_buffer = &rx_ring->rx_buffer_info[nextp]; 1398 next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1275,32 +1400,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1275 next_buffer = &rx_ring->rx_buffer_info[i]; 1400 next_buffer = &rx_ring->rx_buffer_info[i];
1276 } 1401 }
1277 1402
1278 if (staterr & IXGBE_RXD_STAT_EOP) { 1403 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
1279 if (skb->prev) 1404 if (ring_is_ps_enabled(rx_ring)) {
1280 skb = ixgbe_transform_rsc_queue(skb,
1281 &(rx_ring->rsc_count));
1282 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1283 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1284 dma_unmap_single(&pdev->dev,
1285 IXGBE_RSC_CB(skb)->dma,
1286 rx_ring->rx_buf_len,
1287 DMA_FROM_DEVICE);
1288 IXGBE_RSC_CB(skb)->dma = 0;
1289 IXGBE_RSC_CB(skb)->delay_unmap = false;
1290 }
1291 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
1292 rx_ring->rsc_count +=
1293 skb_shinfo(skb)->nr_frags;
1294 else
1295 rx_ring->rsc_count++;
1296 rx_ring->rsc_flush++;
1297 }
1298 u64_stats_update_begin(&rx_ring->syncp);
1299 rx_ring->stats.packets++;
1300 rx_ring->stats.bytes += skb->len;
1301 u64_stats_update_end(&rx_ring->syncp);
1302 } else {
1303 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1304 rx_buffer_info->skb = next_buffer->skb; 1405 rx_buffer_info->skb = next_buffer->skb;
1305 rx_buffer_info->dma = next_buffer->dma; 1406 rx_buffer_info->dma = next_buffer->dma;
1306 next_buffer->skb = skb; 1407 next_buffer->skb = skb;
@@ -1309,12 +1410,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1309 skb->next = next_buffer->skb; 1410 skb->next = next_buffer->skb;
1310 skb->next->prev = skb; 1411 skb->next->prev = skb;
1311 } 1412 }
1312 rx_ring->non_eop_descs++; 1413 rx_ring->rx_stats.non_eop_descs++;
1313 goto next_desc; 1414 goto next_desc;
1314 } 1415 }
1315 1416
1417 if (skb->prev) {
1418 skb = ixgbe_transform_rsc_queue(skb);
1419 /* if we got here without RSC the packet is invalid */
1420 if (!pkt_is_rsc) {
1421 __pskb_trim(skb, 0);
1422 rx_buffer_info->skb = skb;
1423 goto next_desc;
1424 }
1425 }
1426
1427 if (ring_is_rsc_enabled(rx_ring)) {
1428 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1429 dma_unmap_single(rx_ring->dev,
1430 IXGBE_RSC_CB(skb)->dma,
1431 rx_ring->rx_buf_len,
1432 DMA_FROM_DEVICE);
1433 IXGBE_RSC_CB(skb)->dma = 0;
1434 IXGBE_RSC_CB(skb)->delay_unmap = false;
1435 }
1436 }
1437 if (pkt_is_rsc) {
1438 if (ring_is_ps_enabled(rx_ring))
1439 rx_ring->rx_stats.rsc_count +=
1440 skb_shinfo(skb)->nr_frags;
1441 else
1442 rx_ring->rx_stats.rsc_count +=
1443 IXGBE_RSC_CB(skb)->skb_cnt;
1444 rx_ring->rx_stats.rsc_flush++;
1445 }
1446
                                                              1447         /* ERR_MASK will only have valid bits if EOP is set */
1316 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { 1448 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
1317 dev_kfree_skb_irq(skb); 1449 /* trim packet back to size 0 and recycle it */
1450 __pskb_trim(skb, 0);
1451 rx_buffer_info->skb = skb;
1318 goto next_desc; 1452 goto next_desc;
1319 } 1453 }
1320 1454
@@ -1324,7 +1458,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1324 total_rx_bytes += skb->len; 1458 total_rx_bytes += skb->len;
1325 total_rx_packets++; 1459 total_rx_packets++;
1326 1460
1327 skb->protocol = eth_type_trans(skb, adapter->netdev); 1461 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1328#ifdef IXGBE_FCOE 1462#ifdef IXGBE_FCOE
1329 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1463 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1330 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 1464 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1338,16 +1472,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1338next_desc: 1472next_desc:
1339 rx_desc->wb.upper.status_error = 0; 1473 rx_desc->wb.upper.status_error = 0;
1340 1474
1475 (*work_done)++;
1476 if (*work_done >= work_to_do)
1477 break;
1478
1341 /* return some buffers to hardware, one at a time is too slow */ 1479 /* return some buffers to hardware, one at a time is too slow */
1342 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1480 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1343 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1481 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1344 cleaned_count = 0; 1482 cleaned_count = 0;
1345 } 1483 }
1346 1484
1347 /* use prefetched values */ 1485 /* use prefetched values */
1348 rx_desc = next_rxd; 1486 rx_desc = next_rxd;
1349 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1350
1351 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1487 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1352 } 1488 }
1353 1489
@@ -1355,14 +1491,14 @@ next_desc:
1355 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 1491 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1356 1492
1357 if (cleaned_count) 1493 if (cleaned_count)
1358 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1494 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1359 1495
1360#ifdef IXGBE_FCOE 1496#ifdef IXGBE_FCOE
1361 /* include DDPed FCoE data */ 1497 /* include DDPed FCoE data */
1362 if (ddp_bytes > 0) { 1498 if (ddp_bytes > 0) {
1363 unsigned int mss; 1499 unsigned int mss;
1364 1500
1365 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - 1501 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1366 sizeof(struct fc_frame_header) - 1502 sizeof(struct fc_frame_header) -
1367 sizeof(struct fcoe_crc_eof); 1503 sizeof(struct fcoe_crc_eof);
1368 if (mss > 512) 1504 if (mss > 512)
@@ -1374,8 +1510,10 @@ next_desc:
1374 1510
1375 rx_ring->total_packets += total_rx_packets; 1511 rx_ring->total_packets += total_rx_packets;
1376 rx_ring->total_bytes += total_rx_bytes; 1512 rx_ring->total_bytes += total_rx_bytes;
1377 1513 u64_stats_update_begin(&rx_ring->syncp);
1378 return cleaned; 1514 rx_ring->stats.packets += total_rx_packets;
1515 rx_ring->stats.bytes += total_rx_bytes;
1516 u64_stats_update_end(&rx_ring->syncp);
1379} 1517}
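ixgbe_clean_rx_irq() now folds its totals into ring->stats once, under u64_stats_update_begin()/end(); on 32-bit hosts that pair is a sequence counter that lets readers detect a torn 64-bit update. A single-threaded sketch of the retry discipline (the kernel helpers also handle memory ordering, which this analogue omits):

#include <stdio.h>
#include <stdint.h>

struct ring_stats {
	unsigned int seq;   /* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;           /* begin: seq becomes odd */
	s->packets += pkts;
	s->bytes   += bytes;
	s->seq++;           /* end: seq becomes even again */
}

/* readers retry until they see the same even sequence number on
 * both sides of the copy -- the u64_stats_fetch_* pattern */
static void stats_read(const struct ring_stats *s,
		       uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*pkts  = s->packets;
		*bytes = s->bytes;
	} while (start & 1 || start != s->seq);
}

int main(void)
{
	struct ring_stats s = { 0, 0, 0 };
	uint64_t p, b;

	stats_update(&s, 64, 96000);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
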
1380 1518
1381static int ixgbe_clean_rxonly(struct napi_struct *, int); 1519static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1389,7 +1527,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
1389static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1527static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1390{ 1528{
1391 struct ixgbe_q_vector *q_vector; 1529 struct ixgbe_q_vector *q_vector;
1392 int i, j, q_vectors, v_idx, r_idx; 1530 int i, q_vectors, v_idx, r_idx;
1393 u32 mask; 1531 u32 mask;
1394 1532
1395 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1533 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1405,8 +1543,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1405 adapter->num_rx_queues); 1543 adapter->num_rx_queues);
1406 1544
1407 for (i = 0; i < q_vector->rxr_count; i++) { 1545 for (i = 0; i < q_vector->rxr_count; i++) {
1408 j = adapter->rx_ring[r_idx]->reg_idx; 1546 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1409 ixgbe_set_ivar(adapter, 0, j, v_idx); 1547 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1410 r_idx = find_next_bit(q_vector->rxr_idx, 1548 r_idx = find_next_bit(q_vector->rxr_idx,
1411 adapter->num_rx_queues, 1549 adapter->num_rx_queues,
1412 r_idx + 1); 1550 r_idx + 1);
@@ -1415,8 +1553,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1415 adapter->num_tx_queues); 1553 adapter->num_tx_queues);
1416 1554
1417 for (i = 0; i < q_vector->txr_count; i++) { 1555 for (i = 0; i < q_vector->txr_count; i++) {
1418 j = adapter->tx_ring[r_idx]->reg_idx; 1556 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1419 ixgbe_set_ivar(adapter, 1, j, v_idx); 1557 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1420 r_idx = find_next_bit(q_vector->txr_idx, 1558 r_idx = find_next_bit(q_vector->txr_idx,
1421 adapter->num_tx_queues, 1559 adapter->num_tx_queues,
1422 r_idx + 1); 1560 r_idx + 1);
@@ -1447,11 +1585,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1447 } 1585 }
1448 } 1586 }
1449 1587
1450 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1588 switch (adapter->hw.mac.type) {
1589 case ixgbe_mac_82598EB:
1451 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 1590 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1452 v_idx); 1591 v_idx);
1453 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1592 break;
1593 case ixgbe_mac_82599EB:
1594 case ixgbe_mac_X540:
1454 ixgbe_set_ivar(adapter, -1, 1, v_idx); 1595 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1596 break;
1597
1598 default:
1599 break;
1600 }
1455 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 1601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1456 1602
1457 /* set up to autoclear timer, and the vectors */ 1603 /* set up to autoclear timer, and the vectors */
@@ -1547,12 +1693,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1547 int v_idx = q_vector->v_idx; 1693 int v_idx = q_vector->v_idx;
1548 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); 1694 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1549 1695
1550 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1696 switch (adapter->hw.mac.type) {
1697 case ixgbe_mac_82598EB:
1551 /* must write high and low 16 bits to reset counter */ 1698 /* must write high and low 16 bits to reset counter */
1552 itr_reg |= (itr_reg << 16); 1699 itr_reg |= (itr_reg << 16);
1553 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1700 break;
1701 case ixgbe_mac_82599EB:
1702 case ixgbe_mac_X540:
1554 /* 1703 /*
1555 * 82599 can support a value of zero, so allow it for 1704 * 82599 and X540 can support a value of zero, so allow it for
1556          * max interrupt rate, but there is an erratum where it can   1705          * max interrupt rate, but there is an erratum where it can
1557 * not be zero with RSC 1706 * not be zero with RSC
1558 */ 1707 */
@@ -1565,6 +1714,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1565 * immediate assertion of the interrupt 1714 * immediate assertion of the interrupt
1566 */ 1715 */
1567 itr_reg |= IXGBE_EITR_CNT_WDIS; 1716 itr_reg |= IXGBE_EITR_CNT_WDIS;
1717 break;
1718 default:
1719 break;
1568 } 1720 }
1569 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 1721 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1570} 1722}
@@ -1572,14 +1724,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1572static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) 1724static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1573{ 1725{
1574 struct ixgbe_adapter *adapter = q_vector->adapter; 1726 struct ixgbe_adapter *adapter = q_vector->adapter;
1727 int i, r_idx;
1575 u32 new_itr; 1728 u32 new_itr;
1576 u8 current_itr, ret_itr; 1729 u8 current_itr, ret_itr;
1577 int i, r_idx;
1578 struct ixgbe_ring *rx_ring, *tx_ring;
1579 1730
1580 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1731 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1581 for (i = 0; i < q_vector->txr_count; i++) { 1732 for (i = 0; i < q_vector->txr_count; i++) {
1582 tx_ring = adapter->tx_ring[r_idx]; 1733 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
1583 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1734 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1584 q_vector->tx_itr, 1735 q_vector->tx_itr,
1585 tx_ring->total_packets, 1736 tx_ring->total_packets,
@@ -1594,7 +1745,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1594 1745
1595 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1746 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1596 for (i = 0; i < q_vector->rxr_count; i++) { 1747 for (i = 0; i < q_vector->rxr_count; i++) {
1597 rx_ring = adapter->rx_ring[r_idx]; 1748 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
1598 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1749 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1599 q_vector->rx_itr, 1750 q_vector->rx_itr,
1600 rx_ring->total_packets, 1751 rx_ring->total_packets,
@@ -1625,7 +1776,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1625 1776
1626 if (new_itr != q_vector->eitr) { 1777 if (new_itr != q_vector->eitr) {
1627 /* do an exponential smoothing */ 1778 /* do an exponential smoothing */
1628 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1779 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
1629 1780
1630 /* save the algorithm value here, not the smoothed one */ 1781 /* save the algorithm value here, not the smoothed one */
1631 q_vector->eitr = new_itr; 1782 q_vector->eitr = new_itr;
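The smoothing step changes from ((eitr * 90)/100) + ((new_itr * 10)/100) to ((eitr * 9) + new_itr)/10. Both compute a 90/10 weighted average, but the new form performs a single division, so it loses less to integer truncation. A quick comparison:

#include <stdio.h>

static unsigned int smooth_old(unsigned int eitr, unsigned int new_itr)
{
	return ((eitr * 90) / 100) + ((new_itr * 10) / 100);
}

static unsigned int smooth_new(unsigned int eitr, unsigned int new_itr)
{
	return ((eitr * 9) + new_itr) / 10;
}

int main(void)
{
	/* sample interrupt-rate values; truncation differs slightly */
	unsigned int pairs[][2] = { {8000, 3000}, {955, 124}, {33, 7} };

	for (unsigned int i = 0; i < 3; i++)
		printf("eitr=%u new=%u old-form=%u new-form=%u\n",
		       pairs[i][0], pairs[i][1],
		       smooth_old(pairs[i][0], pairs[i][1]),
		       smooth_new(pairs[i][0], pairs[i][1]));
	return 0;
}
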
@@ -1693,17 +1844,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1693{ 1844{
1694 struct ixgbe_hw *hw = &adapter->hw; 1845 struct ixgbe_hw *hw = &adapter->hw;
1695 1846
1847 if (eicr & IXGBE_EICR_GPI_SDP2) {
1848 /* Clear the interrupt */
1849 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1850 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1851 schedule_work(&adapter->sfp_config_module_task);
1852 }
1853
1696 if (eicr & IXGBE_EICR_GPI_SDP1) { 1854 if (eicr & IXGBE_EICR_GPI_SDP1) {
1697 /* Clear the interrupt */ 1855 /* Clear the interrupt */
1698 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1856 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1699 schedule_work(&adapter->multispeed_fiber_task); 1857 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1700 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 1858 schedule_work(&adapter->multispeed_fiber_task);
1701 /* Clear the interrupt */
1702 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1703 schedule_work(&adapter->sfp_config_module_task);
1704 } else {
1705 /* Interrupt isn't for us... */
1706 return;
1707 } 1859 }
1708} 1860}
1709 1861
@@ -1743,16 +1895,16 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1743 if (eicr & IXGBE_EICR_MAILBOX) 1895 if (eicr & IXGBE_EICR_MAILBOX)
1744 ixgbe_msg_task(adapter); 1896 ixgbe_msg_task(adapter);
1745 1897
1746 if (hw->mac.type == ixgbe_mac_82598EB) 1898 switch (hw->mac.type) {
1747 ixgbe_check_fan_failure(adapter, eicr); 1899 case ixgbe_mac_82599EB:
1748
1749 if (hw->mac.type == ixgbe_mac_82599EB) {
1750 ixgbe_check_sfp_event(adapter, eicr); 1900 ixgbe_check_sfp_event(adapter, eicr);
1751 adapter->interrupt_event = eicr;
1752 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 1901 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1753 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 1902 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1903 adapter->interrupt_event = eicr;
1754 schedule_work(&adapter->check_overtemp_task); 1904 schedule_work(&adapter->check_overtemp_task);
1755 1905 }
1906 /* now fallthrough to handle Flow Director */
1907 case ixgbe_mac_X540:
1756 /* Handle Flow Director Full threshold interrupt */ 1908 /* Handle Flow Director Full threshold interrupt */
1757 if (eicr & IXGBE_EICR_FLOW_DIR) { 1909 if (eicr & IXGBE_EICR_FLOW_DIR) {
1758 int i; 1910 int i;
@@ -1762,12 +1914,18 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1762 for (i = 0; i < adapter->num_tx_queues; i++) { 1914 for (i = 0; i < adapter->num_tx_queues; i++) {
1763 struct ixgbe_ring *tx_ring = 1915 struct ixgbe_ring *tx_ring =
1764 adapter->tx_ring[i]; 1916 adapter->tx_ring[i];
1765 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, 1917 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1766 &tx_ring->reinit_state)) 1918 &tx_ring->state))
1767 schedule_work(&adapter->fdir_reinit_task); 1919 schedule_work(&adapter->fdir_reinit_task);
1768 } 1920 }
1769 } 1921 }
1922 break;
1923 default:
1924 break;
1770 } 1925 }
1926
1927 ixgbe_check_fan_failure(adapter, eicr);
1928
1771 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1929 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1772 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1930 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1773 1931
@@ -1778,15 +1936,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1778 u64 qmask) 1936 u64 qmask)
1779{ 1937{
1780 u32 mask; 1938 u32 mask;
1939 struct ixgbe_hw *hw = &adapter->hw;
1781 1940
1782 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1941 switch (hw->mac.type) {
1942 case ixgbe_mac_82598EB:
1783 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1943 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1784 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1944 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1785 } else { 1945 break;
1946 case ixgbe_mac_82599EB:
1947 case ixgbe_mac_X540:
1786 mask = (qmask & 0xFFFFFFFF); 1948 mask = (qmask & 0xFFFFFFFF);
1787 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 1949 if (mask)
1950 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1788 mask = (qmask >> 32); 1951 mask = (qmask >> 32);
1789 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); 1952 if (mask)
1953 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1954 break;
1955 default:
1956 break;
1790 } 1957 }
1791 /* skip the flush */ 1958 /* skip the flush */
1792} 1959}
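On 82599/X540 the 64-bit queue mask spans two 32-bit EIMS_EX registers, and the rework above skips the MMIO write for a half that is zero. A sketch of the split, using a plain array as a stand-in for the register file (real EIMS writes are write-1-to-set; OR-ing models that here):

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2]; /* stand-ins for EIMS_EX(0)/EIMS_EX(1) */

static void irq_enable_queues(uint64_t qmask)
{
	uint32_t mask;

	mask = (uint32_t)(qmask & 0xFFFFFFFF);
	if (mask)		/* skip the write when no low-half bits */
		regs[0] |= mask;
	mask = (uint32_t)(qmask >> 32);
	if (mask)		/* likewise for the high half */
		regs[1] |= mask;
}

int main(void)
{
	irq_enable_queues(0x0000000500000003ULL);
	printf("EIMS_EX(0)=%08x EIMS_EX(1)=%08x\n", regs[0], regs[1]);
	return 0;
}
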
@@ -1795,15 +1962,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1795 u64 qmask) 1962 u64 qmask)
1796{ 1963{
1797 u32 mask; 1964 u32 mask;
1965 struct ixgbe_hw *hw = &adapter->hw;
1798 1966
1799 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1967 switch (hw->mac.type) {
1968 case ixgbe_mac_82598EB:
1800 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1969 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1801 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); 1970 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1802 } else { 1971 break;
1972 case ixgbe_mac_82599EB:
1973 case ixgbe_mac_X540:
1803 mask = (qmask & 0xFFFFFFFF); 1974 mask = (qmask & 0xFFFFFFFF);
1804 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); 1975 if (mask)
1976 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1805 mask = (qmask >> 32); 1977 mask = (qmask >> 32);
1806 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); 1978 if (mask)
1979 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1980 break;
1981 default:
1982 break;
1807 } 1983 }
1808 /* skip the flush */ 1984 /* skip the flush */
1809} 1985}
@@ -1846,8 +2022,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1846 int r_idx; 2022 int r_idx;
1847 int i; 2023 int i;
1848 2024
2025#ifdef CONFIG_IXGBE_DCA
2026 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2027 ixgbe_update_dca(q_vector);
2028#endif
2029
1849 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2030 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1850 for (i = 0; i < q_vector->rxr_count; i++) { 2031 for (i = 0; i < q_vector->rxr_count; i++) {
1851 rx_ring = adapter->rx_ring[r_idx]; 2032 rx_ring = adapter->rx_ring[r_idx];
1852 rx_ring->total_bytes = 0; 2033 rx_ring->total_bytes = 0;
1853 rx_ring->total_packets = 0; 2034 rx_ring->total_packets = 0;
@@ -1858,7 +2039,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1858 if (!q_vector->rxr_count) 2039 if (!q_vector->rxr_count)
1859 return IRQ_HANDLED; 2040 return IRQ_HANDLED;
1860 2041
1861 /* disable interrupts on this vector only */
1862 /* EIAM disabled interrupts (on this vector) for us */ 2042 /* EIAM disabled interrupts (on this vector) for us */
1863 napi_schedule(&q_vector->napi); 2043 napi_schedule(&q_vector->napi);
1864 2044
@@ -1917,13 +2097,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1917 int work_done = 0; 2097 int work_done = 0;
1918 long r_idx; 2098 long r_idx;
1919 2099
1920 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1921 rx_ring = adapter->rx_ring[r_idx];
1922#ifdef CONFIG_IXGBE_DCA 2100#ifdef CONFIG_IXGBE_DCA
1923 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2101 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1924 ixgbe_update_rx_dca(adapter, rx_ring); 2102 ixgbe_update_dca(q_vector);
1925#endif 2103#endif
1926 2104
2105 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2106 rx_ring = adapter->rx_ring[r_idx];
2107
1927 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 2108 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1928 2109
1929 /* If all Rx work done, exit the polling mode */ 2110 /* If all Rx work done, exit the polling mode */
@@ -1957,13 +2138,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1957 long r_idx; 2138 long r_idx;
1958 bool tx_clean_complete = true; 2139 bool tx_clean_complete = true;
1959 2140
2141#ifdef CONFIG_IXGBE_DCA
2142 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2143 ixgbe_update_dca(q_vector);
2144#endif
2145
1960 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2146 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1961 for (i = 0; i < q_vector->txr_count; i++) { 2147 for (i = 0; i < q_vector->txr_count; i++) {
1962 ring = adapter->tx_ring[r_idx]; 2148 ring = adapter->tx_ring[r_idx];
1963#ifdef CONFIG_IXGBE_DCA
1964 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1965 ixgbe_update_tx_dca(adapter, ring);
1966#endif
1967 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); 2149 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1968 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2150 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1969 r_idx + 1); 2151 r_idx + 1);
@@ -1976,10 +2158,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1976 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2158 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1977 for (i = 0; i < q_vector->rxr_count; i++) { 2159 for (i = 0; i < q_vector->rxr_count; i++) {
1978 ring = adapter->rx_ring[r_idx]; 2160 ring = adapter->rx_ring[r_idx];
1979#ifdef CONFIG_IXGBE_DCA
1980 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1981 ixgbe_update_rx_dca(adapter, ring);
1982#endif
1983 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2161 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1984 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2162 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1985 r_idx + 1); 2163 r_idx + 1);
@@ -2018,13 +2196,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2018 int work_done = 0; 2196 int work_done = 0;
2019 long r_idx; 2197 long r_idx;
2020 2198
2021 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2022 tx_ring = adapter->tx_ring[r_idx];
2023#ifdef CONFIG_IXGBE_DCA 2199#ifdef CONFIG_IXGBE_DCA
2024 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2200 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2025 ixgbe_update_tx_dca(adapter, tx_ring); 2201 ixgbe_update_dca(q_vector);
2026#endif 2202#endif
2027 2203
2204 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2205 tx_ring = adapter->tx_ring[r_idx];
2206
2028 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 2207 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2029 work_done = budget; 2208 work_done = budget;
2030 2209
@@ -2045,24 +2224,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2045 int r_idx) 2224 int r_idx)
2046{ 2225{
2047 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2226 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2227 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2048 2228
2049 set_bit(r_idx, q_vector->rxr_idx); 2229 set_bit(r_idx, q_vector->rxr_idx);
2050 q_vector->rxr_count++; 2230 q_vector->rxr_count++;
2231 rx_ring->q_vector = q_vector;
2051} 2232}
2052 2233
2053static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2234static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2054 int t_idx) 2235 int t_idx)
2055{ 2236{
2056 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2237 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2238 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2057 2239
2058 set_bit(t_idx, q_vector->txr_idx); 2240 set_bit(t_idx, q_vector->txr_idx);
2059 q_vector->txr_count++; 2241 q_vector->txr_count++;
2242 tx_ring->q_vector = q_vector;
2060} 2243}
2061 2244
2062/** 2245/**
2063 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 2246 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2064 * @adapter: board private structure to initialize 2247 * @adapter: board private structure to initialize
2065 * @vectors: allotted vector count for descriptor rings
2066 * 2248 *
2067 * This function maps descriptor rings to the queue-specific vectors 2249 * This function maps descriptor rings to the queue-specific vectors
2068 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2250 * we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2070,9 +2252,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2070 * group the rings as "efficiently" as possible. You would add new 2252 * group the rings as "efficiently" as possible. You would add new
2071 * mapping configurations in here. 2253 * mapping configurations in here.
2072 **/ 2254 **/
2073static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2255static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2074 int vectors)
2075{ 2256{
2257 int q_vectors;
2076 int v_start = 0; 2258 int v_start = 0;
2077 int rxr_idx = 0, txr_idx = 0; 2259 int rxr_idx = 0, txr_idx = 0;
2078 int rxr_remaining = adapter->num_rx_queues; 2260 int rxr_remaining = adapter->num_rx_queues;
@@ -2085,11 +2267,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2085 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2267 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2086 goto out; 2268 goto out;
2087 2269
2270 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2271
2088 /* 2272 /*
2089 * The ideal configuration... 2273 * The ideal configuration...
2090 * We have enough vectors to map one per queue. 2274 * We have enough vectors to map one per queue.
2091 */ 2275 */
2092 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 2276 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2093 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 2277 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2094 map_vector_to_rxq(adapter, v_start, rxr_idx); 2278 map_vector_to_rxq(adapter, v_start, rxr_idx);
2095 2279
@@ -2105,23 +2289,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2105 * multiple queues per vector. 2289 * multiple queues per vector.
2106 */ 2290 */
2107 /* Re-adjusting *qpv takes care of the remainder. */ 2291 /* Re-adjusting *qpv takes care of the remainder. */
2108 for (i = v_start; i < vectors; i++) { 2292 for (i = v_start; i < q_vectors; i++) {
2109 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 2293 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
2110 for (j = 0; j < rqpv; j++) { 2294 for (j = 0; j < rqpv; j++) {
2111 map_vector_to_rxq(adapter, i, rxr_idx); 2295 map_vector_to_rxq(adapter, i, rxr_idx);
2112 rxr_idx++; 2296 rxr_idx++;
2113 rxr_remaining--; 2297 rxr_remaining--;
2114 } 2298 }
2115 } 2299 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
2116 for (i = v_start; i < vectors; i++) {
2117 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2118 for (j = 0; j < tqpv; j++) { 2300 for (j = 0; j < tqpv; j++) {
2119 map_vector_to_txq(adapter, i, txr_idx); 2301 map_vector_to_txq(adapter, i, txr_idx);
2120 txr_idx++; 2302 txr_idx++;
2121 txr_remaining--; 2303 txr_remaining--;
2122 } 2304 }
2123 } 2305 }
2124
2125out: 2306out:
2126 return err; 2307 return err;
2127} 2308}
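The remapping loop now hands out both Rx and Tx rings in a single pass over the vectors, sizing each share with DIV_ROUND_UP on the remaining count so earlier vectors absorb the remainder. A standalone model of the distribution:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 10, txr_remaining = 6;
	int rxr_idx = 0, txr_idx = 0;

	for (int i = 0; i < q_vectors; i++) {
		/* share the leftover rings as evenly as possible:
		 * each vector takes ceil(remaining / vectors_left) */
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);

		printf("vector %d: rx", i);
		for (int j = 0; j < rqpv; j++, rxr_remaining--)
			printf(" %d", rxr_idx++);
		printf(" tx");
		for (int j = 0; j < tqpv; j++, txr_remaining--)
			printf(" %d", txr_idx++);
		printf("\n");
	}
	return 0;
}
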
@@ -2143,30 +2324,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2143 /* Decrement for Other and TCP Timer vectors */ 2324 /* Decrement for Other and TCP Timer vectors */
2144 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2325 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2145 2326
2146 /* Map the Tx/Rx rings to the vectors we were allotted. */ 2327 err = ixgbe_map_rings_to_vectors(adapter);
2147 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2148 if (err) 2328 if (err)
2149 goto out; 2329 return err;
2150 2330
2151#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2331#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2152 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2332 ? &ixgbe_msix_clean_many : \
2153 &ixgbe_msix_clean_many) 2333 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2334 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2335 NULL)
2154 for (vector = 0; vector < q_vectors; vector++) { 2336 for (vector = 0; vector < q_vectors; vector++) {
2155 handler = SET_HANDLER(adapter->q_vector[vector]); 2337 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2338 handler = SET_HANDLER(q_vector);
2156 2339
2157 if (handler == &ixgbe_msix_clean_rx) { 2340 if (handler == &ixgbe_msix_clean_rx) {
2158 sprintf(adapter->name[vector], "%s-%s-%d", 2341 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2159 netdev->name, "rx", ri++); 2342 "%s-%s-%d", netdev->name, "rx", ri++);
2160 } else if (handler == &ixgbe_msix_clean_tx) { 2343 } else if (handler == &ixgbe_msix_clean_tx) {
2161 sprintf(adapter->name[vector], "%s-%s-%d", 2344 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2162 netdev->name, "tx", ti++); 2345 "%s-%s-%d", netdev->name, "tx", ti++);
2163 } else 2346 } else if (handler == &ixgbe_msix_clean_many) {
2164 sprintf(adapter->name[vector], "%s-%s-%d", 2347 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2165 netdev->name, "TxRx", vector); 2348 "%s-%s-%d", netdev->name, "TxRx", ri++);
2166 2349 ti++;
2350 } else {
2351 /* skip this unused q_vector */
2352 continue;
2353 }
2167 err = request_irq(adapter->msix_entries[vector].vector, 2354 err = request_irq(adapter->msix_entries[vector].vector,
2168 handler, 0, adapter->name[vector], 2355 handler, 0, q_vector->name,
2169 adapter->q_vector[vector]); 2356 q_vector);
2170 if (err) { 2357 if (err) {
2171 e_err(probe, "request_irq failed for MSIX interrupt " 2358 e_err(probe, "request_irq failed for MSIX interrupt "
2172 "Error: %d\n", err); 2359 "Error: %d\n", err);
@@ -2174,9 +2361,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2174 } 2361 }
2175 } 2362 }
2176 2363
2177 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2364 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
2178 err = request_irq(adapter->msix_entries[vector].vector, 2365 err = request_irq(adapter->msix_entries[vector].vector,
2179 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2366 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
2180 if (err) { 2367 if (err) {
2181 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2368 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2182 goto free_queue_irqs; 2369 goto free_queue_irqs;
@@ -2192,17 +2379,16 @@ free_queue_irqs:
2192 pci_disable_msix(adapter->pdev); 2379 pci_disable_msix(adapter->pdev);
2193 kfree(adapter->msix_entries); 2380 kfree(adapter->msix_entries);
2194 adapter->msix_entries = NULL; 2381 adapter->msix_entries = NULL;
2195out:
2196 return err; 2382 return err;
2197} 2383}
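The request path now formats each IRQ name with snprintf() into the q_vector's own buffer rather than sprintf() into a shared adapter array, so a long interface name can no longer overflow the destination. A small sketch of the bounded formatting; the buffer size here is illustrative:

#include <stdio.h>

int main(void)
{
	char name[24];	/* illustrative; IFNAMSIZ-derived in the driver */
	int ri = 0, ti = 0;
	const char *netdev = "eth0";

	/* one name per vector type, mirroring the rx/tx/TxRx cases */
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", netdev, "rx", ri++);
	printf("%s\n", name);
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", netdev, "tx", ti++);
	printf("%s\n", name);
	snprintf(name, sizeof(name) - 1, "%s-%s-%d", netdev, "TxRx", ri);
	printf("%s\n", name);
	return 0;
}
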
2198 2384
2199static void ixgbe_set_itr(struct ixgbe_adapter *adapter) 2385static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2200{ 2386{
2201 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2387 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2202 u8 current_itr;
2203 u32 new_itr = q_vector->eitr;
2204 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 2388 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2205 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 2389 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2390 u32 new_itr = q_vector->eitr;
2391 u8 current_itr;
2206 2392
2207 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2393 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2208 q_vector->tx_itr, 2394 q_vector->tx_itr,
@@ -2232,9 +2418,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2232 2418
2233 if (new_itr != q_vector->eitr) { 2419 if (new_itr != q_vector->eitr) {
2234 /* do an exponential smoothing */ 2420 /* do an exponential smoothing */
2235 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 2421 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
2236 2422
2237 /* save the algorithm value here, not the smoothed one */ 2423 /* save the algorithm value here */
2238 q_vector->eitr = new_itr; 2424 q_vector->eitr = new_itr;
2239 2425
2240 ixgbe_write_eitr(q_vector); 2426 ixgbe_write_eitr(q_vector);
@@ -2255,12 +2441,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2255 mask |= IXGBE_EIMS_GPI_SDP0; 2441 mask |= IXGBE_EIMS_GPI_SDP0;
2256 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2442 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2257 mask |= IXGBE_EIMS_GPI_SDP1; 2443 mask |= IXGBE_EIMS_GPI_SDP1;
2258 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2444 switch (adapter->hw.mac.type) {
2445 case ixgbe_mac_82599EB:
2446 case ixgbe_mac_X540:
2259 mask |= IXGBE_EIMS_ECC; 2447 mask |= IXGBE_EIMS_ECC;
2260 mask |= IXGBE_EIMS_GPI_SDP1; 2448 mask |= IXGBE_EIMS_GPI_SDP1;
2261 mask |= IXGBE_EIMS_GPI_SDP2; 2449 mask |= IXGBE_EIMS_GPI_SDP2;
2262 if (adapter->num_vfs) 2450 if (adapter->num_vfs)
2263 mask |= IXGBE_EIMS_MAILBOX; 2451 mask |= IXGBE_EIMS_MAILBOX;
2452 break;
2453 default:
2454 break;
2264 } 2455 }
2265 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 2456 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2266 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 2457 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2316,13 +2507,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2316 if (eicr & IXGBE_EICR_LSC) 2507 if (eicr & IXGBE_EICR_LSC)
2317 ixgbe_check_lsc(adapter); 2508 ixgbe_check_lsc(adapter);
2318 2509
2319 if (hw->mac.type == ixgbe_mac_82599EB) 2510 switch (hw->mac.type) {
2511 case ixgbe_mac_82599EB:
2320 ixgbe_check_sfp_event(adapter, eicr); 2512 ixgbe_check_sfp_event(adapter, eicr);
2513 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2514 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2515 adapter->interrupt_event = eicr;
2516 schedule_work(&adapter->check_overtemp_task);
2517 }
2518 break;
2519 default:
2520 break;
2521 }
2321 2522
2322 ixgbe_check_fan_failure(adapter, eicr); 2523 ixgbe_check_fan_failure(adapter, eicr);
2323 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2324 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2325 schedule_work(&adapter->check_overtemp_task);
2326 2524
2327 if (napi_schedule_prep(&(q_vector->napi))) { 2525 if (napi_schedule_prep(&(q_vector->napi))) {
2328 adapter->tx_ring[0]->total_packets = 0; 2526 adapter->tx_ring[0]->total_packets = 0;
@@ -2415,14 +2613,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2415 **/ 2613 **/
2416static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 2614static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2417{ 2615{
2418 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2616 switch (adapter->hw.mac.type) {
2617 case ixgbe_mac_82598EB:
2419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 2618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2420 } else { 2619 break;
2620 case ixgbe_mac_82599EB:
2621 case ixgbe_mac_X540:
2421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 2622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 2623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2423 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 2624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2424 if (adapter->num_vfs > 32) 2625 if (adapter->num_vfs > 32)
2425 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 2626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2627 break;
2628 default:
2629 break;
2426 } 2630 }
2427 IXGBE_WRITE_FLUSH(&adapter->hw); 2631 IXGBE_WRITE_FLUSH(&adapter->hw);
2428 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2632 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2468,7 +2672,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2468 u64 tdba = ring->dma; 2672 u64 tdba = ring->dma;
2469 int wait_loop = 10; 2673 int wait_loop = 10;
2470 u32 txdctl; 2674 u32 txdctl;
2471 u16 reg_idx = ring->reg_idx; 2675 u8 reg_idx = ring->reg_idx;
2472 2676
2473 /* disable queue to avoid issues while updating state */ 2677 /* disable queue to avoid issues while updating state */
2474 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 2678 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2483,8 +2687,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2483 ring->count * sizeof(union ixgbe_adv_tx_desc)); 2687 ring->count * sizeof(union ixgbe_adv_tx_desc));
2484 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); 2688 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2485 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); 2689 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2486 ring->head = IXGBE_TDH(reg_idx); 2690 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2487 ring->tail = IXGBE_TDT(reg_idx);
2488 2691
2489 /* configure fetching thresholds */ 2692 /* configure fetching thresholds */
2490 if (adapter->rx_itr_setting == 0) { 2693 if (adapter->rx_itr_setting == 0) {
@@ -2500,7 +2703,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2500 } 2703 }
2501 2704
2502 /* reinitialize flowdirector state */ 2705 /* reinitialize flowdirector state */
2503 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); 2706 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2707 adapter->atr_sample_rate) {
2708 ring->atr_sample_rate = adapter->atr_sample_rate;
2709 ring->atr_count = 0;
2710 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2711 } else {
2712 ring->atr_sample_rate = 0;
2713 }
2714
2715 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2504 2716
2505 /* enable queue */ 2717 /* enable queue */
2506 txdctl |= IXGBE_TXDCTL_ENABLE; 2718 txdctl |= IXGBE_TXDCTL_ENABLE;
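Ring setup now caches a ready-to-use MMIO address (hw->hw_addr + IXGBE_TDT(reg_idx)) in ring->tail, so the hot path can writel() through the pointer instead of recomputing base plus offset per doorbell. A user-space model with an array acting as the mapped BAR; the offset formula is illustrative:

#include <stdio.h>
#include <stdint.h>

#define TDT(i) (0x06018 + (i) * 0x40) /* illustrative offset formula */

static uint8_t hw_addr[0x8000]; /* stand-in for the mapped BAR */

struct ring {
	volatile uint32_t *tail; /* cached pointer into the "BAR" */
};

static void reg_write(uint32_t val, volatile uint32_t *addr)
{
	*addr = val; /* the real writel() also orders the MMIO access */
}

int main(void)
{
	struct ring r;

	/* one-time setup: resolve base + offset to a pointer ... */
	r.tail = (volatile uint32_t *)(hw_addr + TDT(3));
	/* ... so the fast path is a single store */
	reg_write(42, r.tail);
	printf("tail register now holds %u\n", *r.tail);
	return 0;
}
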
@@ -2591,16 +2803,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2591 struct ixgbe_ring *rx_ring) 2803 struct ixgbe_ring *rx_ring)
2592{ 2804{
2593 u32 srrctl; 2805 u32 srrctl;
2594 int index; 2806 u8 reg_idx = rx_ring->reg_idx;
2595 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2596 2807
2597 index = rx_ring->reg_idx; 2808 switch (adapter->hw.mac.type) {
2598 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2809 case ixgbe_mac_82598EB: {
2599 unsigned long mask; 2810 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2600 mask = (unsigned long) feature[RING_F_RSS].mask; 2811 const int mask = feature[RING_F_RSS].mask;
2601 index = index & mask; 2812 reg_idx = reg_idx & mask;
2813 }
2814 break;
2815 case ixgbe_mac_82599EB:
2816 case ixgbe_mac_X540:
2817 default:
2818 break;
2602 } 2819 }
2603 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2820
2821 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
2604 2822
2605 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2823 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2606 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2824 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2610,7 +2828,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2610 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2828 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2611 IXGBE_SRRCTL_BSIZEHDR_MASK; 2829 IXGBE_SRRCTL_BSIZEHDR_MASK;
2612 2830
2613 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2831 if (ring_is_ps_enabled(rx_ring)) {
2614#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 2832#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2615 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2833 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2616#else 2834#else
@@ -2623,7 +2841,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2623 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2841 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2624 } 2842 }
2625 2843
2626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
2627} 2845}
2628 2846
2629static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2847static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,19 +2911,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2693} 2911}
2694 2912
2695/** 2913/**
2914 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2915 * @adapter: address of board private structure
2916 * @ring: structure containing ring specific data
2917 **/
2918void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2919 struct ixgbe_ring *ring)
2920{
2921 struct ixgbe_hw *hw = &adapter->hw;
2922 u32 rscctrl;
2923 u8 reg_idx = ring->reg_idx;
2924
2925 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2926 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2927 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2928}
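ixgbe_clear_rscctl() above is the classic read-modify-write sequence: read the per-ring RSCCTL register, clear only the enable bit, and write the value back so every other field is preserved. A generic sketch over a software register image (bit position illustrative):

#include <stdio.h>
#include <stdint.h>

#define RSCCTL_RSCEN 0x00000001u /* illustrative bit position */

static uint32_t rscctl_reg = 0x0000000Du; /* enable + maxdesc bits */

/* read-modify-write: preserve every other field, drop only RSCEN */
static void clear_rscctl(void)
{
	uint32_t v = rscctl_reg;      /* IXGBE_READ_REG(...)  */

	v &= ~RSCCTL_RSCEN;
	rscctl_reg = v;               /* IXGBE_WRITE_REG(...) */
}

int main(void)
{
	printf("before: %08x\n", rscctl_reg);
	clear_rscctl();
	printf("after:  %08x\n", rscctl_reg);
	return 0;
}
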
2929
2930/**
2696 * ixgbe_configure_rscctl - enable RSC for the indicated ring 2931 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2697 * @adapter: address of board private structure 2932 * @adapter: address of board private structure
2698 * @index: index of ring to set 2933 * @index: index of ring to set
2699 **/ 2934 **/
2700static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 2935void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2701 struct ixgbe_ring *ring) 2936 struct ixgbe_ring *ring)
2702{ 2937{
2703 struct ixgbe_hw *hw = &adapter->hw; 2938 struct ixgbe_hw *hw = &adapter->hw;
2704 u32 rscctrl; 2939 u32 rscctrl;
2705 int rx_buf_len; 2940 int rx_buf_len;
2706 u16 reg_idx = ring->reg_idx; 2941 u8 reg_idx = ring->reg_idx;
2707 2942
2708 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 2943 if (!ring_is_rsc_enabled(ring))
2709 return; 2944 return;
2710 2945
2711 rx_buf_len = ring->rx_buf_len; 2946 rx_buf_len = ring->rx_buf_len;
@@ -2716,7 +2951,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2716 * total size of max desc * buf_len is not greater 2951 * total size of max desc * buf_len is not greater
2717 * than 65535 2952 * than 65535
2718 */ 2953 */
2719 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2954 if (ring_is_ps_enabled(ring)) {
2720#if (MAX_SKB_FRAGS > 16) 2955#if (MAX_SKB_FRAGS > 16)
2721 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2956 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2722#elif (MAX_SKB_FRAGS > 8) 2957#elif (MAX_SKB_FRAGS > 8)
@@ -2769,9 +3004,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2769 struct ixgbe_ring *ring) 3004 struct ixgbe_ring *ring)
2770{ 3005{
2771 struct ixgbe_hw *hw = &adapter->hw; 3006 struct ixgbe_hw *hw = &adapter->hw;
2772 int reg_idx = ring->reg_idx;
2773 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3007 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2774 u32 rxdctl; 3008 u32 rxdctl;
3009 u8 reg_idx = ring->reg_idx;
2775 3010
2776 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3011 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2777 if (hw->mac.type == ixgbe_mac_82598EB && 3012 if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2795,7 +3030,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2795 struct ixgbe_hw *hw = &adapter->hw; 3030 struct ixgbe_hw *hw = &adapter->hw;
2796 u64 rdba = ring->dma; 3031 u64 rdba = ring->dma;
2797 u32 rxdctl; 3032 u32 rxdctl;
2798 u16 reg_idx = ring->reg_idx; 3033 u8 reg_idx = ring->reg_idx;
2799 3034
2800 /* disable queue to avoid issues while updating state */ 3035 /* disable queue to avoid issues while updating state */
2801 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2809,8 +3044,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2809 ring->count * sizeof(union ixgbe_adv_rx_desc)); 3044 ring->count * sizeof(union ixgbe_adv_rx_desc));
2810 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); 3045 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2811 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); 3046 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2812 ring->head = IXGBE_RDH(reg_idx); 3047 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
2813 ring->tail = IXGBE_RDT(reg_idx);
2814 3048
2815 ixgbe_configure_srrctl(adapter, ring); 3049 ixgbe_configure_srrctl(adapter, ring);
2816 ixgbe_configure_rscctl(adapter, ring); 3050 ixgbe_configure_rscctl(adapter, ring);
@@ -2832,7 +3066,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2832 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3066 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2833 3067
2834 ixgbe_rx_desc_queue_enable(adapter, ring); 3068 ixgbe_rx_desc_queue_enable(adapter, ring);
2835 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); 3069 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
2836} 3070}
2837 3071
2838static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 3072static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2955,24 +3189,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2955 rx_ring->rx_buf_len = rx_buf_len; 3189 rx_ring->rx_buf_len = rx_buf_len;
2956 3190
2957 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2958 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; 3192 set_ring_ps_enabled(rx_ring);
3193 else
3194 clear_ring_ps_enabled(rx_ring);
3195
3196 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3197 set_ring_rsc_enabled(rx_ring);
2959 else 3198 else
2960 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3199 clear_ring_rsc_enabled(rx_ring);
2961 3200
2962#ifdef IXGBE_FCOE 3201#ifdef IXGBE_FCOE
2963 if (netdev->features & NETIF_F_FCOE_MTU) { 3202 if (netdev->features & NETIF_F_FCOE_MTU) {
2964 struct ixgbe_ring_feature *f; 3203 struct ixgbe_ring_feature *f;
2965 f = &adapter->ring_feature[RING_F_FCOE]; 3204 f = &adapter->ring_feature[RING_F_FCOE];
2966 if ((i >= f->mask) && (i < f->mask + f->indices)) { 3205 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2967 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3206 clear_ring_ps_enabled(rx_ring);
2968 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 3207 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2969 rx_ring->rx_buf_len = 3208 rx_ring->rx_buf_len =
2970 IXGBE_FCOE_JUMBO_FRAME_SIZE; 3209 IXGBE_FCOE_JUMBO_FRAME_SIZE;
3210 } else if (!ring_is_rsc_enabled(rx_ring) &&
3211 !ring_is_ps_enabled(rx_ring)) {
3212 rx_ring->rx_buf_len =
3213 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2971 } 3214 }
2972 } 3215 }
2973#endif /* IXGBE_FCOE */ 3216#endif /* IXGBE_FCOE */
2974 } 3217 }
2975
2976} 3218}
2977 3219
2978static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) 3220static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2995,6 +3237,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2995 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 3237 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2996 break; 3238 break;
2997 case ixgbe_mac_82599EB: 3239 case ixgbe_mac_82599EB:
3240 case ixgbe_mac_X540:
2998 /* Disable RSC for ACK packets */ 3241 /* Disable RSC for ACK packets */
2999 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3242 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3000 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3243 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3122,6 +3365,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3122 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3365 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3123 break; 3366 break;
3124 case ixgbe_mac_82599EB: 3367 case ixgbe_mac_82599EB:
3368 case ixgbe_mac_X540:
3125 for (i = 0; i < adapter->num_rx_queues; i++) { 3369 for (i = 0; i < adapter->num_rx_queues; i++) {
3126 j = adapter->rx_ring[i]->reg_idx; 3370 j = adapter->rx_ring[i]->reg_idx;
3127 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3371 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3151,6 +3395,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3151 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3395 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3152 break; 3396 break;
3153 case ixgbe_mac_82599EB: 3397 case ixgbe_mac_82599EB:
3398 case ixgbe_mac_X540:
3154 for (i = 0; i < adapter->num_rx_queues; i++) { 3399 for (i = 0; i < adapter->num_rx_queues; i++) {
3155 j = adapter->rx_ring[i]->reg_idx; 3400 j = adapter->rx_ring[i]->reg_idx;
3156 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3401 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3348,8 +3593,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3348{ 3593{
3349 struct ixgbe_hw *hw = &adapter->hw; 3594 struct ixgbe_hw *hw = &adapter->hw;
3350 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3595 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3351 u32 txdctl;
3352 int i, j;
3353 3596
3354 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { 3597 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3355 if (hw->mac.type == ixgbe_mac_82598EB) 3598 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3365,25 +3608,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3365 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3608 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3366#endif 3609#endif
3367 3610
3368 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3611 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3369 DCB_TX_CONFIG); 3612 DCB_TX_CONFIG);
3370 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3613 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3371 DCB_RX_CONFIG); 3614 DCB_RX_CONFIG);
3372 3615
3373 /* reconfigure the hardware */
3374 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3375
3376 for (i = 0; i < adapter->num_tx_queues; i++) {
3377 j = adapter->tx_ring[i]->reg_idx;
3378 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3379 /* PThresh workaround for Tx hang with DFP enabled. */
3380 txdctl |= 32;
3381 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3382 }
3383 /* Enable VLAN tag insert/strip */ 3616 /* Enable VLAN tag insert/strip */
3384 adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3617 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3385 3618
3386 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3619 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3620
3621 /* reconfigure the hardware */
3622 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3387} 3623}
3388 3624
3389#endif 3625#endif
@@ -3515,8 +3751,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3515 case ixgbe_mac_82598EB: 3751 case ixgbe_mac_82598EB:
3516 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3752 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3517 break; 3753 break;
3518 default:
3519 case ixgbe_mac_82599EB: 3754 case ixgbe_mac_82599EB:
3755 case ixgbe_mac_X540:
3756 default:
3520 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3757 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3521 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3758 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3522 break; 3759 break;
@@ -3560,13 +3797,24 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3560 else 3797 else
3561 ixgbe_configure_msi_and_legacy(adapter); 3798 ixgbe_configure_msi_and_legacy(adapter);
3562 3799
3563         /* enable the optics */                                       3800         /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
3564 if (hw->phy.multispeed_fiber) 3801 if (hw->mac.ops.enable_tx_laser &&
3802 ((hw->phy.multispeed_fiber) ||
3803 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
3804 (hw->mac.type == ixgbe_mac_82599EB))))
3565 hw->mac.ops.enable_tx_laser(hw); 3805 hw->mac.ops.enable_tx_laser(hw);
3566 3806
3567 clear_bit(__IXGBE_DOWN, &adapter->state); 3807 clear_bit(__IXGBE_DOWN, &adapter->state);
3568 ixgbe_napi_enable_all(adapter); 3808 ixgbe_napi_enable_all(adapter);
3569 3809
3810 if (ixgbe_is_sfp(hw)) {
3811 ixgbe_sfp_link_config(adapter);
3812 } else {
3813 err = ixgbe_non_sfp_link_config(hw);
3814 if (err)
3815 e_err(probe, "link_config FAILED %d\n", err);
3816 }
3817
3570 /* clear any pending interrupts, may auto mask */ 3818 /* clear any pending interrupts, may auto mask */
3571 IXGBE_READ_REG(hw, IXGBE_EICR); 3819 IXGBE_READ_REG(hw, IXGBE_EICR);
3572 ixgbe_irq_enable(adapter, true, true); 3820 ixgbe_irq_enable(adapter, true, true);
@@ -3589,26 +3837,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3589 * If we're not hot-pluggable SFP+, we just need to configure link 3837 * If we're not hot-pluggable SFP+, we just need to configure link
3590 * and bring it up. 3838 * and bring it up.
3591 */ 3839 */
3592 if (hw->phy.type == ixgbe_phy_unknown) { 3840 if (hw->phy.type == ixgbe_phy_unknown)
3593 err = hw->phy.ops.identify(hw); 3841 schedule_work(&adapter->sfp_config_module_task);
3594 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3595 /*
3596 * Take the device down and schedule the sfp tasklet
3597 * which will unregister_netdev and log it.
3598 */
3599 ixgbe_down(adapter);
3600 schedule_work(&adapter->sfp_config_module_task);
3601 return err;
3602 }
3603 }
3604
3605 if (ixgbe_is_sfp(hw)) {
3606 ixgbe_sfp_link_config(adapter);
3607 } else {
3608 err = ixgbe_non_sfp_link_config(hw);
3609 if (err)
3610 e_err(probe, "link_config FAILED %d\n", err);
3611 }
3612 3842
3613 /* enable transmits */ 3843 /* enable transmits */
3614 netif_tx_start_all_queues(adapter->netdev); 3844 netif_tx_start_all_queues(adapter->netdev);
@@ -3686,15 +3916,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3686 3916
3687/** 3917/**
3688 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 3918 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3689 * @adapter: board private structure
3690 * @rx_ring: ring to free buffers from 3919 * @rx_ring: ring to free buffers from
3691 **/ 3920 **/
3692static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 3921static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
3693 struct ixgbe_ring *rx_ring)
3694{ 3922{
3695 struct pci_dev *pdev = adapter->pdev; 3923 struct device *dev = rx_ring->dev;
3696 unsigned long size; 3924 unsigned long size;
3697 unsigned int i; 3925 u16 i;
3698 3926
3699 /* ring already cleared, nothing to do */ 3927 /* ring already cleared, nothing to do */
3700 if (!rx_ring->rx_buffer_info) 3928 if (!rx_ring->rx_buffer_info)
@@ -3706,7 +3934,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3706 3934
3707 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3935 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3708 if (rx_buffer_info->dma) { 3936 if (rx_buffer_info->dma) {
3709 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 3937 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
3710 rx_ring->rx_buf_len, 3938 rx_ring->rx_buf_len,
3711 DMA_FROM_DEVICE); 3939 DMA_FROM_DEVICE);
3712 rx_buffer_info->dma = 0; 3940 rx_buffer_info->dma = 0;
@@ -3717,7 +3945,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3717 do { 3945 do {
3718 struct sk_buff *this = skb; 3946 struct sk_buff *this = skb;
3719 if (IXGBE_RSC_CB(this)->delay_unmap) { 3947 if (IXGBE_RSC_CB(this)->delay_unmap) {
3720 dma_unmap_single(&pdev->dev, 3948 dma_unmap_single(dev,
3721 IXGBE_RSC_CB(this)->dma, 3949 IXGBE_RSC_CB(this)->dma,
3722 rx_ring->rx_buf_len, 3950 rx_ring->rx_buf_len,
3723 DMA_FROM_DEVICE); 3951 DMA_FROM_DEVICE);
@@ -3731,7 +3959,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3731 if (!rx_buffer_info->page) 3959 if (!rx_buffer_info->page)
3732 continue; 3960 continue;
3733 if (rx_buffer_info->page_dma) { 3961 if (rx_buffer_info->page_dma) {
3734 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 3962 dma_unmap_page(dev, rx_buffer_info->page_dma,
3735 PAGE_SIZE / 2, DMA_FROM_DEVICE); 3963 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3736 rx_buffer_info->page_dma = 0; 3964 rx_buffer_info->page_dma = 0;
3737 } 3965 }
@@ -3748,24 +3976,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3748 3976
3749 rx_ring->next_to_clean = 0; 3977 rx_ring->next_to_clean = 0;
3750 rx_ring->next_to_use = 0; 3978 rx_ring->next_to_use = 0;
3751
3752 if (rx_ring->head)
3753 writel(0, adapter->hw.hw_addr + rx_ring->head);
3754 if (rx_ring->tail)
3755 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3756} 3979}
3757 3980
3758/** 3981/**
3759 * ixgbe_clean_tx_ring - Free Tx Buffers 3982 * ixgbe_clean_tx_ring - Free Tx Buffers
3760 * @adapter: board private structure
3761 * @tx_ring: ring to be cleaned 3983 * @tx_ring: ring to be cleaned
3762 **/ 3984 **/
3763static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 3985static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
3764 struct ixgbe_ring *tx_ring)
3765{ 3986{
3766 struct ixgbe_tx_buffer *tx_buffer_info; 3987 struct ixgbe_tx_buffer *tx_buffer_info;
3767 unsigned long size; 3988 unsigned long size;
3768 unsigned int i; 3989 u16 i;
3769 3990
3770 /* ring already cleared, nothing to do */ 3991 /* ring already cleared, nothing to do */
3771 if (!tx_ring->tx_buffer_info) 3992 if (!tx_ring->tx_buffer_info)
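
Note: both clean routines drop the adapter argument; each ring now caches its dev (and netdev), so per-ring helpers are self-sufficient and DMA unmaps go against rx_ring->dev directly. A toy standalone sketch of the refactor, names illustrative:

    #include <stdio.h>

    struct device { const char *name; };

    struct ring {
            struct device *dev;     /* cached at allocation time */
            unsigned int count;
    };

    static void clean_ring(struct ring *r)
    {
            /* no adapter needed: unmap against r->dev directly */
            printf("cleaning %u entries on %s\n", r->count, r->dev->name);
    }

    int main(void)
    {
            struct device d = { "0000:01:00.0" };
            struct ring r = { &d, 512 };
            clean_ring(&r);
            return 0;
    }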
@@ -3774,7 +3995,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3774 /* Free all the Tx ring sk_buffs */ 3995 /* Free all the Tx ring sk_buffs */
3775 for (i = 0; i < tx_ring->count; i++) { 3996 for (i = 0; i < tx_ring->count; i++) {
3776 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3997 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3777 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3998 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3778 } 3999 }
3779 4000
3780 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4001 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3785,11 +4006,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3785 4006
3786 tx_ring->next_to_use = 0; 4007 tx_ring->next_to_use = 0;
3787 tx_ring->next_to_clean = 0; 4008 tx_ring->next_to_clean = 0;
3788
3789 if (tx_ring->head)
3790 writel(0, adapter->hw.hw_addr + tx_ring->head);
3791 if (tx_ring->tail)
3792 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3793} 4009}
3794 4010
3795/** 4011/**
@@ -3801,7 +4017,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3801 int i; 4017 int i;
3802 4018
3803 for (i = 0; i < adapter->num_rx_queues; i++) 4019 for (i = 0; i < adapter->num_rx_queues; i++)
3804 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); 4020 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
3805} 4021}
3806 4022
3807/** 4023/**
@@ -3813,7 +4029,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3813 int i; 4029 int i;
3814 4030
3815 for (i = 0; i < adapter->num_tx_queues; i++) 4031 for (i = 0; i < adapter->num_tx_queues; i++)
3816 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); 4032 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
3817} 4033}
3818 4034
3819void ixgbe_down(struct ixgbe_adapter *adapter) 4035void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3822,7 +4038,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3822 struct ixgbe_hw *hw = &adapter->hw; 4038 struct ixgbe_hw *hw = &adapter->hw;
3823 u32 rxctrl; 4039 u32 rxctrl;
3824 u32 txdctl; 4040 u32 txdctl;
3825 int i, j; 4041 int i;
3826 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4042 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3827 4043
3828 /* signal that we are down to the interrupt handler */ 4044 /* signal that we are down to the interrupt handler */
@@ -3880,26 +4096,36 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3880 4096
3881 /* disable transmits in the hardware now that interrupts are off */ 4097 /* disable transmits in the hardware now that interrupts are off */
3882 for (i = 0; i < adapter->num_tx_queues; i++) { 4098 for (i = 0; i < adapter->num_tx_queues; i++) {
3883 j = adapter->tx_ring[i]->reg_idx; 4099 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
3884 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 4100 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3885 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 4101 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
3886 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 4102 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3887 } 4103 }
3888 /* Disable the Tx DMA engine on 82599 */ 4104 /* Disable the Tx DMA engine on 82599 */
3889 if (hw->mac.type == ixgbe_mac_82599EB) 4105 switch (hw->mac.type) {
4106 case ixgbe_mac_82599EB:
4107 case ixgbe_mac_X540:
3890 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 4108 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3891 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 4109 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3892 ~IXGBE_DMATXCTL_TE)); 4110 ~IXGBE_DMATXCTL_TE));
3893 4111 break;
3894 /* power down the optics */ 4112 default:
3895 if (hw->phy.multispeed_fiber) 4113 break;
3896 hw->mac.ops.disable_tx_laser(hw); 4114 }
3897 4115
3898 /* clear n-tuple filters that are cached */ 4116 /* clear n-tuple filters that are cached */
3899 ethtool_ntuple_flush(netdev); 4117 ethtool_ntuple_flush(netdev);
3900 4118
3901 if (!pci_channel_offline(adapter->pdev)) 4119 if (!pci_channel_offline(adapter->pdev))
3902 ixgbe_reset(adapter); 4120 ixgbe_reset(adapter);
4121
4122 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4123 if (hw->mac.ops.disable_tx_laser &&
4124 ((hw->phy.multispeed_fiber) ||
4125 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4126 (hw->mac.type == ixgbe_mac_82599EB))))
4127 hw->mac.ops.disable_tx_laser(hw);
4128
3903 ixgbe_clean_all_tx_rings(adapter); 4129 ixgbe_clean_all_tx_rings(adapter);
3904 ixgbe_clean_all_rx_rings(adapter); 4130 ixgbe_clean_all_rx_rings(adapter);
3905 4131
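
Note: the Tx DMA disable is a read-modify-write that clears a single enable bit, and the optics power-down now runs after the reset so a live laser is not cut before the MAC quiesces. A generic standalone sketch of the read-modify-write shape; the mask name is illustrative:

    #include <stdint.h>

    #define CTL_TX_ENABLE (1u << 0)

    static void reg_clear_bits(volatile uint32_t *reg, uint32_t mask)
    {
            uint32_t v = *reg;      /* read */
            v &= ~mask;             /* modify */
            *reg = v;               /* write back */
    }

    int main(void)
    {
            uint32_t fake_reg = 0x3;
            reg_clear_bits(&fake_reg, CTL_TX_ENABLE);
            return fake_reg == 0x2 ? 0 : 1;
    }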
@@ -3924,10 +4150,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
3924 int tx_clean_complete, work_done = 0; 4150 int tx_clean_complete, work_done = 0;
3925 4151
3926#ifdef CONFIG_IXGBE_DCA 4152#ifdef CONFIG_IXGBE_DCA
3927 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 4153 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3928 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); 4154 ixgbe_update_dca(q_vector);
3929 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3930 }
3931#endif 4155#endif
3932 4156
3933 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); 4157 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3955,6 +4179,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
3955{ 4179{
3956 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4180 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3957 4181
4182 adapter->tx_timeout_count++;
4183
3958 /* Do the reset outside of interrupt context */ 4184 /* Do the reset outside of interrupt context */
3959 schedule_work(&adapter->reset_task); 4185 schedule_work(&adapter->reset_task);
3960} 4186}
@@ -3969,8 +4195,6 @@ static void ixgbe_reset_task(struct work_struct *work)
3969 test_bit(__IXGBE_RESETTING, &adapter->state)) 4195 test_bit(__IXGBE_RESETTING, &adapter->state))
3970 return; 4196 return;
3971 4197
3972 adapter->tx_timeout_count++;
3973
3974 ixgbe_dump(adapter); 4198 ixgbe_dump(adapter);
3975 netdev_err(adapter->netdev, "Reset adapter\n"); 4199 netdev_err(adapter->netdev, "Reset adapter\n");
3976 ixgbe_reinit_locked(adapter); 4200 ixgbe_reinit_locked(adapter);
@@ -4220,19 +4444,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4220static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 4444static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4221{ 4445{
4222 int i; 4446 int i;
4223 bool ret = false;
4224 4447
4225 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4448 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4226 for (i = 0; i < adapter->num_rx_queues; i++) 4449 return false;
4227 adapter->rx_ring[i]->reg_idx = i;
4228 for (i = 0; i < adapter->num_tx_queues; i++)
4229 adapter->tx_ring[i]->reg_idx = i;
4230 ret = true;
4231 } else {
4232 ret = false;
4233 }
4234 4450
4235 return ret; 4451 for (i = 0; i < adapter->num_rx_queues; i++)
4452 adapter->rx_ring[i]->reg_idx = i;
4453 for (i = 0; i < adapter->num_tx_queues; i++)
4454 adapter->tx_ring[i]->reg_idx = i;
4455
4456 return true;
4236} 4457}
4237 4458
4238#ifdef CONFIG_IXGBE_DCB 4459#ifdef CONFIG_IXGBE_DCB
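
Note: the RSS helper is flattened into a guard clause: bail out early when the flag is clear, then do the one real mapping, losing a nesting level and the ret temporary without changing behaviour. A toy standalone version of the same shape:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cache_rings(bool rss_enabled, int *reg_idx, int n)
    {
            int i;

            if (!rss_enabled)       /* early return replaces the else arm */
                    return false;

            for (i = 0; i < n; i++)
                    reg_idx[i] = i; /* 1:1 queue-to-register mapping */
            return true;
    }

    int main(void)
    {
            int idx[4];
            printf("%d\n", cache_rings(true, idx, 4));  /* prints 1 */
            return 0;
    }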
@@ -4249,71 +4470,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4249 bool ret = false; 4470 bool ret = false;
4250 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4471 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4251 4472
4252 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4473 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4253 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 4474 return false;
4254 /* the number of queues is assumed to be symmetric */
4255 for (i = 0; i < dcb_i; i++) {
4256 adapter->rx_ring[i]->reg_idx = i << 3;
4257 adapter->tx_ring[i]->reg_idx = i << 2;
4258 }
4259 ret = true;
4260 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4261 if (dcb_i == 8) {
4262 /*
4263 * Tx TC0 starts at: descriptor queue 0
4264 * Tx TC1 starts at: descriptor queue 32
4265 * Tx TC2 starts at: descriptor queue 64
4266 * Tx TC3 starts at: descriptor queue 80
4267 * Tx TC4 starts at: descriptor queue 96
4268 * Tx TC5 starts at: descriptor queue 104
4269 * Tx TC6 starts at: descriptor queue 112
4270 * Tx TC7 starts at: descriptor queue 120
4271 *
4272 * Rx TC0-TC7 are offset by 16 queues each
4273 */
4274 for (i = 0; i < 3; i++) {
4275 adapter->tx_ring[i]->reg_idx = i << 5;
4276 adapter->rx_ring[i]->reg_idx = i << 4;
4277 }
4278 for ( ; i < 5; i++) {
4279 adapter->tx_ring[i]->reg_idx =
4280 ((i + 2) << 4);
4281 adapter->rx_ring[i]->reg_idx = i << 4;
4282 }
4283 for ( ; i < dcb_i; i++) {
4284 adapter->tx_ring[i]->reg_idx =
4285 ((i + 8) << 3);
4286 adapter->rx_ring[i]->reg_idx = i << 4;
4287 }
4288 4475
4289 ret = true; 4476 /* the number of queues is assumed to be symmetric */
4290 } else if (dcb_i == 4) { 4477 switch (adapter->hw.mac.type) {
4291 /* 4478 case ixgbe_mac_82598EB:
4292 * Tx TC0 starts at: descriptor queue 0 4479 for (i = 0; i < dcb_i; i++) {
4293 * Tx TC1 starts at: descriptor queue 64 4480 adapter->rx_ring[i]->reg_idx = i << 3;
4294 * Tx TC2 starts at: descriptor queue 96 4481 adapter->tx_ring[i]->reg_idx = i << 2;
4295 * Tx TC3 starts at: descriptor queue 112 4482 }
4296 * 4483 ret = true;
4297 * Rx TC0-TC3 are offset by 32 queues each 4484 break;
4298 */ 4485 case ixgbe_mac_82599EB:
4299 adapter->tx_ring[0]->reg_idx = 0; 4486 case ixgbe_mac_X540:
4300 adapter->tx_ring[1]->reg_idx = 64; 4487 if (dcb_i == 8) {
4301 adapter->tx_ring[2]->reg_idx = 96; 4488 /*
4302 adapter->tx_ring[3]->reg_idx = 112; 4489 * Tx TC0 starts at: descriptor queue 0
4303 for (i = 0 ; i < dcb_i; i++) 4490 * Tx TC1 starts at: descriptor queue 32
4304 adapter->rx_ring[i]->reg_idx = i << 5; 4491 * Tx TC2 starts at: descriptor queue 64
4305 4492 * Tx TC3 starts at: descriptor queue 80
4306 ret = true; 4493 * Tx TC4 starts at: descriptor queue 96
4307 } else { 4494 * Tx TC5 starts at: descriptor queue 104
4308 ret = false; 4495 * Tx TC6 starts at: descriptor queue 112
4496 * Tx TC7 starts at: descriptor queue 120
4497 *
4498 * Rx TC0-TC7 are offset by 16 queues each
4499 */
4500 for (i = 0; i < 3; i++) {
4501 adapter->tx_ring[i]->reg_idx = i << 5;
4502 adapter->rx_ring[i]->reg_idx = i << 4;
4309 } 4503 }
4310 } else { 4504 for ( ; i < 5; i++) {
4311 ret = false; 4505 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4506 adapter->rx_ring[i]->reg_idx = i << 4;
4507 }
4508 for ( ; i < dcb_i; i++) {
4509 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4510 adapter->rx_ring[i]->reg_idx = i << 4;
4511 }
4512 ret = true;
4513 } else if (dcb_i == 4) {
4514 /*
4515 * Tx TC0 starts at: descriptor queue 0
4516 * Tx TC1 starts at: descriptor queue 64
4517 * Tx TC2 starts at: descriptor queue 96
4518 * Tx TC3 starts at: descriptor queue 112
4519 *
4520 * Rx TC0-TC3 are offset by 32 queues each
4521 */
4522 adapter->tx_ring[0]->reg_idx = 0;
4523 adapter->tx_ring[1]->reg_idx = 64;
4524 adapter->tx_ring[2]->reg_idx = 96;
4525 adapter->tx_ring[3]->reg_idx = 112;
4526 for (i = 0 ; i < dcb_i; i++)
4527 adapter->rx_ring[i]->reg_idx = i << 5;
4528 ret = true;
4312 } 4529 }
4313 } else { 4530 break;
4314 ret = false; 4531 default:
4532 break;
4315 } 4533 }
4316
4317 return ret; 4534 return ret;
4318} 4535}
4319#endif 4536#endif
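
Note: the comment block encodes the 82599 8-TC descriptor-queue layout, and the shifts reproduce it exactly: TC0-TC2 use i << 5 (0, 32, 64), TC3-TC4 use (i + 2) << 4 (80, 96), and TC5-TC7 use (i + 8) << 3 (104, 112, 120). A standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int i;

            for (i = 0; i < 3; i++)
                    printf("TC%d -> %d\n", i, i << 5);        /* 0, 32, 64 */
            for (; i < 5; i++)
                    printf("TC%d -> %d\n", i, (i + 2) << 4);  /* 80, 96 */
            for (; i < 8; i++)
                    printf("TC%d -> %d\n", i, (i + 8) << 3);  /* 104..120 */
            return 0;
    }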
@@ -4353,55 +4570,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4353 */ 4570 */
4354static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 4571static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4355{ 4572{
4356 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4357 bool ret = false;
4358 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4573 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4574 int i;
4575 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4576
4577 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4578 return false;
4359 4579
4360 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4361#ifdef CONFIG_IXGBE_DCB 4580#ifdef CONFIG_IXGBE_DCB
4362 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4581 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4363 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 4582 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4364 4583
4365 ixgbe_cache_ring_dcb(adapter); 4584 ixgbe_cache_ring_dcb(adapter);
4366 /* find out queues in TC for FCoE */ 4585 /* find out queues in TC for FCoE */
4367 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; 4586 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4368 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; 4587 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4369 /* 4588 /*
4370 * In 82599, the number of Tx queues for each traffic 4589 * In 82599, the number of Tx queues for each traffic
4371 * class for both 8-TC and 4-TC modes is: 4590 * class for both 8-TC and 4-TC modes is:
4372 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 4591 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4373 * 8 TCs: 32 32 16 16 8 8 8 8 4592 * 8 TCs: 32 32 16 16 8 8 8 8
4374 * 4 TCs: 64 64 32 32 4593 * 4 TCs: 64 64 32 32
4375 * We have max 8 queues for FCoE, where 8 is the 4594 * We have max 8 queues for FCoE, where 8 is the
4376 * FCoE redirection table size. If TC for FCoE is 4595 * FCoE redirection table size. If TC for FCoE is
4377 * less than or equal to TC3, we have enough queues 4596 * less than or equal to TC3, we have enough queues
4378 * to add max of 8 queues for FCoE, so we start FCoE 4597 * to add max of 8 queues for FCoE, so we start FCoE
4379 * tx descriptor from the next one, i.e., reg_idx + 1. 4598 * Tx queue from the next one, i.e., reg_idx + 1.
4380 * If TC for FCoE is above TC3, implying 8 TC mode, 4599 * If TC for FCoE is above TC3, implying 8 TC mode,
4381 * and we need 8 for FCoE, we have to take all queues 4600 * and we need 8 for FCoE, we have to take all queues
4382 * in that traffic class for FCoE. 4601 * in that traffic class for FCoE.
4383 */ 4602 */
4384 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) 4603 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4385 fcoe_tx_i--; 4604 fcoe_tx_i--;
4386 } 4605 }
4387#endif /* CONFIG_IXGBE_DCB */ 4606#endif /* CONFIG_IXGBE_DCB */
4388 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4607 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4389 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4608 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4390 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4609 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4391 ixgbe_cache_ring_fdir(adapter); 4610 ixgbe_cache_ring_fdir(adapter);
4392 else 4611 else
4393 ixgbe_cache_ring_rss(adapter); 4612 ixgbe_cache_ring_rss(adapter);
4394 4613
4395 fcoe_rx_i = f->mask; 4614 fcoe_rx_i = f->mask;
4396 fcoe_tx_i = f->mask; 4615 fcoe_tx_i = f->mask;
4397 }
4398 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4399 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4400 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4401 }
4402 ret = true;
4403 } 4616 }
4404 return ret; 4617 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4618 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4619 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4620 }
4621 return true;
4405} 4622}
4406 4623
4407#endif /* IXGBE_FCOE */ 4624#endif /* IXGBE_FCOE */
@@ -4470,65 +4687,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4470 **/ 4687 **/
4471static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 4688static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4472{ 4689{
4473 int i; 4690 int rx = 0, tx = 0, nid = adapter->node;
4474 int orig_node = adapter->node;
4475 4691
4476 for (i = 0; i < adapter->num_tx_queues; i++) { 4692 if (nid < 0 || !node_online(nid))
4477 struct ixgbe_ring *ring = adapter->tx_ring[i]; 4693 nid = first_online_node;
4478 if (orig_node == -1) { 4694
4479 int cur_node = next_online_node(adapter->node); 4695 for (; tx < adapter->num_tx_queues; tx++) {
4480 if (cur_node == MAX_NUMNODES) 4696 struct ixgbe_ring *ring;
4481 cur_node = first_online_node; 4697
4482 adapter->node = cur_node; 4698 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4483 }
4484 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4485 adapter->node);
4486 if (!ring) 4699 if (!ring)
4487 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4700 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4488 if (!ring) 4701 if (!ring)
4489 goto err_tx_ring_allocation; 4702 goto err_allocation;
4490 ring->count = adapter->tx_ring_count; 4703 ring->count = adapter->tx_ring_count;
4491 ring->queue_index = i; 4704 ring->queue_index = tx;
4492 ring->numa_node = adapter->node; 4705 ring->numa_node = nid;
4706 ring->dev = &adapter->pdev->dev;
4707 ring->netdev = adapter->netdev;
4493 4708
4494 adapter->tx_ring[i] = ring; 4709 adapter->tx_ring[tx] = ring;
4495 } 4710 }
4496 4711
4497 /* Restore the adapter's original node */ 4712 for (; rx < adapter->num_rx_queues; rx++) {
4498 adapter->node = orig_node; 4713 struct ixgbe_ring *ring;
4499 4714
4500 for (i = 0; i < adapter->num_rx_queues; i++) { 4715 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4501 struct ixgbe_ring *ring = adapter->rx_ring[i];
4502 if (orig_node == -1) {
4503 int cur_node = next_online_node(adapter->node);
4504 if (cur_node == MAX_NUMNODES)
4505 cur_node = first_online_node;
4506 adapter->node = cur_node;
4507 }
4508 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4509 adapter->node);
4510 if (!ring) 4716 if (!ring)
4511 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4717 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4512 if (!ring) 4718 if (!ring)
4513 goto err_rx_ring_allocation; 4719 goto err_allocation;
4514 ring->count = adapter->rx_ring_count; 4720 ring->count = adapter->rx_ring_count;
4515 ring->queue_index = i; 4721 ring->queue_index = rx;
4516 ring->numa_node = adapter->node; 4722 ring->numa_node = nid;
4723 ring->dev = &adapter->pdev->dev;
4724 ring->netdev = adapter->netdev;
4517 4725
4518 adapter->rx_ring[i] = ring; 4726 adapter->rx_ring[rx] = ring;
4519 } 4727 }
4520 4728
4521 /* Restore the adapter's original node */
4522 adapter->node = orig_node;
4523
4524 ixgbe_cache_ring_register(adapter); 4729 ixgbe_cache_ring_register(adapter);
4525 4730
4526 return 0; 4731 return 0;
4527 4732
4528err_rx_ring_allocation: 4733err_allocation:
4529 for (i = 0; i < adapter->num_tx_queues; i++) 4734 while (tx)
4530 kfree(adapter->tx_ring[i]); 4735 kfree(adapter->tx_ring[--tx]);
4531err_tx_ring_allocation: 4736
4737 while (rx)
4738 kfree(adapter->rx_ring[--rx]);
4532 return -ENOMEM; 4739 return -ENOMEM;
4533} 4740}
4534 4741
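
Note: queue allocation is simplified to resolve the NUMA node once (falling back to first_online_node when the stored node is negative or offline), try a node-local kzalloc_node, and fall back to plain kzalloc; the error path then frees exactly the rings that were allocated. A kernel-style sketch of the idiom; the struct is illustrative:

    #include <linux/slab.h>
    #include <linux/nodemask.h>

    struct my_ring {
            u16 count;
    };

    static struct my_ring *alloc_ring_on(int nid)
    {
            struct my_ring *ring;

            if (nid < 0 || !node_online(nid))
                    nid = first_online_node;

            ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
            if (!ring)
                    ring = kzalloc(sizeof(*ring), GFP_KERNEL);
            return ring;    /* NULL only if both attempts failed */
    }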
@@ -4750,6 +4957,11 @@ err_set_interrupt:
4750 return err; 4957 return err;
4751} 4958}
4752 4959
4960static void ring_free_rcu(struct rcu_head *head)
4961{
4962 kfree(container_of(head, struct ixgbe_ring, rcu));
4963}
4964
4753/** 4965/**
4754 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 4966 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4755 * @adapter: board private structure to clear interrupt scheme on 4967 * @adapter: board private structure to clear interrupt scheme on
@@ -4766,10 +4978,18 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4766 adapter->tx_ring[i] = NULL; 4978 adapter->tx_ring[i] = NULL;
4767 } 4979 }
4768 for (i = 0; i < adapter->num_rx_queues; i++) { 4980 for (i = 0; i < adapter->num_rx_queues; i++) {
4769 kfree(adapter->rx_ring[i]); 4981 struct ixgbe_ring *ring = adapter->rx_ring[i];
4982
4983 /* ixgbe_get_stats64() might access this ring, we must wait
4984 * a grace period before freeing it.
4985 */
4986 call_rcu(&ring->rcu, ring_free_rcu);
4770 adapter->rx_ring[i] = NULL; 4987 adapter->rx_ring[i] = NULL;
4771 } 4988 }
4772 4989
4990 adapter->num_tx_queues = 0;
4991 adapter->num_rx_queues = 0;
4992
4773 ixgbe_free_q_vectors(adapter); 4993 ixgbe_free_q_vectors(adapter);
4774 ixgbe_reset_interrupt_capability(adapter); 4994 ixgbe_reset_interrupt_capability(adapter);
4775} 4995}
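
Note: Rx rings must now be freed through call_rcu because ixgbe_get_stats64() may still be walking them under rcu_read_lock(); the rcu_head is embedded in the ring and container_of recovers it in the callback. A kernel-style sketch of the pattern, with illustrative names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_ring {
            struct rcu_head rcu;
            /* ... counters read by lockless stats code ... */
    };

    static void my_ring_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct my_ring, rcu));
    }

    /* writer: unpublish first, then defer the free past a grace period */
    static void my_ring_release(struct my_ring **slot)
    {
            struct my_ring *ring = *slot;

            rcu_assign_pointer(*slot, NULL);
            call_rcu(&ring->rcu, my_ring_free_rcu);
    }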
@@ -4843,6 +5063,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4843 int j; 5063 int j;
4844 struct tc_configuration *tc; 5064 struct tc_configuration *tc;
4845#endif 5065#endif
5066 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4846 5067
4847 /* PCI config space info */ 5068 /* PCI config space info */
4848 5069
@@ -4857,11 +5078,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4857 adapter->ring_feature[RING_F_RSS].indices = rss; 5078 adapter->ring_feature[RING_F_RSS].indices = rss;
4858 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 5079 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4859 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 5080 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4860 if (hw->mac.type == ixgbe_mac_82598EB) { 5081 switch (hw->mac.type) {
5082 case ixgbe_mac_82598EB:
4861 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5083 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4862 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 5084 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4863 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 5085 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4864 } else if (hw->mac.type == ixgbe_mac_82599EB) { 5086 break;
5087 case ixgbe_mac_82599EB:
5088 case ixgbe_mac_X540:
4865 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 5089 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4866 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 5090 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4867 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5091 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4890,6 +5114,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4890 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 5114 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4891#endif 5115#endif
4892#endif /* IXGBE_FCOE */ 5116#endif /* IXGBE_FCOE */
5117 break;
5118 default:
5119 break;
4893 } 5120 }
4894 5121
4895#ifdef CONFIG_IXGBE_DCB 5122#ifdef CONFIG_IXGBE_DCB
@@ -4919,8 +5146,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4919#ifdef CONFIG_DCB 5146#ifdef CONFIG_DCB
4920 adapter->last_lfc_mode = hw->fc.current_mode; 5147 adapter->last_lfc_mode = hw->fc.current_mode;
4921#endif 5148#endif
4922 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 5149 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4923 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 5150 hw->fc.low_water = FC_LOW_WATER(max_frame);
4924 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 5151 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4925 hw->fc.send_xon = true; 5152 hw->fc.send_xon = true;
4926 hw->fc.disable_fc_autoneg = false; 5153 hw->fc.disable_fc_autoneg = false;
@@ -4958,30 +5185,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4958 5185
4959/** 5186/**
4960 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 5187 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4961 * @adapter: board private structure
4962 * @tx_ring: tx descriptor ring (for a specific queue) to setup 5188 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4963 * 5189 *
4964 * Return 0 on success, negative on failure 5190 * Return 0 on success, negative on failure
4965 **/ 5191 **/
4966int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 5192int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4967 struct ixgbe_ring *tx_ring)
4968{ 5193{
4969 struct pci_dev *pdev = adapter->pdev; 5194 struct device *dev = tx_ring->dev;
4970 int size; 5195 int size;
4971 5196
4972 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5197 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4973 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); 5198 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
4974 if (!tx_ring->tx_buffer_info) 5199 if (!tx_ring->tx_buffer_info)
4975 tx_ring->tx_buffer_info = vmalloc(size); 5200 tx_ring->tx_buffer_info = vzalloc(size);
4976 if (!tx_ring->tx_buffer_info) 5201 if (!tx_ring->tx_buffer_info)
4977 goto err; 5202 goto err;
4978 memset(tx_ring->tx_buffer_info, 0, size);
4979 5203
4980 /* round up to nearest 4K */ 5204 /* round up to nearest 4K */
4981 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5205 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4982 tx_ring->size = ALIGN(tx_ring->size, 4096); 5206 tx_ring->size = ALIGN(tx_ring->size, 4096);
4983 5207
4984 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 5208 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4985 &tx_ring->dma, GFP_KERNEL); 5209 &tx_ring->dma, GFP_KERNEL);
4986 if (!tx_ring->desc) 5210 if (!tx_ring->desc)
4987 goto err; 5211 goto err;
@@ -4994,7 +5218,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4994err: 5218err:
4995 vfree(tx_ring->tx_buffer_info); 5219 vfree(tx_ring->tx_buffer_info);
4996 tx_ring->tx_buffer_info = NULL; 5220 tx_ring->tx_buffer_info = NULL;
4997 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); 5221 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4998 return -ENOMEM; 5222 return -ENOMEM;
4999} 5223}
5000 5224
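
Note: vmalloc_node plus memset becomes vzalloc_node, the zeroing allocator merged in the same kernel cycle, again with a plain vzalloc fallback, and failure is routed through a single err: label. A kernel-style sketch of the allocation idiom:

    #include <linux/vmalloc.h>

    static void *alloc_buffer_info(size_t size, int nid)
    {
            void *p = vzalloc_node(size, nid);  /* zeroed, node-local */

            if (!p)
                    p = vzalloc(size);          /* zeroed, any node */
            return p;
    }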
@@ -5013,7 +5237,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5013 int i, err = 0; 5237 int i, err = 0;
5014 5238
5015 for (i = 0; i < adapter->num_tx_queues; i++) { 5239 for (i = 0; i < adapter->num_tx_queues; i++) {
5016 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 5240 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5017 if (!err) 5241 if (!err)
5018 continue; 5242 continue;
5019 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5243 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5025,48 +5249,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5025 5249
5026/** 5250/**
5027 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5251 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5028 * @adapter: board private structure
5029 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5252 * @rx_ring: rx descriptor ring (for a specific queue) to setup
5030 * 5253 *
5031 * Returns 0 on success, negative on failure 5254 * Returns 0 on success, negative on failure
5032 **/ 5255 **/
5033int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 5256int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5034 struct ixgbe_ring *rx_ring)
5035{ 5257{
5036 struct pci_dev *pdev = adapter->pdev; 5258 struct device *dev = rx_ring->dev;
5037 int size; 5259 int size;
5038 5260
5039 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5261 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5040 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); 5262 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
5041 if (!rx_ring->rx_buffer_info) 5263 if (!rx_ring->rx_buffer_info)
5042 rx_ring->rx_buffer_info = vmalloc(size); 5264 rx_ring->rx_buffer_info = vzalloc(size);
5043 if (!rx_ring->rx_buffer_info) { 5265 if (!rx_ring->rx_buffer_info)
5044 e_err(probe, "vmalloc allocation failed for the Rx " 5266 goto err;
5045 "descriptor ring\n");
5046 goto alloc_failed;
5047 }
5048 memset(rx_ring->rx_buffer_info, 0, size);
5049 5267
5050 /* Round up to nearest 4K */ 5268 /* Round up to nearest 4K */
5051 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5269 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5052 rx_ring->size = ALIGN(rx_ring->size, 4096); 5270 rx_ring->size = ALIGN(rx_ring->size, 4096);
5053 5271
5054 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 5272 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5055 &rx_ring->dma, GFP_KERNEL); 5273 &rx_ring->dma, GFP_KERNEL);
5056 5274
5057 if (!rx_ring->desc) { 5275 if (!rx_ring->desc)
5058 e_err(probe, "Memory allocation failed for the Rx " 5276 goto err;
5059 "descriptor ring\n");
5060 vfree(rx_ring->rx_buffer_info);
5061 goto alloc_failed;
5062 }
5063 5277
5064 rx_ring->next_to_clean = 0; 5278 rx_ring->next_to_clean = 0;
5065 rx_ring->next_to_use = 0; 5279 rx_ring->next_to_use = 0;
5066 5280
5067 return 0; 5281 return 0;
5068 5282err:
5069alloc_failed: 5283 vfree(rx_ring->rx_buffer_info);
5284 rx_ring->rx_buffer_info = NULL;
5285 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5070 return -ENOMEM; 5286 return -ENOMEM;
5071} 5287}
5072 5288
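
Note: Rx setup now mirrors Tx: one err: label frees whatever was allocated and NULLs the pointer, leaving a consistent state the caller cannot double-free. A toy standalone version of that single-error-path idiom:

    #include <stdlib.h>

    struct res { void *buf_info; void *desc; };

    static int setup(struct res *r, size_t n)
    {
            r->buf_info = calloc(n, 64);
            if (!r->buf_info)
                    goto err;
            r->desc = calloc(n, 16);
            if (!r->desc)
                    goto err;
            return 0;
    err:
            free(r->buf_info);
            r->buf_info = NULL;     /* leave a consistent, freeable state */
            return -1;
    }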
@@ -5080,13 +5296,12 @@ alloc_failed:
5080 * 5296 *
5081 * Return 0 on success, negative on failure 5297 * Return 0 on success, negative on failure
5082 **/ 5298 **/
5083
5084static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 5299static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5085{ 5300{
5086 int i, err = 0; 5301 int i, err = 0;
5087 5302
5088 for (i = 0; i < adapter->num_rx_queues; i++) { 5303 for (i = 0; i < adapter->num_rx_queues; i++) {
5089 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 5304 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5090 if (!err) 5305 if (!err)
5091 continue; 5306 continue;
5092 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 5307 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5098,23 +5313,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5098 5313
5099/** 5314/**
5100 * ixgbe_free_tx_resources - Free Tx Resources per Queue 5315 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5101 * @adapter: board private structure
5102 * @tx_ring: Tx descriptor ring for a specific queue 5316 * @tx_ring: Tx descriptor ring for a specific queue
5103 * 5317 *
5104 * Free all transmit software resources 5318 * Free all transmit software resources
5105 **/ 5319 **/
5106void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5320void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5107 struct ixgbe_ring *tx_ring)
5108{ 5321{
5109 struct pci_dev *pdev = adapter->pdev; 5322 ixgbe_clean_tx_ring(tx_ring);
5110
5111 ixgbe_clean_tx_ring(adapter, tx_ring);
5112 5323
5113 vfree(tx_ring->tx_buffer_info); 5324 vfree(tx_ring->tx_buffer_info);
5114 tx_ring->tx_buffer_info = NULL; 5325 tx_ring->tx_buffer_info = NULL;
5115 5326
5116 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 5327 /* if not set, then don't free */
5117 tx_ring->dma); 5328 if (!tx_ring->desc)
5329 return;
5330
5331 dma_free_coherent(tx_ring->dev, tx_ring->size,
5332 tx_ring->desc, tx_ring->dma);
5118 5333
5119 tx_ring->desc = NULL; 5334 tx_ring->desc = NULL;
5120} 5335}
@@ -5131,28 +5346,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5131 5346
5132 for (i = 0; i < adapter->num_tx_queues; i++) 5347 for (i = 0; i < adapter->num_tx_queues; i++)
5133 if (adapter->tx_ring[i]->desc) 5348 if (adapter->tx_ring[i]->desc)
5134 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); 5349 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5135} 5350}
5136 5351
5137/** 5352/**
5138 * ixgbe_free_rx_resources - Free Rx Resources 5353 * ixgbe_free_rx_resources - Free Rx Resources
5139 * @adapter: board private structure
5140 * @rx_ring: ring to clean the resources from 5354 * @rx_ring: ring to clean the resources from
5141 * 5355 *
5142 * Free all receive software resources 5356 * Free all receive software resources
5143 **/ 5357 **/
5144void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5358void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5145 struct ixgbe_ring *rx_ring)
5146{ 5359{
5147 struct pci_dev *pdev = adapter->pdev; 5360 ixgbe_clean_rx_ring(rx_ring);
5148
5149 ixgbe_clean_rx_ring(adapter, rx_ring);
5150 5361
5151 vfree(rx_ring->rx_buffer_info); 5362 vfree(rx_ring->rx_buffer_info);
5152 rx_ring->rx_buffer_info = NULL; 5363 rx_ring->rx_buffer_info = NULL;
5153 5364
5154 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 5365 /* if not set, then don't free */
5155 rx_ring->dma); 5366 if (!rx_ring->desc)
5367 return;
5368
5369 dma_free_coherent(rx_ring->dev, rx_ring->size,
5370 rx_ring->desc, rx_ring->dma);
5156 5371
5157 rx_ring->desc = NULL; 5372 rx_ring->desc = NULL;
5158} 5373}
@@ -5169,7 +5384,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5169 5384
5170 for (i = 0; i < adapter->num_rx_queues; i++) 5385 for (i = 0; i < adapter->num_rx_queues; i++)
5171 if (adapter->rx_ring[i]->desc) 5386 if (adapter->rx_ring[i]->desc)
5172 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); 5387 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5173} 5388}
5174 5389
5175/** 5390/**
@@ -5182,6 +5397,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5182static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 5397static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5183{ 5398{
5184 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5399 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5400 struct ixgbe_hw *hw = &adapter->hw;
5185 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5401 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5186 5402
5187 /* MTU < 68 is an error and causes problems on some kernels */ 5403 /* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5408,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5192 /* must set new MTU before calling down or up */ 5408 /* must set new MTU before calling down or up */
5193 netdev->mtu = new_mtu; 5409 netdev->mtu = new_mtu;
5194 5410
5411 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5412 hw->fc.low_water = FC_LOW_WATER(max_frame);
5413
5195 if (netif_running(netdev)) 5414 if (netif_running(netdev))
5196 ixgbe_reinit_locked(adapter); 5415 ixgbe_reinit_locked(adapter);
5197 5416
@@ -5287,8 +5506,8 @@ static int ixgbe_close(struct net_device *netdev)
5287#ifdef CONFIG_PM 5506#ifdef CONFIG_PM
5288static int ixgbe_resume(struct pci_dev *pdev) 5507static int ixgbe_resume(struct pci_dev *pdev)
5289{ 5508{
5290 struct net_device *netdev = pci_get_drvdata(pdev); 5509 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5291 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5510 struct net_device *netdev = adapter->netdev;
5292 u32 err; 5511 u32 err;
5293 5512
5294 pci_set_power_state(pdev, PCI_D0); 5513 pci_set_power_state(pdev, PCI_D0);
@@ -5319,7 +5538,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5319 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5320 5539
5321 if (netif_running(netdev)) { 5540 if (netif_running(netdev)) {
5322 err = ixgbe_open(adapter->netdev); 5541 err = ixgbe_open(netdev);
5323 if (err) 5542 if (err)
5324 return err; 5543 return err;
5325 } 5544 }
@@ -5332,8 +5551,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
5332 5551
5333static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 5552static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5334{ 5553{
5335 struct net_device *netdev = pci_get_drvdata(pdev); 5554 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5336 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5555 struct net_device *netdev = adapter->netdev;
5337 struct ixgbe_hw *hw = &adapter->hw; 5556 struct ixgbe_hw *hw = &adapter->hw;
5338 u32 ctrl, fctrl; 5557 u32 ctrl, fctrl;
5339 u32 wufc = adapter->wol; 5558 u32 wufc = adapter->wol;
@@ -5350,6 +5569,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5350 ixgbe_free_all_rx_resources(adapter); 5569 ixgbe_free_all_rx_resources(adapter);
5351 } 5570 }
5352 5571
5572 ixgbe_clear_interrupt_scheme(adapter);
5573
5353#ifdef CONFIG_PM 5574#ifdef CONFIG_PM
5354 retval = pci_save_state(pdev); 5575 retval = pci_save_state(pdev);
5355 if (retval) 5576 if (retval)
@@ -5376,15 +5597,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5376 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 5597 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5377 } 5598 }
5378 5599
5379 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 5600 switch (hw->mac.type) {
5380 pci_wake_from_d3(pdev, true); 5601 case ixgbe_mac_82598EB:
5381 else
5382 pci_wake_from_d3(pdev, false); 5602 pci_wake_from_d3(pdev, false);
5603 break;
5604 case ixgbe_mac_82599EB:
5605 case ixgbe_mac_X540:
5606 pci_wake_from_d3(pdev, !!wufc);
5607 break;
5608 default:
5609 break;
5610 }
5383 5611
5384 *enable_wake = !!wufc; 5612 *enable_wake = !!wufc;
5385 5613
5386 ixgbe_clear_interrupt_scheme(adapter);
5387
5388 ixgbe_release_hw_control(adapter); 5614 ixgbe_release_hw_control(adapter);
5389 5615
5390 pci_disable_device(pdev); 5616 pci_disable_device(pdev);
@@ -5433,10 +5659,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5433{ 5659{
5434 struct net_device *netdev = adapter->netdev; 5660 struct net_device *netdev = adapter->netdev;
5435 struct ixgbe_hw *hw = &adapter->hw; 5661 struct ixgbe_hw *hw = &adapter->hw;
5662 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5436 u64 total_mpc = 0; 5663 u64 total_mpc = 0;
5437 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5664 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5438 u64 non_eop_descs = 0, restart_queue = 0; 5665 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5439 struct ixgbe_hw_stats *hwstats = &adapter->stats; 5666 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5667 u64 bytes = 0, packets = 0;
5440 5668
5441 if (test_bit(__IXGBE_DOWN, &adapter->state) || 5669 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5442 test_bit(__IXGBE_RESETTING, &adapter->state)) 5670 test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5449,21 +5677,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5449 adapter->hw_rx_no_dma_resources += 5677 adapter->hw_rx_no_dma_resources +=
5450 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5678 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5451 for (i = 0; i < adapter->num_rx_queues; i++) { 5679 for (i = 0; i < adapter->num_rx_queues; i++) {
5452 rsc_count += adapter->rx_ring[i]->rsc_count; 5680 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5453 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5681 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5454 } 5682 }
5455 adapter->rsc_total_count = rsc_count; 5683 adapter->rsc_total_count = rsc_count;
5456 adapter->rsc_total_flush = rsc_flush; 5684 adapter->rsc_total_flush = rsc_flush;
5457 } 5685 }
5458 5686
5687 for (i = 0; i < adapter->num_rx_queues; i++) {
5688 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5689 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5690 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5691 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5692 bytes += rx_ring->stats.bytes;
5693 packets += rx_ring->stats.packets;
5694 }
5695 adapter->non_eop_descs = non_eop_descs;
5696 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5697 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5698 netdev->stats.rx_bytes = bytes;
5699 netdev->stats.rx_packets = packets;
5700
5701 bytes = 0;
5702 packets = 0;
5459 /* gather some stats to the adapter struct that are per queue */ 5703 /* gather some stats to the adapter struct that are per queue */
5460 for (i = 0; i < adapter->num_tx_queues; i++) 5704 for (i = 0; i < adapter->num_tx_queues; i++) {
5461 restart_queue += adapter->tx_ring[i]->restart_queue; 5705 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5706 restart_queue += tx_ring->tx_stats.restart_queue;
5707 tx_busy += tx_ring->tx_stats.tx_busy;
5708 bytes += tx_ring->stats.bytes;
5709 packets += tx_ring->stats.packets;
5710 }
5462 adapter->restart_queue = restart_queue; 5711 adapter->restart_queue = restart_queue;
5463 5712 adapter->tx_busy = tx_busy;
5464 for (i = 0; i < adapter->num_rx_queues; i++) 5713 netdev->stats.tx_bytes = bytes;
5465 non_eop_descs += adapter->rx_ring[i]->non_eop_descs; 5714 netdev->stats.tx_packets = packets;
5466 adapter->non_eop_descs = non_eop_descs;
5467 5715
5468 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 5716 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5469 for (i = 0; i < 8; i++) { 5717 for (i = 0; i < 8; i++) {
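
Note: statistics now live per ring (rx_stats/tx_stats plus a bytes/packets pair) and are folded into the adapter and netdev totals in one pass per direction. A toy standalone fold mirroring those loops:

    #include <stdio.h>
    #include <stdint.h>

    struct q_stats { uint64_t bytes, packets; };

    int main(void)
    {
            struct q_stats q[4] = { {1500, 1}, {3000, 2}, {0, 0}, {9000, 6} };
            uint64_t bytes = 0, packets = 0;
            int i;

            for (i = 0; i < 4; i++) {       /* snapshot every queue once */
                    bytes += q[i].bytes;
                    packets += q[i].packets;
            }
            printf("%llu bytes / %llu packets\n",
                   (unsigned long long)bytes, (unsigned long long)packets);
            return 0;
    }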
@@ -5478,17 +5726,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5478 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5726 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5479 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5727 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5480 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5728 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5481 if (hw->mac.type == ixgbe_mac_82599EB) { 5729 switch (hw->mac.type) {
5482 hwstats->pxonrxc[i] += 5730 case ixgbe_mac_82598EB:
5483 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5484 hwstats->pxoffrxc[i] +=
5485 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5486 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5487 } else {
5488 hwstats->pxonrxc[i] += 5731 hwstats->pxonrxc[i] +=
5489 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 5732 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5490 hwstats->pxoffrxc[i] += 5733 break;
5491 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 5734 case ixgbe_mac_82599EB:
5735 case ixgbe_mac_X540:
5736 hwstats->pxonrxc[i] +=
5737 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5738 break;
5739 default:
5740 break;
5492 } 5741 }
5493 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 5742 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5494 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 5743 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5497,21 +5746,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5497 /* work around hardware counting issue */ 5746 /* work around hardware counting issue */
5498 hwstats->gprc -= missed_rx; 5747 hwstats->gprc -= missed_rx;
5499 5748
5749 ixgbe_update_xoff_received(adapter);
5750
5500 /* 82598 hardware only has a 32 bit counter in the high register */ 5751 /* 82598 hardware only has a 32 bit counter in the high register */
5501 if (hw->mac.type == ixgbe_mac_82599EB) { 5752 switch (hw->mac.type) {
5502 u64 tmp; 5753 case ixgbe_mac_82598EB:
5754 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5755 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5756 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5757 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5758 break;
5759 case ixgbe_mac_82599EB:
5760 case ixgbe_mac_X540:
5503 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5761 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5504 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; 5762 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5505 /* 4 high bits of GORC */
5506 hwstats->gorc += (tmp << 32);
5507 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5763 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5508 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; 5764 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5509 /* 4 high bits of GOTC */
5510 hwstats->gotc += (tmp << 32);
5511 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5765 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5512 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5766 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5513 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5767 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5514 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5515 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5768 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5516 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 5769 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5517#ifdef IXGBE_FCOE 5770#ifdef IXGBE_FCOE
@@ -5522,12 +5775,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5522 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5775 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5523 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5776 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5524#endif /* IXGBE_FCOE */ 5777#endif /* IXGBE_FCOE */
5525 } else { 5778 break;
5526 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5779 default:
5527 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5780 break;
5528 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5529 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5530 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5531 } 5781 }
5532 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5782 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5533 hwstats->bprc += bprc; 5783 hwstats->bprc += bprc;
@@ -5700,8 +5950,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5700 5950
5701 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5951 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5702 for (i = 0; i < adapter->num_tx_queues; i++) 5952 for (i = 0; i < adapter->num_tx_queues; i++)
5703 set_bit(__IXGBE_FDIR_INIT_DONE, 5953 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5704 &(adapter->tx_ring[i]->reinit_state)); 5954 &(adapter->tx_ring[i]->state));
5705 } else { 5955 } else {
5706 e_err(probe, "failed to finish FDIR re-initialization, " 5956 e_err(probe, "failed to finish FDIR re-initialization, "
5707 "ignored adding FDIR ATR filters\n"); 5957 "ignored adding FDIR ATR filters\n");
@@ -5763,17 +6013,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5763 if (!netif_carrier_ok(netdev)) { 6013 if (!netif_carrier_ok(netdev)) {
5764 bool flow_rx, flow_tx; 6014 bool flow_rx, flow_tx;
5765 6015
5766 if (hw->mac.type == ixgbe_mac_82599EB) { 6016 switch (hw->mac.type) {
5767 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6017 case ixgbe_mac_82598EB: {
5768 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5769 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5770 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5771 } else {
5772 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6018 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5773 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 6019 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5774 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 6020 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5775 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 6021 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5776 } 6022 }
6023 break;
6024 case ixgbe_mac_82599EB:
6025 case ixgbe_mac_X540: {
6026 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6027 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6028 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6029 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6030 }
6031 break;
6032 default:
6033 flow_tx = false;
6034 flow_rx = false;
6035 break;
6036 }
5777 6037
5778 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 6038 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5779 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6039 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5787,7 +6047,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5787 netif_carrier_on(netdev); 6047 netif_carrier_on(netdev);
5788 } else { 6048 } else {
5789 /* Force detection of hung controller */ 6049 /* Force detection of hung controller */
5790 adapter->detect_tx_hung = true; 6050 for (i = 0; i < adapter->num_tx_queues; i++) {
6051 tx_ring = adapter->tx_ring[i];
6052 set_check_for_tx_hang(tx_ring);
6053 }
5791 } 6054 }
5792 } else { 6055 } else {
5793 adapter->link_up = false; 6056 adapter->link_up = false;
@@ -5823,7 +6086,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5823 6086
5824static int ixgbe_tso(struct ixgbe_adapter *adapter, 6087static int ixgbe_tso(struct ixgbe_adapter *adapter,
5825 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 6088 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5826 u32 tx_flags, u8 *hdr_len) 6089 u32 tx_flags, u8 *hdr_len, __be16 protocol)
5827{ 6090{
5828 struct ixgbe_adv_tx_context_desc *context_desc; 6091 struct ixgbe_adv_tx_context_desc *context_desc;
5829 unsigned int i; 6092 unsigned int i;
@@ -5841,7 +6104,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5841 l4len = tcp_hdrlen(skb); 6104 l4len = tcp_hdrlen(skb);
5842 *hdr_len += l4len; 6105 *hdr_len += l4len;
5843 6106
5844 if (skb->protocol == htons(ETH_P_IP)) { 6107 if (protocol == htons(ETH_P_IP)) {
5845 struct iphdr *iph = ip_hdr(skb); 6108 struct iphdr *iph = ip_hdr(skb);
5846 iph->tot_len = 0; 6109 iph->tot_len = 0;
5847 iph->check = 0; 6110 iph->check = 0;
@@ -5880,7 +6143,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5880 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 6143 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5881 IXGBE_ADVTXD_DTYP_CTXT); 6144 IXGBE_ADVTXD_DTYP_CTXT);
5882 6145
5883 if (skb->protocol == htons(ETH_P_IP)) 6146 if (protocol == htons(ETH_P_IP))
5884 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 6147 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5885 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 6148 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5886 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 6149 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +6169,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5906 return false; 6169 return false;
5907} 6170}
5908 6171
5909static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) 6172static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6173 __be16 protocol)
5910{ 6174{
5911 u32 rtn = 0; 6175 u32 rtn = 0;
5912 __be16 protocol;
5913
5914 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5915 protocol = ((const struct vlan_ethhdr *)skb->data)->
5916 h_vlan_encapsulated_proto;
5917 else
5918 protocol = skb->protocol;
5919 6176
5920 switch (protocol) { 6177 switch (protocol) {
5921 case cpu_to_be16(ETH_P_IP): 6178 case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +6200,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5943 default: 6200 default:
5944 if (unlikely(net_ratelimit())) 6201 if (unlikely(net_ratelimit()))
5945 e_warn(probe, "partial checksum but proto=%x!\n", 6202 e_warn(probe, "partial checksum but proto=%x!\n",
5946 skb->protocol); 6203 protocol);
5947 break; 6204 break;
5948 } 6205 }
5949 6206
@@ -5952,7 +6209,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5952 6209
5953static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 6210static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5954 struct ixgbe_ring *tx_ring, 6211 struct ixgbe_ring *tx_ring,
5955 struct sk_buff *skb, u32 tx_flags) 6212 struct sk_buff *skb, u32 tx_flags,
6213 __be16 protocol)
5956{ 6214{
5957 struct ixgbe_adv_tx_context_desc *context_desc; 6215 struct ixgbe_adv_tx_context_desc *context_desc;
5958 unsigned int i; 6216 unsigned int i;
@@ -5981,7 +6239,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5981 IXGBE_ADVTXD_DTYP_CTXT); 6239 IXGBE_ADVTXD_DTYP_CTXT);
5982 6240
5983 if (skb->ip_summed == CHECKSUM_PARTIAL) 6241 if (skb->ip_summed == CHECKSUM_PARTIAL)
5984 type_tucmd_mlhl |= ixgbe_psum(adapter, skb); 6242 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
5985 6243
5986 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 6244 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5987 /* use index zero for tx checksum offload */ 6245 /* use index zero for tx checksum offload */
@@ -6004,15 +6262,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
6004static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 6262static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6005 struct ixgbe_ring *tx_ring, 6263 struct ixgbe_ring *tx_ring,
6006 struct sk_buff *skb, u32 tx_flags, 6264 struct sk_buff *skb, u32 tx_flags,
6007 unsigned int first) 6265 unsigned int first, const u8 hdr_len)
6008{ 6266{
6009 struct pci_dev *pdev = adapter->pdev; 6267 struct device *dev = tx_ring->dev;
6010 struct ixgbe_tx_buffer *tx_buffer_info; 6268 struct ixgbe_tx_buffer *tx_buffer_info;
6011 unsigned int len; 6269 unsigned int len;
6012 unsigned int total = skb->len; 6270 unsigned int total = skb->len;
6013 unsigned int offset = 0, size, count = 0, i; 6271 unsigned int offset = 0, size, count = 0, i;
6014 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 6272 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
6015 unsigned int f; 6273 unsigned int f;
6274 unsigned int bytecount = skb->len;
6275 u16 gso_segs = 1;
6016 6276
6017 i = tx_ring->next_to_use; 6277 i = tx_ring->next_to_use;
6018 6278
@@ -6027,10 +6287,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6027 6287
6028 tx_buffer_info->length = size; 6288 tx_buffer_info->length = size;
6029 tx_buffer_info->mapped_as_page = false; 6289 tx_buffer_info->mapped_as_page = false;
6030 tx_buffer_info->dma = dma_map_single(&pdev->dev, 6290 tx_buffer_info->dma = dma_map_single(dev,
6031 skb->data + offset, 6291 skb->data + offset,
6032 size, DMA_TO_DEVICE); 6292 size, DMA_TO_DEVICE);
6033 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6293 if (dma_mapping_error(dev, tx_buffer_info->dma))
6034 goto dma_error; 6294 goto dma_error;
6035 tx_buffer_info->time_stamp = jiffies; 6295 tx_buffer_info->time_stamp = jiffies;
6036 tx_buffer_info->next_to_watch = i; 6296 tx_buffer_info->next_to_watch = i;
@@ -6063,12 +6323,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6063 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 6323 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6064 6324
6065 tx_buffer_info->length = size; 6325 tx_buffer_info->length = size;
6066 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, 6326 tx_buffer_info->dma = dma_map_page(dev,
6067 frag->page, 6327 frag->page,
6068 offset, size, 6328 offset, size,
6069 DMA_TO_DEVICE); 6329 DMA_TO_DEVICE);
6070 tx_buffer_info->mapped_as_page = true; 6330 tx_buffer_info->mapped_as_page = true;
6071 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6331 if (dma_mapping_error(dev, tx_buffer_info->dma))
6072 goto dma_error; 6332 goto dma_error;
6073 tx_buffer_info->time_stamp = jiffies; 6333 tx_buffer_info->time_stamp = jiffies;
6074 tx_buffer_info->next_to_watch = i; 6334 tx_buffer_info->next_to_watch = i;
@@ -6082,6 +6342,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6082 break; 6342 break;
6083 } 6343 }
6084 6344
6345 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6346 gso_segs = skb_shinfo(skb)->gso_segs;
6347#ifdef IXGBE_FCOE
6348 /* adjust for FCoE Sequence Offload */
6349 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6350 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6351 skb_shinfo(skb)->gso_size);
6352#endif /* IXGBE_FCOE */
6353 bytecount += (gso_segs - 1) * hdr_len;
6354
6355 /* multiply data chunks by size of headers */
6356 tx_ring->tx_buffer_info[i].bytecount = bytecount;
6357 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
6085 tx_ring->tx_buffer_info[i].skb = skb; 6358 tx_ring->tx_buffer_info[i].skb = skb;
6086 tx_ring->tx_buffer_info[first].next_to_watch = i; 6359 tx_ring->tx_buffer_info[first].next_to_watch = i;
6087 6360
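The bytecount/gso_segs bookkeeping added above feeds the per-queue transmit statistics: a TSO (or FCoE FSO) skb leaves the wire as gso_segs frames, each repeating hdr_len bytes of headers, so the on-wire total is skb->len plus (gso_segs - 1) * hdr_len. A minimal sketch of the same arithmetic, mirroring the FSO branch's DIV_ROUND_UP form, with invented example values (not taken from the patch):

	/* Hypothetical TSO/FSO accounting example; all values are illustrative. */
	unsigned int skb_len = 65160;	/* total skb length: headers + payload */
	u8 hdr_len = 66;		/* Ethernet + IPv4 + TCP header bytes */
	u16 gso_size = 1448;		/* MSS handed down by the stack */
	u16 gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);	/* 45 */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;
	/* 65160 + 44 * 66 = 68064 bytes actually hit the wire */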
@@ -6103,14 +6376,13 @@ dma_error:
6103 i += tx_ring->count; 6376 i += tx_ring->count;
6104 i--; 6377 i--;
6105 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6378 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6106 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 6379 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
6107 } 6380 }
6108 6381
6109 return 0; 6382 return 0;
6110} 6383}
6111 6384
6112static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6385static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6113 struct ixgbe_ring *tx_ring,
6114 int tx_flags, int count, u32 paylen, u8 hdr_len) 6386 int tx_flags, int count, u32 paylen, u8 hdr_len)
6115{ 6387{
6116 union ixgbe_adv_tx_desc *tx_desc = NULL; 6388 union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,60 +6447,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6175 wmb(); 6447 wmb();
6176 6448
6177 tx_ring->next_to_use = i; 6449 tx_ring->next_to_use = i;
6178 writel(i, adapter->hw.hw_addr + tx_ring->tail); 6450 writel(i, tx_ring->tail);
6179} 6451}
6180 6452
6181static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6453static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6182 int queue, u32 tx_flags) 6454 u8 queue, u32 tx_flags, __be16 protocol)
6183{ 6455{
6184 struct ixgbe_atr_input atr_input; 6456 struct ixgbe_atr_input atr_input;
6185 struct tcphdr *th;
6186 struct iphdr *iph = ip_hdr(skb); 6457 struct iphdr *iph = ip_hdr(skb);
6187 struct ethhdr *eth = (struct ethhdr *)skb->data; 6458 struct ethhdr *eth = (struct ethhdr *)skb->data;
6188 u16 vlan_id, src_port, dst_port, flex_bytes; 6459 struct tcphdr *th;
6189 u32 src_ipv4_addr, dst_ipv4_addr; 6460 u16 vlan_id;
6190 u8 l4type = 0;
6191 6461
6192 /* Right now, we support IPv4 only */ 6462 /* Right now, we support IPv4 w/ TCP only */
6193 if (skb->protocol != htons(ETH_P_IP)) 6463 if (protocol != htons(ETH_P_IP) ||
6194 return; 6464 iph->protocol != IPPROTO_TCP)
6195 /* check if we're UDP or TCP */
6196 if (iph->protocol == IPPROTO_TCP) {
6197 th = tcp_hdr(skb);
6198 src_port = th->source;
6199 dst_port = th->dest;
6200 l4type |= IXGBE_ATR_L4TYPE_TCP;
6201 /* l4type IPv4 type is 0, no need to assign */
6202 } else {
6203 /* Unsupported L4 header, just bail here */
6204 return; 6465 return;
6205 }
6206 6466
6207 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6467 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6208 6468
6209 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6469 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6210 IXGBE_TX_FLAGS_VLAN_SHIFT; 6470 IXGBE_TX_FLAGS_VLAN_SHIFT;
6211 src_ipv4_addr = iph->saddr; 6471
6212 dst_ipv4_addr = iph->daddr; 6472 th = tcp_hdr(skb);
6213 flex_bytes = eth->h_proto;
6214 6473
6215 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6474 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
6216 ixgbe_atr_set_src_port_82599(&atr_input, dst_port); 6475 ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
6217 ixgbe_atr_set_dst_port_82599(&atr_input, src_port); 6476 ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
6218 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); 6477 ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
6219 ixgbe_atr_set_l4type_82599(&atr_input, l4type); 6478 ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
6220 /* src and dst are inverted, think how the receiver sees them */ 6479 /* src and dst are inverted, think how the receiver sees them */
6221 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); 6480 ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
6222 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); 6481 ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
6223 6482
6224 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6483 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6225 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6484 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
6226} 6485}
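The swapped src/dst programming above is deliberate: the signature filter must match the flow as the receiver sees it, so that completions for a transmit queue's flow are steered back to the paired Rx queue. As a worked example, if the host transmits 192.0.2.1:5000 -> 198.51.100.2:80, the filter is programmed with source 198.51.100.2:80 and destination 192.0.2.1:5000, which is exactly the tuple the returning ACKs carry.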
6227 6486
6228static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6487static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6229 struct ixgbe_ring *tx_ring, int size)
6230{ 6488{
6231 netif_stop_subqueue(netdev, tx_ring->queue_index); 6489 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6232 /* Herbert's original patch had: 6490 /* Herbert's original patch had:
6233 * smp_mb__after_netif_stop_queue(); 6491 * smp_mb__after_netif_stop_queue();
6234 * but since that doesn't exist yet, just open code it. */ 6492 * but since that doesn't exist yet, just open code it. */
@@ -6240,27 +6498,29 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6240 return -EBUSY; 6498 return -EBUSY;
6241 6499
6242 /* A reprieve! - use start_queue because it doesn't call schedule */ 6500 /* A reprieve! - use start_queue because it doesn't call schedule */
6243 netif_start_subqueue(netdev, tx_ring->queue_index); 6501 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6244 ++tx_ring->restart_queue; 6502 ++tx_ring->tx_stats.restart_queue;
6245 return 0; 6503 return 0;
6246} 6504}
6247 6505
6248static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6506static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6249 struct ixgbe_ring *tx_ring, int size)
6250{ 6507{
6251 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6508 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6252 return 0; 6509 return 0;
6253 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 6510 return __ixgbe_maybe_stop_tx(tx_ring, size);
6254} 6511}
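The stop/recheck/restart sequence above is the standard lockless handshake between the transmit path and the completion-cleanup path. A condensed sketch of the pattern with hypothetical ring helpers (my_ring and ring_unused are illustrative, not driver API):

	/* Sketch: producer-side queue-full handling with an explicit barrier. */
	static int maybe_stop(struct my_ring *r, int needed)
	{
		if (likely(ring_unused(r) >= needed))
			return 0;				/* fast path: space left */
		netif_stop_subqueue(r->netdev, r->index);	/* publish "stopped" */
		smp_mb();		/* order the stop against the re-read below */
		if (likely(ring_unused(r) < needed))
			return -EBUSY;		/* really full; cleanup will wake us */
		netif_start_subqueue(r->netdev, r->index);	/* raced: space freed */
		return 0;
	}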
6255 6512
6256static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6513static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6257{ 6514{
6258 struct ixgbe_adapter *adapter = netdev_priv(dev); 6515 struct ixgbe_adapter *adapter = netdev_priv(dev);
6259 int txq = smp_processor_id(); 6516 int txq = smp_processor_id();
6260
6261#ifdef IXGBE_FCOE 6517#ifdef IXGBE_FCOE
6262 if ((skb->protocol == htons(ETH_P_FCOE)) || 6518 __be16 protocol;
6263 (skb->protocol == htons(ETH_P_FIP))) { 6519
6520 protocol = vlan_get_protocol(skb);
6521
6522 if ((protocol == htons(ETH_P_FCOE)) ||
6523 (protocol == htons(ETH_P_FIP))) {
6264 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6524 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6265 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6525 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6266 txq += adapter->ring_feature[RING_F_FCOE].mask; 6526 txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6292,10 +6552,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6292 return skb_tx_hash(dev, skb); 6552 return skb_tx_hash(dev, skb);
6293} 6553}
6294 6554
6295netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, 6555netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6296 struct ixgbe_adapter *adapter, 6556 struct ixgbe_adapter *adapter,
6297 struct ixgbe_ring *tx_ring) 6557 struct ixgbe_ring *tx_ring)
6298{ 6558{
6559 struct net_device *netdev = tx_ring->netdev;
6299 struct netdev_queue *txq; 6560 struct netdev_queue *txq;
6300 unsigned int first; 6561 unsigned int first;
6301 unsigned int tx_flags = 0; 6562 unsigned int tx_flags = 0;
@@ -6303,6 +6564,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6303 int tso; 6564 int tso;
6304 int count = 0; 6565 int count = 0;
6305 unsigned int f; 6566 unsigned int f;
6567 __be16 protocol;
6568
6569 protocol = vlan_get_protocol(skb);
6306 6570
6307 if (vlan_tx_tag_present(skb)) { 6571 if (vlan_tx_tag_present(skb)) {
6308 tx_flags |= vlan_tx_tag_get(skb); 6572 tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6587,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6323 /* for FCoE with DCB, we force the priority to what 6587 /* for FCoE with DCB, we force the priority to what
6324 * was specified by the switch */ 6588 * was specified by the switch */
6325 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6589 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6326 (skb->protocol == htons(ETH_P_FCOE) || 6590 (protocol == htons(ETH_P_FCOE) ||
6327 skb->protocol == htons(ETH_P_FIP))) { 6591 protocol == htons(ETH_P_FIP))) {
6328#ifdef CONFIG_IXGBE_DCB 6592#ifdef CONFIG_IXGBE_DCB
6329 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6593 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6330 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6594 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6598,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6334 } 6598 }
6335#endif 6599#endif
6336 /* flag for FCoE offloads */ 6600 /* flag for FCoE offloads */
6337 if (skb->protocol == htons(ETH_P_FCOE)) 6601 if (protocol == htons(ETH_P_FCOE))
6338 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6602 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6339 } 6603 }
6340#endif 6604#endif
@@ -6350,8 +6614,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6350 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6614 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6351 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6615 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6352 6616
6353 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 6617 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
6354 adapter->tx_busy++; 6618 tx_ring->tx_stats.tx_busy++;
6355 return NETDEV_TX_BUSY; 6619 return NETDEV_TX_BUSY;
6356 } 6620 }
6357 6621
@@ -6368,9 +6632,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6368 tx_flags |= IXGBE_TX_FLAGS_FSO; 6632 tx_flags |= IXGBE_TX_FLAGS_FSO;
6369#endif /* IXGBE_FCOE */ 6633#endif /* IXGBE_FCOE */
6370 } else { 6634 } else {
6371 if (skb->protocol == htons(ETH_P_IP)) 6635 if (protocol == htons(ETH_P_IP))
6372 tx_flags |= IXGBE_TX_FLAGS_IPV4; 6636 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6373 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 6637 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6638 protocol);
6374 if (tso < 0) { 6639 if (tso < 0) {
6375 dev_kfree_skb_any(skb); 6640 dev_kfree_skb_any(skb);
6376 return NETDEV_TX_OK; 6641 return NETDEV_TX_OK;
@@ -6378,30 +6643,30 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6378 6643
6379 if (tso) 6644 if (tso)
6380 tx_flags |= IXGBE_TX_FLAGS_TSO; 6645 tx_flags |= IXGBE_TX_FLAGS_TSO;
6381 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 6646 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6647 protocol) &&
6382 (skb->ip_summed == CHECKSUM_PARTIAL)) 6648 (skb->ip_summed == CHECKSUM_PARTIAL))
6383 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6649 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6384 } 6650 }
6385 6651
6386 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 6652 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6387 if (count) { 6653 if (count) {
6388 /* add the ATR filter if ATR is on */ 6654 /* add the ATR filter if ATR is on */
6389 if (tx_ring->atr_sample_rate) { 6655 if (tx_ring->atr_sample_rate) {
6390 ++tx_ring->atr_count; 6656 ++tx_ring->atr_count;
6391 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 6657 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6392 test_bit(__IXGBE_FDIR_INIT_DONE, 6658 test_bit(__IXGBE_TX_FDIR_INIT_DONE,
6393 &tx_ring->reinit_state)) { 6659 &tx_ring->state)) {
6394 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6660 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6395 tx_flags); 6661 tx_flags, protocol);
6396 tx_ring->atr_count = 0; 6662 tx_ring->atr_count = 0;
6397 } 6663 }
6398 } 6664 }
6399 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6665 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6400 txq->tx_bytes += skb->len; 6666 txq->tx_bytes += skb->len;
6401 txq->tx_packets++; 6667 txq->tx_packets++;
6402 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 6668 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6403 hdr_len); 6669 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6404 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6405 6670
6406 } else { 6671 } else {
6407 dev_kfree_skb_any(skb); 6672 dev_kfree_skb_any(skb);
@@ -6418,7 +6683,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
6418 struct ixgbe_ring *tx_ring; 6683 struct ixgbe_ring *tx_ring;
6419 6684
6420 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6685 tx_ring = adapter->tx_ring[skb->queue_mapping];
6421 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); 6686 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6422} 6687}
6423 6688
6424/** 6689/**
@@ -6559,20 +6824,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6559 6824
6560 /* accurate rx/tx bytes/packets stats */ 6825 /* accurate rx/tx bytes/packets stats */
6561 dev_txq_stats_fold(netdev, stats); 6826 dev_txq_stats_fold(netdev, stats);
6827 rcu_read_lock();
6562 for (i = 0; i < adapter->num_rx_queues; i++) { 6828 for (i = 0; i < adapter->num_rx_queues; i++) {
6563 struct ixgbe_ring *ring = adapter->rx_ring[i]; 6829 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6564 u64 bytes, packets; 6830 u64 bytes, packets;
6565 unsigned int start; 6831 unsigned int start;
6566 6832
6567 do { 6833 if (ring) {
6568 start = u64_stats_fetch_begin_bh(&ring->syncp); 6834 do {
6569 packets = ring->stats.packets; 6835 start = u64_stats_fetch_begin_bh(&ring->syncp);
6570 bytes = ring->stats.bytes; 6836 packets = ring->stats.packets;
6571 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 6837 bytes = ring->stats.bytes;
6572 stats->rx_packets += packets; 6838 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6573 stats->rx_bytes += bytes; 6839 stats->rx_packets += packets;
6840 stats->rx_bytes += bytes;
6841 }
6574 } 6842 }
6575 6843 rcu_read_unlock();
6576 /* following stats updated by ixgbe_watchdog_task() */ 6844 /* following stats updated by ixgbe_watchdog_task() */
6577 stats->multicast = netdev->stats.multicast; 6845 stats->multicast = netdev->stats.multicast;
6578 stats->rx_errors = netdev->stats.rx_errors; 6846 stats->rx_errors = netdev->stats.rx_errors;
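The reader loop above pairs with u64_stats_update_begin()/u64_stats_update_end() on the ring's hot path; the seqcount only costs anything on 32-bit SMP, where a 64-bit counter pair cannot be read atomically. Writer side, sketched (field names follow the rx ring shown above):

	/* Sketch: per-ring counter update made consistently visible to readers. */
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += pkts;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);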
@@ -6687,11 +6955,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6687 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 6955 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6688 static int cards_found; 6956 static int cards_found;
6689 int i, err, pci_using_dac; 6957 int i, err, pci_using_dac;
6958 u8 part_str[IXGBE_PBANUM_LENGTH];
6690 unsigned int indices = num_possible_cpus(); 6959 unsigned int indices = num_possible_cpus();
6691#ifdef IXGBE_FCOE 6960#ifdef IXGBE_FCOE
6692 u16 device_caps; 6961 u16 device_caps;
6693#endif 6962#endif
6694 u32 part_num, eec; 6963 u32 eec;
6695 6964
6696 /* Catch broken hardware that put the wrong VF device ID in 6965 /* Catch broken hardware that put the wrong VF device ID in
6697 * the PCIe SR-IOV capability. 6966 * the PCIe SR-IOV capability.
@@ -6754,8 +7023,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6754 7023
6755 SET_NETDEV_DEV(netdev, &pdev->dev); 7024 SET_NETDEV_DEV(netdev, &pdev->dev);
6756 7025
6757 pci_set_drvdata(pdev, netdev);
6758 adapter = netdev_priv(netdev); 7026 adapter = netdev_priv(netdev);
7027 pci_set_drvdata(pdev, adapter);
6759 7028
6760 adapter->netdev = netdev; 7029 adapter->netdev = netdev;
6761 adapter->pdev = pdev; 7030 adapter->pdev = pdev;
@@ -6778,7 +7047,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6778 netdev->netdev_ops = &ixgbe_netdev_ops; 7047 netdev->netdev_ops = &ixgbe_netdev_ops;
6779 ixgbe_set_ethtool_ops(netdev); 7048 ixgbe_set_ethtool_ops(netdev);
6780 netdev->watchdog_timeo = 5 * HZ; 7049 netdev->watchdog_timeo = 5 * HZ;
6781 strcpy(netdev->name, pci_name(pdev)); 7050 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
6782 7051
6783 adapter->bd_number = cards_found; 7052 adapter->bd_number = cards_found;
6784 7053
@@ -6828,8 +7097,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6828 goto err_sw_init; 7097 goto err_sw_init;
6829 7098
6830 /* Make it possible the adapter to be woken up via WOL */ 7099 /* Make it possible the adapter to be woken up via WOL */
6831 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7100 switch (adapter->hw.mac.type) {
7101 case ixgbe_mac_82599EB:
7102 case ixgbe_mac_X540:
6832 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 7103 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7104 break;
7105 default:
7106 break;
7107 }
6833 7108
6834 /* 7109 /*
6835 * If there is a fan on this device and it has failed log the 7110 * If there is a fan on this device and it has failed log the
@@ -6937,8 +7212,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6937 goto err_eeprom; 7212 goto err_eeprom;
6938 } 7213 }
6939 7214
6940 /* power down the optics */ 7215 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
6941 if (hw->phy.multispeed_fiber) 7216 if (hw->mac.ops.disable_tx_laser &&
7217 ((hw->phy.multispeed_fiber) ||
7218 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7219 (hw->mac.type == ixgbe_mac_82599EB))))
6942 hw->mac.ops.disable_tx_laser(hw); 7220 hw->mac.ops.disable_tx_laser(hw);
6943 7221
6944 init_timer(&adapter->watchdog_timer); 7222 init_timer(&adapter->watchdog_timer);
@@ -6953,6 +7231,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6953 goto err_sw_init; 7231 goto err_sw_init;
6954 7232
6955 switch (pdev->device) { 7233 switch (pdev->device) {
7234 case IXGBE_DEV_ID_82599_SFP:
7235 /* Only this subdevice supports WOL */
7236 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7237 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7238 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7239 break;
7240 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7241 /* All except this subdevice support WOL */
7242 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7243 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7244 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7245 break;
6956 case IXGBE_DEV_ID_82599_KX4: 7246 case IXGBE_DEV_ID_82599_KX4:
6957 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7247 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6958 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 7248 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6976,16 +7266,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6976 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : 7266 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6977 "Unknown"), 7267 "Unknown"),
6978 netdev->dev_addr); 7268 netdev->dev_addr);
6979 ixgbe_read_pba_num_generic(hw, &part_num); 7269
7270 err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
7271 if (err)
7272 strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
6980 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 7273 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6981 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 7274 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
6982 "PBA No: %06x-%03x\n",
6983 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 7275 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6984 (part_num >> 8), (part_num & 0xff)); 7276 part_str);
6985 else 7277 else
6986 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 7278 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
6987 hw->mac.type, hw->phy.type, 7279 hw->mac.type, hw->phy.type, part_str);
6988 (part_num >> 8), (part_num & 0xff));
6989 7280
6990 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7281 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6991 e_dev_warn("PCI-Express bandwidth available for this card is " 7282 e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7078,17 +7369,19 @@ err_dma:
7078 **/ 7369 **/
7079static void __devexit ixgbe_remove(struct pci_dev *pdev) 7370static void __devexit ixgbe_remove(struct pci_dev *pdev)
7080{ 7371{
7081 struct net_device *netdev = pci_get_drvdata(pdev); 7372 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7082 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7373 struct net_device *netdev = adapter->netdev;
7083 7374
7084 set_bit(__IXGBE_DOWN, &adapter->state); 7375 set_bit(__IXGBE_DOWN, &adapter->state);
7085 /* clear the module not found bit to make sure the worker won't 7376
7086 * reschedule 7377 /*
 7378 * The timers may rearm themselves, so explicitly prevent them
7379 * from being rescheduled.
7087 */ 7380 */
7088 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 7381 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7089 del_timer_sync(&adapter->watchdog_timer); 7382 del_timer_sync(&adapter->watchdog_timer);
7090
7091 del_timer_sync(&adapter->sfp_timer); 7383 del_timer_sync(&adapter->sfp_timer);
7384
7092 cancel_work_sync(&adapter->watchdog_task); 7385 cancel_work_sync(&adapter->watchdog_task);
7093 cancel_work_sync(&adapter->sfp_task); 7386 cancel_work_sync(&adapter->sfp_task);
7094 cancel_work_sync(&adapter->multispeed_fiber_task); 7387 cancel_work_sync(&adapter->multispeed_fiber_task);
@@ -7096,7 +7389,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7096 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 7389 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7097 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 7390 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7098 cancel_work_sync(&adapter->fdir_reinit_task); 7391 cancel_work_sync(&adapter->fdir_reinit_task);
7099 flush_scheduled_work(); 7392 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
7393 cancel_work_sync(&adapter->check_overtemp_task);
7100 7394
7101#ifdef CONFIG_IXGBE_DCA 7395#ifdef CONFIG_IXGBE_DCA
7102 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7396 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7149,8 +7443,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7149static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7443static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7150 pci_channel_state_t state) 7444 pci_channel_state_t state)
7151{ 7445{
7152 struct net_device *netdev = pci_get_drvdata(pdev); 7446 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7153 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7447 struct net_device *netdev = adapter->netdev;
7154 7448
7155 netif_device_detach(netdev); 7449 netif_device_detach(netdev);
7156 7450
@@ -7173,8 +7467,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7173 */ 7467 */
7174static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 7468static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7175{ 7469{
7176 struct net_device *netdev = pci_get_drvdata(pdev); 7470 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7177 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7178 pci_ers_result_t result; 7471 pci_ers_result_t result;
7179 int err; 7472 int err;
7180 7473
@@ -7212,8 +7505,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7212 */ 7505 */
7213static void ixgbe_io_resume(struct pci_dev *pdev) 7506static void ixgbe_io_resume(struct pci_dev *pdev)
7214{ 7507{
7215 struct net_device *netdev = pci_get_drvdata(pdev); 7508 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7216 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7509 struct net_device *netdev = adapter->netdev;
7217 7510
7218 if (netif_running(netdev)) { 7511 if (netif_running(netdev)) {
7219 if (ixgbe_up(adapter)) { 7512 if (ixgbe_up(adapter)) {
@@ -7278,6 +7571,7 @@ static void __exit ixgbe_exit_module(void)
7278 dca_unregister_notify(&dca_notifier); 7571 dca_unregister_notify(&dca_notifier);
7279#endif 7572#endif
7280 pci_unregister_driver(&ixgbe_driver); 7573 pci_unregister_driver(&ixgbe_driver);
7574 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7281} 7575}
7282 7576
7283#ifdef CONFIG_IXGBE_DCA 7577#ifdef CONFIG_IXGBE_DCA
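The new rcu_barrier() matters because, per the comment on the call, objects in this series are freed via call_rcu(): pci_unregister_driver() stops new callbacks from being queued, and rcu_barrier() then waits for the already-queued ones to run before the module text disappears. synchronize_rcu() alone would not be enough, since it waits only for readers, not for pending callbacks. The ordering, in miniature:

	/* Sketch: teardown ordering when objects are freed with call_rcu(). */
	pci_unregister_driver(&ixgbe_driver);	/* no new call_rcu() after this */
	rcu_barrier();				/* drain callbacks already queued */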
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb98..027c628c3aae 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
319 u32 vflre = 0; 319 u32 vflre = 0;
320 s32 ret_val = IXGBE_ERR_MBX; 320 s32 ret_val = IXGBE_ERR_MBX;
321 321
322 if (hw->mac.type == ixgbe_mac_82599EB) 322 switch (hw->mac.type) {
323 case ixgbe_mac_82599EB:
324 case ixgbe_mac_X540:
323 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); 325 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
326 break;
327 default:
328 break;
329 }
324 330
325 if (vflre & (1 << vf_shift)) { 331 if (vflre & (1 << vf_shift)) {
326 ret_val = 0; 332 ret_val = 0;
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
439{ 445{
440 struct ixgbe_mbx_info *mbx = &hw->mbx; 446 struct ixgbe_mbx_info *mbx = &hw->mbx;
441 447
442 if (hw->mac.type != ixgbe_mac_82599EB) 448 switch (hw->mac.type) {
443 return; 449 case ixgbe_mac_82599EB:
444 450 case ixgbe_mac_X540:
445 mbx->timeout = 0; 451 mbx->timeout = 0;
446 mbx->usec_delay = 0; 452 mbx->usec_delay = 0;
447 453
448 mbx->size = IXGBE_VFMAILBOX_SIZE; 454 mbx->size = IXGBE_VFMAILBOX_SIZE;
449 455
450 mbx->stats.msgs_tx = 0; 456 mbx->stats.msgs_tx = 0;
451 mbx->stats.msgs_rx = 0; 457 mbx->stats.msgs_rx = 0;
452 mbx->stats.reqs = 0; 458 mbx->stats.reqs = 0;
453 mbx->stats.acks = 0; 459 mbx->stats.acks = 0;
454 mbx->stats.rsts = 0; 460 mbx->stats.rsts = 0;
461 break;
462 default:
463 break;
464 }
455} 465}
456 466
457struct ixgbe_mbx_operations mbx_ops_82599 = { 467struct ixgbe_mbx_operations mbx_ops_generic = {
458 .read = ixgbe_read_mbx_pf, 468 .read = ixgbe_read_mbx_pf,
459 .write = ixgbe_write_mbx_pf, 469 .write = ixgbe_write_mbx_pf,
460 .read_posted = ixgbe_read_posted_mbx, 470 .read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b53..3df9b1590218 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
90 90
91extern struct ixgbe_mbx_operations mbx_ops_82599; 91extern struct ixgbe_mbx_operations mbx_ops_generic;
92 92
93#endif /* _IXGBE_MBX_H_ */ 93#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..8f7123e8fc0a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
115 case TN1010_PHY_ID: 115 case TN1010_PHY_ID:
116 phy_type = ixgbe_phy_tn; 116 phy_type = ixgbe_phy_tn;
117 break; 117 break;
118 case X540_PHY_ID:
119 phy_type = ixgbe_phy_aq;
120 break;
118 case QT2022_PHY_ID: 121 case QT2022_PHY_ID:
119 phy_type = ixgbe_phy_qt; 122 phy_type = ixgbe_phy_qt;
120 break; 123 break;
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
425} 428}
426 429
427/** 430/**
431 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
432 * @hw: pointer to hardware structure
433 * @speed: pointer to link speed
434 * @autoneg: boolean auto-negotiation value
435 *
436 * Determines the link capabilities by reading the AUTOC register.
437 */
438s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
439 ixgbe_link_speed *speed,
440 bool *autoneg)
441{
442 s32 status = IXGBE_ERR_LINK_SETUP;
443 u16 speed_ability;
444
445 *speed = 0;
446 *autoneg = true;
447
448 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
449 &speed_ability);
450
451 if (status == 0) {
452 if (speed_ability & MDIO_SPEED_10G)
453 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
454 if (speed_ability & MDIO_PMA_SPEED_1000)
455 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
456 if (speed_ability & MDIO_PMA_SPEED_100)
457 *speed |= IXGBE_LINK_SPEED_100_FULL;
458 }
459
460 return status;
461}
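The helper added above derives the advertised-speed mask from the PMA/PMD speed-ability register (MDIO MMD 1, register 1.4) using the MDIO_SPEED/MDIO_PMA_SPEED_* bits from linux/mdio.h, and it unconditionally reports autoneg support. A hedged caller sketch (the setup_link call follows the mac_ops signature used later in this patch):

	/* Sketch: prefer the fastest speed the copper PHY advertises. */
	ixgbe_link_speed speed = 0;
	bool autoneg = false;
	if (ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg) == 0 &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL,
				       autoneg, false);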
462
463/**
428 * ixgbe_reset_phy_nl - Performs a PHY reset 464 * ixgbe_reset_phy_nl - Performs a PHY reset
429 * @hw: pointer to hardware structure 465 * @hw: pointer to hardware structure
430 **/ 466 **/
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1378} 1414}
1379 1415
1380/** 1416/**
1417 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
1418 * @hw: pointer to hardware structure
1419 * @firmware_version: pointer to the PHY Firmware Version
 1420 **/
1421s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1422 u16 *firmware_version)
1423{
1424 s32 status = 0;
1425
1426 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
1427 firmware_version);
1428
1429 return status;
1430}
1431
1432/**
 1381 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1433 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1382 * @hw: pointer to hardware structure 1434 * @hw: pointer to hardware structure
1383 * 1435 *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..e2c6b7eac641 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
96 ixgbe_link_speed speed, 96 ixgbe_link_speed speed,
97 bool autoneg, 97 bool autoneg,
98 bool autoneg_wait_to_complete); 98 bool autoneg_wait_to_complete);
99s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
100 ixgbe_link_speed *speed,
101 bool *autoneg);
99 102
100/* PHY specific */ 103/* PHY specific */
101s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 104s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
103 bool *link_up); 106 bool *link_up);
104s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 107s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
105 u16 *firmware_version); 108 u16 *firmware_version);
109s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
110 u16 *firmware_version);
106 111
107s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 112s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
108s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 113s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f3..6e3e94b5a5f6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 * addresses 68 * addresses
69 */ 69 */
70 for (i = 0; i < entries; i++) { 70 for (i = 0; i < entries; i++) {
71 vfinfo->vf_mc_hashes[i] = hash_list[i];; 71 vfinfo->vf_mc_hashes[i] = hash_list[i];
72 } 72 }
73 73
74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { 74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) 178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
179{ 179{
180 unsigned char vf_mac_addr[6]; 180 unsigned char vf_mac_addr[6];
181 struct net_device *netdev = pci_get_drvdata(pdev); 181 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
183 unsigned int vfn = (event_mask & 0x3f); 182 unsigned int vfn = (event_mask & 0x3f);
184 183
185 bool enable = ((event_mask & 0x10000000U) != 0); 184 bool enable = ((event_mask & 0x10000000U) != 0);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c973..59f6d0afe0fe 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -54,9 +54,14 @@
54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C 54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
55#define IXGBE_DEV_ID_82599_CX4 0x10F9 55#define IXGBE_DEV_ID_82599_CX4 0x10F9
56#define IXGBE_DEV_ID_82599_SFP 0x10FB 56#define IXGBE_DEV_ID_82599_SFP 0x10FB
57#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
58#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 61#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
59#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 62#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
63#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
64#define IXGBE_DEV_ID_X540T 0x1528
60 65
61/* General Registers */ 66/* General Registers */
62#define IXGBE_CTRL 0x00000 67#define IXGBE_CTRL 0x00000
@@ -994,8 +999,10 @@
994/* PHY IDs*/ 999/* PHY IDs*/
995#define TN1010_PHY_ID 0x00A19410 1000#define TN1010_PHY_ID 0x00A19410
996#define TNX_FW_REV 0xB 1001#define TNX_FW_REV 0xB
1002#define X540_PHY_ID 0x01540200
997#define QT2022_PHY_ID 0x0043A400 1003#define QT2022_PHY_ID 0x0043A400
998#define ATH_PHY_ID 0x03429050 1004#define ATH_PHY_ID 0x03429050
1005#define AQ_FW_REV 0x20
999 1006
1000/* PHY Types */ 1007/* PHY Types */
1001#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 1008#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1463,6 +1470,8 @@
1463#define IXGBE_ANLP1_PAUSE 0x0C00 1470#define IXGBE_ANLP1_PAUSE 0x0C00
1464#define IXGBE_ANLP1_SYM_PAUSE 0x0400 1471#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1465#define IXGBE_ANLP1_ASM_PAUSE 0x0800 1472#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1473#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
1474
1466 1475
1467/* SW Semaphore Register bitmasks */ 1476/* SW Semaphore Register bitmasks */
1468#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1477#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1500,7 @@
1491#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1500#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1492#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1501#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1493#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ 1502#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1503#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
1494#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ 1504#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1495/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1505/* EEPROM Addressing bits based on type (0-small, 1-large) */
1496#define IXGBE_EEC_ADDR_SIZE 0x00000400 1506#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1510,18 @@
1500#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 1510#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
1501#define IXGBE_EEPROM_OPCODE_BITS 8 1511#define IXGBE_EEPROM_OPCODE_BITS 8
1502 1512
1513/* Part Number String Length */
1514#define IXGBE_PBANUM_LENGTH 11
1515
1503/* Checksum and EEPROM pointers */ 1516/* Checksum and EEPROM pointers */
1517#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1504#define IXGBE_EEPROM_CHECKSUM 0x3F 1518#define IXGBE_EEPROM_CHECKSUM 0x3F
1505#define IXGBE_EEPROM_SUM 0xBABA 1519#define IXGBE_EEPROM_SUM 0xBABA
1506#define IXGBE_PCIE_ANALOG_PTR 0x03 1520#define IXGBE_PCIE_ANALOG_PTR 0x03
1507#define IXGBE_ATLAS0_CONFIG_PTR 0x04 1521#define IXGBE_ATLAS0_CONFIG_PTR 0x04
1522#define IXGBE_PHY_PTR 0x04
1508#define IXGBE_ATLAS1_CONFIG_PTR 0x05 1523#define IXGBE_ATLAS1_CONFIG_PTR 0x05
1524#define IXGBE_OPTION_ROM_PTR 0x05
1509#define IXGBE_PCIE_GENERAL_PTR 0x06 1525#define IXGBE_PCIE_GENERAL_PTR 0x06
1510#define IXGBE_PCIE_CONFIG0_PTR 0x07 1526#define IXGBE_PCIE_CONFIG0_PTR 0x07
1511#define IXGBE_PCIE_CONFIG1_PTR 0x08 1527#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -2113,6 +2129,14 @@ typedef u32 ixgbe_physical_layer;
2113#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2129#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2114#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 2130#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2115 2131
2132/* Flow Control Macros */
2133#define PAUSE_RTT 8
2134#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
2135
2136#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
2137 PAUSE_MTU(MTU))
2138#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2139
2116/* Software ATR hash keys */ 2140/* Software ATR hash keys */
2117#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2141#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
2118#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2142#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2188,7 @@ struct ixgbe_atr_input_masks {
2164enum ixgbe_eeprom_type { 2188enum ixgbe_eeprom_type {
2165 ixgbe_eeprom_uninitialized = 0, 2189 ixgbe_eeprom_uninitialized = 0,
2166 ixgbe_eeprom_spi, 2190 ixgbe_eeprom_spi,
2191 ixgbe_flash,
2167 ixgbe_eeprom_none /* No NVM support */ 2192 ixgbe_eeprom_none /* No NVM support */
2168}; 2193};
2169 2194
@@ -2171,12 +2196,14 @@ enum ixgbe_mac_type {
2171 ixgbe_mac_unknown = 0, 2196 ixgbe_mac_unknown = 0,
2172 ixgbe_mac_82598EB, 2197 ixgbe_mac_82598EB,
2173 ixgbe_mac_82599EB, 2198 ixgbe_mac_82599EB,
2199 ixgbe_mac_X540,
2174 ixgbe_num_macs 2200 ixgbe_num_macs
2175}; 2201};
2176 2202
2177enum ixgbe_phy_type { 2203enum ixgbe_phy_type {
2178 ixgbe_phy_unknown = 0, 2204 ixgbe_phy_unknown = 0,
2179 ixgbe_phy_tn, 2205 ixgbe_phy_tn,
2206 ixgbe_phy_aq,
2180 ixgbe_phy_cu_unknown, 2207 ixgbe_phy_cu_unknown,
2181 ixgbe_phy_qt, 2208 ixgbe_phy_qt,
2182 ixgbe_phy_xaui, 2209 ixgbe_phy_xaui,
@@ -2405,6 +2432,7 @@ struct ixgbe_eeprom_operations {
2405 s32 (*write)(struct ixgbe_hw *, u16, u16); 2432 s32 (*write)(struct ixgbe_hw *, u16, u16);
2406 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2433 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2407 s32 (*update_checksum)(struct ixgbe_hw *); 2434 s32 (*update_checksum)(struct ixgbe_hw *);
2435 u16 (*calc_checksum)(struct ixgbe_hw *);
2408}; 2436};
2409 2437
2410struct ixgbe_mac_operations { 2438struct ixgbe_mac_operations {
@@ -2574,6 +2602,7 @@ struct ixgbe_hw {
2574 u16 subsystem_vendor_id; 2602 u16 subsystem_vendor_id;
2575 u8 revision_id; 2603 u8 revision_id;
2576 bool adapter_stopped; 2604 bool adapter_stopped;
2605 bool force_full_reset;
2577}; 2606};
2578 2607
2579struct ixgbe_info { 2608struct ixgbe_info {
@@ -2614,6 +2643,9 @@ struct ixgbe_info {
2614#define IXGBE_ERR_NO_SPACE -25 2643#define IXGBE_ERR_NO_SPACE -25
2615#define IXGBE_ERR_OVERTEMP -26 2644#define IXGBE_ERR_OVERTEMP -26
2616#define IXGBE_ERR_RAR_INDEX -27 2645#define IXGBE_ERR_RAR_INDEX -27
2646#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2647#define IXGBE_ERR_PBA_SECTION -31
2648#define IXGBE_ERR_INVALID_ARGUMENT -32
2617#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2649#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2618 2650
2619#endif /* _IXGBE_TYPE_H_ */ 2651#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..cf88515c0ef8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,722 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35
36#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128
38#define IXGBE_X540_RAR_ENTRIES 128
39#define IXGBE_X540_MC_TBL_SIZE 128
40#define IXGBE_X540_VFT_TBL_SIZE 128
41
42static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
43static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
44static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
45static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
46static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
47static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
48
49static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
50{
51 return ixgbe_media_type_copper;
52}
53
54static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{
56 struct ixgbe_mac_info *mac = &hw->mac;
57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
67
68 return 0;
69}
70
71/**
 72 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
73 * @hw: pointer to hardware structure
74 * @speed: new link speed
75 * @autoneg: true if autonegotiation enabled
76 * @autoneg_wait_to_complete: true when waiting for completion is needed
77 **/
78static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
79 ixgbe_link_speed speed, bool autoneg,
80 bool autoneg_wait_to_complete)
81{
82 return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
83 autoneg_wait_to_complete);
84}
85
86/**
87 * ixgbe_reset_hw_X540 - Perform hardware reset
88 * @hw: pointer to hardware structure
89 *
90 * Resets the hardware by resetting the transmit and receive units, masks
 91 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
92 * reset.
93 **/
94static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
95{
96 ixgbe_link_speed link_speed;
97 s32 status = 0;
98 u32 ctrl;
99 u32 ctrl_ext;
100 u32 reset_bit;
101 u32 i;
102 u32 autoc;
103 u32 autoc2;
104 bool link_up = false;
105
106 /* Call adapter stop to disable tx/rx and clear interrupts */
107 hw->mac.ops.stop_adapter(hw);
108
109 /*
 110 * Prevent the PCI-E bus from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset
112 */
113 status = ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118
119 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when
122 * mng is using it. If link is down or the flag to force full link
123 * reset is set, then perform link reset.
124 */
125 if (hw->force_full_reset) {
126 reset_bit = IXGBE_CTRL_LNK_RST;
127 } else {
128 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
129 if (!link_up)
130 reset_bit = IXGBE_CTRL_LNK_RST;
131 else
132 reset_bit = IXGBE_CTRL_RST;
133 }
134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
 136	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw);
138
139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) {
141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
 143		if (!(ctrl & reset_bit))
144 break;
145 }
 146	if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 }
150
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
154 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
155
156 msleep(50);
157
158 /* Set the Rx packet buffer size. */
159 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
160
161 /* Store the permanent mac address */
162 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
163
164 /*
165 * Store the original AUTOC/AUTOC2 values if they have not been
166 * stored off yet. Otherwise restore the stored original
167 * values since the reset operation sets back to defaults.
168 */
169 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
170 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
171 if (hw->mac.orig_link_settings_stored == false) {
172 hw->mac.orig_autoc = autoc;
173 hw->mac.orig_autoc2 = autoc2;
174 hw->mac.orig_link_settings_stored = true;
175 } else {
176 if (autoc != hw->mac.orig_autoc)
177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
178 IXGBE_AUTOC_AN_RESTART));
179
180 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
181 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
182 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
183 autoc2 |= (hw->mac.orig_autoc2 &
184 IXGBE_AUTOC2_UPPER_MASK);
185 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
186 }
187 }
188
189 /*
190 * Store MAC address from RAR0, clear receive address registers, and
191 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address.
193 */
194 hw->mac.num_rar_entries = 128;
195 hw->mac.ops.init_rx_addrs(hw);
196
197 /* Store the permanent mac address */
198 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
199
200 /* Store the permanent SAN mac address */
201 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
202
203 /* Add the SAN MAC address to the RAR only if it's a valid address */
204 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
205 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
206 hw->mac.san_addr, 0, IXGBE_RAH_AV);
207
208 /* Reserve the last RAR for the SAN MAC address */
209 hw->mac.num_rar_entries--;
210 }
211
212 /* Store the alternative WWNN/WWPN prefix */
213 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
214 &hw->mac.wwpn_prefix);
215
216 return status;
217}
218
219/**
220 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
221 * @hw: pointer to hardware structure
222 *
223 * Determines physical layer capabilities of the current configuration.
224 **/
225static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
226{
227 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
228 u16 ext_ability = 0;
229
230 hw->phy.ops.identify(hw);
231
232 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
233 &ext_ability);
234 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
235 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
236 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
237 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
238 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
239 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
240
241 return physical_layer;
242}
243
244/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure
247 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{
250 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
251 u32 eec;
252 u16 eeprom_size;
253
254 if (eeprom->type == ixgbe_eeprom_uninitialized) {
255 eeprom->semaphore_delay = 10;
256 eeprom->type = ixgbe_flash;
257
258 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
259 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
260 IXGBE_EEC_SIZE_SHIFT);
261 eeprom->word_size = 1 << (eeprom_size +
262 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size);
266 }
267
268 return 0;
269}
270
271/**
272 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
273 * @hw: pointer to hardware structure
274 * @offset: offset of word in the EEPROM to read
 275 * @data: word read from the EEPROM
276 **/
277static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{
279 s32 status;
280
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else
284 status = IXGBE_ERR_SWFW_SYNC;
285
286 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
287 return status;
288}
289
290/**
291 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
292 * @hw: pointer to hardware structure
293 * @offset: offset of word in the EEPROM to write
 294 * @data: word to write to the EEPROM
295 *
296 * Write a 16 bit word to the EEPROM using the EEWR register.
297 **/
298static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
299{
300 u32 eewr;
301 s32 status;
302
303 hw->eeprom.ops.init_params(hw);
304
305 if (offset >= hw->eeprom.word_size) {
306 status = IXGBE_ERR_EEPROM;
307 goto out;
308 }
309
310 eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
311 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START;
313
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n");
318 goto out;
319 }
320
321 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
322
323 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
324 if (status != 0) {
325 hw_dbg(hw, "Eeprom write EEWR timed out\n");
326 goto out;
327 }
328 } else {
329 status = IXGBE_ERR_SWFW_SYNC;
330 }
331
332out:
333 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
334 return status;
335}
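Both EEPROM accessors above serialize against firmware through the X540-specific SWFW semaphore, because on X540 the EEPROM image lives in shared flash that the FW can also touch (eeprom->type is set to ixgbe_flash above). One subtlety worth noting: as written, the release runs even when the acquire failed; a tighter sketch scopes the release to the success path (same calls, rearranged):

	/* Sketch: release the semaphore only if it was actually acquired. */
	s32 status = IXGBE_ERR_SWFW_SYNC;
	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
		status = ixgbe_read_eerd_generic(hw, offset, data);
		ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
	}
	return status;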
336
337/**
338 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
339 * @hw: pointer to hardware structure
340 **/
341static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
342{
343 u16 i;
344 u16 j;
345 u16 checksum = 0;
346 u16 length = 0;
347 u16 pointer = 0;
348 u16 word = 0;
349
350 /* Include 0x0-0x3F in the checksum */
351 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
352 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
353 hw_dbg(hw, "EEPROM read failed\n");
354 break;
355 }
356 checksum += word;
357 }
358
359 /*
360 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
361 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
362 */
363 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
364 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
365 continue;
366
367 if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
368 hw_dbg(hw, "EEPROM read failed\n");
369 break;
370 }
371
372 /* Skip pointer section if the pointer is invalid. */
373 if (pointer == 0xFFFF || pointer == 0 ||
374 pointer >= hw->eeprom.word_size)
375 continue;
376
377 if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
378 hw_dbg(hw, "EEPROM read failed\n");
379 break;
380 }
381
382 /* Skip pointer section if length is invalid. */
383 if (length == 0xFFFF || length == 0 ||
384 (pointer + length) >= hw->eeprom.word_size)
385 continue;
386
387 for (j = pointer+1; j <= pointer+length; j++) {
388 if (hw->eeprom.ops.read(hw, j, &word) != 0) {
389 hw_dbg(hw, "EEPROM read failed\n");
390 break;
391 }
392 checksum += word;
393 }
394 }
395
396 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
397
398 return checksum;
399}
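The sum above is defined so that, once the result is stored at word 0x3F (IXGBE_EEPROM_CHECKSUM), the 16-bit sum of the whole checksummed region equals IXGBE_EEPROM_SUM (0xBABA). A hedged verification sketch built on the same ops table introduced by this patch (IXGBE_ERR_EEPROM_CHECKSUM is the driver's existing checksum error code):

	/* Sketch: validate an EEPROM image against the 0xBABA invariant. */
	u16 computed = hw->eeprom.ops.calc_checksum(hw);
	u16 stored;
	s32 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &stored);
	if (status == 0 && stored != computed)
		status = IXGBE_ERR_EEPROM_CHECKSUM;	/* image is corrupt */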
400
401/**
402 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
403 * @hw: pointer to hardware structure
404 *
 405 * After writing EEPROM to shadow RAM using the EEWR register, software calculates
406 * checksum and updates the EEPROM and instructs the hardware to update
407 * the flash.
408 **/
409static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
410{
411 s32 status;
412
413 status = ixgbe_update_eeprom_checksum_generic(hw);
414
 415	if (status == 0)
416 status = ixgbe_update_flash_X540(hw);
417
418 return status;
419}
420
421/**
422 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
423 * @hw: pointer to hardware structure
424 *
425 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
426 * EEPROM from shadow RAM to the flash device.
427 **/
428static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
429{
430 u32 flup;
431 s32 status = IXGBE_ERR_EEPROM;
432
433 status = ixgbe_poll_flash_update_done_X540(hw);
434 if (status == IXGBE_ERR_EEPROM) {
435 hw_dbg(hw, "Flash update time out\n");
436 goto out;
437 }
438
439 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
440 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
441
442 status = ixgbe_poll_flash_update_done_X540(hw);
 443	if (status)
 444		hw_dbg(hw, "Flash update time out\n");
 445	else
 446		hw_dbg(hw, "Flash update complete\n");
447
448 if (hw->revision_id == 0) {
449 flup = IXGBE_READ_REG(hw, IXGBE_EEC);
450
451 if (flup & IXGBE_EEC_SEC1VAL) {
452 flup |= IXGBE_EEC_FLUP;
453 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
454 }
455
456 status = ixgbe_poll_flash_update_done_X540(hw);
 457		if (status)
 458			hw_dbg(hw, "Flash update time out\n");
 459		else
 460			hw_dbg(hw, "Flash update complete\n");
461
462 }
463out:
464 return status;
465}
466
467/**
468 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
469 * @hw: pointer to hardware structure
470 *
471 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
472 * flash update is done.
473 **/
474static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
475{
476 u32 i;
477 u32 reg;
478 s32 status = IXGBE_ERR_EEPROM;
479
480 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
481 reg = IXGBE_READ_REG(hw, IXGBE_EEC);
482 if (reg & IXGBE_EEC_FLUDONE) {
483 status = 0;
484 break;
485 }
486 udelay(5);
487 }
488 return status;
489}
490
491/**
492 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
493 * @hw: pointer to hardware structure
494 * @mask: Mask to specify which semaphore to acquire
495 *
 496 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
497 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
498 **/
499static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
500{
501 u32 swfw_sync;
502 u32 swmask = mask;
503 u32 fwmask = mask << 5;
504 u32 hwmask = 0;
505 u32 timeout = 200;
506 u32 i;
507
508 if (swmask == IXGBE_GSSR_EEP_SM)
509 hwmask = IXGBE_GSSR_FLASH_SM;
510
511 for (i = 0; i < timeout; i++) {
512 /*
513 * SW NVM semaphore bit is used for access to all
514 * SW_FW_SYNC bits (not just NVM)
515 */
516 if (ixgbe_get_swfw_sync_semaphore(hw))
517 return IXGBE_ERR_SWFW_SYNC;
518
519 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
520 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
521 swfw_sync |= swmask;
522 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
523 ixgbe_release_swfw_sync_semaphore(hw);
524 break;
525 } else {
526 /*
527 * Firmware currently using resource (fwmask),
528 * hardware currently using resource (hwmask),
529 * or other software thread currently using
530 * resource (swmask)
531 */
532 ixgbe_release_swfw_sync_semaphore(hw);
533 msleep(5);
534 }
535 }
536
537 /*
538 * If the resource is not released by the FW/HW the SW can assume that
 539 * the FW/HW malfunctions. In that case the SW should set the
540 * SW bit(s) of the requested resource(s) while ignoring the
541 * corresponding FW/HW bits in the SW_FW_SYNC register.
542 */
543 if (i >= timeout) {
544 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
545 if (swfw_sync & (fwmask | hwmask)) {
546 if (ixgbe_get_swfw_sync_semaphore(hw))
547 return IXGBE_ERR_SWFW_SYNC;
548
549 swfw_sync |= swmask;
550 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
551 ixgbe_release_swfw_sync_semaphore(hw);
552 }
553 }
554
555 msleep(5);
556 return 0;
557}
558
559/**
560 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
561 * @hw: pointer to hardware structure
562 * @mask: Mask to specify which semaphore to release
563 *
564 * Releases the SWFW semaphore through the SW_FW_SYNC register
565 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
566 **/
567static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
568{
569 u32 swfw_sync;
570 u32 swmask = mask;
571
572 ixgbe_get_swfw_sync_semaphore(hw);
573
574 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
575 swfw_sync &= ~swmask;
576 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
577
578 ixgbe_release_swfw_sync_semaphore(hw);
579 msleep(5);
580}
581
582/**
583 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
584 * @hw: pointer to hardware structure
585 *
586 * Sets the hardware semaphores so SW/FW can gain control of shared resources
587 **/
588static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
589{
590 s32 status = IXGBE_ERR_EEPROM;
591 u32 timeout = 2000;
592 u32 i;
593 u32 swsm;
594
595 /* Get SMBI software semaphore between device drivers first */
596 for (i = 0; i < timeout; i++) {
597 /*
598 * If the SMBI bit is 0 when we read it, then the bit will be
599 * set and we have the semaphore
600 */
601 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
602 if (!(swsm & IXGBE_SWSM_SMBI)) {
603 status = 0;
604 break;
605 }
606 udelay(50);
607 }
608
609 /* Now get the semaphore between SW/FW through the REGSMP bit */
610 if (!status) {
611 for (i = 0; i < timeout; i++) {
612 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
613 if (!(swsm & IXGBE_SWFW_REGSMP))
614 break;
615
616 udelay(50);
617 }
618 } else {
619 hw_dbg(hw, "Software semaphore SMBI between device drivers "
620 "not granted.\n");
621 }
622
623 return status;
624}
625
626/**
627 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
628 * @hw: pointer to hardware structure
629 *
630 * This function clears hardware semaphore bits.
631 **/
632static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
633{
634 u32 swsm;
635
636 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
637
638 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
639 swsm &= ~IXGBE_SWSM_SMBI;
640 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
641
642 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
643 swsm &= ~IXGBE_SWFW_REGSMP;
644 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
645
646 IXGBE_WRITE_FLUSH(hw);
647}
648
649static struct ixgbe_mac_operations mac_ops_X540 = {
650 .init_hw = &ixgbe_init_hw_generic,
651 .reset_hw = &ixgbe_reset_hw_X540,
652 .start_hw = &ixgbe_start_hw_generic,
653 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
654 .get_media_type = &ixgbe_get_media_type_X540,
655 .get_supported_physical_layer =
656 &ixgbe_get_supported_physical_layer_X540,
657 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
658 .get_mac_addr = &ixgbe_get_mac_addr_generic,
659 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
660 .get_device_caps = NULL,
661 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
662 .stop_adapter = &ixgbe_stop_adapter_generic,
663 .get_bus_info = &ixgbe_get_bus_info_generic,
664 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
665 .read_analog_reg8 = NULL,
666 .write_analog_reg8 = NULL,
667 .setup_link = &ixgbe_setup_mac_link_X540,
668 .check_link = &ixgbe_check_mac_link_generic,
669 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
670 .led_on = &ixgbe_led_on_generic,
671 .led_off = &ixgbe_led_off_generic,
672 .blink_led_start = &ixgbe_blink_led_start_generic,
673 .blink_led_stop = &ixgbe_blink_led_stop_generic,
674 .set_rar = &ixgbe_set_rar_generic,
675 .clear_rar = &ixgbe_clear_rar_generic,
676 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic,
683 .clear_vfta = &ixgbe_clear_vfta_generic,
684 .set_vfta = &ixgbe_set_vfta_generic,
685 .fc_enable = &ixgbe_fc_enable_generic,
686 .init_uta_tables = &ixgbe_init_uta_tables_generic,
687 .setup_sfp = NULL,
688};
689
690static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
691 .init_params = &ixgbe_init_eeprom_params_X540,
692 .read = &ixgbe_read_eerd_X540,
693 .write = &ixgbe_write_eewr_X540,
694 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
695 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
696 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
697};
698
699static struct ixgbe_phy_operations phy_ops_X540 = {
700 .identify = &ixgbe_identify_phy_generic,
701 .identify_sfp = &ixgbe_identify_sfp_module_generic,
702 .init = NULL,
703 .reset = &ixgbe_reset_phy_generic,
704 .read_reg = &ixgbe_read_phy_reg_generic,
705 .write_reg = &ixgbe_write_phy_reg_generic,
706 .setup_link = &ixgbe_setup_phy_link_generic,
707 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
708 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
709 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
710 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
711 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
712 .check_overtemp = &ixgbe_tn_check_overtemp,
713};
714
715struct ixgbe_info ixgbe_X540_info = {
716 .mac = ixgbe_mac_X540,
717 .get_invariants = &ixgbe_get_invariants_X540,
718 .mac_ops = &mac_ops_X540,
719 .eeprom_ops = &eeprom_ops_X540,
720 .phy_ops = &phy_ops_X540,
721 .mbx_ops = &mbx_ops_generic,
722};
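/*
 * The acquire/release pair defined earlier in this file is intended to
 * bracket any access to resources shared with firmware and the other PCI
 * function. A minimal sketch of the calling pattern, using only helpers
 * and masks defined above (the wrapper name is hypothetical and its body
 * is elided):
 */
static s32 x540_with_nvm_lock(struct ixgbe_hw *hw)
{
	s32 status;

	/* Serialize against FW and the other function before touching NVM */
	status = ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
	if (status)
		return status;

	/* ... access the shared NVM/flash registers here ... */

	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
	return 0;
}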
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8cc..1f35d229e71a 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82599 Virtual Function driver 3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation. 4# Copyright(c) 1999 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a05..f8a807d606c7 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4cc817acfb62..fa29b3c8c464 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -544,7 +544,7 @@ struct ixgbevf_reg_test {
544#define TABLE64_TEST_HI 6 544#define TABLE64_TEST_HI 6
545 545
546/* default VF register test */ 546/* default VF register test */
547static struct ixgbevf_reg_test reg_test_vf[] = { 547static const struct ixgbevf_reg_test reg_test_vf[] = {
548 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 548 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
549 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 549 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
550 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 550 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
@@ -557,19 +557,23 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
557 { 0, 0, 0, 0 } 557 { 0, 0, 0, 0 }
558}; 558};
559 559
560static const u32 register_test_patterns[] = {
561 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
562};
563
560#define REG_PATTERN_TEST(R, M, W) \ 564#define REG_PATTERN_TEST(R, M, W) \
561{ \ 565{ \
562 u32 pat, val, before; \ 566 u32 pat, val, before; \
563 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 567 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
564 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
565 before = readl(adapter->hw.hw_addr + R); \ 568 before = readl(adapter->hw.hw_addr + R); \
566 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 569 writel((register_test_patterns[pat] & W), \
570 (adapter->hw.hw_addr + R)); \
567 val = readl(adapter->hw.hw_addr + R); \ 571 val = readl(adapter->hw.hw_addr + R); \
568 if (val != (_test[pat] & W & M)) { \ 572 if (val != (register_test_patterns[pat] & W & M)) { \
569 hw_dbg(&adapter->hw, \ 573 hw_dbg(&adapter->hw, \
570 "pattern test reg %04X failed: got " \ 574 "pattern test reg %04X failed: got " \
571 "0x%08X expected 0x%08X\n", \ 575 "0x%08X expected 0x%08X\n", \
572 R, val, (_test[pat] & W & M)); \ 576 R, val, (register_test_patterns[pat] & W & M)); \
573 *data = R; \ 577 *data = R; \
574 writel(before, adapter->hw.hw_addr + R); \ 578 writel(before, adapter->hw.hw_addr + R); \
575 return 1; \ 579 return 1; \
@@ -596,7 +600,7 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
596 600
597static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) 601static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
598{ 602{
599 struct ixgbevf_reg_test *test; 603 const struct ixgbevf_reg_test *test;
600 u32 i; 604 u32 i;
601 605
602 test = reg_test_vf; 606 test = reg_test_vf;
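/*
 * The REG_PATTERN_TEST macro above reduces to: save the register, write
 * (pattern & write-mask), read back, restore, and compare the readback
 * against (pattern & write-mask & read-mask). A stand-alone model of that
 * logic (function name hypothetical, plain readl/writel on an iomem
 * cookie rather than the adapter macro):
 */
#include <linux/io.h>
#include <linux/kernel.h>

static const u32 patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

static int example_pattern_test(void __iomem *reg, u32 mask, u32 wmask)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
		u32 before = readl(reg);	/* save current value */
		u32 val;

		writel(patterns[i] & wmask, reg);	/* writable bits only */
		val = readl(reg);
		writel(before, reg);			/* restore */
		if (val != (patterns[i] & wmask & mask)) {
			pr_err("pattern test failed: got 0x%08X expected 0x%08X\n",
			       val, patterns[i] & wmask & mask);
			return 1;
		}
	}
	return 0;
}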
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa2..0cd6abcf9306 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c9652389..809e38ce8a13 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 82599 Virtual Function";
53 53
54#define DRV_VERSION "1.0.0-k0" 54#define DRV_VERSION "1.0.12-k0"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation.";
57 58
58static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 59static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
59 [board_82599_vf] = &ixgbevf_vf_info, 60 [board_82599_vf] = &ixgbevf_vf_info,
@@ -2488,10 +2489,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2488 int size; 2489 int size;
2489 2490
2490 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2491 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2491 tx_ring->tx_buffer_info = vmalloc(size); 2492 tx_ring->tx_buffer_info = vzalloc(size);
2492 if (!tx_ring->tx_buffer_info) 2493 if (!tx_ring->tx_buffer_info)
2493 goto err; 2494 goto err;
2494 memset(tx_ring->tx_buffer_info, 0, size);
2495 2495
2496 /* round up to nearest 4K */ 2496 /* round up to nearest 4K */
2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2555 int size; 2555 int size;
2556 2556
2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2558 rx_ring->rx_buffer_info = vmalloc(size); 2558 rx_ring->rx_buffer_info = vzalloc(size);
2559 if (!rx_ring->rx_buffer_info) { 2559 if (!rx_ring->rx_buffer_info) {
2560 hw_dbg(&adapter->hw, 2560 hw_dbg(&adapter->hw,
2561 "Unable to vmalloc buffer memory for " 2561 "Unable to vmalloc buffer memory for "
2562 "the receive descriptor ring\n"); 2562 "the receive descriptor ring\n");
2563 goto alloc_failed; 2563 goto alloc_failed;
2564 } 2564 }
2565 memset(rx_ring->rx_buffer_info, 0, size);
2566 2565
2567 /* Round up to nearest 4K */ 2566 /* Round up to nearest 4K */
2568 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2567 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3424,10 +3423,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3424 if (hw->mac.ops.get_bus_info) 3423 if (hw->mac.ops.get_bus_info)
3425 hw->mac.ops.get_bus_info(hw); 3424 hw->mac.ops.get_bus_info(hw);
3426 3425
3427
3428 netif_carrier_off(netdev);
3429 netif_tx_stop_all_queues(netdev);
3430
3431 strcpy(netdev->name, "eth%d"); 3426 strcpy(netdev->name, "eth%d");
3432 3427
3433 err = register_netdev(netdev); 3428 err = register_netdev(netdev);
@@ -3436,6 +3431,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3436 3431
3437 adapter->netdev_registered = true; 3432 adapter->netdev_registered = true;
3438 3433
3434 netif_carrier_off(netdev);
3435
3439 ixgbevf_init_last_counter_stats(adapter); 3436 ixgbevf_init_last_counter_stats(adapter);
3440 3437
3441 /* print the MAC address */ 3438 /* print the MAC address */
@@ -3487,10 +3484,9 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3487 3484
3488 del_timer_sync(&adapter->watchdog_timer); 3485 del_timer_sync(&adapter->watchdog_timer);
3489 3486
3487 cancel_work_sync(&adapter->reset_task);
3490 cancel_work_sync(&adapter->watchdog_task); 3488 cancel_work_sync(&adapter->watchdog_task);
3491 3489
3492 flush_scheduled_work();
3493
3494 if (adapter->netdev_registered) { 3490 if (adapter->netdev_registered) {
3495 unregister_netdev(netdev); 3491 unregister_netdev(netdev);
3496 adapter->netdev_registered = false; 3492 adapter->netdev_registered = false;
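/*
 * The vmalloc()+memset() to vzalloc() conversions in this patch rely on
 * the two forms being equivalent: vzalloc(size) returns virtually
 * contiguous, already-zeroed memory. A sketch of the identity the hunks
 * above depend on (function name hypothetical):
 */
#include <linux/vmalloc.h>
#include <linux/string.h>

static void *vmalloc_zeroed(size_t size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;	/* same post-condition as vzalloc(size) */
}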
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a65..7a8833125770 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7f..b2b5bf5daa3d 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec1..fb80ca1bcc93 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfaf..971019d819b4 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc831424..144c99d5363a 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3119c2..2411e72ba572 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2076,12 +2076,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
2076 } 2076 }
2077 2077
2078 if (new_mtu > 1900) { 2078 if (new_mtu > 1900) {
2079 netdev->features &= ~(NETIF_F_HW_CSUM | 2079 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2080 NETIF_F_TSO | 2080 NETIF_F_TSO | NETIF_F_TSO6);
2081 NETIF_F_TSO6);
2082 } else { 2081 } else {
2083 if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) 2082 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2084 netdev->features |= NETIF_F_HW_CSUM; 2083 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2085 if (test_bit(JME_FLAG_TSO, &jme->flags)) 2084 if (test_bit(JME_FLAG_TSO, &jme->flags))
2086 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2085 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2087 } 2086 }
@@ -2514,10 +2513,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
2514 if (on) { 2513 if (on) {
2515 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2514 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2516 if (netdev->mtu <= 1900) 2515 if (netdev->mtu <= 1900)
2517 netdev->features |= NETIF_F_HW_CSUM; 2516 netdev->features |=
2517 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2518 } else { 2518 } else {
2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags); 2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2520 netdev->features &= ~NETIF_F_HW_CSUM; 2520 netdev->features &=
2521 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2521 } 2522 }
2522 2523
2523 return 0; 2524 return 0;
@@ -2797,7 +2798,8 @@ jme_init_one(struct pci_dev *pdev,
2797 netdev->netdev_ops = &jme_netdev_ops; 2798 netdev->netdev_ops = &jme_netdev_ops;
2798 netdev->ethtool_ops = &jme_ethtool_ops; 2799 netdev->ethtool_ops = &jme_ethtool_ops;
2799 netdev->watchdog_timeo = TX_TIMEOUT; 2800 netdev->watchdog_timeo = TX_TIMEOUT;
2800 netdev->features = NETIF_F_HW_CSUM | 2801 netdev->features = NETIF_F_IP_CSUM |
2802 NETIF_F_IPV6_CSUM |
2801 NETIF_F_SG | 2803 NETIF_F_SG |
2802 NETIF_F_TSO | 2804 NETIF_F_TSO |
2803 NETIF_F_TSO6 | 2805 NETIF_F_TSO6 |
@@ -2955,11 +2957,7 @@ jme_init_one(struct pci_dev *pdev,
2955 * Tell stack that we are not ready to work until open() 2957 * Tell stack that we are not ready to work until open()
2956 */ 2958 */
2957 netif_carrier_off(netdev); 2959 netif_carrier_off(netdev);
2958 netif_stop_queue(netdev);
2959 2960
2960 /*
2961 * Register netdev
2962 */
2963 rc = register_netdev(netdev); 2961 rc = register_netdev(netdev);
2964 if (rc) { 2962 if (rc) {
2965 pr_err("Cannot register net device\n"); 2963 pr_err("Cannot register net device\n");
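/*
 * NETIF_F_HW_CSUM advertises protocol-independent checksum offload; the
 * jme hunks above narrow the claim to TCP/UDP over IPv4/IPv6, presumably
 * matching what the MAC can actually compute. A sketch of the resulting
 * toggle pattern (helper name hypothetical):
 */
#include <linux/netdevice.h>

static void set_tx_csum_features(struct net_device *netdev, bool enable)
{
	if (enable)
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
}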
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1545 1545
1546/* driver bus management functions */ 1546/* driver bus management functions */
1547 1547
1548#ifdef CONFIG_PM
1549static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
1550{
1551 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1552 struct net_device *dev = ks->netdev;
1553
1554 if (netif_running(dev)) {
1555 netif_device_detach(dev);
1556 ks8851_net_stop(dev);
1557 }
1558
1559 return 0;
1560}
1561
1562static int ks8851_resume(struct spi_device *spi)
1563{
1564 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1565 struct net_device *dev = ks->netdev;
1566
1567 if (netif_running(dev)) {
1568 ks8851_net_open(dev);
1569 netif_device_attach(dev);
1570 }
1571
1572 return 0;
1573}
1574#else
1575#define ks8851_suspend NULL
1576#define ks8851_resume NULL
1577#endif
1578
1548static int __devinit ks8851_probe(struct spi_device *spi) 1579static int __devinit ks8851_probe(struct spi_device *spi)
1549{ 1580{
1550 struct net_device *ndev; 1581 struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
1679 }, 1710 },
1680 .probe = ks8851_probe, 1711 .probe = ks8851_probe,
1681 .remove = __devexit_p(ks8851_remove), 1712 .remove = __devexit_p(ks8851_remove),
1713 .suspend = ks8851_suspend,
1714 .resume = ks8851_resume,
1682}; 1715};
1683 1716
1684static int __init ks8851_init(void) 1717static int __init ks8851_init(void)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
207#define LANCE_BUS_IF 0x16 207#define LANCE_BUS_IF 0x16
208#define LANCE_TOTAL_SIZE 0x18 208#define LANCE_TOTAL_SIZE 0x18
209 209
210#define TX_TIMEOUT 20 210#define TX_TIMEOUT (HZ/5)
211 211
212/* The LANCE Rx and Tx ring descriptors. */ 212/* The LANCE Rx and Tx ring descriptors. */
213struct lance_rx_head { 213struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
161#define RX_SUSPEND 0x0030 161#define RX_SUSPEND 0x0030
162#define RX_ABORT 0x0040 162#define RX_ABORT 0x0040
163 163
164#define TX_TIMEOUT 5 164#define TX_TIMEOUT (HZ/20)
165 165
166 166
167struct i596_reg { 167struct i596_reg {
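/*
 * The TX_TIMEOUT changes above matter because watchdog_timeo is measured
 * in jiffies: a bare constant like 20 means 20 ticks, whose wall-clock
 * length varies with CONFIG_HZ, while HZ/5 is 200 ms on any kernel. A
 * sketch using the standard jiffies helper (function name hypothetical):
 */
#include <linux/jiffies.h>

#define EXAMPLE_TX_TIMEOUT (HZ / 5)	/* 200 ms independent of CONFIG_HZ */

static unsigned long example_tx_timeout_ms(void)
{
	return jiffies_to_msecs(EXAMPLE_TX_TIMEOUT);	/* always 200 */
}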
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178b..da74db4a03d4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
203static int __ei_open(struct net_device *dev) 203static int __ei_open(struct net_device *dev)
204{ 204{
205 unsigned long flags; 205 unsigned long flags;
206 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 206 struct ei_device *ei_local = netdev_priv(dev);
207 207
208 if (dev->watchdog_timeo <= 0) 208 if (dev->watchdog_timeo <= 0)
209 dev->watchdog_timeo = TX_TIMEOUT; 209 dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
231 */ 231 */
232static int __ei_close(struct net_device *dev) 232static int __ei_close(struct net_device *dev)
233{ 233{
234 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 234 struct ei_device *ei_local = netdev_priv(dev);
235 unsigned long flags; 235 unsigned long flags;
236 236
237 /* 237 /*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
256static void __ei_tx_timeout(struct net_device *dev) 256static void __ei_tx_timeout(struct net_device *dev)
257{ 257{
258 unsigned long e8390_base = dev->base_addr; 258 unsigned long e8390_base = dev->base_addr;
259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 259 struct ei_device *ei_local = netdev_priv(dev);
260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 unsigned long flags; 261 unsigned long flags;
262 262
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
303 struct net_device *dev) 303 struct net_device *dev)
304{ 304{
305 unsigned long e8390_base = dev->base_addr; 305 unsigned long e8390_base = dev->base_addr;
306 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 306 struct ei_device *ei_local = netdev_priv(dev);
307 int send_length = skb->len, output_page; 307 int send_length = skb->len, output_page;
308 unsigned long flags; 308 unsigned long flags;
309 char buf[ETH_ZLEN]; 309 char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
592static void ei_tx_intr(struct net_device *dev) 592static void ei_tx_intr(struct net_device *dev)
593{ 593{
594 unsigned long e8390_base = dev->base_addr; 594 unsigned long e8390_base = dev->base_addr;
595 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 595 struct ei_device *ei_local = netdev_priv(dev);
596 int status = ei_inb(e8390_base + EN0_TSR); 596 int status = ei_inb(e8390_base + EN0_TSR);
597 597
598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ 598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
675static void ei_receive(struct net_device *dev) 675static void ei_receive(struct net_device *dev)
676{ 676{
677 unsigned long e8390_base = dev->base_addr; 677 unsigned long e8390_base = dev->base_addr;
678 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 678 struct ei_device *ei_local = netdev_priv(dev);
679 unsigned char rxing_page, this_frame, next_frame; 679 unsigned char rxing_page, this_frame, next_frame;
680 unsigned short current_offset; 680 unsigned short current_offset;
681 int rx_pkt_count = 0; 681 int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
879static struct net_device_stats *__ei_get_stats(struct net_device *dev) 879static struct net_device_stats *__ei_get_stats(struct net_device *dev)
880{ 880{
881 unsigned long ioaddr = dev->base_addr; 881 unsigned long ioaddr = dev->base_addr;
882 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 882 struct ei_device *ei_local = netdev_priv(dev);
883 unsigned long flags; 883 unsigned long flags;
884 884
885 /* If the card is stopped, just return the present stats. */ 885 /* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
927{ 927{
928 unsigned long e8390_base = dev->base_addr; 928 unsigned long e8390_base = dev->base_addr;
929 int i; 929 int i;
930 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 930 struct ei_device *ei_local = netdev_priv(dev);
931 931
932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) 932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
933 { 933 {
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
981static void __ei_set_multicast_list(struct net_device *dev) 981static void __ei_set_multicast_list(struct net_device *dev)
982{ 982{
983 unsigned long flags; 983 unsigned long flags;
984 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 984 struct ei_device *ei_local = netdev_priv(dev);
985 985
986 spin_lock_irqsave(&ei_local->page_lock, flags); 986 spin_lock_irqsave(&ei_local->page_lock, flags);
987 do_set_multicast_list(dev); 987 do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
998 998
999static void ethdev_setup(struct net_device *dev) 999static void ethdev_setup(struct net_device *dev)
1000{ 1000{
1001 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1001 struct ei_device *ei_local = netdev_priv(dev);
1002 if (ei_debug > 1) 1002 if (ei_debug > 1)
1003 printk(version); 1003 printk(version);
1004 1004
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
1036static void __NS8390_init(struct net_device *dev, int startp) 1036static void __NS8390_init(struct net_device *dev, int startp)
1037{ 1037{
1038 unsigned long e8390_base = dev->base_addr; 1038 unsigned long e8390_base = dev->base_addr;
1039 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1039 struct ei_device *ei_local = netdev_priv(dev);
1040 int i; 1040 int i;
1041 int endcfg = ei_local->word16 1041 int endcfg = ei_local->word16
1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) 1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1099 int start_page) 1099 int start_page)
1100{ 1100{
1101 unsigned long e8390_base = dev->base_addr; 1101 unsigned long e8390_base = dev->base_addr;
1102 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1102 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1103 1103
1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1105 1105
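/*
 * The cast removals above work because netdev_priv() returns void *,
 * which C converts implicitly to any object pointer type. A sketch with
 * a hypothetical private struct:
 */
#include <linux/netdevice.h>

struct example_priv {
	int state;		/* hypothetical driver state */
};

static void example_use_priv(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* no cast needed */

	priv->state = 1;
}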
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20db..6ed577b065df 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru;
41}; 42};
42 43
43#define macvlan_port_get_rcu(dev) \ 44#define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
169 macvlan_broadcast(skb, port, NULL, 170 macvlan_broadcast(skb, port, NULL,
170 MACVLAN_MODE_PRIVATE | 171 MACVLAN_MODE_PRIVATE |
171 MACVLAN_MODE_VEPA | 172 MACVLAN_MODE_VEPA |
173 MACVLAN_MODE_PASSTHRU|
172 MACVLAN_MODE_BRIDGE); 174 MACVLAN_MODE_BRIDGE);
173 else if (src->mode == MACVLAN_MODE_VEPA) 175 else if (src->mode == MACVLAN_MODE_VEPA)
174 /* flood to everyone except source */ 176 /* flood to everyone except source */
@@ -185,7 +187,10 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
185 return skb; 187 return skb;
186 } 188 }
187 189
188 vlan = macvlan_hash_lookup(port, eth->h_dest); 190 if (port->passthru)
191 vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
192 else
193 vlan = macvlan_hash_lookup(port, eth->h_dest);
189 if (vlan == NULL) 194 if (vlan == NULL)
190 return skb; 195 return skb;
191 196
@@ -243,18 +248,22 @@ xmit_world:
243netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 248netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
244 struct net_device *dev) 249 struct net_device *dev)
245{ 250{
246 int i = skb_get_queue_mapping(skb);
247 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
248 unsigned int len = skb->len; 251 unsigned int len = skb->len;
249 int ret; 252 int ret;
253 const struct macvlan_dev *vlan = netdev_priv(dev);
250 254
251 ret = macvlan_queue_xmit(skb, dev); 255 ret = macvlan_queue_xmit(skb, dev);
252 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 256 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
253 txq->tx_packets++; 257 struct macvlan_pcpu_stats *pcpu_stats;
254 txq->tx_bytes += len;
255 } else
256 txq->tx_dropped++;
257 258
259 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
260 u64_stats_update_begin(&pcpu_stats->syncp);
261 pcpu_stats->tx_packets++;
262 pcpu_stats->tx_bytes += len;
263 u64_stats_update_end(&pcpu_stats->syncp);
264 } else {
265 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
266 }
258 return ret; 267 return ret;
259} 268}
260EXPORT_SYMBOL_GPL(macvlan_start_xmit); 269EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +293,11 @@ static int macvlan_open(struct net_device *dev)
284 struct net_device *lowerdev = vlan->lowerdev; 293 struct net_device *lowerdev = vlan->lowerdev;
285 int err; 294 int err;
286 295
296 if (vlan->port->passthru) {
297 dev_set_promiscuity(lowerdev, 1);
298 goto hash_add;
299 }
300
287 err = -EBUSY; 301 err = -EBUSY;
288 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 302 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
289 goto out; 303 goto out;
@@ -296,6 +310,8 @@ static int macvlan_open(struct net_device *dev)
296 if (err < 0) 310 if (err < 0)
297 goto del_unicast; 311 goto del_unicast;
298 } 312 }
313
314hash_add:
299 macvlan_hash_add(vlan); 315 macvlan_hash_add(vlan);
300 return 0; 316 return 0;
301 317
@@ -310,12 +326,18 @@ static int macvlan_stop(struct net_device *dev)
310 struct macvlan_dev *vlan = netdev_priv(dev); 326 struct macvlan_dev *vlan = netdev_priv(dev);
311 struct net_device *lowerdev = vlan->lowerdev; 327 struct net_device *lowerdev = vlan->lowerdev;
312 328
329 if (vlan->port->passthru) {
330 dev_set_promiscuity(lowerdev, -1);
331 goto hash_del;
332 }
333
313 dev_mc_unsync(lowerdev, dev); 334 dev_mc_unsync(lowerdev, dev);
314 if (dev->flags & IFF_ALLMULTI) 335 if (dev->flags & IFF_ALLMULTI)
315 dev_set_allmulti(lowerdev, -1); 336 dev_set_allmulti(lowerdev, -1);
316 337
317 dev_uc_del(lowerdev, dev->dev_addr); 338 dev_uc_del(lowerdev, dev->dev_addr);
318 339
340hash_del:
319 macvlan_hash_del(vlan); 341 macvlan_hash_del(vlan);
320 return 0; 342 return 0;
321} 343}
@@ -414,14 +436,15 @@ static int macvlan_init(struct net_device *dev)
414 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 436 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
415 (lowerdev->state & MACVLAN_STATE_MASK); 437 (lowerdev->state & MACVLAN_STATE_MASK);
416 dev->features = lowerdev->features & MACVLAN_FEATURES; 438 dev->features = lowerdev->features & MACVLAN_FEATURES;
439 dev->features |= NETIF_F_LLTX;
417 dev->gso_max_size = lowerdev->gso_max_size; 440 dev->gso_max_size = lowerdev->gso_max_size;
418 dev->iflink = lowerdev->ifindex; 441 dev->iflink = lowerdev->ifindex;
419 dev->hard_header_len = lowerdev->hard_header_len; 442 dev->hard_header_len = lowerdev->hard_header_len;
420 443
421 macvlan_set_lockdep_class(dev); 444 macvlan_set_lockdep_class(dev);
422 445
423 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats); 446 vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
424 if (!vlan->rx_stats) 447 if (!vlan->pcpu_stats)
425 return -ENOMEM; 448 return -ENOMEM;
426 449
427 return 0; 450 return 0;
@@ -431,7 +454,7 @@ static void macvlan_uninit(struct net_device *dev)
431{ 454{
432 struct macvlan_dev *vlan = netdev_priv(dev); 455 struct macvlan_dev *vlan = netdev_priv(dev);
433 456
434 free_percpu(vlan->rx_stats); 457 free_percpu(vlan->pcpu_stats);
435} 458}
436 459
437static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 460static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
439{ 462{
440 struct macvlan_dev *vlan = netdev_priv(dev); 463 struct macvlan_dev *vlan = netdev_priv(dev);
441 464
442 dev_txq_stats_fold(dev, stats); 465 if (vlan->pcpu_stats) {
443 466 struct macvlan_pcpu_stats *p;
444 if (vlan->rx_stats) { 467 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
445 struct macvlan_rx_stats *p, accum = {0}; 468 u32 rx_errors = 0, tx_dropped = 0;
446 u64 rx_packets, rx_bytes, rx_multicast;
447 unsigned int start; 469 unsigned int start;
448 int i; 470 int i;
449 471
450 for_each_possible_cpu(i) { 472 for_each_possible_cpu(i) {
451 p = per_cpu_ptr(vlan->rx_stats, i); 473 p = per_cpu_ptr(vlan->pcpu_stats, i);
452 do { 474 do {
453 start = u64_stats_fetch_begin_bh(&p->syncp); 475 start = u64_stats_fetch_begin_bh(&p->syncp);
454 rx_packets = p->rx_packets; 476 rx_packets = p->rx_packets;
455 rx_bytes = p->rx_bytes; 477 rx_bytes = p->rx_bytes;
456 rx_multicast = p->rx_multicast; 478 rx_multicast = p->rx_multicast;
479 tx_packets = p->tx_packets;
480 tx_bytes = p->tx_bytes;
457 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 481 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
458 accum.rx_packets += rx_packets; 482
459 accum.rx_bytes += rx_bytes; 483 stats->rx_packets += rx_packets;
460 accum.rx_multicast += rx_multicast; 484 stats->rx_bytes += rx_bytes;
461 /* rx_errors is an ulong, updated without syncp protection */ 485 stats->multicast += rx_multicast;
462 accum.rx_errors += p->rx_errors; 486 stats->tx_packets += tx_packets;
487 stats->tx_bytes += tx_bytes;
488 /* rx_errors & tx_dropped are u32, updated
489 * without syncp protection.
490 */
491 rx_errors += p->rx_errors;
492 tx_dropped += p->tx_dropped;
463 } 493 }
464 stats->rx_packets = accum.rx_packets; 494 stats->rx_errors = rx_errors;
465 stats->rx_bytes = accum.rx_bytes; 495 stats->rx_dropped = rx_errors;
466 stats->rx_errors = accum.rx_errors; 496 stats->tx_dropped = tx_dropped;
467 stats->rx_dropped = accum.rx_errors;
468 stats->multicast = accum.rx_multicast;
469 } 497 }
470 return stats; 498 return stats;
471} 499}
@@ -549,6 +577,7 @@ static int macvlan_port_create(struct net_device *dev)
549 if (port == NULL) 577 if (port == NULL)
550 return -ENOMEM; 578 return -ENOMEM;
551 579
580 port->passthru = false;
552 port->dev = dev; 581 port->dev = dev;
553 INIT_LIST_HEAD(&port->vlans); 582 INIT_LIST_HEAD(&port->vlans);
554 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 583 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
593 case MACVLAN_MODE_PRIVATE: 622 case MACVLAN_MODE_PRIVATE:
594 case MACVLAN_MODE_VEPA: 623 case MACVLAN_MODE_VEPA:
595 case MACVLAN_MODE_BRIDGE: 624 case MACVLAN_MODE_BRIDGE:
625 case MACVLAN_MODE_PASSTHRU:
596 break; 626 break;
597 default: 627 default:
598 return -EINVAL; 628 return -EINVAL;
@@ -601,25 +631,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
601 return 0; 631 return 0;
602} 632}
603 633
604static int macvlan_get_tx_queues(struct net *net,
605 struct nlattr *tb[],
606 unsigned int *num_tx_queues,
607 unsigned int *real_num_tx_queues)
608{
609 struct net_device *real_dev;
610
611 if (!tb[IFLA_LINK])
612 return -EINVAL;
613
614 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
615 if (!real_dev)
616 return -ENODEV;
617
618 *num_tx_queues = real_dev->num_tx_queues;
619 *real_num_tx_queues = real_dev->real_num_tx_queues;
620 return 0;
621}
622
623int macvlan_common_newlink(struct net *src_net, struct net_device *dev, 634int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
624 struct nlattr *tb[], struct nlattr *data[], 635 struct nlattr *tb[], struct nlattr *data[],
625 int (*receive)(struct sk_buff *skb), 636 int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
661 } 672 }
662 port = macvlan_port_get(lowerdev); 673 port = macvlan_port_get(lowerdev);
663 674
675 /* Only 1 macvlan device can be created in passthru mode */
676 if (port->passthru)
677 return -EINVAL;
678
664 vlan->lowerdev = lowerdev; 679 vlan->lowerdev = lowerdev;
665 vlan->dev = dev; 680 vlan->dev = dev;
666 vlan->port = port; 681 vlan->port = port;
@@ -671,6 +686,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
671 if (data && data[IFLA_MACVLAN_MODE]) 686 if (data && data[IFLA_MACVLAN_MODE])
672 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 687 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
673 688
689 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
690 if (!list_empty(&port->vlans))
691 return -EINVAL;
692 port->passthru = true;
693 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
694 }
695
674 err = register_netdevice(dev); 696 err = register_netdevice(dev);
675 if (err < 0) 697 if (err < 0)
676 goto destroy_port; 698 goto destroy_port;
@@ -743,7 +765,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
743{ 765{
744 /* common fields */ 766 /* common fields */
745 ops->priv_size = sizeof(struct macvlan_dev); 767 ops->priv_size = sizeof(struct macvlan_dev);
746 ops->get_tx_queues = macvlan_get_tx_queues;
747 ops->validate = macvlan_validate; 768 ops->validate = macvlan_validate;
748 ops->maxtype = IFLA_MACVLAN_MAX; 769 ops->maxtype = IFLA_MACVLAN_MAX;
749 ops->policy = macvlan_policy; 770 ops->policy = macvlan_policy;
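/*
 * A minimal model of the per-cpu stats scheme macvlan switches to above:
 * writers bump their own CPU's counters inside a u64_stats sequence, and
 * the reader folds every CPU with the fetch/retry loop. All names below
 * are hypothetical:
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;	/* updated without syncp */
};

static void example_count_tx(struct example_pcpu_stats __percpu *stats,
			     unsigned int len)
{
	struct example_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void example_fold_tx(struct example_pcpu_stats __percpu *stats,
			    u64 *packets, u64 *bytes)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin_bh(&s->syncp);
			p = s->tx_packets;
			b = s->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));

		*packets += p;
		*bytes += b;
	}
}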
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index dd2b6a71c6d7..02076e16542a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1514,11 +1514,6 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
1514 return genphy_restart_aneg(mp->phy); 1514 return genphy_restart_aneg(mp->phy);
1515} 1515}
1516 1516
1517static u32 mv643xx_eth_get_link(struct net_device *dev)
1518{
1519 return !!netif_carrier_ok(dev);
1520}
1521
1522static int 1517static int
1523mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1518mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1524{ 1519{
@@ -1658,7 +1653,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1658 .set_settings = mv643xx_eth_set_settings, 1653 .set_settings = mv643xx_eth_set_settings,
1659 .get_drvinfo = mv643xx_eth_get_drvinfo, 1654 .get_drvinfo = mv643xx_eth_get_drvinfo,
1660 .nway_reset = mv643xx_eth_nway_reset, 1655 .nway_reset = mv643xx_eth_nway_reset,
1661 .get_link = mv643xx_eth_get_link, 1656 .get_link = ethtool_op_get_link,
1662 .get_coalesce = mv643xx_eth_get_coalesce, 1657 .get_coalesce = mv643xx_eth_get_coalesce,
1663 .set_coalesce = mv643xx_eth_set_coalesce, 1658 .set_coalesce = mv643xx_eth_set_coalesce,
1664 .get_ringparam = mv643xx_eth_get_ringparam, 1659 .get_ringparam = mv643xx_eth_get_ringparam,
@@ -2983,7 +2978,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2983 unregister_netdev(mp->dev); 2978 unregister_netdev(mp->dev);
2984 if (mp->phy != NULL) 2979 if (mp->phy != NULL)
2985 phy_detach(mp->phy); 2980 phy_detach(mp->phy);
2986 flush_scheduled_work(); 2981 cancel_work_sync(&mp->tx_timeout_task);
2987 free_netdev(mp->dev); 2982 free_netdev(mp->dev);
2988 2983
2989 platform_set_drvdata(pdev, NULL); 2984 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 8524cc40ec57..1ce0207e62a9 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4067,7 +4067,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4067 if (mgp == NULL) 4067 if (mgp == NULL)
4068 return; 4068 return;
4069 4069
4070 flush_scheduled_work(); 4070 cancel_work_sync(&mgp->watchdog_work);
4071 netdev = mgp->dev; 4071 netdev = mgp->dev;
4072 unregister_netdev(netdev); 4072 unregister_netdev(netdev);
4073 4073
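/*
 * The flush_scheduled_work() removals in this patch follow one pattern:
 * flushing the entire global workqueue waits on unrelated work and is
 * deadlock-prone, so teardown instead cancels or flushes only the work
 * items the driver itself owns. A sketch (names hypothetical):
 */
#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct reset_task;
	struct work_struct watchdog_task;
};

static void example_remove(struct example_adapter *adapter)
{
	/* Wait for (and cancel) our own work only, not the whole queue */
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
}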
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f110..30be8c634ebd 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
86 86
87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) 87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
88{ 88{
89 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 89 struct ei_device *ei_local = netdev_priv(dev);
90 int i; 90 int i;
91 unsigned char bus_width; 91 unsigned char bus_width;
92 92
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
218 int start_page, stop_page; 218 int start_page, stop_page;
219 int reg0, ret; 219 int reg0, ret;
220 static unsigned version_printed; 220 static unsigned version_printed;
221 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 221 struct ei_device *ei_local = netdev_priv(dev);
222 unsigned char bus_width; 222 unsigned char bus_width;
223 223
224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) 224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
371static void ne_reset_8390(struct net_device *dev) 371static void ne_reset_8390(struct net_device *dev)
372{ 372{
373 unsigned long reset_start_time = jiffies; 373 unsigned long reset_start_time = jiffies;
374 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 374 struct ei_device *ei_local = netdev_priv(dev);
375 375
376 if (ei_debug > 1) 376 if (ei_debug > 1)
377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); 377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
397 397
398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
399{ 399{
400 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 400 struct ei_device *ei_local = netdev_priv(dev);
401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
402 402
403 if (ei_status.dmaing) 403 if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
437 437
438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
439{ 439{
440 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 440 struct ei_device *ei_local = netdev_priv(dev);
441#ifdef NE_SANITY_CHECK 441#ifdef NE_SANITY_CHECK
442 int xfer_count = count; 442 int xfer_count = count;
443#endif 443#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
507static void ne_block_output(struct net_device *dev, int count, 507static void ne_block_output(struct net_device *dev, int count,
508 const unsigned char *buf, const int start_page) 508 const unsigned char *buf, const int start_page)
509{ 509{
510 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 510 struct ei_device *ei_local = netdev_priv(dev);
511 unsigned long dma_start; 511 unsigned long dma_start;
512#ifdef NE_SANITY_CHECK 512#ifdef NE_SANITY_CHECK
513 int retries = 0; 513 int retries = 0;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa279..731077d8d962 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
214 tx_ring->num_desc = adapter->num_txd; 214 tx_ring->num_desc = adapter->num_txd;
215 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 215 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
216 216
217 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 217 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
218 if (cmd_buf_arr == NULL) { 218 if (cmd_buf_arr == NULL) {
219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", 219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
220 netdev->name); 220 netdev->name);
221 goto err_out; 221 goto err_out;
222 } 222 }
223 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
224 tx_ring->cmd_buf_arr = cmd_buf_arr; 223 tx_ring->cmd_buf_arr = cmd_buf_arr;
225 224
226 recv_ctx = &adapter->recv_ctx; 225 recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
279 break; 278 break;
280 279
281 } 280 }
282 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) 281 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
283 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
284 if (rds_ring->rx_buf_arr == NULL) { 282 if (rds_ring->rx_buf_arr == NULL) {
285 printk(KERN_ERR "%s: Failed to allocate " 283 printk(KERN_ERR "%s: Failed to allocate "
286 "rx buffer ring %d\n", 284 "rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
288 /* free whatever was already allocated */ 286 /* free whatever was already allocated */
289 goto err_out; 287 goto err_out;
290 } 288 }
291 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
292 INIT_LIST_HEAD(&rds_ring->free_list); 289 INIT_LIST_HEAD(&rds_ring->free_list);
293 /* 290 /*
294 * Now go through all of them, set reference handles 291 * Now go through all of them, set reference handles
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba9517404..ceeaac989df2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
44MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
46MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
47MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); 44MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
48 45
49char netxen_nic_driver_name[] = "netxen_nic"; 46char netxen_nic_driver_name[] = "netxen_nic";
@@ -1280,6 +1277,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1280 int i = 0, err; 1277 int i = 0, err;
1281 int pci_func_id = PCI_FUNC(pdev->devfn); 1278 int pci_func_id = PCI_FUNC(pdev->devfn);
1282 uint8_t revision_id; 1279 uint8_t revision_id;
1280 u32 val;
1283 1281
1284 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { 1282 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1285 pr_warning("%s: chip revisions between 0x%x-0x%x " 1283 pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1355,8 +1353,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1355 break; 1353 break;
1356 } 1354 }
1357 1355
1358 if (reset_devices) { 1356 if (adapter->portnum == 0) {
1359 if (adapter->portnum == 0) { 1357 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
1358 if (val != 0xffffffff && val != 0) {
1360 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); 1359 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1361 adapter->need_fw_reset = 1; 1360 adapter->need_fw_reset = 1;
1362 } 1361 }
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 781e368329f9..f64c42414bd7 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9917,7 +9917,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9917 if (!netif_running(dev)) 9917 if (!netif_running(dev))
9918 return 0; 9918 return 0;
9919 9919
9920 flush_scheduled_work(); 9920 flush_work_sync(&np->reset_task);
9921 niu_netif_stop(np); 9921 niu_netif_stop(np);
9922 9922
9923 del_timer_sync(&np->timer); 9923 del_timer_sync(&np->timer);
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c0edc9..c8c873b31a89 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
469} 469}
470 470
471/** 471/**
472 * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
473 * @netdev: Network interface device structure
474 * Returns
475 * true(1): Checksum On
476 * false(0): Checksum Off
477 */
478static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
479{
480 return (netdev->features & NETIF_F_HW_CSUM) != 0;
481}
482
483/**
484 * pch_gbe_set_tx_csum - Turn transmit checksums on or off 472 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
485 * @netdev: Network interface device structure 473 * @netdev: Network interface device structure
486 * @data: Checksum on[true] or off[false] 474 * @data: Checksum on[true] or off[false]
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
493 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 481 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
494 482
495 adapter->tx_csum = data; 483 adapter->tx_csum = data;
496 if (data) 484 return ethtool_op_set_tx_ipv6_csum(netdev, data);
497 netdev->features |= NETIF_F_HW_CSUM;
498 else
499 netdev->features &= ~NETIF_F_HW_CSUM;
500 return 0;
501} 485}
502 486
503/** 487/**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
572 .set_pauseparam = pch_gbe_set_pauseparam, 556 .set_pauseparam = pch_gbe_set_pauseparam,
573 .get_rx_csum = pch_gbe_get_rx_csum, 557 .get_rx_csum = pch_gbe_get_rx_csum,
574 .set_rx_csum = pch_gbe_set_rx_csum, 558 .set_rx_csum = pch_gbe_set_rx_csum,
575 .get_tx_csum = pch_gbe_get_tx_csum,
576 .set_tx_csum = pch_gbe_set_tx_csum, 559 .set_tx_csum = pch_gbe_set_tx_csum,
577 .get_strings = pch_gbe_get_strings, 560 .get_strings = pch_gbe_get_strings,
578 .get_ethtool_stats = pch_gbe_get_ethtool_stats, 561 .get_ethtool_stats = pch_gbe_get_ethtool_stats,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..d7355306a738 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation. 2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 3 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4 * 4 *
5 * This code was derived from the Intel e1000e Linux driver. 5 * This code was derived from the Intel e1000e Linux driver.
6 * 6 *
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1523 int desNo; 1523 int desNo;
1524 1524
1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1526 tx_ring->buffer_info = vmalloc(size); 1526 tx_ring->buffer_info = vzalloc(size);
1527 if (!tx_ring->buffer_info) { 1527 if (!tx_ring->buffer_info) {
1528 pr_err("Unable to allocate memory for the buffer information\n"); 1528 pr_err("Unable to allocate memory for the buffer information\n");
1529 return -ENOMEM; 1529 return -ENOMEM;
1530 } 1530 }
1531 memset(tx_ring->buffer_info, 0, size);
1532 1531
1533 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1532 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1534 1533
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1573 int desNo; 1572 int desNo;
1574 1573
1575 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1574 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1576 rx_ring->buffer_info = vmalloc(size); 1575 rx_ring->buffer_info = vzalloc(size);
1577 if (!rx_ring->buffer_info) { 1576 if (!rx_ring->buffer_info) {
1578 pr_err("Unable to allocate memory for the receive descriptor ring\n"); 1577 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1579 return -ENOMEM; 1578 return -ENOMEM;
1580 } 1579 }
1581 memset(rx_ring->buffer_info, 0, size);
1582 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1580 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1583 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1581 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1584 &rx_ring->dma, GFP_KERNEL); 1582 &rx_ring->dma, GFP_KERNEL);
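[Both ring-setup paths above collapse a vmalloc()+memset() pair into vzalloc(), which returns already-zeroed memory. The conversion relies on this equivalence (sketch; the real helper goes through __vmalloc() with __GFP_ZERO rather than an explicit memset):

	/* Sketch: what vzalloc(size) guarantees relative to the old code. */
	void *vzalloc_equivalent(unsigned long size)
	{
		void *p = vmalloc(size);

		if (p)
			memset(p, 0, size);
		return p;
	}
]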
@@ -2321,7 +2319,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2321 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2319 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2322 netif_napi_add(netdev, &adapter->napi, 2320 netif_napi_add(netdev, &adapter->napi,
2323 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); 2321 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2324 netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO; 2322 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2325 pch_gbe_set_ethtool_ops(netdev); 2323 pch_gbe_set_ethtool_ops(netdev);
2326 2324
2327 pch_gbe_mac_reset_hw(&adapter->hw); 2325 pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2360,9 +2358,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2360 pch_gbe_check_options(adapter); 2358 pch_gbe_check_options(adapter);
2361 2359
2362 if (adapter->tx_csum) 2360 if (adapter->tx_csum)
2363 netdev->features |= NETIF_F_HW_CSUM; 2361 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2364 else 2362 else
2365 netdev->features &= ~NETIF_F_HW_CSUM; 2363 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2366 2364
2367 /* initialize the wol settings based on the eeprom settings */ 2365 /* initialize the wol settings based on the eeprom settings */
2368 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING; 2366 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@ -2464,8 +2462,8 @@ static void __exit pch_gbe_exit_module(void)
2464module_init(pch_gbe_init_module); 2462module_init(pch_gbe_init_module);
2465module_exit(pch_gbe_exit_module); 2463module_exit(pch_gbe_exit_module);
2466 2464
2467MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver"); 2465MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2468MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>"); 2466MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
2469MODULE_LICENSE("GPL"); 2467MODULE_LICENSE("GPL");
2470MODULE_VERSION(DRV_VERSION); 2468MODULE_VERSION(DRV_VERSION);
2471MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); 2469MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
434 .err = "using default of " 434 .err = "using default of "
435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD), 435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
436 .def = PCH_GBE_DEFAULT_TXD, 436 .def = PCH_GBE_DEFAULT_TXD,
437 .arg = { .r = { .min = PCH_GBE_MIN_TXD } }, 437 .arg = { .r = { .min = PCH_GBE_MIN_TXD,
438 .arg = { .r = { .max = PCH_GBE_MAX_TXD } } 438 .max = PCH_GBE_MAX_TXD } }
439 }; 439 };
440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; 440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
441 tx_ring->count = TxDescriptors; 441 tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
450 .err = "using default of " 450 .err = "using default of "
451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD), 451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
452 .def = PCH_GBE_DEFAULT_RXD, 452 .def = PCH_GBE_DEFAULT_RXD,
453 .arg = { .r = { .min = PCH_GBE_MIN_RXD } }, 453 .arg = { .r = { .min = PCH_GBE_MIN_RXD,
454 .arg = { .r = { .max = PCH_GBE_MAX_RXD } } 454 .max = PCH_GBE_MAX_RXD } }
455 }; 455 };
456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
457 rx_ring->count = RxDescriptors; 457 rx_ring->count = RxDescriptors;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..1a0eb128e607 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
111 111
112typedef struct axnet_dev_t { 112typedef struct axnet_dev_t {
113 struct pcmcia_device *p_dev; 113 struct pcmcia_device *p_dev;
114 caddr_t base; 114 caddr_t base;
115 struct timer_list watchdog; 115 struct timer_list watchdog;
116 int stale, fast_poll; 116 int stale, fast_poll;
117 u_short link_status; 117 u_short link_status;
118 u_char duplex_flag; 118 u_char duplex_flag;
119 int phy_id; 119 int phy_id;
120 int flags; 120 int flags;
121 int active_low;
121} axnet_dev_t; 122} axnet_dev_t;
122 123
123static inline axnet_dev_t *PRIV(struct net_device *dev) 124static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
322 if (info->flags & IS_AX88790) 323 if (info->flags & IS_AX88790)
323 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ 324 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
324 325
326 info->active_low = 0;
327
325 for (i = 0; i < 32; i++) { 328 for (i = 0; i < 32; i++) {
326 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 329 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
327 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 330 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
329 if ((j != 0) && (j != 0xffff)) break; 332 if ((j != 0) && (j != 0xffff)) break;
330 } 333 }
331 334
332 /* Maybe PHY is in power down mode. (PPD_SET = 1)
333 Bit 2 of CCSR is active low. */
334 if (i == 32) { 335 if (i == 32) {
336 /* Maybe PHY is in power down mode. (PPD_SET = 1)
337 Bit 2 of CCSR is active low. */
335 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); 338 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
336 for (i = 0; i < 32; i++) { 339 for (i = 0; i < 32; i++) {
337 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 340 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
338 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 341 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
339 if (j == j2) continue; 342 if (j == j2) continue;
340 if ((j != 0) && (j != 0xffff)) break; 343 if ((j != 0) && (j != 0xffff)) {
344 info->active_low = 1;
345 break;
346 }
341 } 347 }
342 } 348 }
343 349
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
383static int axnet_resume(struct pcmcia_device *link) 389static int axnet_resume(struct pcmcia_device *link)
384{ 390{
385 struct net_device *dev = link->priv; 391 struct net_device *dev = link->priv;
392 axnet_dev_t *info = PRIV(dev);
386 393
387 if (link->open) { 394 if (link->open) {
395 if (info->active_low == 1)
396 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
397
388 axnet_reset_8390(dev); 398 axnet_reset_8390(dev);
389 AX88190_init(dev, 1); 399 AX88190_init(dev, 1);
390 netif_device_attach(dev); 400 netif_device_attach(dev);
@@ -865,7 +875,7 @@ static void do_set_multicast_list(struct net_device *dev);
865static int ax_open(struct net_device *dev) 875static int ax_open(struct net_device *dev)
866{ 876{
867 unsigned long flags; 877 unsigned long flags;
868 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 878 struct ei_device *ei_local = netdev_priv(dev);
869 879
870 /* 880 /*
871 * Grab the page lock so we own the register set, then call 881 * Grab the page lock so we own the register set, then call
@@ -916,7 +926,7 @@ static int ax_close(struct net_device *dev)
916static void axnet_tx_timeout(struct net_device *dev) 926static void axnet_tx_timeout(struct net_device *dev)
917{ 927{
918 long e8390_base = dev->base_addr; 928 long e8390_base = dev->base_addr;
919 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 929 struct ei_device *ei_local = netdev_priv(dev);
920 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 930 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
921 unsigned long flags; 931 unsigned long flags;
922 932
@@ -963,7 +973,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
963 struct net_device *dev) 973 struct net_device *dev)
964{ 974{
965 long e8390_base = dev->base_addr; 975 long e8390_base = dev->base_addr;
966 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 976 struct ei_device *ei_local = netdev_priv(dev);
967 int length, send_length, output_page; 977 int length, send_length, output_page;
968 unsigned long flags; 978 unsigned long flags;
969 u8 packet[ETH_ZLEN]; 979 u8 packet[ETH_ZLEN];
@@ -1260,7 +1270,7 @@ static void ei_tx_err(struct net_device *dev)
1260static void ei_tx_intr(struct net_device *dev) 1270static void ei_tx_intr(struct net_device *dev)
1261{ 1271{
1262 long e8390_base = dev->base_addr; 1272 long e8390_base = dev->base_addr;
1263 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1273 struct ei_device *ei_local = netdev_priv(dev);
1264 int status = inb(e8390_base + EN0_TSR); 1274 int status = inb(e8390_base + EN0_TSR);
1265 1275
1266 /* 1276 /*
@@ -1344,7 +1354,7 @@ static void ei_tx_intr(struct net_device *dev)
1344static void ei_receive(struct net_device *dev) 1354static void ei_receive(struct net_device *dev)
1345{ 1355{
1346 long e8390_base = dev->base_addr; 1356 long e8390_base = dev->base_addr;
1347 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1357 struct ei_device *ei_local = netdev_priv(dev);
1348 unsigned char rxing_page, this_frame, next_frame; 1358 unsigned char rxing_page, this_frame, next_frame;
1349 unsigned short current_offset; 1359 unsigned short current_offset;
1350 int rx_pkt_count = 0; 1360 int rx_pkt_count = 0;
@@ -1529,7 +1539,7 @@ static void ei_rx_overrun(struct net_device *dev)
1529static struct net_device_stats *get_stats(struct net_device *dev) 1539static struct net_device_stats *get_stats(struct net_device *dev)
1530{ 1540{
1531 long ioaddr = dev->base_addr; 1541 long ioaddr = dev->base_addr;
1532 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1542 struct ei_device *ei_local = netdev_priv(dev);
1533 unsigned long flags; 1543 unsigned long flags;
1534 1544
1535 /* If the card is stopped, just return the present stats. */ 1545 /* If the card is stopped, just return the present stats. */
@@ -1578,7 +1588,7 @@ static void do_set_multicast_list(struct net_device *dev)
1578{ 1588{
1579 long e8390_base = dev->base_addr; 1589 long e8390_base = dev->base_addr;
1580 int i; 1590 int i;
1581 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 1591 struct ei_device *ei_local = netdev_priv(dev);
1582 1592
1583 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { 1593 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
1584 memset(ei_local->mcfilter, 0, 8); 1594 memset(ei_local->mcfilter, 0, 8);
@@ -1636,7 +1646,7 @@ static void AX88190_init(struct net_device *dev, int startp)
1636{ 1646{
1637 axnet_dev_t *info = PRIV(dev); 1647 axnet_dev_t *info = PRIV(dev);
1638 long e8390_base = dev->base_addr; 1648 long e8390_base = dev->base_addr;
1639 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1649 struct ei_device *ei_local = netdev_priv(dev);
1640 int i; 1650 int i;
1641 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; 1651 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
1642 1652
@@ -1702,7 +1712,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1702 int start_page) 1712 int start_page)
1703{ 1713{
1704 long e8390_base = dev->base_addr; 1714 long e8390_base = dev->base_addr;
1705 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1715 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1706 1716
1707 if (inb_p(e8390_base) & E8390_TRANS) 1717 if (inb_p(e8390_base) & E8390_TRANS)
1708 { 1718 {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f0bd1a1aba3a..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/marvell_phy.h> 32#include <linux/marvell_phy.h>
33#include <linux/of.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37 38
39#define MII_MARVELL_PHY_PAGE 22
40
38#define MII_M1011_IEVENT 0x13 41#define MII_M1011_IEVENT 0x13
39#define MII_M1011_IEVENT_CLEAR 0x0000 42#define MII_M1011_IEVENT_CLEAR 0x0000
40 43
@@ -80,7 +83,6 @@
80#define MII_88E1121_PHY_LED_CTRL 16 83#define MII_88E1121_PHY_LED_CTRL 16
81#define MII_88E1121_PHY_LED_PAGE 3 84#define MII_88E1121_PHY_LED_PAGE 3
82#define MII_88E1121_PHY_LED_DEF 0x0030 85#define MII_88E1121_PHY_LED_DEF 0x0030
83#define MII_88E1121_PHY_PAGE 22
84 86
85#define MII_M1011_PHY_STATUS 0x11 87#define MII_M1011_PHY_STATUS 0x11
86#define MII_M1011_PHY_STATUS_1000 0x8000 88#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
186 return 0; 188 return 0;
187} 189}
188 190
191#ifdef CONFIG_OF_MDIO
192/*
193 * Set and/or override some configuration registers based on the
194 * marvell,reg-init property stored in the of_node for the phydev.
195 *
196 * marvell,reg-init = <reg-page reg mask value>,...;
197 *
198 * There may be one or more sets of <reg-page reg mask value>:
199 *
200 * reg-page: which register bank to use.
201 * reg: the register.
202 * mask: if non-zero, ANDed with existing register value.
 203 * value: ORed with the masked value and written to the register.
204 *
205 */
206static int marvell_of_reg_init(struct phy_device *phydev)
207{
208 const __be32 *paddr;
209 int len, i, saved_page, current_page, page_changed, ret;
210
211 if (!phydev->dev.of_node)
212 return 0;
213
214 paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
215 if (!paddr || len < (4 * sizeof(*paddr)))
216 return 0;
217
218 saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
219 if (saved_page < 0)
220 return saved_page;
221 page_changed = 0;
222 current_page = saved_page;
223
224 ret = 0;
225 len /= sizeof(*paddr);
226 for (i = 0; i < len - 3; i += 4) {
227 u16 reg_page = be32_to_cpup(paddr + i);
228 u16 reg = be32_to_cpup(paddr + i + 1);
229 u16 mask = be32_to_cpup(paddr + i + 2);
230 u16 val_bits = be32_to_cpup(paddr + i + 3);
231 int val;
232
233 if (reg_page != current_page) {
234 current_page = reg_page;
235 page_changed = 1;
236 ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
237 if (ret < 0)
238 goto err;
239 }
240
241 val = 0;
242 if (mask) {
243 val = phy_read(phydev, reg);
244 if (val < 0) {
245 ret = val;
246 goto err;
247 }
248 val &= mask;
249 }
250 val |= val_bits;
251
252 ret = phy_write(phydev, reg, val);
253 if (ret < 0)
254 goto err;
255
256 }
257err:
258 if (page_changed) {
259 i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
260 if (ret == 0)
261 ret = i;
262 }
263 return ret;
264}
265#else
266static int marvell_of_reg_init(struct phy_device *phydev)
267{
268 return 0;
269}
270#endif /* CONFIG_OF_MDIO */
271
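[marvell_of_reg_init() walks the property four cells at a time, doing a select-page / optional read-modify-write / restore-page cycle per tuple. As a worked example, a hypothetical tuple <3 0x10 0x0000 0x0030> (page 3, register 0x10, zero mask, value 0x0030 — mirroring the LED defaults used elsewhere in this file) reduces to roughly:

	/* Sketch of one marvell,reg-init tuple, <3 0x10 0x0000 0x0030>;
	 * a zero mask skips the read-back and writes the value as-is. */
	static int apply_example_tuple(struct phy_device *phydev)
	{
		int err;

		err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 3);	/* reg-page */
		if (err < 0)
			return err;
		err = phy_write(phydev, 0x10, 0x0030);			/* reg, value */
		if (err < 0)
			return err;
		return phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);	/* restore page */
	}
]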
189static int m88e1121_config_aneg(struct phy_device *phydev) 272static int m88e1121_config_aneg(struct phy_device *phydev)
190{ 273{
191 int err, oldpage, mscr; 274 int err, oldpage, mscr;
192 275
193 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 276 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
194 277
195 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 278 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
196 MII_88E1121_PHY_MSCR_PAGE); 279 MII_88E1121_PHY_MSCR_PAGE);
197 if (err < 0) 280 if (err < 0)
198 return err; 281 return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
218 return err; 301 return err;
219 } 302 }
220 303
221 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 304 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
222 305
223 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 306 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
224 if (err < 0) 307 if (err < 0)
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
229 if (err < 0) 312 if (err < 0)
230 return err; 313 return err;
231 314
232 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 315 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
233 316
234 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); 317 phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
235 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); 318 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
236 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 319 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
237 320
238 err = genphy_config_aneg(phydev); 321 err = genphy_config_aneg(phydev);
239 322
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
244{ 327{
245 int err, oldpage, mscr; 328 int err, oldpage, mscr;
246 329
247 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 330 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
248 331
249 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 332 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
250 MII_88E1121_PHY_MSCR_PAGE); 333 MII_88E1121_PHY_MSCR_PAGE);
251 if (err < 0) 334 if (err < 0)
252 return err; 335 return err;
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
258 if (err < 0) 341 if (err < 0)
259 return err; 342 return err;
260 343
261 err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 344 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
262 if (err < 0) 345 if (err < 0)
263 return err; 346 return err;
264 347
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
368 return err; 451 return err;
369 } 452 }
370 453
454 err = marvell_of_reg_init(phydev);
455 if (err < 0)
456 return err;
371 457
372 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
373 if (err < 0) 459 if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
398 int err; 484 int err;
399 485
400 /* Change address */ 486 /* Change address */
401 err = phy_write(phydev, 0x16, 0x0002); 487 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
402 if (err < 0) 488 if (err < 0)
403 return err; 489 return err;
404 490
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
408 return err; 494 return err;
409 495
410 /* Change address */ 496 /* Change address */
411 err = phy_write(phydev, 0x16, 0x0003); 497 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
412 if (err < 0) 498 if (err < 0)
413 return err; 499 return err;
414 500
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
420 if (err < 0) 506 if (err < 0)
421 return err; 507 return err;
422 508
509 err = marvell_of_reg_init(phydev);
510 if (err < 0)
511 return err;
512
423 /* Reset address */ 513 /* Reset address */
424 err = phy_write(phydev, 0x16, 0x0); 514 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
515 if (err < 0)
516 return err;
517
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523}
524
525static int m88e1149_config_init(struct phy_device *phydev)
526{
527 int err;
528
529 /* Change address */
530 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
531 if (err < 0)
532 return err;
533
534 /* Enable 1000 Mbit */
535 err = phy_write(phydev, 0x15, 0x1048);
536 if (err < 0)
537 return err;
538
539 err = marvell_of_reg_init(phydev);
540 if (err < 0)
541 return err;
542
543 /* Reset address */
544 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
425 if (err < 0) 545 if (err < 0)
426 return err; 546 return err;
427 547
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
491 } 611 }
492 } 612 }
493 613
614 err = marvell_of_reg_init(phydev);
615 if (err < 0)
616 return err;
617
494 return 0; 618 return 0;
495} 619}
496 620
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
685 .driver = { .owner = THIS_MODULE }, 809 .driver = { .owner = THIS_MODULE },
686 }, 810 },
687 { 811 {
812 .phy_id = MARVELL_PHY_ID_88E1149R,
813 .phy_id_mask = MARVELL_PHY_ID_MASK,
814 .name = "Marvell 88E1149R",
815 .features = PHY_GBIT_FEATURES,
816 .flags = PHY_HAS_INTERRUPT,
817 .config_init = &m88e1149_config_init,
818 .config_aneg = &m88e1118_config_aneg,
819 .read_status = &genphy_read_status,
820 .ack_interrupt = &marvell_ack_interrupt,
821 .config_intr = &marvell_config_intr,
822 .driver = { .owner = THIS_MODULE },
823 },
824 {
688 .phy_id = MARVELL_PHY_ID_88E1240, 825 .phy_id = MARVELL_PHY_ID_88E1240,
689 .phy_id_mask = MARVELL_PHY_ID_MASK, 826 .phy_id_mask = MARVELL_PHY_ID_MASK,
690 .name = "Marvell 88E1240", 827 .name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
735 { 0x01410e10, 0xfffffff0 }, 872 { 0x01410e10, 0xfffffff0 },
736 { 0x01410cb0, 0xfffffff0 }, 873 { 0x01410cb0, 0xfffffff0 },
737 { 0x01410cd0, 0xfffffff0 }, 874 { 0x01410cd0, 0xfffffff0 },
875 { 0x01410e50, 0xfffffff0 },
738 { 0x01410e30, 0xfffffff0 }, 876 { 0x01410e30, 0xfffffff0 },
739 { 0x01410e90, 0xfffffff0 }, 877 { 0x01410e90, 0xfffffff0 },
740 { } 878 { }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93f..a8445c72fc13 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), 47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down"); 48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 49 if (phydev->link)
50 printk(" - %d/%s", phydev->speed, 50 printk(KERN_CONT " - %d/%s", phydev->speed,
51 DUPLEX_FULL == phydev->duplex ? 51 DUPLEX_FULL == phydev->duplex ?
52 "Full" : "Half"); 52 "Full" : "Half");
53 53
54 printk("\n"); 54 printk(KERN_CONT "\n");
55} 55}
56EXPORT_SYMBOL(phy_print_status); 56EXPORT_SYMBOL(phy_print_status);
57 57
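[Without KERN_CONT, each printk() starts a new log record at the default log level, so the speed/duplex suffix could land on its own line instead of continuing the pr_info() above it. A sketch of what the fixed function emits for a 100/Full link:

	/* Sketch: KERN_CONT keeps the suffix in the same log record. */
	static void print_link_example(struct phy_device *phydev)
	{
		pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), "Up");
		printk(KERN_CONT " - %d/%s", 100, "Full");	/* same record */
		printk(KERN_CONT "\n");				/* terminates it */
	}
]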
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..b708f68471a6 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1136 a four-byte PPP header on each packet */ 1136 a four-byte PPP header on each packet */
1137 *skb_push(skb, 2) = 1; 1137 *skb_push(skb, 2) = 1;
1138 if (ppp->pass_filter && 1138 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter, 1139 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 ppp->pass_len) == 0) {
1141 if (ppp->debug & 1) 1140 if (ppp->debug & 1)
1142 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n");
1143 kfree_skb(skb); 1142 kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1145 } 1144 }
1146 /* if this packet passes the active filter, record the time */ 1145 /* if this packet passes the active filter, record the time */
1147 if (!(ppp->active_filter && 1146 if (!(ppp->active_filter &&
1148 sk_run_filter(skb, ppp->active_filter, 1147 sk_run_filter(skb, ppp->active_filter) == 0))
1149 ppp->active_len) == 0))
1150 ppp->last_xmit = jiffies; 1148 ppp->last_xmit = jiffies;
1151 skb_pull(skb, 2); 1149 skb_pull(skb, 2);
1152#else 1150#else
@@ -1758,8 +1756,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1758 1756
1759 *skb_push(skb, 2) = 0; 1757 *skb_push(skb, 2) = 0;
1760 if (ppp->pass_filter && 1758 if (ppp->pass_filter &&
1761 sk_run_filter(skb, ppp->pass_filter, 1759 sk_run_filter(skb, ppp->pass_filter) == 0) {
1762 ppp->pass_len) == 0) {
1763 if (ppp->debug & 1) 1760 if (ppp->debug & 1)
1764 printk(KERN_DEBUG "PPP: inbound frame " 1761 printk(KERN_DEBUG "PPP: inbound frame "
1765 "not passed\n"); 1762 "not passed\n");
@@ -1767,8 +1764,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1767 return; 1764 return;
1768 } 1765 }
1769 if (!(ppp->active_filter && 1766 if (!(ppp->active_filter &&
1770 sk_run_filter(skb, ppp->active_filter, 1767 sk_run_filter(skb, ppp->active_filter) == 0))
1771 ppp->active_len) == 0))
1772 ppp->last_recv = jiffies; 1768 ppp->last_recv = jiffies;
1773 __skb_pull(skb, 2); 1769 __skb_pull(skb, 2);
1774 } else 1770 } else
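[These caller-side changes track a core filter API change: the BPF program is validated once at attach time by sk_chk_filter(), so the interpreter no longer needs the program length on every packet. Assumed prototypes before and after (a sketch of net/core/filter.c in this era, not verbatim):

	/* old */ unsigned int sk_run_filter(struct sk_buff *skb,
					     struct sock_filter *filter, int flen);
	/* new */ unsigned int sk_run_filter(const struct sk_buff *skb,
					     const struct sock_filter *fentry);
]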
@@ -2584,16 +2580,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2584 */ 2580 */
2585 dev_net_set(dev, net); 2581 dev_net_set(dev, net);
2586 2582
2587 ret = -EEXIST;
2588 mutex_lock(&pn->all_ppp_mutex); 2583 mutex_lock(&pn->all_ppp_mutex);
2589 2584
2590 if (unit < 0) { 2585 if (unit < 0) {
2591 unit = unit_get(&pn->units_idr, ppp); 2586 unit = unit_get(&pn->units_idr, ppp);
2592 if (unit < 0) { 2587 if (unit < 0) {
2593 *retp = unit; 2588 ret = unit;
2594 goto out2; 2589 goto out2;
2595 } 2590 }
2596 } else { 2591 } else {
2592 ret = -EEXIST;
2597 if (unit_find(&pn->units_idr, unit)) 2593 if (unit_find(&pn->units_idr, unit))
2598 goto out2; /* unit already exists */ 2594 goto out2; /* unit already exists */
2599 /* 2595 /*
@@ -2668,10 +2664,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2668 ppp->closing = 1; 2664 ppp->closing = 1;
2669 ppp_unlock(ppp); 2665 ppp_unlock(ppp);
2670 unregister_netdev(ppp->dev); 2666 unregister_netdev(ppp->dev);
2667 unit_put(&pn->units_idr, ppp->file.index);
2671 } else 2668 } else
2672 ppp_unlock(ppp); 2669 ppp_unlock(ppp);
2673 2670
2674 unit_put(&pn->units_idr, ppp->file.index);
2675 ppp->file.dead = 1; 2671 ppp->file.dead = 1;
2676 ppp->owner = NULL; 2672 ppp->owner = NULL;
2677 wake_up_interruptible(&ppp->file.rwait); 2673 wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2855,7 @@ static void __exit ppp_cleanup(void)
2859 * by holding all_ppp_mutex 2855 * by holding all_ppp_mutex
2860 */ 2856 */
2861 2857
2862/* associate pointer with specified number */ 2858static int __unit_alloc(struct idr *p, void *ptr, int n)
2863static int unit_set(struct idr *p, void *ptr, int n)
2864{ 2859{
2865 int unit, err; 2860 int unit, err;
2866 2861
@@ -2871,10 +2866,24 @@ again:
2871 } 2866 }
2872 2867
2873 err = idr_get_new_above(p, ptr, n, &unit); 2868 err = idr_get_new_above(p, ptr, n, &unit);
2874 if (err == -EAGAIN) 2869 if (err < 0) {
2875 goto again; 2870 if (err == -EAGAIN)
2871 goto again;
2872 return err;
2873 }
2874
2875 return unit;
2876}
2877
2878/* associate pointer with specified number */
2879static int unit_set(struct idr *p, void *ptr, int n)
2880{
2881 int unit;
2876 2882
2877 if (unit != n) { 2883 unit = __unit_alloc(p, ptr, n);
2884 if (unit < 0)
2885 return unit;
2886 else if (unit != n) {
2878 idr_remove(p, unit); 2887 idr_remove(p, unit);
2879 return -EINVAL; 2888 return -EINVAL;
2880 } 2889 }
@@ -2885,19 +2894,7 @@ again:
2885/* get new free unit number and associate pointer with it */ 2894/* get new free unit number and associate pointer with it */
2886static int unit_get(struct idr *p, void *ptr) 2895static int unit_get(struct idr *p, void *ptr)
2887{ 2896{
2888 int unit, err; 2897 return __unit_alloc(p, ptr, 0);
2889
2890again:
2891 if (!idr_pre_get(p, GFP_KERNEL)) {
2892 printk(KERN_ERR "PPP: No free memory for idr\n");
2893 return -ENOMEM;
2894 }
2895
2896 err = idr_get_new_above(p, ptr, 0, &unit);
2897 if (err == -EAGAIN)
2898 goto again;
2899
2900 return unit;
2901} 2898}
2902 2899
2903/* put unit number back to a pool */ 2900/* put unit number back to a pool */
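[unit_set() and unit_get() previously duplicated the pre-idr_alloc() allocation dance; __unit_alloc() factors it out and, as a side effect, makes unit_set() propagate allocation errors instead of only handling the unit != n case. The shared pattern (sketch; later kernels replace it with idr_alloc()):

	static int alloc_unit(struct idr *p, void *ptr, int starting_id)
	{
		int unit, err;
	again:
		if (!idr_pre_get(p, GFP_KERNEL))	/* preload layer memory */
			return -ENOMEM;
		err = idr_get_new_above(p, ptr, starting_id, &unit);
		if (err == -EAGAIN)			/* lost a race; retry */
			goto again;
		if (err < 0)
			return err;
		return unit;
	}
]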
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bfa..164cfad6ce79 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -277,7 +277,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
277 iph->tos = 0; 277 iph->tos = 0;
278 iph->daddr = rt->rt_dst; 278 iph->daddr = rt->rt_dst;
279 iph->saddr = rt->rt_src; 279 iph->saddr = rt->rt_src;
280 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT); 280 iph->ttl = ip4_dst_hoplimit(&rt->dst);
281 iph->tot_len = htons(skb->len); 281 iph->tot_len = htons(skb->len);
282 282
283 skb_dst_drop(skb); 283 skb_dst_drop(skb);
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
673 int err = 0; 673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); 674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675 675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *), 676 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) { 677 if (!callid_sock) {
679 pr_err("PPTP: cann't allocate memory\n"); 678 pr_err("PPTP: cann't allocate memory\n");
680 return -ENOMEM; 679 return -ENOMEM;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 18c0297743f1..1b63c8aef121 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1450,16 +1450,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
1450 strncpy(info->bus_info, "N/A", 32); 1450 strncpy(info->bus_info, "N/A", 32);
1451} 1451}
1452 1452
1453static u32 pxa168_get_link(struct net_device *dev)
1454{
1455 return !!netif_carrier_ok(dev);
1456}
1457
1458static const struct ethtool_ops pxa168_ethtool_ops = { 1453static const struct ethtool_ops pxa168_ethtool_ops = {
1459 .get_settings = pxa168_get_settings, 1454 .get_settings = pxa168_get_settings,
1460 .set_settings = pxa168_set_settings, 1455 .set_settings = pxa168_set_settings,
1461 .get_drvinfo = pxa168_get_drvinfo, 1456 .get_drvinfo = pxa168_get_drvinfo,
1462 .get_link = pxa168_get_link, 1457 .get_link = ethtool_op_get_link,
1463}; 1458};
1464 1459
1465static const struct net_device_ops pxa168_eth_netdev_ops = { 1460static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1607,7 +1602,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
1607 mdiobus_unregister(pep->smi_bus); 1602 mdiobus_unregister(pep->smi_bus);
1608 mdiobus_free(pep->smi_bus); 1603 mdiobus_free(pep->smi_bus);
1609 unregister_netdev(dev); 1604 unregister_netdev(dev);
1610 flush_scheduled_work(); 1605 cancel_work_sync(&pep->tx_timeout_task);
1611 free_netdev(dev); 1606 free_netdev(dev);
1612 platform_set_drvdata(pdev, NULL); 1607 platform_set_drvdata(pdev, NULL);
1613 return 0; 1608 return 0;
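[flush_scheduled_work() waits for everything on the global workqueue and is deadlock-prone; canceling the one work item this driver owns is safer and sufficient on remove. The setup/teardown pairing, assuming the work was initialized in probe (handler name hypothetical):

	/* Sketch: cancel only the driver's own work item at teardown. */
	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_handler);	/* probe */
	/* ... */
	cancel_work_sync(&pep->tx_timeout_task);	/* remove: wait for ours only */
]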
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34ab..1a3584edd79c 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@ map_error:
2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2468 struct net_device *ndev) 2468 struct net_device *ndev)
2469{ 2469{
2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2470 struct ql3_adapter *qdev = netdev_priv(ndev);
2471 struct ql3xxx_port_registers __iomem *port_regs = 2471 struct ql3xxx_port_registers __iomem *port_regs =
2472 qdev->mem_map_registers; 2472 qdev->mem_map_registers;
2473 struct ql_tx_buf_cb *tx_cb; 2473 struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3390 3390
3391static void ql_display_dev_info(struct net_device *ndev) 3391static void ql_display_dev_info(struct net_device *ndev)
3392{ 3392{
3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3393 struct ql3_adapter *qdev = netdev_priv(ndev);
3394 struct pci_dev *pdev = qdev->pdev; 3394 struct pci_dev *pdev = qdev->pdev;
3395 3395
3396 netdev_info(ndev, 3396 netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
3573 3573
3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3575{ 3575{
3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3576 struct ql3_adapter *qdev = netdev_priv(ndev);
3577 struct ql3xxx_port_registers __iomem *port_regs = 3577 struct ql3xxx_port_registers __iomem *port_regs =
3578 qdev->mem_map_registers; 3578 qdev->mem_map_registers;
3579 struct sockaddr *addr = p; 3579 struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3608 3608
3609static void ql3xxx_tx_timeout(struct net_device *ndev) 3609static void ql3xxx_tx_timeout(struct net_device *ndev)
3610{ 3610{
3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3611 struct ql3_adapter *qdev = netdev_priv(ndev);
3612 3612
3613 netdev_err(ndev, "Resetting...\n"); 3613 netdev_err(ndev, "Resetting...\n");
3614 /* 3614 /*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b74..f267da42f243 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef _QLCNIC_H_ 8#ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
51 34
52#define _QLCNIC_LINUX_MAJOR 5 35#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 36#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 11 37#define _QLCNIC_LINUX_SUBVERSION 13
55#define QLCNIC_LINUX_VERSIONID "5.0.11" 38#define QLCNIC_LINUX_VERSIONID "5.0.13"
56#define QLCNIC_DRV_IDC_VER 0x01 39#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -798,7 +781,6 @@ struct qlcnic_nic_intr_coalesce {
798#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16 781#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
799#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 782#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
800#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18 783#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
801#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
802#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20 784#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
803#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21 785#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
804#define QLCNIC_C2C_OPCODE 22 786#define QLCNIC_C2C_OPCODE 22
@@ -923,6 +905,7 @@ struct qlcnic_ipaddr {
923#define QLCNIC_MACSPOOF 0x200 905#define QLCNIC_MACSPOOF 0x200
924#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 906#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
925#define QLCNIC_PROMISC_DISABLED 0x800 907#define QLCNIC_PROMISC_DISABLED 0x800
908#define QLCNIC_NEED_FLR 0x1000
926#define QLCNIC_IS_MSI_FAMILY(adapter) \ 909#define QLCNIC_IS_MSI_FAMILY(adapter) \
927 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 910 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
928 911
@@ -1126,8 +1109,7 @@ struct qlcnic_eswitch {
1126/* Return codes for Error handling */ 1109/* Return codes for Error handling */
1127#define QL_STATUS_INVALID_PARAM -1 1110#define QL_STATUS_INVALID_PARAM -1
1128 1111
1129#define MAX_BW 100 1112#define MAX_BW 100 /* % of link speed */
1130#define MIN_BW 1
1131#define MAX_VLAN_ID 4095 1113#define MAX_VLAN_ID 4095
1132#define MIN_VLAN_ID 2 1114#define MIN_VLAN_ID 2
1133#define MAX_TX_QUEUES 1 1115#define MAX_TX_QUEUES 1
@@ -1135,7 +1117,7 @@ struct qlcnic_eswitch {
1135#define DEFAULT_MAC_LEARN 1 1117#define DEFAULT_MAC_LEARN 1
1136 1118
1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1119#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1138#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) 1120#define IS_VALID_BW(bw) (bw <= MAX_BW)
1139#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) 1121#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1140#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) 1122#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1141 1123
@@ -1314,21 +1296,15 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1314int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1296int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1315void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1297void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1316 struct qlcnic_host_tx_ring *tx_ring); 1298 struct qlcnic_host_tx_ring *tx_ring);
1317void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1318int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1319void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1299void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1320 1300
1321/* Functions from qlcnic_main.c */ 1301/* Functions from qlcnic_main.c */
1322int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
1323void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
1324int qlcnic_reset_context(struct qlcnic_adapter *); 1302int qlcnic_reset_context(struct qlcnic_adapter *);
1325u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1303u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1326 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); 1304 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1327void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1305void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1328int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1306int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1329int qlcnic_check_loopback_buff(unsigned char *data);
1330netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1307netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1331void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1332 1308
1333/* Management functions */ 1309/* Management functions */
1334int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); 1310int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1353,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1377 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1353 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1378 {0x1077, 0x8020, 0x103c, 0x3733, 1354 {0x1077, 0x8020, 0x103c, 0x3733,
1379 "NC523SFP 10Gb 2-port Server Adapter"}, 1355 "NC523SFP 10Gb 2-port Server Adapter"},
1356 {0x1077, 0x8020, 0x103c, 0x3346,
1357 "CN1000Q Dual Port Converged Network Adapter"},
1380 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1358 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1381}; 1359};
1382 1360
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6b..27631f23b3fd 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -480,6 +463,11 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
480{ 463{
481 int err; 464 int err;
482 465
466 if (adapter->flags & QLCNIC_NEED_FLR) {
467 pci_reset_function(adapter->pdev);
468 adapter->flags &= ~QLCNIC_NEED_FLR;
469 }
470
483 err = qlcnic_fw_cmd_create_rx_ctx(adapter); 471 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
484 if (err) 472 if (err)
485 return err; 473 return err;
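[The new QLCNIC_NEED_FLR flag implements a deferred function-level reset: whatever path decides the function needs an FLR only marks the flag, and the reset itself runs once, just before firmware contexts are recreated. Sketched:

	/* Sketch: deferred FLR via an adapter flag (names from this driver). */
	adapter->flags |= QLCNIC_NEED_FLR;		/* requester: mark only */

	/* ...later, in qlcnic_fw_create_ctx(): */
	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);	/* one FLR, then clear */
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}
]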
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c4..0eaf31bf8a0d 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/types.h> 8#include <linux/types.h>
@@ -101,8 +84,7 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
101static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
102 "Register_Test_on_offline", 85 "Register_Test_on_offline",
103 "Link_Test_on_offline", 86 "Link_Test_on_offline",
104 "Interrupt_Test_offline", 87 "Interrupt_Test_offline"
105 "Loopback_Test_offline"
106}; 88};
107 89
108#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 90#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
643 } 625 }
644} 626}
645 627
646#define QLC_ILB_PKT_SIZE 64
647#define QLC_NUM_ILB_PKT 16
648#define QLC_ILB_MAX_RCV_LOOP 10
649
650static void qlcnic_create_loopback_buff(unsigned char *data)
651{
652 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
653 memset(data, 0x4e, QLC_ILB_PKT_SIZE);
654 memset(data, 0xff, 12);
655 memcpy(data + 12, random_data, sizeof(random_data));
656}
657
658int qlcnic_check_loopback_buff(unsigned char *data)
659{
660 unsigned char buff[QLC_ILB_PKT_SIZE];
661 qlcnic_create_loopback_buff(buff);
662 return memcmp(data, buff, QLC_ILB_PKT_SIZE);
663}
664
665static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
666{
667 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
668 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
669 struct sk_buff *skb;
670 int i, loop, cnt = 0;
671
672 for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
673 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
674 qlcnic_create_loopback_buff(skb->data);
675 skb_put(skb, QLC_ILB_PKT_SIZE);
676
677 adapter->diag_cnt = 0;
678 qlcnic_xmit_frame(skb, adapter->netdev);
679
680 loop = 0;
681 do {
682 msleep(1);
683 qlcnic_process_rcv_ring_diag(sds_ring);
684 } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
685 !adapter->diag_cnt);
686
687 dev_kfree_skb_any(skb);
688
689 if (!adapter->diag_cnt)
690 dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
691 " not recevied\n", i + 1);
692 else
693 cnt++;
694 }
695 if (cnt != i) {
696 dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
697 return -1;
698 }
699 return 0;
700}
701
702static int qlcnic_loopback_test(struct net_device *netdev)
703{
704 struct qlcnic_adapter *adapter = netdev_priv(netdev);
705 int max_sds_rings = adapter->max_sds_rings;
706 int ret;
707
708 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
709 dev_warn(&adapter->pdev->dev, "Loopback test not supported"
710 "for non privilege function\n");
711 return 0;
712 }
713
714 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
715 return -EIO;
716
717 if (qlcnic_request_quiscent_mode(adapter)) {
718 clear_bit(__QLCNIC_RESETTING, &adapter->state);
719 return -EIO;
720 }
721
722 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
723 if (ret)
724 goto clear_it;
725
726 ret = qlcnic_set_ilb_mode(adapter);
727 if (ret)
728 goto done;
729
730 ret = qlcnic_do_ilb_test(adapter);
731
732 qlcnic_clear_ilb_mode(adapter);
733
734done:
735 qlcnic_diag_free_res(netdev, max_sds_rings);
736
737clear_it:
738 qlcnic_clear_quiscent_mode(adapter);
739 adapter->max_sds_rings = max_sds_rings;
740 clear_bit(__QLCNIC_RESETTING, &adapter->state);
741 return ret;
742}
743
744static int qlcnic_irq_test(struct net_device *netdev) 628static int qlcnic_irq_test(struct net_device *netdev)
745{ 629{
746 struct qlcnic_adapter *adapter = netdev_priv(netdev); 630 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -793,9 +677,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
793 if (data[2]) 677 if (data[2])
794 eth_test->flags |= ETH_TEST_FL_FAILED; 678 eth_test->flags |= ETH_TEST_FL_FAILED;
795 679
796 data[3] = qlcnic_loopback_test(dev);
797 if (data[3])
798 eth_test->flags |= ETH_TEST_FL_FAILED;
799 680
800 } 681 }
801} 682}
@@ -925,9 +806,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
925 806
926 dev->features &= ~NETIF_F_LRO; 807 dev->features &= ~NETIF_F_LRO;
927 qlcnic_send_lro_cleanup(adapter); 808 qlcnic_send_lro_cleanup(adapter);
809 dev_info(&adapter->pdev->dev,
810 "disabling LRO as rx_csum is off\n");
928 } 811 }
929 adapter->rx_csum = !!data; 812 adapter->rx_csum = !!data;
930 dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
931 return 0; 813 return 0;
932} 814}
933 815
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1a..19328e05b75d 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef __QLCNIC_HDR_H_ 8#ifndef __QLCNIC_HDR_H_
@@ -722,7 +705,7 @@ enum {
722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 705#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 706#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
724 707
725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 708#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 709#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
727#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 710#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
728#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 711#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
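[The QLC_DEV_CHECK_ACTIVE() fix is subtle: the old macro used &=, so merely testing whether a function's active bit was set rewrote VAL in the caller, corrupting it for every later QLC_DEV_* operation on the same word. Side by side:

	#define QLC_DEV_CHECK_ACTIVE_OLD(VAL, FN) ((VAL) &= (1 << (FN * 4)))	/* test + clobber */
	#define QLC_DEV_CHECK_ACTIVE_NEW(VAL, FN) ((VAL) & (1 << (FN * 4)))	/* pure test */
]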
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a7ee27..c9c4bf1458a8 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -1234,56 +1217,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1234 1217
1235 return rv; 1218 return rv;
1236} 1219}
1237
1238static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
1239{
1240 struct qlcnic_nic_req req;
1241 int rv;
1242 u64 word;
1243
1244 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1245 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1246
1247 word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
1248 ((u64)adapter->portnum << 16);
1249 req.req_hdr = cpu_to_le64(word);
1250 req.words[0] = cpu_to_le64(flag);
1251
1252 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1253 if (rv)
1254 dev_err(&adapter->pdev->dev,
1255 "%sting loopback mode failed.\n",
1256 flag ? "Set" : "Reset");
1257 return rv;
1258}
1259
1260int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
1261{
1262 if (qlcnic_set_fw_loopback(adapter, 1))
1263 return -EIO;
1264
1265 if (qlcnic_nic_set_promisc(adapter,
1266 VPORT_MISS_MODE_ACCEPT_ALL)) {
1267 qlcnic_set_fw_loopback(adapter, 0);
1268 return -EIO;
1269 }
1270
1271 msleep(1000);
1272 return 0;
1273}
1274
1275void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1276{
1277 int mode = VPORT_MISS_MODE_DROP;
1278 struct net_device *netdev = adapter->netdev;
1279
1280 qlcnic_set_fw_loopback(adapter, 0);
1281
1282 if (netdev->flags & IFF_PROMISC)
1283 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1284 else if (netdev->flags & IFF_ALLMULTI)
1285 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1286
1287 qlcnic_nic_set_promisc(adapter, mode);
1288 msleep(1000);
1289}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41fe..9b9c7c39d3ee 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/netdevice.h> 8#include <linux/netdevice.h>
@@ -236,12 +219,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
236 tx_ring->num_desc = adapter->num_txd; 219 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 220 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238 221
239 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 222 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) { 223 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); 224 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
242 goto err_out; 225 goto err_out;
243 } 226 }
244 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
245 tx_ring->cmd_buf_arr = cmd_buf_arr; 227 tx_ring->cmd_buf_arr = cmd_buf_arr;
246 228
247 recv_ctx = &adapter->recv_ctx; 229 recv_ctx = &adapter->recv_ctx;
@@ -275,14 +257,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
275 rds_ring->dma_size + NET_IP_ALIGN; 257 rds_ring->dma_size + NET_IP_ALIGN;
276 break; 258 break;
277 } 259 }
278 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) 260 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
279 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
280 if (rds_ring->rx_buf_arr == NULL) { 261 if (rds_ring->rx_buf_arr == NULL) {
281 dev_err(&netdev->dev, "Failed to allocate " 262 dev_err(&netdev->dev, "Failed to allocate "
282 "rx buffer ring %d\n", ring); 263 "rx buffer ring %d\n", ring);
283 goto err_out; 264 goto err_out;
284 } 265 }
285 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
286 INIT_LIST_HEAD(&rds_ring->free_list); 266 INIT_LIST_HEAD(&rds_ring->free_list);
287 /* 267 /*
288 * Now go through all of them, set reference handles 268 * Now go through all of them, set reference handles
@@ -1693,99 +1673,6 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1693 spin_unlock(&rds_ring->lock); 1673 spin_unlock(&rds_ring->lock);
1694} 1674}
1695 1675
1696static void dump_skb(struct sk_buff *skb)
1697{
1698 int i;
1699 unsigned char *data = skb->data;
1700
1701 for (i = 0; i < skb->len; i++) {
1702 printk("%02x ", data[i]);
1703 if ((i & 0x0f) == 8)
1704 printk("\n");
1705 }
1706}
1707
1708static struct qlcnic_rx_buffer *
1709qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1710 struct qlcnic_host_sds_ring *sds_ring,
1711 int ring, u64 sts_data0)
1712{
1713 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1714 struct qlcnic_rx_buffer *buffer;
1715 struct sk_buff *skb;
1716 struct qlcnic_host_rds_ring *rds_ring;
1717 int index, length, cksum, pkt_offset;
1718
1719 if (unlikely(ring >= adapter->max_rds_rings))
1720 return NULL;
1721
1722 rds_ring = &recv_ctx->rds_rings[ring];
1723
1724 index = qlcnic_get_sts_refhandle(sts_data0);
1725 if (unlikely(index >= rds_ring->num_desc))
1726 return NULL;
1727
1728 buffer = &rds_ring->rx_buf_arr[index];
1729
1730 length = qlcnic_get_sts_totallength(sts_data0);
1731 cksum = qlcnic_get_sts_status(sts_data0);
1732 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1733
1734 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1735 if (!skb)
1736 return buffer;
1737
1738 if (length > rds_ring->skb_size)
1739 skb_put(skb, rds_ring->skb_size);
1740 else
1741 skb_put(skb, length);
1742
1743 if (pkt_offset)
1744 skb_pull(skb, pkt_offset);
1745
1746 if (!qlcnic_check_loopback_buff(skb->data))
1747 adapter->diag_cnt++;
1748 else
1749 dump_skb(skb);
1750
1751 dev_kfree_skb_any(skb);
1752 adapter->stats.rx_pkts++;
1753 adapter->stats.rxbytes += length;
1754
1755 return buffer;
1756}
1757
1758void
1759qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1760{
1761 struct qlcnic_adapter *adapter = sds_ring->adapter;
1762 struct status_desc *desc;
1763 struct qlcnic_rx_buffer *rxbuf;
1764 u64 sts_data0;
1765
1766 int opcode, ring, desc_cnt;
1767 u32 consumer = sds_ring->consumer;
1768
1769 desc = &sds_ring->desc_head[consumer];
1770 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1771
1772 if (!(sts_data0 & STATUS_OWNER_HOST))
1773 return;
1774
1775 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1776 opcode = qlcnic_get_sts_opcode(sts_data0);
1777
1778 ring = qlcnic_get_sts_type(sts_data0);
1779 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1780 ring, sts_data0);
1781
1782 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1783 consumer = get_next_index(consumer, sds_ring->num_desc);
1784
1785 sds_ring->consumer = consumer;
1786 writel(consumer, sds_ring->crb_sts_consumer);
1787}
1788
1789void 1676void
1790qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, 1677qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1791 u8 alt_mac, u8 *mac) 1678 u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cdf9ab3..788850e2ba4e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/slab.h> 8#include <linux/slab.h>
@@ -1450,7 +1433,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1450 netdev->irq = adapter->msix_entries[0].vector; 1433 netdev->irq = adapter->msix_entries[0].vector;
1451 1434
1452 netif_carrier_off(netdev); 1435 netif_carrier_off(netdev);
1453 netif_stop_queue(netdev);
1454 1436
1455 err = register_netdev(netdev); 1437 err = register_netdev(netdev);
1456 if (err) { 1438 if (err) {
@@ -1486,6 +1468,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1486 uint8_t revision_id; 1468 uint8_t revision_id;
1487 uint8_t pci_using_dac; 1469 uint8_t pci_using_dac;
1488 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; 1470 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1471 u32 val;
1489 1472
1490 err = pci_enable_device(pdev); 1473 err = pci_enable_device(pdev);
1491 if (err) 1474 if (err)
@@ -1547,6 +1530,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1547 if (err) 1530 if (err)
1548 goto err_out_iounmap; 1531 goto err_out_iounmap;
1549 1532
1533 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
1534 if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
1535 adapter->flags |= QLCNIC_NEED_FLR;
1536
1550 err = adapter->nic_ops->start_firmware(adapter); 1537 err = adapter->nic_ops->start_firmware(adapter);
1551 if (err) { 1538 if (err) {
 1552 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n"); 1539 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
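The added probe-time check reads the DRV_ACTIVE CRB register and, if this function's bit is still set from a previous driver instance that did not shut down cleanly, marks the adapter as needing a function-level reset before firmware start. A hedged sketch of the idea; the nibble-per-function bit layout assumed in QLC_DEV_CHECK_ACTIVE below is illustrative, not taken from this patch:

    /* Assumed layout: PCI function FN owns bit (FN * 4) of DRV_ACTIVE. */
    #define QLC_DEV_CHECK_ACTIVE(VAL, FN)   ((VAL) & (1 << ((FN) * 4)))

    val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
    if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
            adapter->flags |= QLCNIC_NEED_FLR;  /* stale active bit: request FLR */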
@@ -2855,61 +2842,6 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2855 qlcnic_api_unlock(adapter); 2842 qlcnic_api_unlock(adapter);
2856} 2843}
2857 2844
 2858/* Caller should hold the RESETTING bit.
 2859 * This should be called in sync with qlcnic_request_quiscent_mode.
 2860 */
2861void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
2862{
2863 qlcnic_clr_drv_state(adapter);
2864 qlcnic_api_lock(adapter);
2865 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
2866 qlcnic_api_unlock(adapter);
2867}
2868
 2869/* Caller should hold the RESETTING bit.
 2870 */
2871int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
2872{
2873 u8 timeo = adapter->dev_init_timeo / 2;
2874 u32 state;
2875
2876 if (qlcnic_api_lock(adapter))
2877 return -EIO;
2878
2879 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2880 if (state != QLCNIC_DEV_READY)
2881 return -EIO;
2882
2883 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
2884 qlcnic_api_unlock(adapter);
2885 QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
2886 qlcnic_idc_debug_info(adapter, 0);
2887
2888 qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
2889
2890 do {
2891 msleep(2000);
2892 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2893 if (state == QLCNIC_DEV_QUISCENT)
2894 return 0;
2895 if (!qlcnic_check_drv_state(adapter)) {
2896 if (qlcnic_api_lock(adapter))
2897 return -EIO;
2898 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2899 QLCNIC_DEV_QUISCENT);
2900 qlcnic_api_unlock(adapter);
2901 QLCDB(adapter, DRV, "QUISCENT mode set\n");
2902 return 0;
2903 }
2904 } while (--timeo);
2905
2906 dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
2907 " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
2908 QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
2909 qlcnic_clear_quiscent_mode(adapter);
2910 return -EIO;
2911}
2912
2913/*Transit to RESET state from READY state only */ 2845/*Transit to RESET state from READY state only */
2914static void 2846static void
2915qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2847qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3588,9 +3520,12 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3588 case QLCNIC_PORT_DEFAULTS: 3520 case QLCNIC_PORT_DEFAULTS:
3589 if (QLC_DEV_GET_DRV(op_mode, pci_func) != 3521 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3590 QLCNIC_NON_PRIV_FUNC) { 3522 QLCNIC_NON_PRIV_FUNC) {
3591 esw_cfg[i].mac_anti_spoof = 0; 3523 if (esw_cfg[i].mac_anti_spoof != 0)
3592 esw_cfg[i].mac_override = 1; 3524 return QL_STATUS_INVALID_PARAM;
3593 esw_cfg[i].promisc_mode = 1; 3525 if (esw_cfg[i].mac_override != 1)
3526 return QL_STATUS_INVALID_PARAM;
3527 if (esw_cfg[i].promisc_mode != 1)
3528 return QL_STATUS_INVALID_PARAM;
3594 } 3529 }
3595 break; 3530 break;
3596 case QLCNIC_ADD_VLAN: 3531 case QLCNIC_ADD_VLAN:
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..bdb8fe868539 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.25.00.00-01" 19#define DRV_VERSION "v1.00.00.27.00.00-01"
20 20
21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
22 22
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2221int ql_unpause_mpi_risc(struct ql_adapter *qdev); 2221int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2222int ql_pause_mpi_risc(struct ql_adapter *qdev); 2222int ql_pause_mpi_risc(struct ql_adapter *qdev);
2223int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2223int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2224int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2224int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 2225int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2225 u32 ram_addr, int word_count); 2226 u32 ram_addr, int word_count);
2226int ql_core_dump(struct ql_adapter *qdev, 2227int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
2236int ql_mb_get_port_cfg(struct ql_adapter *qdev); 2237int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2237int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2238int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2238int ql_wait_fifo_empty(struct ql_adapter *qdev); 2239int ql_wait_fifo_empty(struct ql_adapter *qdev);
2240void ql_get_dump(struct ql_adapter *qdev, void *buff);
2239void ql_gen_reg_dump(struct ql_adapter *qdev, 2241void ql_gen_reg_dump(struct ql_adapter *qdev,
2240 struct ql_reg_dump *mpi_coredump); 2242 struct ql_reg_dump *mpi_coredump);
2241netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2243netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1318 if (status) 1318 if (status)
1319 return; 1319 return;
1320}
1321
1322void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323{
 1324 /*
 1325 * If force dump is set, the dump has already been taken
 1326 * and stored in our internal buffer, so start the spool
 1327 * to dump it to the log file and also take a snapshot
 1328 * of the general regs to the user's buffer. If force
 1329 * is not set, take a complete dump straight to the
 1330 * user's buffer instead.
 1331 */
1320 1332
1321 if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) 1333 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 if (!ql_core_dump(qdev, buff))
1335 ql_soft_reset_mpi_risc(qdev);
1336 else
1337 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 } else {
1339 ql_gen_reg_dump(qdev, buff);
1322 ql_get_core_dump(qdev); 1340 ql_get_core_dump(qdev);
1341 }
1323} 1342}
1324 1343
1325/* Coredump to messages log file using separate worker thread */ 1344/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
376 drvinfo->n_stats = 0; 376 drvinfo->n_stats = 0;
377 drvinfo->testinfo_len = 0; 377 drvinfo->testinfo_len = 0;
378 drvinfo->regdump_len = 0; 378 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
379 drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
380 else
381 drvinfo->regdump_len = sizeof(struct ql_reg_dump);
379 drvinfo->eedump_len = 0; 382 drvinfo->eedump_len = 0;
380} 383}
381 384
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
547 550
548static int ql_get_regs_len(struct net_device *ndev) 551static int ql_get_regs_len(struct net_device *ndev)
549{ 552{
550 return sizeof(struct ql_reg_dump); 553 struct ql_adapter *qdev = netdev_priv(ndev);
554
555 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
556 return sizeof(struct ql_mpi_coredump);
557 else
558 return sizeof(struct ql_reg_dump);
551} 559}
552 560
553static void ql_get_regs(struct net_device *ndev, 561static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
555{ 563{
556 struct ql_adapter *qdev = netdev_priv(ndev); 564 struct ql_adapter *qdev = netdev_priv(ndev);
557 565
558 ql_gen_reg_dump(qdev, p); 566 ql_get_dump(qdev, p);
567 qdev->core_is_dumped = 0;
568 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
569 regs->len = sizeof(struct ql_mpi_coredump);
570 else
571 regs->len = sizeof(struct ql_reg_dump);
559} 572}
560 573
561static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 574static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
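These two hooks must stay in agreement because the ethtool core allocates the buffer it hands to get_regs() using the length returned by get_regs_len(); reporting sizeof(struct ql_mpi_coredump) in one and sizeof(struct ql_reg_dump) in the other would overrun or truncate the dump. A condensed sketch of the invariant:

    static int ql_get_regs_len(struct net_device *ndev)
    {
            struct ql_adapter *qdev = netdev_priv(ndev);

            /* Must match the regs->len that ql_get_regs() sets later. */
            if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
                    return sizeof(struct ql_mpi_coredump);
            return sizeof(struct ql_reg_dump);
    }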
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..e4dbbbfec723 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
62/* NETIF_MSG_PKTDATA | */ 62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0; 63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64 64
65static int debug = 0x00007fff; /* defaults above */ 65static int debug = -1; /* defaults above */
66module_param(debug, int, 0); 66module_param(debug, int, 0664);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68 68
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, 0664);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static int qlge_mpi_coredump; 76static int qlge_mpi_coredump;
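The qlge_irq_type hunk fixes a subtle bug: the third argument of module_param() is the sysfs permission mode for /sys/module/qlge/parameters/<name>, not a default value, so passing MSIX_IRQ (0) merely hid the parameter from sysfs while the real default came from the initializer all along. The corrected pattern:

    #include <linux/moduleparam.h>

    static int qlge_irq_type = MSIX_IRQ;    /* the default lives in the initializer */
    module_param(qlge_irq_type, int, 0664); /* 0664 = sysfs mode rw-rw-r-- */
    MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");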
@@ -3844,7 +3844,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3844 3844
3845static void ql_display_dev_info(struct net_device *ndev) 3845static void ql_display_dev_info(struct net_device *ndev)
3846{ 3846{
3847 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3847 struct ql_adapter *qdev = netdev_priv(ndev);
3848 3848
3849 netif_info(qdev, probe, qdev->ndev, 3849 netif_info(qdev, probe, qdev->ndev,
3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " 3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4264,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4264 4264
4265static void qlge_set_multicast_list(struct net_device *ndev) 4265static void qlge_set_multicast_list(struct net_device *ndev)
4266{ 4266{
4267 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4267 struct ql_adapter *qdev = netdev_priv(ndev);
4268 struct netdev_hw_addr *ha; 4268 struct netdev_hw_addr *ha;
4269 int i, status; 4269 int i, status;
4270 4270
@@ -4354,7 +4354,7 @@ exit:
4354 4354
4355static int qlge_set_mac_address(struct net_device *ndev, void *p) 4355static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356{ 4356{
4357 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4357 struct ql_adapter *qdev = netdev_priv(ndev);
4358 struct sockaddr *addr = p; 4358 struct sockaddr *addr = p;
4359 int status; 4359 int status;
4360 4360
@@ -4377,7 +4377,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
4377 4377
4378static void qlge_tx_timeout(struct net_device *ndev) 4378static void qlge_tx_timeout(struct net_device *ndev)
4379{ 4379{
4380 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4380 struct ql_adapter *qdev = netdev_priv(ndev);
4381 ql_queue_asic_error(qdev); 4381 ql_queue_asic_error(qdev);
4382} 4382}
4383 4383
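The remaining qlge_main.c hunks all drop the same redundant idiom: netdev_priv() returns void *, which C converts implicitly to any object pointer type, so the explicit struct cast adds nothing but noise:

    #include <linux/netdevice.h>

    struct ql_adapter *qdev = netdev_priv(ndev);                      /* idiomatic */
    struct ql_adapter *same = (struct ql_adapter *)netdev_priv(ndev); /* redundant cast */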
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..100a462cc916 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
87 return status; 87 return status;
88} 88}
89 89
90static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) 90int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
91{ 91{
92 int status; 92 int status;
93 status = ql_write_mpi_reg(qdev, 0x00001010, 1); 93 status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..98d792c33877 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
846 else 846 else
847 tp->features &= ~RTL_FEATURE_WOL; 847 tp->features &= ~RTL_FEATURE_WOL;
848 __rtl8169_set_wol(tp, wol->wolopts); 848 __rtl8169_set_wol(tp, wol->wolopts);
849 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
850
851 spin_unlock_irq(&tp->lock); 849 spin_unlock_irq(&tp->lock);
852 850
851 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
852
853 return 0; 853 return 0;
854} 854}
855 855
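The reordering here is presumably about sleeping in atomic context: device_set_wakeup_enable() can sleep (it may allocate a wakeup source), so it must not be called inside the spin_lock_irq()/spin_unlock_irq() section. The resulting shape:

    spin_lock_irq(&tp->lock);
    __rtl8169_set_wol(tp, wol->wolopts);    /* register writes: fine under the lock */
    spin_unlock_irq(&tp->lock);

    /* may sleep, so it runs after the lock is dropped */
    device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);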
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
2931 .hw_start = rtl_hw_start_8168, 2931 .hw_start = rtl_hw_start_8168,
2932 .region = 2, 2932 .region = 2,
2933 .align = 8, 2933 .align = 8,
2934 .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | 2934 .intr_event = SYSErr | LinkChg | RxOverflow |
2935 TxErr | TxOK | RxOK | RxErr, 2935 TxErr | TxOK | RxOK | RxErr,
2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3240,7 +3240,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3240 struct net_device *dev = pci_get_drvdata(pdev); 3240 struct net_device *dev = pci_get_drvdata(pdev);
3241 struct rtl8169_private *tp = netdev_priv(dev); 3241 struct rtl8169_private *tp = netdev_priv(dev);
3242 3242
3243 flush_scheduled_work(); 3243 cancel_delayed_work_sync(&tp->task);
3244 3244
3245 unregister_netdev(dev); 3245 unregister_netdev(dev);
3246 3246
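Swapping flush_scheduled_work() for cancel_delayed_work_sync(&tp->task) is narrower and safer: the old call waited for the entire shared workqueue, which can deadlock if the caller holds a lock that some unrelated queued work also needs, while the new call cancels and waits for this driver's own work item only. The s2io hunk further down makes the same conversion with cancel_work_sync(). Sketch of the teardown:

    #include <linux/workqueue.h>

    /* Targeted: wait only for our own pending work before unregistering. */
    cancel_delayed_work_sync(&tp->task);
    unregister_netdev(dev);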
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4440 u32 status = opts1 & RxProtoMask; 4440 u32 status = opts1 & RxProtoMask;
4441 4441
4442 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || 4442 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
4443 ((status == RxProtoUDP) && !(opts1 & UDPFail)) || 4443 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
4444 ((status == RxProtoIP) && !(opts1 & IPFail)))
4445 skb->ip_summed = CHECKSUM_UNNECESSARY; 4444 skb->ip_summed = CHECKSUM_UNNECESSARY;
4446 else 4445 else
4447 skb_checksum_none_assert(skb); 4446 skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4588 } 4587 }
4589 4588
4590 /* Work around for rx fifo overflow */ 4589 /* Work around for rx fifo overflow */
4591 if (unlikely(status & RxFIFOOver)) { 4590 if (unlikely(status & RxFIFOOver) &&
4591 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4592 netif_stop_queue(dev); 4592 netif_stop_queue(dev);
4593 rtl8169_tx_timeout(dev); 4593 rtl8169_tx_timeout(dev);
4594 break; 4594 break;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896a..39c17cecb8b9 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -88,14 +88,14 @@
88#include "s2io.h" 88#include "s2io.h"
89#include "s2io-regs.h" 89#include "s2io-regs.h"
90 90
91#define DRV_VERSION "2.0.26.27" 91#define DRV_VERSION "2.0.26.28"
92 92
93/* S2io Driver name & version. */ 93/* S2io Driver name & version. */
94static char s2io_driver_name[] = "Neterion"; 94static const char s2io_driver_name[] = "Neterion";
95static char s2io_driver_version[] = DRV_VERSION; 95static const char s2io_driver_version[] = DRV_VERSION;
96 96
97static int rxd_size[2] = {32, 48}; 97static const int rxd_size[2] = {32, 48};
98static int rxd_count[2] = {127, 85}; 98static const int rxd_count[2] = {127, 85};
99 99
100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101{ 101{
@@ -3598,10 +3598,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
3598 val64 = readq(&bar0->pif_rd_swapper_fb); 3598 val64 = readq(&bar0->pif_rd_swapper_fb);
3599 if (val64 != 0x0123456789ABCDEFULL) { 3599 if (val64 != 0x0123456789ABCDEFULL) {
3600 int i = 0; 3600 int i = 0;
3601 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */ 3601 static const u64 value[] = {
3602 0x8100008181000081ULL, /* FE=1, SE=0 */ 3602 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3603 0x4200004242000042ULL, /* FE=0, SE=1 */ 3603 0x8100008181000081ULL, /* FE=1, SE=0 */
3604 0}; /* FE=0, SE=0 */ 3604 0x4200004242000042ULL, /* FE=0, SE=1 */
3605 0 /* FE=0, SE=0 */
3606 };
3605 3607
3606 while (i < 4) { 3608 while (i < 4) {
3607 writeq(value[i], &bar0->swapper_ctrl); 3609 writeq(value[i], &bar0->swapper_ctrl);
@@ -3627,10 +3629,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
3627 3629
3628 if (val64 != valt) { 3630 if (val64 != valt) {
3629 int i = 0; 3631 int i = 0;
3630 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ 3632 static const u64 value[] = {
3631 0x0081810000818100ULL, /* FE=1, SE=0 */ 3633 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3632 0x0042420000424200ULL, /* FE=0, SE=1 */ 3634 0x0081810000818100ULL, /* FE=1, SE=0 */
3633 0}; /* FE=0, SE=0 */ 3635 0x0042420000424200ULL, /* FE=0, SE=1 */
3636 0 /* FE=0, SE=0 */
3637 };
3634 3638
3635 while (i < 4) { 3639 while (i < 4) {
3636 writeq((value[i] | valr), &bar0->swapper_ctrl); 3640 writeq((value[i] | valr), &bar0->swapper_ctrl);
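Both swapper-table hunks apply the same micro-cleanup: declaring a constant lookup table static const gives one read-only copy in .rodata instead of rebuilding the array on the kernel stack at every call. A sketch of the difference:

    #include <linux/types.h>

    void example(void)
    {
            /* initialized on the stack on every invocation */
            u64 on_stack[] = { 0xC30000C3C30000C3ULL, 0 };

            /* emitted once into .rodata; no per-call copy */
            static const u64 in_rodata[] = { 0xC30000C3C30000C3ULL, 0 };

            (void)on_stack;
            (void)in_rodata;
    }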
@@ -5568,30 +5572,27 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
5568 struct s2io_nic *sp = netdev_priv(dev); 5572 struct s2io_nic *sp = netdev_priv(dev);
5569 int i, tx_desc_count = 0, rx_desc_count = 0; 5573 int i, tx_desc_count = 0, rx_desc_count = 0;
5570 5574
5571 if (sp->rxd_mode == RXD_MODE_1) 5575 if (sp->rxd_mode == RXD_MODE_1) {
5572 ering->rx_max_pending = MAX_RX_DESC_1; 5576 ering->rx_max_pending = MAX_RX_DESC_1;
5573 else if (sp->rxd_mode == RXD_MODE_3B) 5577 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5578 } else {
5574 ering->rx_max_pending = MAX_RX_DESC_2; 5579 ering->rx_max_pending = MAX_RX_DESC_2;
5580 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5581 }
5575 5582
5583 ering->rx_mini_max_pending = 0;
5576 ering->tx_max_pending = MAX_TX_DESC; 5584 ering->tx_max_pending = MAX_TX_DESC;
5577 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5578 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5579 5585
5580 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); 5586 for (i = 0; i < sp->config.rx_ring_num; i++)
5581 ering->tx_pending = tx_desc_count;
5582 rx_desc_count = 0;
5583 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5584 rx_desc_count += sp->config.rx_cfg[i].num_rxd; 5587 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5585
5586 ering->rx_pending = rx_desc_count; 5588 ering->rx_pending = rx_desc_count;
5587
5588 ering->rx_mini_max_pending = 0;
5589 ering->rx_mini_pending = 0;
5590 if (sp->rxd_mode == RXD_MODE_1)
5591 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5592 else if (sp->rxd_mode == RXD_MODE_3B)
5593 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5594 ering->rx_jumbo_pending = rx_desc_count; 5589 ering->rx_jumbo_pending = rx_desc_count;
5590 ering->rx_mini_pending = 0;
5591
5592 for (i = 0; i < sp->config.tx_fifo_num; i++)
5593 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5594 ering->tx_pending = tx_desc_count;
5595 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5595} 5596}
5596 5597
5597/** 5598/**
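After the reshuffle, every struct ethtool_ringparam field is assigned exactly once: the *_max_pending members report the hardware ceilings for the current buffer mode, while rx_pending and tx_pending report the configured totals summed over all rings and fifos. In outline (limit_for() is an illustrative stand-in, not a driver function):

    ering->rx_max_pending = limit_for(sp->rxd_mode);   /* hardware ceiling */
    for (i = 0; i < sp->config.rx_ring_num; i++)
            rx_desc_count += sp->config.rx_cfg[i].num_rxd;
    ering->rx_pending = rx_desc_count;                 /* currently configured */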
@@ -7692,6 +7693,8 @@ static void s2io_init_pci(struct s2io_nic *sp)
7692static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, 7693static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7693 u8 *dev_multiq) 7694 u8 *dev_multiq)
7694{ 7695{
7696 int i;
7697
7695 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) { 7698 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7696 DBG_PRINT(ERR_DBG, "Requested number of tx fifos " 7699 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7697 "(%d) not supported\n", tx_fifo_num); 7700 "(%d) not supported\n", tx_fifo_num);
@@ -7750,6 +7753,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7750 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n"); 7753 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7751 rx_ring_mode = 1; 7754 rx_ring_mode = 1;
7752 } 7755 }
7756
7757 for (i = 0; i < MAX_RX_RINGS; i++)
7758 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7759 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7760 "supported\nDefaulting to %d\n",
7761 MAX_RX_BLOCKS_PER_RING);
7762 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7763 }
7764
7753 return SUCCESS; 7765 return SUCCESS;
7754} 7766}
7755 7767
@@ -8321,8 +8333,7 @@ mem_alloc_failed:
8321 8333
8322static void __devexit s2io_rem_nic(struct pci_dev *pdev) 8334static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8323{ 8335{
8324 struct net_device *dev = 8336 struct net_device *dev = pci_get_drvdata(pdev);
8325 (struct net_device *)pci_get_drvdata(pdev);
8326 struct s2io_nic *sp; 8337 struct s2io_nic *sp;
8327 8338
8328 if (dev == NULL) { 8339 if (dev == NULL) {
@@ -8330,9 +8341,11 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8330 return; 8341 return;
8331 } 8342 }
8332 8343
8333 flush_scheduled_work();
8334
8335 sp = netdev_priv(dev); 8344 sp = netdev_priv(dev);
8345
8346 cancel_work_sync(&sp->rst_timer_task);
8347 cancel_work_sync(&sp->set_link_task);
8348
8336 unregister_netdev(dev); 8349 unregister_netdev(dev);
8337 8350
8338 free_shared_mem(sp); 8351 free_shared_mem(sp);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 00b8614efe48..7d160306b651 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -355,13 +355,12 @@ struct stat_block {
355#define FIFO_OTHER_MAX_NUM 1 355#define FIFO_OTHER_MAX_NUM 1
356 356
357 357
358#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 ) 358#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
359#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 ) 359#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
360#define MAX_RX_DESC_3 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
361#define MAX_TX_DESC (MAX_AVAILABLE_TXDS) 360#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
362 361
363/* FIFO mappings for all possible number of fifos configured */ 362/* FIFO mappings for all possible number of fifos configured */
364static int fifo_map[][MAX_TX_FIFOS] = { 363static const int fifo_map[][MAX_TX_FIFOS] = {
365 {0, 0, 0, 0, 0, 0, 0, 0}, 364 {0, 0, 0, 0, 0, 0, 0, 0},
366 {0, 0, 0, 0, 1, 1, 1, 1}, 365 {0, 0, 0, 0, 1, 1, 1, 1},
367 {0, 0, 0, 1, 1, 1, 2, 2}, 366 {0, 0, 0, 1, 1, 1, 2, 2},
@@ -372,7 +371,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
372 {0, 1, 2, 3, 4, 5, 6, 7}, 371 {0, 1, 2, 3, 4, 5, 6, 7},
373}; 372};
374 373
375static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7}; 374static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
376 375
377/* Maintains Per FIFO related information. */ 376/* Maintains Per FIFO related information. */
378struct tx_fifo_config { 377struct tx_fifo_config {
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf372828..76290a8c3c14 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1449 dev->irq = pdev->irq; 1449 dev->irq = pdev->irq;
1450 1450
1451 /* faked with skb_copy_and_csum_dev */ 1451 /* faked with skb_copy_and_csum_dev */
1452 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; 1452 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1453 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1453 1454
1454 dev->netdev_ops = &sc92031_netdev_ops; 1455 dev->netdev_ops = &sc92031_netdev_ops;
1455 dev->watchdog_timeo = TX_TIMEOUT; 1456 dev->watchdog_timeo = TX_TIMEOUT;
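The sc92031 feature change narrows the advertised checksum offload: NETIF_F_HW_CSUM claims arbitrary protocols can be checksummed via csum_start/csum_offset, while NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM limit the claim to TCP/UDP over IPv4/IPv6. The narrower advertisement:

    /* TCP/UDP over IPv4 and IPv6 only, not arbitrary protocols */
    dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
                    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;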
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e47976..2166c1d0a533 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -23,7 +23,6 @@
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include "net_driver.h" 24#include "net_driver.h"
25#include "efx.h" 25#include "efx.h"
26#include "mdio_10g.h"
27#include "nic.h" 26#include "nic.h"
28 27
29#include "mcdi.h" 28#include "mcdi.h"
@@ -197,7 +196,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
197 196
198static void efx_remove_channels(struct efx_nic *efx); 197static void efx_remove_channels(struct efx_nic *efx);
199static void efx_remove_port(struct efx_nic *efx); 198static void efx_remove_port(struct efx_nic *efx);
199static void efx_init_napi(struct efx_nic *efx);
200static void efx_fini_napi(struct efx_nic *efx); 200static void efx_fini_napi(struct efx_nic *efx);
201static void efx_fini_napi_channel(struct efx_channel *channel);
201static void efx_fini_struct(struct efx_nic *efx); 202static void efx_fini_struct(struct efx_nic *efx);
202static void efx_start_all(struct efx_nic *efx); 203static void efx_start_all(struct efx_nic *efx);
203static void efx_stop_all(struct efx_nic *efx); 204static void efx_stop_all(struct efx_nic *efx);
@@ -335,8 +336,10 @@ void efx_process_channel_now(struct efx_channel *channel)
335 336
336 /* Disable interrupts and wait for ISRs to complete */ 337 /* Disable interrupts and wait for ISRs to complete */
337 efx_nic_disable_interrupts(efx); 338 efx_nic_disable_interrupts(efx);
338 if (efx->legacy_irq) 339 if (efx->legacy_irq) {
339 synchronize_irq(efx->legacy_irq); 340 synchronize_irq(efx->legacy_irq);
341 efx->legacy_irq_enabled = false;
342 }
340 if (channel->irq) 343 if (channel->irq)
341 synchronize_irq(channel->irq); 344 synchronize_irq(channel->irq);
342 345
@@ -351,6 +354,8 @@ void efx_process_channel_now(struct efx_channel *channel)
351 efx_channel_processed(channel); 354 efx_channel_processed(channel);
352 355
353 napi_enable(&channel->napi_str); 356 napi_enable(&channel->napi_str);
357 if (efx->legacy_irq)
358 efx->legacy_irq_enabled = true;
354 efx_nic_enable_interrupts(efx); 359 efx_nic_enable_interrupts(efx);
355} 360}
356 361
@@ -426,6 +431,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
426 431
427 *channel = *old_channel; 432 *channel = *old_channel;
428 433
434 channel->napi_dev = NULL;
429 memset(&channel->eventq, 0, sizeof(channel->eventq)); 435 memset(&channel->eventq, 0, sizeof(channel->eventq));
430 436
431 rx_queue = &channel->rx_queue; 437 rx_queue = &channel->rx_queue;
@@ -736,9 +742,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
736 if (rc) 742 if (rc)
737 goto rollback; 743 goto rollback;
738 744
745 efx_init_napi(efx);
746
739 /* Destroy old channels */ 747 /* Destroy old channels */
740 for (i = 0; i < efx->n_channels; i++) 748 for (i = 0; i < efx->n_channels; i++) {
749 efx_fini_napi_channel(other_channel[i]);
741 efx_remove_channel(other_channel[i]); 750 efx_remove_channel(other_channel[i]);
751 }
742out: 752out:
743 /* Free unused channel structures */ 753 /* Free unused channel structures */
744 for (i = 0; i < efx->n_channels; i++) 754 for (i = 0; i < efx->n_channels; i++)
@@ -910,6 +920,7 @@ static void efx_mac_work(struct work_struct *data)
910 920
911static int efx_probe_port(struct efx_nic *efx) 921static int efx_probe_port(struct efx_nic *efx)
912{ 922{
923 unsigned char *perm_addr;
913 int rc; 924 int rc;
914 925
915 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 926 netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -923,11 +934,12 @@ static int efx_probe_port(struct efx_nic *efx)
923 return rc; 934 return rc;
924 935
925 /* Sanity check MAC address */ 936 /* Sanity check MAC address */
926 if (is_valid_ether_addr(efx->mac_address)) { 937 perm_addr = efx->net_dev->perm_addr;
927 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 938 if (is_valid_ether_addr(perm_addr)) {
939 memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
928 } else { 940 } else {
929 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", 941 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
930 efx->mac_address); 942 perm_addr);
931 if (!allow_bad_hwaddr) { 943 if (!allow_bad_hwaddr) {
932 rc = -EINVAL; 944 rc = -EINVAL;
933 goto err; 945 goto err;
@@ -1400,6 +1412,8 @@ static void efx_start_all(struct efx_nic *efx)
1400 efx_start_channel(channel); 1412 efx_start_channel(channel);
1401 } 1413 }
1402 1414
1415 if (efx->legacy_irq)
1416 efx->legacy_irq_enabled = true;
1403 efx_nic_enable_interrupts(efx); 1417 efx_nic_enable_interrupts(efx);
1404 1418
1405 /* Switch to event based MCDI completions after enabling interrupts. 1419 /* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1474,10 @@ static void efx_stop_all(struct efx_nic *efx)
1460 1474
1461 /* Disable interrupts and wait for ISR to complete */ 1475 /* Disable interrupts and wait for ISR to complete */
1462 efx_nic_disable_interrupts(efx); 1476 efx_nic_disable_interrupts(efx);
1463 if (efx->legacy_irq) 1477 if (efx->legacy_irq) {
1464 synchronize_irq(efx->legacy_irq); 1478 synchronize_irq(efx->legacy_irq);
1479 efx->legacy_irq_enabled = false;
1480 }
1465 efx_for_each_channel(channel, efx) { 1481 efx_for_each_channel(channel, efx) {
1466 if (channel->irq) 1482 if (channel->irq)
1467 synchronize_irq(channel->irq); 1483 synchronize_irq(channel->irq);
@@ -1593,7 +1609,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1593 * 1609 *
1594 **************************************************************************/ 1610 **************************************************************************/
1595 1611
1596static int efx_init_napi(struct efx_nic *efx) 1612static void efx_init_napi(struct efx_nic *efx)
1597{ 1613{
1598 struct efx_channel *channel; 1614 struct efx_channel *channel;
1599 1615
@@ -1602,18 +1618,21 @@ static int efx_init_napi(struct efx_nic *efx)
1602 netif_napi_add(channel->napi_dev, &channel->napi_str, 1618 netif_napi_add(channel->napi_dev, &channel->napi_str,
1603 efx_poll, napi_weight); 1619 efx_poll, napi_weight);
1604 } 1620 }
1605 return 0; 1621}
1622
1623static void efx_fini_napi_channel(struct efx_channel *channel)
1624{
1625 if (channel->napi_dev)
1626 netif_napi_del(&channel->napi_str);
1627 channel->napi_dev = NULL;
1606} 1628}
1607 1629
1608static void efx_fini_napi(struct efx_nic *efx) 1630static void efx_fini_napi(struct efx_nic *efx)
1609{ 1631{
1610 struct efx_channel *channel; 1632 struct efx_channel *channel;
1611 1633
1612 efx_for_each_channel(channel, efx) { 1634 efx_for_each_channel(channel, efx)
1613 if (channel->napi_dev) 1635 efx_fini_napi_channel(channel);
1614 netif_napi_del(&channel->napi_str);
1615 channel->napi_dev = NULL;
1616 }
1617} 1636}
1618 1637
1619/************************************************************************** 1638/**************************************************************************
@@ -1962,7 +1981,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
1962 1981
1963 efx_stop_all(efx); 1982 efx_stop_all(efx);
1964 mutex_lock(&efx->mac_lock); 1983 mutex_lock(&efx->mac_lock);
1965 mutex_lock(&efx->spi_lock);
1966 1984
1967 efx_fini_channels(efx); 1985 efx_fini_channels(efx);
1968 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1986 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2004,7 +2022,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2004 efx_init_channels(efx); 2022 efx_init_channels(efx);
2005 efx_restore_filters(efx); 2023 efx_restore_filters(efx);
2006 2024
2007 mutex_unlock(&efx->spi_lock);
2008 mutex_unlock(&efx->mac_lock); 2025 mutex_unlock(&efx->mac_lock);
2009 2026
2010 efx_start_all(efx); 2027 efx_start_all(efx);
@@ -2014,7 +2031,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2014fail: 2031fail:
2015 efx->port_initialized = false; 2032 efx->port_initialized = false;
2016 2033
2017 mutex_unlock(&efx->spi_lock);
2018 mutex_unlock(&efx->mac_lock); 2034 mutex_unlock(&efx->mac_lock);
2019 2035
2020 return rc; 2036 return rc;
@@ -2202,8 +2218,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2202 /* Initialise common structures */ 2218 /* Initialise common structures */
2203 memset(efx, 0, sizeof(*efx)); 2219 memset(efx, 0, sizeof(*efx));
2204 spin_lock_init(&efx->biu_lock); 2220 spin_lock_init(&efx->biu_lock);
2205 mutex_init(&efx->mdio_lock);
2206 mutex_init(&efx->spi_lock);
2207#ifdef CONFIG_SFC_MTD 2221#ifdef CONFIG_SFC_MTD
2208 INIT_LIST_HEAD(&efx->mtd_list); 2222 INIT_LIST_HEAD(&efx->mtd_list);
2209#endif 2223#endif
@@ -2335,9 +2349,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2335 if (rc) 2349 if (rc)
2336 goto fail1; 2350 goto fail1;
2337 2351
2338 rc = efx_init_napi(efx); 2352 efx_init_napi(efx);
2339 if (rc)
2340 goto fail2;
2341 2353
2342 rc = efx->type->init(efx); 2354 rc = efx->type->init(efx);
2343 if (rc) { 2355 if (rc) {
@@ -2368,7 +2380,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2368 efx->type->fini(efx); 2380 efx->type->fini(efx);
2369 fail3: 2381 fail3:
2370 efx_fini_napi(efx); 2382 efx_fini_napi(efx);
2371 fail2:
2372 efx_remove_all(efx); 2383 efx_remove_all(efx);
2373 fail1: 2384 fail1:
2374 return rc; 2385 return rc;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 10a1bf40da96..003fdb35b4bb 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -74,9 +74,8 @@ extern int efx_filter_insert_filter(struct efx_nic *efx,
74 bool replace); 74 bool replace);
75extern int efx_filter_remove_filter(struct efx_nic *efx, 75extern int efx_filter_remove_filter(struct efx_nic *efx,
76 struct efx_filter_spec *spec); 76 struct efx_filter_spec *spec);
77extern void efx_filter_table_clear(struct efx_nic *efx, 77extern void efx_filter_clear_rx(struct efx_nic *efx,
78 enum efx_filter_table_id table_id, 78 enum efx_filter_priority priority);
79 enum efx_filter_priority priority);
80 79
81/* Channels */ 80/* Channels */
82extern void efx_process_channel_now(struct efx_channel *channel); 81extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16b8b47..0e8bb19ed60d 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -11,14 +11,13 @@
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
14#include <linux/in.h>
14#include "net_driver.h" 15#include "net_driver.h"
15#include "workarounds.h" 16#include "workarounds.h"
16#include "selftest.h" 17#include "selftest.h"
17#include "efx.h" 18#include "efx.h"
18#include "filter.h" 19#include "filter.h"
19#include "nic.h" 20#include "nic.h"
20#include "spi.h"
21#include "mdio_10g.h"
22 21
23struct ethtool_string { 22struct ethtool_string {
24 char name[ETH_GSTRING_LEN]; 23 char name[ETH_GSTRING_LEN];
@@ -560,12 +559,8 @@ static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
560 if (rc) 559 if (rc)
561 return rc; 560 return rc;
562 561
563 if (!(data & ETH_FLAG_NTUPLE)) { 562 if (!(data & ETH_FLAG_NTUPLE))
564 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, 563 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
565 EFX_FILTER_PRI_MANUAL);
566 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
567 EFX_FILTER_PRI_MANUAL);
568 }
569 564
570 return 0; 565 return 0;
571} 566}
@@ -584,6 +579,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
584 goto fail1; 579 goto fail1;
585 } 580 }
586 581
582 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
583 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
584
587 /* We need rx buffers and interrupts. */ 585 /* We need rx buffers and interrupts. */
588 already_up = (efx->net_dev->flags & IFF_UP); 586 already_up = (efx->net_dev->flags & IFF_UP);
589 if (!already_up) { 587 if (!already_up) {
@@ -602,9 +600,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
602 if (!already_up) 600 if (!already_up)
603 dev_close(efx->net_dev); 601 dev_close(efx->net_dev);
604 602
605 netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", 603 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
606 rc == 0 ? "passed" : "failed", 604 rc == 0 ? "passed" : "failed",
607 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 605 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
608 606
609 fail2: 607 fail2:
610 fail1: 608 fail1:
@@ -622,68 +620,6 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
622 return mdio45_nway_restart(&efx->mdio); 620 return mdio45_nway_restart(&efx->mdio);
623} 621}
624 622
625static u32 efx_ethtool_get_link(struct net_device *net_dev)
626{
627 struct efx_nic *efx = netdev_priv(net_dev);
628
629 return efx->link_state.up;
630}
631
632static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
633{
634 struct efx_nic *efx = netdev_priv(net_dev);
635 struct efx_spi_device *spi = efx->spi_eeprom;
636
637 if (!spi)
638 return 0;
639 return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
640 min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
641}
642
643static int efx_ethtool_get_eeprom(struct net_device *net_dev,
644 struct ethtool_eeprom *eeprom, u8 *buf)
645{
646 struct efx_nic *efx = netdev_priv(net_dev);
647 struct efx_spi_device *spi = efx->spi_eeprom;
648 size_t len;
649 int rc;
650
651 rc = mutex_lock_interruptible(&efx->spi_lock);
652 if (rc)
653 return rc;
654 rc = falcon_spi_read(efx, spi,
655 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
656 eeprom->len, &len, buf);
657 mutex_unlock(&efx->spi_lock);
658
659 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
660 eeprom->len = len;
661 return rc;
662}
663
664static int efx_ethtool_set_eeprom(struct net_device *net_dev,
665 struct ethtool_eeprom *eeprom, u8 *buf)
666{
667 struct efx_nic *efx = netdev_priv(net_dev);
668 struct efx_spi_device *spi = efx->spi_eeprom;
669 size_t len;
670 int rc;
671
672 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
673 return -EINVAL;
674
675 rc = mutex_lock_interruptible(&efx->spi_lock);
676 if (rc)
677 return rc;
678 rc = falcon_spi_write(efx, spi,
679 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
680 eeprom->len, &len, buf);
681 mutex_unlock(&efx->spi_lock);
682
683 eeprom->len = len;
684 return rc;
685}
686
687static int efx_ethtool_get_coalesce(struct net_device *net_dev, 623static int efx_ethtool_get_coalesce(struct net_device *net_dev,
688 struct ethtool_coalesce *coalesce) 624 struct ethtool_coalesce *coalesce)
689{ 625{
@@ -978,6 +914,7 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
978 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; 914 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
979 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; 915 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
980 struct efx_filter_spec filter; 916 struct efx_filter_spec filter;
917 int rc;
981 918
982 /* Range-check action */ 919 /* Range-check action */
983 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || 920 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
@@ -987,9 +924,16 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
987 if (~ntuple->fs.data_mask) 924 if (~ntuple->fs.data_mask)
988 return -EINVAL; 925 return -EINVAL;
989 926
927 efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
928 (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
929 0xfff : ntuple->fs.action);
930
990 switch (ntuple->fs.flow_type) { 931 switch (ntuple->fs.flow_type) {
991 case TCP_V4_FLOW: 932 case TCP_V4_FLOW:
992 case UDP_V4_FLOW: 933 case UDP_V4_FLOW: {
934 u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
935 IPPROTO_TCP : IPPROTO_UDP);
936
993 /* Must match all of destination, */ 937 /* Must match all of destination, */
994 if (ip_mask->ip4dst | ip_mask->pdst) 938 if (ip_mask->ip4dst | ip_mask->pdst)
995 return -EINVAL; 939 return -EINVAL;
@@ -1001,7 +945,22 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
1001 /* and nothing else */ 945 /* and nothing else */
1002 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) 946 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
1003 return -EINVAL; 947 return -EINVAL;
948
949 if (!ip_mask->ip4src)
950 rc = efx_filter_set_ipv4_full(&filter, proto,
951 ip_entry->ip4dst,
952 ip_entry->pdst,
953 ip_entry->ip4src,
954 ip_entry->psrc);
955 else
956 rc = efx_filter_set_ipv4_local(&filter, proto,
957 ip_entry->ip4dst,
958 ip_entry->pdst);
959 if (rc)
960 return rc;
1004 break; 961 break;
962 }
963
1005 case ETHER_FLOW: 964 case ETHER_FLOW:
1006 /* Must match all of destination, */ 965 /* Must match all of destination, */
1007 if (!is_zero_ether_addr(mac_mask->h_dest)) 966 if (!is_zero_ether_addr(mac_mask->h_dest))
@@ -1014,58 +973,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
1014 if (!is_broadcast_ether_addr(mac_mask->h_source) || 973 if (!is_broadcast_ether_addr(mac_mask->h_source) ||
1015 mac_mask->h_proto != htons(0xffff)) 974 mac_mask->h_proto != htons(0xffff))
1016 return -EINVAL; 975 return -EINVAL;
976
977 rc = efx_filter_set_eth_local(
978 &filter,
979 (ntuple->fs.vlan_tag_mask == 0xf000) ?
980 ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
981 mac_entry->h_dest);
982 if (rc)
983 return rc;
1017 break; 984 break;
985
1018 default: 986 default:
1019 return -EINVAL; 987 return -EINVAL;
1020 } 988 }
1021 989
1022 filter.priority = EFX_FILTER_PRI_MANUAL; 990 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
1023 filter.flags = 0;
1024
1025 switch (ntuple->fs.flow_type) {
1026 case TCP_V4_FLOW:
1027 if (!ip_mask->ip4src)
1028 efx_filter_set_rx_tcp_full(&filter,
1029 htonl(ip_entry->ip4src),
1030 htons(ip_entry->psrc),
1031 htonl(ip_entry->ip4dst),
1032 htons(ip_entry->pdst));
1033 else
1034 efx_filter_set_rx_tcp_wild(&filter,
1035 htonl(ip_entry->ip4dst),
1036 htons(ip_entry->pdst));
1037 break;
1038 case UDP_V4_FLOW:
1039 if (!ip_mask->ip4src)
1040 efx_filter_set_rx_udp_full(&filter,
1041 htonl(ip_entry->ip4src),
1042 htons(ip_entry->psrc),
1043 htonl(ip_entry->ip4dst),
1044 htons(ip_entry->pdst));
1045 else
1046 efx_filter_set_rx_udp_wild(&filter,
1047 htonl(ip_entry->ip4dst),
1048 htons(ip_entry->pdst));
1049 break;
1050 case ETHER_FLOW:
1051 if (ntuple->fs.vlan_tag_mask == 0xf000)
1052 efx_filter_set_rx_mac_full(&filter,
1053 ntuple->fs.vlan_tag & 0xfff,
1054 mac_entry->h_dest);
1055 else
1056 efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
1057 break;
1058 }
1059
1060 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
1061 return efx_filter_remove_filter(efx, &filter); 991 return efx_filter_remove_filter(efx, &filter);
1062 } else { 992 else
1063 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
1064 filter.dmaq_id = 0xfff;
1065 else
1066 filter.dmaq_id = ntuple->fs.action;
1067 return efx_filter_insert_filter(efx, &filter, true); 993 return efx_filter_insert_filter(efx, &filter, true);
1068 }
1069} 994}
1070 995
1071static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 996static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1115,10 +1040,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1115 .get_msglevel = efx_ethtool_get_msglevel, 1040 .get_msglevel = efx_ethtool_get_msglevel,
1116 .set_msglevel = efx_ethtool_set_msglevel, 1041 .set_msglevel = efx_ethtool_set_msglevel,
1117 .nway_reset = efx_ethtool_nway_reset, 1042 .nway_reset = efx_ethtool_nway_reset,
1118 .get_link = efx_ethtool_get_link, 1043 .get_link = ethtool_op_get_link,
1119 .get_eeprom_len = efx_ethtool_get_eeprom_len,
1120 .get_eeprom = efx_ethtool_get_eeprom,
1121 .set_eeprom = efx_ethtool_set_eeprom,
1122 .get_coalesce = efx_ethtool_get_coalesce, 1044 .get_coalesce = efx_ethtool_get_coalesce,
1123 .set_coalesce = efx_ethtool_set_coalesce, 1045 .set_coalesce = efx_ethtool_set_coalesce,
1124 .get_ringparam = efx_ethtool_get_ringparam, 1046 .get_ringparam = efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019bb2b15..70e4f7dcce81 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -24,7 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "regs.h" 25#include "regs.h"
26#include "io.h" 26#include "io.h"
27#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "workarounds.h" 28#include "workarounds.h"
30 29
@@ -255,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
255 /* Input validation */ 254 /* Input validation */
256 if (len > FALCON_SPI_MAX_LEN) 255 if (len > FALCON_SPI_MAX_LEN)
257 return -EINVAL; 256 return -EINVAL;
258 BUG_ON(!mutex_is_locked(&efx->spi_lock));
259 257
260 /* Check that previous command is not still running */ 258 /* Check that previous command is not still running */
261 rc = falcon_spi_poll(efx); 259 rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
719 int prtad, int devad, u16 addr, u16 value) 717 int prtad, int devad, u16 addr, u16 value)
720{ 718{
721 struct efx_nic *efx = netdev_priv(net_dev); 719 struct efx_nic *efx = netdev_priv(net_dev);
720 struct falcon_nic_data *nic_data = efx->nic_data;
722 efx_oword_t reg; 721 efx_oword_t reg;
723 int rc; 722 int rc;
724 723
@@ -726,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
726 "writing MDIO %d register %d.%d with 0x%04x\n", 725 "writing MDIO %d register %d.%d with 0x%04x\n",
727 prtad, devad, addr, value); 726 prtad, devad, addr, value);
728 727
729 mutex_lock(&efx->mdio_lock); 728 mutex_lock(&nic_data->mdio_lock);
730 729
731 /* Check MDIO not currently being accessed */ 730 /* Check MDIO not currently being accessed */
732 rc = falcon_gmii_wait(efx); 731 rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
762 } 761 }
763 762
764out: 763out:
765 mutex_unlock(&efx->mdio_lock); 764 mutex_unlock(&nic_data->mdio_lock);
766 return rc; 765 return rc;
767} 766}
768 767
@@ -771,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
771 int prtad, int devad, u16 addr) 770 int prtad, int devad, u16 addr)
772{ 771{
773 struct efx_nic *efx = netdev_priv(net_dev); 772 struct efx_nic *efx = netdev_priv(net_dev);
773 struct falcon_nic_data *nic_data = efx->nic_data;
774 efx_oword_t reg; 774 efx_oword_t reg;
775 int rc; 775 int rc;
776 776
777 mutex_lock(&efx->mdio_lock); 777 mutex_lock(&nic_data->mdio_lock);
778 778
779 /* Check MDIO not currently being accessed */ 779 /* Check MDIO not currently being accessed */
780 rc = falcon_gmii_wait(efx); 780 rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@ static int falcon_mdio_read(struct net_device *net_dev,
813 } 813 }
814 814
815out: 815out:
816 mutex_unlock(&efx->mdio_lock); 816 mutex_unlock(&nic_data->mdio_lock);
817 return rc; 817 return rc;
818} 818}
819 819
@@ -841,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
841 } 841 }
842 842
843 /* Fill out MDIO structure and loopback modes */ 843 /* Fill out MDIO structure and loopback modes */
844 mutex_init(&nic_data->mdio_lock);
844 efx->mdio.mdio_read = falcon_mdio_read; 845 efx->mdio.mdio_read = falcon_mdio_read;
845 efx->mdio.mdio_write = falcon_mdio_write; 846 efx->mdio.mdio_write = falcon_mdio_write;
846 rc = efx->phy_op->probe(efx); 847 rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
880 efx_nic_free_buffer(efx, &efx->stats_buffer); 881 efx_nic_free_buffer(efx, &efx->stats_buffer);
881} 882}
882 883
884/* Global events are basically PHY events */
885static bool
886falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
887{
888 struct efx_nic *efx = channel->efx;
889 struct falcon_nic_data *nic_data = efx->nic_data;
890
891 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
892 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
893 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
894 /* Ignored */
895 return true;
896
897 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
898 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
899 nic_data->xmac_poll_required = true;
900 return true;
901 }
902
903 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
904 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
905 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
906 netif_err(efx, rx_err, efx->net_dev,
907 "channel %d seen global RX_RESET event. Resetting.\n",
908 channel->channel);
909
910 atomic_inc(&efx->rx_reset);
911 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
912 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
913 return true;
914 }
915
916 return false;
917}
918
883/************************************************************************** 919/**************************************************************************
884 * 920 *
885 * Falcon test code 921 * Falcon test code
@@ -889,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
889static int 925static int
890falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) 926falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
891{ 927{
928 struct falcon_nic_data *nic_data = efx->nic_data;
892 struct falcon_nvconfig *nvconfig; 929 struct falcon_nvconfig *nvconfig;
893 struct efx_spi_device *spi; 930 struct efx_spi_device *spi;
894 void *region; 931 void *region;
@@ -896,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
896 __le16 *word, *limit; 933 __le16 *word, *limit;
897 u32 csum; 934 u32 csum;
898 935
899 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; 936 if (efx_spi_present(&nic_data->spi_flash))
900 if (!spi) 937 spi = &nic_data->spi_flash;
938 else if (efx_spi_present(&nic_data->spi_eeprom))
939 spi = &nic_data->spi_eeprom;
940 else
901 return -EINVAL; 941 return -EINVAL;
902 942
903 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 943 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
905 return -ENOMEM; 945 return -ENOMEM;
906 nvconfig = region + FALCON_NVCONFIG_OFFSET; 946 nvconfig = region + FALCON_NVCONFIG_OFFSET;
907 947
908 mutex_lock(&efx->spi_lock); 948 mutex_lock(&nic_data->spi_lock);
909 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); 949 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
910 mutex_unlock(&efx->spi_lock); 950 mutex_unlock(&nic_data->spi_lock);
911 if (rc) { 951 if (rc) {
912 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", 952 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
913 efx->spi_flash ? "flash" : "EEPROM"); 953 efx_spi_present(&nic_data->spi_flash) ?
954 "flash" : "EEPROM");
914 rc = -EIO; 955 rc = -EIO;
915 goto out; 956 goto out;
916 } 957 }
@@ -1012,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
1012 1053
1013/* Resets NIC to known state. This routine must be called in process 1054/* Resets NIC to known state. This routine must be called in process
1014 * context and is allowed to sleep. */ 1055 * context and is allowed to sleep. */
1015static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) 1056static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1016{ 1057{
1017 struct falcon_nic_data *nic_data = efx->nic_data; 1058 struct falcon_nic_data *nic_data = efx->nic_data;
1018 efx_oword_t glb_ctl_reg_ker; 1059 efx_oword_t glb_ctl_reg_ker;
@@ -1108,6 +1149,18 @@ fail5:
1108 return rc; 1149 return rc;
1109} 1150}
1110 1151
1152static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1153{
1154 struct falcon_nic_data *nic_data = efx->nic_data;
1155 int rc;
1156
1157 mutex_lock(&nic_data->spi_lock);
1158 rc = __falcon_reset_hw(efx, method);
1159 mutex_unlock(&nic_data->spi_lock);
1160
1161 return rc;
1162}
1163
1111static void falcon_monitor(struct efx_nic *efx) 1164static void falcon_monitor(struct efx_nic *efx)
1112{ 1165{
1113 bool link_changed; 1166 bool link_changed;
@@ -1189,16 +1242,11 @@ static int falcon_reset_sram(struct efx_nic *efx)
1189 return -ETIMEDOUT; 1242 return -ETIMEDOUT;
1190} 1243}
1191 1244
1192static int falcon_spi_device_init(struct efx_nic *efx, 1245static void falcon_spi_device_init(struct efx_nic *efx,
1193 struct efx_spi_device **spi_device_ret, 1246 struct efx_spi_device *spi_device,
1194 unsigned int device_id, u32 device_type) 1247 unsigned int device_id, u32 device_type)
1195{ 1248{
1196 struct efx_spi_device *spi_device;
1197
1198 if (device_type != 0) { 1249 if (device_type != 0) {
1199 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1200 if (!spi_device)
1201 return -ENOMEM;
1202 spi_device->device_id = device_id; 1250 spi_device->device_id = device_id;
1203 spi_device->size = 1251 spi_device->size =
1204 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); 1252 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1263,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1215 1 << SPI_DEV_TYPE_FIELD(device_type, 1263 1 << SPI_DEV_TYPE_FIELD(device_type,
1216 SPI_DEV_TYPE_BLOCK_SIZE); 1264 SPI_DEV_TYPE_BLOCK_SIZE);
1217 } else { 1265 } else {
1218 spi_device = NULL; 1266 spi_device->size = 0;
1219 } 1267 }
1220
1221 kfree(*spi_device_ret);
1222 *spi_device_ret = spi_device;
1223 return 0;
1224}
1225
1226static void falcon_remove_spi_devices(struct efx_nic *efx)
1227{
1228 kfree(efx->spi_eeprom);
1229 efx->spi_eeprom = NULL;
1230 kfree(efx->spi_flash);
1231 efx->spi_flash = NULL;
1232} 1268}
1233 1269
1234/* Extract non-volatile configuration */ 1270/* Extract non-volatile configuration */
1235static int falcon_probe_nvconfig(struct efx_nic *efx) 1271static int falcon_probe_nvconfig(struct efx_nic *efx)
1236{ 1272{
1273 struct falcon_nic_data *nic_data = efx->nic_data;
1237 struct falcon_nvconfig *nvconfig; 1274 struct falcon_nvconfig *nvconfig;
1238 int board_rev;
1239 int rc; 1275 int rc;
1240 1276
1241 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 1277 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1279,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1243 return -ENOMEM; 1279 return -ENOMEM;
1244 1280
1245 rc = falcon_read_nvram(efx, nvconfig); 1281 rc = falcon_read_nvram(efx, nvconfig);
1246 if (rc == -EINVAL) { 1282 if (rc)
1247 netif_err(efx, probe, efx->net_dev, 1283 goto out;
1248 "NVRAM is invalid therefore using defaults\n"); 1284
1249 efx->phy_type = PHY_TYPE_NONE; 1285 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1250 efx->mdio.prtad = MDIO_PRTAD_NONE; 1286 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1251 board_rev = 0; 1287
1252 rc = 0; 1288 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1253 } else if (rc) { 1289 falcon_spi_device_init(
1254 goto fail1; 1290 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1255 } else { 1291 le32_to_cpu(nvconfig->board_v3
1256 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 1292 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
1257 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3; 1293 falcon_spi_device_init(
1258 1294 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1259 efx->phy_type = v2->port0_phy_type; 1295 le32_to_cpu(nvconfig->board_v3
1260 efx->mdio.prtad = v2->port0_phy_addr; 1296 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
1261 board_rev = le16_to_cpu(v2->board_revision);
1262
1263 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1264 rc = falcon_spi_device_init(
1265 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1266 le32_to_cpu(v3->spi_device_type
1267 [FFE_AB_SPI_DEVICE_FLASH]));
1268 if (rc)
1269 goto fail2;
1270 rc = falcon_spi_device_init(
1271 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1272 le32_to_cpu(v3->spi_device_type
1273 [FFE_AB_SPI_DEVICE_EEPROM]));
1274 if (rc)
1275 goto fail2;
1276 }
1277 } 1297 }
1278 1298
1279 /* Read the MAC addresses */ 1299 /* Read the MAC addresses */
1280 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); 1300 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
1281 1301
1282 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 1302 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1283 efx->phy_type, efx->mdio.prtad); 1303 efx->phy_type, efx->mdio.prtad);
1284 1304
1285 rc = falcon_probe_board(efx, board_rev); 1305 rc = falcon_probe_board(efx,
1286 if (rc) 1306 le16_to_cpu(nvconfig->board_v2.board_revision));
1287 goto fail2; 1307out:
1288
1289 kfree(nvconfig);
1290 return 0;
1291
1292 fail2:
1293 falcon_remove_spi_devices(efx);
1294 fail1:
1295 kfree(nvconfig); 1308 kfree(nvconfig);
1296 return rc; 1309 return rc;
1297} 1310}
@@ -1299,6 +1312,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1299/* Probe all SPI devices on the NIC */ 1312/* Probe all SPI devices on the NIC */
1300static void falcon_probe_spi_devices(struct efx_nic *efx) 1313static void falcon_probe_spi_devices(struct efx_nic *efx)
1301{ 1314{
1315 struct falcon_nic_data *nic_data = efx->nic_data;
1302 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 1316 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1303 int boot_dev; 1317 int boot_dev;
1304 1318
@@ -1327,12 +1341,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1327 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); 1341 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1328 } 1342 }
1329 1343
1344 mutex_init(&nic_data->spi_lock);
1345
1330 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) 1346 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1331 falcon_spi_device_init(efx, &efx->spi_flash, 1347 falcon_spi_device_init(efx, &nic_data->spi_flash,
1332 FFE_AB_SPI_DEVICE_FLASH, 1348 FFE_AB_SPI_DEVICE_FLASH,
1333 default_flash_type); 1349 default_flash_type);
1334 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) 1350 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1335 falcon_spi_device_init(efx, &efx->spi_eeprom, 1351 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
1336 FFE_AB_SPI_DEVICE_EEPROM, 1352 FFE_AB_SPI_DEVICE_EEPROM,
1337 large_eeprom_type); 1353 large_eeprom_type);
1338} 1354}
@@ -1397,7 +1413,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1397 } 1413 }
1398 1414
1399 /* Now we can reset the NIC */ 1415 /* Now we can reset the NIC */
1400 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 1416 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
1401 if (rc) { 1417 if (rc) {
1402 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 1418 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1403 goto fail3; 1419 goto fail3;
@@ -1419,8 +1435,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
1419 1435
1420 /* Read in the non-volatile configuration */ 1436 /* Read in the non-volatile configuration */
1421 rc = falcon_probe_nvconfig(efx); 1437 rc = falcon_probe_nvconfig(efx);
1422 if (rc) 1438 if (rc) {
1439 if (rc == -EINVAL)
1440 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
1423 goto fail5; 1441 goto fail5;
1442 }
1424 1443
1425 /* Initialise I2C adapter */ 1444 /* Initialise I2C adapter */
1426 board = falcon_board(efx); 1445 board = falcon_board(efx);
@@ -1452,7 +1471,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
1452 BUG_ON(i2c_del_adapter(&board->i2c_adap)); 1471 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1453 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1472 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1454 fail5: 1473 fail5:
1455 falcon_remove_spi_devices(efx);
1456 efx_nic_free_buffer(efx, &efx->irq_status); 1474 efx_nic_free_buffer(efx, &efx->irq_status);
1457 fail4: 1475 fail4:
1458 fail3: 1476 fail3:
@@ -1606,10 +1624,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
1606 BUG_ON(rc); 1624 BUG_ON(rc);
1607 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1625 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1608 1626
1609 falcon_remove_spi_devices(efx);
1610 efx_nic_free_buffer(efx, &efx->irq_status); 1627 efx_nic_free_buffer(efx, &efx->irq_status);
1611 1628
1612 falcon_reset_hw(efx, RESET_TYPE_ALL); 1629 __falcon_reset_hw(efx, RESET_TYPE_ALL);
1613 1630
1614 /* Release the second function after the reset */ 1631 /* Release the second function after the reset */
1615 if (nic_data->pci_dev2) { 1632 if (nic_data->pci_dev2) {
@@ -1720,6 +1737,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1720 .reset = falcon_reset_hw, 1737 .reset = falcon_reset_hw,
1721 .probe_port = falcon_probe_port, 1738 .probe_port = falcon_probe_port,
1722 .remove_port = falcon_remove_port, 1739 .remove_port = falcon_remove_port,
1740 .handle_global_event = falcon_handle_global_event,
1723 .prepare_flush = falcon_prepare_flush, 1741 .prepare_flush = falcon_prepare_flush,
1724 .update_stats = falcon_update_nic_stats, 1742 .update_stats = falcon_update_nic_stats,
1725 .start_stats = falcon_start_nic_stats, 1743 .start_stats = falcon_start_nic_stats,
@@ -1760,6 +1778,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1760 .reset = falcon_reset_hw, 1778 .reset = falcon_reset_hw,
1761 .probe_port = falcon_probe_port, 1779 .probe_port = falcon_probe_port,
1762 .remove_port = falcon_remove_port, 1780 .remove_port = falcon_remove_port,
1781 .handle_global_event = falcon_handle_global_event,
1763 .prepare_flush = falcon_prepare_flush, 1782 .prepare_flush = falcon_prepare_flush,
1764 .update_stats = falcon_update_nic_stats, 1783 .update_stats = falcon_update_nic_stats,
1765 .start_stats = falcon_start_nic_stats, 1784 .start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b5a477..2dd16f0b3ced 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -13,8 +13,6 @@
13#include "phy.h" 13#include "phy.h"
14#include "efx.h" 14#include "efx.h"
15#include "nic.h" 15#include "nic.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h" 16#include "workarounds.h"
19 17
20/* Macros for unpacking the board revision */ 18/* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
30#define FALCON_BOARD_SFN4112F 0x52 28#define FALCON_BOARD_SFN4112F 0x52
31 29
32/* Board temperature is about 15°C above ambient when air flow is 30/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */ 31 * limited. The maximum acceptable ambient temperature varies
32 * depending on the PHY specifications but the critical temperature
33 * above which we should shut down to avoid damage is 80°C. */
34#define FALCON_BOARD_TEMP_BIAS 15 34#define FALCON_BOARD_TEMP_BIAS 15
35#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
35 36
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature 37/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000 38 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */ 39 * should aim to keep this well below 100°C.' */
40#define FALCON_JUNC_TEMP_MIN 0
39#define FALCON_JUNC_TEMP_MAX 90 41#define FALCON_JUNC_TEMP_MAX 90
42#define FALCON_JUNC_TEMP_CRIT 125
40 43
41/***************************************************************************** 44/*****************************************************************************
42 * Support for LM87 sensor chip used on several boards 45 * Support for LM87 sensor chip used on several boards
43 */ 46 */
47#define LM87_REG_TEMP_HW_INT_LOCK 0x13
48#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
49#define LM87_REG_TEMP_HW_INT 0x17
50#define LM87_REG_TEMP_HW_EXT 0x18
51#define LM87_REG_TEMP_EXT1 0x26
52#define LM87_REG_TEMP_INT 0x27
44#define LM87_REG_ALARMS1 0x41 53#define LM87_REG_ALARMS1 0x41
45#define LM87_REG_ALARMS2 0x42 54#define LM87_REG_ALARMS2 0x42
46#define LM87_IN_LIMITS(nr, _min, _max) \ 55#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
57 66
58#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) 67#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
59 68
69static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
70{
71 while (*reg_values) {
72 u8 reg = *reg_values++;
73 u8 value = *reg_values++;
74 int rc = i2c_smbus_write_byte_data(client, reg, value);
75 if (rc)
76 return rc;
77 }
78 return 0;
79}
80
81static const u8 falcon_lm87_common_regs[] = {
82 LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
83 LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
84 LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
85 LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
86 LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
87 0
88};
89
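efx_poke_lm87() treats its argument as a flat byte table of (register, value) pairs terminated by a register number of 0, so board-specific limits and the common limits above can share one writer. A hypothetical board table and its use; the register numbers and limits below are placeholders, not values from this patch:

        static const u8 my_board_lm87_regs[] = {        /* hypothetical */
                0x2b, 0x99,     /* e.g. an input high limit */
                0x2c, 0x7c,     /* e.g. an input low limit */
                0               /* terminator */
        };

        /* efx_init_lm87() below then applies both tables in turn:
         *      efx_poke_lm87(client, my_board_lm87_regs);
         *      efx_poke_lm87(client, falcon_lm87_common_regs);
         */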
60static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
61 const u8 *reg_values) 91 const u8 *reg_values)
62{ 92{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
67 if (!client) 97 if (!client)
68 return -EIO; 98 return -EIO;
69 99
70 while (*reg_values) { 100 /* Read-to-clear alarm/interrupt status */
71 u8 reg = *reg_values++; 101 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
72 u8 value = *reg_values++; 102 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
73 rc = i2c_smbus_write_byte_data(client, reg, value); 103
74 if (rc) 104 rc = efx_poke_lm87(client, reg_values);
75 goto err; 105 if (rc)
76 } 106 goto err;
107 rc = efx_poke_lm87(client, falcon_lm87_common_regs);
108 if (rc)
109 goto err;
77 110
78 board->hwmon_client = client; 111 board->hwmon_client = client;
79 return 0; 112 return 0;
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
91static int efx_check_lm87(struct efx_nic *efx, unsigned mask) 124static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
92{ 125{
93 struct i2c_client *client = falcon_board(efx)->hwmon_client; 126 struct i2c_client *client = falcon_board(efx)->hwmon_client;
94 s32 alarms1, alarms2; 127 bool temp_crit, elec_fault, is_failure;
128 u16 alarms;
129 s32 reg;
95 130
96 /* If link is up then do not monitor temperature */ 131 /* If link is up then do not monitor temperature */
97 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) 132 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
98 return 0; 133 return 0;
99 134
100 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); 135 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
101 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); 136 if (reg < 0)
102 if (alarms1 < 0) 137 return reg;
103 return alarms1; 138 alarms = reg;
104 if (alarms2 < 0) 139 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
105 return alarms2; 140 if (reg < 0)
106 alarms1 &= mask; 141 return reg;
107 alarms2 &= mask >> 8; 142 alarms |= reg << 8;
108 if (alarms1 || alarms2) { 143 alarms &= mask;
144
145 temp_crit = false;
146 if (alarms & LM87_ALARM_TEMP_INT) {
147 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
148 if (reg < 0)
149 return reg;
150 if (reg > FALCON_BOARD_TEMP_CRIT)
151 temp_crit = true;
152 }
153 if (alarms & LM87_ALARM_TEMP_EXT1) {
154 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
155 if (reg < 0)
156 return reg;
157 if (reg > FALCON_JUNC_TEMP_CRIT)
158 temp_crit = true;
159 }
160 elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
161 is_failure = temp_crit || elec_fault;
162
163 if (alarms)
109 netif_err(efx, hw, efx->net_dev, 164 netif_err(efx, hw, efx->net_dev,
110 "LM87 detected a hardware failure (status %02x:%02x)" 165 "LM87 detected a hardware %s (status %02x:%02x)"
111 "%s%s%s\n", 166 "%s%s%s%s\n",
112 alarms1, alarms2, 167 is_failure ? "failure" : "problem",
113 (alarms1 & LM87_ALARM_TEMP_INT) ? 168 alarms & 0xff, alarms >> 8,
169 (alarms & LM87_ALARM_TEMP_INT) ?
114 "; board is overheating" : "", 170 "; board is overheating" : "",
115 (alarms1 & LM87_ALARM_TEMP_EXT1) ? 171 (alarms & LM87_ALARM_TEMP_EXT1) ?
116 "; controller is overheating" : "", 172 "; controller is overheating" : "",
117 (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1) 173 temp_crit ? "; reached critical temperature" : "",
118 || alarms2) ? 174 elec_fault ? "; electrical fault" : "");
119 "; electrical fault" : "");
120 return -ERANGE;
121 }
122 175
123 return 0; 176 return is_failure ? -ERANGE : 0;
124} 177}
125 178
126#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
@@ -325,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
325 new_mode = old_mode & ~PHY_MODE_SPECIAL; 378 new_mode = old_mode & ~PHY_MODE_SPECIAL;
326 else 379 else
327 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
328 if (old_mode == new_mode) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
329 err = 0; 382 err = 0;
330 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
331 err = -EBUSY; 384 err = -EBUSY;
@@ -362,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
362 415
363static int sfe4001_check_hw(struct efx_nic *efx) 416static int sfe4001_check_hw(struct efx_nic *efx)
364{ 417{
418 struct falcon_nic_data *nic_data = efx->nic_data;
365 s32 status; 419 s32 status;
366 420
367 /* If XAUI link is up then do not monitor */ 421 /* If XAUI link is up then do not monitor */
368 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) 422 if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
369 return 0; 423 return 0;
370 424
371 /* Check the powered status of the PHY. Lack of power implies that 425 /* Check the powered status of the PHY. Lack of power implies that
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595ebb5b..b49e84394641 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -16,7 +16,6 @@
16#include "io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h"
20#include "workarounds.h" 19#include "workarounds.h"
21 20
22/************************************************************************** 21/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
88 87
89static void falcon_ack_status_intr(struct efx_nic *efx) 88static void falcon_ack_status_intr(struct efx_nic *efx)
90{ 89{
90 struct falcon_nic_data *nic_data = efx->nic_data;
91 efx_oword_t reg; 91 efx_oword_t reg;
92 92
93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
99 99
100 /* We can only use this interrupt to signal the negative edge of 100 /* We can only use this interrupt to signal the negative edge of
101 * xaui_align [we have to poll the positive edge]. */ 101 * xaui_align [we have to poll the positive edge]. */
102 if (efx->xmac_poll_required) 102 if (nic_data->xmac_poll_required)
103 return; 103 return;
104 104
105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); 105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
277 277
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 278static int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 279{
280 struct falcon_nic_data *nic_data = efx->nic_data;
281
280 falcon_reconfigure_xgxs_core(efx); 282 falcon_reconfigure_xgxs_core(efx);
281 falcon_reconfigure_xmac_core(efx); 283 falcon_reconfigure_xmac_core(efx);
282 284
283 falcon_reconfigure_mac_wrapper(efx); 285 falcon_reconfigure_mac_wrapper(efx);
284 286
285 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 287 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
286 falcon_ack_status_intr(efx); 288 falcon_ack_status_intr(efx);
287 289
288 return 0; 290 return 0;
@@ -350,11 +352,13 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
350 352
351void falcon_poll_xmac(struct efx_nic *efx) 353void falcon_poll_xmac(struct efx_nic *efx)
352{ 354{
355 struct falcon_nic_data *nic_data = efx->nic_data;
356
353 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || 357 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
354 !efx->xmac_poll_required) 358 !nic_data->xmac_poll_required)
355 return; 359 return;
356 360
357 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
358 falcon_ack_status_intr(efx); 362 falcon_ack_status_intr(efx);
359} 363}
360 364
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b910..d4722c41c4ce 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -7,6 +7,7 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9 9
10#include <linux/in.h>
10#include "efx.h" 11#include "efx.h"
11#include "filter.h" 12#include "filter.h"
12#include "io.h" 13#include "io.h"
@@ -26,19 +27,26 @@
26 */ 27 */
27#define FILTER_CTL_SRCH_MAX 200 28#define FILTER_CTL_SRCH_MAX 200
28 29
30enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC,
33 EFX_FILTER_TABLE_COUNT,
34};
35
29struct efx_filter_table { 36struct efx_filter_table {
37 enum efx_filter_table_id id;
30 u32 offset; /* address of table relative to BAR */ 38 u32 offset; /* address of table relative to BAR */
31 unsigned size; /* number of entries */ 39 unsigned size; /* number of entries */
32 unsigned step; /* step between entries */ 40 unsigned step; /* step between entries */
33 unsigned used; /* number currently used */ 41 unsigned used; /* number currently used */
34 unsigned long *used_bitmap; 42 unsigned long *used_bitmap;
35 struct efx_filter_spec *spec; 43 struct efx_filter_spec *spec;
44 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
36}; 45};
37 46
38struct efx_filter_state { 47struct efx_filter_state {
39 spinlock_t lock; 48 spinlock_t lock;
40 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
41 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
42}; 50};
43 51
44/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
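The polynomial named in the context line above, x^16 + x^3 + 1, corresponds to a Galois LFSR with taps at bits 3 and 0. One step of that LFSR, as a standalone sketch (illustrative only; the driver's efx_filter_hash()/efx_filter_increment() are not reproduced here):

        /* One left-shift Galois LFSR step for x^16 + x^3 + 1.
         * Illustrative; not the driver's exact hash routine. */
        static u16 lfsr16_step(u16 state)
        {
                u16 msb = state >> 15;

                state <<= 1;                    /* multiply by x */
                if (msb)
                        state ^= 0x0009;        /* reduce mod x^16 + x^3 + 1 */
                return state;
        }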
@@ -65,68 +73,203 @@ static u16 efx_filter_increment(u32 key)
65} 73}
66 74
67static enum efx_filter_table_id 75static enum efx_filter_table_id
68efx_filter_type_table_id(enum efx_filter_type type) 76efx_filter_spec_table_id(const struct efx_filter_spec *spec)
77{
78 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
79 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
80 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
81 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
82 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
83 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
84 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
85 return spec->type >> 2;
86}
87
88static struct efx_filter_table *
89efx_filter_spec_table(struct efx_filter_state *state,
90 const struct efx_filter_spec *spec)
69{ 91{
70 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2)); 92 if (spec->type == EFX_FILTER_UNSPEC)
71 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2)); 93 return NULL;
72 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2)); 94 else
73 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2)); 95 return &state->table[efx_filter_spec_table_id(spec)];
74 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
75 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
76 return type >> 2;
77} 96}
78 97
79static void 98static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
80efx_filter_table_reset_search_depth(struct efx_filter_state *state,
81 enum efx_filter_table_id table_id)
82{ 99{
83 memset(state->search_depth + (table_id << 2), 0, 100 memset(table->search_depth, 0, sizeof(table->search_depth));
84 sizeof(state->search_depth[0]) << 2);
85} 101}
86 102
87static void efx_filter_push_rx_limits(struct efx_nic *efx) 103static void efx_filter_push_rx_limits(struct efx_nic *efx)
88{ 104{
89 struct efx_filter_state *state = efx->filter_state; 105 struct efx_filter_state *state = efx->filter_state;
106 struct efx_filter_table *table;
90 efx_oword_t filter_ctl; 107 efx_oword_t filter_ctl;
91 108
92 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 109 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
93 110
111 table = &state->table[EFX_FILTER_TABLE_RX_IP];
94 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, 112 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
95 state->search_depth[EFX_FILTER_RX_TCP_FULL] + 113 table->search_depth[EFX_FILTER_TCP_FULL] +
96 FILTER_CTL_SRCH_FUDGE_FULL); 114 FILTER_CTL_SRCH_FUDGE_FULL);
97 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, 115 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
98 state->search_depth[EFX_FILTER_RX_TCP_WILD] + 116 table->search_depth[EFX_FILTER_TCP_WILD] +
99 FILTER_CTL_SRCH_FUDGE_WILD); 117 FILTER_CTL_SRCH_FUDGE_WILD);
100 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, 118 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
101 state->search_depth[EFX_FILTER_RX_UDP_FULL] + 119 table->search_depth[EFX_FILTER_UDP_FULL] +
102 FILTER_CTL_SRCH_FUDGE_FULL); 120 FILTER_CTL_SRCH_FUDGE_FULL);
103 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, 121 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
104 state->search_depth[EFX_FILTER_RX_UDP_WILD] + 122 table->search_depth[EFX_FILTER_UDP_WILD] +
105 FILTER_CTL_SRCH_FUDGE_WILD); 123 FILTER_CTL_SRCH_FUDGE_WILD);
106 124
107 if (state->table[EFX_FILTER_TABLE_RX_MAC].size) { 125 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
126 if (table->size) {
108 EFX_SET_OWORD_FIELD( 127 EFX_SET_OWORD_FIELD(
109 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 128 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
110 state->search_depth[EFX_FILTER_RX_MAC_FULL] + 129 table->search_depth[EFX_FILTER_MAC_FULL] +
111 FILTER_CTL_SRCH_FUDGE_FULL); 130 FILTER_CTL_SRCH_FUDGE_FULL);
112 EFX_SET_OWORD_FIELD( 131 EFX_SET_OWORD_FIELD(
113 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, 132 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
114 state->search_depth[EFX_FILTER_RX_MAC_WILD] + 133 table->search_depth[EFX_FILTER_MAC_WILD] +
115 FILTER_CTL_SRCH_FUDGE_WILD); 134 FILTER_CTL_SRCH_FUDGE_WILD);
116 } 135 }
117 136
118 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 137 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
119} 138}
120 139
140static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
141 __be32 host1, __be16 port1,
142 __be32 host2, __be16 port2)
143{
144 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
145 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
146 spec->data[2] = ntohl(host2);
147}
148
149/**
150 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
151 * @spec: Specification to initialise
152 * @proto: Transport layer protocol number
153 * @host: Local host address (network byte order)
154 * @port: Local port (network byte order)
155 */
156int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
157 __be32 host, __be16 port)
158{
159 __be32 host1;
160 __be16 port1;
161
162 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
163
164 /* This cannot currently be combined with other filtering */
165 if (spec->type != EFX_FILTER_UNSPEC)
166 return -EPROTONOSUPPORT;
167
168 if (port == 0)
169 return -EINVAL;
170
171 switch (proto) {
172 case IPPROTO_TCP:
173 spec->type = EFX_FILTER_TCP_WILD;
174 break;
175 case IPPROTO_UDP:
176 spec->type = EFX_FILTER_UDP_WILD;
177 break;
178 default:
179 return -EPROTONOSUPPORT;
180 }
181
182 /* Filter is constructed in terms of source and destination,
183 * with the odd wrinkle that the ports are swapped in a UDP
184 * wildcard filter. We need to convert from local and remote
185 * (= zero for wildcard) addresses.
186 */
187 host1 = 0;
188 if (proto != IPPROTO_UDP) {
189 port1 = 0;
190 } else {
191 port1 = port;
192 port = 0;
193 }
194
195 __efx_filter_set_ipv4(spec, host1, port1, host, port);
196 return 0;
197}
198
199/**
200 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
201 * @spec: Specification to initialise
202 * @proto: Transport layer protocol number
203 * @host: Local host address (network byte order)
204 * @port: Local port (network byte order)
205 * @rhost: Remote host address (network byte order)
206 * @rport: Remote port (network byte order)
207 */
208int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
209 __be32 host, __be16 port,
210 __be32 rhost, __be16 rport)
211{
212 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
213
214 /* This cannot currently be combined with other filtering */
215 if (spec->type != EFX_FILTER_UNSPEC)
216 return -EPROTONOSUPPORT;
217
218 if (port == 0 || rport == 0)
219 return -EINVAL;
220
221 switch (proto) {
222 case IPPROTO_TCP:
223 spec->type = EFX_FILTER_TCP_FULL;
224 break;
225 case IPPROTO_UDP:
226 spec->type = EFX_FILTER_UDP_FULL;
227 break;
228 default:
229 return -EPROTONOSUPPORT;
230 }
231
232 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
233 return 0;
234}
235
236/**
237 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
238 * @spec: Specification to initialise
239 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
240 * @addr: Local Ethernet MAC address
241 */
242int efx_filter_set_eth_local(struct efx_filter_spec *spec,
243 u16 vid, const u8 *addr)
244{
245 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
246
247 /* This cannot currently be combined with other filtering */
248 if (spec->type != EFX_FILTER_UNSPEC)
249 return -EPROTONOSUPPORT;
250
251 if (vid == EFX_FILTER_VID_UNSPEC) {
252 spec->type = EFX_FILTER_MAC_WILD;
253 spec->data[0] = 0;
254 } else {
255 spec->type = EFX_FILTER_MAC_FULL;
256 spec->data[0] = vid;
257 }
258
259 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
260 spec->data[2] = addr[0] << 8 | addr[1];
261 return 0;
262}
263
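Together with efx_filter_init_rx(), added to filter.h later in this patch, these setters replace the old per-type inline helpers: a caller initialises a spec and then constrains it, and the setter chooses the filter type. A sketch of steering TCP port 80 on one local address to RX queue 0; the address and the EFX_FILTER_PRI_MANUAL priority name are illustrative assumptions, and error handling is trimmed:

        struct efx_filter_spec spec;
        int rc;

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
        rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
                                       htonl(0xc0a80001),      /* 192.168.0.1 */
                                       htons(80));
        if (rc == 0)
                rc = efx_filter_insert_filter(efx, &spec, true);
        /* rc >= 0 is now a filter ID rather than a bare table index */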
121/* Build a filter entry and return its n-tuple key. */ 264/* Build a filter entry and return its n-tuple key. */
122static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) 265static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
123{ 266{
124 u32 data3; 267 u32 data3;
125 268
126 switch (efx_filter_type_table_id(spec->type)) { 269 switch (efx_filter_spec_table_id(spec)) {
127 case EFX_FILTER_TABLE_RX_IP: { 270 case EFX_FILTER_TABLE_RX_IP: {
128 bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL || 271 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
129 spec->type == EFX_FILTER_RX_UDP_WILD); 272 spec->type == EFX_FILTER_UDP_WILD);
130 EFX_POPULATE_OWORD_7( 273 EFX_POPULATE_OWORD_7(
131 *filter, 274 *filter,
132 FRF_BZ_RSS_EN, 275 FRF_BZ_RSS_EN,
@@ -143,7 +286,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
143 } 286 }
144 287
145 case EFX_FILTER_TABLE_RX_MAC: { 288 case EFX_FILTER_TABLE_RX_MAC: {
146 bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD; 289 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
147 EFX_POPULATE_OWORD_8( 290 EFX_POPULATE_OWORD_8(
148 *filter, 291 *filter,
149 FRF_CZ_RMFT_RSS_EN, 292 FRF_CZ_RMFT_RSS_EN,
@@ -206,6 +349,14 @@ found:
206 return filter_idx; 349 return filter_idx;
207} 350}
208 351
352/* Construct/deconstruct external filter IDs */
353
354static inline int
355efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
356{
357 return table_id << 16 | index;
358}
359
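efx_filter_insert_filter() now returns this composite ID instead of a raw table index; decoding is the obvious inverse. Hypothetical helpers, not part of the patch:

        static inline enum efx_filter_table_id
        my_filter_id_table_id(int filter_id)            /* hypothetical */
        {
                return filter_id >> 16;
        }

        static inline unsigned my_filter_id_index(int filter_id) /* hypothetical */
        {
                return filter_id & 0xffff;
        }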
209/** 360/**
210 * efx_filter_insert_filter - add or replace a filter 361 * efx_filter_insert_filter - add or replace a filter
211 * @efx: NIC in which to insert the filter 362 * @efx: NIC in which to insert the filter
@@ -213,30 +364,28 @@ found:
213 * @replace: Flag for whether the specified filter may replace a filter 364 * @replace: Flag for whether the specified filter may replace a filter
214 * with an identical match expression and equal or lower priority 365 * with an identical match expression and equal or lower priority
215 * 366 *
216 * On success, return the filter index within its table. 367 * On success, return the filter ID.
217 * On failure, return a negative error code. 368 * On failure, return a negative error code.
218 */ 369 */
219int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, 370int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
220 bool replace) 371 bool replace)
221{ 372{
222 struct efx_filter_state *state = efx->filter_state; 373 struct efx_filter_state *state = efx->filter_state;
223 enum efx_filter_table_id table_id = 374 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
224 efx_filter_type_table_id(spec->type);
225 struct efx_filter_table *table = &state->table[table_id];
226 struct efx_filter_spec *saved_spec; 375 struct efx_filter_spec *saved_spec;
227 efx_oword_t filter; 376 efx_oword_t filter;
228 int filter_idx, depth; 377 int filter_idx, depth;
229 u32 key; 378 u32 key;
230 int rc; 379 int rc;
231 380
232 if (table->size == 0) 381 if (!table || table->size == 0)
233 return -EINVAL; 382 return -EINVAL;
234 383
235 key = efx_filter_build(&filter, spec); 384 key = efx_filter_build(&filter, spec);
236 385
237 netif_vdbg(efx, hw, efx->net_dev, 386 netif_vdbg(efx, hw, efx->net_dev,
238 "%s: type %d search_depth=%d", __func__, spec->type, 387 "%s: type %d search_depth=%d", __func__, spec->type,
239 state->search_depth[spec->type]); 388 table->search_depth[spec->type]);
240 389
241 spin_lock_bh(&state->lock); 390 spin_lock_bh(&state->lock);
242 391
@@ -263,8 +412,8 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
263 } 412 }
264 *saved_spec = *spec; 413 *saved_spec = *spec;
265 414
266 if (state->search_depth[spec->type] < depth) { 415 if (table->search_depth[spec->type] < depth) {
267 state->search_depth[spec->type] = depth; 416 table->search_depth[spec->type] = depth;
268 efx_filter_push_rx_limits(efx); 417 efx_filter_push_rx_limits(efx);
269 } 418 }
270 419
@@ -273,6 +422,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
273 netif_vdbg(efx, hw, efx->net_dev, 422 netif_vdbg(efx, hw, efx->net_dev,
274 "%s: filter type %d index %d rxq %u set", 423 "%s: filter type %d index %d rxq %u set",
275 __func__, spec->type, filter_idx, spec->dmaq_id); 424 __func__, spec->type, filter_idx, spec->dmaq_id);
425 rc = efx_filter_make_id(table->id, filter_idx);
276 426
277out: 427out:
278 spin_unlock_bh(&state->lock); 428 spin_unlock_bh(&state->lock);
@@ -306,15 +456,16 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
306int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) 456int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
307{ 457{
308 struct efx_filter_state *state = efx->filter_state; 458 struct efx_filter_state *state = efx->filter_state;
309 enum efx_filter_table_id table_id = 459 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
310 efx_filter_type_table_id(spec->type);
311 struct efx_filter_table *table = &state->table[table_id];
312 struct efx_filter_spec *saved_spec; 460 struct efx_filter_spec *saved_spec;
313 efx_oword_t filter; 461 efx_oword_t filter;
314 int filter_idx, depth; 462 int filter_idx, depth;
315 u32 key; 463 u32 key;
316 int rc; 464 int rc;
317 465
466 if (!table)
467 return -EINVAL;
468
318 key = efx_filter_build(&filter, spec); 469 key = efx_filter_build(&filter, spec);
319 470
320 spin_lock_bh(&state->lock); 471 spin_lock_bh(&state->lock);
@@ -332,7 +483,7 @@ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
332 483
333 efx_filter_table_clear_entry(efx, table, filter_idx); 484 efx_filter_table_clear_entry(efx, table, filter_idx);
334 if (table->used == 0) 485 if (table->used == 0)
335 efx_filter_table_reset_search_depth(state, table_id); 486 efx_filter_table_reset_search_depth(table);
336 rc = 0; 487 rc = 0;
337 488
338out: 489out:
@@ -340,15 +491,9 @@ out:
340 return rc; 491 return rc;
341} 492}
342 493
343/** 494static void efx_filter_table_clear(struct efx_nic *efx,
344 * efx_filter_table_clear - remove filters from a table by priority 495 enum efx_filter_table_id table_id,
345 * @efx: NIC from which to remove the filters 496 enum efx_filter_priority priority)
346 * @table_id: Table from which to remove the filters
347 * @priority: Maximum priority to remove
348 */
349void efx_filter_table_clear(struct efx_nic *efx,
350 enum efx_filter_table_id table_id,
351 enum efx_filter_priority priority)
352{ 497{
353 struct efx_filter_state *state = efx->filter_state; 498 struct efx_filter_state *state = efx->filter_state;
354 struct efx_filter_table *table = &state->table[table_id]; 499 struct efx_filter_table *table = &state->table[table_id];
@@ -360,11 +505,22 @@ void efx_filter_table_clear(struct efx_nic *efx,
360 if (table->spec[filter_idx].priority <= priority) 505 if (table->spec[filter_idx].priority <= priority)
361 efx_filter_table_clear_entry(efx, table, filter_idx); 506 efx_filter_table_clear_entry(efx, table, filter_idx);
362 if (table->used == 0) 507 if (table->used == 0)
363 efx_filter_table_reset_search_depth(state, table_id); 508 efx_filter_table_reset_search_depth(table);
364 509
365 spin_unlock_bh(&state->lock); 510 spin_unlock_bh(&state->lock);
366} 511}
367 512
513/**
514 * efx_filter_clear_rx - remove RX filters by priority
515 * @efx: NIC from which to remove the filters
516 * @priority: Maximum priority to remove
517 */
518void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
519{
520 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
521 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
522}
523
 368/* Restore filter state after reset */ 524
369void efx_restore_filters(struct efx_nic *efx) 525void efx_restore_filters(struct efx_nic *efx)
370{ 526{
@@ -407,6 +563,7 @@ int efx_probe_filters(struct efx_nic *efx)
407 563
408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
409 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 565 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP;
410 table->offset = FR_BZ_RX_FILTER_TBL0; 567 table->offset = FR_BZ_RX_FILTER_TBL0;
411 table->size = FR_BZ_RX_FILTER_TBL0_ROWS; 568 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
412 table->step = FR_BZ_RX_FILTER_TBL0_STEP; 569 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
@@ -414,6 +571,7 @@ int efx_probe_filters(struct efx_nic *efx)
414 571
415 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 572 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
416 table = &state->table[EFX_FILTER_TABLE_RX_MAC]; 573 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
574 table->id = EFX_FILTER_TABLE_RX_MAC;
417 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; 575 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
418 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; 576 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
419 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; 577 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
@@ -428,10 +586,9 @@ int efx_probe_filters(struct efx_nic *efx)
428 GFP_KERNEL); 586 GFP_KERNEL);
429 if (!table->used_bitmap) 587 if (!table->used_bitmap)
430 goto fail; 588 goto fail;
431 table->spec = vmalloc(table->size * sizeof(*table->spec)); 589 table->spec = vzalloc(table->size * sizeof(*table->spec));
432 if (!table->spec) 590 if (!table->spec)
433 goto fail; 591 goto fail;
434 memset(table->spec, 0, table->size * sizeof(*table->spec));
435 } 592 }
436 593
437 return 0; 594 return 0;
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
index a53319ded79c..872f2132a496 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/sfc/filter.h
@@ -12,31 +12,27 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14 14
15enum efx_filter_table_id {
16 EFX_FILTER_TABLE_RX_IP = 0,
17 EFX_FILTER_TABLE_RX_MAC,
18 EFX_FILTER_TABLE_COUNT,
19};
20
21/** 15/**
22 * enum efx_filter_type - type of hardware filter 16 * enum efx_filter_type - type of hardware filter
23 * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple 17 * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
24 * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port) 18 * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
25 * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple 19 * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
26 * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port) 20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
27 * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID 21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
28 * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address 22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
23 * @EFX_FILTER_UNSPEC: Match type is unspecified
29 * 24 *
30 * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types. 25 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
31 */ 26 */
32enum efx_filter_type { 27enum efx_filter_type {
33 EFX_FILTER_RX_TCP_FULL = 0, 28 EFX_FILTER_TCP_FULL = 0,
34 EFX_FILTER_RX_TCP_WILD, 29 EFX_FILTER_TCP_WILD,
35 EFX_FILTER_RX_UDP_FULL, 30 EFX_FILTER_UDP_FULL,
36 EFX_FILTER_RX_UDP_WILD, 31 EFX_FILTER_UDP_WILD,
37 EFX_FILTER_RX_MAC_FULL = 4, 32 EFX_FILTER_MAC_FULL = 4,
38 EFX_FILTER_RX_MAC_WILD, 33 EFX_FILTER_MAC_WILD,
39 EFX_FILTER_TYPE_COUNT, 34 EFX_FILTER_TYPE_COUNT, /* number of specific types */
35 EFX_FILTER_UNSPEC = 0xf,
40}; 36};
41 37
42/** 38/**
@@ -63,13 +59,13 @@ enum efx_filter_priority {
63 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override 59 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
64 * any IP filter that matches the same packet. By default, IP 60 * any IP filter that matches the same packet. By default, IP
65 * filters take precedence. 61 * filters take precedence.
66 * 62 * @EFX_FILTER_FLAG_RX: Filter is for RX
67 * Currently, no flags are defined for TX filters.
68 */ 63 */
69enum efx_filter_flags { 64enum efx_filter_flags {
70 EFX_FILTER_FLAG_RX_RSS = 0x01, 65 EFX_FILTER_FLAG_RX_RSS = 0x01,
71 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 66 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
72 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, 67 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
68 EFX_FILTER_FLAG_RX = 0x08,
73}; 69};
74 70
75/** 71/**
@@ -91,99 +87,26 @@ struct efx_filter_spec {
91 u32 data[3]; 87 u32 data[3];
92}; 88};
93 89
94/** 90static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
95 * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match 91 enum efx_filter_priority priority,
96 * @spec: Specification to initialise 92 enum efx_filter_flags flags,
97 * @shost: Source host address (host byte order) 93 unsigned rxq_id)
98 * @sport: Source port (host byte order)
99 * @dhost: Destination host address (host byte order)
100 * @dport: Destination port (host byte order)
101 */
102static inline void
103efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
104 u32 shost, u16 sport, u32 dhost, u16 dport)
105{
106 spec->type = EFX_FILTER_RX_TCP_FULL;
107 spec->data[0] = sport | shost << 16;
108 spec->data[1] = dport << 16 | shost >> 16;
109 spec->data[2] = dhost;
110}
111
112/**
113 * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
114 * @spec: Specification to initialise
115 * @dhost: Destination host address (host byte order)
116 * @dport: Destination port (host byte order)
117 */
118static inline void
119efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
120{
121 spec->type = EFX_FILTER_RX_TCP_WILD;
122 spec->data[0] = 0;
123 spec->data[1] = dport << 16;
124 spec->data[2] = dhost;
125}
126
127/**
128 * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
129 * @spec: Specification to initialise
130 * @shost: Source host address (host byte order)
131 * @sport: Source port (host byte order)
132 * @dhost: Destination host address (host byte order)
133 * @dport: Destination port (host byte order)
134 */
135static inline void
136efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
137 u32 shost, u16 sport, u32 dhost, u16 dport)
138{
139 spec->type = EFX_FILTER_RX_UDP_FULL;
140 spec->data[0] = sport | shost << 16;
141 spec->data[1] = dport << 16 | shost >> 16;
142 spec->data[2] = dhost;
143}
144
145/**
146 * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
147 * @spec: Specification to initialise
148 * @dhost: Destination host address (host byte order)
149 * @dport: Destination port (host byte order)
150 */
151static inline void
152efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
153{ 94{
154 spec->type = EFX_FILTER_RX_UDP_WILD; 95 spec->type = EFX_FILTER_UNSPEC;
155 spec->data[0] = dport; 96 spec->priority = priority;
156 spec->data[1] = 0; 97 spec->flags = EFX_FILTER_FLAG_RX | flags;
157 spec->data[2] = dhost; 98 spec->dmaq_id = rxq_id;
158} 99}
159 100
160/** 101extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
161 * efx_filter_set_rx_mac_full - specify RX filter with MAC full match 102 __be32 host, __be16 port);
162 * @spec: Specification to initialise 103extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
163 * @vid: VLAN ID 104 __be32 host, __be16 port,
164 * @addr: Destination MAC address 105 __be32 rhost, __be16 rport);
165 */ 106extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
166static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec, 107 u16 vid, const u8 *addr);
167 u16 vid, const u8 *addr) 108enum {
168{ 109 EFX_FILTER_VID_UNSPEC = 0xffff,
169 spec->type = EFX_FILTER_RX_MAC_FULL; 110};
170 spec->data[0] = vid;
171 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
172 spec->data[2] = addr[0] << 8 | addr[1];
173}
174
175/**
176 * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
177 * @spec: Specification to initialise
178 * @addr: Destination MAC address
179 */
180static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
181 const u8 *addr)
182{
183 spec->type = EFX_FILTER_RX_MAC_WILD;
184 spec->data[0] = 0;
185 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
186 spec->data[2] = addr[0] << 8 | addr[1];
187}
188 111
189#endif /* EFX_FILTER_H */ 112#endif /* EFX_FILTER_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe87437..6da4ae20a039 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -22,28 +22,39 @@
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy:
24 * 24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes 25 * Most CSRs are 128-bit (oword) and therefore cannot be read or
26 * which necessitates locking. 26 * written atomically. Access from the host is buffered by the Bus
27 * Under normal operation few writes to NIC registers are made and these 27 * Interface Unit (BIU). Whenever the host reads from the lowest
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special 28 * address of such a register, or from the address of a different such
29 * cased to allow 4-byte (hence lockless) accesses. 29 * register, the BIU latches the register's value. Subsequent reads
30 * from higher addresses of the same register will read the latched
31 * value. Whenever the host writes part of such a register, the BIU
32 * collects the written value and does not write to the underlying
33 * register until all 4 dwords have been written. A similar buffering
34 * scheme applies to host access to the NIC's 64-bit SRAM.
30 * 35 *
31 * It *is* safe to write to these 4-byte registers in the middle of an 36 * Access to different CSRs and 64-bit SRAM words must be serialised,
32 * access to an 8-byte or 16-byte register. We therefore use a 37 * since interleaved access can result in lost writes or lost
33 * spinlock to protect accesses to the larger registers, but no locks 38 * information from read-to-clear fields. We use efx_nic::biu_lock
34 * for the 4-byte registers. 39 * for this. (We could use separate locks for read and write, but
40 * this is not normally a performance bottleneck.)
35 * 41 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2 42 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
37 * due to the way the 16byte registers are "collected" in the BIU. 43 * 128-bit but are special-cased in the BIU to avoid the need for
44 * locking in the host:
38 * 45 *
39 * We also lock when carrying out reads, to ensure consistency of the 46 * - They are write-only.
40 * data (made possible since the BIU reads all 128 bits into a cache). 47 * - The semantics of writing to these registers are such that
41 * Reads are very rare, so this isn't a significant performance 48 * replacing the low 96 bits with zero does not affect functionality.
42 * impact. (Most data transferred from NIC to host is DMAed directly 49 * - If the host writes to the last dword address of such a register
43 * into host memory). 50 * (i.e. the high 32 bits) the underlying register will always be
44 * 51 * written. If the collector does not hold values for the low 96
45 * I/O BAR access uses locks for both reads and writes (but is only provided 52 * bits of the register, they will be written as zero. Writing to
46 * for testing purposes). 53 * the last qword does not have this effect and must not be done.
54 * - If the host writes to the address of any other part of such a
55 * register while the collector already holds values for some other
56 * register, the write is discarded and the collector maintains its
57 * current state.
47 */ 58 */
48 59
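In practice the rules above reduce to this: every oword access goes through efx_reado()/efx_writeo(), which hold efx->biu_lock for the duration of a single register access, and any read-modify-write sequence needs serialisation at a higher level on top of that. The usual pattern, with illustrative register and field names:

        efx_oword_t reg;

        efx_reado(efx, &reg, FR_AB_SOME_REG);           /* hypothetical CSR */
        EFX_SET_OWORD_FIELD(reg, FRF_AB_SOME_FIELD, 1); /* hypothetical field */
        efx_writeo(efx, &reg, FR_AB_SOME_REG);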
49#if BITS_PER_LONG == 64 60#if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
72 return (__force __le32)__raw_readl(efx->membase + reg); 83 return (__force __le32)__raw_readl(efx->membase + reg);
73} 84}
74 85
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */ 86/* Write a normal 128-bit CSR, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, 87static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg) 88 unsigned int reg)
78{ 89{
@@ -85,21 +96,18 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
85 spin_lock_irqsave(&efx->biu_lock, flags); 96 spin_lock_irqsave(&efx->biu_lock, flags);
86#ifdef EFX_USE_QWORD_IO 97#ifdef EFX_USE_QWORD_IO
87 _efx_writeq(efx, value->u64[0], reg + 0); 98 _efx_writeq(efx, value->u64[0], reg + 0);
88 wmb();
89 _efx_writeq(efx, value->u64[1], reg + 8); 99 _efx_writeq(efx, value->u64[1], reg + 8);
90#else 100#else
91 _efx_writed(efx, value->u32[0], reg + 0); 101 _efx_writed(efx, value->u32[0], reg + 0);
92 _efx_writed(efx, value->u32[1], reg + 4); 102 _efx_writed(efx, value->u32[1], reg + 4);
93 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
94 wmb();
95 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
96#endif 105#endif
97 mmiowb(); 106 mmiowb();
98 spin_unlock_irqrestore(&efx->biu_lock, flags); 107 spin_unlock_irqrestore(&efx->biu_lock, flags);
99} 108}
100 109
101/* Write an 8-byte NIC SRAM entry through the supplied mapping, 110/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
102 * locking as appropriate. */
103static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, 111static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
104 efx_qword_t *value, unsigned int index) 112 efx_qword_t *value, unsigned int index)
105{ 113{
@@ -115,36 +123,25 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
115 __raw_writeq((__force u64)value->u64[0], membase + addr); 123 __raw_writeq((__force u64)value->u64[0], membase + addr);
116#else 124#else
117 __raw_writel((__force u32)value->u32[0], membase + addr); 125 __raw_writel((__force u32)value->u32[0], membase + addr);
118 wmb();
119 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 126 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
120#endif 127#endif
121 mmiowb(); 128 mmiowb();
122 spin_unlock_irqrestore(&efx->biu_lock, flags); 129 spin_unlock_irqrestore(&efx->biu_lock, flags);
123} 130}
124 131
125/* Write dword to NIC register that allows partial writes 132/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
126 *
127 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
128 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
129 * for lockless writes.
130 */
131static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, 133static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
132 unsigned int reg) 134 unsigned int reg)
133{ 135{
134 netif_vdbg(efx, hw, efx->net_dev, 136 netif_vdbg(efx, hw, efx->net_dev,
135 "writing partial register %x with "EFX_DWORD_FMT"\n", 137 "writing register %x with "EFX_DWORD_FMT"\n",
136 reg, EFX_DWORD_VAL(*value)); 138 reg, EFX_DWORD_VAL(*value));
137 139
138 /* No lock required */ 140 /* No lock required */
139 _efx_writed(efx, value->u32[0], reg); 141 _efx_writed(efx, value->u32[0], reg);
140} 142}
141 143
142/* Read from a NIC register 144/* Read a 128-bit CSR, locking as appropriate. */
143 *
144 * This reads an entire 16-byte register in one go, locking as
145 * appropriate. It is essential to read the first dword first, as this
146 * prompts the NIC to load the current value into the shadow register.
147 */
148static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, 145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
149 unsigned int reg) 146 unsigned int reg)
150{ 147{
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 149
153 spin_lock_irqsave(&efx->biu_lock, flags); 150 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 151 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
156 value->u32[1] = _efx_readd(efx, reg + 4); 152 value->u32[1] = _efx_readd(efx, reg + 4);
157 value->u32[2] = _efx_readd(efx, reg + 8); 153 value->u32[2] = _efx_readd(efx, reg + 8);
158 value->u32[3] = _efx_readd(efx, reg + 12); 154 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -163,8 +159,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
163 EFX_OWORD_VAL(*value)); 159 EFX_OWORD_VAL(*value));
164} 160}
165 161
166/* Read an 8-byte SRAM entry through supplied mapping, 162/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
167 * locking as appropriate. */
168static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, 163static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
169 efx_qword_t *value, unsigned int index) 164 efx_qword_t *value, unsigned int index)
170{ 165{
@@ -176,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
176 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 171 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
177#else 172#else
178 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 173 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
179 rmb();
180 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 174 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
181#endif 175#endif
182 spin_unlock_irqrestore(&efx->biu_lock, flags); 176 spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -186,7 +180,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
186 addr, EFX_QWORD_VAL(*value)); 180 addr, EFX_QWORD_VAL(*value));
187} 181}
188 182
189/* Read dword from register that allows partial writes (sic) */ 183/* Read a 32-bit CSR or SRAM */
190static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, 184static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
191 unsigned int reg) 185 unsigned int reg)
192{ 186{
@@ -196,28 +190,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
196 reg, EFX_DWORD_VAL(*value)); 190 reg, EFX_DWORD_VAL(*value));
197} 191}
198 192
199/* Write to a register forming part of a table */ 193/* Write a 128-bit CSR forming part of a table */
200static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, 194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
201 unsigned int reg, unsigned int index) 195 unsigned int reg, unsigned int index)
202{ 196{
203 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); 197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
204} 198}
205 199
206/* Read to a register forming part of a table */ 200/* Read a 128-bit CSR forming part of a table */
207static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, 201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
208 unsigned int reg, unsigned int index) 202 unsigned int reg, unsigned int index)
209{ 203{
210 efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); 204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
211} 205}
212 206
213/* Write to a dword register forming part of a table */ 207/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
214static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, 208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
215 unsigned int reg, unsigned int index) 209 unsigned int reg, unsigned int index)
216{ 210{
217 efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); 211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
218} 212}
219 213
220/* Read from a dword register forming part of a table */ 214/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
221static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, 215static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
222 unsigned int reg, unsigned int index) 216 unsigned int reg, unsigned int index)
223{ 217{
@@ -231,29 +225,54 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
231#define EFX_PAGED_REG(page, reg) \ 225#define EFX_PAGED_REG(page, reg) \
232 ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) 226 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
233 227
234/* As for efx_writeo(), but for a page-mapped register. */ 228/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
235static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, 229static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
236 unsigned int reg, unsigned int page) 230 unsigned int reg, unsigned int page)
237{ 231{
238 efx_writeo(efx, value, EFX_PAGED_REG(page, reg)); 232 reg = EFX_PAGED_REG(page, reg);
239}
240 233
241/* As for efx_writed(), but for a page-mapped register. */ 234 netif_vdbg(efx, hw, efx->net_dev,
242static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value, 235 "writing register %x with " EFX_OWORD_FMT "\n", reg,
243 unsigned int reg, unsigned int page) 236 EFX_OWORD_VAL(*value));
237
238#ifdef EFX_USE_QWORD_IO
239 _efx_writeq(efx, value->u64[0], reg + 0);
240#else
241 _efx_writed(efx, value->u32[0], reg + 0);
242 _efx_writed(efx, value->u32[1], reg + 4);
243#endif
244 _efx_writed(efx, value->u32[2], reg + 8);
245 _efx_writed(efx, value->u32[3], reg + 12);
246}
247#define efx_writeo_page(efx, value, reg, page) \
248 _efx_writeo_page(efx, value, \
249 reg + \
250 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
251 page)
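
BUILD_BUG_ON_ZERO() makes the page-register whitelist a compile-time property: it evaluates to 0 when the condition is false and breaks the build otherwise, so adding it to reg costs nothing at run time. A reduced sketch of the mechanism (0x830/0xa10 are the offsets whitelisted above; the write helper is a stand-in):

/* Kernel idiom: a negative bit-field width is a compile error,
 * so this evaluates to 0 or refuses to build. */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))

static void __write_doorbell(void __iomem *base, u64 v,
			     unsigned int reg, unsigned int page)
{
	/* ...dword writes into the paged register... */
}

/* Callers may only name the two known page-mapped doorbells;
 * any other constant offset fails at compile time. */
#define write_doorbell(base, v, reg, page)				\
	__write_doorbell(base, v,					\
			 (reg) + BUILD_BUG_ON_ZERO((reg) != 0x830 &&	\
						   (reg) != 0xa10),	\
			 page)
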
252
253/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
254 * RX_DESC_UPD or TX_DESC_UPD)
255 */
256static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
257 unsigned int reg, unsigned int page)
244{ 258{
245 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 259 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
246} 260}
247 261#define efx_writed_page(efx, value, reg, page) \
248/* Write dword to page-mapped register with an extra lock. 262 _efx_writed_page(efx, value, \
249 * 263 reg + \
250 * As for efx_writed_page(), but for a register that suffers from 264 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
251 * SFC bug 3181. Take out a lock so the BIU collector cannot be 265 && (reg) != 0xa1c), \
252 * confused. */ 266 page)
253static inline void efx_writed_page_locked(struct efx_nic *efx, 267
254 efx_dword_t *value, 268/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
255 unsigned int reg, 269 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
256 unsigned int page) 270 * collector register.
271 */
272static inline void _efx_writed_page_locked(struct efx_nic *efx,
273 efx_dword_t *value,
274 unsigned int reg,
275 unsigned int page)
257{ 276{
258 unsigned long flags __attribute__ ((unused)); 277 unsigned long flags __attribute__ ((unused));
259 278
@@ -265,5 +284,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx,
265 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 284 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
266 } 285 }
267} 286}
287#define efx_writed_page_locked(efx, value, reg, page) \
288 _efx_writed_page_locked(efx, value, \
289 reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
290 page)
268 291
269#endif /* EFX_IO_H */ 292#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910c2ce7..b716e827b291 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -381,7 +381,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
381 -rc); 381 -rc);
382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
383 } else 383 } else
384 netif_err(efx, hw, efx->net_dev, 384 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 386 cmd, (int)inlen, -rc);
387 } 387 }
@@ -463,6 +463,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
463 if (mcdi->mode == MCDI_MODE_EVENTS) { 463 if (mcdi->mode == MCDI_MODE_EVENTS) {
464 mcdi->resprc = rc; 464 mcdi->resprc = rc;
465 mcdi->resplen = 0; 465 mcdi->resplen = 0;
466 ++mcdi->credits;
466 } 467 }
467 } else 468 } else
468 /* Nobody was waiting for an MCDI request, so trigger a reset */ 469 /* Nobody was waiting for an MCDI request, so trigger a reset */
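
The extra ++mcdi->credits above banks one "expected stale event" when a completion is synthesised for a caller waiting in event mode: the MC may still emit the real completion event later, and the event handler should swallow it rather than warn. A sketch of the consuming side, under that reading (not the driver's exact code):

struct mcdi_state {
	unsigned int seqno;
	unsigned int credits;
};

/* On an MCDI completion event whose sequence number doesn't match
 * the current request: if a credit was banked when the completion
 * was faked (e.g. across an MC reboot), consume it silently. */
static void mcdi_event_complete(struct mcdi_state *mcdi, unsigned int seqno)
{
	if (seqno != mcdi->seqno) {
		if (mcdi->credits)
			--mcdi->credits;	/* stale, expected */
		else
			pr_err("MCDI: unsolicited completion 0x%x\n", seqno);
		return;
	}
	/* ...normal completion path... */
}
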
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742446b1..0e97eed663c6 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -16,7 +16,6 @@
16#include "phy.h" 16#include "phy.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h"
20#include "nic.h" 19#include "nic.h"
21#include "selftest.h" 20#include "selftest.h"
22 21
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d946020429..56b0266b441f 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,7 +15,6 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "workarounds.h" 17#include "workarounds.h"
18#include "nic.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
21{ 20{
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701f..d38627448c22 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
321 struct efx_mtd *efx_mtd = mtd->priv; 321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi; 322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx; 323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
324 int rc; 325 int rc;
325 326
326 rc = mutex_lock_interruptible(&efx->spi_lock); 327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
327 if (rc) 328 if (rc)
328 return rc; 329 return rc;
329 rc = falcon_spi_read(efx, spi, part->offset + start, len, 330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
330 retlen, buffer); 331 retlen, buffer);
331 mutex_unlock(&efx->spi_lock); 332 mutex_unlock(&nic_data->spi_lock);
332 return rc; 333 return rc;
333} 334}
334 335
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
338 struct efx_mtd *efx_mtd = mtd->priv; 339 struct efx_mtd *efx_mtd = mtd->priv;
339 struct efx_nic *efx = efx_mtd->efx; 340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
340 int rc; 342 int rc;
341 343
342 rc = mutex_lock_interruptible(&efx->spi_lock); 344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
343 if (rc) 345 if (rc)
344 return rc; 346 return rc;
345 rc = efx_spi_erase(part, part->offset + start, len); 347 rc = efx_spi_erase(part, part->offset + start, len);
346 mutex_unlock(&efx->spi_lock); 348 mutex_unlock(&nic_data->spi_lock);
347 return rc; 349 return rc;
348} 350}
349 351
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
354 struct efx_mtd *efx_mtd = mtd->priv; 356 struct efx_mtd *efx_mtd = mtd->priv;
355 const struct efx_spi_device *spi = efx_mtd->spi; 357 const struct efx_spi_device *spi = efx_mtd->spi;
356 struct efx_nic *efx = efx_mtd->efx; 358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
357 int rc; 360 int rc;
358 361
359 rc = mutex_lock_interruptible(&efx->spi_lock); 362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
360 if (rc) 363 if (rc)
361 return rc; 364 return rc;
362 rc = falcon_spi_write(efx, spi, part->offset + start, len, 365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
363 retlen, buffer); 366 retlen, buffer);
364 mutex_unlock(&efx->spi_lock); 367 mutex_unlock(&nic_data->spi_lock);
365 return rc; 368 return rc;
366} 369}
367 370
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
370 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
371 struct efx_mtd *efx_mtd = mtd->priv; 374 struct efx_mtd *efx_mtd = mtd->priv;
372 struct efx_nic *efx = efx_mtd->efx; 375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
373 int rc; 377 int rc;
374 378
375 mutex_lock(&efx->spi_lock); 379 mutex_lock(&nic_data->spi_lock);
376 rc = efx_spi_slow_wait(part, true); 380 rc = efx_spi_slow_wait(part, true);
377 mutex_unlock(&efx->spi_lock); 381 mutex_unlock(&nic_data->spi_lock);
378 return rc; 382 return rc;
379} 383}
380 384
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
387 391
388static int falcon_mtd_probe(struct efx_nic *efx) 392static int falcon_mtd_probe(struct efx_nic *efx)
389{ 393{
390 struct efx_spi_device *spi = efx->spi_flash; 394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
391 struct efx_mtd *efx_mtd; 396 struct efx_mtd *efx_mtd;
392 int rc; 397 int rc = -ENODEV;
393 398
394 ASSERT_RTNL(); 399 ASSERT_RTNL();
395 400
396 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) 401 spi = &nic_data->spi_flash;
397 return -ENODEV; 402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
398 403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
399 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), 404 GFP_KERNEL);
400 GFP_KERNEL); 405 if (!efx_mtd)
401 if (!efx_mtd) 406 return -ENOMEM;
402 return -ENOMEM; 407
403 408 efx_mtd->spi = spi;
404 efx_mtd->spi = spi; 409 efx_mtd->name = "flash";
405 efx_mtd->name = "flash"; 410 efx_mtd->ops = &falcon_mtd_ops;
406 efx_mtd->ops = &falcon_mtd_ops; 411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
407 426
408 efx_mtd->n_parts = 1; 427 spi = &nic_data->spi_eeprom;
409 efx_mtd->part[0].mtd.type = MTD_NORFLASH; 428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
410 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; 429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
411 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; 430 GFP_KERNEL);
412 efx_mtd->part[0].mtd.erasesize = spi->erase_size; 431 if (!efx_mtd)
413 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; 432 return -ENOMEM;
414 efx_mtd->part[0].type_name = "sfc_flash_bootrom"; 433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
415 454
416 rc = efx_mtd_probe_device(efx, efx_mtd);
417 if (rc)
418 kfree(efx_mtd);
419 return rc; 455 return rc;
420} 456}
421 457
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d73b52..76f2fb197f0a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -142,6 +142,12 @@ struct efx_tx_buffer {
142 * @flushed: Used when handling queue flushing 142 * @flushed: Used when handling queue flushing
143 * @read_count: Current read pointer. 143 * @read_count: Current read pointer.
144 * This is the number of buffers that have been removed from both rings. 144 * This is the number of buffers that have been removed from both rings.
145 * @old_write_count: The value of @write_count when last checked.
146 * This is here for performance reasons. The xmit path will
147 * only get the up-to-date value of @write_count if this
148 * variable indicates that the queue is empty. This is to
149 * avoid cache-line ping-pong between the xmit path and the
150 * completion path.
145 * @stopped: Stopped count. 151 * @stopped: Stopped count.
146 * Set if this TX queue is currently stopping its port. 152 * Set if this TX queue is currently stopping its port.
147 * @insert_count: Current insert pointer 153 * @insert_count: Current insert pointer
@@ -163,6 +169,10 @@ struct efx_tx_buffer {
163 * @tso_long_headers: Number of packets with headers too long for standard 169 * @tso_long_headers: Number of packets with headers too long for standard
164 * blocks 170 * blocks
165 * @tso_packets: Number of packets via the TSO xmit path 171 * @tso_packets: Number of packets via the TSO xmit path
172 * @pushes: Number of times the TX push feature has been used
173 * @empty_read_count: If the completion path has seen the queue as empty
174 * and the transmission path has not yet checked this, the value of
175 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
166 */ 176 */
167struct efx_tx_queue { 177struct efx_tx_queue {
168 /* Members which don't change on the fast path */ 178 /* Members which don't change on the fast path */
@@ -177,6 +187,7 @@ struct efx_tx_queue {
177 187
178 /* Members used mainly on the completion path */ 188 /* Members used mainly on the completion path */
179 unsigned int read_count ____cacheline_aligned_in_smp; 189 unsigned int read_count ____cacheline_aligned_in_smp;
190 unsigned int old_write_count;
180 int stopped; 191 int stopped;
181 192
182 /* Members used only on the xmit path */ 193 /* Members used only on the xmit path */
@@ -187,6 +198,11 @@ struct efx_tx_queue {
187 unsigned int tso_bursts; 198 unsigned int tso_bursts;
188 unsigned int tso_long_headers; 199 unsigned int tso_long_headers;
189 unsigned int tso_packets; 200 unsigned int tso_packets;
201 unsigned int pushes;
202
203 /* Members shared between paths and sometimes updated */
204 unsigned int empty_read_count ____cacheline_aligned_in_smp;
205#define EFX_EMPTY_COUNT_VALID 0x80000000
190}; 206};
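
The pair of fields documented above implements a cheap one-way handshake: the completion path publishes "the queue looked empty at read_count X" into @empty_read_count (on its own cache line), and the xmit path consumes that token to decide whether a descriptor push is safe, without reading @read_count itself on the fast path. A reduced sketch with hypothetical names (ACCESS_ONCE as in the tx.c hunks below); note the token also encodes which count it matched, so a racing push is rejected:

#define EMPTY_VALID 0x80000000u		/* distinguishes 0 from "empty at 0" */

struct txq {
	unsigned int read_count, write_count, old_write_count;
	unsigned int empty_read_count;	/* 0, or read_count | EMPTY_VALID */
};

/* Completion path: publish emptiness only when it is next seen. */
static void note_if_empty(struct txq *q)
{
	if ((int)(q->read_count - q->old_write_count) >= 0) {
		q->old_write_count = ACCESS_ONCE(q->write_count);
		if (q->read_count == q->old_write_count) {
			smp_mb();	/* reap before publish */
			q->empty_read_count = q->read_count | EMPTY_VALID;
		}
	}
}

/* Xmit path: push only if the queue was empty where we began writing. */
static int may_push(struct txq *q, unsigned int old_write_count)
{
	unsigned int erc = ACCESS_ONCE(q->empty_read_count);

	if (erc == 0)
		return 0;
	q->empty_read_count = 0;
	return ((erc ^ old_write_count) & ~EMPTY_VALID) == 0;
}
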
191 207
192/** 208/**
@@ -621,14 +637,13 @@ struct efx_filter_state;
621 * @pci_dev: The PCI device 637 * @pci_dev: The PCI device
622 * @type: Controller type attributes 638 * @type: Controller type attributes
623 * @legacy_irq: IRQ number 639 * @legacy_irq: IRQ number
640 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
624 * @workqueue: Workqueue for port reconfigures and the HW monitor. 641 * @workqueue: Workqueue for port reconfigures and the HW monitor.
625 * Work items do not hold and must not acquire RTNL. 642 * Work items do not hold and must not acquire RTNL.
626 * @workqueue_name: Name of workqueue 643 * @workqueue_name: Name of workqueue
627 * @reset_work: Scheduled reset workitem 644 * @reset_work: Scheduled reset workitem
628 * @monitor_work: Hardware monitor workitem
629 * @membase_phys: Memory BAR value as physical address 645 * @membase_phys: Memory BAR value as physical address
630 * @membase: Memory BAR value 646 * @membase: Memory BAR value
631 * @biu_lock: BIU (bus interface unit) lock
632 * @interrupt_mode: Interrupt mode 647 * @interrupt_mode: Interrupt mode
633 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 648 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
634 * @irq_rx_moderation: IRQ moderation time for RX event queues 649 * @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -647,23 +662,14 @@ struct efx_filter_state;
647 * @n_tx_channels: Number of channels used for TX 662 * @n_tx_channels: Number of channels used for TX
648 * @rx_buffer_len: RX buffer length 663 * @rx_buffer_len: RX buffer length
649 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 664 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
665 * @rx_hash_key: Toeplitz hash key for RSS
650 * @rx_indir_table: Indirection table for RSS 666 * @rx_indir_table: Indirection table for RSS
651 * @int_error_count: Number of internal errors seen recently 667 * @int_error_count: Number of internal errors seen recently
652 * @int_error_expire: Time at which error count will be expired 668 * @int_error_expire: Time at which error count will be expired
653 * @irq_status: Interrupt status buffer 669 * @irq_status: Interrupt status buffer
654 * @last_irq_cpu: Last CPU to handle interrupt.
655 * This register is written with the SMP processor ID whenever an
656 * interrupt is handled. It is used by efx_nic_test_interrupt()
657 * to verify that an interrupt has occurred.
658 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 670 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
659 * @fatal_irq_level: IRQ level (bit number) used for serious errors 671 * @fatal_irq_level: IRQ level (bit number) used for serious errors
660 * @spi_flash: SPI flash device
661 * This field will be %NULL if no flash device is present (or for Siena).
662 * @spi_eeprom: SPI EEPROM device
663 * This field will be %NULL if no EEPROM device is present (or for Siena).
664 * @spi_lock: SPI bus lock
665 * @mtd_list: List of MTDs attached to the NIC 672 * @mtd_list: List of MTDs attached to the NIC
666 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 667 * @nic_data: Hardware dependent state 673 * @nic_data: Hardware dependent state
668 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 674 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
669 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 675 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
@@ -676,21 +682,14 @@ struct efx_filter_state;
676 * @port_initialized: Port initialized? 682 * @port_initialized: Port initialized?
677 * @net_dev: Operating system network device. Consider holding the rtnl lock 683 * @net_dev: Operating system network device. Consider holding the rtnl lock
678 * @rx_checksum_enabled: RX checksumming enabled 684 * @rx_checksum_enabled: RX checksumming enabled
679 * @mac_stats: MAC statistics. These include all statistics the MACs
680 * can provide. Generic code converts these into a standard
681 * &struct net_device_stats.
682 * @stats_buffer: DMA buffer for statistics 685 * @stats_buffer: DMA buffer for statistics
683 * @stats_lock: Statistics update lock. Serialises statistics fetches
684 * @mac_op: MAC interface 686 * @mac_op: MAC interface
685 * @mac_address: Permanent MAC address
686 * @phy_type: PHY type 687 * @phy_type: PHY type
687 * @mdio_lock: MDIO lock
688 * @phy_op: PHY interface 688 * @phy_op: PHY interface
689 * @phy_data: PHY private data (including PHY-specific stats) 689 * @phy_data: PHY private data (including PHY-specific stats)
690 * @mdio: PHY MDIO interface 690 * @mdio: PHY MDIO interface
691 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 691 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
692 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 692 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
693 * @xmac_poll_required: XMAC link state needs polling
694 * @link_advertising: Autonegotiation advertising flags 693 * @link_advertising: Autonegotiation advertising flags
695 * @link_state: Current state of the link 694 * @link_state: Current state of the link
696 * @n_link_state_changes: Number of times the link has changed state 695 * @n_link_state_changes: Number of times the link has changed state
@@ -701,21 +700,34 @@ struct efx_filter_state;
701 * @loopback_mode: Loopback status 700 * @loopback_mode: Loopback status
702 * @loopback_modes: Supported loopback mode bitmask 701 * @loopback_modes: Supported loopback mode bitmask
703 * @loopback_selftest: Offline self-test private state 702 * @loopback_selftest: Offline self-test private state
703 * @monitor_work: Hardware monitor workitem
704 * @biu_lock: BIU (bus interface unit) lock
705 * @last_irq_cpu: Last CPU to handle interrupt.
706 * This register is written with the SMP processor ID whenever an
707 * interrupt is handled. It is used by efx_nic_test_interrupt()
708 * to verify that an interrupt has occurred.
709 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
710 * @mac_stats: MAC statistics. These include all statistics the MACs
711 * can provide. Generic code converts these into a standard
712 * &struct net_device_stats.
713 * @stats_lock: Statistics update lock. Serialises statistics fetches
704 * 714 *
705 * This is stored in the private area of the &struct net_device. 715 * This is stored in the private area of the &struct net_device.
706 */ 716 */
707struct efx_nic { 717struct efx_nic {
718 /* The following fields should be written very rarely */
719
708 char name[IFNAMSIZ]; 720 char name[IFNAMSIZ];
709 struct pci_dev *pci_dev; 721 struct pci_dev *pci_dev;
710 const struct efx_nic_type *type; 722 const struct efx_nic_type *type;
711 int legacy_irq; 723 int legacy_irq;
724 bool legacy_irq_enabled;
712 struct workqueue_struct *workqueue; 725 struct workqueue_struct *workqueue;
713 char workqueue_name[16]; 726 char workqueue_name[16];
714 struct work_struct reset_work; 727 struct work_struct reset_work;
715 struct delayed_work monitor_work;
716 resource_size_t membase_phys; 728 resource_size_t membase_phys;
717 void __iomem *membase; 729 void __iomem *membase;
718 spinlock_t biu_lock; 730
719 enum efx_int_mode interrupt_mode; 731 enum efx_int_mode interrupt_mode;
720 bool irq_rx_adaptive; 732 bool irq_rx_adaptive;
721 unsigned int irq_rx_moderation; 733 unsigned int irq_rx_moderation;
@@ -742,19 +754,13 @@ struct efx_nic {
742 unsigned long int_error_expire; 754 unsigned long int_error_expire;
743 755
744 struct efx_buffer irq_status; 756 struct efx_buffer irq_status;
745 volatile signed int last_irq_cpu;
746 unsigned irq_zero_count; 757 unsigned irq_zero_count;
747 unsigned fatal_irq_level; 758 unsigned fatal_irq_level;
748 759
749 struct efx_spi_device *spi_flash;
750 struct efx_spi_device *spi_eeprom;
751 struct mutex spi_lock;
752#ifdef CONFIG_SFC_MTD 760#ifdef CONFIG_SFC_MTD
753 struct list_head mtd_list; 761 struct list_head mtd_list;
754#endif 762#endif
755 763
756 unsigned n_rx_nodesc_drop_cnt;
757
758 void *nic_data; 764 void *nic_data;
759 765
760 struct mutex mac_lock; 766 struct mutex mac_lock;
@@ -766,22 +772,17 @@ struct efx_nic {
766 struct net_device *net_dev; 772 struct net_device *net_dev;
767 bool rx_checksum_enabled; 773 bool rx_checksum_enabled;
768 774
769 struct efx_mac_stats mac_stats;
770 struct efx_buffer stats_buffer; 775 struct efx_buffer stats_buffer;
771 spinlock_t stats_lock;
772 776
773 struct efx_mac_operations *mac_op; 777 struct efx_mac_operations *mac_op;
774 unsigned char mac_address[ETH_ALEN];
775 778
776 unsigned int phy_type; 779 unsigned int phy_type;
777 struct mutex mdio_lock;
778 struct efx_phy_operations *phy_op; 780 struct efx_phy_operations *phy_op;
779 void *phy_data; 781 void *phy_data;
780 struct mdio_if_info mdio; 782 struct mdio_if_info mdio;
781 unsigned int mdio_bus; 783 unsigned int mdio_bus;
782 enum efx_phy_mode phy_mode; 784 enum efx_phy_mode phy_mode;
783 785
784 bool xmac_poll_required;
785 u32 link_advertising; 786 u32 link_advertising;
786 struct efx_link_state link_state; 787 struct efx_link_state link_state;
787 unsigned int n_link_state_changes; 788 unsigned int n_link_state_changes;
@@ -797,6 +798,15 @@ struct efx_nic {
797 void *loopback_selftest; 798 void *loopback_selftest;
798 799
799 struct efx_filter_state *filter_state; 800 struct efx_filter_state *filter_state;
801
802 /* The following fields may be written more often */
803
804 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
805 spinlock_t biu_lock;
806 volatile signed int last_irq_cpu;
807 unsigned n_rx_nodesc_drop_cnt;
808 struct efx_mac_stats mac_stats;
809 spinlock_t stats_lock;
800}; 810};
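
The reshuffle above groups struct efx_nic by write frequency: probe-time, read-mostly fields first, then the fields dirtied on every interrupt or stats fetch, started on a fresh cache line so their stores don't bounce the read-mostly lines between CPUs. The shape of the idiom:

struct dev_state {
	/* Read-mostly: set at probe, read on fast paths. */
	void __iomem *membase;
	unsigned int irq;

	/* Written frequently: isolated on its own cache line so
	 * stores here don't invalidate the read-mostly data above. */
	spinlock_t lock ____cacheline_aligned_in_smp;
	unsigned long event_count;
};
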
801 811
802static inline int efx_dev_registered(struct efx_nic *efx) 812static inline int efx_dev_registered(struct efx_nic *efx)
@@ -829,6 +839,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
829 * be called while the controller is uninitialised. 839 * be called while the controller is uninitialised.
830 * @probe_port: Probe the MAC and PHY 840 * @probe_port: Probe the MAC and PHY
831 * @remove_port: Free resources allocated by probe_port() 841 * @remove_port: Free resources allocated by probe_port()
842 * @handle_global_event: Handle a "global" event (may be %NULL)
832 * @prepare_flush: Prepare the hardware for flushing the DMA queues 843 * @prepare_flush: Prepare the hardware for flushing the DMA queues
833 * @update_stats: Update statistics not provided by event handling 844 * @update_stats: Update statistics not provided by event handling
834 * @start_stats: Start the regular fetching of statistics 845 * @start_stats: Start the regular fetching of statistics
@@ -873,6 +884,7 @@ struct efx_nic_type {
873 int (*reset)(struct efx_nic *efx, enum reset_type method); 884 int (*reset)(struct efx_nic *efx, enum reset_type method);
874 int (*probe_port)(struct efx_nic *efx); 885 int (*probe_port)(struct efx_nic *efx);
875 void (*remove_port)(struct efx_nic *efx); 886 void (*remove_port)(struct efx_nic *efx);
887 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
876 void (*prepare_flush)(struct efx_nic *efx); 888 void (*prepare_flush)(struct efx_nic *efx);
877 void (*update_stats)(struct efx_nic *efx); 889 void (*update_stats)(struct efx_nic *efx);
878 void (*start_stats)(struct efx_nic *efx); 890 void (*start_stats)(struct efx_nic *efx);
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9a4244..da386599ab68 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -362,6 +362,35 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
363} 363}
364 364
365/* Write pointer and first descriptor for TX descriptor ring */
366static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
367 const efx_qword_t *txd)
368{
369 unsigned write_ptr;
370 efx_oword_t reg;
371
372 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
373 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
374
375 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
376 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
377 FRF_AZ_TX_DESC_WPTR, write_ptr);
378 reg.qword[0] = *txd;
379 efx_writeo_page(tx_queue->efx, &reg,
380 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
381}
382
383static inline bool
384efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
385{
386 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
387
388 if (empty_read_count == 0)
389 return false;
390
391 tx_queue->empty_read_count = 0;
392 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
393}
365 394
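
efx_push_tx_desc() exploits the page-mapped doorbell: the first descriptor rides along with the write-pointer update, so an idle queue can start DMA without first fetching the descriptor from host memory. It is only correct when the hardware has nothing in flight, which is what efx_may_push_tx_desc() derives from the empty_read_count token. A condensed sketch with an illustrative register layout (the real field positions come from the FRF_AZ_* definitions):

/* Illustrative doorbell: bits 0..63 first descriptor,
 * bits 96..111 write pointer, bit 127 PUSH_CMD. */
static void push_first_desc(void __iomem *doorbell, u64 desc,
			    unsigned int write_ptr)
{
	writel((u32)desc, doorbell + 0);
	writel((u32)(desc >> 32), doorbell + 4);
	writel(write_ptr & 0xffff, doorbell + 8);
	writel(1u << 31, doorbell + 12);	/* PUSH_CMD */
}
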
366/* For each entry inserted into the software descriptor ring, create a 395/* For each entry inserted into the software descriptor ring, create a
367 * descriptor in the hardware TX descriptor ring (in host memory), and 396 * descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,6 +402,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
373 struct efx_tx_buffer *buffer; 402 struct efx_tx_buffer *buffer;
374 efx_qword_t *txd; 403 efx_qword_t *txd;
375 unsigned write_ptr; 404 unsigned write_ptr;
405 unsigned old_write_count = tx_queue->write_count;
376 406
377 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 407 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
378 408
@@ -391,7 +421,15 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
391 } while (tx_queue->write_count != tx_queue->insert_count); 421 } while (tx_queue->write_count != tx_queue->insert_count);
392 422
393 wmb(); /* Ensure descriptors are written before they are fetched */ 423 wmb(); /* Ensure descriptors are written before they are fetched */
394 efx_notify_tx_desc(tx_queue); 424
425 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
426 txd = efx_tx_desc(tx_queue,
427 old_write_count & tx_queue->ptr_mask);
428 efx_push_tx_desc(tx_queue, txd);
429 ++tx_queue->pushes;
430 } else {
431 efx_notify_tx_desc(tx_queue);
432 }
395} 433}
396 434
397/* Allocate hardware resources for a TX queue */ 435/* Allocate hardware resources for a TX queue */
@@ -894,46 +932,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
894 channel->channel, EFX_QWORD_VAL(*event)); 932 channel->channel, EFX_QWORD_VAL(*event));
895} 933}
896 934
897/* Global events are basically PHY events */
898static void
899efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
900{
901 struct efx_nic *efx = channel->efx;
902 bool handled = false;
903
904 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
905 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
906 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
907 /* Ignored */
908 handled = true;
909 }
910
911 if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
912 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
913 efx->xmac_poll_required = true;
914 handled = true;
915 }
916
917 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
918 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
919 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
920 netif_err(efx, rx_err, efx->net_dev,
921 "channel %d seen global RX_RESET event. Resetting.\n",
922 channel->channel);
923
924 atomic_inc(&efx->rx_reset);
925 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
926 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
927 handled = true;
928 }
929
930 if (!handled)
931 netif_err(efx, hw, efx->net_dev,
932 "channel %d unknown global event "
933 EFX_QWORD_FMT "\n", channel->channel,
934 EFX_QWORD_VAL(*event));
935}
936
937static void 935static void
938efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 936efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
939{ 937{
@@ -1050,15 +1048,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1050 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1048 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1051 efx_handle_generated_event(channel, &event); 1049 efx_handle_generated_event(channel, &event);
1052 break; 1050 break;
1053 case FSE_AZ_EV_CODE_GLOBAL_EV:
1054 efx_handle_global_event(channel, &event);
1055 break;
1056 case FSE_AZ_EV_CODE_DRIVER_EV: 1051 case FSE_AZ_EV_CODE_DRIVER_EV:
1057 efx_handle_driver_event(channel, &event); 1052 efx_handle_driver_event(channel, &event);
1058 break; 1053 break;
1059 case FSE_CZ_EV_CODE_MCDI_EV: 1054 case FSE_CZ_EV_CODE_MCDI_EV:
1060 efx_mcdi_process_event(channel, &event); 1055 efx_mcdi_process_event(channel, &event);
1061 break; 1056 break;
1057 case FSE_AZ_EV_CODE_GLOBAL_EV:
1058 if (efx->type->handle_global_event &&
1059 efx->type->handle_global_event(channel, &event))
1060 break;
1061 /* else fall through */
1062 default: 1062 default:
1063 netif_err(channel->efx, hw, channel->efx->net_dev, 1063 netif_err(channel->efx, hw, channel->efx->net_dev,
1064 "channel %d unknown event type %d (data " 1064 "channel %d unknown event type %d (data "
@@ -1418,6 +1418,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1418 u32 queues; 1418 u32 queues;
1419 int syserr; 1419 int syserr;
1420 1420
1421 /* Could this be ours? If interrupts are disabled then the
1422 * channel state may not be valid.
1423 */
1424 if (!efx->legacy_irq_enabled)
1425 return result;
1426
1421 /* Read the ISR which also ACKs the interrupts */ 1427 /* Read the ISR which also ACKs the interrupts */
1422 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1428 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1423 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1429 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
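
On a shared INTx line the handler may run before the driver's channels exist or while they are being torn down, so the new efx->legacy_irq_enabled test bails out before any channel state is touched (and before the ISR read, which would ACK the interrupt). The general shape of the guard:

static irqreturn_t legacy_irq(int irq, void *dev_id)
{
	struct nic *nic = dev_id;

	/* Shared line: claim nothing unless we are fully set up. */
	if (!nic->irq_enabled)
		return IRQ_NONE;

	if (!read_and_ack_isr(nic))	/* hypothetical helper */
		return IRQ_NONE;	/* interrupt wasn't ours */

	schedule_napi(nic);		/* hypothetical helper */
	return IRQ_HANDLED;
}
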
@@ -1664,7 +1670,7 @@ void efx_nic_init_common(struct efx_nic *efx)
1664 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1665 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1666 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1667 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); 1673 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1668 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1669 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1675 /* Enable SW_EV to inherit in char driver - assume harmless here */
1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1676 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722d..eb0586925b51 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -15,6 +15,7 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "efx.h" 16#include "efx.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "spi.h"
18 19
19/* 20/*
20 * Falcon hardware control 21 * Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
113 * @stats_pending: Is there a pending DMA of MAC statistics. 114 * @stats_pending: Is there a pending DMA of MAC statistics.
114 * @stats_timer: A timer for regularly fetching MAC statistics. 115 * @stats_timer: A timer for regularly fetching MAC statistics.
115 * @stats_dma_done: Pointer to the flag which indicates DMA completion. 116 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
117 * @spi_flash: SPI flash device
118 * @spi_eeprom: SPI EEPROM device
119 * @spi_lock: SPI bus lock
120 * @mdio_lock: MDIO bus lock
121 * @xmac_poll_required: XMAC link state needs polling
116 */ 122 */
117struct falcon_nic_data { 123struct falcon_nic_data {
118 struct pci_dev *pci_dev2; 124 struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
121 bool stats_pending; 127 bool stats_pending;
122 struct timer_list stats_timer; 128 struct timer_list stats_timer;
123 u32 *stats_dma_done; 129 u32 *stats_dma_done;
130 struct efx_spi_device spi_flash;
131 struct efx_spi_device spi_eeprom;
132 struct mutex spi_lock;
133 struct mutex mdio_lock;
134 bool xmac_poll_required;
124}; 135};
125 136
126static inline struct falcon_board *falcon_board(struct efx_nic *efx) 137static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -135,7 +146,6 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 146 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 147 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 148 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
139 */ 149 */
140struct siena_nic_data { 150struct siena_nic_data {
141 u64 fw_version; 151 u64 fw_version;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f3..ea3ae0089315 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -41,6 +41,8 @@
41#define PCS_UC_STATUS_LBN 0 41#define PCS_UC_STATUS_LBN 0
42#define PCS_UC_STATUS_WIDTH 8 42#define PCS_UC_STATUS_WIDTH 8
43#define PCS_UC_STATUS_FW_SAVE 0x20 43#define PCS_UC_STATUS_FW_SAVE 0x20
44#define PMA_PMD_MODE_REG 0xc301
45#define PMA_PMD_RXIN_SEL_LBN 6
44#define PMA_PMD_FTX_CTRL2_REG 0xc309 46#define PMA_PMD_FTX_CTRL2_REG 0xc309
45#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
46#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
282 * slow) reload of the firmware image (the microcontroller's code 284 * slow) reload of the firmware image (the microcontroller's code
283 * memory is not affected by the microcontroller reset). */ 285 * memory is not affected by the microcontroller reset). */
284 efx_mdio_write(efx, 1, 0xc317, 0x00ff); 286 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
287 /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
288 * restart doesn't reset it. We need to do that ourselves. */
289 efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
290 1 << PMA_PMD_RXIN_SEL_LBN, false);
285 efx_mdio_write(efx, 1, 0xc300, 0x0002); 291 efx_mdio_write(efx, 1, 0xc300, 0x0002);
286 msleep(20); 292 msleep(20);
287 293
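
efx_mdio_set_flag() here is a clause-45 read-modify-write used to undo the RXIN polarity inversion that PMA/PMD loopback left behind, since the firmware restart above preserves it. A generic sketch of such a flag helper, assuming paired MDIO read/write primitives (hypothetical names):

/* Set or clear `mask` in MMD `devad`, register `reg`. */
static void mdio_set_flag(struct phy *phy, int devad, u16 reg,
			  u16 mask, int set)
{
	u16 v = mdio_read(phy, devad, reg);	/* hypothetical */

	v = set ? (v | mask) : (v & ~mask);
	mdio_write(phy, devad, reg, v);		/* hypothetical */
}
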
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158e..3925fd621177 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
37 * This driver supports two methods for allocating and using RX buffers: 37 * This driver supports two methods for allocating and using RX buffers:
38 * each RX buffer may be backed by an skb or by an order-n page. 38 * each RX buffer may be backed by an skb or by an order-n page.
39 * 39 *
40 * When LRO is in use then the second method has a lower overhead, 40 * When GRO is in use then the second method has a lower overhead,
41 * since we don't have to allocate then free skbs on reassembled frames. 41 * since we don't have to allocate then free skbs on reassembled frames.
42 * 42 *
43 * Values: 43 * Values:
@@ -50,25 +50,25 @@
50 * 50 *
51 * - Since pushing and popping descriptors are separated by the rx_queue 51 * - Since pushing and popping descriptors are separated by the rx_queue
 52 * size, the watermarks should be ~rxd_size. 52 * size, the watermarks should be ~rxd_size.
53 * - The performance win by using page-based allocation for LRO is less 53 * - The performance win by using page-based allocation for GRO is less
54 * than the performance hit of using page-based allocation of non-LRO, 54 * than the performance hit of using page-based allocation of non-GRO,
55 * so the watermarks should reflect this. 55 * so the watermarks should reflect this.
56 * 56 *
57 * Per channel we maintain a single variable, updated by each channel: 57 * Per channel we maintain a single variable, updated by each channel:
58 * 58 *
59 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO : 59 * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
60 * RX_ALLOC_FACTOR_SKB) 60 * RX_ALLOC_FACTOR_SKB)
61 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which 61 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
62 * limits the hysteresis), and update the allocation strategy: 62 * limits the hysteresis), and update the allocation strategy:
63 * 63 *
64 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 64 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
65 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 65 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
66 */ 66 */
67static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; 67static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
68 68
69#define RX_ALLOC_LEVEL_LRO 0x2000 69#define RX_ALLOC_LEVEL_GRO 0x2000
70#define RX_ALLOC_LEVEL_MAX 0x3000 70#define RX_ALLOC_LEVEL_MAX 0x3000
71#define RX_ALLOC_FACTOR_LRO 1 71#define RX_ALLOC_FACTOR_GRO 1
72#define RX_ALLOC_FACTOR_SKB (-2) 72#define RX_ALLOC_FACTOR_SKB (-2)
73 73
74/* This is the percentage fill level below which new RX descriptors 74/* This is the percentage fill level below which new RX descriptors
@@ -441,19 +441,19 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++; 441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
442} 442}
443 443
444/* Pass a received packet up through the generic LRO stack 444/* Pass a received packet up through the generic GRO stack
445 * 445 *
446 * Handles driverlink veto, and passes the fragment up via 446 * Handles driverlink veto, and passes the fragment up via
447 * the appropriate LRO method 447 * the appropriate GRO method
448 */ 448 */
449static void efx_rx_packet_lro(struct efx_channel *channel, 449static void efx_rx_packet_gro(struct efx_channel *channel,
450 struct efx_rx_buffer *rx_buf, 450 struct efx_rx_buffer *rx_buf,
451 bool checksummed) 451 bool checksummed)
452{ 452{
453 struct napi_struct *napi = &channel->napi_str; 453 struct napi_struct *napi = &channel->napi_str;
454 gro_result_t gro_result; 454 gro_result_t gro_result;
455 455
456 /* Pass the skb/page into the LRO engine */ 456 /* Pass the skb/page into the GRO engine */
457 if (rx_buf->page) { 457 if (rx_buf->page) {
458 struct efx_nic *efx = channel->efx; 458 struct efx_nic *efx = channel->efx;
459 struct page *page = rx_buf->page; 459 struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
499 if (gro_result == GRO_NORMAL) { 499 if (gro_result == GRO_NORMAL) {
500 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 500 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
501 } else if (gro_result != GRO_DROP) { 501 } else if (gro_result != GRO_DROP) {
502 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; 502 channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
503 channel->irq_mod_score += 2; 503 channel->irq_mod_score += 2;
504 } 504 }
505} 505}
@@ -605,7 +605,7 @@ void __efx_rx_packet(struct efx_channel *channel,
605 } 605 }
606 606
607 if (likely(checksummed || rx_buf->page)) { 607 if (likely(checksummed || rx_buf->page)) {
608 efx_rx_packet_lro(channel, rx_buf, checksummed); 608 efx_rx_packet_gro(channel, rx_buf, checksummed);
609 return; 609 return;
610 } 610 }
611 611
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
628{ 628{
629 enum efx_rx_alloc_method method = rx_alloc_method; 629 enum efx_rx_alloc_method method = rx_alloc_method;
630 630
631 /* Only makes sense to use page based allocation if LRO is enabled */ 631 /* Only makes sense to use page based allocation if GRO is enabled */
632 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 632 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
633 method = RX_ALLOC_METHOD_SKB; 633 method = RX_ALLOC_METHOD_SKB;
634 } else if (method == RX_ALLOC_METHOD_AUTO) { 634 } else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
639 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; 639 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
640 640
641 /* Decide on the allocation method */ 641 /* Decide on the allocation method */
642 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ? 642 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
643 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); 643 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
644 } 644 }
645 645
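
Beyond the s/LRO/GRO/ rename, the mechanism is a saturating score: every GRO-merged frame adds 1 (RX_ALLOC_FACTOR_GRO), every frame delivered as a plain skb subtracts 2 (RX_ALLOC_FACTOR_SKB), and the method flips to page-based allocation only above 0x2000, which gives hysteresis against flapping between allocators. The arithmetic in isolation:

#define LEVEL_GRO 0x2000
#define LEVEL_MAX 0x3000

/* Per packet: merged frames raise the score, plain skbs cut it
 * twice as fast, biasing toward the cheaper skb allocator. */
static void score_packet(int *level, int gro_merged)
{
	*level += gro_merged ? 1 : -2;
}

/* Per NAPI poll: clamp to [0, LEVEL_MAX], then decide. */
static int use_page_alloc(int *level)
{
	if (*level < 0)
		*level = 0;
	if (*level > LEVEL_MAX)
		*level = LEVEL_MAX;
	return *level > LEVEL_GRO;	/* 1 => page-based allocation */
}
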
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f58a258..bf8456176443 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
194 194
195static int siena_probe_nvconfig(struct efx_nic *efx) 195static int siena_probe_nvconfig(struct efx_nic *efx)
196{ 196{
197 int rc; 197 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
198
199 rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
200 if (rc)
201 return rc;
202
203 return 0;
204} 198}
205 199
206static int siena_probe_nic(struct efx_nic *efx) 200static int siena_probe_nic(struct efx_nic *efx)
@@ -562,7 +556,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
562 if (nic_data->wol_filter_id != -1) 556 if (nic_data->wol_filter_id != -1)
563 efx_mcdi_wol_filter_remove(efx, 557 efx_mcdi_wol_filter_remove(efx,
564 nic_data->wol_filter_id); 558 nic_data->wol_filter_id);
565 rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, 559 rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
566 &nic_data->wol_filter_id); 560 &nic_data->wol_filter_id);
567 if (rc) 561 if (rc)
568 goto fail; 562 goto fail;
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813a..879b7f6bde3d 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -61,6 +61,11 @@ struct efx_spi_device {
61 unsigned int block_size; 61 unsigned int block_size;
62}; 62};
63 63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
64int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
65 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
66 int address, const void* in, void *out, size_t len); 71 int address, const void* in, void *out, size_t len);
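
With spi_flash/spi_eeprom now embedded in falcon_nic_data instead of pointed to from efx_nic, absence can no longer be a NULL pointer, so efx_spi_present() encodes it as size == 0 (an unprobed device is simply left zeroed). The falcon_mtd_probe() rewrite earlier leans on this to register each partition independently; in miniature (thresholds illustrative):

struct spi_dev { unsigned int size; };	/* zeroed => not fitted */

static inline int spi_present(const struct spi_dev *s)
{
	return s->size != 0;
}

static int probe_partitions(struct spi_dev *flash, struct spi_dev *eeprom)
{
	int n = 0;

	/* Each device registers on its own; an absent or too-small
	 * device is skipped rather than failing the whole probe. */
	if (spi_present(flash) && flash->size > 0x8000)
		n++;	/* ...register boot-ROM partition... */
	if (spi_present(eeprom) && eeprom->size > 0x800)
		n++;	/* ...register boot-config partition... */
	return n ? 0 : -ENODEV;
}
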
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48c96ee..f102912eba91 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -15,9 +15,7 @@
15#include "mdio_10g.h" 15#include "mdio_10g.h"
16#include "nic.h" 16#include "nic.h"
17#include "phy.h" 17#include "phy.h"
18#include "regs.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h"
21 19
22/* We expect these MMDs to be in the package. */ 20/* We expect these MMDs to be in the package. */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 21#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 11726989fe2d..bdb92b4af683 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -240,8 +240,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
240 * of read_count. */ 240 * of read_count. */
241 smp_mb(); 241 smp_mb();
242 tx_queue->old_read_count = 242 tx_queue->old_read_count =
243 *(volatile unsigned *) 243 ACCESS_ONCE(tx_queue->read_count);
244 &tx_queue->read_count;
245 fill_level = (tx_queue->insert_count 244 fill_level = (tx_queue->insert_count
246 - tx_queue->old_read_count); 245 - tx_queue->old_read_count);
247 q_space = efx->txq_entries - 1 - fill_level; 246 q_space = efx->txq_entries - 1 - fill_level;
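
ACCESS_ONCE() is the same volatile cast the old code open-coded, wrapped in a macro so the intent is stated once: exactly one untorn load, with no compiler caching or re-reading. Its definition in this era's compiler.h:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* One real load per call; the compiler may neither reuse an
 * earlier read nor split the access. */
static unsigned int load_once(unsigned int *p)
{
	return ACCESS_ONCE(*p);
}
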
@@ -401,6 +400,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401{ 400{
402 unsigned fill_level; 401 unsigned fill_level;
403 struct efx_nic *efx = tx_queue->efx; 402 struct efx_nic *efx = tx_queue->efx;
403 struct netdev_queue *queue;
404 404
405 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 405 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
406 406
@@ -417,12 +417,25 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
417 417
418 /* Do this under netif_tx_lock(), to avoid racing 418 /* Do this under netif_tx_lock(), to avoid racing
419 * with efx_xmit(). */ 419 * with efx_xmit(). */
420 netif_tx_lock(efx->net_dev); 420 queue = netdev_get_tx_queue(
421 efx->net_dev,
422 tx_queue->queue / EFX_TXQ_TYPES);
423 __netif_tx_lock(queue, smp_processor_id());
421 if (tx_queue->stopped) { 424 if (tx_queue->stopped) {
422 tx_queue->stopped = 0; 425 tx_queue->stopped = 0;
423 efx_wake_queue(tx_queue->channel); 426 efx_wake_queue(tx_queue->channel);
424 } 427 }
425 netif_tx_unlock(efx->net_dev); 428 __netif_tx_unlock(queue);
429 }
430 }
431
432 /* Check whether the hardware queue is now empty */
433 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
434 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
435 if (tx_queue->read_count == tx_queue->old_write_count) {
436 smp_mb();
437 tx_queue->empty_read_count =
438 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
426 } 439 }
427 } 440 }
428} 441}
@@ -470,8 +483,10 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
470 483
471 tx_queue->insert_count = 0; 484 tx_queue->insert_count = 0;
472 tx_queue->write_count = 0; 485 tx_queue->write_count = 0;
486 tx_queue->old_write_count = 0;
473 tx_queue->read_count = 0; 487 tx_queue->read_count = 0;
474 tx_queue->old_read_count = 0; 488 tx_queue->old_read_count = 0;
489 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
475 BUG_ON(tx_queue->stopped); 490 BUG_ON(tx_queue->stopped);
476 491
477 /* Set up TX descriptor ring */ 492 /* Set up TX descriptor ring */
@@ -760,7 +775,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
760 * stopped from the access of read_count. */ 775 * stopped from the access of read_count. */
761 smp_mb(); 776 smp_mb();
762 tx_queue->old_read_count = 777 tx_queue->old_read_count =
763 *(volatile unsigned *)&tx_queue->read_count; 778 ACCESS_ONCE(tx_queue->read_count);
764 fill_level = (tx_queue->insert_count 779 fill_level = (tx_queue->insert_count
765 - tx_queue->old_read_count); 780 - tx_queue->old_read_count);
766 q_space = efx->txq_entries - 1 - fill_level; 781 q_space = efx->txq_entries - 1 - fill_level;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec583..819c1750e2ab 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
45 u32 ioaddr = ndev->base_addr; 45 u32 ioaddr = ndev->base_addr;
46 46
47 if (mdp->duplex) /* Full */ 47 if (mdp->duplex) /* Full */
48 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 48 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
49 else /* Half */ 49 else /* Half */
50 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 50 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
51} 51}
52 52
53static void sh_eth_set_rate(struct net_device *ndev) 53static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
57 57
58 switch (mdp->speed) { 58 switch (mdp->speed) {
59 case 10: /* 10BASE */ 59 case 10: /* 10BASE */
60 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); 60 writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
61 break; 61 break;
62 case 100:/* 100BASE */ 62 case 100:/* 100BASE */
63 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); 63 writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
64 break; 64 break;
65 default: 65 default:
66 break; 66 break;
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
96 u32 ioaddr = ndev->base_addr; 96 u32 ioaddr = ndev->base_addr;
97 97
98 if (mdp->duplex) /* Full */ 98 if (mdp->duplex) /* Full */
99 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 99 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
100 else /* Half */ 100 else /* Half */
101 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 101 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
102} 102}
103 103
104static void sh_eth_set_rate(struct net_device *ndev) 104static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
108 108
109 switch (mdp->speed) { 109 switch (mdp->speed) {
110 case 10: /* 10BASE */ 110 case 10: /* 10BASE */
111 ctrl_outl(0, ioaddr + RTRATE); 111 writel(0, ioaddr + RTRATE);
112 break; 112 break;
113 case 100:/* 100BASE */ 113 case 100:/* 100BASE */
114 ctrl_outl(1, ioaddr + RTRATE); 114 writel(1, ioaddr + RTRATE);
115 break; 115 break;
116 default: 116 default:
117 break; 117 break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
143static void sh_eth_chip_reset(struct net_device *ndev) 143static void sh_eth_chip_reset(struct net_device *ndev)
144{ 144{
145 /* reset device */ 145 /* reset device */
146 ctrl_outl(ARSTR_ARSTR, ARSTR); 146 writel(ARSTR_ARSTR, ARSTR);
147 mdelay(1); 147 mdelay(1);
148} 148}
149 149
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
152 u32 ioaddr = ndev->base_addr; 152 u32 ioaddr = ndev->base_addr;
153 int cnt = 100; 153 int cnt = 100;
154 154
155 ctrl_outl(EDSR_ENALL, ioaddr + EDSR); 155 writel(EDSR_ENALL, ioaddr + EDSR);
156 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 156 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
157 while (cnt > 0) { 157 while (cnt > 0) {
158 if (!(ctrl_inl(ioaddr + EDMR) & 0x3)) 158 if (!(readl(ioaddr + EDMR) & 0x3))
159 break; 159 break;
160 mdelay(1); 160 mdelay(1);
161 cnt--; 161 cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
 164 printk(KERN_ERR "Device reset failed\n"); 164 printk(KERN_ERR "Device reset failed\n");
165 165
166 /* Table Init */ 166 /* Table Init */
167 ctrl_outl(0x0, ioaddr + TDLAR); 167 writel(0x0, ioaddr + TDLAR);
168 ctrl_outl(0x0, ioaddr + TDFAR); 168 writel(0x0, ioaddr + TDFAR);
169 ctrl_outl(0x0, ioaddr + TDFXR); 169 writel(0x0, ioaddr + TDFXR);
170 ctrl_outl(0x0, ioaddr + TDFFR); 170 writel(0x0, ioaddr + TDFFR);
171 ctrl_outl(0x0, ioaddr + RDLAR); 171 writel(0x0, ioaddr + RDLAR);
172 ctrl_outl(0x0, ioaddr + RDFAR); 172 writel(0x0, ioaddr + RDFAR);
173 ctrl_outl(0x0, ioaddr + RDFXR); 173 writel(0x0, ioaddr + RDFXR);
174 ctrl_outl(0x0, ioaddr + RDFFR); 174 writel(0x0, ioaddr + RDFFR);
175} 175}
176 176
177static void sh_eth_set_duplex(struct net_device *ndev) 177static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
180 u32 ioaddr = ndev->base_addr; 180 u32 ioaddr = ndev->base_addr;
181 181
182 if (mdp->duplex) /* Full */ 182 if (mdp->duplex) /* Full */
183 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 183 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
184 else /* Half */ 184 else /* Half */
185 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 185 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
186} 186}
187 187
188static void sh_eth_set_rate(struct net_device *ndev) 188static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
192 192
193 switch (mdp->speed) { 193 switch (mdp->speed) {
194 case 10: /* 10BASE */ 194 case 10: /* 10BASE */
195 ctrl_outl(GECMR_10, ioaddr + GECMR); 195 writel(GECMR_10, ioaddr + GECMR);
196 break; 196 break;
197 case 100:/* 100BASE */ 197 case 100:/* 100BASE */
198 ctrl_outl(GECMR_100, ioaddr + GECMR); 198 writel(GECMR_100, ioaddr + GECMR);
199 break; 199 break;
200 case 1000: /* 1000BASE */ 200 case 1000: /* 1000BASE */
201 ctrl_outl(GECMR_1000, ioaddr + GECMR); 201 writel(GECMR_1000, ioaddr + GECMR);
202 break; 202 break;
203 default: 203 default:
204 break; 204 break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
283{ 283{
284 u32 ioaddr = ndev->base_addr; 284 u32 ioaddr = ndev->base_addr;
285 285
286 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 286 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
287 mdelay(3); 287 mdelay(3);
288 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 288 writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
289} 289}
290#endif 290#endif
291 291
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
336{ 336{
337 u32 ioaddr = ndev->base_addr; 337 u32 ioaddr = ndev->base_addr;
338 338
339 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 339 writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), 340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
341 ioaddr + MAHR); 341 ioaddr + MAHR);
342 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), 342 writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
343 ioaddr + MALR); 343 ioaddr + MALR);
344} 344}
345 345
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
359 memcpy(ndev->dev_addr, mac, 6); 359 memcpy(ndev->dev_addr, mac, 6);
360 } else { 360 } else {
361 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); 361 ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
362 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; 362 ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
363 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; 363 ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
364 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); 364 ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
365 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; 365 ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
366 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); 366 ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
367 } 367 }
368} 368}
369 369
@@ -379,19 +379,19 @@ struct bb_info {
379/* PHY bit set */ 379/* PHY bit set */
380static void bb_set(u32 addr, u32 msk) 380static void bb_set(u32 addr, u32 msk)
381{ 381{
382 ctrl_outl(ctrl_inl(addr) | msk, addr); 382 writel(readl(addr) | msk, addr);
383} 383}
384 384
385/* PHY bit clear */ 385/* PHY bit clear */
386static void bb_clr(u32 addr, u32 msk) 386static void bb_clr(u32 addr, u32 msk)
387{ 387{
388 ctrl_outl((ctrl_inl(addr) & ~msk), addr); 388 writel((readl(addr) & ~msk), addr);
389} 389}
390 390
391/* PHY bit read */ 391/* PHY bit read */
392static int bb_read(u32 addr, u32 msk) 392static int bb_read(u32 addr, u32 msk)
393{ 393{
394 return (ctrl_inl(addr) & msk) != 0; 394 return (readl(addr) & msk) != 0;
395} 395}
396 396
397/* Data I/O pin control */ 397/* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
507 /* Rx descriptor address set */ 507 /* Rx descriptor address set */
508 if (i == 0) { 508 if (i == 0) {
509 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR); 509 writel(mdp->rx_desc_dma, ioaddr + RDLAR);
510#if defined(CONFIG_CPU_SUBTYPE_SH7763) 510#if defined(CONFIG_CPU_SUBTYPE_SH7763)
511 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR); 511 writel(mdp->rx_desc_dma, ioaddr + RDFAR);
512#endif 512#endif
513 } 513 }
514 } 514 }
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
528 txdesc->buffer_length = 0; 528 txdesc->buffer_length = 0;
529 if (i == 0) { 529 if (i == 0) {
530 /* Tx descriptor address set */ 530 /* Tx descriptor address set */
531 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR); 531 writel(mdp->tx_desc_dma, ioaddr + TDLAR);
532#if defined(CONFIG_CPU_SUBTYPE_SH7763) 532#if defined(CONFIG_CPU_SUBTYPE_SH7763)
533 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR); 533 writel(mdp->tx_desc_dma, ioaddr + TDFAR);
534#endif 534#endif
535 } 535 }
536 } 536 }
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
623 /* Descriptor format */ 623 /* Descriptor format */
624 sh_eth_ring_format(ndev); 624 sh_eth_ring_format(ndev);
625 if (mdp->cd->rpadir) 625 if (mdp->cd->rpadir)
626 ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR); 626 writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
627 627
628 /* all sh_eth int mask */ 628 /* all sh_eth int mask */
629 ctrl_outl(0, ioaddr + EESIPR); 629 writel(0, ioaddr + EESIPR);
630 630
631#if defined(__LITTLE_ENDIAN__) 631#if defined(__LITTLE_ENDIAN__)
632 if (mdp->cd->hw_swap) 632 if (mdp->cd->hw_swap)
633 ctrl_outl(EDMR_EL, ioaddr + EDMR); 633 writel(EDMR_EL, ioaddr + EDMR);
634 else 634 else
635#endif 635#endif
636 ctrl_outl(0, ioaddr + EDMR); 636 writel(0, ioaddr + EDMR);
637 637
638 /* FIFO size set */ 638 /* FIFO size set */
639 ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR); 639 writel(mdp->cd->fdr_value, ioaddr + FDR);
640 ctrl_outl(0, ioaddr + TFTR); 640 writel(0, ioaddr + TFTR);
641 641
642 /* Frame recv control */ 642 /* Frame recv control */
643 ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR); 643 writel(mdp->cd->rmcr_value, ioaddr + RMCR);
644 644
645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
647 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER); 647 writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
648 648
649 if (mdp->cd->bculr) 649 if (mdp->cd->bculr)
650 ctrl_outl(0x800, ioaddr + BCULR); /* Burst cycle set */ 650 writel(0x800, ioaddr + BCULR); /* Burst cycle set */
651 651
652 ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR); 652 writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
653 653
654 if (!mdp->cd->no_trimd) 654 if (!mdp->cd->no_trimd)
655 ctrl_outl(0, ioaddr + TRIMD); 655 writel(0, ioaddr + TRIMD);
656 656
657 /* Recv frame limit set register */ 657 /* Recv frame limit set register */
658 ctrl_outl(RFLR_VALUE, ioaddr + RFLR); 658 writel(RFLR_VALUE, ioaddr + RFLR);
659 659
660 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR); 660 writel(readl(ioaddr + EESR), ioaddr + EESR);
661 ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR); 661 writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
662 662
663 /* PAUSE Prohibition */ 663 /* PAUSE Prohibition */
664 val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) | 664 val = (readl(ioaddr + ECMR) & ECMR_DM) |
665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
666 666
667 ctrl_outl(val, ioaddr + ECMR); 667 writel(val, ioaddr + ECMR);
668 668
669 if (mdp->cd->set_rate) 669 if (mdp->cd->set_rate)
670 mdp->cd->set_rate(ndev); 670 mdp->cd->set_rate(ndev);
671 671
672 /* E-MAC Status Register clear */ 672 /* E-MAC Status Register clear */
673 ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR); 673 writel(mdp->cd->ecsr_value, ioaddr + ECSR);
674 674
675 /* E-MAC Interrupt Enable register */ 675 /* E-MAC Interrupt Enable register */
676 ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR); 676 writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
677 677
678 /* Set MAC address */ 678 /* Set MAC address */
679 update_mac_address(ndev); 679 update_mac_address(ndev);
680 680
681 /* mask reset */ 681 /* mask reset */
682 if (mdp->cd->apr) 682 if (mdp->cd->apr)
683 ctrl_outl(APR_AP, ioaddr + APR); 683 writel(APR_AP, ioaddr + APR);
684 if (mdp->cd->mpr) 684 if (mdp->cd->mpr)
685 ctrl_outl(MPR_MP, ioaddr + MPR); 685 writel(MPR_MP, ioaddr + MPR);
686 if (mdp->cd->tpauser) 686 if (mdp->cd->tpauser)
687 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 687 writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
688 688
689 /* Setting the Rx mode will start the Rx process. */ 689 /* Setting the Rx mode will start the Rx process. */
690 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 690 writel(EDRRR_R, ioaddr + EDRRR);
691 691
692 netif_start_queue(ndev); 692 netif_start_queue(ndev);
693 693
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
811 811
812 /* Restart Rx engine if stopped. */ 812 /* Restart Rx engine if stopped. */
813 /* If we don't need to check status, don't. -KDU */ 813 /* If we don't need to check status, don't. -KDU */
814 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R)) 814 if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
815 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); 815 writel(EDRRR_R, ndev->base_addr + EDRRR);
816 816
817 return 0; 817 return 0;
818} 818}
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
827 u32 mask; 827 u32 mask;
828 828
829 if (intr_status & EESR_ECI) { 829 if (intr_status & EESR_ECI) {
830 felic_stat = ctrl_inl(ioaddr + ECSR); 830 felic_stat = readl(ioaddr + ECSR);
831 ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */ 831 writel(felic_stat, ioaddr + ECSR); /* clear int */
832 if (felic_stat & ECSR_ICD) 832 if (felic_stat & ECSR_ICD)
833 mdp->stats.tx_carrier_errors++; 833 mdp->stats.tx_carrier_errors++;
834 if (felic_stat & ECSR_LCHNG) { 834 if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
839 else 839 else
840 link_stat = PHY_ST_LINK; 840 link_stat = PHY_ST_LINK;
841 } else { 841 } else {
842 link_stat = (ctrl_inl(ioaddr + PSR)); 842 link_stat = (readl(ioaddr + PSR));
843 if (mdp->ether_link_active_low) 843 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 844 link_stat = ~link_stat;
845 } 845 }
846 if (!(link_stat & PHY_ST_LINK)) { 846 if (!(link_stat & PHY_ST_LINK)) {
847 /* Link Down : disable tx and rx */ 847 /* Link Down : disable tx and rx */
848 ctrl_outl(ctrl_inl(ioaddr + ECMR) & 848 writel(readl(ioaddr + ECMR) &
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); 849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else { 850 } else {
851 /* Link Up */ 851 /* Link Up */
852 ctrl_outl(ctrl_inl(ioaddr + EESIPR) & 852 writel(readl(ioaddr + EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 853 ~DMAC_M_ECI, ioaddr + EESIPR);
854 /* clear int */ 854 /* clear int */
855 ctrl_outl(ctrl_inl(ioaddr + ECSR), 855 writel(readl(ioaddr + ECSR),
856 ioaddr + ECSR); 856 ioaddr + ECSR);
857 ctrl_outl(ctrl_inl(ioaddr + EESIPR) | 857 writel(readl(ioaddr + EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 858 DMAC_M_ECI, ioaddr + EESIPR);
859 /* enable tx and rx */ 859 /* enable tx and rx */
860 ctrl_outl(ctrl_inl(ioaddr + ECMR) | 860 writel(readl(ioaddr + ECMR) |
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR); 861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 862 }
863 } 863 }
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
888 /* Receive Descriptor Empty int */ 888 /* Receive Descriptor Empty int */
889 mdp->stats.rx_over_errors++; 889 mdp->stats.rx_over_errors++;
890 890
891 if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R) 891 if (readl(ioaddr + EDRRR) ^ EDRRR_R)
892 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 892 writel(EDRRR_R, ioaddr + EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 893 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 894 }
895 if (intr_status & EESR_RFE) { 895 if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
903 mask &= ~EESR_ADE; 903 mask &= ~EESR_ADE;
904 if (intr_status & mask) { 904 if (intr_status & mask) {
905 /* Tx error */ 905 /* Tx error */
906 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR); 906 u32 edtrr = readl(ndev->base_addr + EDTRR);
907 /* dmesg */ 907 /* dmesg */
908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
909 intr_status, mdp->cur_tx); 909 intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
915 /* SH7712 BUG */ 915 /* SH7712 BUG */
916 if (edtrr ^ EDTRR_TRNS) { 916 if (edtrr ^ EDTRR_TRNS) {
917 /* tx dma start */ 917 /* tx dma start */
918 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 918 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
919 } 919 }
920 /* wakeup */ 920 /* wakeup */
921 netif_wake_queue(ndev); 921 netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
934 spin_lock(&mdp->lock); 934 spin_lock(&mdp->lock);
935 935
936 /* Get interrupt status */ 936 /* Get interrupt status */
937 intr_status = ctrl_inl(ioaddr + EESR); 937 intr_status = readl(ioaddr + EESR);
938 /* Clear interrupt */ 938 /* Clear interrupt */
939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
941 cd->tx_check | cd->eesr_err_check)) { 941 cd->tx_check | cd->eesr_err_check)) {
942 ctrl_outl(intr_status, ioaddr + EESR); 942 writel(intr_status, ioaddr + EESR);
943 ret = IRQ_HANDLED; 943 ret = IRQ_HANDLED;
944 } else 944 } else
945 goto other_irq; 945 goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1000 mdp->cd->set_rate(ndev); 1000 mdp->cd->set_rate(ndev);
1001 } 1001 }
1002 if (mdp->link == PHY_DOWN) { 1002 if (mdp->link == PHY_DOWN) {
1003 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF) 1003 writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
1004 | ECMR_DM, ioaddr + ECMR); 1004 | ECMR_DM, ioaddr + ECMR);
1005 new_state = 1; 1005 new_state = 1;
1006 mdp->link = phydev->link; 1006 mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1125 1125
1126 /* warning message out. */ 1126 /* warning message out. */
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR)); 1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
1129 1129
1130 /* tx_errors count up */ 1130 /* tx_errors count up */
1131 mdp->stats.tx_errors++; 1131 mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1196 1196
1197 mdp->cur_tx++; 1197 mdp->cur_tx++;
1198 1198
1199 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1199 if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
1200 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 1200 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
1201 1201
1202 return NETDEV_TX_OK; 1202 return NETDEV_TX_OK;
1203} 1203}
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
1212 netif_stop_queue(ndev); 1212 netif_stop_queue(ndev);
1213 1213
1214 /* Disable interrupts by clearing the interrupt mask. */ 1214 /* Disable interrupts by clearing the interrupt mask. */
1215 ctrl_outl(0x0000, ioaddr + EESIPR); 1215 writel(0x0000, ioaddr + EESIPR);
1216 1216
1217 /* Stop the chip's Tx and Rx processes. */ 1217 /* Stop the chip's Tx and Rx processes. */
1218 ctrl_outl(0, ioaddr + EDTRR); 1218 writel(0, ioaddr + EDTRR);
1219 ctrl_outl(0, ioaddr + EDRRR); 1219 writel(0, ioaddr + EDRRR);
1220 1220
1221 /* PHY Disconnect */ 1221 /* PHY Disconnect */
1222 if (mdp->phydev) { 1222 if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1251 1251
1252 pm_runtime_get_sync(&mdp->pdev->dev); 1252 pm_runtime_get_sync(&mdp->pdev->dev);
1253 1253
1254 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR); 1254 mdp->stats.tx_dropped += readl(ioaddr + TROCR);
1255 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */ 1255 writel(0, ioaddr + TROCR); /* (write clear) */
1256 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR); 1256 mdp->stats.collisions += readl(ioaddr + CDCR);
1257 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */ 1257 writel(0, ioaddr + CDCR); /* (write clear) */
1258 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR); 1258 mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
1259 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */ 1259 writel(0, ioaddr + LCCR); /* (write clear) */
1260#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1260#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1261 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */ 1261 mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
1262 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */ 1262 writel(0, ioaddr + CERCR); /* (write clear) */
1263 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */ 1263 mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
1264 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */ 1264 writel(0, ioaddr + CEECR); /* (write clear) */
1265#else 1265#else
1266 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1266 mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
1267 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1267 writel(0, ioaddr + CNDCR); /* (write clear) */
1268#endif 1268#endif
1269 pm_runtime_put_sync(&mdp->pdev->dev); 1269 pm_runtime_put_sync(&mdp->pdev->dev);
1270 1270
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1295 1295
1296 if (ndev->flags & IFF_PROMISC) { 1296 if (ndev->flags & IFF_PROMISC) {
1297 /* Set promiscuous. */ 1297 /* Set promiscuous. */
1298 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, 1298 writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
1299 ioaddr + ECMR); 1299 ioaddr + ECMR);
1300 } else { 1300 } else {
1301 /* Normal, unicast/broadcast-only mode. */ 1301 /* Normal, unicast/broadcast-only mode. */
1302 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, 1302 writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
1303 ioaddr + ECMR); 1303 ioaddr + ECMR);
1304 } 1304 }
1305} 1305}
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1307/* SuperH's TSU register init function */ 1307/* SuperH's TSU register init function */
1308static void sh_eth_tsu_init(u32 ioaddr) 1308static void sh_eth_tsu_init(u32 ioaddr)
1309{ 1309{
1310 ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ 1310 writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
1311 ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ 1311 writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
1312 ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ 1312 writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
1313 ctrl_outl(0xc, ioaddr + TSU_BSYSL0); 1313 writel(0xc, ioaddr + TSU_BSYSL0);
1314 ctrl_outl(0xc, ioaddr + TSU_BSYSL1); 1314 writel(0xc, ioaddr + TSU_BSYSL1);
1315 ctrl_outl(0, ioaddr + TSU_PRISL0); 1315 writel(0, ioaddr + TSU_PRISL0);
1316 ctrl_outl(0, ioaddr + TSU_PRISL1); 1316 writel(0, ioaddr + TSU_PRISL1);
1317 ctrl_outl(0, ioaddr + TSU_FWSL0); 1317 writel(0, ioaddr + TSU_FWSL0);
1318 ctrl_outl(0, ioaddr + TSU_FWSL1); 1318 writel(0, ioaddr + TSU_FWSL1);
1319 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1319 writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1320#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1320#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1321 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ 1321 writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1322 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ 1322 writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1323#else 1323#else
1324 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1324 writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
1325 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1325 writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1326#endif 1326#endif
1327 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1327 writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
1328 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1328 writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
1329 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1329 writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
1330 ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1330 writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
1331 ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ 1331 writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
1332 ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ 1332 writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
1333 ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ 1333 writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
1334} 1334}
1335#endif /* SH_ETH_HAS_TSU */ 1335#endif /* SH_ETH_HAS_TSU */
1336 1336
@@ -1552,7 +1552,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
1552 1552
1553 sh_mdio_release(ndev); 1553 sh_mdio_release(ndev);
1554 unregister_netdev(ndev); 1554 unregister_netdev(ndev);
1555 flush_scheduled_work();
1556 pm_runtime_disable(&pdev->dev); 1555 pm_runtime_disable(&pdev->dev);
1557 free_netdev(ndev); 1556 free_netdev(ndev);
1558 platform_set_drvdata(pdev, NULL); 1557 platform_set_drvdata(pdev, NULL);
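The sh_eth.c hunks above are a mechanical conversion from the SuperH-specific ctrl_inl()/ctrl_outl() accessors to the generic readl()/writel() MMIO helpers; the register-access pattern itself, a plain read-modify-write, is unchanged. A minimal sketch of that pattern with hypothetical helper names (sh_eth open-codes every instance, and note it still passes the register base around as a u32 rather than the void __iomem * that readl()/writel() formally take):

#include <linux/io.h>

/* Hypothetical helpers illustrating the read-modify-write idiom used
 * throughout the converted driver. */
static inline void mmio_set_bits(void __iomem *reg, u32 mask)
{
	writel(readl(reg) | mask, reg);		/* e.g. set EDMR_SRST */
}

static inline void mmio_clear_bits(void __iomem *reg, u32 mask)
{
	writel(readl(reg) & ~mask, reg);	/* e.g. clear EDMR_SRST */
}

With these, the SH7763 soft-reset sequence above is simply: set EDMR_SRST, mdelay(3), clear EDMR_SRST.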
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 8b47763958f2..efa64221eede 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -26,7 +26,6 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/workqueue.h>
30#include <linux/netdevice.h> 29#include <linux/netdevice.h>
31#include <linux/phy.h> 30#include <linux/phy.h>
32 31
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index a5d6a6bd0c1a..3406ed870917 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1915,9 +1915,10 @@ err_release_board:
1915static void __devexit sis190_remove_one(struct pci_dev *pdev) 1915static void __devexit sis190_remove_one(struct pci_dev *pdev)
1916{ 1916{
1917 struct net_device *dev = pci_get_drvdata(pdev); 1917 struct net_device *dev = pci_get_drvdata(pdev);
1918 struct sis190_private *tp = netdev_priv(dev);
1918 1919
1919 sis190_mii_remove(dev); 1920 sis190_mii_remove(dev);
1920 flush_scheduled_work(); 1921 cancel_work_sync(&tp->phy_task);
1921 unregister_netdev(dev); 1922 unregister_netdev(dev);
1922 sis190_release_board(pdev); 1923 sis190_release_board(pdev);
1923 pci_set_drvdata(pdev, NULL); 1924 pci_set_drvdata(pdev, NULL);
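flush_scheduled_work() waits for every item on the shared system workqueue, which is both overkill and a deadlock hazard in a remove path; the replacement waits only for the work item this driver actually owns. A sketch of the pattern under the same assumptions as the hunk above (a driver-private work_struct embedded in the private data):

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct phy_task;	/* deferred PHY work, as in sis190 */
};

static void example_remove(struct example_priv *tp)
{
	/* Cancel a queued instance and wait for a running one to finish;
	 * after this returns the callback can no longer be executing. */
	cancel_work_sync(&tp->phy_task);
}

Note that cancel_work_sync() must not be called while holding a lock that the work callback itself takes.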
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..8c1404b58382 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3858 3858
3859 /* device is off until link detection */ 3859 /* device is off until link detection */
3860 netif_carrier_off(dev); 3860 netif_carrier_off(dev);
3861 netif_stop_queue(dev);
3862 3861
3863 return dev; 3862 return dev;
3864} 3863}
@@ -4013,8 +4012,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
4013 if (!hw) 4012 if (!hw)
4014 return; 4013 return;
4015 4014
4016 flush_scheduled_work();
4017
4018 dev1 = hw->dev[1]; 4015 dev1 = hw->dev[1];
4019 if (dev1) 4016 if (dev1)
4020 unregister_netdev(dev1); 4017 unregister_netdev(dev1);
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e12a879..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
22#define __SMSC911X_H__ 22#define __SMSC911X_H__
23 23
24#define TX_FIFO_LOW_THRESHOLD ((u32)1600) 24#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
25#define SMSC911X_EEPROM_SIZE ((u32)7) 25#define SMSC911X_EEPROM_SIZE ((u32)128)
26#define USE_DEBUG 0 26#define USE_DEBUG 0
27 27
28/* This is the maximum number of packets to be received every 28/* This is the maximum number of packets to be received every
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e13224..5f06c4706abe 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Apr_2010" 23#define DRV_MODULE_VERSION "Nov_2010"
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/stmmac.h> 25#include <linux/stmmac.h>
26 26
@@ -37,7 +37,6 @@ struct stmmac_priv {
37 unsigned int cur_tx; 37 unsigned int cur_tx;
38 unsigned int dirty_tx; 38 unsigned int dirty_tx;
39 unsigned int dma_tx_size; 39 unsigned int dma_tx_size;
40 int tx_coe;
41 int tx_coalesce; 40 int tx_coalesce;
42 41
43 struct dma_desc *dma_rx ; 42 struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
48 struct sk_buff_head rx_recycle; 47 struct sk_buff_head rx_recycle;
49 48
50 struct net_device *dev; 49 struct net_device *dev;
51 int is_gmac;
52 dma_addr_t dma_rx_phy; 50 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size; 51 unsigned int dma_rx_size;
54 unsigned int dma_buf_sz; 52 unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
60 struct napi_struct napi; 58 struct napi_struct napi;
61 59
62 phy_interface_t phy_interface; 60 phy_interface_t phy_interface;
63 int pbl;
64 int bus_id;
65 int phy_addr; 61 int phy_addr;
66 int phy_mask; 62 int phy_mask;
67 int (*phy_reset) (void *priv); 63 int (*phy_reset) (void *priv);
68 void (*fix_mac_speed) (void *priv, unsigned int speed); 64 int rx_coe;
69 void (*bus_setup)(void __iomem *ioaddr); 65 int no_csum_insertion;
70 void *bsp_priv;
71 66
72 int phy_irq; 67 int phy_irq;
73 struct phy_device *phydev; 68 struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
77 unsigned int flow_ctrl; 72 unsigned int flow_ctrl;
78 unsigned int pause; 73 unsigned int pause;
79 struct mii_bus *mii; 74 struct mii_bus *mii;
80 int mii_clk_csr;
81 75
82 u32 msg_enable; 76 u32 msg_enable;
83 spinlock_t lock; 77 spinlock_t lock;
84 int wolopts; 78 int wolopts;
85 int wolenabled; 79 int wolenabled;
86 int shutdown;
87#ifdef CONFIG_STMMAC_TIMER 80#ifdef CONFIG_STMMAC_TIMER
88 struct stmmac_timer *tm; 81 struct stmmac_timer *tm;
89#endif 82#endif
90#ifdef STMMAC_VLAN_TAG_USED 83#ifdef STMMAC_VLAN_TAG_USED
91 struct vlan_group *vlgrp; 84 struct vlan_group *vlgrp;
92#endif 85#endif
93 int enh_desc; 86 struct plat_stmmacenet_data *plat;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
97}; 87};
98 88
99#ifdef CONFIG_STM_DRIVERS
100#include <linux/stm/pad.h>
101static inline int stmmac_claim_resource(struct platform_device *pdev)
102{
103 int ret = 0;
104 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
105
106 /* Pad routing setup */
107 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
108 dev_name(&pdev->dev)))) {
109 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
110 ret = -ENODEV;
111 }
112 return ret;
113}
114#else
115static inline int stmmac_claim_resource(struct platform_device *pdev)
116{
117 return 0;
118}
119#endif
120
121extern int stmmac_mdio_unregister(struct net_device *ndev); 89extern int stmmac_mdio_unregister(struct net_device *ndev);
122extern int stmmac_mdio_register(struct net_device *ndev); 90extern int stmmac_mdio_register(struct net_device *ndev);
123extern void stmmac_set_ethtool_ops(struct net_device *netdev); 91extern void stmmac_set_ethtool_ops(struct net_device *netdev);
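Rather than copying each board parameter (pbl, bus_id, clk_csr, tx_coe, bugged_jumbo, ...) into stmmac_priv at probe time, the private structure now carries a single pointer to the platform data and the fields are dereferenced at their point of use. A minimal sketch of the idiom, assuming the same plat_stmmacenet_data type from <linux/stmmac.h>:

#include <linux/platform_device.h>
#include <linux/stmmac.h>

struct example_priv {
	struct plat_stmmacenet_data *plat;	/* set once in probe */
};

static int example_grab_platdata(struct platform_device *pdev,
				 struct example_priv *priv)
{
	/* One assignment replaces the old field-by-field copy. */
	priv->plat = pdev->dev.platform_data;
	return priv->plat ? 0 : -EINVAL;
}

Consumers then read priv->plat->tx_coe, priv->plat->bus_id and so on, which keeps the private structure small and makes it obvious which knobs are board-provided.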
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789a..fd719edc7f7c 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
94{ 94{
95 struct stmmac_priv *priv = netdev_priv(dev); 95 struct stmmac_priv *priv = netdev_priv(dev);
96 96
97 if (!priv->is_gmac) 97 if (!priv->plat->has_gmac)
98 strcpy(info->driver, MAC100_ETHTOOL_NAME); 98 strcpy(info->driver, MAC100_ETHTOOL_NAME);
99 else 99 else
100 strcpy(info->driver, GMAC_ETHTOOL_NAME); 100 strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
176 176
177 memset(reg_space, 0x0, REG_SPACE_SIZE); 177 memset(reg_space, 0x0, REG_SPACE_SIZE);
178 178
179 if (!priv->is_gmac) { 179 if (!priv->plat->has_gmac) {
180 /* MAC registers */ 180 /* MAC registers */
181 for (i = 0; i < 12; i++) 181 for (i = 0; i < 12; i++)
182 reg_space[i] = readl(priv->ioaddr + (i * 4)); 182 reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
197 } 197 }
198} 198}
199 199
200static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
210static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) 200static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
211{ 201{
212 struct stmmac_priv *priv = netdev_priv(dev); 202 struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
370 .get_link = ethtool_op_get_link, 360 .get_link = ethtool_op_get_link,
371 .get_rx_csum = stmmac_ethtool_get_rx_csum, 361 .get_rx_csum = stmmac_ethtool_get_rx_csum,
372 .get_tx_csum = ethtool_op_get_tx_csum, 362 .get_tx_csum = ethtool_op_get_tx_csum,
373 .set_tx_csum = stmmac_ethtool_set_tx_csum, 363 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
374 .get_sg = ethtool_op_get_sg, 364 .get_sg = ethtool_op_get_sg,
375 .set_sg = ethtool_op_set_sg, 365 .set_sg = ethtool_op_set_sg,
376 .get_pauseparam = stmmac_get_pauseparam, 366 .get_pauseparam = stmmac_get_pauseparam,
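The driver-private set_tx_csum handler toggled NETIF_F_HW_CSUM, which no longer matches the feature set the driver advertises at probe (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, see the stmmac_probe hunk further down); the generic helper keeps the ethtool path and the feature flags consistent. From memory, the core helper of this era behaves roughly like this sketch (see net/core/ethtool.c for the authoritative version):

int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	return 0;
}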
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..20f803df8681 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; 186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
187} 187}
188 188
189/* On some ST platforms, some HW system configuration registers have to be
190 * set according to the link speed negotiated.
191 */
192static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
193{
194 struct phy_device *phydev = priv->phydev;
195
196 if (likely(priv->plat->fix_mac_speed))
197 priv->plat->fix_mac_speed(priv->plat->bsp_priv,
198 phydev->speed);
199}
200
189/** 201/**
190 * stmmac_adjust_link 202 * stmmac_adjust_link
191 * @dev: net device structure 203 * @dev: net device structure
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
228 new_state = 1; 240 new_state = 1;
229 switch (phydev->speed) { 241 switch (phydev->speed) {
230 case 1000: 242 case 1000:
231 if (likely(priv->is_gmac)) 243 if (likely(priv->plat->has_gmac))
232 ctrl &= ~priv->hw->link.port; 244 ctrl &= ~priv->hw->link.port;
233 if (likely(priv->fix_mac_speed)) 245 stmmac_hw_fix_mac_speed(priv);
234 priv->fix_mac_speed(priv->bsp_priv,
235 phydev->speed);
236 break; 246 break;
237 case 100: 247 case 100:
238 case 10: 248 case 10:
239 if (priv->is_gmac) { 249 if (priv->plat->has_gmac) {
240 ctrl |= priv->hw->link.port; 250 ctrl |= priv->hw->link.port;
241 if (phydev->speed == SPEED_100) { 251 if (phydev->speed == SPEED_100) {
242 ctrl |= priv->hw->link.speed; 252 ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
246 } else { 256 } else {
247 ctrl &= ~priv->hw->link.port; 257 ctrl &= ~priv->hw->link.port;
248 } 258 }
249 if (likely(priv->fix_mac_speed)) 259 stmmac_hw_fix_mac_speed(priv);
250 priv->fix_mac_speed(priv->bsp_priv,
251 phydev->speed);
252 break; 260 break;
253 default: 261 default:
254 if (netif_msg_link(priv)) 262 if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
305 return 0; 313 return 0;
306 } 314 }
307 315
308 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 316 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
309 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 317 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
310 priv->phy_addr); 318 priv->phy_addr);
311 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 319 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
552 */ 560 */
553static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 561static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
554{ 562{
555 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) { 563 if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
556 /* In case of GMAC, SF mode has to be enabled 564 /* In case of GMAC, SF mode has to be enabled
557 * to perform the TX COE. This depends on: 565 * to perform the TX COE. This depends on:
558 * 1) TX COE if actually supported 566 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
814 init_dma_desc_rings(dev); 822 init_dma_desc_rings(dev);
815 823
816 /* DMA initialization and SW reset */ 824 /* DMA initialization and SW reset */
817 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl, 825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
818 priv->dma_tx_phy, 826 priv->dma_tx_phy,
819 priv->dma_rx_phy) < 0)) { 827 priv->dma_rx_phy) < 0)) {
820 828
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
825 /* Copy the MAC addr into the HW */ 833 /* Copy the MAC addr into the HW */
826 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); 834 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
827 /* If required, perform hw setup of the bus. */ 835 /* If required, perform hw setup of the bus. */
828 if (priv->bus_setup) 836 if (priv->plat->bus_setup)
829 priv->bus_setup(priv->ioaddr); 837 priv->plat->bus_setup(priv->ioaddr);
830 /* Initialize the MAC Core */ 838 /* Initialize the MAC Core */
831 priv->hw->mac->core_init(priv->ioaddr); 839 priv->hw->mac->core_init(priv->ioaddr);
832 840
833 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 841 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
834 if (priv->rx_coe) 842 if (priv->rx_coe)
835 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 843 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
836 if (priv->tx_coe) 844 if (priv->plat->tx_coe)
837 pr_info("\tTX Checksum insertion supported\n"); 845 pr_info("\tTX Checksum insertion supported\n");
838 846
839 priv->shutdown = 0;
840
841 /* Initialise the MMC (if present) to disable all interrupts. */ 847 /* Initialise the MMC (if present) to disable all interrupts. */
842 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
843 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -943,7 +949,7 @@ static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
943 skb, skb->len); 949 skb, skb->len);
944 950
945 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO); 951 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
946 if (unlikely(IS_ERR(segs))) 952 if (IS_ERR(segs))
947 goto sw_tso_end; 953 goto sw_tso_end;
948 954
949 do { 955 do {
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1042 return stmmac_sw_tso(priv, skb); 1048 return stmmac_sw_tso(priv, skb);
1043 1049
1044 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1050 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1045 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion))) 1051 if (unlikely((!priv->plat->tx_coe) ||
1052 (priv->no_csum_insertion)))
1046 skb_checksum_help(skb); 1053 skb_checksum_help(skb);
1047 else 1054 else
1048 csum_insertion = 1; 1055 csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1146 DMA_FROM_DEVICE); 1153 DMA_FROM_DEVICE);
1147 1154
1148 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1155 (p + entry)->des2 = priv->rx_skbuff_dma[entry];
1149 if (unlikely(priv->is_gmac)) { 1156 if (unlikely(priv->plat->has_gmac)) {
1150 if (bfsize >= BUF_SIZE_8KiB) 1157 if (bfsize >= BUF_SIZE_8KiB)
1151 (p + entry)->des3 = 1158 (p + entry)->des3 =
1152 (p + entry)->des2 + BUF_SIZE_8KiB; 1159 (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1356 return -EBUSY; 1363 return -EBUSY;
1357 } 1364 }
1358 1365
1359 if (priv->is_gmac) 1366 if (priv->plat->has_gmac)
1360 max_mtu = JUMBO_LEN; 1367 max_mtu = JUMBO_LEN;
1361 else 1368 else
1362 max_mtu = ETH_DATA_LEN; 1369 max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1370 * needs to have the Tx COE disabled for oversized frames 1377 * needs to have the Tx COE disabled for oversized frames
1371 * (due to limited buffer sizes). In this case we disable 1378 * (due to limited buffer sizes). In this case we disable
1372 * the TX csum insertion in the TDES and not use SF. */ 1379 * the TX csum insertion in the TDES and not use SF. */
1373 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1380 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1374 priv->no_csum_insertion = 1; 1381 priv->no_csum_insertion = 1;
1375 else 1382 else
1376 priv->no_csum_insertion = 0; 1383 priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1390 return IRQ_NONE; 1397 return IRQ_NONE;
1391 } 1398 }
1392 1399
1393 if (priv->is_gmac) 1400 if (priv->plat->has_gmac)
1394 /* To handle GMAC own interrupts */ 1401 /* To handle GMAC own interrupts */
1395 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1402 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
1396 1403
@@ -1487,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
1487 dev->netdev_ops = &stmmac_netdev_ops; 1494 dev->netdev_ops = &stmmac_netdev_ops;
1488 stmmac_set_ethtool_ops(dev); 1495 stmmac_set_ethtool_ops(dev);
1489 1496
1490 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); 1497 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
1498 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1491 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1499 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1492#ifdef STMMAC_VLAN_TAG_USED 1500#ifdef STMMAC_VLAN_TAG_USED
1493 /* Both mac100 and gmac support receive VLAN tag detection */ 1501 /* Both mac100 and gmac support receive VLAN tag detection */
@@ -1509,6 +1517,8 @@ static int stmmac_probe(struct net_device *dev)
1509 pr_warning("\tno valid MAC address;" 1517 pr_warning("\tno valid MAC address;"
1510 "please, use ifconfig or nwhwconfig!\n"); 1518 "please, use ifconfig or nwhwconfig!\n");
1511 1519
1520 spin_lock_init(&priv->lock);
1521
1512 ret = register_netdev(dev); 1522 ret = register_netdev(dev);
1513 if (ret) { 1523 if (ret) {
1514 pr_err("%s: ERROR %i registering the device\n", 1524 pr_err("%s: ERROR %i registering the device\n",
@@ -1518,9 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
1518 1528
1519 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", 1529 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1520 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", 1530 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1521 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); 1531 (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
1522
1523 spin_lock_init(&priv->lock);
1524 1532
1525 return ret; 1533 return ret;
1526} 1534}
@@ -1536,7 +1544,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1536 1544
1537 struct mac_device_info *device; 1545 struct mac_device_info *device;
1538 1546
1539 if (priv->is_gmac) 1547 if (priv->plat->has_gmac)
1540 device = dwmac1000_setup(priv->ioaddr); 1548 device = dwmac1000_setup(priv->ioaddr);
1541 else 1549 else
1542 device = dwmac100_setup(priv->ioaddr); 1550 device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1544 if (!device) 1552 if (!device)
1545 return -ENOMEM; 1553 return -ENOMEM;
1546 1554
1547 if (priv->enh_desc) { 1555 if (priv->plat->enh_desc) {
1548 device->desc = &enh_desc_ops; 1556 device->desc = &enh_desc_ops;
1549 pr_info("\tEnhanced descriptor structure\n"); 1557 pr_info("\tEnhanced descriptor structure\n");
1550 } else 1558 } else
@@ -1598,7 +1606,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
1598 plat_dat->bus_id); 1606 plat_dat->bus_id);
1599 1607
1600 /* Check that this phy is for the MAC being initialised */ 1608 /* Check that this phy is for the MAC being initialised */
1601 if (priv->bus_id != plat_dat->bus_id) 1609 if (priv->plat->bus_id != plat_dat->bus_id)
1602 return 0; 1610 return 0;
1603 1611
1604 /* OK, this PHY is connected to the MAC. 1612 /* OK, this PHY is connected to the MAC.
@@ -1634,7 +1642,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1634 struct resource *res; 1642 struct resource *res;
1635 void __iomem *addr = NULL; 1643 void __iomem *addr = NULL;
1636 struct net_device *ndev = NULL; 1644 struct net_device *ndev = NULL;
1637 struct stmmac_priv *priv; 1645 struct stmmac_priv *priv = NULL;
1638 struct plat_stmmacenet_data *plat_dat; 1646 struct plat_stmmacenet_data *plat_dat;
1639 1647
1640 pr_info("STMMAC driver:\n\tplatform registration... "); 1648 pr_info("STMMAC driver:\n\tplatform registration... ");
@@ -1683,13 +1691,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1683 priv->device = &(pdev->dev); 1691 priv->device = &(pdev->dev);
1684 priv->dev = ndev; 1692 priv->dev = ndev;
1685 plat_dat = pdev->dev.platform_data; 1693 plat_dat = pdev->dev.platform_data;
1686 priv->bus_id = plat_dat->bus_id; 1694
1687 priv->pbl = plat_dat->pbl; /* TLI */ 1695 priv->plat = plat_dat;
1688 priv->mii_clk_csr = plat_dat->clk_csr; 1696
1689 priv->tx_coe = plat_dat->tx_coe;
1690 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1691 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1692 priv->enh_desc = plat_dat->enh_desc;
1693 priv->ioaddr = addr; 1697 priv->ioaddr = addr;
1694 1698
1695 /* PMT module is not integrated in all the MAC devices. */ 1699 /* PMT module is not integrated in all the MAC devices. */
@@ -1703,10 +1707,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1703 /* Set the I/O base addr */ 1707 /* Set the I/O base addr */
1704 ndev->base_addr = (unsigned long)addr; 1708 ndev->base_addr = (unsigned long)addr;
1705 1709
1706 /* Verify embedded resource for the platform */ 1710 /* Custom initialisation */
1707 ret = stmmac_claim_resource(pdev); 1711 if (priv->plat->init) {
1708 if (ret < 0) 1712 ret = priv->plat->init(pdev);
1709 goto out; 1713 if (unlikely(ret))
1714 goto out;
1715 }
1710 1716
1711 /* MAC HW device detection */ 1717 /* MAC HW device detection */
1712 ret = stmmac_mac_device_setup(ndev); 1718 ret = stmmac_mac_device_setup(ndev);
@@ -1727,16 +1733,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1727 goto out; 1733 goto out;
1728 } 1734 }
1729 1735
1730 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1731 priv->bus_setup = plat_dat->bus_setup;
1732 priv->bsp_priv = plat_dat->bsp_priv;
1733
1734 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1736 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1735 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, 1737 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
1736 pdev->id, ndev->irq, addr); 1738 pdev->id, ndev->irq, addr);
1737 1739
1738 /* MDIO bus Registration */ 1740 /* MDIO bus Registration */
1739 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); 1741 pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
1740 ret = stmmac_mdio_register(ndev); 1742 ret = stmmac_mdio_register(ndev);
1741 if (ret < 0) 1743 if (ret < 0)
1742 goto out; 1744 goto out;
@@ -1744,6 +1746,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1744 1746
1745out: 1747out:
1746 if (ret < 0) { 1748 if (ret < 0) {
1749 if (priv->plat->exit)
1750 priv->plat->exit(pdev);
1751
1747 platform_set_drvdata(pdev, NULL); 1752 platform_set_drvdata(pdev, NULL);
1748 release_mem_region(res->start, resource_size(res)); 1753 release_mem_region(res->start, resource_size(res));
1749 if (addr != NULL) 1754 if (addr != NULL)
@@ -1777,6 +1782,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1777 1782
1778 stmmac_mdio_unregister(ndev); 1783 stmmac_mdio_unregister(ndev);
1779 1784
1785 if (priv->plat->exit)
1786 priv->plat->exit(pdev);
1787
1780 platform_set_drvdata(pdev, NULL); 1788 platform_set_drvdata(pdev, NULL);
1781 unregister_netdev(ndev); 1789 unregister_netdev(ndev);
1782 1790
@@ -1790,69 +1798,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1790} 1798}
1791 1799
1792#ifdef CONFIG_PM 1800#ifdef CONFIG_PM
1793static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) 1801static int stmmac_suspend(struct device *dev)
1794{ 1802{
1795 struct net_device *dev = platform_get_drvdata(pdev); 1803 struct net_device *ndev = dev_get_drvdata(dev);
1796 struct stmmac_priv *priv = netdev_priv(dev); 1804 struct stmmac_priv *priv = netdev_priv(ndev);
1797 int dis_ic = 0; 1805 int dis_ic = 0;
1798 1806
1799 if (!dev || !netif_running(dev)) 1807 if (!ndev || !netif_running(ndev))
1800 return 0; 1808 return 0;
1801 1809
1802 spin_lock(&priv->lock); 1810 spin_lock(&priv->lock);
1803 1811
1804 if (state.event == PM_EVENT_SUSPEND) { 1812 netif_device_detach(ndev);
1805 netif_device_detach(dev); 1813 netif_stop_queue(ndev);
1806 netif_stop_queue(dev); 1814 if (priv->phydev)
1807 if (priv->phydev) 1815 phy_stop(priv->phydev);
1808 phy_stop(priv->phydev);
1809 1816
1810#ifdef CONFIG_STMMAC_TIMER 1817#ifdef CONFIG_STMMAC_TIMER
1811 priv->tm->timer_stop(); 1818 priv->tm->timer_stop();
1812 if (likely(priv->tm->enable)) 1819 if (likely(priv->tm->enable))
1813 dis_ic = 1; 1820 dis_ic = 1;
1814#endif 1821#endif
1815 napi_disable(&priv->napi); 1822 napi_disable(&priv->napi);
1816 1823
1817 /* Stop TX/RX DMA */ 1824 /* Stop TX/RX DMA */
1818 priv->hw->dma->stop_tx(priv->ioaddr); 1825 priv->hw->dma->stop_tx(priv->ioaddr);
1819 priv->hw->dma->stop_rx(priv->ioaddr); 1826 priv->hw->dma->stop_rx(priv->ioaddr);
1820 /* Clear the Rx/Tx descriptors */ 1827 /* Clear the Rx/Tx descriptors */
1821 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 1828 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
1822 dis_ic); 1829 dis_ic);
1823 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1830 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
1824 1831
1825 /* Enable Power down mode by programming the PMT regs */ 1832 /* Enable Power down mode by programming the PMT regs */
1826 if (device_can_wakeup(priv->device)) 1833 if (device_may_wakeup(priv->device))
1827 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 1834 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1828 else 1835 else
1829 stmmac_disable_mac(priv->ioaddr); 1836 stmmac_disable_mac(priv->ioaddr);
1830 } else {
1831 priv->shutdown = 1;
1832 /* Although this can appear slightly redundant it actually
1833 * makes fast the standby operation and guarantees the driver
1834 * working if hibernation is on media. */
1835 stmmac_release(dev);
1836 }
1837 1837
1838 spin_unlock(&priv->lock); 1838 spin_unlock(&priv->lock);
1839 return 0; 1839 return 0;
1840} 1840}
1841 1841
1842static int stmmac_resume(struct platform_device *pdev) 1842static int stmmac_resume(struct device *dev)
1843{ 1843{
1844 struct net_device *dev = platform_get_drvdata(pdev); 1844 struct net_device *ndev = dev_get_drvdata(dev);
1845 struct stmmac_priv *priv = netdev_priv(dev); 1845 struct stmmac_priv *priv = netdev_priv(ndev);
1846
1847 if (!netif_running(dev))
1848 return 0;
1849 1846
1850 if (priv->shutdown) { 1847 if (!netif_running(ndev))
1851 /* Re-open the interface and re-init the MAC/DMA
1852 and the rings (i.e. on hibernation stage) */
1853 stmmac_open(dev);
1854 return 0; 1848 return 0;
1855 }
1856 1849
1857 spin_lock(&priv->lock); 1850 spin_lock(&priv->lock);
1858 1851
@@ -1861,10 +1854,10 @@ static int stmmac_resume(struct platform_device *pdev)
1861 * is received. Anyway, it's better to manually clear 1854 * is received. Anyway, it's better to manually clear
1862 * this bit because it can generate problems while resuming 1855 * this bit because it can generate problems while resuming
1863 * from other devices (e.g. serial console). */ 1856 * from other devices (e.g. serial console). */
1864 if (device_can_wakeup(priv->device)) 1857 if (device_may_wakeup(priv->device))
1865 priv->hw->mac->pmt(priv->ioaddr, 0); 1858 priv->hw->mac->pmt(priv->ioaddr, 0);
1866 1859
1867 netif_device_attach(dev); 1860 netif_device_attach(ndev);
1868 1861
1869 /* Enable the MAC and DMA */ 1862 /* Enable the MAC and DMA */
1870 stmmac_enable_mac(priv->ioaddr); 1863 stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1865,59 @@ static int stmmac_resume(struct platform_device *pdev)
1872 priv->hw->dma->start_rx(priv->ioaddr); 1865 priv->hw->dma->start_rx(priv->ioaddr);
1873 1866
1874#ifdef CONFIG_STMMAC_TIMER 1867#ifdef CONFIG_STMMAC_TIMER
1875 priv->tm->timer_start(tmrate); 1868 if (likely(priv->tm->enable))
1869 priv->tm->timer_start(tmrate);
1876#endif 1870#endif
1877 napi_enable(&priv->napi); 1871 napi_enable(&priv->napi);
1878 1872
1879 if (priv->phydev) 1873 if (priv->phydev)
1880 phy_start(priv->phydev); 1874 phy_start(priv->phydev);
1881 1875
1882 netif_start_queue(dev); 1876 netif_start_queue(ndev);
1883 1877
1884 spin_unlock(&priv->lock); 1878 spin_unlock(&priv->lock);
1885 return 0; 1879 return 0;
1886} 1880}
1887#endif
1888 1881
1889static struct platform_driver stmmac_driver = { 1882static int stmmac_freeze(struct device *dev)
1890 .driver = { 1883{
1891 .name = STMMAC_RESOURCE_NAME, 1884 struct net_device *ndev = dev_get_drvdata(dev);
1892 }, 1885
1893 .probe = stmmac_dvr_probe, 1886 if (!ndev || !netif_running(ndev))
1894 .remove = stmmac_dvr_remove, 1887 return 0;
1895#ifdef CONFIG_PM 1888
1889 return stmmac_release(ndev);
1890}
1891
1892static int stmmac_restore(struct device *dev)
1893{
1894 struct net_device *ndev = dev_get_drvdata(dev);
1895
1896 if (!ndev || !netif_running(ndev))
1897 return 0;
1898
1899 return stmmac_open(ndev);
1900}
1901
1902static const struct dev_pm_ops stmmac_pm_ops = {
1896 .suspend = stmmac_suspend, 1903 .suspend = stmmac_suspend,
1897 .resume = stmmac_resume, 1904 .resume = stmmac_resume,
1898#endif 1905 .freeze = stmmac_freeze,
1906 .thaw = stmmac_restore,
1907 .restore = stmmac_restore,
1908};
1909#else
1910static const struct dev_pm_ops stmmac_pm_ops;
1911#endif /* CONFIG_PM */
1899 1912
1913static struct platform_driver stmmac_driver = {
1914 .probe = stmmac_dvr_probe,
1915 .remove = stmmac_dvr_remove,
1916 .driver = {
1917 .name = STMMAC_RESOURCE_NAME,
1918 .owner = THIS_MODULE,
1919 .pm = &stmmac_pm_ops,
1920 },
1900}; 1921};
1901 1922
1902/** 1923/**
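The PM rework above replaces the legacy platform suspend/resume callbacks, which took a pm_message_t and multiplexed suspend against hibernation through PM_EVENT_* checks and the now-removed priv->shutdown flag, with dev_pm_ops, where suspend/resume and freeze/thaw/restore are separate entry points wired up through driver.pm. A minimal sketch of the same wiring for a hypothetical platform driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; } /* system sleep */
static int foo_resume(struct device *dev)  { return 0; }
static int foo_freeze(struct device *dev)  { return 0; } /* pre-hibernation */
static int foo_restore(struct device *dev) { return 0; } /* post-hibernation */

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
	.freeze  = foo_freeze,
	.thaw    = foo_restore,
	.restore = foo_restore,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
		.pm    = &foo_pm_ops,	/* replaces legacy .suspend/.resume */
	},
};

Stubbing the ops out with an empty const struct when CONFIG_PM is off, as the hunk does, keeps the .pm assignment unconditional.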
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357d..234b4068a1fc 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
53 int data; 53 int data;
54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
55 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 56 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
57 57
58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
59 writel(regValue, priv->ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
86 | MII_WRITE; 86 | MII_WRITE;
87 87
88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 88 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
89 89
90 90
91 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
114 114
115 if (priv->phy_reset) { 115 if (priv->phy_reset) {
116 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 116 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
117 priv->phy_reset(priv->bsp_priv); 117 priv->phy_reset(priv->plat->bsp_priv);
118 } 118 }
119 119
120 /* This is a workaround for problems with the STE101P PHY. 120 /* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
157 new_bus->read = &stmmac_mdio_read; 157 new_bus->read = &stmmac_mdio_read;
158 new_bus->write = &stmmac_mdio_write; 158 new_bus->write = &stmmac_mdio_write;
159 new_bus->reset = &stmmac_mdio_reset; 159 new_bus->reset = &stmmac_mdio_reset;
160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
161 new_bus->priv = ndev; 161 new_bus->priv = ndev;
162 new_bus->irq = irqlist; 162 new_bus->irq = irqlist;
163 new_bus->phy_mask = priv->phy_mask; 163 new_bus->phy_mask = priv->phy_mask;
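Only the source of the CSR clock divider changes here (priv->mii_clk_csr becomes priv->plat->clk_csr); the MII transaction shape stays the same: pack the PHY address, register number, and clock-selection bits into the address word, set MII_BUSY, then spin until the MAC clears it. A compact sketch of that polling step, using the same MII_BUSY flag as the hunks above:

/* Sketch: wait for the MAC's MII unit to finish the previous
 * transaction. 'ioaddr' is the mapped MAC base and 'mii_address'
 * the offset of the MII address register. */
static void mii_wait_idle(void __iomem *ioaddr, unsigned int mii_address)
{
	while (readl(ioaddr + mii_address) & MII_BUSY)
		cpu_relax();	/* busy-wait; no timeout in this sketch */
}

The divider lives in platform data because the correct value depends on the board's bus clock, not on the MAC itself.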
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4ceb3cf6a9a9..9e992ca4f543 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2380,10 +2380,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2380 */ 2380 */
2381 mutex_unlock(&gp->pm_mutex); 2381 mutex_unlock(&gp->pm_mutex);
2382 2382
2383 /* Wait for a pending reset task to complete */ 2383 /* Wait for the pending reset task to complete */
2384 while (gp->reset_task_pending) 2384 flush_work_sync(&gp->reset_task);
2385 yield();
2386 flush_scheduled_work();
2387 2385
2388 /* Shut the PHY down eventually and setup WOL */ 2386 /* Shut the PHY down eventually and setup WOL */
2389 gem_stop_phy(gp, gp->asleep_wol); 2387 gem_stop_phy(gp, gp->asleep_wol);
@@ -2928,10 +2926,8 @@ static void gem_remove_one(struct pci_dev *pdev)
2928 /* We shouldn't need any locking here */ 2926 /* We shouldn't need any locking here */
2929 gem_get_cell(gp); 2927 gem_get_cell(gp);
2930 2928
2931 /* Wait for a pending reset task to complete */ 2929 /* Cancel reset task */
2932 while (gp->reset_task_pending) 2930 cancel_work_sync(&gp->reset_task);
2933 yield();
2934 flush_scheduled_work();
2935 2931
2936 /* Shut the PHY down */ 2932 /* Shut the PHY down */
2937 gem_stop_phy(gp, 0); 2933 gem_stop_phy(gp, 0);
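Both sungem hunks retire the open-coded wait loop (spinning on gp->reset_task_pending with yield() and then flushing the whole system workqueue) in favour of the per-work primitives, and the two call sites want different semantics: suspend needs a pending reset to complete before the chip is powered down, so it uses flush_work_sync(); remove needs the work to never run again, so it uses cancel_work_sync(). A sketch of the distinction:

#include <linux/workqueue.h>

static void example_suspend_quiesce(struct work_struct *reset_task)
{
	flush_work_sync(reset_task);	/* run to completion, then return */
}

static void example_remove_quiesce(struct work_struct *reset_task)
{
	cancel_work_sync(reset_task);	/* drop if queued, wait if running */
}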
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 2cf84e5968b2..767e1e2b210d 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1295,17 +1295,9 @@ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvin
1295 strcpy(info->version, "2.02"); 1295 strcpy(info->version, "2.02");
1296} 1296}
1297 1297
1298static u32 sparc_lance_get_link(struct net_device *dev)
1299{
1300 /* We really do not keep track of this, but this
1301 * is better than not reporting anything at all.
1302 */
1303 return 1;
1304}
1305
1306static const struct ethtool_ops sparc_lance_ethtool_ops = { 1298static const struct ethtool_ops sparc_lance_ethtool_ops = {
1307 .get_drvinfo = sparc_lance_get_drvinfo, 1299 .get_drvinfo = sparc_lance_get_drvinfo,
1308 .get_link = sparc_lance_get_link, 1300 .get_link = ethtool_op_get_link,
1309}; 1301};
1310 1302
1311static const struct net_device_ops sparc_lance_ops = { 1303static const struct net_device_ops sparc_lance_ops = {
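The hand-rolled get_link unconditionally reported link-up; the generic ethtool_op_get_link() instead reports the carrier state the stack already tracks through netif_carrier_on()/netif_carrier_off(). Its behaviour is roughly this sketch (see net/core/ethtool.c for the real helper):

u32 ethtool_op_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev) ? 1 : 0;
}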
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097a..57e19fb1324f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,7 @@
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/mdio.h>
35#include <linux/mii.h> 36#include <linux/mii.h>
36#include <linux/phy.h> 37#include <linux/phy.h>
37#include <linux/brcmphy.h> 38#include <linux/brcmphy.h>
@@ -69,10 +70,10 @@
69 70
70#define DRV_MODULE_NAME "tg3" 71#define DRV_MODULE_NAME "tg3"
71#define TG3_MAJ_NUM 3 72#define TG3_MAJ_NUM 3
72#define TG3_MIN_NUM 115 73#define TG3_MIN_NUM 116
73#define DRV_MODULE_VERSION \ 74#define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 75 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75#define DRV_MODULE_RELDATE "October 14, 2010" 76#define DRV_MODULE_RELDATE "December 3, 2010"
76 77
77#define TG3_DEF_MAC_MODE 0 78#define TG3_DEF_MAC_MODE 0
78#define TG3_DEF_RX_MODE 0 79#define TG3_DEF_RX_MODE 0
@@ -1769,9 +1770,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1769 1770
1770 if (tp->link_config.autoneg == AUTONEG_ENABLE && 1771 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1771 current_link_up == 1 && 1772 current_link_up == 1 &&
1772 (tp->link_config.active_speed == SPEED_1000 || 1773 tp->link_config.active_duplex == DUPLEX_FULL &&
1773 (tp->link_config.active_speed == SPEED_100 && 1774 (tp->link_config.active_speed == SPEED_100 ||
1774 tp->link_config.active_duplex == DUPLEX_FULL))) { 1775 tp->link_config.active_speed == SPEED_1000)) {
1775 u32 eeectl; 1776 u32 eeectl;
1776 1777
1777 if (tp->link_config.active_speed == SPEED_1000) 1778 if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,7 +1782,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1781 1782
1782 tw32(TG3_CPMU_EEE_CTRL, eeectl); 1783 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1783 1784
1784 tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val); 1785 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1786 TG3_CL45_D7_EEERES_STAT, &val);
1785 1787
1786 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1788 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1787 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1789 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
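Note: the rewritten condition in tg3_phy_eee_adjust() makes full duplex a hard requirement at both speeds; the old test accepted 1000 Mb/s regardless of duplex, even though EEE is only defined for full-duplex links. A hypothetical predicate capturing the new logic (the live current_link_up check is left out here):

    static bool tg3_eee_usable(const struct tg3_link_config *lc)
    {
            return lc->autoneg == AUTONEG_ENABLE &&
                   lc->active_duplex == DUPLEX_FULL &&
                   (lc->active_speed == SPEED_100 ||
                    lc->active_speed == SPEED_1000);
    }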
@@ -2728,12 +2730,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) 2730 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2731 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2730 2732
2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2733 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2732 mac_mode |= tp->mac_mode & 2734 mac_mode |= MAC_MODE_APE_TX_EN |
2733 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2735 MAC_MODE_APE_RX_EN |
2734 if (mac_mode & MAC_MODE_APE_TX_EN) 2736 MAC_MODE_TDE_ENABLE;
2735 mac_mode |= MAC_MODE_TDE_ENABLE;
2736 }
2737 2737
2738 tw32_f(MAC_MODE, mac_mode); 2738 tw32_f(MAC_MODE, mac_mode);
2739 udelay(100); 2739 udelay(100);
@@ -2969,7 +2969,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2969 } 2969 }
2970 2970
2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { 2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2972 u32 val = 0; 2972 u32 val;
2973 2973
2974 tw32(TG3_CPMU_EEE_MODE, 2974 tw32(TG3_CPMU_EEE_MODE,
2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2986,19 +2986,18 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
2987 val | MII_TG3_DSP_CH34TP2_HIBW01); 2987 val | MII_TG3_DSP_CH34TP2_HIBW01);
2988 2988
2989 val = 0;
2989 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 2990 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2990 /* Advertise 100-BaseTX EEE ability */ 2991 /* Advertise 100-BaseTX EEE ability */
2991 if (tp->link_config.advertising & 2992 if (tp->link_config.advertising &
2992 (ADVERTISED_100baseT_Half | 2993 ADVERTISED_100baseT_Full)
2993 ADVERTISED_100baseT_Full)) 2994 val |= MDIO_AN_EEE_ADV_100TX;
2994 val |= TG3_CL45_D7_EEEADV_CAP_100TX;
2995 /* Advertise 1000-BaseT EEE ability */ 2995 /* Advertise 1000-BaseT EEE ability */
2996 if (tp->link_config.advertising & 2996 if (tp->link_config.advertising &
2997 (ADVERTISED_1000baseT_Half | 2997 ADVERTISED_1000baseT_Full)
2998 ADVERTISED_1000baseT_Full)) 2998 val |= MDIO_AN_EEE_ADV_1000T;
2999 val |= TG3_CL45_D7_EEEADV_CAP_1000T;
3000 } 2999 }
3001 tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val); 3000 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3002 3001
3003 /* Turn off SM_DSP clock. */ 3002 /* Turn off SM_DSP clock. */
3004 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | 3003 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
@@ -5763,7 +5762,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5763 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5762 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5764 5763
5765 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5764 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5766 !mss && skb->len > ETH_DATA_LEN) 5765 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5767 base_flags |= TXD_FLAG_JMB_PKT; 5766 base_flags |= TXD_FLAG_JMB_PKT;
5768 5767
5769 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5768 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5997,7 +5996,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5997#endif 5996#endif
5998 5997
5999 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5998 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6000 !mss && skb->len > ETH_DATA_LEN) 5999 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6001 base_flags |= TXD_FLAG_JMB_PKT; 6000 base_flags |= TXD_FLAG_JMB_PKT;
6002 6001
6003 len = skb_headlen(skb); 6002 len = skb_headlen(skb);
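Note: both transmit paths switch the jumbo-descriptor threshold from ETH_DATA_LEN to VLAN_ETH_FRAME_LEN. skb->len at this point includes the link-layer header, so the old 1500-byte cutoff flagged ordinary full-size frames (1514 bytes on the wire, sans FCS) as jumbo; the new cutoff exempts both plain and 802.1Q-tagged full-size frames. The relevant constants, for reference:

    /* From include/linux/if_ether.h and include/linux/if_vlan.h: */
    #define ETH_HLEN            14      /* Ethernet header */
    #define ETH_DATA_LEN        1500    /* maximum payload */
    #define ETH_FRAME_LEN       1514    /* 1500 + 14, sans FCS */
    #define VLAN_ETH_FRAME_LEN  1518    /* 1514 + 4-byte 802.1Q tag */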
@@ -6339,13 +6338,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6339 kfree(tpr->rx_jmb_buffers); 6338 kfree(tpr->rx_jmb_buffers);
6340 tpr->rx_jmb_buffers = NULL; 6339 tpr->rx_jmb_buffers = NULL;
6341 if (tpr->rx_std) { 6340 if (tpr->rx_std) {
6342 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6341 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6343 tpr->rx_std, tpr->rx_std_mapping); 6342 tpr->rx_std, tpr->rx_std_mapping);
6344 tpr->rx_std = NULL; 6343 tpr->rx_std = NULL;
6345 } 6344 }
6346 if (tpr->rx_jmb) { 6345 if (tpr->rx_jmb) {
6347 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp), 6346 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6348 tpr->rx_jmb, tpr->rx_jmb_mapping); 6347 tpr->rx_jmb, tpr->rx_jmb_mapping);
6349 tpr->rx_jmb = NULL; 6348 tpr->rx_jmb = NULL;
6350 } 6349 }
6351} 6350}
@@ -6358,8 +6357,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6358 if (!tpr->rx_std_buffers) 6357 if (!tpr->rx_std_buffers)
6359 return -ENOMEM; 6358 return -ENOMEM;
6360 6359
6361 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6360 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6362 &tpr->rx_std_mapping); 6361 TG3_RX_STD_RING_BYTES(tp),
6362 &tpr->rx_std_mapping,
6363 GFP_KERNEL);
6363 if (!tpr->rx_std) 6364 if (!tpr->rx_std)
6364 goto err_out; 6365 goto err_out;
6365 6366
@@ -6370,9 +6371,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6370 if (!tpr->rx_jmb_buffers) 6371 if (!tpr->rx_jmb_buffers)
6371 goto err_out; 6372 goto err_out;
6372 6373
6373 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6374 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6374 TG3_RX_JMB_RING_BYTES(tp), 6375 TG3_RX_JMB_RING_BYTES(tp),
6375 &tpr->rx_jmb_mapping); 6376 &tpr->rx_jmb_mapping,
6377 GFP_KERNEL);
6376 if (!tpr->rx_jmb) 6378 if (!tpr->rx_jmb)
6377 goto err_out; 6379 goto err_out;
6378 } 6380 }
@@ -6491,7 +6493,7 @@ static void tg3_free_consistent(struct tg3 *tp)
6491 struct tg3_napi *tnapi = &tp->napi[i]; 6493 struct tg3_napi *tnapi = &tp->napi[i];
6492 6494
6493 if (tnapi->tx_ring) { 6495 if (tnapi->tx_ring) {
6494 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 6496 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6495 tnapi->tx_ring, tnapi->tx_desc_mapping); 6497 tnapi->tx_ring, tnapi->tx_desc_mapping);
6496 tnapi->tx_ring = NULL; 6498 tnapi->tx_ring = NULL;
6497 } 6499 }
@@ -6500,25 +6502,26 @@ static void tg3_free_consistent(struct tg3 *tp)
6500 tnapi->tx_buffers = NULL; 6502 tnapi->tx_buffers = NULL;
6501 6503
6502 if (tnapi->rx_rcb) { 6504 if (tnapi->rx_rcb) {
6503 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 6505 dma_free_coherent(&tp->pdev->dev,
6504 tnapi->rx_rcb, 6506 TG3_RX_RCB_RING_BYTES(tp),
6505 tnapi->rx_rcb_mapping); 6507 tnapi->rx_rcb,
6508 tnapi->rx_rcb_mapping);
6506 tnapi->rx_rcb = NULL; 6509 tnapi->rx_rcb = NULL;
6507 } 6510 }
6508 6511
6509 tg3_rx_prodring_fini(tp, &tnapi->prodring); 6512 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6510 6513
6511 if (tnapi->hw_status) { 6514 if (tnapi->hw_status) {
6512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6515 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6513 tnapi->hw_status, 6516 tnapi->hw_status,
6514 tnapi->status_mapping); 6517 tnapi->status_mapping);
6515 tnapi->hw_status = NULL; 6518 tnapi->hw_status = NULL;
6516 } 6519 }
6517 } 6520 }
6518 6521
6519 if (tp->hw_stats) { 6522 if (tp->hw_stats) {
6520 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 6523 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6521 tp->hw_stats, tp->stats_mapping); 6524 tp->hw_stats, tp->stats_mapping);
6522 tp->hw_stats = NULL; 6525 tp->hw_stats = NULL;
6523 } 6526 }
6524} 6527}
@@ -6531,9 +6534,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6531{ 6534{
6532 int i; 6535 int i;
6533 6536
6534 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6537 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6535 sizeof(struct tg3_hw_stats), 6538 sizeof(struct tg3_hw_stats),
6536 &tp->stats_mapping); 6539 &tp->stats_mapping,
6540 GFP_KERNEL);
6537 if (!tp->hw_stats) 6541 if (!tp->hw_stats)
6538 goto err_out; 6542 goto err_out;
6539 6543
@@ -6543,9 +6547,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6543 struct tg3_napi *tnapi = &tp->napi[i]; 6547 struct tg3_napi *tnapi = &tp->napi[i];
6544 struct tg3_hw_status *sblk; 6548 struct tg3_hw_status *sblk;
6545 6549
6546 tnapi->hw_status = pci_alloc_consistent(tp->pdev, 6550 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6547 TG3_HW_STATUS_SIZE, 6551 TG3_HW_STATUS_SIZE,
6548 &tnapi->status_mapping); 6552 &tnapi->status_mapping,
6553 GFP_KERNEL);
6549 if (!tnapi->hw_status) 6554 if (!tnapi->hw_status)
6550 goto err_out; 6555 goto err_out;
6551 6556
@@ -6566,9 +6571,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6566 if (!tnapi->tx_buffers) 6571 if (!tnapi->tx_buffers)
6567 goto err_out; 6572 goto err_out;
6568 6573
6569 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, 6574 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6570 TG3_TX_RING_BYTES, 6575 TG3_TX_RING_BYTES,
6571 &tnapi->tx_desc_mapping); 6576 &tnapi->tx_desc_mapping,
6577 GFP_KERNEL);
6572 if (!tnapi->tx_ring) 6578 if (!tnapi->tx_ring)
6573 goto err_out; 6579 goto err_out;
6574 } 6580 }
@@ -6601,9 +6607,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6601 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6607 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 continue; 6608 continue;
6603 6609
6604 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, 6610 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6605 TG3_RX_RCB_RING_BYTES(tp), 6611 TG3_RX_RCB_RING_BYTES(tp),
6606 &tnapi->rx_rcb_mapping); 6612 &tnapi->rx_rcb_mapping,
6613 GFP_KERNEL);
6607 if (!tnapi->rx_rcb) 6614 if (!tnapi->rx_rcb)
6608 goto err_out; 6615 goto err_out;
6609 6616
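Note: these hunks (and the ones that follow) convert tg3 from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API. The conversion is mechanical; the one substantive difference is that dma_alloc_coherent() takes an explicit gfp_t, where the PCI wrapper hard-codes GFP_ATOMIC. The compat wrapper is roughly:

    /* Roughly what include/asm-generic/pci-dma-compat.h provides: */
    static inline void *
    pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                         dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(hwdev ? &hwdev->dev : NULL,
                                      size, dma_handle, GFP_ATOMIC);
    }

Passing GFP_KERNEL in these process-context allocation paths lets them sleep instead of dipping into atomic reserves.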
@@ -6987,7 +6994,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6987 6994
6988 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 6995 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6989 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6996 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6990 pcie_set_readrq(tp->pdev, 4096); 6997 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6991 else { 6998 else {
6992 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 6999 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6993 tp->pci_cacheline_sz); 7000 tp->pci_cacheline_sz);
@@ -7181,7 +7188,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7181 tp->pcie_cap + PCI_EXP_DEVCTL, 7188 tp->pcie_cap + PCI_EXP_DEVCTL,
7182 val16); 7189 val16);
7183 7190
7184 pcie_set_readrq(tp->pdev, 4096); 7191 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7185 7192
7186 /* Clear error status */ 7193 /* Clear error status */
7187 pci_write_config_word(tp->pdev, 7194 pci_write_config_word(tp->pdev,
@@ -7222,19 +7229,21 @@ static int tg3_chip_reset(struct tg3 *tp)
7222 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7229 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7223 } 7230 }
7224 7231
7232 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7233 tp->mac_mode = MAC_MODE_APE_TX_EN |
7234 MAC_MODE_APE_RX_EN |
7235 MAC_MODE_TDE_ENABLE;
7236
7225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 7237 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7226 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7238 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7227 tw32_f(MAC_MODE, tp->mac_mode); 7239 val = tp->mac_mode;
7228 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 7240 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7229 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7241 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7230 tw32_f(MAC_MODE, tp->mac_mode); 7242 val = tp->mac_mode;
7231 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7232 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7233 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7234 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7235 tw32_f(MAC_MODE, tp->mac_mode);
7236 } else 7243 } else
7237 tw32_f(MAC_MODE, 0); 7244 val = 0;
7245
7246 tw32_f(MAC_MODE, val);
7238 udelay(40); 7247 udelay(40);
7239 7248
7240 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 7249 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7810,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7801 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) 7810 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7802 tg3_abort_hw(tp, 1); 7811 tg3_abort_hw(tp, 1);
7803 7812
7813 /* Enable MAC control of LPI */
7814 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7815 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7816 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7817 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7818
7819 tw32_f(TG3_CPMU_EEE_CTRL,
7820 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7821
7822 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7823 TG3_CPMU_EEEMD_LPI_IN_TX |
7824 TG3_CPMU_EEEMD_LPI_IN_RX |
7825 TG3_CPMU_EEEMD_EEE_ENABLE;
7826
7827 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7828 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7829
7830 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7831 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7832
7833 tw32_f(TG3_CPMU_EEE_MODE, val);
7834
7835 tw32_f(TG3_CPMU_EEE_DBTMR1,
7836 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7837 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7838
7839 tw32_f(TG3_CPMU_EEE_DBTMR2,
7840 TG3_CPMU_DBTMR1_APE_TX_2047US |
7841 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7842 }
7843
7804 if (reset_phy) 7844 if (reset_phy)
7805 tg3_phy_reset(tp); 7845 tg3_phy_reset(tp);
7806 7846
@@ -7860,18 +7900,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7860 tw32(GRC_MODE, grc_mode); 7900 tw32(GRC_MODE, grc_mode);
7861 } 7901 }
7862 7902
7863 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 7903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7864 u32 grc_mode = tr32(GRC_MODE); 7904 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7905 u32 grc_mode = tr32(GRC_MODE);
7865 7906
7866 /* Access the lower 1K of PL PCIE block registers. */ 7907 /* Access the lower 1K of PL PCIE block registers. */
7867 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7908 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7868 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7909 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7869 7910
7870 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); 7911 val = tr32(TG3_PCIE_TLDLPL_PORT +
7871 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 7912 TG3_PCIE_PL_LO_PHYCTL5);
7872 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 7913 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7914 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7873 7915
7874 tw32(GRC_MODE, grc_mode); 7916 tw32(GRC_MODE, grc_mode);
7917 }
7875 7918
7876 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7919 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7877 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7920 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7922,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7922 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7880 } 7923 }
7881 7924
7882 /* Enable MAC control of LPI */
7883 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7884 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7885 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7886 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7887
7888 tw32_f(TG3_CPMU_EEE_CTRL,
7889 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7890
7891 tw32_f(TG3_CPMU_EEE_MODE,
7892 TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7893 TG3_CPMU_EEEMD_LPI_IN_TX |
7894 TG3_CPMU_EEEMD_LPI_IN_RX |
7895 TG3_CPMU_EEEMD_EEE_ENABLE);
7896 }
7897
7898 /* This works around an issue with Athlon chipsets on 7925 /* This works around an issue with Athlon chipsets on
7899 * B3 tigon3 silicon. This bit has no effect on any 7926 * B3 tigon3 silicon. This bit has no effect on any
7900 * other revision. But do not set this on PCI Express 7927 * other revision. But do not set this on PCI Express
@@ -8162,8 +8189,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8162 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 8189 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8163 RDMAC_MODE_LNGREAD_ENAB); 8190 RDMAC_MODE_LNGREAD_ENAB);
8164 8191
8165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8167 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 8193 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8168 8194
8169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8229,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8204 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8230 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8205 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8231 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8233 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
8234 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
8235 }
8206 tw32(TG3_RDMA_RSRVCTRL_REG, 8236 tw32(TG3_RDMA_RSRVCTRL_REG,
8207 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8237 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8208 } 8238 }
@@ -8280,7 +8310,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8280 } 8310 }
8281 8311
8282 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8312 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8283 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8313 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8284 else 8314 else
8285 tp->mac_mode = 0; 8315 tp->mac_mode = 0;
8286 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8316 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9061,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
9031 pci_disable_msix(tp->pdev); 9061 pci_disable_msix(tp->pdev);
9032 return false; 9062 return false;
9033 } 9063 }
9034 if (tp->irq_cnt > 1) 9064
9065 if (tp->irq_cnt > 1) {
9035 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9066 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9068 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9069 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9070 }
9071 }
9036 9072
9037 return true; 9073 return true;
9038} 9074}
@@ -12411,8 +12447,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12411 if (cfg2 & (1 << 18)) 12447 if (cfg2 & (1 << 18))
12412 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12448 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12413 12449
12414 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12450 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
12415 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12451 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12452 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12416 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12453 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12417 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 12454 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12418 12455
@@ -12548,9 +12585,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12548 } 12585 }
12549 } 12586 }
12550 12587
12551 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12588 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12552 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12589 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12553 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)) 12590 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12591 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12592 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12554 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 12593 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12555 12594
12556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12595 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
@@ -13359,7 +13398,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13359 13398
13360 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13399 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13361 13400
13362 pcie_set_readrq(tp->pdev, 4096); 13401 tp->pcie_readrq = 4096;
13402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13403 u16 word;
13404
13405 pci_read_config_word(tp->pdev,
13406 tp->pcie_cap + PCI_EXP_LNKSTA,
13407 &word);
13408 switch (word & PCI_EXP_LNKSTA_CLS) {
13409 case PCI_EXP_LNKSTA_CLS_2_5GB:
13410 word &= PCI_EXP_LNKSTA_NLW;
13411 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13412 switch (word) {
13413 case 2:
13414 tp->pcie_readrq = 2048;
13415 break;
13416 case 4:
13417 tp->pcie_readrq = 1024;
13418 break;
13419 }
13420 break;
13421
13422 case PCI_EXP_LNKSTA_CLS_5_0GB:
13423 word &= PCI_EXP_LNKSTA_NLW;
13424 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13425 switch (word) {
13426 case 1:
13427 tp->pcie_readrq = 2048;
13428 break;
13429 case 2:
13430 tp->pcie_readrq = 1024;
13431 break;
13432 case 4:
13433 tp->pcie_readrq = 512;
13434 break;
13435 }
13436 }
13437 }
13438
13439 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13363 13440
13364 pci_read_config_word(tp->pdev, 13441 pci_read_config_word(tp->pdev,
13365 tp->pcie_cap + PCI_EXP_LNKCTL, 13442 tp->pcie_cap + PCI_EXP_LNKCTL,
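Note: the new block sizes the PCIe maximum read request on 5719 devices to the negotiated link, so that a single DMA read cannot monopolize a slow or narrow link. The cases above reduce to a simple ratio; a hypothetical condensation (only the listed speed/width combinations are actually overridden by the patch):

    /* Hypothetical helper: the table is 4096 / (gen * width), where
     * gen is 1 at 2.5 GT/s and 2 at 5.0 GT/s.
     */
    static u16 tg3_5719_readrq(u16 lnksta)
    {
            u16 gen = lnksta & PCI_EXP_LNKSTA_CLS;
            u16 width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
                        PCI_EXP_LNKSTA_NLW_SHIFT;

            if (!gen || !width)
                    return 4096;            /* keep the default */
            return 4096 / (gen * width);
    }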
@@ -13722,8 +13799,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13722 13799
13723 /* Preserve the APE MAC_MODE bits */ 13800 /* Preserve the APE MAC_MODE bits */
13724 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 13801 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13725 tp->mac_mode = tr32(MAC_MODE) | 13802 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13726 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13727 else 13803 else
13728 tp->mac_mode = TG3_DEF_MAC_MODE; 13804 tp->mac_mode = TG3_DEF_MAC_MODE;
13729 13805
@@ -14159,7 +14235,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14159 u32 *buf, saved_dma_rwctrl; 14235 u32 *buf, saved_dma_rwctrl;
14160 int ret = 0; 14236 int ret = 0;
14161 14237
14162 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 14238 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14239 &buf_dma, GFP_KERNEL);
14163 if (!buf) { 14240 if (!buf) {
14164 ret = -ENOMEM; 14241 ret = -ENOMEM;
14165 goto out_nofree; 14242 goto out_nofree;
@@ -14343,7 +14420,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14343 } 14420 }
14344 14421
14345out: 14422out:
14346 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 14423 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14347out_nofree: 14424out_nofree:
14348 return ret; 14425 return ret;
14349} 14426}
@@ -14957,7 +15034,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
14957 if (tp->fw) 15034 if (tp->fw)
14958 release_firmware(tp->fw); 15035 release_firmware(tp->fw);
14959 15036
14960 flush_scheduled_work(); 15037 cancel_work_sync(&tp->reset_task);
14961 15038
14962 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 15039 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14963 tg3_phy_fini(tp); 15040 tg3_phy_fini(tp);
@@ -14996,7 +15073,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14996 if (!netif_running(dev)) 15073 if (!netif_running(dev))
14997 return 0; 15074 return 0;
14998 15075
14999 flush_scheduled_work(); 15076 flush_work_sync(&tp->reset_task);
15000 tg3_phy_stop(tp); 15077 tg3_phy_stop(tp);
15001 tg3_netif_stop(tp); 15078 tg3_netif_stop(tp);
15002 15079
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9f..d62c8d937c82 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1094,13 +1094,19 @@
1094/* 0x3664 --> 0x36b0 unused */ 1094/* 0x3664 --> 0x36b0 unused */
1095 1095
1096#define TG3_CPMU_EEE_MODE 0x000036b0 1096#define TG3_CPMU_EEE_MODE 0x000036b0
1097#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 1097#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
1098#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 1098#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
1099#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 1099#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
1100#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 1100#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
1101#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 1101#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
1102/* 0x36b4 --> 0x36b8 unused */ 1102#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
1103 1103#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
1104#define TG3_CPMU_EEE_DBTMR1 0x000036b4
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1104#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1105#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
1106#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 1112#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -1327,6 +1333,8 @@
1327 1333
1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1330/* 0x4904 --> 0x4910 unused */ 1338/* 0x4904 --> 0x4910 unused */
1331 1339
1332#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 1340#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2170,9 +2178,6 @@
2170#define MII_TG3_TEST1_CRC_EN 0x8000 2178#define MII_TG3_TEST1_CRC_EN 0x8000
2171 2179
2172/* Clause 45 expansion registers */ 2180/* Clause 45 expansion registers */
2173#define TG3_CL45_D7_EEEADV_CAP 0x003c
2174#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
2175#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
2176#define TG3_CL45_D7_EEERES_STAT 0x803e 2181#define TG3_CL45_D7_EEERES_STAT 0x803e
2177#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 2182#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
2178#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 2183#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
@@ -2562,10 +2567,6 @@ struct ring_info {
2562 DEFINE_DMA_UNMAP_ADDR(mapping); 2567 DEFINE_DMA_UNMAP_ADDR(mapping);
2563}; 2568};
2564 2569
2565struct tg3_config_info {
2566 u32 flags;
2567};
2568
2569struct tg3_link_config { 2570struct tg3_link_config {
2570 /* Describes what we're trying to get. */ 2571 /* Describes what we're trying to get. */
2571 u32 advertising; 2572 u32 advertising;
@@ -2713,17 +2714,17 @@ struct tg3_napi {
2713 u32 last_irq_tag; 2714 u32 last_irq_tag;
2714 u32 int_mbox; 2715 u32 int_mbox;
2715 u32 coal_now; 2716 u32 coal_now;
2716 u32 tx_prod;
2717 u32 tx_cons;
2718 u32 tx_pending;
2719 u32 prodmbox;
2720 2717
2721 u32 consmbox; 2718 u32 consmbox ____cacheline_aligned;
2722 u32 rx_rcb_ptr; 2719 u32 rx_rcb_ptr;
2723 u16 *rx_rcb_prod_idx; 2720 u16 *rx_rcb_prod_idx;
2724 struct tg3_rx_prodring_set prodring; 2721 struct tg3_rx_prodring_set prodring;
2725
2726 struct tg3_rx_buffer_desc *rx_rcb; 2722 struct tg3_rx_buffer_desc *rx_rcb;
2723
2724 u32 tx_prod ____cacheline_aligned;
2725 u32 tx_cons;
2726 u32 tx_pending;
2727 u32 prodmbox;
2727 struct tg3_tx_buffer_desc *tx_ring; 2728 struct tg3_tx_buffer_desc *tx_ring;
2728 struct ring_info *tx_buffers; 2729 struct ring_info *tx_buffers;
2729 2730
@@ -2946,6 +2947,7 @@ struct tg3 {
2946 int pcix_cap; 2947 int pcix_cap;
2947 int pcie_cap; 2948 int pcie_cap;
2948 }; 2949 };
2950 int pcie_readrq;
2949 2951
2950 struct mii_bus *mdio_bus; 2952 struct mii_bus *mdio_bus;
2951 int mdio_irq[PHY_MAX_ADDR]; 2953 int mdio_irq[PHY_MAX_ADDR];
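Note: the tg3_napi reshuffle plus the two ____cacheline_aligned markers split the RX-path fields (consmbox, rx_rcb_ptr, the producer ring) and the TX-path fields (tx_prod, tx_cons, prodmbox) onto separate cache lines. TX completion and RX polling can run on different CPUs; keeping their hot counters apart avoids false sharing, where two CPUs bounce one cache line back and forth. The pattern in miniature, with hypothetical field names:

    struct hot_path_state {
            /* ...fields touched by both paths... */

            u32 rx_cursor ____cacheline_aligned;    /* RX-only from here */
            /* ...more RX-only fields... */

            u32 tx_cursor ____cacheline_aligned;    /* TX-only from here */
            /* ...more TX-only fields... */
    };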
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..c78a50586c1d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2021 de->media_timer.data = (unsigned long) de; 2021 de->media_timer.data = (unsigned long) de;
2022 2022
2023 netif_carrier_off(dev); 2023 netif_carrier_off(dev);
2024 netif_stop_queue(dev);
2025 2024
2026 /* wake up device, assign resources */ 2025 /* wake up device, assign resources */
2027 rc = pci_enable_device(pdev); 2026 rc = pci_enable_device(pdev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d1a269..7064e035757a 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
688 688
689 DMFE_DBUG(0, "dmfe_start_xmit", 0); 689 DMFE_DBUG(0, "dmfe_start_xmit", 0);
690 690
691 /* Resource flag check */
692 netif_stop_queue(dev);
693
694 /* Too large packet check */ 691 /* Too large packet check */
695 if (skb->len > MAX_PACKET_SIZE) { 692 if (skb->len > MAX_PACKET_SIZE) {
696 pr_err("big packet = %d\n", (u16)skb->len); 693 pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
698 return NETDEV_TX_OK; 695 return NETDEV_TX_OK;
699 } 696 }
700 697
698 /* Resource flag check */
699 netif_stop_queue(dev);
700
701 spin_lock_irqsave(&db->lock, flags); 701 spin_lock_irqsave(&db->lock, flags);
702 702
703 /* No Tx resource check, it never happens normally */ 703 /* No Tx resource check, it never happens normally */
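Note: the dmfe reorder matters because the oversize-packet path frees the skb and returns NETDEV_TX_OK: had the queue already been stopped, no TX completion would ever wake it again, wedging transmit. Stopping the queue only once the packet is committed keeps the stop paired with a completion that restarts it. The safe shape of such a handler, sketched with hypothetical names:

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            if (skb->len > FOO_MAX_PACKET) {        /* early reject */
                    dev_kfree_skb_any(skb);
                    return NETDEV_TX_OK;            /* queue still awake */
            }

            netif_stop_queue(dev);  /* TX-done interrupt will restart it */
            /* ...post the descriptor, ring the doorbell... */
            return NETDEV_TX_OK;
    }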
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2050 2050
2051 ugeth_vdbg("%s: IN", __func__); 2051 ugeth_vdbg("%s: IN", __func__);
2052 2052
2053 /*
2054 * Tell the kernel the link is down.
2055 * Must be done before disabling the controller
2056 * or deadlock may happen.
2057 */
2058 phy_stop(phydev);
2059
2053 /* Disable the controller */ 2060 /* Disable the controller */
2054 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2061 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2055 2062
2056 /* Tell the kernel the link is down */
2057 phy_stop(phydev);
2058
2059 /* Mask all interrupts */ 2063 /* Mask all interrupts */
2060 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2064 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2061 2065
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2065 /* Disable Rx and Tx */ 2069 /* Disable Rx and Tx */
2066 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2070 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2067 2071
2068 phy_disconnect(ugeth->phydev);
2069 ugeth->phydev = NULL;
2070
2071 ucc_geth_memclean(ugeth); 2072 ucc_geth_memclean(ugeth);
2072} 2073}
2073 2074
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
3550 3551
3551 napi_disable(&ugeth->napi); 3552 napi_disable(&ugeth->napi);
3552 3553
3554 cancel_work_sync(&ugeth->timeout_work);
3553 ucc_geth_stop(ugeth); 3555 ucc_geth_stop(ugeth);
3556 phy_disconnect(ugeth->phydev);
3557 ugeth->phydev = NULL;
3554 3558
3555 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3559 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
3556 3560
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
3579 * Must reset MAC *and* PHY. This is done by reopening 3583 * Must reset MAC *and* PHY. This is done by reopening
3580 * the device. 3584 * the device.
3581 */ 3585 */
3582 ucc_geth_close(dev); 3586 netif_tx_stop_all_queues(dev);
3583 ucc_geth_open(dev); 3587 ucc_geth_stop(ugeth);
3588 ucc_geth_init_mac(ugeth);
3589 /* Must start PHY here */
3590 phy_start(ugeth->phydev);
3591 netif_tx_start_all_queues(dev);
3584 } 3592 }
3585 3593
3586 netif_tx_schedule_all(dev); 3594 netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
3594{ 3602{
3595 struct ucc_geth_private *ugeth = netdev_priv(dev); 3603 struct ucc_geth_private *ugeth = netdev_priv(dev);
3596 3604
3597 netif_carrier_off(dev);
3598 schedule_work(&ugeth->timeout_work); 3605 schedule_work(&ugeth->timeout_work);
3599} 3606}
3600 3607
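Note: two points in the ucc_geth rework. First, ucc_geth_close() now cancels the timeout work before tearing things down, so the timeout handler can no longer recover by calling close()/open() on itself (cancel_work_sync() invoked from inside the very work being cancelled would deadlock); it instead stops the queues, re-initializes only the MAC, and restarts the PHY. Second, phy_stop() moves ahead of ugeth_disable(), per the new comment, because disabling the controller first can deadlock. The recovery sequence as the patch shapes it:

    netif_tx_stop_all_queues(dev);
    ucc_geth_stop(ugeth);           /* phy_stop() first, then MAC off */
    ucc_geth_init_mac(ugeth);       /* re-init the MAC, keep the PHY */
    phy_start(ugeth->phydev);
    netif_tx_start_all_queues(dev);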
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size 899#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
900 */ 900 */
901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ 901#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
902#define UCC_GETH_UTFTT_INIT 512 902#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
903 due to errata */
903/* Gigabit Ethernet (1000 Mbps) */ 904/* Gigabit Ethernet (1000 Mbps) */
904#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual 905#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
905 FIFO size */ 906 FIFO size */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe6db0e..6f600cced6e1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
196 IEEE 802 "local assignment" bit is set in the address, a "usbX" 196 IEEE 802 "local assignment" bit is set in the address, a "usbX"
197 name is used instead. 197 name is used instead.
198 198
199config USB_NET_CDC_NCM
200 tristate "CDC NCM support"
201 depends on USB_USBNET
202 default y
203 help
204 This driver provides support for CDC NCM (Network Control Model
205 Device USB Class Specification). The CDC NCM specification is
206 available from <http://www.usb.org/>.
207
208 Say "y" to link the driver statically, or "m" to build a
209 dynamically linked module.
210
211 This driver should work with at least the following devices:
212 * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
213 * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
214 * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
215 * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
216 * Ericsson F5521gw Mobile Broadband Module
217
199config USB_NET_DM9601 218config USB_NET_DM9601
200 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 219 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
201 depends on USB_USBNET 220 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b0259ae16..cac170301187 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
29obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
29 30
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 000000000000..593c104ab199
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1213 @@
1/*
2 * cdc_ncm.c
3 *
4 * Copyright (C) ST-Ericsson 2010
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 *
8 * USB Host Driver for Network Control Model (NCM)
9 * http://www.usb.org/developers/devclass_docs/NCM10.zip
10 *
11 * The NCM encoding, decoding and initialization logic
12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
13 *
14 * This software is available to you under a choice of one of two
15 * licenses. You may choose this file to be licensed under the terms
16 * of the GNU General Public License (GPL) Version 2 or the 2-clause
17 * BSD license listed below:
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 */
40
41#include <linux/module.h>
42#include <linux/init.h>
43#include <linux/netdevice.h>
44#include <linux/ctype.h>
45#include <linux/ethtool.h>
46#include <linux/workqueue.h>
47#include <linux/mii.h>
48#include <linux/crc32.h>
49#include <linux/usb.h>
50#include <linux/version.h>
51#include <linux/timer.h>
52#include <linux/spinlock.h>
53#include <linux/atomic.h>
54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h>
56
57#define DRIVER_VERSION "30-Nov-2010"
58
59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
61
62/* Maximum NTB length */
63#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
64#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
65
66/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
67#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
68
69#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
70
71/* Default value for MaxDatagramSize */
72#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
73
74/*
75 * Maximum number of datagrams in the NCM Datagram Pointer Table, not
76 * counting the last NULL entry. Any additional datagrams in an NTB are discarded.
77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79
80/* Restart the timer if the number of datagrams is less than the given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82
83/* The following macro defines the minimum header space */
84#define CDC_NCM_MIN_HDR_SIZE \
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16;
96 struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
97};
98
99struct cdc_ncm_ctx {
100 struct cdc_ncm_data rx_ncm;
101 struct cdc_ncm_data tx_ncm;
102 struct usb_cdc_ncm_ntb_parameters ncm_parm;
103 struct timer_list tx_timer;
104
105 const struct usb_cdc_ncm_desc *func_desc;
106 const struct usb_cdc_header_desc *header_desc;
107 const struct usb_cdc_union_desc *union_desc;
108 const struct usb_cdc_ether_desc *ether_desc;
109
110 struct net_device *netdev;
111 struct usb_device *udev;
112 struct usb_host_endpoint *in_ep;
113 struct usb_host_endpoint *out_ep;
114 struct usb_host_endpoint *status_ep;
115 struct usb_interface *intf;
116 struct usb_interface *control;
117 struct usb_interface *data;
118
119 struct sk_buff *tx_curr_skb;
120 struct sk_buff *tx_rem_skb;
121
122 spinlock_t mtx;
123
124 u32 tx_timer_pending;
125 u32 tx_curr_offset;
126 u32 tx_curr_last_offset;
127 u32 tx_curr_frame_num;
128 u32 rx_speed;
129 u32 tx_speed;
130 u32 rx_max;
131 u32 tx_max;
132 u32 max_datagram_size;
133 u16 tx_max_datagrams;
134 u16 tx_remainder;
135 u16 tx_modulus;
136 u16 tx_ndp_modulus;
137 u16 tx_seq;
138 u16 connected;
139 u8 data_claimed;
140 u8 control_claimed;
141};
142
143static void cdc_ncm_tx_timeout(unsigned long arg);
144static const struct driver_info cdc_ncm_info;
145static struct usb_driver cdc_ncm_driver;
146static struct ethtool_ops cdc_ncm_ethtool_ops;
147
148static const struct usb_device_id cdc_devs[] = {
149 { USB_INTERFACE_INFO(USB_CLASS_COMM,
150 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
151 .driver_info = (unsigned long)&cdc_ncm_info,
152 },
153 {
154 },
155};
156
157MODULE_DEVICE_TABLE(usb, cdc_devs);
158
159static void
160cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
161{
162 struct usbnet *dev = netdev_priv(net);
163
164 strncpy(info->driver, dev->driver_name, sizeof(info->driver));
165 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
166 strncpy(info->fw_version, dev->driver_info->description,
167 sizeof(info->fw_version));
168 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
169}
170
171static int
172cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
173 void *data, u16 flags, u16 *actlen, u16 timeout)
174{
175 int err;
176
177 err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
178 usb_rcvctrlpipe(ctx->udev, 0) :
179 usb_sndctrlpipe(ctx->udev, 0),
180 req->bNotificationType, req->bmRequestType,
181 req->wValue,
182 req->wIndex, data,
183 req->wLength, timeout);
184
185 if (err < 0) {
186 if (actlen)
187 *actlen = 0;
188 return err;
189 }
190
191 if (actlen)
192 *actlen = err;
193
194 return 0;
195}
196
197static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{
199 struct usb_cdc_notification req;
200 u32 val;
201 __le16 max_datagram_size;
202 u8 flags;
203 u8 iface_no;
204 int err;
205
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207
208 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
209 req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
210 req.wValue = 0;
211 req.wIndex = cpu_to_le16(iface_no);
212 req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
213
214 err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
215 if (err) {
216 pr_debug("failed GET_NTB_PARAMETERS\n");
217 return 1;
218 }
219
220 /* read correct set of parameters according to device mode */
221 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
222 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
226
227 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities;
229 else
230 flags = 0;
231
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags);
237
238 /* max count of tx datagrams without terminating NULL entry */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240
241 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max <
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
245 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 }
249
250 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
253 (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
254 pr_debug("Using default maximum transmit length=%d\n",
255 CDC_NCM_NTB_MAX_SIZE_TX);
256 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
257 }
258
259 /*
260 * verify that the structure alignment is:
261 * - power of two
262 * - not greater than the maximum transmit length
263 * - not less than four bytes
264 */
265 val = ctx->tx_ndp_modulus;
266
267 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
268 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
269 pr_debug("Using default alignment: 4 bytes\n");
270 ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
271 }
272
273 /*
274 * verify that the payload alignment is:
275 * - power of two
276 * - not greater than the maximum transmit length
277 * - not less than four bytes
278 */
279 val = ctx->tx_modulus;
280
281 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
282 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
283 pr_debug("Using default transmit modulus: 4 bytes\n");
284 ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
285 }
286
287 /* verify the payload remainder */
288 if (ctx->tx_remainder >= ctx->tx_modulus) {
289 pr_debug("Using default transmit remainder: 0 bytes\n");
290 ctx->tx_remainder = 0;
291 }
292
293 /* adjust TX-remainder according to NCM specification. */
294 ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
295 (ctx->tx_modulus - 1));
296
297 /* additional configuration */
298
299 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
301 req.bNotificationType = USB_CDC_SET_CRC_MODE;
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
303 req.wIndex = cpu_to_le16(iface_no);
304 req.wLength = 0;
305
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
307 if (err)
308 pr_debug("Setting CRC mode off failed\n");
309
310 /* set NTB format */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
314 req.wIndex = cpu_to_le16(iface_no);
315 req.wLength = 0;
316
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320
321 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
324 req.wValue = 0;
325 req.wIndex = cpu_to_le16(iface_no);
326 req.wLength = cpu_to_le16(2);
327
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
329 if (err) {
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
331 CDC_NCM_MIN_DATAGRAM_SIZE);
332 /* use default */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
334 } else {
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
336
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 }
342
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
344 ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
345
346 return 0;
347}
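Note: cdc_ncm_setup() clamps every device-supplied parameter before trusting it. The recurring expression val != ((-val) & val) is a branch-free power-of-two test: in two's complement, (-val) & val isolates the lowest set bit of val, which equals val only when exactly one bit is set. A worked example:

    u32 val = 12;                   /* binary 1100 */
    u32 low = (-val) & val;         /* binary 0100: lowest set bit */
    /* low != val -> 12 rejected; for val = 8, low == 8 -> accepted. */

The later remainder adjustment, (tx_remainder - ETH_HLEN) & (tx_modulus - 1), shifts the alignment target by the 14-byte Ethernet header so that the IP datagram inside each frame, rather than the frame itself, lands on the modulus boundary.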
348
349static void
350cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
351{
352 struct usb_host_endpoint *e;
353 u8 ep;
354
355 for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
356
357 e = intf->cur_altsetting->endpoint + ep;
358 switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
359 case USB_ENDPOINT_XFER_INT:
360 if (usb_endpoint_dir_in(&e->desc)) {
361 if (ctx->status_ep == NULL)
362 ctx->status_ep = e;
363 }
364 break;
365
366 case USB_ENDPOINT_XFER_BULK:
367 if (usb_endpoint_dir_in(&e->desc)) {
368 if (ctx->in_ep == NULL)
369 ctx->in_ep = e;
370 } else {
371 if (ctx->out_ep == NULL)
372 ctx->out_ep = e;
373 }
374 break;
375
376 default:
377 break;
378 }
379 }
380}
381
382static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
383{
384 if (ctx == NULL)
385 return;
386
387 del_timer_sync(&ctx->tx_timer);
388
389 if (ctx->data_claimed) {
390 usb_set_intfdata(ctx->data, NULL);
391 usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
392 }
393
394 if (ctx->control_claimed) {
395 usb_set_intfdata(ctx->control, NULL);
396 usb_driver_release_interface(driver_of(ctx->intf),
397 ctx->control);
398 }
399
400 if (ctx->tx_rem_skb != NULL) {
401 dev_kfree_skb_any(ctx->tx_rem_skb);
402 ctx->tx_rem_skb = NULL;
403 }
404
405 if (ctx->tx_curr_skb != NULL) {
406 dev_kfree_skb_any(ctx->tx_curr_skb);
407 ctx->tx_curr_skb = NULL;
408 }
409
410 kfree(ctx);
411}
412
413static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
414{
415 struct cdc_ncm_ctx *ctx;
416 struct usb_driver *driver;
417 u8 *buf;
418 int len;
419 int temp;
420 u8 iface_no;
421
422 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
423 if (ctx == NULL)
424 goto error;
425
426 memset(ctx, 0, sizeof(*ctx));
427
428 init_timer(&ctx->tx_timer);
429 spin_lock_init(&ctx->mtx);
430 ctx->netdev = dev->net;
431
432 /* store ctx pointer in device data field */
433 dev->data[0] = (unsigned long)ctx;
434
435 /* get some pointers */
436 driver = driver_of(intf);
437 buf = intf->cur_altsetting->extra;
438 len = intf->cur_altsetting->extralen;
439
440 ctx->udev = dev->udev;
441 ctx->intf = intf;
442
443 /* parse through descriptors associated with control interface */
444 while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
445
446 if (buf[1] != USB_DT_CS_INTERFACE)
447 goto advance;
448
449 switch (buf[2]) {
450 case USB_CDC_UNION_TYPE:
451 if (buf[0] < sizeof(*(ctx->union_desc)))
452 break;
453
454 ctx->union_desc =
455 (const struct usb_cdc_union_desc *)buf;
456
457 ctx->control = usb_ifnum_to_if(dev->udev,
458 ctx->union_desc->bMasterInterface0);
459 ctx->data = usb_ifnum_to_if(dev->udev,
460 ctx->union_desc->bSlaveInterface0);
461 break;
462
463 case USB_CDC_ETHERNET_TYPE:
464 if (buf[0] < sizeof(*(ctx->ether_desc)))
465 break;
466
467 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472
473 if (dev->hard_mtu <
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
475 dev->hard_mtu =
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break;
483
484 case USB_CDC_NCM_TYPE:
485 if (buf[0] < sizeof(*(ctx->func_desc)))
486 break;
487
488 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
489 break;
490
491 default:
492 break;
493 }
494advance:
495 /* advance to next descriptor */
496 temp = buf[0];
497 buf += temp;
498 len -= temp;
499 }
500
501 /* check if we got everything */
502 if ((ctx->control == NULL) || (ctx->data == NULL) ||
503 (ctx->ether_desc == NULL))
504 goto error;
505
506 /* claim interfaces, if any */
507 if (ctx->data != intf) {
508 temp = usb_driver_claim_interface(driver, ctx->data, dev);
509 if (temp)
510 goto error;
511 ctx->data_claimed = 1;
512 }
513
514 if (ctx->control != intf) {
515 temp = usb_driver_claim_interface(driver, ctx->control, dev);
516 if (temp)
517 goto error;
518 ctx->control_claimed = 1;
519 }
520
521 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
522
523 /* reset data interface */
524 temp = usb_set_interface(dev->udev, iface_no, 0);
525 if (temp)
526 goto error;
527
528 /* initialize data interface */
529 if (cdc_ncm_setup(ctx))
530 goto error;
531
532 /* configure data interface */
533 temp = usb_set_interface(dev->udev, iface_no, 1);
534 if (temp)
535 goto error;
536
537 cdc_ncm_find_endpoints(ctx, ctx->data);
538 cdc_ncm_find_endpoints(ctx, ctx->control);
539
540 if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
541 (ctx->status_ep == NULL))
542 goto error;
543
544 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
545
546 usb_set_intfdata(ctx->data, dev);
547 usb_set_intfdata(ctx->control, dev);
548 usb_set_intfdata(ctx->intf, dev);
549
550 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
551 if (temp)
552 goto error;
553
554 dev_info(&dev->udev->dev, "MAC-Address: "
555 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
556 dev->net->dev_addr[0], dev->net->dev_addr[1],
557 dev->net->dev_addr[2], dev->net->dev_addr[3],
558 dev->net->dev_addr[4], dev->net->dev_addr[5]);
559
560 dev->in = usb_rcvbulkpipe(dev->udev,
561 ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
562 dev->out = usb_sndbulkpipe(dev->udev,
563 ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
564 dev->status = ctx->status_ep;
565 dev->rx_urb_size = ctx->rx_max;
566
567 /*
 568 * We should get an event when the network connection is "connected" or
 569 * "disconnected". Set the connection to the "disconnected" state
 570 * (carrier OFF) during attach, so the IP network stack does not start
 571 * IPv6 negotiation or other traffic prematurely.
572 */
573 netif_carrier_off(dev->net);
574 ctx->tx_speed = ctx->rx_speed = 0;
575 return 0;
576
577error:
578 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
579 dev->data[0] = 0;
580 dev_info(&dev->udev->dev, "Descriptor failure\n");
581 return -ENODEV;
582}
583
584static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
585{
586 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
587 struct usb_driver *driver;
588
589 if (ctx == NULL)
590 return; /* no setup */
591
592 driver = driver_of(intf);
593
594 usb_set_intfdata(ctx->data, NULL);
595 usb_set_intfdata(ctx->control, NULL);
596 usb_set_intfdata(ctx->intf, NULL);
597
598 /* release interfaces, if any */
599 if (ctx->data_claimed) {
600 usb_driver_release_interface(driver, ctx->data);
601 ctx->data_claimed = 0;
602 }
603
604 if (ctx->control_claimed) {
605 usb_driver_release_interface(driver, ctx->control);
606 ctx->control_claimed = 0;
607 }
608
609 cdc_ncm_free(ctx);
610}
611
612static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
613{
614 if (first >= max)
615 return;
616 if (first >= end)
617 return;
618 if (end > max)
619 end = max;
620 memset(ptr + first, 0, end - first);
621}
622
623static struct sk_buff *
624cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
625{
626 struct sk_buff *skb_out;
627 u32 rem;
628 u32 offset;
629 u32 last_offset;
630 u16 n = 0;
631 u8 timeout = 0;
632
633 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb);
636 else
637 timeout = 1;
638
639 /*
640 * +----------------+
641 * | skb_out |
642 * +----------------+
643 * ^ offset
644 * ^ last_offset
645 */
646
647 /* check if we are resuming an OUT skb */
648 if (ctx->tx_curr_skb != NULL) {
649 /* pop variables */
650 skb_out = ctx->tx_curr_skb;
651 offset = ctx->tx_curr_offset;
652 last_offset = ctx->tx_curr_last_offset;
653 n = ctx->tx_curr_frame_num;
654
655 } else {
656 /* reset variables */
657 skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
658 if (skb_out == NULL) {
659 if (skb != NULL) {
660 dev_kfree_skb_any(skb);
661 ctx->netdev->stats.tx_dropped++;
662 }
663 goto exit_no_skb;
664 }
665
666 /* make room for NTH and NDP */
667 offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
668 ctx->tx_ndp_modulus) +
669 sizeof(struct usb_cdc_ncm_ndp16) +
670 (ctx->tx_max_datagrams + 1) *
671 sizeof(struct usb_cdc_ncm_dpe16);
672
673 /* store last valid offset before alignment */
674 last_offset = offset;
675 /* align first Datagram offset correctly */
676 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
677 /* zero buffer till the first IP datagram */
678 cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
679 n = 0;
680 ctx->tx_curr_frame_num = 0;
681 }
682
683 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max)
686 break;
687
688 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset;
690
691 if (skb == NULL) {
692 skb = ctx->tx_rem_skb;
693 ctx->tx_rem_skb = NULL;
694
695 /* check for end of skb */
696 if (skb == NULL)
697 break;
698 }
699
700 if (skb->len > rem) {
701 if (n == 0) {
702 /* won't fit, MTU problem? */
703 dev_kfree_skb_any(skb);
704 skb = NULL;
705 ctx->netdev->stats.tx_dropped++;
706 } else {
707 /* no room for skb - store for later */
708 if (ctx->tx_rem_skb != NULL) {
709 dev_kfree_skb_any(ctx->tx_rem_skb);
710 ctx->netdev->stats.tx_dropped++;
711 }
712 ctx->tx_rem_skb = skb;
713 skb = NULL;
714
715 /* loop one more time */
716 timeout = 1;
717 }
718 break;
719 }
720
721 memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
722
723 ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
724 ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
725
726 /* update offset */
727 offset += skb->len;
728
729 /* store last valid offset before alignment */
730 last_offset = offset;
731
732 /* align offset correctly */
733 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
734
735 /* zero padding */
736 cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
737 ctx->tx_max);
738 dev_kfree_skb_any(skb);
739 skb = NULL;
740 }
741
742 /* free up any dangling skb */
743 if (skb != NULL) {
744 dev_kfree_skb_any(skb);
745 skb = NULL;
746 ctx->netdev->stats.tx_dropped++;
747 }
748
749 ctx->tx_curr_frame_num = n;
750
751 if (n == 0) {
752 /* wait for more frames */
753 /* push variables */
754 ctx->tx_curr_skb = skb_out;
755 ctx->tx_curr_offset = offset;
756 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb;
758
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
760 /* wait for more frames */
761 /* push variables */
762 ctx->tx_curr_skb = skb_out;
763 ctx->tx_curr_offset = offset;
764 ctx->tx_curr_last_offset = last_offset;
765 /* set the pending count */
766 if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
767 ctx->tx_timer_pending = 2;
768 goto exit_no_skb;
769
770 } else {
771 /* frame goes out */
772 /* variables will be reset at next call */
773 }
774
775 /* check for overflow */
776 if (last_offset > ctx->tx_max)
777 last_offset = ctx->tx_max;
778
779 /* revert offset */
780 offset = last_offset;
781
 782	/*
 783	 * If the collected data size is less than or equal to CDC_NCM_MIN_TX_PKT
 784	 * bytes, send the buffer as it is; otherwise pad it to the full NTB
 785	 * size, since a USB HS mobile device with a DMA engine receives a
 786	 * full-size NTB more cheaply than it cancels the transfer on a short packet.
 787	 */
788 if (offset > CDC_NCM_MIN_TX_PKT)
789 offset = ctx->tx_max;
790
791 /* final zero padding */
792 cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
793
794 /* store last offset */
795 last_offset = offset;
796
797 if ((last_offset < ctx->tx_max) && ((last_offset %
798 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
799 /* force short packet */
800 *(((u8 *)skb_out->data) + last_offset) = 0;
801 last_offset++;
802 }
803
804 /* zero the rest of the DPEs plus the last NULL entry */
805 for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
806 ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
807 ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
808 }
809
810 /* fill out 16-bit NTB header */
811 ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
812 ctx->tx_ncm.nth16.wHeaderLength =
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus);
818
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
820 ctx->tx_seq++;
821
822 /* fill out 16-bit NDP table */
823 ctx->tx_ncm.ndp16.dwSignature =
824 cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
829
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
831 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16));
833
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
835 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) *
838 sizeof(struct usb_cdc_ncm_dpe16));
839
840 /* set frame length */
841 skb_put(skb_out, last_offset);
842
843 /* return skb */
844 ctx->tx_curr_skb = NULL;
845 return skb_out;
846
847exit_no_skb:
848 return NULL;
849}
850
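/*
 * For reference, the 16-bit NTB assembled by cdc_ncm_fill_tx_frame() above
 * is laid out as follows (boundaries rounded up per tx_ndp_modulus and
 * tx_modulus/tx_remainder; zero padding fills the gaps):
 *
 *  0        wFpIndex
 *  +--------+--------+--------------------+------------+-----+------------+
 *  | NTH16  | NDP16  | DPE16[0..n] + NULL | datagram 0 | ... | datagram n |
 *  +--------+--------+--------------------+------------+-----+------------+
 *            each DPE16 holds a datagram's {wDatagramIndex, wDatagramLength}
 */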
851static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
852{
853 /* start timer, if not already started */
854 if (timer_pending(&ctx->tx_timer) == 0) {
855 ctx->tx_timer.function = &cdc_ncm_tx_timeout;
856 ctx->tx_timer.data = (unsigned long)ctx;
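		/* (HZ + 999) / 1000 rounds a 1 ms delay up to at least one whole jiffy */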
857 ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
858 add_timer(&ctx->tx_timer);
859 }
860}
861
862static void cdc_ncm_tx_timeout(unsigned long arg)
863{
864 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
865 u8 restart;
866
867 spin_lock(&ctx->mtx);
868 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--;
870 restart = 1;
871 } else
872 restart = 0;
873
874 spin_unlock(&ctx->mtx);
875
876 if (restart)
877 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL)
879 usbnet_start_xmit(NULL, ctx->netdev);
880}
881
882static struct sk_buff *
883cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
884{
885 struct sk_buff *skb_out;
886 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
887 u8 need_timer = 0;
888
889 /*
890 * The Ethernet API we are using does not support transmitting
891 * multiple Ethernet frames in a single call. This driver will
892 * accumulate multiple Ethernet frames and send out a larger
 893	 * USB frame when the USB buffer is full or when a one-jiffy
 894	 * timeout expires.
895 */
896 if (ctx == NULL)
897 goto error;
898
899 spin_lock(&ctx->mtx);
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904
905 /* Start timer, if there is a remaining skb */
906 if (need_timer)
907 cdc_ncm_tx_timeout_start(ctx);
908
909 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
911 return skb_out;
912
913error:
914 if (skb != NULL)
915 dev_kfree_skb_any(skb);
916
917 return NULL;
918}
919
920static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
921{
922 struct sk_buff *skb;
923 struct cdc_ncm_ctx *ctx;
924 int sumlen;
925 int actlen;
926 int temp;
927 int nframes;
928 int x;
929 int offset;
930
931 ctx = (struct cdc_ncm_ctx *)dev->data[0];
932 if (ctx == NULL)
933 goto error;
934
935 actlen = skb_in->len;
936 sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
937
938 if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
939 pr_debug("frame too short\n");
940 goto error;
941 }
942
943 memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
944 sizeof(ctx->rx_ncm.nth16));
945
946 if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
947 USB_CDC_NCM_NTH16_SIGN) {
948 pr_debug("invalid NTH16 signature <%u>\n",
949 le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
950 goto error;
951 }
952
953 temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
954 if (temp > sumlen) {
955 pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
956 goto error;
957 }
958
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n");
962 goto error;
963 }
964
965 memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
966 sizeof(ctx->rx_ncm.ndp16));
967
968 if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
969 USB_CDC_NCM_NDP16_NOCRC_SIGN) {
970 pr_debug("invalid DPT16 signature <%u>\n",
971 le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
972 goto error;
973 }
974
975 if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
976 USB_CDC_NCM_NDP16_LENGTH_MIN) {
 977		pr_debug("invalid DPT16 length <%u>\n",
 978				le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
979 goto error;
980 }
981
982 nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
983 sizeof(struct usb_cdc_ncm_ndp16)) /
984 sizeof(struct usb_cdc_ncm_dpe16));
 985	nframes--;	/* process all NDP entries except for the final NULL one */
986
987 pr_debug("nframes = %u\n", nframes);
988
989 temp += sizeof(ctx->rx_ncm.ndp16);
990
991 if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
992 pr_debug("Invalid nframes = %d\n", nframes);
993 goto error;
994 }
995
996 if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
997 pr_debug("Truncating number of frames from %u to %u\n",
998 nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
999 nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
1000 }
1001
1002 memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
1003 nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
1004
1005 for (x = 0; x < nframes; x++) {
1006 offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
1007 temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
1008
1009 /*
1010 * CDC NCM ch. 3.7
1011 * All entries after first NULL entry are to be ignored
1012 */
1013 if ((offset == 0) || (temp == 0)) {
1014 if (!x)
1015 goto error; /* empty NTB */
1016 break;
1017 }
1018
1019 /* sanity checking */
1020 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 1022			pr_debug("invalid frame detected (ignored) "
1023 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb);
1025 if (!x)
1026 goto error;
1027 break;
1028
 1029		} else {
 1030			skb = skb_clone(skb_in, GFP_ATOMIC);
			if (unlikely(skb == NULL))
				goto error;	/* clone failed; drop the NTB */
 1031			skb->len = temp;
 1032			skb->data = ((u8 *)skb_in->data) + offset;
 1033			skb_set_tail_pointer(skb, temp);
 1034			usbnet_skb_return(dev, skb);
 1035		}
1036 }
1037 return 1;
1038error:
1039 return 0;
1040}
1041
1042static void
1043cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1044 struct connection_speed_change *data)
1045{
1046 uint32_t rx_speed = le32_to_cpu(data->USBitRate);
1047 uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
1048
 1049	/*
 1050	 * The usbnet API currently does not support reporting the actual
 1051	 * device speed, so print it to the kernel log instead.
 1052	 */
1053 if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
1054 ctx->tx_speed = tx_speed;
1055 ctx->rx_speed = rx_speed;
1056
1057 if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
1058 printk(KERN_INFO KBUILD_MODNAME
1059 ": %s: %u mbit/s downlink "
1060 "%u mbit/s uplink\n",
1061 ctx->netdev->name,
1062 (unsigned int)(rx_speed / 1000000U),
1063 (unsigned int)(tx_speed / 1000000U));
1064 } else {
1065 printk(KERN_INFO KBUILD_MODNAME
1066 ": %s: %u kbit/s downlink "
1067 "%u kbit/s uplink\n",
1068 ctx->netdev->name,
1069 (unsigned int)(rx_speed / 1000U),
1070 (unsigned int)(tx_speed / 1000U));
1071 }
1072 }
1073}
1074
1075static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1076{
1077 struct cdc_ncm_ctx *ctx;
1078 struct usb_cdc_notification *event;
1079
1080 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1081
1082 if (urb->actual_length < sizeof(*event))
1083 return;
1084
1085 /* test for split data in 8-byte chunks */
1086 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1087 cdc_ncm_speed_change(ctx,
1088 (struct connection_speed_change *)urb->transfer_buffer);
1089 return;
1090 }
1091
1092 event = urb->transfer_buffer;
1093
1094 switch (event->bNotificationType) {
1095 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
1096 /*
 1097		 * According to the CDC NCM specification ch. 7.1, the
 1098		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
 1099		 * sent by the device after USB_CDC_NOTIFY_SPEED_CHANGE.
1100 */
1101 ctx->connected = event->wValue;
1102
1103 printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
1104 " %sconnected\n",
1105 ctx->netdev->name, ctx->connected ? "" : "dis");
1106
1107 if (ctx->connected)
1108 netif_carrier_on(dev->net);
1109 else {
1110 netif_carrier_off(dev->net);
1111 ctx->tx_speed = ctx->rx_speed = 0;
1112 }
1113 break;
1114
1115 case USB_CDC_NOTIFY_SPEED_CHANGE:
1116 if (urb->actual_length <
1117 (sizeof(*event) + sizeof(struct connection_speed_change)))
1118 set_bit(EVENT_STS_SPLIT, &dev->flags);
1119 else
1120 cdc_ncm_speed_change(ctx,
1121 (struct connection_speed_change *) &event[1]);
1122 break;
1123
1124 default:
1125 dev_err(&dev->udev->dev, "NCM: unexpected "
1126 "notification 0x%02x!\n", event->bNotificationType);
1127 break;
1128 }
1129}
1130
1131static int cdc_ncm_check_connect(struct usbnet *dev)
1132{
1133 struct cdc_ncm_ctx *ctx;
1134
1135 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1136 if (ctx == NULL)
1137 return 1; /* disconnected */
1138
1139 return !ctx->connected;
1140}
1141
1142static int
1143cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
1144{
1145 return usbnet_probe(udev, prod);
1146}
1147
1148static void cdc_ncm_disconnect(struct usb_interface *intf)
1149{
1150 struct usbnet *dev = usb_get_intfdata(intf);
1151
1152 if (dev == NULL)
1153 return; /* already disconnected */
1154
1155 usbnet_disconnect(intf);
1156}
1157
1158static int cdc_ncm_manage_power(struct usbnet *dev, int status)
1159{
1160 dev->intf->needs_remote_wakeup = status;
1161 return 0;
1162}
1163
1164static const struct driver_info cdc_ncm_info = {
1165 .description = "CDC NCM",
1166 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1167 .bind = cdc_ncm_bind,
1168 .unbind = cdc_ncm_unbind,
1169 .check_connect = cdc_ncm_check_connect,
1170 .manage_power = cdc_ncm_manage_power,
1171 .status = cdc_ncm_status,
1172 .rx_fixup = cdc_ncm_rx_fixup,
1173 .tx_fixup = cdc_ncm_tx_fixup,
1174};
1175
1176static struct usb_driver cdc_ncm_driver = {
1177 .name = "cdc_ncm",
1178 .id_table = cdc_devs,
1179 .probe = cdc_ncm_probe,
1180 .disconnect = cdc_ncm_disconnect,
1181 .suspend = usbnet_suspend,
1182 .resume = usbnet_resume,
1183 .supports_autosuspend = 1,
1184};
1185
1186static struct ethtool_ops cdc_ncm_ethtool_ops = {
1187 .get_drvinfo = cdc_ncm_get_drvinfo,
1188 .get_link = usbnet_get_link,
1189 .get_msglevel = usbnet_get_msglevel,
1190 .set_msglevel = usbnet_set_msglevel,
1191 .get_settings = usbnet_get_settings,
1192 .set_settings = usbnet_set_settings,
1193 .nway_reset = usbnet_nway_reset,
1194};
1195
1196static int __init cdc_ncm_init(void)
1197{
1198 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
1199 return usb_register(&cdc_ncm_driver);
1200}
1201
1202module_init(cdc_ncm_init);
1203
1204static void __exit cdc_ncm_exit(void)
1205{
1206 usb_deregister(&cdc_ncm_driver);
1207}
1208
1209module_exit(cdc_ncm_exit);
1210
1211MODULE_AUTHOR("Hans Petter Selasky");
1212MODULE_DESCRIPTION("USB CDC NCM host driver");
1213MODULE_LICENSE("Dual BSD/GPL");
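For reference, below is a minimal user-space sketch of the NDP16 walk that cdc_ncm_rx_fixup() performs. The two signature constants and the struct layouts mirror include/linux/usb/cdc.h; the file name, helper name and the little-endian-host shortcut (no byte-order conversions) are illustrative assumptions only, not part of the driver.

/* ntb16_walk.c - stand-alone sketch of the NDP16 walk done by
 * cdc_ncm_rx_fixup(); assumes a little-endian host. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NTH16_SIGN 0x484d434e	/* "NCMH", USB_CDC_NCM_NTH16_SIGN */
#define NDP16_SIGN 0x304d434e	/* "NCM0", USB_CDC_NCM_NDP16_NOCRC_SIGN */

struct nth16 {			/* NTB header */
	uint32_t dwSignature;
	uint16_t wHeaderLength, wSequence, wBlockLength, wFpIndex;
} __attribute__((packed));

struct dpe16 {			/* datagram pointer entry */
	uint16_t wDatagramIndex, wDatagramLength;
} __attribute__((packed));

struct ndp16 {			/* datagram pointer table header */
	uint32_t dwSignature;
	uint16_t wLength, wNextFpIndex;
} __attribute__((packed));

/* Walk one NTB; returns the number of datagrams found, -1 on bad framing. */
static int ntb16_walk(const uint8_t *buf, size_t len)
{
	struct nth16 nth;
	struct ndp16 ndp;
	size_t off;
	int n = 0;

	if (len < sizeof(nth) + sizeof(ndp))
		return -1;
	memcpy(&nth, buf, sizeof(nth));
	if (nth.dwSignature != NTH16_SIGN || nth.wFpIndex + sizeof(ndp) > len)
		return -1;
	memcpy(&ndp, buf + nth.wFpIndex, sizeof(ndp));
	if (ndp.dwSignature != NDP16_SIGN)
		return -1;

	for (off = nth.wFpIndex + sizeof(ndp);
	     off + sizeof(struct dpe16) <= len; off += sizeof(struct dpe16)) {
		struct dpe16 dpe;

		memcpy(&dpe, buf + off, sizeof(dpe));
		/* a NULL entry terminates the table (CDC NCM ch. 3.7) */
		if (dpe.wDatagramIndex == 0 || dpe.wDatagramLength == 0)
			break;
		if ((size_t)dpe.wDatagramIndex + dpe.wDatagramLength > len)
			return -1;
		printf("datagram %d at offset %u, %u bytes\n",
		       n++, dpe.wDatagramIndex, dpe.wDatagramLength);
	}
	return n;
}

int main(void)
{
	uint8_t ntb[64] = { 0 };
	struct nth16 nth = { NTH16_SIGN, sizeof(nth), 0, 64, 16 };
	struct ndp16 ndp = { NDP16_SIGN, 16, 0 };
	struct dpe16 dpe[2] = { { 32, 14 }, { 0, 0 } };	/* one datagram + NULL */

	memcpy(ntb, &nth, sizeof(nth));
	memcpy(ntb + 16, &ndp, sizeof(ndp));
	memcpy(ntb + 16 + sizeof(ndp), dpe, sizeof(dpe));
	return ntb16_walk(ntb, sizeof(ntb)) == 1 ? 0 : 1;
}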
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..93c6b5f62ac4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1745,7 +1745,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
 			   unsigned int cmd, unsigned long arg)
 {
 	struct hso_serial *serial = get_serial_by_tty(tty);
-	void __user *uarg = (void __user *)arg;
 	int ret = 0;
 	D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
 
@@ -2994,12 +2993,14 @@ static int hso_probe(struct usb_interface *interface,
 
 	case HSO_INTF_BULK:
 		/* It's a regular bulk interface */
-		if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
-		    !disable_net)
-			hso_dev = hso_create_net_device(interface, port_spec);
-		else
+		if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+			if (!disable_net)
+				hso_dev =
+				    hso_create_net_device(interface, port_spec);
+		} else {
 			hso_dev =
 			    hso_create_bulk_serial_device(interface, port_spec);
+		}
 		if (!hso_dev)
 			goto exit;
 		break;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 
 	/* Paranoid */
 	if (skb->len > IPHETH_BUF_SIZE) {
-		WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
 		dev->net->stats.tx_dropped++;
 		dev_kfree_skb_irq(skb);
 		return NETDEV_TX_OK;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d6..ef3667690b12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
 
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 	u16 res;
 
 	read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 
 	write_mii_word(pegasus, phy_id, loc, val);
 }
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d1ac15c95faf..ed1b43210584 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -802,10 +802,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
 
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 
-	/* Kill the timer then flush the work queue */
+	/* kill the timer and work */
 	del_timer_sync(&priv->sync_timer);
-
-	flush_scheduled_work();
+	cancel_work_sync(&priv->sierra_net_kevent);
 
 	/* tell modem we are going away */
 	status = sierra_net_send_cmd(dev, priv->shdwn_msg,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..ed9a41643ff4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -390,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 		goto error;
 	// else network stack removes extra byte if we forced a short packet
 
-	if (skb->len)
-		usbnet_skb_return (dev, skb);
-	else {
-		netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
-		dev->net->stats.rx_errors++;
-		skb_queue_tail (&dev->done, skb);
+	if (skb->len) {
+		/* all data was already cloned from skb inside the driver */
+		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+			dev_kfree_skb_any(skb);
+		else
+			usbnet_skb_return(dev, skb);
+		return;
 	}
+
+	netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+	dev->net->stats.rx_errors++;
+	skb_queue_tail(&dev->done, skb);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -970,7 +976,8 @@ static void tx_complete (struct urb *urb)
 	struct usbnet		*dev = entry->dev;
 
 	if (urb->status == 0) {
-		dev->net->stats.tx_packets++;
+		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+			dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += entry->length;
 	} else {
 		dev->net->stats.tx_errors++;
@@ -1043,8 +1050,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	if (info->tx_fixup) {
 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
 		if (!skb) {
-			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-			goto drop;
+			if (netif_msg_tx_err(dev)) {
+				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+				goto drop;
+			} else {
+				/* cdc_ncm collected packet; waits for more */
+				goto not_drop;
+			}
 		}
 	}
 	length = skb->len;
@@ -1066,13 +1078,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	/* don't assume the hardware handles USB_ZERO_PACKET
 	 * NOTE:  strictly conforming cdc-ether devices should expect
 	 * the ZLP here, but ignore the one-byte packet.
+	 * NOTE2: CDC NCM specification is different from CDC ECM when
+	 * handling ZLP/short packets, so cdc_ncm driver will make short
+	 * packet itself if needed.
	 */
 	if (length % dev->maxpacket == 0) {
 		if (!(info->flags & FLAG_SEND_ZLP)) {
-			urb->transfer_buffer_length++;
-			if (skb_tailroom(skb)) {
-				skb->data[skb->len] = 0;
-				__skb_put(skb, 1);
+			if (!(info->flags & FLAG_MULTI_PACKET)) {
+				urb->transfer_buffer_length++;
+				if (skb_tailroom(skb)) {
+					skb->data[skb->len] = 0;
+					__skb_put(skb, 1);
+				}
 			}
 		} else
 			urb->transfer_flags |= URB_ZERO_PACKET;
@@ -1121,6 +1138,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
 drop:
 		dev->net->stats.tx_dropped++;
+not_drop:
 		if (skb)
 			dev_kfree_skb_any (skb);
 		usb_free_urb (urb);
@@ -1230,8 +1248,7 @@ void usbnet_disconnect (struct usb_interface *intf)
 	net = dev->net;
 	unregister_netdev (net);
 
-	/* we don't hold rtnl here ... */
-	flush_scheduled_work ();
+	cancel_work_sync(&dev->kevent);
 
 	if (dev->driver_info->unbind)
 		dev->driver_info->unbind (dev, intf);
@@ -1273,6 +1290,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	struct usb_device		*xdev;
 	int				status;
 	const char			*name;
+	struct usb_driver		*driver = to_usb_driver(udev->dev.driver);
+
+	/* usbnet already took usb runtime pm, so have to enable the feature
+	 * for usb interface, otherwise usb_autopm_get_interface may return
+	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+	 */
+	if (!driver->supports_autosuspend) {
+		driver->supports_autosuspend = 1;
+		pm_runtime_enable(&udev->dev);
+	}
 
 	name = udev->dev.driver->name;
 	info = (struct driver_info *) prod->driver_info;
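The tx_fixup changes above (a NULL return that is not an error, and tx_packets no longer counted in tx_complete() for FLAG_MULTI_PACKET) amount to an aggregation contract for minidrivers. A rough sketch of that contract follows; all foo_* names are hypothetical, the aggregation helper is assumed to exist, and this is kernel-context pseudocode rather than a standalone program:

/* Sketch of a FLAG_MULTI_PACKET minidriver's tx_fixup under this patch. */
struct foo_ctx {
	struct sk_buff *agg;	/* frames batched so far */
	unsigned int frames;	/* how many are in the batch */
};

static struct sk_buff *foo_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				    gfp_t flags)
{
	struct foo_ctx *ctx = (struct foo_ctx *)dev->data[0];
	struct sk_buff *out;

	out = foo_aggregate(ctx, skb);	/* always consumes skb */
	if (out == NULL)
		return NULL;	/* still batching: unless tx_err messages are
				 * enabled, usbnet takes the new not_drop path
				 * and nothing is counted as dropped */

	/* tx_complete() skips tx_packets for FLAG_MULTI_PACKET drivers,
	 * so the whole batch is accounted here */
	dev->net->stats.tx_packets += ctx->frames;
	return out;		/* one URB carrying several packets */
}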
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc493..5e7f069eab53 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.4.3"
-#define DRV_RELDATE	"2007-03-06"
+#define DRV_VERSION	"1.5.0"
+#define DRV_RELDATE	"2010-10-09"
 
 
 /* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
+#include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/workqueue.h>
 #include <asm/processor.h>	/* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
+#define MCAM_SIZE	32
+#define VCAM_SIZE	32
+
 /*
 		Theory of Operation
 
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 /* Offsets to the device registers. */
 enum register_offsets {
 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
-	ChipCmd1=0x09,
+	ChipCmd1=0x09, TQWake=0x0A,
 	IntrStatus=0x0C, IntrEnable=0x0E,
 	MulticastFilter0=0x10, MulticastFilter1=0x14,
 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
-	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 	StickyHW=0x83, IntrStatus2=0x84,
+	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
 	BackCaptureEffect=0x04, BackRandom=0x08
 };
 
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+	TCR_PQEN=0x01,
+	TCR_LB0=0x02,		/* loopback[0] */
+	TCR_LB1=0x04,		/* loopback[1] */
+	TCR_OFSET=0x08,
+	TCR_RTGOPT=0x10,
+	TCR_RTFT0=0x20,
+	TCR_RTFT1=0x40,
+	TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+	CAMC_CAMEN=0x01,
+	CAMC_VCAMSL=0x02,
+	CAMC_CAMWR=0x04,
+	CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+	BCR1_POT0=0x01,
+	BCR1_POT1=0x02,
+	BCR1_POT2=0x04,
+	BCR1_CTFT0=0x08,
+	BCR1_CTFT1=0x10,
+	BCR1_CTSF=0x20,
+	BCR1_TXQNOBK=0x40,	/* for VT6105 */
+	BCR1_VIDFR=0x80,	/* for VT6105 */
+	BCR1_MED0=0x40,		/* for VT6102 */
+	BCR1_MED1=0x80,		/* for VT6102 */
+};
+
 #ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
 	DescOwn=0x80000000
 };
 
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+	DescTag=0x00010000
+};
+
 /* Bits in ChipCmd. */
 enum chip_cmd_bits {
 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
 };
 
 struct rhine_private {
+	/* Bit mask for configured VLAN ids */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
 	/* Descriptor rings */
 	struct rx_desc *rx_ring;
 	struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
 	void __iomem *base;
 };
 
+#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
 
 #define RHINE_WAIT_FOR(condition) do {					\
 	int i=1024;							\
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
 	.ndo_set_mac_address	 = eth_mac_addr,
 	.ndo_do_ioctl		 = netdev_ioctl,
 	.ndo_tx_timeout		 = rhine_tx_timeout,
+	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	 = rhine_poll,
 #endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+	if (pdev->revision >= VT6105M)
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		   netif_carrier_ok(mii->dev));
 }
 
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	int i;
+
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (MCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	for (i = 0; i < 6; i++, addr++)
+		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (VCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+
+	/* Disable all CAMs */
+	rhine_set_vlan_cam_mask(ioaddr, 0);
+	rhine_set_cam_mask(ioaddr, 0);
+
+	/* disable hardware VLAN support */
+	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @dev: network device whose VLAN CAM filters to update
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+	u16 vid;
+	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
+	unsigned int i = 0;
+
+	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+		vCAMmask |= 1 << i;
+		if (++i >= VCAM_SIZE)
+			break;
+	}
+	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	set_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	clear_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
 
 	rhine_set_rx_mode(dev);
 
+	if (rp->pdev->revision >= VT6105M)
+		rhine_init_cam_filter(dev);
+
 	napi_enable(&rp->napi);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	rp->tx_ring[entry].desc_length =
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+		/* request tagging */
+		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+	}
+	else
+		rp->tx_ring[entry].tx_status = 0;
+
 	/* lock eth irq */
 	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
-	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
 
 	rp->cur_tx++;
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
+	if (vlan_tx_tag_present(skb))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	/* Wake the potentially-idle transmit channel */
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 	       ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
 	spin_unlock(&rp->lock);
 }
 
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates an
+ * 802.1Q packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI)
+ * is 4-byte aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+	return ntohs(*(u16 *)trailer);
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 	for (count = 0; count < limit; ++count) {
 		struct rx_desc *desc = rp->rx_head_desc;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
+		u32 desc_length = le32_to_cpu(desc->desc_length);
 		int data_size = desc_status >> 16;
 
 		if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
 			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
+			u16 vlan_tci = 0;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
 						       rp->rx_buf_sz,
 						       PCI_DMA_FROMDEVICE);
 			}
+
+			if (unlikely(desc_length & DescTag))
+				vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
 			skb->protocol = eth_type_trans(skb, dev);
+
+			if (unlikely(desc_length & DescTag))
+				__vlan_hwaccel_put_tag(skb, vlan_tci);
 			netif_receive_skb(skb);
 			dev->stats.rx_bytes += pkt_len;
 			dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
 
 	iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
 	       ioaddr + ChipCmd);
+
+	if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 	       ioaddr + ChipCmd1);
 	IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	}
 	if (intr_status & IntrTxUnderrun) {
 		if (rp->tx_thresh < 0xE0)
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		if (debug > 1)
 			printk(KERN_INFO "%s: Transmitter underrun, Tx "
 			       "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	    (intr_status & (IntrTxAborted |
 	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
 		if (rp->tx_thresh < 0xE0) {
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		}
 		if (debug > 1)
 			printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 mc_filter[2];	/* Multicast hash filter */
-	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
+	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
+	struct netdev_hw_addr *ha;
 
 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
 		rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
+	} else if (rp->pdev->revision >= VT6105M) {
+		int i = 0;
+		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == MCAM_SIZE)
+				break;
+			rhine_set_cam(ioaddr, i, ha->addr);
+			mCAMmask |= 1 << i;
+			i++;
+		}
+		rhine_set_cam_mask(ioaddr, mCAMmask);
 	} else {
-		struct netdev_hw_addr *ha;
-
 		memset(mc_filter, 0, sizeof(mc_filter));
 		netdev_for_each_mc_addr(ha, dev) {
 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
 		}
 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
 	}
-	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+	/* enable/disable VLAN receive filtering */
+	if (rp->pdev->revision >= VT6105M) {
+		if (dev->flags & IFF_PROMISC)
+			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+		else
+			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	}
+	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
 	if (!netif_running(dev))
 		return 0;
 
-	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
 		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
 
 	ret = pci_set_power_state(pdev, PCI_D0);
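A quick worked example of the trailer arithmetic in rhine_get_vlan_tci() above; standalone and illustrative only:

#include <stdio.h>

/* Mirror of rhine_get_vlan_tci(): the 802.1Q header follows the CRC,
 * rounded up to a 4-byte boundary; TPID is 2 bytes, then the TCI. */
static unsigned tci_offset(int data_size)
{
	return ((data_size + 3) & ~3) + 2;
}

int main(void)
{
	/* 64-byte frame incl. CRC -> TPID at bytes 64..65, TCI at 66..67 */
	printf("%u\n", tci_offset(64));	/* prints 66 */
	/* 61-byte frame -> rounded up to 64, TCI again at offset 66 */
	printf("%u\n", tci_offset(61));	/* prints 66 */
	return 0;
}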
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 		goto unregister;
 	}
 
-	vi->status = VIRTIO_NET_S_LINK_UP;
-	virtnet_update_status(vi);
-	netif_carrier_on(dev);
+	/* Assume link up if device can't report link status,
+	   otherwise get link status from config. */
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+		netif_carrier_off(dev);
+		virtnet_update_status(vi);
+	} else {
+		vi->status = VIRTIO_NET_S_LINK_UP;
+		netif_carrier_on(dev);
+	}
 
 	pr_debug("virtnet: registered device %s\n", dev->name);
 	return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d7..0169be7694a9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
 
 static atomic_t devices_found;
 
+#define VMXNET3_MAX_DEVICES 10
+static int enable_mq = 1;
+static int irq_share_mode;
 
 /*
  *    Enable/Disable the given intr
@@ -99,7 +102,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
 static bool
 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
-	return netif_queue_stopped(adapter->netdev);
+	return tq->stopped;
 }
 
 
@@ -107,7 +110,7 @@ static void
 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
 	tq->stopped = false;
-	netif_start_queue(adapter->netdev);
+	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
 }
 
 
@@ -115,7 +118,7 @@ static void
 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
	tq->stopped = false;
-	netif_wake_queue(adapter->netdev);
+	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 }
 
 
@@ -124,7 +127,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
 	tq->stopped = true;
 	tq->num_stop++;
-	netif_stop_queue(adapter->netdev);
+	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 }
 
 
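The queue index passed to netif_{start,wake,stop}_subqueue() above comes from pointer arithmetic: tq points into the adapter's tx_queue array, so tq - adapter->tx_queue yields the queue number. A standalone illustration with hypothetical types:

#include <stdio.h>

struct txq { int id; };

int main(void)
{
	struct txq queues[4];
	struct txq *tq = &queues[2];

	/* same arithmetic as netif_stop_subqueue(netdev, tq - adapter->tx_queue) */
	printf("queue index = %td\n", tq - queues);	/* prints 2 */
	return 0;
}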
@@ -135,6 +138,7 @@ static void
 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
 	u32 ret;
+	int i;
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 		if (!netif_carrier_ok(adapter->netdev))
 			netif_carrier_on(adapter->netdev);
 
-		if (affectTxQueue)
-			vmxnet3_tq_start(&adapter->tx_queue, adapter);
+		if (affectTxQueue) {
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				vmxnet3_tq_start(&adapter->tx_queue[i],
+						 adapter);
+		}
 	} else {
 		printk(KERN_INFO "%s: NIC Link is Down\n",
 		       adapter->netdev->name);
 		if (netif_carrier_ok(adapter->netdev))
 			netif_carrier_off(adapter->netdev);
 
-		if (affectTxQueue)
-			vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+		if (affectTxQueue) {
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+		}
 	}
 }
 
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
+	int i;
 	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
@@ -176,16 +186,18 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
 
-		if (adapter->tqd_start->status.stopped) {
-			printk(KERN_ERR "%s: tq error 0x%x\n",
-			       adapter->netdev->name,
-			       le32_to_cpu(adapter->tqd_start->status.error));
-		}
-		if (adapter->rqd_start->status.stopped) {
-			printk(KERN_ERR "%s: rq error 0x%x\n",
-			       adapter->netdev->name,
-			       adapter->rqd_start->status.error);
-		}
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			if (adapter->tqd_start[i].status.stopped)
+				dev_err(&adapter->netdev->dev,
+					"%s: tq[%d] error 0x%x\n",
+					adapter->netdev->name, i, le32_to_cpu(
+					adapter->tqd_start[i].status.error));
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			if (adapter->rqd_start[i].status.stopped)
+				dev_err(&adapter->netdev->dev,
+					"%s: rq[%d] error 0x%x\n",
+					adapter->netdev->name, i,
+					adapter->rqd_start[i].status.error);
 
 		schedule_work(&adapter->work);
 	}
@@ -410,7 +422,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
 }
 
 
-void
+static void
 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		   struct vmxnet3_adapter *adapter)
 {
@@ -437,6 +449,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 }
 
 
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
 static void
 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
 		struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@ err:
 	return -ENOMEM;
 }
 
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
 
 /*
  * starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 }
 
 
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
 /*
  * parse and copy relevant protocol headers:
  *    For a tso pkt, relevant headers are L2/3/4 including options
@@ -903,6 +945,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		}
 	}
 
+	spin_lock_irqsave(&tq->tx_lock, flags);
+
+	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+		tq->stats.tx_ring_full++;
+		dev_dbg(&adapter->netdev->dev,
+			"tx queue stopped on %s, next2comp %u"
+			" next2fill %u\n", adapter->netdev->name,
+			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+		vmxnet3_tq_stop(tq, adapter);
+		spin_unlock_irqrestore(&tq->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+
 	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
 	if (ret >= 0) {
 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -926,20 +983,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		goto drop_pkt;
 	}
 
-	spin_lock_irqsave(&tq->tx_lock, flags);
-
-	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
-		tq->stats.tx_ring_full++;
-		dev_dbg(&adapter->netdev->dev,
-			"tx queue stopped on %s, next2comp %u"
-			" next2fill %u\n", adapter->netdev->name,
-			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
-		vmxnet3_tq_stop(tq, adapter);
-		spin_unlock_irqrestore(&tq->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
 	/* fill tx descs related to addr & len */
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
@@ -1000,7 +1043,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	if (le32_to_cpu(tq->shared->txNumDeferred) >=
 	    le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
-		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+		VMXNET3_WRITE_BAR0_REG(adapter,
+				       VMXNET3_REG_TXPROD + tq->qid * 8,
 				       tq->tx_ring.next2fill);
 	}
 
@@ -1020,7 +1064,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1020{ 1064{
1021 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1065 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1022 1066
1023 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev); 1067 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1068 return vmxnet3_tq_xmit(skb,
1069 &adapter->tx_queue[skb->queue_mapping],
1070 adapter, netdev);
1024} 1071}
1025 1072
1026 1073
@@ -1106,9 +1153,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1106 break; 1153 break;
1107 } 1154 }
1108 num_rxd++; 1155 num_rxd++;
1109 1156 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1110 idx = rcd->rxdIdx; 1157 idx = rcd->rxdIdx;
1111 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1158 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1112 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1159 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1113 &rxCmdDesc); 1160 &rxCmdDesc);
1114 rbi = rq->buf_info[ring_idx] + idx; 1161 rbi = rq->buf_info[ring_idx] + idx;
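The ring_idx computation relies on the qid/qid2 numbering assigned later in
vmxnet3_request_irqs(): ring 0 of rx queue i completes with rqID == i, and
ring 1 with rqID == i + num_rx_queues. A sketch of the decode under that
assumption (vmxnet3_decode_rqid is illustrative only):

	/* Sketch: split a completion's rqID into (queue, ring). */
	static inline void
	vmxnet3_decode_rqid(u32 rqid, u32 num_rx_queues, u32 *queue, u32 *ring)
	{
		*ring = (rqid < num_rx_queues) ? 0 : 1;
		*queue = rqid % num_rx_queues;
	}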
@@ -1260,6 +1307,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1260} 1307}
1261 1308
1262 1309
1310static void
1311vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1312{
1313 int i;
1314
1315 for (i = 0; i < adapter->num_rx_queues; i++)
1316 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1317}
1318
1319
1263void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1320void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1264 struct vmxnet3_adapter *adapter) 1321 struct vmxnet3_adapter *adapter)
1265{ 1322{
@@ -1351,6 +1408,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1351 1408
1352 1409
1353static int 1410static int
1411vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1412{
1413 int i, err = 0;
1414
1415 for (i = 0; i < adapter->num_rx_queues; i++) {
1416 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1417 if (unlikely(err)) {
1418 dev_err(&adapter->netdev->dev, "%s: failed to "
1419 "initialize rx queue %i\n",
1420 adapter->netdev->name, i);
1421 break;
1422 }
1423 }
1424 return err;
1425
1426}
1427
1428
1429static int
1354vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1430vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1355{ 1431{
1356 int i; 1432 int i;
@@ -1398,33 +1474,177 @@ err:
1398 1474
1399 1475
1400static int 1476static int
1477vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1478{
1479 int i, err = 0;
1480
1481 for (i = 0; i < adapter->num_rx_queues; i++) {
1482 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1483 if (unlikely(err)) {
1484 dev_err(&adapter->netdev->dev,
1485 "%s: failed to create rx queue %i\n",
1486 adapter->netdev->name, i);
1487 goto err_out;
1488 }
1489 }
1490 return err;
1491err_out:
1492 vmxnet3_rq_destroy_all(adapter);
1493 return err;
1494
1495}
1496
1497/* Multiple queue aware polling function for tx and rx */
1498
1499static int
1401vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1500vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1402{ 1501{
1502 int rcd_done = 0, i;
1403 if (unlikely(adapter->shared->ecr)) 1503 if (unlikely(adapter->shared->ecr))
1404 vmxnet3_process_events(adapter); 1504 vmxnet3_process_events(adapter);
1505 for (i = 0; i < adapter->num_tx_queues; i++)
1506 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1405 1507
1406 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); 1508 for (i = 0; i < adapter->num_rx_queues; i++)
1407 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); 1509 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1510 adapter, budget);
1511 return rcd_done;
1408} 1512}
1409 1513
1410 1514
1411static int 1515static int
1412vmxnet3_poll(struct napi_struct *napi, int budget) 1516vmxnet3_poll(struct napi_struct *napi, int budget)
1413{ 1517{
1414 struct vmxnet3_adapter *adapter = container_of(napi, 1518 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1415 struct vmxnet3_adapter, napi); 1519 struct vmxnet3_rx_queue, napi);
1520 int rxd_done;
1521
1522 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1523
1524 if (rxd_done < budget) {
1525 napi_complete(napi);
1526 vmxnet3_enable_all_intrs(rx_queue->adapter);
1527 }
1528 return rxd_done;
1529}
1530
1531/*
1532 * NAPI polling function for MSI-X mode with multiple Rx queues
1533 * Returns the number of NAPI credits consumed (# of rx descriptors processed)
1534 */
1535
1536static int
1537vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1538{
1539 struct vmxnet3_rx_queue *rq = container_of(napi,
1540 struct vmxnet3_rx_queue, napi);
1541 struct vmxnet3_adapter *adapter = rq->adapter;
1416 int rxd_done; 1542 int rxd_done;
1417 1543
1418 rxd_done = vmxnet3_do_poll(adapter, budget); 1544 /* When sharing interrupt with corresponding tx queue, process
1545 * tx completions in that queue as well
1546 */
1547 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1548 struct vmxnet3_tx_queue *tq =
1549 &adapter->tx_queue[rq - adapter->rx_queue];
1550 vmxnet3_tq_tx_complete(tq, adapter);
1551 }
1552
1553 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1419 1554
1420 if (rxd_done < budget) { 1555 if (rxd_done < budget) {
1421 napi_complete(napi); 1556 napi_complete(napi);
1422 vmxnet3_enable_intr(adapter, 0); 1557 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1423 } 1558 }
1424 return rxd_done; 1559 return rxd_done;
1425} 1560}
1426 1561
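In the BUDDYSHARE case above, the buddy tx queue is found by pointer
arithmetic: rq - adapter->rx_queue is the index of rq within the rx queue
array, which is only safe because probe forces equal tx and rx queue counts
before keeping this mode. An equivalent, more explicit sketch
(vmxnet3_buddy_tq is illustrative only):

	/* Sketch: rx queue i shares its vector with tx queue i. Assumes
	 * num_tx_queues == num_rx_queues, as enforced at probe time. */
	static inline struct vmxnet3_tx_queue *
	vmxnet3_buddy_tq(struct vmxnet3_adapter *adapter,
			 struct vmxnet3_rx_queue *rq)
	{
		size_t i = rq - adapter->rx_queue;

		return &adapter->tx_queue[i];
	}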
1427 1562
1563#ifdef CONFIG_PCI_MSI
1564
1565/*
1566 * Handle completion interrupts on tx queues
1567 * Returns whether or not the intr is handled
1568 */
1569
1570static irqreturn_t
1571vmxnet3_msix_tx(int irq, void *data)
1572{
1573 struct vmxnet3_tx_queue *tq = data;
1574 struct vmxnet3_adapter *adapter = tq->adapter;
1575
1576 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1577 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1578
1579 /* Handle the case where only one irq is allocated for all tx queues */
1580 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1581 int i;
1582 for (i = 0; i < adapter->num_tx_queues; i++) {
1583 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1584 vmxnet3_tq_tx_complete(txq, adapter);
1585 }
1586 } else {
1587 vmxnet3_tq_tx_complete(tq, adapter);
1588 }
1589 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1590
1591 return IRQ_HANDLED;
1592}
1593
1594
1595/*
1596 * Handle completion interrupts on rx queues. Returns whether or not the
1597 * intr is handled
1598 */
1599
1600static irqreturn_t
1601vmxnet3_msix_rx(int irq, void *data)
1602{
1603 struct vmxnet3_rx_queue *rq = data;
1604 struct vmxnet3_adapter *adapter = rq->adapter;
1605
1606 /* disable intr if needed */
1607 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1608 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1609 napi_schedule(&rq->napi);
1610
1611 return IRQ_HANDLED;
1612}
1613
1614/*
1615 *----------------------------------------------------------------------------
1616 *
1617 * vmxnet3_msix_event --
1618 *
1619 * vmxnet3 msix event intr handler
1620 *
1621 * Result:
1622 * whether or not the intr is handled
1623 *
1624 *----------------------------------------------------------------------------
1625 */
1626
1627static irqreturn_t
1628vmxnet3_msix_event(int irq, void *data)
1629{
1630 struct net_device *dev = data;
1631 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1632
1633 /* disable intr if needed */
1634 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1635 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1636
1637 if (adapter->shared->ecr)
1638 vmxnet3_process_events(adapter);
1639
1640 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1641
1642 return IRQ_HANDLED;
1643}
1644
1645#endif /* CONFIG_PCI_MSI */
1646
1647
1428/* Interrupt handler for vmxnet3 */ 1648/* Interrupt handler for vmxnet3 */
1429static irqreturn_t 1649static irqreturn_t
1430vmxnet3_intr(int irq, void *dev_id) 1650vmxnet3_intr(int irq, void *dev_id)
@@ -1432,7 +1652,7 @@ vmxnet3_intr(int irq, void *dev_id)
1432 struct net_device *dev = dev_id; 1652 struct net_device *dev = dev_id;
1433 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1653 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1434 1654
1435 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { 1655 if (adapter->intr.type == VMXNET3_IT_INTX) {
1436 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1656 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1437 if (unlikely(icr == 0)) 1657 if (unlikely(icr == 0))
1438 /* not ours */ 1658 /* not ours */
@@ -1442,77 +1662,144 @@ vmxnet3_intr(int irq, void *dev_id)
1442 1662
1443 /* disable intr if needed */ 1663 /* disable intr if needed */
1444 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1664 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1445 vmxnet3_disable_intr(adapter, 0); 1665 vmxnet3_disable_all_intrs(adapter);
1446 1666
1447 napi_schedule(&adapter->napi); 1667 napi_schedule(&adapter->rx_queue[0].napi);
1448 1668
1449 return IRQ_HANDLED; 1669 return IRQ_HANDLED;
1450} 1670}
1451 1671
1452#ifdef CONFIG_NET_POLL_CONTROLLER 1672#ifdef CONFIG_NET_POLL_CONTROLLER
1453 1673
1454
1455/* netpoll callback. */ 1674/* netpoll callback. */
1456static void 1675static void
1457vmxnet3_netpoll(struct net_device *netdev) 1676vmxnet3_netpoll(struct net_device *netdev)
1458{ 1677{
1459 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1678 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1460 int irq;
1461 1679
1462#ifdef CONFIG_PCI_MSI 1680 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1463 if (adapter->intr.type == VMXNET3_IT_MSIX) 1681 vmxnet3_disable_all_intrs(adapter);
1464 irq = adapter->intr.msix_entries[0].vector; 1682
1465 else 1683 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1466#endif 1684 vmxnet3_enable_all_intrs(adapter);
1467 irq = adapter->pdev->irq;
1468 1685
1469 disable_irq(irq);
1470 vmxnet3_intr(irq, netdev);
1471 enable_irq(irq);
1472} 1686}
1473#endif 1687#endif /* CONFIG_NET_POLL_CONTROLLER */
1474 1688
1475static int 1689static int
1476vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) 1690vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1477{ 1691{
1478 int err; 1692 struct vmxnet3_intr *intr = &adapter->intr;
1693 int err = 0, i;
1694 int vector = 0;
1479 1695
1480#ifdef CONFIG_PCI_MSI 1696#ifdef CONFIG_PCI_MSI
1481 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1697 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1482 /* we only use 1 MSI-X vector */ 1698 for (i = 0; i < adapter->num_tx_queues; i++) {
1483 err = request_irq(adapter->intr.msix_entries[0].vector, 1699 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1484 vmxnet3_intr, 0, adapter->netdev->name, 1700 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1485 adapter->netdev); 1701 adapter->netdev->name, vector);
1486 } else if (adapter->intr.type == VMXNET3_IT_MSI) { 1702 err = request_irq(
1703 intr->msix_entries[vector].vector,
1704 vmxnet3_msix_tx, 0,
1705 adapter->tx_queue[i].name,
1706 &adapter->tx_queue[i]);
1707 } else {
1708 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1709 adapter->netdev->name, vector);
1710 }
1711 if (err) {
1712 dev_err(&adapter->netdev->dev,
1713 "Failed to request irq for MSIX, %s, "
1714 "error %d\n",
1715 adapter->tx_queue[i].name, err);
1716 return err;
1717 }
1718
1719 /* Handle the case where only 1 MSIx was allocated for
1720 * all tx queues */
1721 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1722 for (; i < adapter->num_tx_queues; i++)
1723 adapter->tx_queue[i].comp_ring.intr_idx
1724 = vector;
1725 vector++;
1726 break;
1727 } else {
1728 adapter->tx_queue[i].comp_ring.intr_idx
1729 = vector++;
1730 }
1731 }
1732 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1733 vector = 0;
1734
1735 for (i = 0; i < adapter->num_rx_queues; i++) {
1736 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1737 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1738 adapter->netdev->name, vector);
1739 else
1740 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1741 adapter->netdev->name, vector);
1742 err = request_irq(intr->msix_entries[vector].vector,
1743 vmxnet3_msix_rx, 0,
1744 adapter->rx_queue[i].name,
1745 &(adapter->rx_queue[i]));
1746 if (err) {
1747 printk(KERN_ERR "Failed to request irq for MSIX"
1748 ", %s, error %d\n",
1749 adapter->rx_queue[i].name, err);
1750 return err;
1751 }
1752
1753 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1754 }
1755
1756 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1757 adapter->netdev->name, vector);
1758 err = request_irq(intr->msix_entries[vector].vector,
1759 vmxnet3_msix_event, 0,
1760 intr->event_msi_vector_name, adapter->netdev);
1761 intr->event_intr_idx = vector;
1762
1763 } else if (intr->type == VMXNET3_IT_MSI) {
1764 adapter->num_rx_queues = 1;
1487 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1765 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1488 adapter->netdev->name, adapter->netdev); 1766 adapter->netdev->name, adapter->netdev);
1489 } else 1767 } else {
1490#endif 1768#endif
1491 { 1769 adapter->num_rx_queues = 1;
1492 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1770 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1493 IRQF_SHARED, adapter->netdev->name, 1771 IRQF_SHARED, adapter->netdev->name,
1494 adapter->netdev); 1772 adapter->netdev);
1773#ifdef CONFIG_PCI_MSI
1495 } 1774 }
1496 1775#endif
1497 if (err) 1776 intr->num_intrs = vector + 1;
1777 if (err) {
1498 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" 1778 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1499 ":%d\n", adapter->netdev->name, adapter->intr.type, err); 1779 ":%d\n", adapter->netdev->name, intr->type, err);
1780 } else {
1781 /* Number of rx queues will not change after this */
1782 for (i = 0; i < adapter->num_rx_queues; i++) {
1783 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1784 rq->qid = i;
1785 rq->qid2 = i + adapter->num_rx_queues;
1786 }
1500 1787
1501 1788
1502 if (!err) {
1503 int i;
1504 /* init our intr settings */
1505 for (i = 0; i < adapter->intr.num_intrs; i++)
1506 adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1507 1789
1508 /* next setup intr index for all intr sources */ 1790 /* init our intr settings */
1509 adapter->tx_queue.comp_ring.intr_idx = 0; 1791 for (i = 0; i < intr->num_intrs; i++)
1510 adapter->rx_queue.comp_ring.intr_idx = 0; 1792 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1511 adapter->intr.event_intr_idx = 0; 1793 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1794 adapter->intr.event_intr_idx = 0;
1795 for (i = 0; i < adapter->num_tx_queues; i++)
1796 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1797 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1798 }
1512 1799
1513 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " 1800 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1514 "allocated\n", adapter->netdev->name, adapter->intr.type, 1801 "allocated\n", adapter->netdev->name, intr->type,
1515 adapter->intr.mask_mode, adapter->intr.num_intrs); 1802 intr->mask_mode, intr->num_intrs);
1516 } 1803 }
1517 1804
1518 return err; 1805 return err;
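The vector layout that falls out of the request loops above, illustrated for
a hypothetical adapter with 2 tx and 2 rx queues:

	/* Hypothetical 2-tx/2-rx layout, one line per share_intr mode:
	 *   DONTSHARE:  vec0 tx0, vec1 tx1, vec2 rx0, vec3 rx1, vec4 event
	 *   TXSHARE:    vec0 tx0+tx1, vec1 rx0, vec2 rx1, vec3 event
	 *   BUDDYSHARE: vec0 rx0+tx0, vec1 rx1+tx1, vec2 event
	 */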
@@ -1522,18 +1809,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1522static void 1809static void
1523vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) 1810vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1524{ 1811{
1525 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || 1812 struct vmxnet3_intr *intr = &adapter->intr;
1526 adapter->intr.num_intrs <= 0); 1813 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1527 1814
1528 switch (adapter->intr.type) { 1815 switch (intr->type) {
1529#ifdef CONFIG_PCI_MSI 1816#ifdef CONFIG_PCI_MSI
1530 case VMXNET3_IT_MSIX: 1817 case VMXNET3_IT_MSIX:
1531 { 1818 {
1532 int i; 1819 int i, vector = 0;
1533 1820
1534 for (i = 0; i < adapter->intr.num_intrs; i++) 1821 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1535 free_irq(adapter->intr.msix_entries[i].vector, 1822 for (i = 0; i < adapter->num_tx_queues; i++) {
1536 adapter->netdev); 1823 free_irq(intr->msix_entries[vector++].vector,
1824 &(adapter->tx_queue[i]));
1825 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1826 break;
1827 }
1828 }
1829
1830 for (i = 0; i < adapter->num_rx_queues; i++) {
1831 free_irq(intr->msix_entries[vector++].vector,
1832 &(adapter->rx_queue[i]));
1833 }
1834
1835 free_irq(intr->msix_entries[vector].vector,
1836 adapter->netdev);
1837 BUG_ON(vector >= intr->num_intrs);
1537 break; 1838 break;
1538 } 1839 }
1539#endif 1840#endif
@@ -1727,6 +2028,15 @@ vmxnet3_set_mc(struct net_device *netdev)
1727 kfree(new_table); 2028 kfree(new_table);
1728} 2029}
1729 2030
2031void
2032vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2033{
2034 int i;
2035
2036 for (i = 0; i < adapter->num_rx_queues; i++)
2037 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2038}
2039
1730 2040
1731/* 2041/*
1732 * Set up driver_shared based on settings in adapter. 2042 * Set up driver_shared based on settings in adapter.
@@ -1774,40 +2084,72 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1774 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2084 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1775 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2085 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1776 devRead->misc.queueDescLen = cpu_to_le32( 2086 devRead->misc.queueDescLen = cpu_to_le32(
1777 sizeof(struct Vmxnet3_TxQueueDesc) + 2087 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
1778 sizeof(struct Vmxnet3_RxQueueDesc)); 2088 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
1779 2089
1780 /* tx queue settings */ 2090 /* tx queue settings */
1781 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 2091 devRead->misc.numTxQueues = adapter->num_tx_queues;
1782 2092 for (i = 0; i < adapter->num_tx_queues; i++) {
1783 devRead->misc.numTxQueues = 1; 2093 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
1784 tqc = &adapter->tqd_start->conf; 2094 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
1785 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA); 2095 tqc = &adapter->tqd_start[i].conf;
1786 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA); 2096 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
1787 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA); 2097 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
1788 tqc->ddPA = cpu_to_le64(virt_to_phys( 2098 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
1789 adapter->tx_queue.buf_info)); 2099 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
1790 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size); 2100 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
1791 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size); 2101 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
1792 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size); 2102 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
1793 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) * 2103 tqc->ddLen = cpu_to_le32(
1794 tqc->txRingSize); 2104 sizeof(struct vmxnet3_tx_buf_info) *
1795 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 2105 tqc->txRingSize);
2106 tqc->intrIdx = tq->comp_ring.intr_idx;
2107 }
1796 2108
1797 /* rx queue settings */ 2109 /* rx queue settings */
1798 devRead->misc.numRxQueues = 1; 2110 devRead->misc.numRxQueues = adapter->num_rx_queues;
1799 rqc = &adapter->rqd_start->conf; 2111 for (i = 0; i < adapter->num_rx_queues; i++) {
1800 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA); 2112 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1801 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA); 2113 rqc = &adapter->rqd_start[i].conf;
1802 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA); 2114 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
1803 rqc->ddPA = cpu_to_le64(virt_to_phys( 2115 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
1804 adapter->rx_queue.buf_info)); 2116 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
1805 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size); 2117 rqc->ddPA = cpu_to_le64(virt_to_phys(
1806 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size); 2118 rq->buf_info));
1807 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size); 2119 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
1808 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) * 2120 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
1809 (rqc->rxRingSize[0] + rqc->rxRingSize[1])); 2121 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
1810 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 2122 rqc->ddLen = cpu_to_le32(
2123 sizeof(struct vmxnet3_rx_buf_info) *
2124 (rqc->rxRingSize[0] +
2125 rqc->rxRingSize[1]));
2126 rqc->intrIdx = rq->comp_ring.intr_idx;
2127 }
2128
2129#ifdef VMXNET3_RSS
2130 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2131
2132 if (adapter->rss) {
2133 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2134 devRead->misc.uptFeatures |= UPT1_F_RSS;
2135 devRead->misc.numRxQueues = adapter->num_rx_queues;
2136 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2137 UPT1_RSS_HASH_TYPE_IPV4 |
2138 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2139 UPT1_RSS_HASH_TYPE_IPV6;
2140 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2141 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2142 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2143 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2144 for (i = 0; i < rssConf->indTableSize; i++)
2145 rssConf->indTable[i] = i % adapter->num_rx_queues;
2146
2147 devRead->rssConfDesc.confVer = 1;
2148 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2149 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2150 }
2151
2152#endif /* VMXNET3_RSS */
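The indirection table built above spreads flows round-robin over the rx
queues; the device hashes each flow's tuple with the Toeplitz key and uses
the hash to pick a table slot. A sketch of the steering decision, assuming
the device indexes the table with the hash modulo indTableSize (flow_hash
stands in for the device-computed Toeplitz hash):

	/* Sketch: how the NIC maps a flow hash to an rx queue. */
	static u32 vmxnet3_rss_pick_queue(const struct UPT1_RSSConf *rssConf,
					  u32 flow_hash)
	{
		return rssConf->indTable[flow_hash % rssConf->indTableSize];
	}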
1811 2153
1812 /* intr settings */ 2154 /* intr settings */
1813 devRead->intrConf.autoMask = adapter->intr.mask_mode == 2155 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1829,18 +2171,18 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1829int 2171int
1830vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2172vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1831{ 2173{
1832 int err; 2174 int err, i;
1833 u32 ret; 2175 u32 ret;
1834 2176
1835 dev_dbg(&adapter->netdev->dev, 2177 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
1836 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 2178 " ring sizes %u %u %u\n", adapter->netdev->name,
1837 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 2179 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
1838 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 2180 adapter->tx_queue[0].tx_ring.size,
1839 adapter->rx_queue.rx_ring[0].size, 2181 adapter->rx_queue[0].rx_ring[0].size,
1840 adapter->rx_queue.rx_ring[1].size); 2182 adapter->rx_queue[0].rx_ring[1].size);
1841 2183
1842 vmxnet3_tq_init(&adapter->tx_queue, adapter); 2184 vmxnet3_tq_init_all(adapter);
1843 err = vmxnet3_rq_init(&adapter->rx_queue, adapter); 2185 err = vmxnet3_rq_init_all(adapter);
1844 if (err) { 2186 if (err) {
1845 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", 2187 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1846 adapter->netdev->name, err); 2188 adapter->netdev->name, err);
@@ -1870,10 +2212,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1870 err = -EINVAL; 2212 err = -EINVAL;
1871 goto activate_err; 2213 goto activate_err;
1872 } 2214 }
1873 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, 2215
1874 adapter->rx_queue.rx_ring[0].next2fill); 2216 for (i = 0; i < adapter->num_rx_queues; i++) {
1875 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, 2217 VMXNET3_WRITE_BAR0_REG(adapter,
1876 adapter->rx_queue.rx_ring[1].next2fill); 2218 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2219 adapter->rx_queue[i].rx_ring[0].next2fill);
2220 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2221 (i * VMXNET3_REG_ALIGN)),
2222 adapter->rx_queue[i].rx_ring[1].next2fill);
2223 }
1877 2224
1878 /* Apply the rx filter settings last. */ 2225 /* Apply the rx filter settings last. */
1879 vmxnet3_set_mc(adapter->netdev); 2226 vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2230,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1883 * tx queue if the link is up. 2230 * tx queue if the link is up.
1884 */ 2231 */
1885 vmxnet3_check_link(adapter, true); 2232 vmxnet3_check_link(adapter, true);
1886 2233 for (i = 0; i < adapter->num_rx_queues; i++)
1887 napi_enable(&adapter->napi); 2234 napi_enable(&adapter->rx_queue[i].napi);
1888 vmxnet3_enable_all_intrs(adapter); 2235 vmxnet3_enable_all_intrs(adapter);
1889 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2236 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1890 return 0; 2237 return 0;
@@ -1896,7 +2243,7 @@ activate_err:
1896irq_err: 2243irq_err:
1897rq_err: 2244rq_err:
1898 /* free up buffers we allocated */ 2245 /* free up buffers we allocated */
1899 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2246 vmxnet3_rq_cleanup_all(adapter);
1900 return err; 2247 return err;
1901} 2248}
1902 2249
@@ -1911,6 +2258,7 @@ vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1911int 2258int
1912vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2259vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1913{ 2260{
2261 int i;
1914 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2262 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1915 return 0; 2263 return 0;
1916 2264
@@ -1919,13 +2267,14 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1919 VMXNET3_CMD_QUIESCE_DEV); 2267 VMXNET3_CMD_QUIESCE_DEV);
1920 vmxnet3_disable_all_intrs(adapter); 2268 vmxnet3_disable_all_intrs(adapter);
1921 2269
1922 napi_disable(&adapter->napi); 2270 for (i = 0; i < adapter->num_rx_queues; i++)
2271 napi_disable(&adapter->rx_queue[i].napi);
1923 netif_tx_disable(adapter->netdev); 2272 netif_tx_disable(adapter->netdev);
1924 adapter->link_speed = 0; 2273 adapter->link_speed = 0;
1925 netif_carrier_off(adapter->netdev); 2274 netif_carrier_off(adapter->netdev);
1926 2275
1927 vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); 2276 vmxnet3_tq_cleanup_all(adapter);
1928 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2277 vmxnet3_rq_cleanup_all(adapter);
1929 vmxnet3_free_irqs(adapter); 2278 vmxnet3_free_irqs(adapter);
1930 return 0; 2279 return 0;
1931} 2280}
@@ -2047,7 +2396,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2047static void 2396static void
2048vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2397vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2049{ 2398{
2050 size_t sz; 2399 size_t sz, i, ring0_size, ring1_size, comp_size;
2400 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2401
2051 2402
2052 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2403 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2053 VMXNET3_MAX_ETH_HDR_SIZE) { 2404 VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2420,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2069 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2420 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2070 */ 2421 */
2071 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2422 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2072 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + 2423 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2073 sz - 1) / sz * sz; 2424 ring0_size = (ring0_size + sz - 1) / sz * sz;
2074 adapter->rx_queue.rx_ring[0].size = min_t(u32, 2425 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2075 adapter->rx_queue.rx_ring[0].size, 2426 sz * sz);
2076 VMXNET3_RX_RING_MAX_SIZE / sz * sz); 2427 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2428 comp_size = ring0_size + ring1_size;
2429
2430 for (i = 0; i < adapter->num_rx_queues; i++) {
2431 rq = &adapter->rx_queue[i];
2432 rq->rx_ring[0].size = ring0_size;
2433 rq->rx_ring[1].size = ring1_size;
2434 rq->comp_ring.size = comp_size;
2435 }
2077} 2436}
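The arithmetic above rounds ring 0 up to a multiple of
rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN and clamps it to the largest such
multiple fitting in VMXNET3_RX_RING_MAX_SIZE; every rx queue then shares the
resulting sizes. A worked example with hypothetical values (rx_buf_per_pkt
of 3, alignment of 32, requested ring 0 size of 256):

	/* Hypothetical numbers, for illustration only:
	 *   sz         = 3 * 32               =  96
	 *   ring0_size = (256 + 95) / 96 * 96 = 288  (round up)
	 *   clamp      = VMXNET3_RX_RING_MAX_SIZE / 96 * 96
	 */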
2078 2437
2079 2438
@@ -2081,29 +2440,53 @@ int
2081vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2440vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2082 u32 rx_ring_size, u32 rx_ring2_size) 2441 u32 rx_ring_size, u32 rx_ring2_size)
2083{ 2442{
2084 int err; 2443 int err = 0, i;
2085 2444
2086 adapter->tx_queue.tx_ring.size = tx_ring_size; 2445 for (i = 0; i < adapter->num_tx_queues; i++) {
2087 adapter->tx_queue.data_ring.size = tx_ring_size; 2446 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2088 adapter->tx_queue.comp_ring.size = tx_ring_size; 2447 tq->tx_ring.size = tx_ring_size;
2089 adapter->tx_queue.shared = &adapter->tqd_start->ctrl; 2448 tq->data_ring.size = tx_ring_size;
2090 adapter->tx_queue.stopped = true; 2449 tq->comp_ring.size = tx_ring_size;
2091 err = vmxnet3_tq_create(&adapter->tx_queue, adapter); 2450 tq->shared = &adapter->tqd_start[i].ctrl;
2092 if (err) 2451 tq->stopped = true;
2093 return err; 2452 tq->adapter = adapter;
2453 tq->qid = i;
2454 err = vmxnet3_tq_create(tq, adapter);
2455 /*
2456 * Too late to change num_tx_queues. We cannot do away with
2457 * lesser number of queues than what we asked for
2458 */
2459 if (err)
2460 goto queue_err;
2461 }
2094 2462
2095 adapter->rx_queue.rx_ring[0].size = rx_ring_size; 2463 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2096 adapter->rx_queue.rx_ring[1].size = rx_ring2_size; 2464 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2097 vmxnet3_adjust_rx_ring_size(adapter); 2465 vmxnet3_adjust_rx_ring_size(adapter);
2098 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + 2466 for (i = 0; i < adapter->num_rx_queues; i++) {
2099 adapter->rx_queue.rx_ring[1].size; 2467 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2100 adapter->rx_queue.qid = 0; 2468 /* qid and qid2 for rx queues will be assigned later when num
2101 adapter->rx_queue.qid2 = 1; 2469 * of rx queues is finalized after allocating intrs */
2102 adapter->rx_queue.shared = &adapter->rqd_start->ctrl; 2470 rq->shared = &adapter->rqd_start[i].ctrl;
2103 err = vmxnet3_rq_create(&adapter->rx_queue, adapter); 2471 rq->adapter = adapter;
2104 if (err) 2472 err = vmxnet3_rq_create(rq, adapter);
2105 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2473 if (err) {
2106 2474 if (i == 0) {
2475 printk(KERN_ERR "Could not allocate any rx"
2476 " queues. Aborting.\n");
2477 goto queue_err;
2478 } else {
2479 printk(KERN_INFO "Number of rx queues changed "
2480 "to %d.\n", i);
2481 adapter->num_rx_queues = i;
2482 err = 0;
2483 break;
2484 }
2485 }
2486 }
2487 return err;
2488queue_err:
2489 vmxnet3_tq_destroy_all(adapter);
2107 return err; 2490 return err;
2108} 2491}
2109 2492
@@ -2111,11 +2494,12 @@ static int
2111vmxnet3_open(struct net_device *netdev) 2494vmxnet3_open(struct net_device *netdev)
2112{ 2495{
2113 struct vmxnet3_adapter *adapter; 2496 struct vmxnet3_adapter *adapter;
2114 int err; 2497 int err, i;
2115 2498
2116 adapter = netdev_priv(netdev); 2499 adapter = netdev_priv(netdev);
2117 2500
2118 spin_lock_init(&adapter->tx_queue.tx_lock); 2501 for (i = 0; i < adapter->num_tx_queues; i++)
2502 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2119 2503
2120 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2504 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2121 VMXNET3_DEF_RX_RING_SIZE, 2505 VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2514,8 @@ vmxnet3_open(struct net_device *netdev)
2130 return 0; 2514 return 0;
2131 2515
2132activate_err: 2516activate_err:
2133 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2517 vmxnet3_rq_destroy_all(adapter);
2134 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2518 vmxnet3_tq_destroy_all(adapter);
2135queue_err: 2519queue_err:
2136 return err; 2520 return err;
2137} 2521}
@@ -2151,8 +2535,8 @@ vmxnet3_close(struct net_device *netdev)
2151 2535
2152 vmxnet3_quiesce_dev(adapter); 2536 vmxnet3_quiesce_dev(adapter);
2153 2537
2154 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2538 vmxnet3_rq_destroy_all(adapter);
2155 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2539 vmxnet3_tq_destroy_all(adapter);
2156 2540
2157 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2541 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2158 2542
@@ -2164,6 +2548,8 @@ vmxnet3_close(struct net_device *netdev)
2164void 2548void
2165vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2549vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2166{ 2550{
2551 int i;
2552
2167 /* 2553 /*
2168 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise 2554 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2169 * vmxnet3_close() will deadlock. 2555 * vmxnet3_close() will deadlock.
@@ -2171,7 +2557,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2171 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2557 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2172 2558
2173 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2559 /* we need to enable NAPI, otherwise dev_close will deadlock */
2174 napi_enable(&adapter->napi); 2560 for (i = 0; i < adapter->num_rx_queues; i++)
2561 napi_enable(&adapter->rx_queue[i].napi);
2175 dev_close(adapter->netdev); 2562 dev_close(adapter->netdev);
2176} 2563}
2177 2564
@@ -2202,14 +2589,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2202 vmxnet3_reset_dev(adapter); 2589 vmxnet3_reset_dev(adapter);
2203 2590
2204 /* we need to re-create the rx queue based on the new mtu */ 2591 /* we need to re-create the rx queue based on the new mtu */
2205 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2592 vmxnet3_rq_destroy_all(adapter);
2206 vmxnet3_adjust_rx_ring_size(adapter); 2593 vmxnet3_adjust_rx_ring_size(adapter);
2207 adapter->rx_queue.comp_ring.size = 2594 err = vmxnet3_rq_create_all(adapter);
2208 adapter->rx_queue.rx_ring[0].size +
2209 adapter->rx_queue.rx_ring[1].size;
2210 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2211 if (err) { 2595 if (err) {
2212 printk(KERN_ERR "%s: failed to re-create rx queue," 2596 printk(KERN_ERR "%s: failed to re-create rx queues,"
2213 " error %d. Closing it.\n", netdev->name, err); 2597 " error %d. Closing it.\n", netdev->name, err);
2214 goto out; 2598 goto out;
2215 } 2599 }
@@ -2274,6 +2658,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2274 mac[5] = (tmp >> 8) & 0xff; 2658 mac[5] = (tmp >> 8) & 0xff;
2275} 2659}
2276 2660
2661#ifdef CONFIG_PCI_MSI
2662
2663/*
2664 * Enable MSIx vectors.
2665 * Returns:
2666 * 0 on successful enabling of required vectors,
2667 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
2668 * vectors could be enabled,
2669 * number of vectors which can be enabled otherwise (this number is smaller
2670 * than VMXNET3_LINUX_MIN_MSIX_VECT)
2671 */
2672
2673static int
2674vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2675 int vectors)
2676{
2677 int err = 0, vector_threshold;
2678 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2679
2680 while (vectors >= vector_threshold) {
2681 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2682 vectors);
2683 if (!err) {
2684 adapter->intr.num_intrs = vectors;
2685 return 0;
2686 } else if (err < 0) {
2687 printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2688 " %d\n", adapter->netdev->name, err);
2689 vectors = 0;
2690 } else if (err < vector_threshold) {
2691 break;
2692 } else {
2693 /* If fails to enable required number of MSI-x vectors
2694 * try enabling 3 of them. One each for rx, tx and event
2695 */
2696 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2697 " %d instead\n", vectors, adapter->netdev->name,
2698 vector_threshold);
2699 vectors = vector_threshold;
2700 }
2701 }
2702
2703 printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
2704 " is lower than min threshold required.\n");
2705 return err;
2706}
2707
2708
2709#endif /* CONFIG_PCI_MSI */
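The retry loop above leans on the pci_enable_msix() contract of this kernel
generation: zero means all requested vectors were enabled, a negative value
is a hard failure, and a positive value is the number of vectors that could
be allocated instead. Condensed:

	/* pci_enable_msix() return convention the loop relies on:
	 *   ret == 0 -> all 'vectors' entries enabled
	 *   ret <  0 -> hard failure, give up on MSI-X
	 *   ret >  0 -> only 'ret' vectors available; retry with fewer
	 */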
2277 2710
2278static void 2711static void
2279vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2712vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2293,16 +2726,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2293 2726
2294#ifdef CONFIG_PCI_MSI 2727#ifdef CONFIG_PCI_MSI
2295 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2728 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2296 int err; 2729 int vector, err = 0;
2297 2730
2298 adapter->intr.msix_entries[0].entry = 0; 2731 adapter->intr.num_intrs = (adapter->share_intr ==
2299 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2732 VMXNET3_INTR_TXSHARE) ? 1 :
2300 VMXNET3_LINUX_MAX_MSIX_VECT); 2733 adapter->num_tx_queues;
2301 if (!err) { 2734 adapter->intr.num_intrs += (adapter->share_intr ==
2302 adapter->intr.num_intrs = 1; 2735 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2303 adapter->intr.type = VMXNET3_IT_MSIX; 2736 adapter->num_rx_queues;
2737 adapter->intr.num_intrs += 1; /* for link event */
2738
2739 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2740 VMXNET3_LINUX_MIN_MSIX_VECT
2741 ? adapter->intr.num_intrs :
2742 VMXNET3_LINUX_MIN_MSIX_VECT);
2743
2744 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2745 adapter->intr.msix_entries[vector].entry = vector;
2746
2747 err = vmxnet3_acquire_msix_vectors(adapter,
2748 adapter->intr.num_intrs);
2749 /* If we cannot allocate one MSIx vector per queue
2750 * then limit the number of rx queues to 1
2751 */
2752 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2753 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2754 || adapter->num_rx_queues != 2) {
2755 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2756 printk(KERN_ERR "Number of rx queues : 1\n");
2757 adapter->num_rx_queues = 1;
2758 adapter->intr.num_intrs =
2759 VMXNET3_LINUX_MIN_MSIX_VECT;
2760 }
2304 return; 2761 return;
2305 } 2762 }
2763 if (!err)
2764 return;
2765
2766 /* If we cannot allocate MSIx vectors use only one rx queue */
2767 printk(KERN_INFO "Failed to enable MSI-X for %s, error %d."
2768 " #rx queues : 1, try MSI\n", adapter->netdev->name, err);
2769
2306 adapter->intr.type = VMXNET3_IT_MSI; 2770 adapter->intr.type = VMXNET3_IT_MSI;
2307 } 2771 }
2308 2772
@@ -2310,12 +2774,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2310 int err; 2774 int err;
2311 err = pci_enable_msi(adapter->pdev); 2775 err = pci_enable_msi(adapter->pdev);
2312 if (!err) { 2776 if (!err) {
2777 adapter->num_rx_queues = 1;
2313 adapter->intr.num_intrs = 1; 2778 adapter->intr.num_intrs = 1;
2314 return; 2779 return;
2315 } 2780 }
2316 } 2781 }
2317#endif /* CONFIG_PCI_MSI */ 2782#endif /* CONFIG_PCI_MSI */
2318 2783
2784 adapter->num_rx_queues = 1;
2785 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2319 adapter->intr.type = VMXNET3_IT_INTX; 2786 adapter->intr.type = VMXNET3_IT_INTX;
2320 2787
2321 /* INT-X related setting */ 2788 /* INT-X related setting */
@@ -2343,6 +2810,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
2343 2810
2344 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2811 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2345 schedule_work(&adapter->work); 2812 schedule_work(&adapter->work);
2813 netif_wake_queue(adapter->netdev);
2346} 2814}
2347 2815
2348 2816
@@ -2399,8 +2867,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2399 struct net_device *netdev; 2867 struct net_device *netdev;
2400 struct vmxnet3_adapter *adapter; 2868 struct vmxnet3_adapter *adapter;
2401 u8 mac[ETH_ALEN]; 2869 u8 mac[ETH_ALEN];
2870 int size;
2871 int num_tx_queues;
2872 int num_rx_queues;
2873
2874#ifdef VMXNET3_RSS
2875 if (enable_mq)
2876 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2877 (int)num_online_cpus());
2878 else
2879#endif
2880 num_rx_queues = 1;
2881
2882 if (enable_mq)
2883 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2884 (int)num_online_cpus());
2885 else
2886 num_tx_queues = 1;
2887
2888 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2889 max(num_tx_queues, num_rx_queues));
2890 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2891 num_tx_queues, num_rx_queues);
2402 2892
2403 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2404 if (!netdev) { 2893 if (!netdev) {
2405 printk(KERN_ERR "Failed to alloc ethernet device for adapter " 2894 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2406 "%s\n", pci_name(pdev)); 2895 "%s\n", pci_name(pdev));
@@ -2422,9 +2911,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2422 goto err_alloc_shared; 2911 goto err_alloc_shared;
2423 } 2912 }
2424 2913
2425 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, 2914 adapter->num_rx_queues = num_rx_queues;
2426 sizeof(struct Vmxnet3_TxQueueDesc) + 2915 adapter->num_tx_queues = num_tx_queues;
2427 sizeof(struct Vmxnet3_RxQueueDesc), 2916
2917 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2918 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2919 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2428 &adapter->queue_desc_pa); 2920 &adapter->queue_desc_pa);
2429 2921
2430 if (!adapter->tqd_start) { 2922 if (!adapter->tqd_start) {
@@ -2433,8 +2925,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2433 err = -ENOMEM; 2925 err = -ENOMEM;
2434 goto err_alloc_queue_desc; 2926 goto err_alloc_queue_desc;
2435 } 2927 }
2436 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start 2928 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2437 + 1); 2929 adapter->num_tx_queues);
2438 2930
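All queue descriptors live in the single pci_alloc_consistent() block sized
above: num_tx_queues Vmxnet3_TxQueueDesc entries followed by num_rx_queues
Vmxnet3_RxQueueDesc entries, so rqd_start is simply tqd_start plus
num_tx_queues. Layout sketch:

	/* One DMA-coherent block, described to the device by
	 * queueDescPA/queueDescLen in driver_shared:
	 *
	 *   tqd_start -> [TxQueueDesc 0] ... [TxQueueDesc ntx-1]
	 *   rqd_start -> [RxQueueDesc 0] ... [RxQueueDesc nrx-1]
	 */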
2439 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2931 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2440 if (adapter->pm_conf == NULL) { 2932 if (adapter->pm_conf == NULL) {
@@ -2444,6 +2936,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2444 goto err_alloc_pm; 2936 goto err_alloc_pm;
2445 } 2937 }
2446 2938
2939#ifdef VMXNET3_RSS
2940
2941 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2942 if (adapter->rss_conf == NULL) {
2943 printk(KERN_ERR "Failed to allocate memory for %s\n",
2944 pci_name(pdev));
2945 err = -ENOMEM;
2946 goto err_alloc_rss;
2947 }
2948#endif /* VMXNET3_RSS */
2949
2447 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2950 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2448 if (err < 0) 2951 if (err < 0)
2449 goto err_alloc_pci; 2952 goto err_alloc_pci;
@@ -2471,18 +2974,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2471 vmxnet3_declare_features(adapter, dma64); 2974 vmxnet3_declare_features(adapter, dma64);
2472 2975
2473 adapter->dev_number = atomic_read(&devices_found); 2976 adapter->dev_number = atomic_read(&devices_found);
2977
2978 adapter->share_intr = irq_share_mode;
2979 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
2980 adapter->num_tx_queues != adapter->num_rx_queues)
2981 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
2982
2474 vmxnet3_alloc_intr_resources(adapter); 2983 vmxnet3_alloc_intr_resources(adapter);
2475 2984
2985#ifdef VMXNET3_RSS
2986 if (adapter->num_rx_queues > 1 &&
2987 adapter->intr.type == VMXNET3_IT_MSIX) {
2988 adapter->rss = true;
2989 printk(KERN_INFO "RSS is enabled.\n");
2990 } else {
2991 adapter->rss = false;
2992 }
2993#endif
2994
2476 vmxnet3_read_mac_addr(adapter, mac); 2995 vmxnet3_read_mac_addr(adapter, mac);
2477 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2996 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2478 2997
2479 netdev->netdev_ops = &vmxnet3_netdev_ops; 2998 netdev->netdev_ops = &vmxnet3_netdev_ops;
2480 netdev->watchdog_timeo = 5 * HZ;
2481 vmxnet3_set_ethtool_ops(netdev); 2999 vmxnet3_set_ethtool_ops(netdev);
3000 netdev->watchdog_timeo = 5 * HZ;
2482 3001
2483 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3002 INIT_WORK(&adapter->work, vmxnet3_reset_work);
2484 3003
2485 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); 3004 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3005 int i;
3006 for (i = 0; i < adapter->num_rx_queues; i++) {
3007 netif_napi_add(adapter->netdev,
3008 &adapter->rx_queue[i].napi,
3009 vmxnet3_poll_rx_only, 64);
3010 }
3011 } else {
3012 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3013 vmxnet3_poll, 64);
3014 }
3015
3016 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3017 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3018
2486 SET_NETDEV_DEV(netdev, &pdev->dev); 3019 SET_NETDEV_DEV(netdev, &pdev->dev);
2487 err = register_netdev(netdev); 3020 err = register_netdev(netdev);
2488 3021
@@ -2502,11 +3035,14 @@ err_register:
2502err_ver: 3035err_ver:
2503 vmxnet3_free_pci_resources(adapter); 3036 vmxnet3_free_pci_resources(adapter);
2504err_alloc_pci: 3037err_alloc_pci:
3038#ifdef VMXNET3_RSS
3039 kfree(adapter->rss_conf);
3040err_alloc_rss:
3041#endif
2505 kfree(adapter->pm_conf); 3042 kfree(adapter->pm_conf);
2506err_alloc_pm: 3043err_alloc_pm:
2507 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3044 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
2508 sizeof(struct Vmxnet3_RxQueueDesc), 3045 adapter->queue_desc_pa);
2509 adapter->tqd_start, adapter->queue_desc_pa);
2510err_alloc_queue_desc: 3046err_alloc_queue_desc:
2511 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3047 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2512 adapter->shared, adapter->shared_pa); 3048 adapter->shared, adapter->shared_pa);
@@ -2522,17 +3058,32 @@ vmxnet3_remove_device(struct pci_dev *pdev)
2522{ 3058{
2523 struct net_device *netdev = pci_get_drvdata(pdev); 3059 struct net_device *netdev = pci_get_drvdata(pdev);
2524 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3060 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3061 int size = 0;
3062 int num_rx_queues;
2525 3063
2526 flush_scheduled_work(); 3064#ifdef VMXNET3_RSS
3065 if (enable_mq)
3066 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3067 (int)num_online_cpus());
3068 else
3069#endif
3070 num_rx_queues = 1;
3071
3072 cancel_work_sync(&adapter->work);
2527 3073
2528 unregister_netdev(netdev); 3074 unregister_netdev(netdev);
2529 3075
2530 vmxnet3_free_intr_resources(adapter); 3076 vmxnet3_free_intr_resources(adapter);
2531 vmxnet3_free_pci_resources(adapter); 3077 vmxnet3_free_pci_resources(adapter);
3078#ifdef VMXNET3_RSS
3079 kfree(adapter->rss_conf);
3080#endif
2532 kfree(adapter->pm_conf); 3081 kfree(adapter->pm_conf);
2533 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3082
2534 sizeof(struct Vmxnet3_RxQueueDesc), 3083 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2535 adapter->tqd_start, adapter->queue_desc_pa); 3084 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3085 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3086 adapter->queue_desc_pa);
2536 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3087 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2537 adapter->shared, adapter->shared_pa); 3088 adapter->shared, adapter->shared_pa);
2538 free_netdev(netdev); 3089 free_netdev(netdev);
@@ -2563,7 +3114,7 @@ vmxnet3_suspend(struct device *device)
2563 vmxnet3_free_intr_resources(adapter); 3114 vmxnet3_free_intr_resources(adapter);
2564 3115
2565 netif_device_detach(netdev); 3116 netif_device_detach(netdev);
2566 netif_stop_queue(netdev); 3117 netif_tx_stop_all_queues(netdev);
2567 3118
2568 /* Create wake-up filters. */ 3119 /* Create wake-up filters. */
2569 pmConf = adapter->pm_conf; 3120 pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92e..8e17fc8a7fe7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 151 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 152 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 153 struct net_device_stats *net_stats = &netdev->stats;
154 int i;
154 155
155 adapter = netdev_priv(netdev); 156 adapter = netdev_priv(netdev);
156 157
157 /* Collect the dev stats into the shared area */ 158 /* Collect the dev stats into the shared area */
158 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
159 160
160 /* Assuming that we have a single queue device */
161 devTxStats = &adapter->tqd_start->stats;
162 devRxStats = &adapter->rqd_start->stats;
163
164 /* Get access to the driver stats per queue */
165 drvTxStats = &adapter->tx_queue.stats;
166 drvRxStats = &adapter->rx_queue.stats;
167
168 memset(net_stats, 0, sizeof(*net_stats)); 161 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) {
163 devTxStats = &adapter->tqd_start[i].stats;
164 drvTxStats = &adapter->tx_queue[i].stats;
165 net_stats->tx_packets += devTxStats->ucastPktsTxOK +
166 devTxStats->mcastPktsTxOK +
167 devTxStats->bcastPktsTxOK;
168 net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
169 devTxStats->mcastBytesTxOK +
170 devTxStats->bcastBytesTxOK;
171 net_stats->tx_errors += devTxStats->pktsTxError;
172 net_stats->tx_dropped += drvTxStats->drop_total;
173 }
169 174
170 net_stats->rx_packets = devRxStats->ucastPktsRxOK + 175 for (i = 0; i < adapter->num_rx_queues; i++) {
171 devRxStats->mcastPktsRxOK + 176 devRxStats = &adapter->rqd_start[i].stats;
172 devRxStats->bcastPktsRxOK; 177 drvRxStats = &adapter->rx_queue[i].stats;
173 178 net_stats->rx_packets += devRxStats->ucastPktsRxOK +
174 net_stats->tx_packets = devTxStats->ucastPktsTxOK + 179 devRxStats->mcastPktsRxOK +
175 devTxStats->mcastPktsTxOK + 180 devRxStats->bcastPktsRxOK;
176 devTxStats->bcastPktsTxOK;
177
178 net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
179 devRxStats->mcastBytesRxOK +
180 devRxStats->bcastBytesRxOK;
181
182 net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
183 devTxStats->mcastBytesTxOK +
184 devTxStats->bcastBytesTxOK;
185 181
186 net_stats->rx_errors = devRxStats->pktsRxError; 182 net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
187 net_stats->tx_errors = devTxStats->pktsTxError; 183 devRxStats->mcastBytesRxOK +
188 net_stats->rx_dropped = drvRxStats->drop_total; 184 devRxStats->bcastBytesRxOK;
189 net_stats->tx_dropped = drvTxStats->drop_total;
190 net_stats->multicast = devRxStats->mcastPktsRxOK;
191 185
186 net_stats->rx_errors += devRxStats->pktsRxError;
187 net_stats->rx_dropped += drvRxStats->drop_total;
188 net_stats->multicast += devRxStats->mcastPktsRxOK;
189 }
192 return net_stats; 190 return net_stats;
193} 191}
194 192
@@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
307 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 305 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
308 u8 *base; 306 u8 *base;
309 int i; 307 int i;
308 int j = 0;
310 309
311 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
312 311
313 /* this does assume each counter is 64-bit wide */ 312 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */
314 314
315 base = (u8 *)&adapter->tqd_start->stats; 315 base = (u8 *)&adapter->tqd_start[j].stats;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
318 318
319 base = (u8 *)&adapter->tx_queue.stats; 319 base = (u8 *)&adapter->tx_queue[j].stats;
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
322 322
323 base = (u8 *)&adapter->rqd_start->stats; 323 base = (u8 *)&adapter->rqd_start[j].stats;
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); 325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 326
327 base = (u8 *)&adapter->rx_queue.stats; 327 base = (u8 *)&adapter->rx_queue[j].stats;
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
330 330
@@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 339{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 341 u32 *buf = p;
342 int i = 0;
342 343
343 memset(p, 0, vmxnet3_get_regs_len(netdev)); 344 memset(p, 0, vmxnet3_get_regs_len(netdev));
344 345
@@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
347 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 348 /* Update vmxnet3_get_regs_len if we want to dump more registers */
348 349
349 /* make each ring use multiple of 16 bytes */ 350 /* make each ring use multiple of 16 bytes */
350 buf[0] = adapter->tx_queue.tx_ring.next2fill; 351/* TODO change this for multiple queues */
351 buf[1] = adapter->tx_queue.tx_ring.next2comp; 352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
352 buf[2] = adapter->tx_queue.tx_ring.gen; 353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen;
353 buf[3] = 0; 355 buf[3] = 0;
354 356
355 buf[4] = adapter->tx_queue.comp_ring.next2proc; 357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
356 buf[5] = adapter->tx_queue.comp_ring.gen; 358 buf[5] = adapter->tx_queue[i].comp_ring.gen;
357 buf[6] = adapter->tx_queue.stopped; 359 buf[6] = adapter->tx_queue[i].stopped;
358 buf[7] = 0; 360 buf[7] = 0;
359 361
360 buf[8] = adapter->rx_queue.rx_ring[0].next2fill; 362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
361 buf[9] = adapter->rx_queue.rx_ring[0].next2comp; 363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
362 buf[10] = adapter->rx_queue.rx_ring[0].gen; 364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
363 buf[11] = 0; 365 buf[11] = 0;
364 366
365 buf[12] = adapter->rx_queue.rx_ring[1].next2fill; 367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
366 buf[13] = adapter->rx_queue.rx_ring[1].next2comp; 368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
367 buf[14] = adapter->rx_queue.rx_ring[1].gen; 369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
368 buf[15] = 0; 370 buf[15] = 0;
369 371
370 buf[16] = adapter->rx_queue.comp_ring.next2proc; 372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
371 buf[17] = adapter->rx_queue.comp_ring.gen; 373 buf[17] = adapter->rx_queue[i].comp_ring.gen;
372 buf[18] = 0; 374 buf[18] = 0;
373 buf[19] = 0; 375 buf[19] = 0;
374} 376}
@@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
435 param->rx_mini_max_pending = 0; 437 param->rx_mini_max_pending = 0;
436 param->rx_jumbo_max_pending = 0; 438 param->rx_jumbo_max_pending = 0;
437 439
438 param->rx_pending = adapter->rx_queue.rx_ring[0].size; 440 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
439 param->tx_pending = adapter->tx_queue.tx_ring.size; 441 adapter->num_rx_queues;
442 param->tx_pending = adapter->tx_queue[0].tx_ring.size *
443 adapter->num_tx_queues;
440 param->rx_mini_pending = 0; 444 param->rx_mini_pending = 0;
441 param->rx_jumbo_pending = 0; 445 param->rx_jumbo_pending = 0;
442} 446}
@@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
480 sz) != 0) 484 sz) != 0)
481 return -EINVAL; 485 return -EINVAL;
482 486
483 if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && 487 if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
484 new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { 488 new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
485 return 0; 489 return 0;
486 } 490 }
487 491
@@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
498 502
499 /* recreate the rx queue and the tx queue based on the 503 /* recreate the rx queue and the tx queue based on the
500 * new sizes */ 504 * new sizes */
501 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 505 vmxnet3_tq_destroy_all(adapter);
502 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 506 vmxnet3_rq_destroy_all(adapter);
503 507
504 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 508 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
505 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); 509 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
510
506 if (err) { 511 if (err) {
507 /* failed, most likely because of OOM, try default 512 /* failed, most likely because of OOM, try default
508 * size */ 513 * size */
@@ -535,6 +540,66 @@ out:
535} 540}
536 541
537 542
543static int
544vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
545 void *rules)
546{
547 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
548 switch (info->cmd) {
549 case ETHTOOL_GRXRINGS:
550 info->data = adapter->num_rx_queues;
551 return 0;
552 }
553 return -EOPNOTSUPP;
554}
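
For reference, the new get_rxnfc hook is what a userspace ETHTOOL_GRXRINGS query lands on. A small standalone sketch of such a query over the standard SIOCETHTOOL ioctl; the device name is an example and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* device name is an example */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("RX rings: %llu\n", (unsigned long long)nfc.data);
	close(fd);
	return 0;
}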
555
556#ifdef VMXNET3_RSS
557static int
558vmxnet3_get_rss_indir(struct net_device *netdev,
559 struct ethtool_rxfh_indir *p)
560{
561 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
562 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
563 unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
564
565 p->size = rssConf->indTableSize;
566 while (n--)
567 p->ring_index[n] = rssConf->indTable[n];
568 return 0;
569
570}
571
572static int
573vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p)
575{
576 unsigned int i;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579
580 if (p->size != rssConf->indTableSize)
581 return -EINVAL;
582 for (i = 0; i < rssConf->indTableSize; i++) {
583 /*
584 * Return with error code if any of the queue indices
585 * is out of range
586 */
587 if (p->ring_index[i] < 0 ||
588 p->ring_index[i] >= adapter->num_rx_queues)
589 return -EINVAL;
590 }
591
592 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i];
594
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT);
597
598 return 0;
599
600}
601#endif
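
set_rss_indir above only validates and copies the table; the conventional default contents (and a simple way to reset the spread) is an equal round-robin over the active queues. A standalone sketch, using the 8 * 4 = 32-entry size implied by VMXNET3_RSS_IND_TABLE_SIZE and an example queue count.

#include <stdio.h>

int main(void)
{
	unsigned table[32];   /* VMXNET3_RSS_IND_TABLE_SIZE: 8 max rx queues * 4 */
	unsigned nrxq = 4;    /* active rx queues (example) */

	for (unsigned i = 0; i < 32; i++)
		table[i] = i % nrxq;          /* equal round-robin spread */

	for (unsigned i = 0; i < 32; i++)
		printf("%u%s", table[i], (i % 8 == 7) ? "\n" : " ");
	return 0;
}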
602
538static struct ethtool_ops vmxnet3_ethtool_ops = { 603static struct ethtool_ops vmxnet3_ethtool_ops = {
539 .get_settings = vmxnet3_get_settings, 604 .get_settings = vmxnet3_get_settings,
540 .get_drvinfo = vmxnet3_get_drvinfo, 605 .get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +623,11 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
558 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 623 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
559 .get_ringparam = vmxnet3_get_ringparam, 624 .get_ringparam = vmxnet3_get_ringparam,
560 .set_ringparam = vmxnet3_set_ringparam, 625 .set_ringparam = vmxnet3_set_ringparam,
626 .get_rxnfc = vmxnet3_get_rxnfc,
627#ifdef VMXNET3_RSS
628 .get_rxfh_indir = vmxnet3_get_rss_indir,
629 .set_rxfh_indir = vmxnet3_set_rss_indir,
630#endif
561}; 631};
562 632
563void vmxnet3_set_ethtool_ops(struct net_device *netdev) 633void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843afc..7fadeed37f03 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
72 72
73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 73
74#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
75 75
76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */
78 #define VMXNET3_RSS
79#endif
76 80
77/* 81/*
78 * Capabilities 82 * Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
218}; 222};
219 223
220struct vmxnet3_tx_queue { 224struct vmxnet3_tx_queue {
225 char name[IFNAMSIZ+8]; /* To identify interrupt */
226 struct vmxnet3_adapter *adapter;
221 spinlock_t tx_lock; 227 spinlock_t tx_lock;
222 struct vmxnet3_cmd_ring tx_ring; 228 struct vmxnet3_cmd_ring tx_ring;
223 struct vmxnet3_tx_buf_info *buf_info; 229 struct vmxnet3_tx_buf_info *buf_info;
224 struct vmxnet3_tx_data_ring data_ring; 230 struct vmxnet3_tx_data_ring data_ring;
225 struct vmxnet3_comp_ring comp_ring; 231 struct vmxnet3_comp_ring comp_ring;
226 struct Vmxnet3_TxQueueCtrl *shared; 232 struct Vmxnet3_TxQueueCtrl *shared;
227 struct vmxnet3_tq_driver_stats stats; 233 struct vmxnet3_tq_driver_stats stats;
228 bool stopped; 234 bool stopped;
229 int num_stop; /* # of times the queue is 235 int num_stop; /* # of times the queue is
230 * stopped */ 236 * stopped */
237 int qid;
231} __attribute__((__aligned__(SMP_CACHE_BYTES))); 238} __attribute__((__aligned__(SMP_CACHE_BYTES)));
232 239
233enum vmxnet3_rx_buf_type { 240enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
259}; 266};
260 267
261struct vmxnet3_rx_queue { 268struct vmxnet3_rx_queue {
269 char name[IFNAMSIZ + 8]; /* To identify interrupt */
270 struct vmxnet3_adapter *adapter;
271 struct napi_struct napi;
262 struct vmxnet3_cmd_ring rx_ring[2]; 272 struct vmxnet3_cmd_ring rx_ring[2];
263 struct vmxnet3_comp_ring comp_ring; 273 struct vmxnet3_comp_ring comp_ring;
264 struct vmxnet3_rx_ctx rx_ctx; 274 struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
271 struct vmxnet3_rq_driver_stats stats; 281 struct vmxnet3_rq_driver_stats stats;
272} __attribute__((__aligned__(SMP_CACHE_BYTES))); 282} __attribute__((__aligned__(SMP_CACHE_BYTES)));
273 283
274#define VMXNET3_LINUX_MAX_MSIX_VECT 1 284#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
285#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
286
287/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
288#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
293
275 294
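With the maxima above, the MSI-X budget works out to 8 tx + 8 rx + 1 event = 17 vectors (VMXNET3_LINUX_MAX_MSIX_VECT), while VMXNET3_LINUX_MIN_MSIX_VECT = 3 lets the driver fall back to one vector each for tx, rx, and events when the platform grants fewer.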
276struct vmxnet3_intr { 295struct vmxnet3_intr {
277 enum vmxnet3_intr_mask_mode mask_mode; 296 enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,32 @@ struct vmxnet3_intr {
279 u8 num_intrs; /* # of intr vectors */ 298 u8 num_intrs; /* # of intr vectors */
280 u8 event_intr_idx; /* idx of the intr vector for event */ 299 u8 event_intr_idx; /* idx of the intr vector for event */
281 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ 300 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
301 char event_msi_vector_name[IFNAMSIZ+11];
282#ifdef CONFIG_PCI_MSI 302#ifdef CONFIG_PCI_MSI
283 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; 303 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
284#endif 304#endif
285}; 305};
286 306
307/* Interrupt sharing schemes, share_intr */
308#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
309#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
310#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
311
312
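These share_intr policies trade vector count against isolation: with BUDDYSHARE, tx queue i presumably rides on rx queue i's vector (so only one vector per queue pair plus the event vector is needed); TXSHARE funnels all tx completions through a single vector; DONTSHARE gives every queue its own, which is what drives the 17-vector maximum above.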
287#define VMXNET3_STATE_BIT_RESETTING 0 313#define VMXNET3_STATE_BIT_RESETTING 0
288#define VMXNET3_STATE_BIT_QUIESCED 1 314#define VMXNET3_STATE_BIT_QUIESCED 1
289struct vmxnet3_adapter { 315struct vmxnet3_adapter {
290 struct vmxnet3_tx_queue tx_queue; 316 struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
291 struct vmxnet3_rx_queue rx_queue; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
292 struct napi_struct napi; 318 struct vlan_group *vlan_grp;
293 struct vlan_group *vlan_grp; 319 struct vmxnet3_intr intr;
294 320 struct Vmxnet3_DriverShared *shared;
295 struct vmxnet3_intr intr; 321 struct Vmxnet3_PMConf *pm_conf;
296 322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
297 struct Vmxnet3_DriverShared *shared; 323 struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
298 struct Vmxnet3_PMConf *pm_conf; 324 struct net_device *netdev;
299 struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ 325 struct net_device_stats net_stats;
300 struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ 326 struct pci_dev *pdev;
301 struct net_device *netdev;
302 struct pci_dev *pdev;
303 327
304 u8 __iomem *hw_addr0; /* for BAR 0 */ 328 u8 __iomem *hw_addr0; /* for BAR 0 */
305 u8 __iomem *hw_addr1; /* for BAR 1 */ 329 u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@ struct vmxnet3_adapter {
308 bool rxcsum; 332 bool rxcsum;
309 bool lro; 333 bool lro;
310 bool jumbo_frame; 334 bool jumbo_frame;
335#ifdef VMXNET3_RSS
336 struct UPT1_RSSConf *rss_conf;
337 bool rss;
338#endif
339 u32 num_rx_queues;
340 u32 num_tx_queues;
311 341
312 /* rx buffer related */ 342 /* rx buffer related */
313 unsigned skb_buf_size; 343 unsigned skb_buf_size;
@@ -327,6 +357,7 @@ struct vmxnet3_adapter {
327 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 357 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
328 358
329 int dev_number; 359 int dev_number;
360 int share_intr;
330}; 361};
331 362
332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 363#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +397,10 @@ void
366vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); 397vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
367 398
368void 399void
369vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 400vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
370 struct vmxnet3_adapter *adapter);
371 401
372void 402void
373vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 403vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
374 struct vmxnet3_adapter *adapter);
375 404
376int 405int
377vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 406vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..01c05f53e2f9 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,109 +19,128 @@
19 19
20#include "vxge-traffic.h" 20#include "vxge-traffic.h"
21#include "vxge-config.h" 21#include "vxge-config.h"
22 22#include "vxge-main.h"
23static enum vxge_hw_status 23
24__vxge_hw_fifo_create( 24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25 struct __vxge_hw_vpath_handle *vpath_handle, 25 status = __vxge_hw_vpath_stats_access(vpath, \
26 struct vxge_hw_fifo_attr *attr); 26 VXGE_HW_STATS_OP_READ, \
27 27 offset, \
28static enum vxge_hw_status 28 &val64); \
29__vxge_hw_fifo_abort( 29 if (status != VXGE_HW_OK) \
30 struct __vxge_hw_fifo *fifoh); 30 return status; \
31 31}
32static enum vxge_hw_status
33__vxge_hw_fifo_reset(
34 struct __vxge_hw_fifo *ringh);
35
36static enum vxge_hw_status
37__vxge_hw_fifo_delete(
38 struct __vxge_hw_vpath_handle *vpath_handle);
39
40static struct __vxge_hw_blockpool_entry *
41__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
42 u32 size);
43 32
44static void 33static void
45__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, 34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
46 struct __vxge_hw_blockpool_entry *entry); 35{
47 36 u64 val64;
48static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
49 void *block_addr,
50 u32 length,
51 struct pci_dev *dma_h,
52 struct pci_dev *acc_handle);
53
54static enum vxge_hw_status
55__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
56 struct __vxge_hw_blockpool *blockpool,
57 u32 pool_size,
58 u32 pool_max);
59 37
60static void 38 val64 = readq(&vp_reg->rxmac_vcfg0);
61__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); 39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 writeq(val64, &vp_reg->rxmac_vcfg0);
41 val64 = readq(&vp_reg->rxmac_vcfg0);
42}
62 43
63static void * 44/*
64__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, 45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
65 u32 size, 46 */
66 struct vxge_hw_mempool_dma *dma_object); 47int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48{
49 struct vxge_hw_vpath_reg __iomem *vp_reg;
50 struct __vxge_hw_virtualpath *vpath;
51 u64 val64, rxd_count, rxd_spat;
52 int count = 0, total_count = 0;
67 53
68static void 54 vpath = &hldev->virtual_paths[vp_id];
69__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, 55 vp_reg = vpath->vp_reg;
70 void *memblock,
71 u32 size,
72 struct vxge_hw_mempool_dma *dma_object);
73 56
57 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
74 58
75static struct __vxge_hw_channel* 59 /* Check that the ring controller for this vpath has enough free RxDs
76__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, 60 * to send frames to the host. This is done by reading the
77 enum __vxge_hw_channel_type type, u32 length, 61 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
78 u32 per_dtr_space, void *userdata); 62 * RXD_SPAT value for the vpath.
63 */
64 val64 = readq(&vp_reg->prc_cfg6);
65 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
 66 /* Use a factor of 2 when comparing rxd_count against rxd_spat to
 67 * allow some headroom.
68 */
69 rxd_spat *= 2;
79 70
80static void 71 do {
81__vxge_hw_channel_free( 72 mdelay(1);
82 struct __vxge_hw_channel *channel);
83 73
84static enum vxge_hw_status 74 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
85__vxge_hw_channel_initialize(
86 struct __vxge_hw_channel *channel);
87 75
88static enum vxge_hw_status 76 /* Check that the ring controller for this vpath does
89__vxge_hw_channel_reset( 77 * not have any frame in its pipeline.
90 struct __vxge_hw_channel *channel); 78 */
79 val64 = readq(&vp_reg->frm_in_progress_cnt);
80 if ((rxd_count <= rxd_spat) || (val64 > 0))
81 count = 0;
82 else
83 count++;
84 total_count++;
85 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 (total_count < VXGE_HW_MAX_POLLING_COUNT));
91 87
92static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp); 88 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
89 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
90 __func__);
93 91
94static enum vxge_hw_status 92 return total_count;
95__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); 93}
96 94
97static enum vxge_hw_status 95/* vxge_hw_device_wait_receive_idle - This function waits until all frames
98__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); 96 * stored in the frame buffer for each vpath assigned to the given
97 * function (hldev) have been sent to the host.
98 */
99void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100{
101 int i, total_count = 0;
99 102
100static void 103 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
101__vxge_hw_device_id_get(struct __vxge_hw_device *hldev); 104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 continue;
102 106
103static void 107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
104__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev); 108 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 break;
110 }
111}
105 112
113/*
114 * __vxge_hw_device_register_poll
115 * Will poll certain register for specified amount of time.
116 * Will poll until masked bit is not cleared.
117 */
106static enum vxge_hw_status 118static enum vxge_hw_status
107__vxge_hw_vpath_card_info_get( 119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
108 u32 vp_id, 120{
109 struct vxge_hw_vpath_reg __iomem *vpath_reg, 121 u64 val64;
110 struct vxge_hw_device_hw_info *hw_info); 122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
111 124
112static enum vxge_hw_status 125 udelay(10);
113__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
114 126
115static void 127 do {
116__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev); 128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
117 133
118static enum vxge_hw_status 134 i = 0;
119__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev); 135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
120 141
121static enum vxge_hw_status 142 return ret;
122__vxge_hw_device_register_poll( 143}
123 void __iomem *reg,
124 u64 mask, u32 max_millis);
125 144
126static inline enum vxge_hw_status 145static inline enum vxge_hw_status
127__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, 146__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
@@ -129,139 +148,258 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
129{ 148{
130 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); 149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
131 wmb(); 150 wmb();
132
133 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); 151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
134 wmb(); 152 wmb();
135 153
136 return __vxge_hw_device_register_poll(addr, mask, max_millis); 154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
137} 155}
138 156
139static struct vxge_hw_mempool*
140__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
141 u32 item_size, u32 private_size, u32 items_initial,
142 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
143 void *userdata);
144static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
145
146static enum vxge_hw_status 157static enum vxge_hw_status
147__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, 158vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
148 struct vxge_hw_vpath_stats_hw_info *hw_stats); 159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl)
161{
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 enum vxge_hw_status status;
164 u64 val64;
165 u32 retry = 0, max_retry = 100;
149 166
150static enum vxge_hw_status 167 vp_reg = vpath->vp_reg;
151vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
152 168
153static enum vxge_hw_status 169 if (vpath->vp_open) {
154__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); 170 max_retry = 3;
171 spin_lock(&vpath->lock);
172 }
155 173
156static u64 174 writeq(*data0, &vp_reg->rts_access_steer_data0);
157__vxge_hw_vpath_pci_func_mode_get(u32 vp_id, 175 writeq(*data1, &vp_reg->rts_access_steer_data1);
158 struct vxge_hw_vpath_reg __iomem *vpath_reg); 176 wmb();
159 177
160static u32 178 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
161__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg); 179 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
180 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
181 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
182 *steer_ctrl;
162 183
163static enum vxge_hw_status 184 status = __vxge_hw_pio_mem_write64(val64,
164__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 185 &vp_reg->rts_access_steer_ctrl,
165 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]); 186 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
187 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
188
189 /* The __vxge_hw_device_register_poll can udelay for a significant
 190 * amount of time, blocking other processes from the CPU. If it delays
 191 * for ~5 secs, an NMI error can occur. A way around this is to give up
 192 * the processor via msleep, but that is not allowed while under lock.
 193 * So, only sleep (for up to ~4 secs total) when the vpath is not open
 194 * and no lock is held, sleeping 20 ms per retry until the firmware
 195 * operation has completed or timed out.
196 */
197 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
198 if (!vpath->vp_open)
199 msleep(20);
200 status = __vxge_hw_device_register_poll(
201 &vp_reg->rts_access_steer_ctrl,
202 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
203 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
204 }
166 205
167static enum vxge_hw_status 206 if (status != VXGE_HW_OK)
168__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); 207 goto out;
169 208
209 val64 = readq(&vp_reg->rts_access_steer_ctrl);
210 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
211 *data0 = readq(&vp_reg->rts_access_steer_data0);
212 *data1 = readq(&vp_reg->rts_access_steer_data1);
213 *steer_ctrl = val64;
214 } else
215 status = VXGE_HW_FAIL;
170 216
171static enum vxge_hw_status 217out:
172__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id); 218 if (vpath->vp_open)
219 spin_unlock(&vpath->lock);
220 return status;
221}
173 222
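All of the firmware helpers that follow (version read, flash commit, image streaming, EPROM enumeration) funnel through this single vxge_hw_vpath_fw_api entry point and differ only in the action/memo/offset selectors they pass, which is why serializing this one function with vpath->lock is enough to serialize all steering-register traffic.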
174static enum vxge_hw_status 223enum vxge_hw_status
175__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 224vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
176 struct vxge_hw_device_hw_info *hw_info); 225 u32 *minor, u32 *build)
226{
227 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
228 struct __vxge_hw_virtualpath *vpath;
229 enum vxge_hw_status status;
177 230
178static enum vxge_hw_status 231 vpath = &hldev->virtual_paths[hldev->first_vp_id];
179__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
180 232
181static void 233 status = vxge_hw_vpath_fw_api(vpath,
182__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id); 234 VXGE_HW_FW_UPGRADE_ACTION,
235 VXGE_HW_FW_UPGRADE_MEMO,
236 VXGE_HW_FW_UPGRADE_OFFSET_READ,
237 &data0, &data1, &steer_ctrl);
238 if (status != VXGE_HW_OK)
239 return status;
183 240
184static enum vxge_hw_status 241 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
185__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, 242 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
186 u32 operation, u32 offset, u64 *stat); 243 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
187 244
188static enum vxge_hw_status 245 return status;
189__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, 246}
190 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
191 247
192static enum vxge_hw_status 248enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
193__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, 249{
194 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); 250 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
251 struct __vxge_hw_virtualpath *vpath;
252 enum vxge_hw_status status;
253 u32 ret;
195 254
196/* 255 vpath = &hldev->virtual_paths[hldev->first_vp_id];
197 * __vxge_hw_channel_allocate - Allocate memory for channel 256
198 * This function allocates required memory for the channel and various arrays 257 status = vxge_hw_vpath_fw_api(vpath,
199 * in the channel 258 VXGE_HW_FW_UPGRADE_ACTION,
200 */ 259 VXGE_HW_FW_UPGRADE_MEMO,
201struct __vxge_hw_channel* 260 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
202__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, 261 &data0, &data1, &steer_ctrl);
203 enum __vxge_hw_channel_type type, 262 if (status != VXGE_HW_OK) {
204 u32 length, u32 per_dtr_space, void *userdata) 263 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
264 goto exit;
265 }
266
267 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
268 if (ret != 1) {
269 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
270 __func__, ret);
271 status = VXGE_HW_FAIL;
272 }
273
274exit:
275 return status;
276}
277
278enum vxge_hw_status
279vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
205{ 280{
206 struct __vxge_hw_channel *channel; 281 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
207 struct __vxge_hw_device *hldev; 282 struct __vxge_hw_virtualpath *vpath;
208 int size = 0; 283 enum vxge_hw_status status;
209 u32 vp_id; 284 int ret_code, sec_code;
210 285
211 hldev = vph->vpath->hldev; 286 vpath = &hldev->virtual_paths[hldev->first_vp_id];
212 vp_id = vph->vpath->vp_id;
213 287
214 switch (type) { 288 /* send upgrade start command */
215 case VXGE_HW_CHANNEL_TYPE_FIFO: 289 status = vxge_hw_vpath_fw_api(vpath,
216 size = sizeof(struct __vxge_hw_fifo); 290 VXGE_HW_FW_UPGRADE_ACTION,
217 break; 291 VXGE_HW_FW_UPGRADE_MEMO,
218 case VXGE_HW_CHANNEL_TYPE_RING: 292 VXGE_HW_FW_UPGRADE_OFFSET_START,
219 size = sizeof(struct __vxge_hw_ring); 293 &data0, &data1, &steer_ctrl);
220 break; 294 if (status != VXGE_HW_OK) {
221 default: 295 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
222 break; 296 __func__);
297 return status;
223 } 298 }
224 299
225 channel = kzalloc(size, GFP_KERNEL); 300 /* Transfer fw image to adapter 16 bytes at a time */
226 if (channel == NULL) 301 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
227 goto exit0; 302 steer_ctrl = 0;
228 INIT_LIST_HEAD(&channel->item);
229 303
230 channel->common_reg = hldev->common_reg; 304 /* The next 128bits of fwdata to be loaded onto the adapter */
231 channel->first_vp_id = hldev->first_vp_id; 305 data0 = *((u64 *)fwdata);
232 channel->type = type; 306 data1 = *((u64 *)fwdata + 1);
233 channel->devh = hldev;
234 channel->vph = vph;
235 channel->userdata = userdata;
236 channel->per_dtr_space = per_dtr_space;
237 channel->length = length;
238 channel->vp_id = vp_id;
239 307
240 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 308 status = vxge_hw_vpath_fw_api(vpath,
241 if (channel->work_arr == NULL) 309 VXGE_HW_FW_UPGRADE_ACTION,
242 goto exit1; 310 VXGE_HW_FW_UPGRADE_MEMO,
311 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
312 &data0, &data1, &steer_ctrl);
313 if (status != VXGE_HW_OK) {
314 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
315 __func__);
316 goto out;
317 }
243 318
244 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 319 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
245 if (channel->free_arr == NULL) 320 switch (ret_code) {
246 goto exit1; 321 case VXGE_HW_FW_UPGRADE_OK:
247 channel->free_ptr = length; 322 /* All OK, send next 16 bytes. */
323 break;
324 case VXGE_FW_UPGRADE_BYTES2SKIP:
325 /* skip bytes in the stream */
326 fwdata += (data0 >> 8) & 0xFFFFFFFF;
327 break;
328 case VXGE_HW_FW_UPGRADE_DONE:
329 goto out;
330 case VXGE_HW_FW_UPGRADE_ERR:
331 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
332 switch (sec_code) {
333 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
334 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
335 printk(KERN_ERR
336 "corrupted data from .ncf file\n");
337 break;
338 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
339 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
340 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
341 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
342 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
343 printk(KERN_ERR "invalid .ncf file\n");
344 break;
345 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
346 printk(KERN_ERR "buffer overflow\n");
347 break;
348 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
349 printk(KERN_ERR "failed to flash the image\n");
350 break;
351 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
352 printk(KERN_ERR
353 "generic error. Unknown error type\n");
354 break;
355 default:
356 printk(KERN_ERR "Unknown error of type %d\n",
357 sec_code);
358 break;
359 }
360 status = VXGE_HW_FAIL;
361 goto out;
362 default:
363 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
364 status = VXGE_HW_FAIL;
365 goto out;
366 }
367 /* point to next 16 bytes */
368 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
369 }
370out:
371 return status;
372}
248 373
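
Taken together, the helpers above form a three-phase upgrade: read the running version, stream the image in 16-byte blocks, then commit. A hedged sketch of how a caller might chain them; this is not the driver's actual upgrade entry point, and the firmware-request plumbing is elided.

static enum vxge_hw_status do_fw_upgrade(struct __vxge_hw_device *hldev,
					 const u8 *fw, int size)
{
	u32 major, minor, build;
	enum vxge_hw_status status;

	status = vxge_hw_upgrade_read_version(hldev, &major, &minor, &build);
	if (status != VXGE_HW_OK)
		return status;

	/* START command plus the 16-byte SEND loop */
	status = vxge_update_fw_image(hldev, fw, size);
	if (status != VXGE_HW_OK)
		return status;

	/* COMMIT: burn the staged image into flash */
	return vxge_hw_flash_fw(hldev);
}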
249 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 374enum vxge_hw_status
250 if (channel->reserve_arr == NULL) 375vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
251 goto exit1; 376 struct eprom_image *img)
252 channel->reserve_ptr = length; 377{
253 channel->reserve_top = 0; 378 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
379 struct __vxge_hw_virtualpath *vpath;
380 enum vxge_hw_status status;
381 int i;
254 382
255 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 383 vpath = &hldev->virtual_paths[hldev->first_vp_id];
256 if (channel->orig_arr == NULL)
257 goto exit1;
258 384
259 return channel; 385 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
260exit1: 386 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
261 __vxge_hw_channel_free(channel); 387 data1 = steer_ctrl = 0;
262 388
263exit0: 389 status = vxge_hw_vpath_fw_api(vpath,
264 return NULL; 390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV,
392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK)
394 break;
395
396 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
397 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
398 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
399 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
400 }
401
402 return status;
265} 403}
266 404
267/* 405/*
@@ -269,7 +407,7 @@ exit0:
269 * This function deallocates memory from the channel and various arrays 407 * This function deallocates memory from the channel and various arrays
270 * in the channel 408 * in the channel
271 */ 409 */
272void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) 410static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
273{ 411{
274 kfree(channel->work_arr); 412 kfree(channel->work_arr);
275 kfree(channel->free_arr); 413 kfree(channel->free_arr);
@@ -283,7 +421,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
283 * This function initializes a channel by properly setting the 421 * This function initializes a channel by properly setting the
284 * various references 422 * various references
285 */ 423 */
286enum vxge_hw_status 424static enum vxge_hw_status
287__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) 425__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
288{ 426{
289 u32 i; 427 u32 i;
@@ -318,7 +456,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
318 * __vxge_hw_channel_reset - Resets a channel 456 * __vxge_hw_channel_reset - Resets a channel
319 * This function resets a channel by properly setting the various references 457 * This function resets a channel by properly setting the various references
320 */ 458 */
321enum vxge_hw_status 459static enum vxge_hw_status
322__vxge_hw_channel_reset(struct __vxge_hw_channel *channel) 460__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
323{ 461{
324 u32 i; 462 u32 i;
@@ -345,8 +483,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
345 * Initialize certain PCI/PCI-X configuration registers 483 * Initialize certain PCI/PCI-X configuration registers
346 * with recommended values. Save config space for future hw resets. 484 * with recommended values. Save config space for future hw resets.
347 */ 485 */
348void 486static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
349__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
350{ 487{
351 u16 cmd = 0; 488 u16 cmd = 0;
352 489
@@ -358,39 +495,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
358 pci_save_state(hldev->pdev); 495 pci_save_state(hldev->pdev);
359} 496}
360 497
361/* 498/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
362 * __vxge_hw_device_register_poll
363 * Will poll certain register for specified amount of time.
364 * Will poll until masked bit is not cleared.
365 */
366static enum vxge_hw_status
367__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
368{
369 u64 val64;
370 u32 i = 0;
371 enum vxge_hw_status ret = VXGE_HW_FAIL;
372
373 udelay(10);
374
375 do {
376 val64 = readq(reg);
377 if (!(val64 & mask))
378 return VXGE_HW_OK;
379 udelay(100);
380 } while (++i <= 9);
381
382 i = 0;
383 do {
384 val64 = readq(reg);
385 if (!(val64 & mask))
386 return VXGE_HW_OK;
387 mdelay(1);
388 } while (++i <= max_millis);
389
390 return ret;
391}
392
393 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
394 * in progress 499 * in progress
 395 * This routine checks that the vpath reset-in-progress register has turned zero 500 * This routine checks that the vpath reset-in-progress register has turned zero
396 */ 501 */
@@ -405,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
405} 510}
406 511
407/* 512/*
 513 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 514 * Set the swapper bits appropriately for the legacy section.
515 */
516static enum vxge_hw_status
517__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
518{
519 u64 val64;
520 enum vxge_hw_status status = VXGE_HW_OK;
521
522 val64 = readq(&legacy_reg->toc_swapper_fb);
523
524 wmb();
525
526 switch (val64) {
527 case VXGE_HW_SWAPPER_INITIAL_VALUE:
528 return status;
529
530 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
531 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
532 &legacy_reg->pifm_rd_swap_en);
533 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
534 &legacy_reg->pifm_rd_flip_en);
535 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
536 &legacy_reg->pifm_wr_swap_en);
537 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
538 &legacy_reg->pifm_wr_flip_en);
539 break;
540
541 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
542 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
543 &legacy_reg->pifm_rd_swap_en);
544 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
545 &legacy_reg->pifm_wr_swap_en);
546 break;
547
548 case VXGE_HW_SWAPPER_BIT_FLIPPED:
549 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
550 &legacy_reg->pifm_rd_flip_en);
551 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
552 &legacy_reg->pifm_wr_flip_en);
553 break;
554 }
555
556 wmb();
557
558 val64 = readq(&legacy_reg->toc_swapper_fb);
559
560 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
561 status = VXGE_HW_ERR_SWAPPER_CTRL;
562
563 return status;
564}
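
The swapper probe works by reading the fixed toc_swapper_fb signature: whichever of the three corrupted forms comes back (byte-swapped, bit-flipped, or both) tells the host exactly which read/write swap enables to program, and the final re-read confirms the fix took.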
565
566/*
408 * __vxge_hw_device_toc_get 567 * __vxge_hw_device_toc_get
409 * This routine sets the swapper and reads the toc pointer and returns the 568 * This routine sets the swapper and reads the toc pointer and returns the
410 * memory mapped address of the toc 569 * memory mapped address of the toc
@@ -435,7 +594,7 @@ exit:
435 * register location pointers in the device object. It waits until the ric is 594 * register location pointers in the device object. It waits until the ric is
436 * completed initializing registers. 595 * completed initializing registers.
437 */ 596 */
438enum vxge_hw_status 597static enum vxge_hw_status
439__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) 598__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
440{ 599{
441 u64 val64; 600 u64 val64;
@@ -496,26 +655,6 @@ exit:
496} 655}
497 656
498/* 657/*
499 * __vxge_hw_device_id_get
 500 * This routine sets the device id and revision numbers into the device
501 * structure
502 */
503void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
504{
505 u64 val64;
506
507 val64 = readq(&hldev->common_reg->titan_asic_id);
508 hldev->device_id =
509 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
510
511 hldev->major_revision =
512 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
513
514 hldev->minor_revision =
515 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
516}
517
518/*
519 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver 658 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
520 * This routine returns the Access Rights of the driver 659 * This routine returns the Access Rights of the driver
521 */ 660 */
@@ -568,10 +707,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
568} 707}
569 708
570/* 709/*
710 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
711 * Returns the function number of the vpath.
712 */
713static u32
714__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
715{
716 u64 val64;
717
718 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
719
720 return
721 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
722}
723
724/*
571 * __vxge_hw_device_host_info_get 725 * __vxge_hw_device_host_info_get
572 * This routine returns the host type assignments 726 * This routine returns the host type assignments
573 */ 727 */
574void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) 728static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
575{ 729{
576 u64 val64; 730 u64 val64;
577 u32 i; 731 u32 i;
@@ -584,16 +738,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
584 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); 738 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
585 739
586 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 740 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
587
588 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 741 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
589 continue; 742 continue;
590 743
591 hldev->func_id = 744 hldev->func_id =
592 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); 745 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
593 746
594 hldev->access_rights = __vxge_hw_device_access_rights_get( 747 hldev->access_rights = __vxge_hw_device_access_rights_get(
595 hldev->host_type, hldev->func_id); 748 hldev->host_type, hldev->func_id);
596 749
750 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
751 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
752
597 hldev->first_vp_id = i; 753 hldev->first_vp_id = i;
598 break; 754 break;
599 } 755 }
@@ -634,7 +790,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
634 * __vxge_hw_device_initialize 790 * __vxge_hw_device_initialize
635 * Initialize Titan-V hardware. 791 * Initialize Titan-V hardware.
636 */ 792 */
637enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) 793static enum vxge_hw_status
794__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
638{ 795{
639 enum vxge_hw_status status = VXGE_HW_OK; 796 enum vxge_hw_status status = VXGE_HW_OK;
640 797
@@ -650,6 +807,196 @@ exit:
650 return status; 807 return status;
651} 808}
652 809
810/*
811 * __vxge_hw_vpath_fw_ver_get - Get the fw version
812 * Returns FW Version
813 */
814static enum vxge_hw_status
815__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
816 struct vxge_hw_device_hw_info *hw_info)
817{
818 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
819 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
820 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
821 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
822 u64 data0, data1 = 0, steer_ctrl = 0;
823 enum vxge_hw_status status;
824
825 status = vxge_hw_vpath_fw_api(vpath,
826 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
827 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
828 0, &data0, &data1, &steer_ctrl);
829 if (status != VXGE_HW_OK)
830 goto exit;
831
832 fw_date->day =
833 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
834 fw_date->month =
835 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
836 fw_date->year =
837 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
838
839 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
840 fw_date->month, fw_date->day, fw_date->year);
841
842 fw_version->major =
843 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
844 fw_version->minor =
845 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
846 fw_version->build =
847 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
848
849 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
850 fw_version->major, fw_version->minor, fw_version->build);
851
852 flash_date->day =
853 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
854 flash_date->month =
855 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
856 flash_date->year =
857 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
858
859 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
860 flash_date->month, flash_date->day, flash_date->year);
861
862 flash_version->major =
863 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
864 flash_version->minor =
865 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
866 flash_version->build =
867 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
868
869 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
870 flash_version->major, flash_version->minor,
871 flash_version->build);
872
873exit:
874 return status;
875}
876
877/*
878 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
879 * part number and product description.
880 */
881static enum vxge_hw_status
882__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
883 struct vxge_hw_device_hw_info *hw_info)
884{
885 enum vxge_hw_status status;
886 u64 data0, data1 = 0, steer_ctrl = 0;
887 u8 *serial_number = hw_info->serial_number;
888 u8 *part_number = hw_info->part_number;
889 u8 *product_desc = hw_info->product_desc;
890 u32 i, j = 0;
891
892 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
893
894 status = vxge_hw_vpath_fw_api(vpath,
895 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
896 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
897 0, &data0, &data1, &steer_ctrl);
898 if (status != VXGE_HW_OK)
899 return status;
900
901 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
902 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
903
904 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
905 data1 = steer_ctrl = 0;
906
907 status = vxge_hw_vpath_fw_api(vpath,
908 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
909 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
910 0, &data0, &data1, &steer_ctrl);
911 if (status != VXGE_HW_OK)
912 return status;
913
914 ((u64 *)part_number)[0] = be64_to_cpu(data0);
915 ((u64 *)part_number)[1] = be64_to_cpu(data1);
916
917 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
918 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
919 data0 = i;
920 data1 = steer_ctrl = 0;
921
922 status = vxge_hw_vpath_fw_api(vpath,
923 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
924 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
925 0, &data0, &data1, &steer_ctrl);
926 if (status != VXGE_HW_OK)
927 return status;
928
929 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
930 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
931 }
932
933 return status;
934}
935
936/*
937 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
938 * Returns pci function mode
939 */
940static enum vxge_hw_status
941__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
942 struct vxge_hw_device_hw_info *hw_info)
943{
944 u64 data0, data1 = 0, steer_ctrl = 0;
945 enum vxge_hw_status status;
946
947 data0 = 0;
948
949 status = vxge_hw_vpath_fw_api(vpath,
950 VXGE_HW_FW_API_GET_FUNC_MODE,
951 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
952 0, &data0, &data1, &steer_ctrl);
953 if (status != VXGE_HW_OK)
954 return status;
955
956 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
957 return status;
958}
959
960/*
961 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
962 * from MAC address table.
963 */
964static enum vxge_hw_status
965__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
966 u8 *macaddr, u8 *macaddr_mask)
967{
968 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
969 data0 = 0, data1 = 0, steer_ctrl = 0;
970 enum vxge_hw_status status;
971 int i;
972
973 do {
974 status = vxge_hw_vpath_fw_api(vpath, action,
975 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
976 0, &data0, &data1, &steer_ctrl);
977 if (status != VXGE_HW_OK)
978 goto exit;
979
980 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
981 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
982 data1);
983
984 for (i = ETH_ALEN; i > 0; i--) {
985 macaddr[i - 1] = (u8) (data0 & 0xFF);
986 data0 >>= 8;
987
988 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
989 data1 >>= 8;
990 }
991
992 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
993 data0 = 0, data1 = 0, steer_ctrl = 0;
994
995 } while (!is_valid_ether_addr(macaddr));
996exit:
997 return status;
998}
999
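The byte-unpacking loop above walks i from ETH_ALEN down to 1 because the low byte of data0 holds the last MAC octet. A standalone illustration with an example value:

#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned long long data0 = 0x0000001B21A0B1C2ULL; /* example value */
	unsigned char mac[ETH_ALEN];

	/* low byte is the last octet, so fill the array back to front */
	for (int i = ETH_ALEN; i > 0; i--) {
		mac[i - 1] = (unsigned char)(data0 & 0xFF);
		data0 >>= 8;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}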
653/** 1000/**
654 * vxge_hw_device_hw_info_get - Get the hw information 1001 * vxge_hw_device_hw_info_get - Get the hw information
655 * Returns the vpath mask that has the bits set for each vpath allocated 1002 * Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1012,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
665 struct vxge_hw_toc_reg __iomem *toc; 1012 struct vxge_hw_toc_reg __iomem *toc;
666 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; 1013 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
667 struct vxge_hw_common_reg __iomem *common_reg; 1014 struct vxge_hw_common_reg __iomem *common_reg;
668 struct vxge_hw_vpath_reg __iomem *vpath_reg;
669 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 1015 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
670 enum vxge_hw_status status; 1016 enum vxge_hw_status status;
1017 struct __vxge_hw_virtualpath vpath;
671 1018
672 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); 1019 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
673 1020
@@ -693,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
693 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); 1040 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
694 1041
695 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1042 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
696
697 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1043 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
698 continue; 1044 continue;
699 1045
@@ -702,7 +1048,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
702 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) 1048 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
703 (bar0 + val64); 1049 (bar0 + val64);
704 1050
705 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); 1051 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
706 if (__vxge_hw_device_access_rights_get(hw_info->host_type, 1052 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
707 hw_info->func_id) & 1053 hw_info->func_id) &
708 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { 1054 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1064,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
718 1064
719 val64 = readq(&toc->toc_vpath_pointer[i]); 1065 val64 = readq(&toc->toc_vpath_pointer[i]);
720 1066
721 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1067 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1068 (bar0 + val64);
1069 vpath.vp_open = 0;
722 1070
723 hw_info->function_mode = 1071 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
724 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); 1072 if (status != VXGE_HW_OK)
1073 goto exit;
725 1074
726 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); 1075 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
727 if (status != VXGE_HW_OK) 1076 if (status != VXGE_HW_OK)
728 goto exit; 1077 goto exit;
729 1078
730 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); 1079 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
731 if (status != VXGE_HW_OK) 1080 if (status != VXGE_HW_OK)
732 goto exit; 1081 goto exit;
733 1082
@@ -735,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
735 } 1084 }
736 1085
737 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1086 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
738
739 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1087 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
740 continue; 1088 continue;
741 1089
742 val64 = readq(&toc->toc_vpath_pointer[i]); 1090 val64 = readq(&toc->toc_vpath_pointer[i]);
743 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1091 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1092 (bar0 + val64);
1093 vpath.vp_open = 0;
744 1094
745 status = __vxge_hw_vpath_addr_get(i, vpath_reg, 1095 status = __vxge_hw_vpath_addr_get(&vpath,
746 hw_info->mac_addrs[i], 1096 hw_info->mac_addrs[i],
747 hw_info->mac_addr_masks[i]); 1097 hw_info->mac_addr_masks[i]);
748 if (status != VXGE_HW_OK) 1098 if (status != VXGE_HW_OK)
@@ -753,6 +1103,218 @@ exit:
753} 1103}
754 1104
755/* 1105/*
1106 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1107 */
1108static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1109{
1110 struct __vxge_hw_device *hldev;
1111 struct list_head *p, *n;
1112 u16 ret;
1113
1114 if (blockpool == NULL) {
1115 ret = 1;
1116 goto exit;
1117 }
1118
1119 hldev = blockpool->hldev;
1120
1121 list_for_each_safe(p, n, &blockpool->free_block_list) {
1122 pci_unmap_single(hldev->pdev,
1123 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1124 ((struct __vxge_hw_blockpool_entry *)p)->length,
1125 PCI_DMA_BIDIRECTIONAL);
1126
1127 vxge_os_dma_free(hldev->pdev,
1128 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1129 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1130
1131 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1132 kfree(p);
1133 blockpool->pool_size--;
1134 }
1135
1136 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1137 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1138 kfree((void *)p);
1139 }
1140 ret = 0;
1141exit:
1142 return;
1143}
1144
1145/*
1146 * __vxge_hw_blockpool_create - Create block pool
1147 */
1148static enum vxge_hw_status
1149__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1150 struct __vxge_hw_blockpool *blockpool,
1151 u32 pool_size,
1152 u32 pool_max)
1153{
1154 u32 i;
1155 struct __vxge_hw_blockpool_entry *entry = NULL;
1156 void *memblock;
1157 dma_addr_t dma_addr;
1158 struct pci_dev *dma_handle;
1159 struct pci_dev *acc_handle;
1160 enum vxge_hw_status status = VXGE_HW_OK;
1161
1162 if (blockpool == NULL) {
1163 status = VXGE_HW_FAIL;
1164 goto blockpool_create_exit;
1165 }
1166
1167 blockpool->hldev = hldev;
1168 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1169 blockpool->pool_size = 0;
1170 blockpool->pool_max = pool_max;
1171 blockpool->req_out = 0;
1172
1173 INIT_LIST_HEAD(&blockpool->free_block_list);
1174 INIT_LIST_HEAD(&blockpool->free_entry_list);
1175
1176 for (i = 0; i < pool_size + pool_max; i++) {
1177 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1178 GFP_KERNEL);
1179 if (entry == NULL) {
1180 __vxge_hw_blockpool_destroy(blockpool);
1181 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1182 goto blockpool_create_exit;
1183 }
1184 list_add(&entry->item, &blockpool->free_entry_list);
1185 }
1186
1187 for (i = 0; i < pool_size; i++) {
1188 memblock = vxge_os_dma_malloc(
1189 hldev->pdev,
1190 VXGE_HW_BLOCK_SIZE,
1191 &dma_handle,
1192 &acc_handle);
1193 if (memblock == NULL) {
1194 __vxge_hw_blockpool_destroy(blockpool);
1195 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1196 goto blockpool_create_exit;
1197 }
1198
1199 dma_addr = pci_map_single(hldev->pdev, memblock,
1200 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1201 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1202 dma_addr))) {
1203 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1204 __vxge_hw_blockpool_destroy(blockpool);
1205 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1206 goto blockpool_create_exit;
1207 }
1208
1209 if (!list_empty(&blockpool->free_entry_list))
1210 entry = (struct __vxge_hw_blockpool_entry *)
1211 list_first_entry(&blockpool->free_entry_list,
1212 struct __vxge_hw_blockpool_entry,
1213 item);
1214
1215 if (entry == NULL)
1216 entry =
1217 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1218 GFP_KERNEL);
1219 if (entry != NULL) {
1220 list_del(&entry->item);
1221 entry->length = VXGE_HW_BLOCK_SIZE;
1222 entry->memblock = memblock;
1223 entry->dma_addr = dma_addr;
1224 entry->acc_handle = acc_handle;
1225 entry->dma_handle = dma_handle;
1226 list_add(&entry->item,
1227 &blockpool->free_block_list);
1228 blockpool->pool_size++;
1229 } else {
1230 __vxge_hw_blockpool_destroy(blockpool);
1231 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1232 goto blockpool_create_exit;
1233 }
1234 }
1235
1236blockpool_create_exit:
1237 return status;
1238}
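
Worth noting about the create path: it preallocates pool_size + pool_max spare entry structs before mapping any DMA blocks, presumably so later block additions never have to allocate on a hot path, and any mid-build failure tears the whole pool down via __vxge_hw_blockpool_destroy instead of leaving a half-initialized list.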
1239
1240/*
1241 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1242 * Check the fifo configuration
1243 */
1244static enum vxge_hw_status
1245__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1246{
1247 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1248 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1249 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1250
1251 return VXGE_HW_OK;
1252}
1253
1254/*
1255 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1256 * Check the vpath configuration
1257 */
1258static enum vxge_hw_status
1259__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1260{
1261 enum vxge_hw_status status;
1262
1263 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1264 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1265 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1266
1267 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1268 if (status != VXGE_HW_OK)
1269 return status;
1270
1271 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1272 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1273 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1274 return VXGE_HW_BADCFG_VPATH_MTU;
1275
1276 if ((vp_config->rpa_strip_vlan_tag !=
1277 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1278 (vp_config->rpa_strip_vlan_tag !=
1279 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1280 (vp_config->rpa_strip_vlan_tag !=
1281 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1282 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1283
1284 return VXGE_HW_OK;
1285}
1286
1287/*
1288 * __vxge_hw_device_config_check - Check device configuration.
1289 * Check the device configuration
1290 */
1291static enum vxge_hw_status
1292__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1293{
1294 u32 i;
1295 enum vxge_hw_status status;
1296
1297 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1298 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1299 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1300 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1301 return VXGE_HW_BADCFG_INTR_MODE;
1302
1303 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1304 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1305 return VXGE_HW_BADCFG_RTS_MAC_EN;
1306
1307 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1308 status = __vxge_hw_device_vpath_config_check(
1309 &new_config->vp_config[i]);
1310 if (status != VXGE_HW_OK)
1311 return status;
1312 }
1313
1314 return VXGE_HW_OK;
1315}
1316
1317/*
756 * vxge_hw_device_initialize - Initialize Titan device. 1318 * vxge_hw_device_initialize - Initialize Titan device.
757 * Initialize Titan device. Note that all the arguments of this public API 1319 * Initialize Titan device. Note that all the arguments of this public API
758 * are 'IN', including @hldev. Driver cooperates with 1320 * are 'IN', including @hldev. Driver cooperates with
@@ -776,14 +1338,12 @@ vxge_hw_device_initialize(
776 if (status != VXGE_HW_OK) 1338 if (status != VXGE_HW_OK)
777 goto exit; 1339 goto exit;
778 1340
779 hldev = (struct __vxge_hw_device *) 1341 hldev = vzalloc(sizeof(struct __vxge_hw_device));
780 vmalloc(sizeof(struct __vxge_hw_device));
781 if (hldev == NULL) { 1342 if (hldev == NULL) {
782 status = VXGE_HW_ERR_OUT_OF_MEMORY; 1343 status = VXGE_HW_ERR_OUT_OF_MEMORY;
783 goto exit; 1344 goto exit;
784 } 1345 }
785 1346
786 memset(hldev, 0, sizeof(struct __vxge_hw_device));
787 hldev->magic = VXGE_HW_DEVICE_MAGIC; 1347 hldev->magic = VXGE_HW_DEVICE_MAGIC;
788 1348
789 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); 1349 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1366,6 @@ vxge_hw_device_initialize(
806 vfree(hldev); 1366 vfree(hldev);
807 goto exit; 1367 goto exit;
808 } 1368 }
809 __vxge_hw_device_id_get(hldev);
810 1369
811 __vxge_hw_device_host_info_get(hldev); 1370 __vxge_hw_device_host_info_get(hldev);
812 1371
@@ -814,7 +1373,6 @@ vxge_hw_device_initialize(
814 nblocks++; 1373 nblocks++;
815 1374
816 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
817
818 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 1376 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
819 continue; 1377 continue;
820 1378
@@ -839,7 +1397,6 @@ vxge_hw_device_initialize(
839 } 1397 }
840 1398
841 status = __vxge_hw_device_initialize(hldev); 1399 status = __vxge_hw_device_initialize(hldev);
842
843 if (status != VXGE_HW_OK) { 1400 if (status != VXGE_HW_OK) {
844 vxge_hw_device_terminate(hldev); 1401 vxge_hw_device_terminate(hldev);
845 goto exit; 1402 goto exit;
@@ -865,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
865} 1422}
866 1423
867/* 1424/*
1425 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1426 * and offset and perform an operation
1427 */
1428static enum vxge_hw_status
1429__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1430 u32 operation, u32 offset, u64 *stat)
1431{
1432 u64 val64;
1433 enum vxge_hw_status status = VXGE_HW_OK;
1434 struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
1436 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1437 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1438 goto vpath_stats_access_exit;
1439 }
1440
1441 vp_reg = vpath->vp_reg;
1442
1443 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1444 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1445 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1446
1447 status = __vxge_hw_pio_mem_write64(val64,
1448 &vp_reg->xmac_stats_access_cmd,
1449 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1450 vpath->hldev->config.device_poll_millis);
1451 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1452 *stat = readq(&vp_reg->xmac_stats_access_data);
1453 else
1454 *stat = 0;
1455
1456vpath_stats_access_exit:
1457 return status;
1458}
1459
1460/*
1461 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1462 */
1463static enum vxge_hw_status
1464__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1465 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1466{
1467 u64 *val64;
1468 int i;
1469 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1470 enum vxge_hw_status status = VXGE_HW_OK;
1471
1472 val64 = (u64 *)vpath_tx_stats;
1473
1474 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1475 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1476 goto exit;
1477 }
1478
1479 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1480 status = __vxge_hw_vpath_stats_access(vpath,
1481 VXGE_HW_STATS_OP_READ,
1482 offset, val64);
1483 if (status != VXGE_HW_OK)
1484 goto exit;
1485 offset++;
1486 val64++;
1487 }
1488exit:
1489 return status;
1490}
1491
1492/*
1493 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1494 */
1495static enum vxge_hw_status
1496__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1497 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1498{
1499 u64 *val64;
1500 enum vxge_hw_status status = VXGE_HW_OK;
1501 int i;
1502 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1503 val64 = (u64 *) vpath_rx_stats;
1504
1505 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1506 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1507 goto exit;
1508 }
1509 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1510 status = __vxge_hw_vpath_stats_access(vpath,
1511 VXGE_HW_STATS_OP_READ,
1512 offset >> 3, val64);
1513 if (status != VXGE_HW_OK)
1514 goto exit;
1515
1516 offset += 8;
1517 val64++;
1518 }
1519exit:
1520 return status;
1521}
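The TX and RX readers above step through the same kind of 64-bit statistics registers in two styles: the TX loop passes its offset straight through and increments it by one (a word index), while the RX loop keeps a byte offset, shifts it right by three at the call site, and advances by eight. Assuming 8-byte-aligned offsets, the two stepping schemes visit the same register sequence, as this illustrative helper shows:

    #include <linux/types.h>

    /* word index after one step == byte offset after one step, >> 3 */
    static u32 next_stats_word(u32 byte_offset)
    {
            return (byte_offset + 8) >> 3;  /* same as (byte_offset >> 3) + 1 */
    }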
1522
1523/*
1524 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1525 */
1526static enum vxge_hw_status
1527__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1528 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1529{
1530 u64 val64;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532 struct vxge_hw_vpath_reg __iomem *vp_reg;
1533
1534 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1535 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1536 goto exit;
1537 }
1538 vp_reg = vpath->vp_reg;
1539
1540 val64 = readq(&vp_reg->vpath_debug_stats0);
1541 hw_stats->ini_num_mwr_sent =
1542 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1543
1544 val64 = readq(&vp_reg->vpath_debug_stats1);
1545 hw_stats->ini_num_mrd_sent =
1546 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1547
1548 val64 = readq(&vp_reg->vpath_debug_stats2);
1549 hw_stats->ini_num_cpl_rcvd =
1550 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1551
1552 val64 = readq(&vp_reg->vpath_debug_stats3);
1553 hw_stats->ini_num_mwr_byte_sent =
1554 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1555
1556 val64 = readq(&vp_reg->vpath_debug_stats4);
1557 hw_stats->ini_num_cpl_byte_rcvd =
1558 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1559
1560 val64 = readq(&vp_reg->vpath_debug_stats5);
1561 hw_stats->wrcrdtarb_xoff =
1562 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1563
1564 val64 = readq(&vp_reg->vpath_debug_stats6);
1565 hw_stats->rdcrdtarb_xoff =
1566 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1567
1568 val64 = readq(&vp_reg->vpath_genstats_count01);
1569 hw_stats->vpath_genstats_count0 =
1570 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1571 val64);
1572
1573 val64 = readq(&vp_reg->vpath_genstats_count01);
1574 hw_stats->vpath_genstats_count1 =
1575 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1576 val64);
1577
1578 val64 = readq(&vp_reg->vpath_genstats_count23);
1579 hw_stats->vpath_genstats_count2 =
1580 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1581 val64);
1582
1583 val64 = readq(&vp_reg->vpath_genstats_count01);
1584 hw_stats->vpath_genstats_count3 =
1585 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1586 val64);
1587
1588 val64 = readq(&vp_reg->vpath_genstats_count4);
1589 hw_stats->vpath_genstats_count4 =
1590 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1591 val64);
1592
1593 val64 = readq(&vp_reg->vpath_genstats_count5);
1594 hw_stats->vpath_genstats_count5 =
1595 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1596 val64);
1597
1598 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1599 if (status != VXGE_HW_OK)
1600 goto exit;
1601
1602 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1603 if (status != VXGE_HW_OK)
1604 goto exit;
1605
1606 VXGE_HW_VPATH_STATS_PIO_READ(
1607 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1608
1609 hw_stats->prog_event_vnum0 =
1610 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1611
1612 hw_stats->prog_event_vnum1 =
1613 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1614
1615 VXGE_HW_VPATH_STATS_PIO_READ(
1616 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1617
1618 hw_stats->prog_event_vnum2 =
1619 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1620
1621 hw_stats->prog_event_vnum3 =
1622 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1623
1624 val64 = readq(&vp_reg->rx_multi_cast_stats);
1625 hw_stats->rx_multi_cast_frame_discard =
1626 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1627
1628 val64 = readq(&vp_reg->rx_frm_transferred);
1629 hw_stats->rx_frm_transferred =
1630 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1631
1632 val64 = readq(&vp_reg->rxd_returned);
1633 hw_stats->rxd_returned =
1634 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1635
1636 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1637 hw_stats->rx_mpa_len_fail_frms =
1638 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1639 hw_stats->rx_mpa_mrk_fail_frms =
1640 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1641 hw_stats->rx_mpa_crc_fail_frms =
1642 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1643
1644 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1645 hw_stats->rx_permitted_frms =
1646 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1647 hw_stats->rx_vp_reset_discarded_frms =
1648 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1649 hw_stats->rx_wol_frms =
1650 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1651
1652 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1653 hw_stats->tx_vp_reset_discarded_frms =
1654 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1655 val64);
1656exit:
1657 return status;
1658}
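VXGE_HW_VPATH_STATS_PIO_READ() above reads into the local val64 and bails out through the local status and exit label; its body is defined elsewhere, so the following expansion is an assumption consistent with how it is used here:

    /* assumed shape of the macro; not taken from this hunk */
    #define VXGE_HW_VPATH_STATS_PIO_READ(offset) {                        \
            status = __vxge_hw_vpath_stats_access(vpath,                  \
                            VXGE_HW_STATS_OP_READ, offset, &val64);       \
            if (status != VXGE_HW_OK)                                     \
                    goto exit;                                            \
    }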
1659
1660/*
868 * vxge_hw_device_stats_get - Get the device hw statistics. 1661 * vxge_hw_device_stats_get - Get the device hw statistics.
869 * Returns the vpath h/w stats for the device. 1662 * Returns the vpath h/w stats for the device.
870 */ 1663 */
@@ -876,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
876 enum vxge_hw_status status = VXGE_HW_OK; 1669 enum vxge_hw_status status = VXGE_HW_OK;
877 1670
878 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1671 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
879
880 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || 1672 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
881 (hldev->virtual_paths[i].vp_open == 1673 (hldev->virtual_paths[i].vp_open ==
882 VXGE_HW_VP_NOT_OPEN)) 1674 VXGE_HW_VP_NOT_OPEN))
@@ -1031,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1031 1823
1032 status = vxge_hw_device_xmac_aggr_stats_get(hldev, 1824 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1033 0, &xmac_stats->aggr_stats[0]); 1825 0, &xmac_stats->aggr_stats[0]);
1034
1035 if (status != VXGE_HW_OK) 1826 if (status != VXGE_HW_OK)
1036 goto exit; 1827 goto exit;
1037 1828
@@ -1165,7 +1956,6 @@ exit:
1165 * It can be used to set or reset Pause frame generation or reception 1956 * It can be used to set or reset Pause frame generation or reception
1166 * support of the NIC. 1957 * support of the NIC.
1167 */ 1958 */
1168
1169enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, 1959enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1170 u32 port, u32 tx, u32 rx) 1960 u32 port, u32 tx, u32 rx)
1171{ 1961{
@@ -1407,190 +2197,359 @@ exit:
1407} 2197}
1408 2198
1409/* 2199/*
1410 * __vxge_hw_ring_create - Create a Ring 2200 * __vxge_hw_channel_allocate - Allocate memory for channel
1411 * This function creates Ring and initializes it. 2201 * This function allocates required memory for the channel and various arrays
1412 * 2202 * in the channel
1413 */ 2203 */
1414static enum vxge_hw_status 2204static struct __vxge_hw_channel *
1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 2205__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
1416 struct vxge_hw_ring_attr *attr) 2206 enum __vxge_hw_channel_type type,
2207 u32 length, u32 per_dtr_space,
2208 void *userdata)
1417{ 2209{
1418 enum vxge_hw_status status = VXGE_HW_OK; 2210 struct __vxge_hw_channel *channel;
1419 struct __vxge_hw_ring *ring;
1420 u32 ring_length;
1421 struct vxge_hw_ring_config *config;
1422 struct __vxge_hw_device *hldev; 2211 struct __vxge_hw_device *hldev;
2212 int size = 0;
1423 u32 vp_id; 2213 u32 vp_id;
1424 struct vxge_hw_mempool_cbs ring_mp_callback;
1425 2214
1426 if ((vp == NULL) || (attr == NULL)) { 2215 hldev = vph->vpath->hldev;
2216 vp_id = vph->vpath->vp_id;
2217
2218 switch (type) {
2219 case VXGE_HW_CHANNEL_TYPE_FIFO:
2220 size = sizeof(struct __vxge_hw_fifo);
2221 break;
2222 case VXGE_HW_CHANNEL_TYPE_RING:
2223 size = sizeof(struct __vxge_hw_ring);
2224 break;
2225 default:
2226 break;
2227 }
2228
2229 channel = kzalloc(size, GFP_KERNEL);
2230 if (channel == NULL)
2231 goto exit0;
2232 INIT_LIST_HEAD(&channel->item);
2233
2234 channel->common_reg = hldev->common_reg;
2235 channel->first_vp_id = hldev->first_vp_id;
2236 channel->type = type;
2237 channel->devh = hldev;
2238 channel->vph = vph;
2239 channel->userdata = userdata;
2240 channel->per_dtr_space = per_dtr_space;
2241 channel->length = length;
2242 channel->vp_id = vp_id;
2243
2244 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2245 if (channel->work_arr == NULL)
2246 goto exit1;
2247
2248 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2249 if (channel->free_arr == NULL)
2250 goto exit1;
2251 channel->free_ptr = length;
2252
2253 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2254 if (channel->reserve_arr == NULL)
2255 goto exit1;
2256 channel->reserve_ptr = length;
2257 channel->reserve_top = 0;
2258
2259 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2260 if (channel->orig_arr == NULL)
2261 goto exit1;
2262
2263 return channel;
2264exit1:
2265 __vxge_hw_channel_free(channel);
2266
2267exit0:
2268 return NULL;
2269}
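__vxge_hw_channel_allocate() above funnels every allocation failure to a single exit1 label and lets __vxge_hw_channel_free() tear down whatever was built so far. A hypothetical miniature of the idiom (names invented for illustration), relying on kfree(NULL) being a no-op:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_channel {
            void **work_arr;
            void **free_arr;
    };

    static void demo_channel_free(struct demo_channel *ch)
    {
            kfree(ch->work_arr);    /* safe even while still NULL */
            kfree(ch->free_arr);
            kfree(ch);
    }

    static struct demo_channel *demo_channel_alloc(u32 length)
    {
            struct demo_channel *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

            if (!ch)
                    return NULL;
            ch->work_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
            if (!ch->work_arr)
                    goto err;
            ch->free_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
            if (!ch->free_arr)
                    goto err;
            return ch;
    err:
            demo_channel_free(ch);  /* frees only what was allocated */
            return NULL;
    }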
2270
2271/*
2272 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2273 * Adds a block to block pool
2274 */
2275static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2276 void *block_addr,
2277 u32 length,
2278 struct pci_dev *dma_h,
2279 struct pci_dev *acc_handle)
2280{
2281 struct __vxge_hw_blockpool *blockpool;
2282 struct __vxge_hw_blockpool_entry *entry = NULL;
2283 dma_addr_t dma_addr;
2284 enum vxge_hw_status status = VXGE_HW_OK;
2285 u32 req_out;
2286
2287 blockpool = &devh->block_pool;
2288
2289 if (block_addr == NULL) {
2290 blockpool->req_out--;
1427 status = VXGE_HW_FAIL; 2291 status = VXGE_HW_FAIL;
1428 goto exit; 2292 goto exit;
1429 } 2293 }
1430 2294
1431 hldev = vp->vpath->hldev; 2295 dma_addr = pci_map_single(devh->pdev, block_addr, length,
1432 vp_id = vp->vpath->vp_id; 2296 PCI_DMA_BIDIRECTIONAL);
1433 2297
1434 config = &hldev->config.vp_config[vp_id].ring; 2298 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2299 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2300 blockpool->req_out--;
2301 status = VXGE_HW_FAIL;
2302 goto exit;
2303 }
1435 2304
1436 ring_length = config->ring_blocks * 2305 if (!list_empty(&blockpool->free_entry_list))
1437 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2306 entry = (struct __vxge_hw_blockpool_entry *)
2307 list_first_entry(&blockpool->free_entry_list,
2308 struct __vxge_hw_blockpool_entry,
2309 item);
1438 2310
1439 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, 2311 if (entry == NULL)
1440 VXGE_HW_CHANNEL_TYPE_RING, 2312 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
1441 ring_length, 2313 else
1442 attr->per_rxd_space, 2314 list_del(&entry->item);
1443 attr->userdata);
1444 2315
1445 if (ring == NULL) { 2316 if (entry != NULL) {
2317 entry->length = length;
2318 entry->memblock = block_addr;
2319 entry->dma_addr = dma_addr;
2320 entry->acc_handle = acc_handle;
2321 entry->dma_handle = dma_h;
2322 list_add(&entry->item, &blockpool->free_block_list);
2323 blockpool->pool_size++;
2324 status = VXGE_HW_OK;
2325 } else
1446 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2326 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1447 goto exit;
1448 }
1449 2327
1450 vp->vpath->ringh = ring; 2328 blockpool->req_out--;
1451 ring->vp_id = vp_id;
1452 ring->vp_reg = vp->vpath->vp_reg;
1453 ring->common_reg = hldev->common_reg;
1454 ring->stats = &vp->vpath->sw_stats->ring_stats;
1455 ring->config = config;
1456 ring->callback = attr->callback;
1457 ring->rxd_init = attr->rxd_init;
1458 ring->rxd_term = attr->rxd_term;
1459 ring->buffer_mode = config->buffer_mode;
1460 ring->rxds_limit = config->rxds_limit;
1461 2329
1462 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2330 req_out = blockpool->req_out;
1463 ring->rxd_priv_size = 2331exit:
1464 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; 2332 return;
1465 ring->per_rxd_space = attr->per_rxd_space; 2333}
1466 2334
1467 ring->rxd_priv_size = 2335static inline void
1468 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / 2336vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
1469 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; 2337{
2338 gfp_t flags;
2339 void *vaddr;
1470 2340
1471 /* how many RxDs can fit into one block. Depends on configured 2341 if (in_interrupt())
1472 * buffer_mode. */ 2342 flags = GFP_ATOMIC | GFP_DMA;
1473 ring->rxds_per_block = 2343 else
1474 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2344 flags = GFP_KERNEL | GFP_DMA;
1475 2345
1476 /* calculate actual RxD block private size */ 2346 vaddr = kmalloc((size), flags);
1477 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1478 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1479 ring->mempool = __vxge_hw_mempool_create(hldev,
1480 VXGE_HW_BLOCK_SIZE,
1481 VXGE_HW_BLOCK_SIZE,
1482 ring->rxdblock_priv_size,
1483 ring->config->ring_blocks,
1484 ring->config->ring_blocks,
1485 &ring_mp_callback,
1486 ring);
1487 2347
1488 if (ring->mempool == NULL) { 2348 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1489 __vxge_hw_ring_delete(vp); 2349}
1490 return VXGE_HW_ERR_OUT_OF_MEMORY;
1491 }
1492 2350
1493 status = __vxge_hw_channel_initialize(&ring->channel); 2351/*
1494 if (status != VXGE_HW_OK) { 2352 * __vxge_hw_blockpool_blocks_add - Request additional blocks
1495 __vxge_hw_ring_delete(vp); 2353 */
1496 goto exit; 2354static
2355void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2356{
2357 u32 nreq = 0, i;
2358
2359 if ((blockpool->pool_size + blockpool->req_out) <
2360 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2361 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2362 blockpool->req_out += nreq;
1497 } 2363 }
1498 2364
1499 /* Note: 2365 for (i = 0; i < nreq; i++)
1500 * Specifying rxd_init callback means two things: 2366 vxge_os_dma_malloc_async(
1501 * 1) rxds need to be initialized by driver at channel-open time; 2367 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1502 * 2) rxds need to be posted at channel-open time 2368 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
1503 * (that's what the initial_replenish() below does) 2369}
1504 * Currently we don't have a case where 1) is done without 2). 2370
1505 */ 2371/*
1506 if (ring->rxd_init) { 2372 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
1507 status = vxge_hw_ring_replenish(ring); 2373 * Allocates a block of memory of given size, either from block pool
1508 if (status != VXGE_HW_OK) { 2374 * or by calling vxge_os_dma_malloc()
1509 __vxge_hw_ring_delete(vp); 2375 */
2376static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2377 struct vxge_hw_mempool_dma *dma_object)
2378{
2379 struct __vxge_hw_blockpool_entry *entry = NULL;
2380 struct __vxge_hw_blockpool *blockpool;
2381 void *memblock = NULL;
2382 enum vxge_hw_status status = VXGE_HW_OK;
2383
2384 blockpool = &devh->block_pool;
2385
2386 if (size != blockpool->block_size) {
2387
2388 memblock = vxge_os_dma_malloc(devh->pdev, size,
2389 &dma_object->handle,
2390 &dma_object->acc_handle);
2391
2392 if (memblock == NULL) {
2393 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1510 goto exit; 2394 goto exit;
1511 } 2395 }
1512 }
1513 2396
1514 /* initial replenish will increment the counter in its post() routine, 2397 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
1515 * we have to reset it */ 2398 PCI_DMA_BIDIRECTIONAL);
1516 ring->stats->common_stats.usage_cnt = 0; 2399
2400 if (unlikely(pci_dma_mapping_error(devh->pdev,
2401 dma_object->addr))) {
2402 vxge_os_dma_free(devh->pdev, memblock,
2403 &dma_object->acc_handle);
2404 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2405 goto exit;
2406 }
2407
2408 } else {
2409
2410 if (!list_empty(&blockpool->free_block_list))
2411 entry = (struct __vxge_hw_blockpool_entry *)
2412 list_first_entry(&blockpool->free_block_list,
2413 struct __vxge_hw_blockpool_entry,
2414 item);
2415
2416 if (entry != NULL) {
2417 list_del(&entry->item);
2418 dma_object->addr = entry->dma_addr;
2419 dma_object->handle = entry->dma_handle;
2420 dma_object->acc_handle = entry->acc_handle;
2421 memblock = entry->memblock;
2422
2423 list_add(&entry->item,
2424 &blockpool->free_entry_list);
2425 blockpool->pool_size--;
2426 }
2427
2428 if (memblock != NULL)
2429 __vxge_hw_blockpool_blocks_add(blockpool);
2430 }
1517exit: 2431exit:
1518 return status; 2432 return memblock;
1519} 2433}
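Both allocation paths in __vxge_hw_blockpool_malloc(), like vxge_hw_blockpool_block_add() before it, refuse to trust a pci_map_single() handle until pci_dma_mapping_error() clears it. Condensed into a stand-alone sketch (pdev and buf are placeholders, not driver state):

    #include <linux/pci.h>
    #include <linux/errno.h>

    static int demo_map_block(struct pci_dev *pdev, void *buf, size_t size,
                              dma_addr_t *dma_out)
    {
            dma_addr_t dma = pci_map_single(pdev, buf, size,
                                            PCI_DMA_BIDIRECTIONAL);

            /* a mapping can fail even when buf is valid; check before use */
            if (unlikely(pci_dma_mapping_error(pdev, dma)))
                    return -ENOMEM; /* caller still owns and must free buf */

            *dma_out = dma;
            return 0;
    }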
1520 2434
1521/* 2435/*
1522 * __vxge_hw_ring_abort - Returns the RxD 2436 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
1523 * This function terminates the RxDs of ring
1524 */ 2437 */
1525static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) 2438static void
2439__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
1526{ 2440{
1527 void *rxdh; 2441 struct list_head *p, *n;
1528 struct __vxge_hw_channel *channel;
1529
1530 channel = &ring->channel;
1531 2442
1532 for (;;) { 2443 list_for_each_safe(p, n, &blockpool->free_block_list) {
1533 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1534 2444
1535 if (rxdh == NULL) 2445 if (blockpool->pool_size < blockpool->pool_max)
1536 break; 2446 break;
1537 2447
1538 vxge_hw_channel_dtr_complete(channel); 2448 pci_unmap_single(
2449 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2450 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2451 ((struct __vxge_hw_blockpool_entry *)p)->length,
2452 PCI_DMA_BIDIRECTIONAL);
1539 2453
1540 if (ring->rxd_term) 2454 vxge_os_dma_free(
1541 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, 2455 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1542 channel->userdata); 2456 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2457 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1543 2458
1544 vxge_hw_channel_dtr_free(channel, rxdh); 2459 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1545 }
1546 2460
1547 return VXGE_HW_OK; 2461 list_add(p, &blockpool->free_entry_list);
2462
2463 blockpool->pool_size--;
2464
2465 }
1548} 2466}
1549 2467
1550/* 2468/*
1551 * __vxge_hw_ring_reset - Resets the ring 2469 * __vxge_hw_blockpool_free - Frees the memory allocated with
1552 * This function resets the ring during vpath reset operation 2470 * __vxge_hw_blockpool_malloc
1553 */ 2471 */
1554static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) 2472static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2473 void *memblock, u32 size,
2474 struct vxge_hw_mempool_dma *dma_object)
1555{ 2475{
2476 struct __vxge_hw_blockpool_entry *entry = NULL;
2477 struct __vxge_hw_blockpool *blockpool;
1556 enum vxge_hw_status status = VXGE_HW_OK; 2478 enum vxge_hw_status status = VXGE_HW_OK;
1557 struct __vxge_hw_channel *channel;
1558 2479
1559 channel = &ring->channel; 2480 blockpool = &devh->block_pool;
1560 2481
1561 __vxge_hw_ring_abort(ring); 2482 if (size != blockpool->block_size) {
2483 pci_unmap_single(devh->pdev, dma_object->addr, size,
2484 PCI_DMA_BIDIRECTIONAL);
2485 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2486 } else {
1562 2487
1563 status = __vxge_hw_channel_reset(channel); 2488 if (!list_empty(&blockpool->free_entry_list))
2489 entry = (struct __vxge_hw_blockpool_entry *)
2490 list_first_entry(&blockpool->free_entry_list,
2491 struct __vxge_hw_blockpool_entry,
2492 item);
1564 2493
1565 if (status != VXGE_HW_OK) 2494 if (entry == NULL)
1566 goto exit; 2495 entry = vmalloc(sizeof(
2496 struct __vxge_hw_blockpool_entry));
2497 else
2498 list_del(&entry->item);
1567 2499
1568 if (ring->rxd_init) { 2500 if (entry != NULL) {
1569 status = vxge_hw_ring_replenish(ring); 2501 entry->length = size;
1570 if (status != VXGE_HW_OK) 2502 entry->memblock = memblock;
1571 goto exit; 2503 entry->dma_addr = dma_object->addr;
2504 entry->acc_handle = dma_object->acc_handle;
2505 entry->dma_handle = dma_object->handle;
2506 list_add(&entry->item,
2507 &blockpool->free_block_list);
2508 blockpool->pool_size++;
2509 status = VXGE_HW_OK;
2510 } else
2511 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2512
2513 if (status == VXGE_HW_OK)
2514 __vxge_hw_blockpool_blocks_remove(blockpool);
1572 } 2515 }
1573exit:
1574 return status;
1575} 2516}
1576 2517
1577/* 2518/*
1578 * __vxge_hw_ring_delete - Removes the ring 2519 * __vxge_hw_mempool_destroy
1579 * This function frees up the memory pool and removes the ring
1580 */ 2520 */
1581static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) 2521static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1582{ 2522{
1583 struct __vxge_hw_ring *ring = vp->vpath->ringh; 2523 u32 i, j;
2524 struct __vxge_hw_device *devh = mempool->devh;
1584 2525
1585 __vxge_hw_ring_abort(ring); 2526 for (i = 0; i < mempool->memblocks_allocated; i++) {
2527 struct vxge_hw_mempool_dma *dma_object;
1586 2528
1587 if (ring->mempool) 2529 vxge_assert(mempool->memblocks_arr[i]);
1588 __vxge_hw_mempool_destroy(ring->mempool); 2530 vxge_assert(mempool->memblocks_dma_arr + i);
1589 2531
1590 vp->vpath->ringh = NULL; 2532 dma_object = mempool->memblocks_dma_arr + i;
1591 __vxge_hw_channel_free(&ring->channel);
1592 2533
1593 return VXGE_HW_OK; 2534 for (j = 0; j < mempool->items_per_memblock; j++) {
2535 u32 index = i * mempool->items_per_memblock + j;
2536
2537 /* to skip last partially filled (if any) memblock */
2538 if (index >= mempool->items_current)
2539 break;
2540 }
2541
2542 vfree(mempool->memblocks_priv_arr[i]);
2543
2544 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2545 mempool->memblock_size, dma_object);
2546 }
2547
2548 vfree(mempool->items_arr);
2549 vfree(mempool->memblocks_dma_arr);
2550 vfree(mempool->memblocks_priv_arr);
2551 vfree(mempool->memblocks_arr);
2552 vfree(mempool);
1594} 2553}
1595 2554
1596/* 2555/*
@@ -1627,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1627 * allocate new memblock and its private part at once. 2586 * allocate new memblock and its private part at once.
1628 * This helps to minimize memory usage a lot. */ 2587 * This helps to minimize memory usage a lot. */
1629 mempool->memblocks_priv_arr[i] = 2588 mempool->memblocks_priv_arr[i] =
1630 vmalloc(mempool->items_priv_size * n_items); 2589 vzalloc(mempool->items_priv_size * n_items);
1631 if (mempool->memblocks_priv_arr[i] == NULL) { 2590 if (mempool->memblocks_priv_arr[i] == NULL) {
1632 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2591 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1633 goto exit; 2592 goto exit;
1634 } 2593 }
1635 2594
1636 memset(mempool->memblocks_priv_arr[i], 0,
1637 mempool->items_priv_size * n_items);
1638
1639 /* allocate DMA-capable memblock */ 2595 /* allocate DMA-capable memblock */
1640 mempool->memblocks_arr[i] = 2596 mempool->memblocks_arr[i] =
1641 __vxge_hw_blockpool_malloc(mempool->devh, 2597 __vxge_hw_blockpool_malloc(mempool->devh,
@@ -1686,16 +2642,15 @@ exit:
1686 * with size enough to hold %items_initial number of items. Memory is 2642 * with size enough to hold %items_initial number of items. Memory is
1687 * DMA-able but client must map/unmap before interoperating with the device. 2643 * DMA-able but client must map/unmap before interoperating with the device.
1688 */ 2644 */
1689static struct vxge_hw_mempool* 2645static struct vxge_hw_mempool *
1690__vxge_hw_mempool_create( 2646__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
1691 struct __vxge_hw_device *devh, 2647 u32 memblock_size,
1692 u32 memblock_size, 2648 u32 item_size,
1693 u32 item_size, 2649 u32 items_priv_size,
1694 u32 items_priv_size, 2650 u32 items_initial,
1695 u32 items_initial, 2651 u32 items_max,
1696 u32 items_max, 2652 struct vxge_hw_mempool_cbs *mp_callback,
1697 struct vxge_hw_mempool_cbs *mp_callback, 2653 void *userdata)
1698 void *userdata)
1699{ 2654{
1700 enum vxge_hw_status status = VXGE_HW_OK; 2655 enum vxge_hw_status status = VXGE_HW_OK;
1701 u32 memblocks_to_allocate; 2656 u32 memblocks_to_allocate;
@@ -1707,13 +2662,11 @@ __vxge_hw_mempool_create(
1707 goto exit; 2662 goto exit;
1708 } 2663 }
1709 2664
1710 mempool = (struct vxge_hw_mempool *) 2665 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
1711 vmalloc(sizeof(struct vxge_hw_mempool));
1712 if (mempool == NULL) { 2666 if (mempool == NULL) {
1713 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2667 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1714 goto exit; 2668 goto exit;
1715 } 2669 }
1716 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1717 2670
1718 mempool->devh = devh; 2671 mempool->devh = devh;
1719 mempool->memblock_size = memblock_size; 2672 mempool->memblock_size = memblock_size;
@@ -1733,53 +2686,43 @@ __vxge_hw_mempool_create(
1733 2686
1734 /* allocate array of memblocks */ 2687 /* allocate array of memblocks */
1735 mempool->memblocks_arr = 2688 mempool->memblocks_arr =
1736 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2689 vzalloc(sizeof(void *) * mempool->memblocks_max);
1737 if (mempool->memblocks_arr == NULL) { 2690 if (mempool->memblocks_arr == NULL) {
1738 __vxge_hw_mempool_destroy(mempool); 2691 __vxge_hw_mempool_destroy(mempool);
1739 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2692 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1740 mempool = NULL; 2693 mempool = NULL;
1741 goto exit; 2694 goto exit;
1742 } 2695 }
1743 memset(mempool->memblocks_arr, 0,
1744 sizeof(void *) * mempool->memblocks_max);
1745 2696
1746 /* allocate array of private parts of items per memblocks */ 2697 /* allocate array of private parts of items per memblocks */
1747 mempool->memblocks_priv_arr = 2698 mempool->memblocks_priv_arr =
1748 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2699 vzalloc(sizeof(void *) * mempool->memblocks_max);
1749 if (mempool->memblocks_priv_arr == NULL) { 2700 if (mempool->memblocks_priv_arr == NULL) {
1750 __vxge_hw_mempool_destroy(mempool); 2701 __vxge_hw_mempool_destroy(mempool);
1751 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2702 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1752 mempool = NULL; 2703 mempool = NULL;
1753 goto exit; 2704 goto exit;
1754 } 2705 }
1755 memset(mempool->memblocks_priv_arr, 0,
1756 sizeof(void *) * mempool->memblocks_max);
1757 2706
1758 /* allocate array of memblocks DMA objects */ 2707 /* allocate array of memblocks DMA objects */
1759 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) 2708 mempool->memblocks_dma_arr =
1760 vmalloc(sizeof(struct vxge_hw_mempool_dma) * 2709 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
1761 mempool->memblocks_max); 2710 mempool->memblocks_max);
1762
1763 if (mempool->memblocks_dma_arr == NULL) { 2711 if (mempool->memblocks_dma_arr == NULL) {
1764 __vxge_hw_mempool_destroy(mempool); 2712 __vxge_hw_mempool_destroy(mempool);
1765 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2713 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1766 mempool = NULL; 2714 mempool = NULL;
1767 goto exit; 2715 goto exit;
1768 } 2716 }
1769 memset(mempool->memblocks_dma_arr, 0,
1770 sizeof(struct vxge_hw_mempool_dma) *
1771 mempool->memblocks_max);
1772 2717
1773 /* allocate hash array of items */ 2718 /* allocate hash array of items */
1774 mempool->items_arr = 2719 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
1775 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1776 if (mempool->items_arr == NULL) { 2720 if (mempool->items_arr == NULL) {
1777 __vxge_hw_mempool_destroy(mempool); 2721 __vxge_hw_mempool_destroy(mempool);
1778 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2722 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1779 mempool = NULL; 2723 mempool = NULL;
1780 goto exit; 2724 goto exit;
1781 } 2725 }
1782 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1783 2726
1784 /* calculate initial number of memblocks */ 2727 /* calculate initial number of memblocks */
1785 memblocks_to_allocate = (mempool->items_initial + 2728 memblocks_to_allocate = (mempool->items_initial +
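The memblocks_to_allocate expression is cut off at this hunk boundary, but it is presumably the usual ceiling division over items_per_memblock; the kernel spells that idiom as DIV_ROUND_UP():

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* presumed continuation of the truncated expression above */
    static u32 demo_memblocks_needed(u32 items_initial, u32 items_per_memblock)
    {
            /* DIV_ROUND_UP(n, d) == (n + d - 1) / d */
            return DIV_ROUND_UP(items_initial, items_per_memblock);
    }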
@@ -1801,122 +2744,188 @@ exit:
1801} 2744}
1802 2745
1803/* 2746/*
1804 * __vxge_hw_mempool_destroy 2747 * __vxge_hw_ring_abort - Returns the RxD
2748 * This function terminates the RxDs of ring
1805 */ 2749 */
1806static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) 2750static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1807{ 2751{
1808 u32 i, j; 2752 void *rxdh;
1809 struct __vxge_hw_device *devh = mempool->devh; 2753 struct __vxge_hw_channel *channel;
1810
1811 for (i = 0; i < mempool->memblocks_allocated; i++) {
1812 struct vxge_hw_mempool_dma *dma_object;
1813 2754
1814 vxge_assert(mempool->memblocks_arr[i]); 2755 channel = &ring->channel;
1815 vxge_assert(mempool->memblocks_dma_arr + i);
1816 2756
1817 dma_object = mempool->memblocks_dma_arr + i; 2757 for (;;) {
2758 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1818 2759
1819 for (j = 0; j < mempool->items_per_memblock; j++) { 2760 if (rxdh == NULL)
1820 u32 index = i * mempool->items_per_memblock + j; 2761 break;
1821 2762
1822 /* to skip last partially filled (if any) memblock */ 2763 vxge_hw_channel_dtr_complete(channel);
1823 if (index >= mempool->items_current)
1824 break;
1825 }
1826 2764
1827 vfree(mempool->memblocks_priv_arr[i]); 2765 if (ring->rxd_term)
2766 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2767 channel->userdata);
1828 2768
1829 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], 2769 vxge_hw_channel_dtr_free(channel, rxdh);
1830 mempool->memblock_size, dma_object);
1831 } 2770 }
1832 2771
1833 vfree(mempool->items_arr); 2772 return VXGE_HW_OK;
2773}
1834 2774
1835 vfree(mempool->memblocks_dma_arr); 2775/*
2776 * __vxge_hw_ring_reset - Resets the ring
2777 * This function resets the ring during vpath reset operation
2778 */
2779static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2780{
2781 enum vxge_hw_status status = VXGE_HW_OK;
2782 struct __vxge_hw_channel *channel;
1836 2783
1837 vfree(mempool->memblocks_priv_arr); 2784 channel = &ring->channel;
1838 2785
1839 vfree(mempool->memblocks_arr); 2786 __vxge_hw_ring_abort(ring);
1840 2787
1841 vfree(mempool); 2788 status = __vxge_hw_channel_reset(channel);
2789
2790 if (status != VXGE_HW_OK)
2791 goto exit;
2792
2793 if (ring->rxd_init) {
2794 status = vxge_hw_ring_replenish(ring);
2795 if (status != VXGE_HW_OK)
2796 goto exit;
2797 }
2798exit:
2799 return status;
1842} 2800}
1843 2801
1844/* 2802/*
1845 * __vxge_hw_device_fifo_config_check - Check fifo configuration. 2803 * __vxge_hw_ring_delete - Removes the ring
1846 * Check the fifo configuration 2804 * This function frees up the memory pool and removes the ring
1847 */ 2805 */
1848enum vxge_hw_status 2806static enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) 2807__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1850{ 2808{
1851 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || 2809 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1852 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) 2810
1853 return VXGE_HW_BADCFG_FIFO_BLOCKS; 2811 __vxge_hw_ring_abort(ring);
2812
2813 if (ring->mempool)
2814 __vxge_hw_mempool_destroy(ring->mempool);
2815
2816 vp->vpath->ringh = NULL;
2817 __vxge_hw_channel_free(&ring->channel);
1854 2818
1855 return VXGE_HW_OK; 2819 return VXGE_HW_OK;
1856} 2820}
1857 2821
1858/* 2822/*
1859 * __vxge_hw_device_vpath_config_check - Check vpath configuration. 2823 * __vxge_hw_ring_create - Create a Ring
1860 * Check the vpath configuration 2824 * This function creates Ring and initializes it.
1861 */ 2825 */
1862static enum vxge_hw_status 2826static enum vxge_hw_status
1863__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) 2827__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2828 struct vxge_hw_ring_attr *attr)
1864{ 2829{
1865 enum vxge_hw_status status; 2830 enum vxge_hw_status status = VXGE_HW_OK;
2831 struct __vxge_hw_ring *ring;
2832 u32 ring_length;
2833 struct vxge_hw_ring_config *config;
2834 struct __vxge_hw_device *hldev;
2835 u32 vp_id;
2836 struct vxge_hw_mempool_cbs ring_mp_callback;
1866 2837
1867 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || 2838 if ((vp == NULL) || (attr == NULL)) {
1868 (vp_config->min_bandwidth > 2839 status = VXGE_HW_FAIL;
1869 VXGE_HW_VPATH_BANDWIDTH_MAX)) 2840 goto exit;
1870 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; 2841 }
1871 2842
1872 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); 2843 hldev = vp->vpath->hldev;
1873 if (status != VXGE_HW_OK) 2844 vp_id = vp->vpath->vp_id;
1874 return status;
1875 2845
1876 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && 2846 config = &hldev->config.vp_config[vp_id].ring;
1877 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1878 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1879 return VXGE_HW_BADCFG_VPATH_MTU;
1880 2847
1881 if ((vp_config->rpa_strip_vlan_tag != 2848 ring_length = config->ring_blocks *
1882 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && 2849 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1883 (vp_config->rpa_strip_vlan_tag !=
1884 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1885 (vp_config->rpa_strip_vlan_tag !=
1886 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1887 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1888 2850
1889 return VXGE_HW_OK; 2851 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1890} 2852 VXGE_HW_CHANNEL_TYPE_RING,
2853 ring_length,
2854 attr->per_rxd_space,
2855 attr->userdata);
2856 if (ring == NULL) {
2857 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2858 goto exit;
2859 }
1891 2860
1892/* 2861 vp->vpath->ringh = ring;
1893 * __vxge_hw_device_config_check - Check device configuration. 2862 ring->vp_id = vp_id;
1894 * Check the device configuration 2863 ring->vp_reg = vp->vpath->vp_reg;
1895 */ 2864 ring->common_reg = hldev->common_reg;
1896enum vxge_hw_status 2865 ring->stats = &vp->vpath->sw_stats->ring_stats;
1897__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) 2866 ring->config = config;
1898{ 2867 ring->callback = attr->callback;
1899 u32 i; 2868 ring->rxd_init = attr->rxd_init;
1900 enum vxge_hw_status status; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode;
2871 ring->rxds_limit = config->rxds_limit;
1901 2872
1902 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && 2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1903 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && 2874 ring->rxd_priv_size =
1904 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && 2875 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1905 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) 2876 ring->per_rxd_space = attr->per_rxd_space;
1906 return VXGE_HW_BADCFG_INTR_MODE;
1907 2877
1908 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && 2878 ring->rxd_priv_size =
1909 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) 2879 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1910 return VXGE_HW_BADCFG_RTS_MAC_EN; 2880 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1911 2881
1912 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2882 /* how many RxDs can fit into one block. Depends on configured
1913 status = __vxge_hw_device_vpath_config_check( 2883 * buffer_mode. */
1914 &new_config->vp_config[i]); 2884 ring->rxds_per_block =
1915 if (status != VXGE_HW_OK) 2885 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1916 return status; 2886
2887 /* calculate actual RxD block private size */
2888 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2889 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2890 ring->mempool = __vxge_hw_mempool_create(hldev,
2891 VXGE_HW_BLOCK_SIZE,
2892 VXGE_HW_BLOCK_SIZE,
2893 ring->rxdblock_priv_size,
2894 ring->config->ring_blocks,
2895 ring->config->ring_blocks,
2896 &ring_mp_callback,
2897 ring);
2898 if (ring->mempool == NULL) {
2899 __vxge_hw_ring_delete(vp);
2900 return VXGE_HW_ERR_OUT_OF_MEMORY;
1917 } 2901 }
1918 2902
1919 return VXGE_HW_OK; 2903 status = __vxge_hw_channel_initialize(&ring->channel);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908
2909 /* Note:
2910 * Specifying rxd_init callback means two things:
2911 * 1) rxds need to be initialized by driver at channel-open time;
2912 * 2) rxds need to be posted at channel-open time
2913 * (that's what the initial_replenish() below does)
2914 * Currently we don't have a case where 1) is done without 2).
2915 */
2916 if (ring->rxd_init) {
2917 status = vxge_hw_ring_replenish(ring);
2918 if (status != VXGE_HW_OK) {
2919 __vxge_hw_ring_delete(vp);
2920 goto exit;
2921 }
2922 }
2923
2924 /* initial replenish will increment the counter in its post() routine,
2925 * we have to reset it */
2926 ring->stats->common_stats.usage_cnt = 0;
2927exit:
2928 return status;
1920} 2929}
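The rxd_priv_size computation inside __vxge_hw_ring_create() rounds the per-RxD private area up to a multiple of VXGE_CACHE_LINE_SIZE with an open-coded divide-and-multiply. The kernel's roundup() macro expands to the same arithmetic and, unlike ALIGN(), does not require a power-of-two step; an equivalent sketch:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static u32 demo_round_priv_size(u32 size, u32 cache_line)
    {
            /* ((size + cache_line - 1) / cache_line) * cache_line */
            return roundup(size, cache_line);
    }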
1921 2930
1922/* 2931/*
@@ -1938,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1938 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; 2947 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
1939 2948
1940 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2949 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1941
1942 device_config->vp_config[i].vp_id = i; 2950 device_config->vp_config[i].vp_id = i;
1943 2951
1944 device_config->vp_config[i].min_bandwidth = 2952 device_config->vp_config[i].min_bandwidth =
@@ -2078,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2078} 3086}
2079 3087
2080/* 3088/*
2081 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2082 * Set the swapper bits appropriately for the legacy section.
2083 */
2084static enum vxge_hw_status
2085__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2086{
2087 u64 val64;
2088 enum vxge_hw_status status = VXGE_HW_OK;
2089
2090 val64 = readq(&legacy_reg->toc_swapper_fb);
2091
2092 wmb();
2093
2094 switch (val64) {
2095
2096 case VXGE_HW_SWAPPER_INITIAL_VALUE:
2097 return status;
2098
2099 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2100 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2101 &legacy_reg->pifm_rd_swap_en);
2102 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2103 &legacy_reg->pifm_rd_flip_en);
2104 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2105 &legacy_reg->pifm_wr_swap_en);
2106 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2107 &legacy_reg->pifm_wr_flip_en);
2108 break;
2109
2110 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2111 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2112 &legacy_reg->pifm_rd_swap_en);
2113 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2114 &legacy_reg->pifm_wr_swap_en);
2115 break;
2116
2117 case VXGE_HW_SWAPPER_BIT_FLIPPED:
2118 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2119 &legacy_reg->pifm_rd_flip_en);
2120 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2121 &legacy_reg->pifm_wr_flip_en);
2122 break;
2123 }
2124
2125 wmb();
2126
2127 val64 = readq(&legacy_reg->toc_swapper_fb);
2128
2129 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2130 status = VXGE_HW_ERR_SWAPPER_CTRL;
2131
2132 return status;
2133}
2134
2135/*
2136 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. 3089 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2137 * Set the swapper bits appropriately for the vpath. 3090 * Set the swapper bits appropriately for the vpath.
2138 */ 3091 */
@@ -2156,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2156 * Set the swapper bits appropriately for the vpath. 3109 * Set the swapper bits appropriately for the vpath.
2157 */ 3110 */
2158static enum vxge_hw_status 3111static enum vxge_hw_status
2159__vxge_hw_kdfc_swapper_set( 3112__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2160 struct vxge_hw_legacy_reg __iomem *legacy_reg, 3113 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2161 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2162{ 3114{
2163 u64 val64; 3115 u64 val64;
2164 3116
@@ -2408,6 +3360,69 @@ exit:
2408} 3360}
2409 3361
2410/* 3362/*
3363 * __vxge_hw_fifo_abort - Returns the TxD
3364 * This function terminates the TxDs of fifo
3365 */
3366static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3367{
3368 void *txdlh;
3369
3370 for (;;) {
3371 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3372
3373 if (txdlh == NULL)
3374 break;
3375
3376 vxge_hw_channel_dtr_complete(&fifo->channel);
3377
3378 if (fifo->txdl_term) {
3379 fifo->txdl_term(txdlh,
3380 VXGE_HW_TXDL_STATE_POSTED,
3381 fifo->channel.userdata);
3382 }
3383
3384 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3385 }
3386
3387 return VXGE_HW_OK;
3388}
3389
3390/*
3391 * __vxge_hw_fifo_reset - Resets the fifo
3392 * This function resets the fifo during vpath reset operation
3393 */
3394static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3395{
3396 enum vxge_hw_status status = VXGE_HW_OK;
3397
3398 __vxge_hw_fifo_abort(fifo);
3399 status = __vxge_hw_channel_reset(&fifo->channel);
3400
3401 return status;
3402}
3403
3404/*
3405 * __vxge_hw_fifo_delete - Removes the FIFO
3406 * This function frees up the memory pool and removes the FIFO
3407 */
3408static enum vxge_hw_status
3409__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3410{
3411 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3412
3413 __vxge_hw_fifo_abort(fifo);
3414
3415 if (fifo->mempool)
3416 __vxge_hw_mempool_destroy(fifo->mempool);
3417
3418 vp->vpath->fifoh = NULL;
3419
3420 __vxge_hw_channel_free(&fifo->channel);
3421
3422 return VXGE_HW_OK;
3423}
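The abort/reset/delete trio is moved here, ahead of __vxge_hw_fifo_create(), unchanged apart from the static qualifiers; defining each function before its first caller is what lets the patch drop externally visible linkage without adding forward declarations such as these (illustrative only):

    static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo);
    static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo);
    static enum vxge_hw_status
    __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp);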
3424
3425/*
2411 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD 3426 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2412 * list callback 3427 * list callback
2413 * This function is callback passed to __vxge_hw_mempool_create to create memory 3428 * This function is callback passed to __vxge_hw_mempool_create to create memory
@@ -2453,7 +3468,7 @@ __vxge_hw_fifo_mempool_item_alloc(
2453 * __vxge_hw_fifo_create - Create a FIFO 3468 * __vxge_hw_fifo_create - Create a FIFO
2454 * This function creates FIFO and initializes it. 3469 * This function creates FIFO and initializes it.
2455 */ 3470 */
2456enum vxge_hw_status 3471static enum vxge_hw_status
2457__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, 3472__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458 struct vxge_hw_fifo_attr *attr) 3473 struct vxge_hw_fifo_attr *attr)
2459{ 3474{
@@ -2572,68 +3587,6 @@ exit:
2572} 3587}
2573 3588
2574/* 3589/*
2575 * __vxge_hw_fifo_abort - Returns the TxD
2576 * This function terminates the TxDs of fifo
2577 */
2578static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2579{
2580 void *txdlh;
2581
2582 for (;;) {
2583 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2584
2585 if (txdlh == NULL)
2586 break;
2587
2588 vxge_hw_channel_dtr_complete(&fifo->channel);
2589
2590 if (fifo->txdl_term) {
2591 fifo->txdl_term(txdlh,
2592 VXGE_HW_TXDL_STATE_POSTED,
2593 fifo->channel.userdata);
2594 }
2595
2596 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2597 }
2598
2599 return VXGE_HW_OK;
2600}
2601
2602/*
2603 * __vxge_hw_fifo_reset - Resets the fifo
2604 * This function resets the fifo during vpath reset operation
2605 */
2606static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2607{
2608 enum vxge_hw_status status = VXGE_HW_OK;
2609
2610 __vxge_hw_fifo_abort(fifo);
2611 status = __vxge_hw_channel_reset(&fifo->channel);
2612
2613 return status;
2614}
2615
2616/*
2617 * __vxge_hw_fifo_delete - Removes the FIFO
2618 * This function frees up the memory pool and removes the FIFO
2619 */
2620enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621{
2622 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623
2624 __vxge_hw_fifo_abort(fifo);
2625
2626 if (fifo->mempool)
2627 __vxge_hw_mempool_destroy(fifo->mempool);
2628
2629 vp->vpath->fifoh = NULL;
2630
2631 __vxge_hw_channel_free(&fifo->channel);
2632
2633 return VXGE_HW_OK;
2634}
2635
2636/*
2637 * __vxge_hw_vpath_pci_read - Read the content of given address 3590 * __vxge_hw_vpath_pci_read - Read the content of given address
2638 * in pci config space. 3591 * in pci config space.
2639 * Read from the vpath pci config space. 3592 * Read from the vpath pci config space.
@@ -2675,297 +3628,6 @@ exit:
2675 return status; 3628 return status;
2676} 3629}
2677 3630
2678/*
2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680 * Returns the function number of the vpath.
2681 */
2682static u32
2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685{
2686 u64 val64;
2687
2688 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690 return
2691 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692}
2693
2694/*
2695 * __vxge_hw_read_rts_ds - Program RTS steering criteria
2696 */
2697static inline void
2698__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699 u64 dta_struct_sel)
2700{
2701 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702 wmb();
2703 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704 writeq(0, &vpath_reg->rts_access_steer_data1);
2705 wmb();
2706}
2707
2708
2709/*
2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711 * part number and product description.
2712 */
2713static enum vxge_hw_status
2714__vxge_hw_vpath_card_info_get(
2715 u32 vp_id,
2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717 struct vxge_hw_device_hw_info *hw_info)
2718{
2719 u32 i, j;
2720 u64 val64;
2721 u64 data1 = 0ULL;
2722 u64 data2 = 0ULL;
2723 enum vxge_hw_status status = VXGE_HW_OK;
2724 u8 *serial_number = hw_info->serial_number;
2725 u8 *part_number = hw_info->part_number;
2726 u8 *product_desc = hw_info->product_desc;
2727
2728 __vxge_hw_read_rts_ds(vpath_reg,
2729 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738 status = __vxge_hw_pio_mem_write64(val64,
2739 &vpath_reg->rts_access_steer_ctrl,
2740 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743 if (status != VXGE_HW_OK)
2744 return status;
2745
2746 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754 status = VXGE_HW_OK;
2755 } else
2756 *serial_number = 0;
2757
2758 __vxge_hw_read_rts_ds(vpath_reg,
2759 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768 status = __vxge_hw_pio_mem_write64(val64,
2769 &vpath_reg->rts_access_steer_ctrl,
2770 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773 if (status != VXGE_HW_OK)
2774 return status;
2775
2776 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786 status = VXGE_HW_OK;
2787
2788 } else
2789 *part_number = 0;
2790
2791 j = 0;
2792
2793 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805 status = __vxge_hw_pio_mem_write64(val64,
2806 &vpath_reg->rts_access_steer_ctrl,
2807 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810 if (status != VXGE_HW_OK)
2811 return status;
2812
2813 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817 data1 = readq(&vpath_reg->rts_access_steer_data0);
2818 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820 data2 = readq(&vpath_reg->rts_access_steer_data1);
2821 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823 status = VXGE_HW_OK;
2824 } else
2825 *product_desc = 0;
2826 }
2827
2828 return status;
2829}
2830
2831/*
2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833 * Returns FW Version
2834 */
2835static enum vxge_hw_status
2836__vxge_hw_vpath_fw_ver_get(
2837 u32 vp_id,
2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839 struct vxge_hw_device_hw_info *hw_info)
2840{
2841 u64 val64;
2842 u64 data1 = 0ULL;
2843 u64 data2 = 0ULL;
2844 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848 enum vxge_hw_status status = VXGE_HW_OK;
2849
2850 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857 status = __vxge_hw_pio_mem_write64(val64,
2858 &vpath_reg->rts_access_steer_ctrl,
2859 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862 if (status != VXGE_HW_OK)
2863 goto exit;
2864
2865 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872 fw_date->day =
2873 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874 data1);
2875 fw_date->month =
2876 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877 data1);
2878 fw_date->year =
2879 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880 data1);
2881
2882 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883 fw_date->month, fw_date->day, fw_date->year);
2884
2885 fw_version->major =
2886 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887 fw_version->minor =
2888 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889 fw_version->build =
2890 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893 fw_version->major, fw_version->minor, fw_version->build);
2894
2895 flash_date->day =
2896 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897 flash_date->month =
2898 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899 flash_date->year =
2900 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903 "%2.2d/%2.2d/%4.4d",
2904 flash_date->month, flash_date->day, flash_date->year);
2905
2906 flash_version->major =
2907 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908 flash_version->minor =
2909 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910 flash_version->build =
2911 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914 flash_version->major, flash_version->minor,
2915 flash_version->build);
2916
2917 status = VXGE_HW_OK;
2918
2919 } else
2920 status = VXGE_HW_FAIL;
2921exit:
2922 return status;
2923}
2924
2925/*
2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2927 * Returns pci function mode
2928 */
2929static u64
2930__vxge_hw_vpath_pci_func_mode_get(
2931 u32 vp_id,
2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933{
2934 u64 val64;
2935 u64 data1 = 0ULL;
2936 enum vxge_hw_status status = VXGE_HW_OK;
2937
2938 __vxge_hw_read_rts_ds(vpath_reg,
2939 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948 status = __vxge_hw_pio_mem_write64(val64,
2949 &vpath_reg->rts_access_steer_ctrl,
2950 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953 if (status != VXGE_HW_OK)
2954 goto exit;
2955
2956 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960 status = VXGE_HW_OK;
2961 } else {
2962 data1 = 0;
2963 status = VXGE_HW_FAIL;
2964 }
2965exit:
2966 return data1;
2967}
2968
2969/** 3631/**
2970 * vxge_hw_device_flick_link_led - Flick (blink) link LED. 3632 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971 * @hldev: HW device. 3633 * @hldev: HW device.
@@ -2974,37 +3636,24 @@ exit:
2974 * Flicker the link LED. 3636 * Flicker the link LED.
2975 */ 3637 */
2976enum vxge_hw_status 3638enum vxge_hw_status
2977vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, 3639vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
2978 u64 on_off)
2979{ 3640{
2980 u64 val64; 3641 struct __vxge_hw_virtualpath *vpath;
2981 enum vxge_hw_status status = VXGE_HW_OK; 3642 u64 data0, data1 = 0, steer_ctrl = 0;
2982 struct vxge_hw_vpath_reg __iomem *vp_reg; 3643 enum vxge_hw_status status;
2983 3644
2984 if (hldev == NULL) { 3645 if (hldev == NULL) {
2985 status = VXGE_HW_ERR_INVALID_DEVICE; 3646 status = VXGE_HW_ERR_INVALID_DEVICE;
2986 goto exit; 3647 goto exit;
2987 } 3648 }
2988 3649
2989 vp_reg = hldev->vpath_reg[hldev->first_vp_id]; 3650 vpath = &hldev->virtual_paths[hldev->first_vp_id];
2990 3651
2991 writeq(0, &vp_reg->rts_access_steer_ctrl); 3652 data0 = on_off;
2992 wmb(); 3653 status = vxge_hw_vpath_fw_api(vpath,
2993 writeq(on_off, &vp_reg->rts_access_steer_data0); 3654 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
2994 writeq(0, &vp_reg->rts_access_steer_data1); 3655 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
2995 wmb(); 3656 0, &data0, &data1, &steer_ctrl);
2996
2997 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2998 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2999 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3000 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3001 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3002 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003
3004 status = __vxge_hw_pio_mem_write64(val64,
3005 &vp_reg->rts_access_steer_ctrl,
3006 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3007 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3008exit: 3657exit:
3009 return status; 3658 return status;
3010} 3659}
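The rewrite above is the template for this whole patch: each open-coded
steering sequence (write data0/data1, program rts_access_steer_ctrl, poll
the strobe, test RMACJ_STATUS) collapses into one call to the new
vxge_hw_vpath_fw_api() helper. A minimal caller sketch; do_fw_memo_action()
is a hypothetical name, every other identifier appears in the patch:

/* Issue a FW-memo action on the first vpath and collect both data words. */
static enum vxge_hw_status
do_fw_memo_action(struct __vxge_hw_device *hldev, u32 action)
{
	struct __vxge_hw_virtualpath *vpath =
		&hldev->virtual_paths[hldev->first_vp_id];
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	/* data0/data1 carry arguments in and results out; steer_ctrl
	 * returns the raw control word for callers that need it. */
	return vxge_hw_vpath_fw_api(vpath, action,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
		0, &data0, &data1, &steer_ctrl);
}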
@@ -3013,63 +3662,38 @@ exit:
3013 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables 3662 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014 */ 3663 */
3015enum vxge_hw_status 3664enum vxge_hw_status
3016__vxge_hw_vpath_rts_table_get( 3665__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3017 struct __vxge_hw_vpath_handle *vp, 3666 u32 action, u32 rts_table, u32 offset,
3018 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) 3667 u64 *data0, u64 *data1)
3019{ 3668{
3020 u64 val64; 3669 enum vxge_hw_status status;
3021 struct __vxge_hw_virtualpath *vpath; 3670 u64 steer_ctrl = 0;
3022 struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024 enum vxge_hw_status status = VXGE_HW_OK;
3025 3671
3026 if (vp == NULL) { 3672 if (vp == NULL) {
3027 status = VXGE_HW_ERR_INVALID_HANDLE; 3673 status = VXGE_HW_ERR_INVALID_HANDLE;
3028 goto exit; 3674 goto exit;
3029 } 3675 }
3030 3676
3031 vpath = vp->vpath;
3032 vp_reg = vpath->vp_reg;
3033
3034 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039 if ((rts_table == 3677 if ((rts_table ==
3040 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || 3678 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041 (rts_table == 3679 (rts_table ==
3042 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || 3680 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043 (rts_table == 3681 (rts_table ==
3044 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || 3682 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045 (rts_table == 3683 (rts_table ==
3046 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { 3684 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; 3685 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048 } 3686 }
3049 3687
3050 status = __vxge_hw_pio_mem_write64(val64, 3688 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3051 &vp_reg->rts_access_steer_ctrl, 3689 data0, data1, &steer_ctrl);
3052 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053 vpath->hldev->config.device_poll_millis);
3054
3055 if (status != VXGE_HW_OK) 3690 if (status != VXGE_HW_OK)
3056 goto exit; 3691 goto exit;
3057 3692
3058	val64 = readq(&vp_reg->rts_access_steer_ctrl);		3693	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3059 3694 (rts_table !=
3060 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { 3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3061 3696 *data1 = 0;
3062 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064 if ((rts_table ==
3065 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066 (rts_table ==
3067 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068 *data2 = readq(&vp_reg->rts_access_steer_data1);
3069 }
3070 status = VXGE_HW_OK;
3071 } else
3072 status = VXGE_HW_FAIL;
3073exit: 3697exit:
3074 return status; 3698 return status;
3075} 3699}
@@ -3078,107 +3702,27 @@ exit:
3078 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables 3702 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079 */ 3703 */
3080enum vxge_hw_status 3704enum vxge_hw_status
3081__vxge_hw_vpath_rts_table_set( 3705__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3082 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, 3706 u32 rts_table, u32 offset, u64 steer_data0,
3083 u32 offset, u64 data1, u64 data2) 3707 u64 steer_data1)
3084{ 3708{
3085 u64 val64; 3709 u64 data0, data1 = 0, steer_ctrl = 0;
3086 struct __vxge_hw_virtualpath *vpath; 3710 enum vxge_hw_status status;
3087 enum vxge_hw_status status = VXGE_HW_OK;
3088 struct vxge_hw_vpath_reg __iomem *vp_reg;
3089 3711
3090 if (vp == NULL) { 3712 if (vp == NULL) {
3091 status = VXGE_HW_ERR_INVALID_HANDLE; 3713 status = VXGE_HW_ERR_INVALID_HANDLE;
3092 goto exit; 3714 goto exit;
3093 } 3715 }
3094 3716
3095 vpath = vp->vpath; 3717 data0 = steer_data0;
3096 vp_reg = vpath->vp_reg;
3097
3098 writeq(data1, &vp_reg->rts_access_steer_data0);
3099 wmb();
3100 3718
3101 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3719 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102 (rts_table == 3720 (rts_table ==
3103 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { 3721 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3104 writeq(data2, &vp_reg->rts_access_steer_data1); 3722 data1 = steer_data1;
3105 wmb();
3106 }
3107
3108 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3109 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3110 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113 status = __vxge_hw_pio_mem_write64(val64,
3114 &vp_reg->rts_access_steer_ctrl,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116 vpath->hldev->config.device_poll_millis);
3117
3118 if (status != VXGE_HW_OK)
3119 goto exit;
3120
3121 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124 status = VXGE_HW_OK;
3125 else
3126 status = VXGE_HW_FAIL;
3127exit:
3128 return status;
3129}
3130
3131/*
3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3133 * from MAC address table.
3134 */
3135static enum vxge_hw_status
3136__vxge_hw_vpath_addr_get(
3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139{
3140 u32 i;
3141 u64 val64;
3142 u64 data1 = 0ULL;
3143 u64 data2 = 0ULL;
3144 enum vxge_hw_status status = VXGE_HW_OK;
3145
3146 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153 status = __vxge_hw_pio_mem_write64(val64,
3154 &vpath_reg->rts_access_steer_ctrl,
3155 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158 if (status != VXGE_HW_OK)
3159 goto exit;
3160
3161 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164 3723
3165 data1 = readq(&vpath_reg->rts_access_steer_data0); 3724 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3166 data2 = readq(&vpath_reg->rts_access_steer_data1); 3725 &data0, &data1, &steer_ctrl);
3167
3168 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170 data2);
3171
3172 for (i = ETH_ALEN; i > 0; i--) {
3173 macaddr[i-1] = (u8)(data1 & 0xFF);
3174 data1 >>= 8;
3175
3176 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177 data2 >>= 8;
3178 }
3179 status = VXGE_HW_OK;
3180 } else
3181 status = VXGE_HW_FAIL;
3182exit: 3726exit:
3183 return status; 3727 return status;
3184} 3728}
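For the DA table the two data words encode a MAC address and its mask, most
significant byte first; the removed __vxge_hw_vpath_addr_get() above shows
the byte-by-byte decode that callers of the slimmed-down
__vxge_hw_vpath_rts_table_get() now perform themselves. A sketch mirroring
the removed code; vxge_mac_from_da_entry() is a hypothetical name:

/* Unpack a DA-table entry returned in data0/data1 into a MAC address
 * and mask, as the removed __vxge_hw_vpath_addr_get() did. */
static void vxge_mac_from_da_entry(u64 data0, u64 data1,
				   u8 macaddr[ETH_ALEN],
				   u8 macaddr_mask[ETH_ALEN])
{
	int i;

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data1);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i - 1] = (u8)(data0 & 0xFF);
		data0 >>= 8;
		macaddr_mask[i - 1] = (u8)(data1 & 0xFF);
		data1 >>= 8;
	}
}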
@@ -3204,6 +3748,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3204 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, 3748 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, 3749 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206 0, &data0, &data1); 3750 0, &data0, &data1);
3751 if (status != VXGE_HW_OK)
3752 goto exit;
3207 3753
3208 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | 3754 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3209 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); 3755 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -3771,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3771 vp_reg = vpath->vp_reg; 4317 vp_reg = vpath->vp_reg;
3772 config = vpath->vp_config; 4318 config = vpath->vp_config;
3773 4319
3774 writeq((u64)0, &vp_reg->tim_dest_addr); 4320 writeq(0, &vp_reg->tim_dest_addr);
3775 writeq((u64)0, &vp_reg->tim_vpath_map); 4321 writeq(0, &vp_reg->tim_vpath_map);
3776 writeq((u64)0, &vp_reg->tim_bitmap); 4322 writeq(0, &vp_reg->tim_bitmap);
3777 writeq((u64)0, &vp_reg->tim_remap); 4323 writeq(0, &vp_reg->tim_remap);
3778 4324
3779 if (config->ring.enable == VXGE_HW_RING_ENABLE) 4325 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3780 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( 4326 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3876,8 +4422,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3876 4422
3877 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { 4423 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3878 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); 4424 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3879 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( 4425 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
3880 config->tti.util_sel);
3881 } 4426 }
3882 4427
3883 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { 4428 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3981,8 +4526,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3981 4526
3982 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { 4527 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3983 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); 4528 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3984 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( 4529 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
3985 config->rti.util_sel);
3986 } 4530 }
3987 4531
3988 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { 4532 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4003,11 +4547,15 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4003 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); 4547 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4004 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); 4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4005 4549
4550 val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4551 val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4552 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4553 writeq(val64, &vp_reg->tim_wrkld_clc);
4554
4006 return status; 4555 return status;
4007} 4556}
4008 4557
4009void 4558void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4010vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4011{ 4559{
4012 struct __vxge_hw_virtualpath *vpath; 4560 struct __vxge_hw_virtualpath *vpath;
4013 struct vxge_hw_vpath_reg __iomem *vp_reg; 4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4018,17 +4566,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4018 vp_reg = vpath->vp_reg; 4566 vp_reg = vpath->vp_reg;
4019 config = vpath->vp_config; 4567 config = vpath->vp_config;
4020 4568
4021 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4022 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4023 4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4024 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { 4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4025 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4026 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4027 writeq(val64,
4028 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4029 }
4030 } 4575 }
4031} 4576}
4577
4032/* 4578/*
4033 * __vxge_hw_vpath_initialize 4579 * __vxge_hw_vpath_initialize
4034 * This routine is the final phase of init which initializes the 4580 * This routine is the final phase of init which initializes the
@@ -4052,22 +4598,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4052 vp_reg = vpath->vp_reg; 4598 vp_reg = vpath->vp_reg;
4053 4599
4054 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); 4600 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4055
4056 if (status != VXGE_HW_OK) 4601 if (status != VXGE_HW_OK)
4057 goto exit; 4602 goto exit;
4058 4603
4059 status = __vxge_hw_vpath_mac_configure(hldev, vp_id); 4604 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4060
4061 if (status != VXGE_HW_OK) 4605 if (status != VXGE_HW_OK)
4062 goto exit; 4606 goto exit;
4063 4607
4064 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); 4608 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4065
4066 if (status != VXGE_HW_OK) 4609 if (status != VXGE_HW_OK)
4067 goto exit; 4610 goto exit;
4068 4611
4069 status = __vxge_hw_vpath_tim_configure(hldev, vp_id); 4612 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4070
4071 if (status != VXGE_HW_OK) 4613 if (status != VXGE_HW_OK)
4072 goto exit; 4614 goto exit;
4073 4615
@@ -4075,7 +4617,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4075 4617
4076 /* Get MRRS value from device control */ 4618 /* Get MRRS value from device control */
4077 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); 4619 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4078
4079 if (status == VXGE_HW_OK) { 4620 if (status == VXGE_HW_OK) {
4080 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; 4621 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4081 val64 &= 4622 val64 &=
@@ -4099,6 +4640,28 @@ exit:
4099} 4640}
4100 4641
4101/* 4642/*
4643 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4644 * This routine closes all channels it opened and frees up memory
4645 */
4646static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4647{
4648 struct __vxge_hw_virtualpath *vpath;
4649
4650 vpath = &hldev->virtual_paths[vp_id];
4651
4652 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4653 goto exit;
4654
4655 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4656 vpath->hldev->tim_int_mask1, vpath->vp_id);
4657 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4658
4659 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4660exit:
4661 return;
4662}
4663
4664/*
4102 * __vxge_hw_vp_initialize - Initialize Virtual Path structure 4665 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4103 * This routine is the initial phase of init which resets the vpath and 4666 * This routine is the initial phase of init which resets the vpath and
4104 * initializes the software support structures. 4667 * initializes the software support structures.
@@ -4117,6 +4680,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4117 4680
4118 vpath = &hldev->virtual_paths[vp_id]; 4681 vpath = &hldev->virtual_paths[vp_id];
4119 4682
4683 spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4120 vpath->vp_id = vp_id; 4684 vpath->vp_id = vp_id;
4121 vpath->vp_open = VXGE_HW_VP_OPEN; 4685 vpath->vp_open = VXGE_HW_VP_OPEN;
4122 vpath->hldev = hldev; 4686 vpath->hldev = hldev;
@@ -4127,14 +4691,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4127 __vxge_hw_vpath_reset(hldev, vp_id); 4691 __vxge_hw_vpath_reset(hldev, vp_id);
4128 4692
4129 status = __vxge_hw_vpath_reset_check(vpath); 4693 status = __vxge_hw_vpath_reset_check(vpath);
4130
4131 if (status != VXGE_HW_OK) { 4694 if (status != VXGE_HW_OK) {
4132 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4695 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4133 goto exit; 4696 goto exit;
4134 } 4697 }
4135 4698
4136 status = __vxge_hw_vpath_mgmt_read(hldev, vpath); 4699 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4137
4138 if (status != VXGE_HW_OK) { 4700 if (status != VXGE_HW_OK) {
4139 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4701 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4140 goto exit; 4702 goto exit;
@@ -4148,7 +4710,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4148 hldev->tim_int_mask1, vp_id); 4710 hldev->tim_int_mask1, vp_id);
4149 4711
4150 status = __vxge_hw_vpath_initialize(hldev, vp_id); 4712 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4151
4152 if (status != VXGE_HW_OK) 4713 if (status != VXGE_HW_OK)
4153 __vxge_hw_vp_terminate(hldev, vp_id); 4714 __vxge_hw_vp_terminate(hldev, vp_id);
4154exit: 4715exit:
@@ -4156,29 +4717,6 @@ exit:
4156} 4717}
4157 4718
4158/* 4719/*
4159 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4160 * This routine closes all channels it opened and frees up memory
4161 */
4162static void
4163__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4164{
4165 struct __vxge_hw_virtualpath *vpath;
4166
4167 vpath = &hldev->virtual_paths[vp_id];
4168
4169 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4170 goto exit;
4171
4172 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4173 vpath->hldev->tim_int_mask1, vpath->vp_id);
4174 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4175
4176 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4177exit:
4178 return;
4179}
4180
4181/*
4182 * vxge_hw_vpath_mtu_set - Set MTU. 4720 * vxge_hw_vpath_mtu_set - Set MTU.
4183 * Set new MTU value. Example, to use jumbo frames: 4721 * Set new MTU value. Example, to use jumbo frames:
4184 * vxge_hw_vpath_mtu_set(my_device, 9600); 4722 * vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4215,6 +4753,64 @@ exit:
4215} 4753}
4216 4754
4217/* 4755/*
4756 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4757 * Enable the DMA vpath statistics. Call this function to re-enable the
4758 * adapter to update statistics in host memory.
4759 */
4760static enum vxge_hw_status
4761vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4762{
4763 enum vxge_hw_status status = VXGE_HW_OK;
4764 struct __vxge_hw_virtualpath *vpath;
4765
4766 vpath = vp->vpath;
4767
4768 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4769 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4770 goto exit;
4771 }
4772
4773 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4774 sizeof(struct vxge_hw_vpath_stats_hw_info));
4775
4776 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4777exit:
4778 return status;
4779}
4780
4781/*
4782 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4783 * This function allocates a block from block pool or from the system
4784 */
4785static struct __vxge_hw_blockpool_entry *
4786__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4787{
4788 struct __vxge_hw_blockpool_entry *entry = NULL;
4789 struct __vxge_hw_blockpool *blockpool;
4790
4791 blockpool = &devh->block_pool;
4792
4793 if (size == blockpool->block_size) {
4794
4795 if (!list_empty(&blockpool->free_block_list))
4796 entry = (struct __vxge_hw_blockpool_entry *)
4797 list_first_entry(&blockpool->free_block_list,
4798 struct __vxge_hw_blockpool_entry,
4799 item);
4800
4801 if (entry != NULL) {
4802 list_del(&entry->item);
4803 blockpool->pool_size--;
4804 }
4805 }
4806
4807 if (entry != NULL)
4808 __vxge_hw_blockpool_blocks_add(blockpool);
4809
4810 return entry;
4811}
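The allocator only serves whole VXGE_HW_BLOCK_SIZE blocks; any other size
misses the pool and returns NULL. Its one caller in this patch is the stats
block in vxge_hw_vpath_open(), which reads:

/* From vxge_hw_vpath_open() below: a NULL return is out-of-memory. */
vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
					VXGE_HW_BLOCK_SIZE);
if (vpath->stats_block == NULL) {
	status = VXGE_HW_ERR_OUT_OF_MEMORY;
	goto vpath_open_exit8;
}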
4812
4813/*
4218 * vxge_hw_vpath_open - Open a virtual path on a given adapter 4814 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4219 * This function is used to open access to virtual path of an 4815 * This function is used to open access to virtual path of an
4220 * adapter for offload, GRO operations. This function returns 4816 * adapter for offload, GRO operations. This function returns
@@ -4238,19 +4834,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4238 4834
4239 status = __vxge_hw_vp_initialize(hldev, attr->vp_id, 4835 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4240 &hldev->config.vp_config[attr->vp_id]); 4836 &hldev->config.vp_config[attr->vp_id]);
4241
4242 if (status != VXGE_HW_OK) 4837 if (status != VXGE_HW_OK)
4243 goto vpath_open_exit1; 4838 goto vpath_open_exit1;
4244 4839
4245 vp = (struct __vxge_hw_vpath_handle *) 4840 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4246 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4247 if (vp == NULL) { 4841 if (vp == NULL) {
4248 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4842 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4249 goto vpath_open_exit2; 4843 goto vpath_open_exit2;
4250 } 4844 }
4251 4845
4252 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4253
4254 vp->vpath = vpath; 4846 vp->vpath = vpath;
4255 4847
4256 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4848 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4273,7 +4865,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4273 4865
4274 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, 4866 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4275 VXGE_HW_BLOCK_SIZE); 4867 VXGE_HW_BLOCK_SIZE);
4276
4277 if (vpath->stats_block == NULL) { 4868 if (vpath->stats_block == NULL) {
4278 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4869 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4279 goto vpath_open_exit8; 4870 goto vpath_open_exit8;
@@ -4332,19 +4923,20 @@ vpath_open_exit1:
4332 * This function initializes the rx descriptor doorbell of a virtual	4923
4333 * path opened earlier.	4924
4334 */ 4925 */
4335void 4926void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4336vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4337{ 4927{
4338 struct __vxge_hw_virtualpath *vpath = NULL; 4928 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4929 struct __vxge_hw_ring *ring = vpath->ringh;
4930 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4339 u64 new_count, val64, val164; 4931 u64 new_count, val64, val164;
4340 struct __vxge_hw_ring *ring;
4341 4932
4342 vpath = vp->vpath; 4933 if (vdev->titan1) {
4343 ring = vpath->ringh; 4934 new_count = readq(&vpath->vp_reg->rxdmem_size);
4935 new_count &= 0x1fff;
4936 } else
4937 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4344 4938
4345 new_count = readq(&vpath->vp_reg->rxdmem_size); 4939 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4346 new_count &= 0x1fff;
4347 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4348 4940
4349 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), 4941 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4350 &vpath->vp_reg->prc_rxd_doorbell); 4942 &vpath->vp_reg->prc_rxd_doorbell);
@@ -4367,6 +4959,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4367} 4959}
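On non-Titan-1 hardware the initial doorbell count is now derived from the
ring configuration instead of being read back from rxdmem_size: ring_blocks
blocks of VXGE_HW_BLOCK_SIZE bytes hold 8-byte descriptor words, hence the
divide by 8. A worked example, assuming the 4096-byte block size the driver
uses elsewhere:

/* With the default of 2 ring blocks (VXGE_HW_DEF_RING_BLOCKS): */
u64 new_count = 2 * 4096 / 8;	/* = 1024 descriptor qwords */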
4368 4960
4369/* 4961/*
4962 * __vxge_hw_blockpool_block_free - Frees a block from block pool
4963 * @devh: Hal device
4964 * @entry: Entry of block to be freed
4965 *
4966 * This function frees a block from block pool
4967 */
4968static void
4969__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4970 struct __vxge_hw_blockpool_entry *entry)
4971{
4972 struct __vxge_hw_blockpool *blockpool;
4973
4974 blockpool = &devh->block_pool;
4975
4976 if (entry->length == blockpool->block_size) {
4977 list_add(&entry->item, &blockpool->free_block_list);
4978 blockpool->pool_size++;
4979 }
4980
4981 __vxge_hw_blockpool_blocks_remove(blockpool);
4982}
4983
4984/*
4370 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open 4985 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
4371 * This function is used to close access to virtual path opened 4986 * This function is used to close access to virtual path opened
4372 * earlier. 4987 * earlier.
@@ -4414,7 +5029,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4414 5029
4415 __vxge_hw_vp_terminate(devh, vp_id); 5030 __vxge_hw_vp_terminate(devh, vp_id);
4416 5031
5032 spin_lock(&vpath->lock);
4417 vpath->vp_open = VXGE_HW_VP_NOT_OPEN; 5033 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
5034 spin_unlock(&vpath->lock);
4418 5035
4419vpath_close_exit: 5036vpath_close_exit:
4420 return status; 5037 return status;
@@ -4515,730 +5132,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4515 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 5132 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4516 &hldev->common_reg->cmn_rsthdlr_cfg1); 5133 &hldev->common_reg->cmn_rsthdlr_cfg1);
4517} 5134}
4518
4519/*
4520 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4521 * Enable the DMA vpath statistics. Call this function to re-enable the
4522 * adapter to update statistics in host memory.
4523 */
4524static enum vxge_hw_status
4525vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4526{
4527 enum vxge_hw_status status = VXGE_HW_OK;
4528 struct __vxge_hw_virtualpath *vpath;
4529
4530 vpath = vp->vpath;
4531
4532 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4533 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4534 goto exit;
4535 }
4536
4537 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4538 sizeof(struct vxge_hw_vpath_stats_hw_info));
4539
4540 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4541exit:
4542 return status;
4543}
4544
4545/*
4546 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4547 * and offset and perform an operation
4548 */
4549static enum vxge_hw_status
4550__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4551 u32 operation, u32 offset, u64 *stat)
4552{
4553 u64 val64;
4554 enum vxge_hw_status status = VXGE_HW_OK;
4555 struct vxge_hw_vpath_reg __iomem *vp_reg;
4556
4557 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4558 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4559 goto vpath_stats_access_exit;
4560 }
4561
4562 vp_reg = vpath->vp_reg;
4563
4564 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4565 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4566 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4567
4568 status = __vxge_hw_pio_mem_write64(val64,
4569 &vp_reg->xmac_stats_access_cmd,
4570 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4571 vpath->hldev->config.device_poll_millis);
4572
4573 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4574 *stat = readq(&vp_reg->xmac_stats_access_data);
4575 else
4576 *stat = 0;
4577
4578vpath_stats_access_exit:
4579 return status;
4580}
4581
4582/*
4583 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4584 */
4585static enum vxge_hw_status
4586__vxge_hw_vpath_xmac_tx_stats_get(
4587 struct __vxge_hw_virtualpath *vpath,
4588 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4589{
4590 u64 *val64;
4591 int i;
4592 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4593 enum vxge_hw_status status = VXGE_HW_OK;
4594
4595 val64 = (u64 *) vpath_tx_stats;
4596
4597 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4598 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4599 goto exit;
4600 }
4601
4602 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4603 status = __vxge_hw_vpath_stats_access(vpath,
4604 VXGE_HW_STATS_OP_READ,
4605 offset, val64);
4606 if (status != VXGE_HW_OK)
4607 goto exit;
4608 offset++;
4609 val64++;
4610 }
4611exit:
4612 return status;
4613}
4614
4615/*
4616 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4617 */
4618static enum vxge_hw_status
4619__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4620 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4621{
4622 u64 *val64;
4623 enum vxge_hw_status status = VXGE_HW_OK;
4624 int i;
4625 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4626 val64 = (u64 *) vpath_rx_stats;
4627
4628 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4629 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4630 goto exit;
4631 }
4632 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4633 status = __vxge_hw_vpath_stats_access(vpath,
4634 VXGE_HW_STATS_OP_READ,
4635 offset >> 3, val64);
4636 if (status != VXGE_HW_OK)
4637 goto exit;
4638
4639 offset += 8;
4640 val64++;
4641 }
4642exit:
4643 return status;
4644}
4645
4646/*
4647 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4648 */
4649static enum vxge_hw_status
4650__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4651 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4652{
4653 u64 val64;
4654 enum vxge_hw_status status = VXGE_HW_OK;
4655 struct vxge_hw_vpath_reg __iomem *vp_reg;
4656
4657 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4658 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4659 goto exit;
4660 }
4661 vp_reg = vpath->vp_reg;
4662
4663 val64 = readq(&vp_reg->vpath_debug_stats0);
4664 hw_stats->ini_num_mwr_sent =
4665 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4666
4667 val64 = readq(&vp_reg->vpath_debug_stats1);
4668 hw_stats->ini_num_mrd_sent =
4669 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4670
4671 val64 = readq(&vp_reg->vpath_debug_stats2);
4672 hw_stats->ini_num_cpl_rcvd =
4673 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4674
4675 val64 = readq(&vp_reg->vpath_debug_stats3);
4676 hw_stats->ini_num_mwr_byte_sent =
4677 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4678
4679 val64 = readq(&vp_reg->vpath_debug_stats4);
4680 hw_stats->ini_num_cpl_byte_rcvd =
4681 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4682
4683 val64 = readq(&vp_reg->vpath_debug_stats5);
4684 hw_stats->wrcrdtarb_xoff =
4685 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4686
4687 val64 = readq(&vp_reg->vpath_debug_stats6);
4688 hw_stats->rdcrdtarb_xoff =
4689 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4690
4691 val64 = readq(&vp_reg->vpath_genstats_count01);
4692 hw_stats->vpath_genstats_count0 =
4693 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4694 val64);
4695
4696 val64 = readq(&vp_reg->vpath_genstats_count01);
4697 hw_stats->vpath_genstats_count1 =
4698 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4699 val64);
4700
4701 val64 = readq(&vp_reg->vpath_genstats_count23);
4702 hw_stats->vpath_genstats_count2 =
4703 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4704 val64);
4705
4706 val64 = readq(&vp_reg->vpath_genstats_count01);
4707 hw_stats->vpath_genstats_count3 =
4708 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4709 val64);
4710
4711 val64 = readq(&vp_reg->vpath_genstats_count4);
4712 hw_stats->vpath_genstats_count4 =
4713 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4714 val64);
4715
4716 val64 = readq(&vp_reg->vpath_genstats_count5);
4717 hw_stats->vpath_genstats_count5 =
4718 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4719 val64);
4720
4721 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4722 if (status != VXGE_HW_OK)
4723 goto exit;
4724
4725 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4726 if (status != VXGE_HW_OK)
4727 goto exit;
4728
4729 VXGE_HW_VPATH_STATS_PIO_READ(
4730 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4731
4732 hw_stats->prog_event_vnum0 =
4733 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4734
4735 hw_stats->prog_event_vnum1 =
4736 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4737
4738 VXGE_HW_VPATH_STATS_PIO_READ(
4739 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4740
4741 hw_stats->prog_event_vnum2 =
4742 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4743
4744 hw_stats->prog_event_vnum3 =
4745 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4746
4747 val64 = readq(&vp_reg->rx_multi_cast_stats);
4748 hw_stats->rx_multi_cast_frame_discard =
4749 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4750
4751 val64 = readq(&vp_reg->rx_frm_transferred);
4752 hw_stats->rx_frm_transferred =
4753 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4754
4755 val64 = readq(&vp_reg->rxd_returned);
4756 hw_stats->rxd_returned =
4757 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4758
4759 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4760 hw_stats->rx_mpa_len_fail_frms =
4761 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4762 hw_stats->rx_mpa_mrk_fail_frms =
4763 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4764 hw_stats->rx_mpa_crc_fail_frms =
4765 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4766
4767 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4768 hw_stats->rx_permitted_frms =
4769 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4770 hw_stats->rx_vp_reset_discarded_frms =
4771 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4772 hw_stats->rx_wol_frms =
4773 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4774
4775 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4776 hw_stats->tx_vp_reset_discarded_frms =
4777 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4778 val64);
4779exit:
4780 return status;
4781}
4782
4783
4784static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4785 unsigned long size)
4786{
4787 gfp_t flags;
4788 void *vaddr;
4789
4790 if (in_interrupt())
4791 flags = GFP_ATOMIC | GFP_DMA;
4792 else
4793 flags = GFP_KERNEL | GFP_DMA;
4794
4795 vaddr = kmalloc((size), flags);
4796
4797 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4798}
4799
4800static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4801 struct pci_dev **p_dma_acch)
4802{
4803 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4804 u8 *tmp = (u8 *)vaddr;
4805 tmp -= misaligned;
4806 kfree((void *)tmp);
4807}
4808
4809/*
4810 * __vxge_hw_blockpool_create - Create block pool
4811 */
4812
4813enum vxge_hw_status
4814__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4815 struct __vxge_hw_blockpool *blockpool,
4816 u32 pool_size,
4817 u32 pool_max)
4818{
4819 u32 i;
4820 struct __vxge_hw_blockpool_entry *entry = NULL;
4821 void *memblock;
4822 dma_addr_t dma_addr;
4823 struct pci_dev *dma_handle;
4824 struct pci_dev *acc_handle;
4825 enum vxge_hw_status status = VXGE_HW_OK;
4826
4827 if (blockpool == NULL) {
4828 status = VXGE_HW_FAIL;
4829 goto blockpool_create_exit;
4830 }
4831
4832 blockpool->hldev = hldev;
4833 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4834 blockpool->pool_size = 0;
4835 blockpool->pool_max = pool_max;
4836 blockpool->req_out = 0;
4837
4838 INIT_LIST_HEAD(&blockpool->free_block_list);
4839 INIT_LIST_HEAD(&blockpool->free_entry_list);
4840
4841 for (i = 0; i < pool_size + pool_max; i++) {
4842 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4843 GFP_KERNEL);
4844 if (entry == NULL) {
4845 __vxge_hw_blockpool_destroy(blockpool);
4846 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4847 goto blockpool_create_exit;
4848 }
4849 list_add(&entry->item, &blockpool->free_entry_list);
4850 }
4851
4852 for (i = 0; i < pool_size; i++) {
4853
4854 memblock = vxge_os_dma_malloc(
4855 hldev->pdev,
4856 VXGE_HW_BLOCK_SIZE,
4857 &dma_handle,
4858 &acc_handle);
4859
4860 if (memblock == NULL) {
4861 __vxge_hw_blockpool_destroy(blockpool);
4862 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4863 goto blockpool_create_exit;
4864 }
4865
4866 dma_addr = pci_map_single(hldev->pdev, memblock,
4867 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4868
4869 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4870 dma_addr))) {
4871
4872 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4873 __vxge_hw_blockpool_destroy(blockpool);
4874 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4875 goto blockpool_create_exit;
4876 }
4877
4878 if (!list_empty(&blockpool->free_entry_list))
4879 entry = (struct __vxge_hw_blockpool_entry *)
4880 list_first_entry(&blockpool->free_entry_list,
4881 struct __vxge_hw_blockpool_entry,
4882 item);
4883
4884 if (entry == NULL)
4885 entry =
4886 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4887 GFP_KERNEL);
4888 if (entry != NULL) {
4889 list_del(&entry->item);
4890 entry->length = VXGE_HW_BLOCK_SIZE;
4891 entry->memblock = memblock;
4892 entry->dma_addr = dma_addr;
4893 entry->acc_handle = acc_handle;
4894 entry->dma_handle = dma_handle;
4895 list_add(&entry->item,
4896 &blockpool->free_block_list);
4897 blockpool->pool_size++;
4898 } else {
4899 __vxge_hw_blockpool_destroy(blockpool);
4900 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4901 goto blockpool_create_exit;
4902 }
4903 }
4904
4905blockpool_create_exit:
4906 return status;
4907}
4908
4909/*
4910 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4911 */
4912
4913void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4914{
4915
4916 struct __vxge_hw_device *hldev;
4917 struct list_head *p, *n;
4918 u16 ret;
4919
4920 if (blockpool == NULL) {
4921 ret = 1;
4922 goto exit;
4923 }
4924
4925 hldev = blockpool->hldev;
4926
4927 list_for_each_safe(p, n, &blockpool->free_block_list) {
4928
4929 pci_unmap_single(hldev->pdev,
4930 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4931 ((struct __vxge_hw_blockpool_entry *)p)->length,
4932 PCI_DMA_BIDIRECTIONAL);
4933
4934 vxge_os_dma_free(hldev->pdev,
4935 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4936 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4937
4938 list_del(
4939 &((struct __vxge_hw_blockpool_entry *)p)->item);
4940 kfree(p);
4941 blockpool->pool_size--;
4942 }
4943
4944 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4945 list_del(
4946 &((struct __vxge_hw_blockpool_entry *)p)->item);
4947 kfree((void *)p);
4948 }
4949 ret = 0;
4950exit:
4951 return;
4952}
4953
4954/*
4955 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4956 */
4957static
4958void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4959{
4960 u32 nreq = 0, i;
4961
4962 if ((blockpool->pool_size + blockpool->req_out) <
4963 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4964 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4965 blockpool->req_out += nreq;
4966 }
4967
4968 for (i = 0; i < nreq; i++)
4969 vxge_os_dma_malloc_async(
4970 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4971 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4972}
4973
4974/*
4975 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4976 */
4977static
4978void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4979{
4980 struct list_head *p, *n;
4981
4982 list_for_each_safe(p, n, &blockpool->free_block_list) {
4983
4984 if (blockpool->pool_size < blockpool->pool_max)
4985 break;
4986
4987 pci_unmap_single(
4988 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4989 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4990 ((struct __vxge_hw_blockpool_entry *)p)->length,
4991 PCI_DMA_BIDIRECTIONAL);
4992
4993 vxge_os_dma_free(
4994 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4995 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4996 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4997
4998 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4999
5000 list_add(p, &blockpool->free_entry_list);
5001
5002 blockpool->pool_size--;
5003
5004 }
5005}
5006
5007/*
5008 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5009 * Adds a block to block pool
5010 */
5011static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5012 void *block_addr,
5013 u32 length,
5014 struct pci_dev *dma_h,
5015 struct pci_dev *acc_handle)
5016{
5017 struct __vxge_hw_blockpool *blockpool;
5018 struct __vxge_hw_blockpool_entry *entry = NULL;
5019 dma_addr_t dma_addr;
5020 enum vxge_hw_status status = VXGE_HW_OK;
5021 u32 req_out;
5022
5023 blockpool = &devh->block_pool;
5024
5025 if (block_addr == NULL) {
5026 blockpool->req_out--;
5027 status = VXGE_HW_FAIL;
5028 goto exit;
5029 }
5030
5031 dma_addr = pci_map_single(devh->pdev, block_addr, length,
5032 PCI_DMA_BIDIRECTIONAL);
5033
5034 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5035
5036 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5037 blockpool->req_out--;
5038 status = VXGE_HW_FAIL;
5039 goto exit;
5040 }
5041
5042
5043 if (!list_empty(&blockpool->free_entry_list))
5044 entry = (struct __vxge_hw_blockpool_entry *)
5045 list_first_entry(&blockpool->free_entry_list,
5046 struct __vxge_hw_blockpool_entry,
5047 item);
5048
5049 if (entry == NULL)
5050 entry = (struct __vxge_hw_blockpool_entry *)
5051 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5052 else
5053 list_del(&entry->item);
5054
5055 if (entry != NULL) {
5056 entry->length = length;
5057 entry->memblock = block_addr;
5058 entry->dma_addr = dma_addr;
5059 entry->acc_handle = acc_handle;
5060 entry->dma_handle = dma_h;
5061 list_add(&entry->item, &blockpool->free_block_list);
5062 blockpool->pool_size++;
5063 status = VXGE_HW_OK;
5064 } else
5065 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5066
5067 blockpool->req_out--;
5068
5069 req_out = blockpool->req_out;
5070exit:
5071 return;
5072}
5073
5074/*
5075 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5076 * Allocates a block of memory of given size, either from block pool
5077 * or by calling vxge_os_dma_malloc()
5078 */
5079void *
5080__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5081 struct vxge_hw_mempool_dma *dma_object)
5082{
5083 struct __vxge_hw_blockpool_entry *entry = NULL;
5084 struct __vxge_hw_blockpool *blockpool;
5085 void *memblock = NULL;
5086 enum vxge_hw_status status = VXGE_HW_OK;
5087
5088 blockpool = &devh->block_pool;
5089
5090 if (size != blockpool->block_size) {
5091
5092 memblock = vxge_os_dma_malloc(devh->pdev, size,
5093 &dma_object->handle,
5094 &dma_object->acc_handle);
5095
5096 if (memblock == NULL) {
5097 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5098 goto exit;
5099 }
5100
5101 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5102 PCI_DMA_BIDIRECTIONAL);
5103
5104 if (unlikely(pci_dma_mapping_error(devh->pdev,
5105 dma_object->addr))) {
5106 vxge_os_dma_free(devh->pdev, memblock,
5107 &dma_object->acc_handle);
5108 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5109 goto exit;
5110 }
5111
5112 } else {
5113
5114 if (!list_empty(&blockpool->free_block_list))
5115 entry = (struct __vxge_hw_blockpool_entry *)
5116 list_first_entry(&blockpool->free_block_list,
5117 struct __vxge_hw_blockpool_entry,
5118 item);
5119
5120 if (entry != NULL) {
5121 list_del(&entry->item);
5122 dma_object->addr = entry->dma_addr;
5123 dma_object->handle = entry->dma_handle;
5124 dma_object->acc_handle = entry->acc_handle;
5125 memblock = entry->memblock;
5126
5127 list_add(&entry->item,
5128 &blockpool->free_entry_list);
5129 blockpool->pool_size--;
5130 }
5131
5132 if (memblock != NULL)
5133 __vxge_hw_blockpool_blocks_add(blockpool);
5134 }
5135exit:
5136 return memblock;
5137}
5138
5139/*
5140 * __vxge_hw_blockpool_free - Frees the memory allocated with
5141 * __vxge_hw_blockpool_malloc
5142 */
5143void
5144__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5145 void *memblock, u32 size,
5146 struct vxge_hw_mempool_dma *dma_object)
5147{
5148 struct __vxge_hw_blockpool_entry *entry = NULL;
5149 struct __vxge_hw_blockpool *blockpool;
5150 enum vxge_hw_status status = VXGE_HW_OK;
5151
5152 blockpool = &devh->block_pool;
5153
5154 if (size != blockpool->block_size) {
5155 pci_unmap_single(devh->pdev, dma_object->addr, size,
5156 PCI_DMA_BIDIRECTIONAL);
5157 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5158 } else {
5159
5160 if (!list_empty(&blockpool->free_entry_list))
5161 entry = (struct __vxge_hw_blockpool_entry *)
5162 list_first_entry(&blockpool->free_entry_list,
5163 struct __vxge_hw_blockpool_entry,
5164 item);
5165
5166 if (entry == NULL)
5167 entry = (struct __vxge_hw_blockpool_entry *)
5168 vmalloc(sizeof(
5169 struct __vxge_hw_blockpool_entry));
5170 else
5171 list_del(&entry->item);
5172
5173 if (entry != NULL) {
5174 entry->length = size;
5175 entry->memblock = memblock;
5176 entry->dma_addr = dma_object->addr;
5177 entry->acc_handle = dma_object->acc_handle;
5178 entry->dma_handle = dma_object->handle;
5179 list_add(&entry->item,
5180 &blockpool->free_block_list);
5181 blockpool->pool_size++;
5182 status = VXGE_HW_OK;
5183 } else
5184 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5185
5186 if (status == VXGE_HW_OK)
5187 __vxge_hw_blockpool_blocks_remove(blockpool);
5188 }
5189}
5190
5191/*
5192 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5193 * This function allocates a block from block pool or from the system
5194 */
5195struct __vxge_hw_blockpool_entry *
5196__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5197{
5198 struct __vxge_hw_blockpool_entry *entry = NULL;
5199 struct __vxge_hw_blockpool *blockpool;
5200
5201 blockpool = &devh->block_pool;
5202
5203 if (size == blockpool->block_size) {
5204
5205 if (!list_empty(&blockpool->free_block_list))
5206 entry = (struct __vxge_hw_blockpool_entry *)
5207 list_first_entry(&blockpool->free_block_list,
5208 struct __vxge_hw_blockpool_entry,
5209 item);
5210
5211 if (entry != NULL) {
5212 list_del(&entry->item);
5213 blockpool->pool_size--;
5214 }
5215 }
5216
5217 if (entry != NULL)
5218 __vxge_hw_blockpool_blocks_add(blockpool);
5219
5220 return entry;
5221}
5222
5223/*
5224 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5225 * @devh: Hal device
5226 * @entry: Entry of block to be freed
5227 *
5228 * This function frees a block from block pool
5229 */
5230void
5231__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5232 struct __vxge_hw_blockpool_entry *entry)
5233{
5234 struct __vxge_hw_blockpool *blockpool;
5235
5236 blockpool = &devh->block_pool;
5237
5238 if (entry->length == blockpool->block_size) {
5239 list_add(&entry->item, &blockpool->free_block_list);
5240 blockpool->pool_size++;
5241 }
5242
5243 __vxge_hw_blockpool_blocks_remove(blockpool);
5244}
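Taken together, the block-pool routines above form a small allocator with
two paths. A hedged end-to-end sketch composed only of functions defined in
this file; error handling is elided and hldev, pool_size, pool_max and size
are assumed in scope:

struct __vxge_hw_blockpool_entry *blk;
struct vxge_hw_mempool_dma dma;
void *mem;

__vxge_hw_blockpool_create(hldev, &hldev->block_pool, pool_size, pool_max);

/* Whole-block path: entries keep their DMA mapping across reuse. */
blk = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE);
__vxge_hw_blockpool_block_free(hldev, blk);

/* Arbitrary-size path: block-sized requests hit the pool, any other
 * size falls through to vxge_os_dma_malloc(). */
mem = __vxge_hw_blockpool_malloc(hldev, size, &dma);
__vxge_hw_blockpool_free(hldev, mem, size, &dma);

__vxge_hw_blockpool_destroy(&hldev->block_pool);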
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..e249e288d160 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
20#define VXGE_CACHE_LINE_SIZE 128 20#define VXGE_CACHE_LINE_SIZE 128
21#endif 21#endif
22 22
23#define vxge_os_vaprintf(level, mask, fmt, ...) { \
24 char buff[255]; \
25 snprintf(buff, 255, fmt, __VA_ARGS__); \
26 printk(buff); \
27 printk("\n"); \
28}
29
30#ifndef VXGE_ALIGN 23#ifndef VXGE_ALIGN
31#define VXGE_ALIGN(adrs, size) \ 24#define VXGE_ALIGN(adrs, size) \
32 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) 25 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
36#define VXGE_HW_MAX_MTU 9600 29#define VXGE_HW_MAX_MTU 9600
37#define VXGE_HW_DEFAULT_MTU 1500 30#define VXGE_HW_DEFAULT_MTU 1500
38 31
39#ifdef VXGE_DEBUG_ASSERT 32#define VXGE_HW_MAX_ROM_IMAGES 8
40 33
34struct eprom_image {
35 u8 is_valid:1;
36 u8 index;
37 u8 type;
38 u16 version;
39};
40
41#ifdef VXGE_DEBUG_ASSERT
41/** 42/**
42 * vxge_assert 43 * vxge_assert
43 * @test: C-condition to check 44 * @test: C-condition to check
@@ -48,16 +49,13 @@
48 * compilation 49 * compilation
49 * time. 50 * time.
50 */ 51 */
51#define vxge_assert(test) { \ 52#define vxge_assert(test) BUG_ON(!(test))
52 if (!(test)) \
53 vxge_os_bug("bad cond: "#test" at %s:%d\n", \
54 __FILE__, __LINE__); }
55#else 53#else
56#define vxge_assert(test) 54#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */ 55#endif /* end of VXGE_DEBUG_ASSERT */
58 56
59/** 57/**
60 * enum enum vxge_debug_level 58 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled 59 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors going to be logged out 60 * @VXGE_ERR: all errors going to be logged out
63 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs 61 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
159}; 157};
160 158
161/** 159/**
 160 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
 161 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
162 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
163 * @VXGE_HW_FW_UPGRADE_ERR: upload error
164 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
165 *
166 */
167enum vxge_hw_fw_upgrade_code {
168 VXGE_HW_FW_UPGRADE_OK = 0,
169 VXGE_HW_FW_UPGRADE_DONE = 1,
170 VXGE_HW_FW_UPGRADE_ERR = 2,
171 VXGE_FW_UPGRADE_BYTES2SKIP = 3
172};
173
174/**
 175 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
176 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
177 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
178 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
179 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
 184 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
 185 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
186 */
187enum vxge_hw_fw_upgrade_err_code {
188 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
189 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
190 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
191 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
194 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
196 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
197 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
198};
199
200/**
162 * struct vxge_hw_device_date - Date Format 201 * struct vxge_hw_device_date - Date Format
163 * @day: Day 202 * @day: Day
164 * @month: Month 203 * @month: Month
@@ -275,9 +314,9 @@ struct vxge_hw_ring_config {
275#define VXGE_HW_RING_DEFAULT 1 314#define VXGE_HW_RING_DEFAULT 1
276 315
277 u32 ring_blocks; 316 u32 ring_blocks;
278#define VXGE_HW_MIN_RING_BLOCKS 1 317#define VXGE_HW_MIN_RING_BLOCKS 1
279#define VXGE_HW_MAX_RING_BLOCKS 128 318#define VXGE_HW_MAX_RING_BLOCKS 128
280#define VXGE_HW_DEF_RING_BLOCKS 2 319#define VXGE_HW_DEF_RING_BLOCKS 2
281 320
282 u32 buffer_mode; 321 u32 buffer_mode;
283#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 322#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
465 * See also: vxge_hw_driver_initialize(). 504 * See also: vxge_hw_driver_initialize().
466 */ 505 */
467struct vxge_hw_uld_cbs { 506struct vxge_hw_uld_cbs {
468
469 void (*link_up)(struct __vxge_hw_device *devh); 507 void (*link_up)(struct __vxge_hw_device *devh);
470 void (*link_down)(struct __vxge_hw_device *devh); 508 void (*link_down)(struct __vxge_hw_device *devh);
471 void (*crit_err)(struct __vxge_hw_device *devh, 509 void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
652 struct vxge_hw_vpath_stats_hw_info *hw_stats; 690 struct vxge_hw_vpath_stats_hw_info *hw_stats;
653 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; 691 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
654 struct vxge_hw_vpath_stats_sw_info *sw_stats; 692 struct vxge_hw_vpath_stats_sw_info *sw_stats;
693 spinlock_t lock;
655}; 694};
656 695
657/* 696/*
@@ -661,7 +700,7 @@ struct __vxge_hw_virtualpath {
661 * 700 *
662 * This structure is used to store the callback information. 701 * This structure is used to store the callback information.
663 */ 702 */
664struct __vxge_hw_vpath_handle{ 703struct __vxge_hw_vpath_handle {
665 struct list_head item; 704 struct list_head item;
666 struct __vxge_hw_virtualpath *vpath; 705 struct __vxge_hw_virtualpath *vpath;
667}; 706};
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
674/** 713/**
675 * struct __vxge_hw_device - Hal device object 714 * struct __vxge_hw_device - Hal device object
676 * @magic: Magic Number 715 * @magic: Magic Number
677 * @device_id: PCI Device Id of the adapter
678 * @major_revision: PCI Device major revision
679 * @minor_revision: PCI Device minor revision
680 * @bar0: BAR0 virtual address. 716 * @bar0: BAR0 virtual address.
681 * @pdev: Physical device handle 717 * @pdev: Physical device handle
 682 * @config: Configuration passed by the LL driver at initialization	718
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
688 u32 magic; 724 u32 magic;
689#define VXGE_HW_DEVICE_MAGIC 0x12345678 725#define VXGE_HW_DEVICE_MAGIC 0x12345678
690#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD 726#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
691 u16 device_id;
692 u8 major_revision;
693 u8 minor_revision;
694 void __iomem *bar0; 727 void __iomem *bar0;
695 struct pci_dev *pdev; 728 struct pci_dev *pdev;
696 struct net_device *ndev; 729 struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
731 u32 debug_level; 764 u32 debug_level;
732 u32 level_err; 765 u32 level_err;
733 u32 level_trace; 766 u32 level_trace;
767 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
734}; 768};
735 769
736#define VXGE_HW_INFO_LEN 64 770#define VXGE_HW_INFO_LEN 64
@@ -781,8 +815,8 @@ struct vxge_hw_device_hw_info {
781 u8 serial_number[VXGE_HW_INFO_LEN]; 815 u8 serial_number[VXGE_HW_INFO_LEN];
782 u8 part_number[VXGE_HW_INFO_LEN]; 816 u8 part_number[VXGE_HW_INFO_LEN];
783 u8 product_desc[VXGE_HW_INFO_LEN]; 817 u8 product_desc[VXGE_HW_INFO_LEN];
784 u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 818 u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
785 u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 819 u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
786}; 820};
787 821
788/** 822/**
@@ -829,20 +863,10 @@ struct vxge_hw_device_attr {
829 loc, \ 863 loc, \
830 offset, \ 864 offset, \
831 &val64); \ 865 &val64); \
832 \
833 if (status != VXGE_HW_OK) \ 866 if (status != VXGE_HW_OK) \
834 return status; \ 867 return status; \
835} 868}
836 869
837#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
838 status = __vxge_hw_vpath_stats_access(vpath, \
839 VXGE_HW_STATS_OP_READ, \
840 offset, \
841 &val64); \
842 if (status != VXGE_HW_OK) \
843 return status; \
844}
845
846/* 870/*
847 * struct __vxge_hw_ring - Ring channel. 871 * struct __vxge_hw_ring - Ring channel.
848 * @channel: Channel "base" of this ring, the common part of all HW 872 * @channel: Channel "base" of this ring, the common part of all HW
@@ -1114,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper {
1114 * lookup to determine the transmit port. 1138 * lookup to determine the transmit port.
1115 * 01: Send on physical Port1. 1139 * 01: Send on physical Port1.
1116 * 10: Send on physical Port0. 1140 * 10: Send on physical Port0.
1117 * 11: Send on both ports. 1141 * 11: Send on both ports.
1118 * Bits 18 to 21 - Reserved 1142 * Bits 18 to 21 - Reserved
1119 * Bits 22 to 23 - Gather_Code. This field is set by the host and 1143 * Bits 22 to 23 - Gather_Code. This field is set by the host and
1120 * is used to describe how individual buffers comprise a frame. 1144 * is used to describe how individual buffers comprise a frame.
@@ -1413,12 +1437,12 @@ enum vxge_hw_rth_algoritms {
1413 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). 1437 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1414 */ 1438 */
1415struct vxge_hw_rth_hash_types { 1439struct vxge_hw_rth_hash_types {
1416 u8 hash_type_tcpipv4_en; 1440 u8 hash_type_tcpipv4_en:1,
1417 u8 hash_type_ipv4_en; 1441 hash_type_ipv4_en:1,
1418 u8 hash_type_tcpipv6_en; 1442 hash_type_tcpipv6_en:1,
1419 u8 hash_type_ipv6_en; 1443 hash_type_ipv6_en:1,
1420 u8 hash_type_tcpipv6ex_en; 1444 hash_type_tcpipv6ex_en:1,
1421 u8 hash_type_ipv6ex_en; 1445 hash_type_ipv6ex_en:1;
1422}; 1446};
1423 1447
1424void vxge_hw_device_debug_set( 1448void vxge_hw_device_debug_set(
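
The six standalone u8 flags in vxge_hw_rth_hash_types collapse into single-bit fields sharing one byte, shrinking the struct from six bytes to one without changing how callers assign the flags. A minimal host-side C sketch of the size effect (the struct and field names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct flags_old { uint8_t a, b, c, d, e, f; };              /* one byte per flag */
    struct flags_new { uint8_t a:1, b:1, c:1, d:1, e:1, f:1; };  /* six bits in one byte */

    int main(void)
    {
        /* prints "6 -> 1" on common ABIs */
        printf("%zu -> %zu\n", sizeof(struct flags_old), sizeof(struct flags_new));
        return 0;
    }
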
@@ -1893,6 +1917,15 @@ out:
1893 return vaddr; 1917 return vaddr;
1894} 1918}
1895 1919
1920static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1921 struct pci_dev **p_dma_acch)
1922{
1923 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1924 u8 *tmp = (u8 *)vaddr;
1925 tmp -= misaligned;
1926 kfree((void *)tmp);
1927}
1928
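
The new vxge_os_dma_free() walks the returned pointer back by a stored misalignment before kfree(), which implies the allocation side over-allocates and rounds the pointer up. A hedged sketch of that matching allocator, assuming align is a power of two (the real vxge_os_dma_malloc may differ in details such as where the delta is stashed):

    #include <linux/slab.h>
    #include <linux/kernel.h>

    static void *dma_malloc_sketch(size_t size, unsigned long align,
                                   unsigned long *misaligned)
    {
        u8 *raw = kmalloc(size + align, GFP_KERNEL); /* room to round up */
        u8 *aligned;

        if (!raw)
            return NULL;
        aligned = (u8 *)ALIGN((unsigned long)raw, align);
        *misaligned = aligned - raw;  /* the delta the free path subtracts */
        return aligned;
    }
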
1896/* 1929/*
1897 * __vxge_hw_mempool_item_priv - will return pointer on per item private space 1930 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1898 */ 1931 */
@@ -1962,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
1962void 1995void
1963vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); 1996vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
1964 1997
1965
1966#ifndef readq 1998#ifndef readq
1967static inline u64 readq(void __iomem *addr) 1999static inline u64 readq(void __iomem *addr)
1968{ 2000{
@@ -2000,7 +2032,7 @@ enum vxge_hw_status
2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2032vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001 2033
2002/** 2034/**
2003 * vxge_debug 2035 * vxge_debug_ll
2004 * @level: level of debug verbosity. 2036 * @level: level of debug verbosity.
2005 * @mask: mask for the debug 2037 * @mask: mask for the debug
2006 * @buf: Circular buffer for tracing 2038 * @buf: Circular buffer for tracing
@@ -2012,26 +2044,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2012 * may be compiled out if DEBUG macro was never defined. 2044 * may be compiled out if DEBUG macro was never defined.
2013 * See also: enum vxge_debug_level{}. 2045 * See also: enum vxge_debug_level{}.
2014 */ 2046 */
2015
2016#define vxge_trace_aux(level, mask, fmt, ...) \
2017{\
2018 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2019}
2020
2021#define vxge_debug(module, level, mask, fmt, ...) { \
2022if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2023 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2024 if ((mask & VXGE_DEBUG_MASK) == mask)\
2025 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2026} \
2027}
2028
2029#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) 2047#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2030#define vxge_debug_ll(level, mask, fmt, ...) \ 2048#define vxge_debug_ll(level, mask, fmt, ...) do { \
2031{\ 2049 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2032 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ 2050 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2033} 2051 if ((mask & VXGE_DEBUG_MASK) == mask) \
2034 2052 printk(fmt "\n", __VA_ARGS__); \
2053} while (0)
2035#else 2054#else
2036#define vxge_debug_ll(level, mask, fmt, ...) 2055#define vxge_debug_ll(level, mask, fmt, ...)
2037#endif 2056#endif
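
The rewritten vxge_debug_ll wraps its body in do { ... } while (0) so the macro expands to exactly one statement; the old brace-only form breaks if/else chains once the caller appends a semicolon. A minimal sketch of the hazard:

    #include <stdio.h>

    #define LOG_BRACES(msg)  { printf("%s\n", msg); }             /* fragile */
    #define LOG_DOWHILE(msg) do { printf("%s\n", msg); } while (0)

    int main(void)
    {
        int err = 0;

        /* "if (err) LOG_BRACES("x"); else ..." fails to compile: the ';'
         * after the closing brace ends the if body before the else is seen.
         * The do/while form absorbs the ';' and nests cleanly. */
        if (err)
            LOG_DOWHILE("failed");
        else
            LOG_DOWHILE("ok");
        return 0;
    }
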
@@ -2051,4 +2070,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2051 2070
2052enum vxge_hw_status 2071enum vxge_hw_status
2053__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); 2072__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2073
2074#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2075#define VXGE_HW_MAX_POLLING_COUNT 100
2076
2077void
2078vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2079
2080enum vxge_hw_status
2081vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2082 u32 *minor, u32 *build);
2083
2084enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2085
2086enum vxge_hw_status
2087vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2088 int size);
2089
2090enum vxge_hw_status
2091vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2092 struct eprom_image *eprom_image_data);
2093
2094int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2054#endif 2095#endif
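
The new VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT / VXGE_HW_MAX_POLLING_COUNT pair suggests vxge_hw_device_wait_receive_idle() polls until the receive side reports idle several times in a row, giving up after a bounded number of tries. A hedged sketch of that shape only; device_rx_is_idle() is a hypothetical helper, not a driver function, and the poll interval is a guess:

    #include <linux/delay.h>

    static void wait_receive_idle_sketch(struct __vxge_hw_device *hldev)
    {
        u32 polls = 0, idle = 0;

        while (polls++ < VXGE_HW_MAX_POLLING_COUNT) {
            idle = device_rx_is_idle(hldev) ? idle + 1 : 0; /* hypothetical */
            if (idle >= VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT)
                return;  /* quiet on several successive reads: quiesced */
            msleep(10);
        }
    }
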
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..1dd3a21b3a43 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include<linux/ethtool.h> 14#include <linux/ethtool.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
29 * Return value: 29 * Return value:
30 * 0 on success. 30 * 0 on success.
31 */ 31 */
32
33static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) 32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
34{ 33{
35 /* We currently only support 10Gb/FULL */ 34 /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
 79 * Returns driver specific information like name, version etc. to ethtool. 78 * Returns driver specific information like name, version etc. to ethtool.
80 */ 79 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev, 80static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info) 81 struct ethtool_drvinfo *info)
83{ 82{
84 struct vxgedev *vdev; 83 struct vxgedev *vdev = netdev_priv(dev);
85 vdev = (struct vxgedev *)netdev_priv(dev);
86 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); 84 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
87 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); 85 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
88 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); 86 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
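
netdev_priv() returns void *, so the (struct vxgedev *) casts dropped throughout this patch were always redundant in C. A minimal sketch of the alloc/priv pairing, assuming struct vxgedev is visible from vxge-main.h:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static struct net_device *alloc_sketch(void)
    {
        /* the private area is allocated together with the net_device */
        struct net_device *dev = alloc_etherdev(sizeof(struct vxgedev));
        struct vxgedev *vdev;

        if (!dev)
            return NULL;
        vdev = netdev_priv(dev);  /* void * converts without a cast */
        (void)vdev;
        return dev;
    }
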
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
104 * buffer area. 102 * buffer area.
105 */ 103 */
106static void vxge_ethtool_gregs(struct net_device *dev, 104static void vxge_ethtool_gregs(struct net_device *dev,
107 struct ethtool_regs *regs, void *space) 105 struct ethtool_regs *regs, void *space)
108{ 106{
109 int index, offset; 107 int index, offset;
110 enum vxge_hw_status status; 108 enum vxge_hw_status status;
111 u64 reg; 109 u64 reg;
112 u64 *reg_space = (u64 *) space; 110 u64 *reg_space = (u64 *)space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 111 struct vxgedev *vdev = netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 112 struct __vxge_hw_device *hldev = vdev->devh;
115 pci_get_drvdata(vdev->pdev);
116 113
117 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 114 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
118 regs->version = vdev->pdev->subsystem_device; 115 regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
147 */ 144 */
148static int vxge_ethtool_idnic(struct net_device *dev, u32 data) 145static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
149{ 146{
150 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 147 struct vxgedev *vdev = netdev_priv(dev);
151 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 148 struct __vxge_hw_device *hldev = vdev->devh;
152 pci_get_drvdata(vdev->pdev);
153 149
154 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); 150 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
155 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); 151 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
168 * void 164 * void
169 */ 165 */
170static void vxge_ethtool_getpause_data(struct net_device *dev, 166static void vxge_ethtool_getpause_data(struct net_device *dev,
171 struct ethtool_pauseparam *ep) 167 struct ethtool_pauseparam *ep)
172{ 168{
173 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = netdev_priv(dev);
174 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 170 struct __vxge_hw_device *hldev = vdev->devh;
175 pci_get_drvdata(vdev->pdev);
176 171
177 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); 172 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
178} 173}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
188 * int, returns 0 on Success 183 * int, returns 0 on Success
189 */ 184 */
190static int vxge_ethtool_setpause_data(struct net_device *dev, 185static int vxge_ethtool_setpause_data(struct net_device *dev,
191 struct ethtool_pauseparam *ep) 186 struct ethtool_pauseparam *ep)
192{ 187{
193 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 188 struct vxgedev *vdev = netdev_priv(dev);
194 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 189 struct __vxge_hw_device *hldev = vdev->devh;
195 pci_get_drvdata(vdev->pdev);
196 190
197 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); 191 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
198 192
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
209 enum vxge_hw_status status; 203 enum vxge_hw_status status;
210 enum vxge_hw_status swstatus; 204 enum vxge_hw_status swstatus;
211 struct vxge_vpath *vpath = NULL; 205 struct vxge_vpath *vpath = NULL;
212 206 struct vxgedev *vdev = netdev_priv(dev);
213 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 207 struct __vxge_hw_device *hldev = vdev->devh;
214 struct __vxge_hw_device *hldev = vdev->devh;
215 struct vxge_hw_xmac_stats *xmac_stats; 208 struct vxge_hw_xmac_stats *xmac_stats;
216 struct vxge_hw_device_stats_sw_info *sw_stats; 209 struct vxge_hw_device_stats_sw_info *sw_stats;
217 struct vxge_hw_device_stats_hw_info *hw_stats; 210 struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
574 kfree(hw_stats); 567 kfree(hw_stats);
575} 568}
576 569
577static void vxge_ethtool_get_strings(struct net_device *dev, 570static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
578 u32 stringset, u8 *data) 571 u8 *data)
579{ 572{
580 int stat_size = 0; 573 int stat_size = 0;
581 int i, j; 574 int i, j;
582 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 575 struct vxgedev *vdev = netdev_priv(dev);
583 switch (stringset) { 576 switch (stringset) {
584 case ETH_SS_STATS: 577 case ETH_SS_STATS:
585 vxge_add_string("VPATH STATISTICS%s\t\t\t", 578 vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
1066 1059
1067static int vxge_ethtool_get_regs_len(struct net_device *dev) 1060static int vxge_ethtool_get_regs_len(struct net_device *dev)
1068{ 1061{
1069 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1062 struct vxgedev *vdev = netdev_priv(dev);
1070 1063
1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 1064 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1072} 1065}
1073 1066
1074static u32 vxge_get_rx_csum(struct net_device *dev) 1067static u32 vxge_get_rx_csum(struct net_device *dev)
1075{ 1068{
1076 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1069 struct vxgedev *vdev = netdev_priv(dev);
1077 1070
1078 return vdev->rx_csum; 1071 return vdev->rx_csum;
1079} 1072}
1080 1073
1081static int vxge_set_rx_csum(struct net_device *dev, u32 data) 1074static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1082{ 1075{
1083 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1076 struct vxgedev *vdev = netdev_priv(dev);
1084 1077
1085 if (data) 1078 if (data)
1086 vdev->rx_csum = 1; 1079 vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1102 1095
1103static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) 1096static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1104{ 1097{
1105 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1098 struct vxgedev *vdev = netdev_priv(dev);
1106 1099
1107 switch (sset) { 1100 switch (sset) {
1108 case ETH_SS_STATS: 1101 case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1119 } 1112 }
1120} 1113}
1121 1114
1115static int vxge_set_flags(struct net_device *dev, u32 data)
1116{
1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status;
1119
1120 if (data & ~ETH_FLAG_RXHASH)
1121 return -EOPNOTSUPP;
1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0;
1125
1126 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1127 return -EINVAL;
1128
1129 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1130
1131 /* Enabling RTH requires some of the logic in vxge_device_register and a
1132 * vpath reset. Due to these restrictions, only allow modification
1133 * while the interface is down.
1134 */
1135 status = vxge_reset_all_vpaths(vdev);
1136 if (status != VXGE_HW_OK) {
1137 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1138 return -EFAULT;
1139 }
1140
1141 if (vdev->devh->config.rth_en)
1142 dev->features |= NETIF_F_RXHASH;
1143 else
1144 dev->features &= ~NETIF_F_RXHASH;
1145
1146 return 0;
1147}
1148
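
Because RTH can only be reconfigured while the interface is down, vxge_set_flags() flips the software flag first, attempts the vpath reset, and flips the flag back if the hardware refuses. A generic sketch of that compensate-on-failure idiom; the type and apply_to_hw() are illustrative, not driver code:

    #include <linux/errno.h>

    struct cfg_sketch { int rth_en; };

    static int set_with_rollback(struct cfg_sketch *cfg, int enable)
    {
        int old = cfg->rth_en;

        cfg->rth_en = enable;
        if (apply_to_hw(cfg) != 0) {  /* hypothetical hardware update */
            cfg->rth_en = old;        /* keep software state truthful */
            return -EIO;
        }
        return 0;
    }
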
1149static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1150{
1151 struct vxgedev *vdev = netdev_priv(dev);
1152
1153 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1154 printk(KERN_INFO "Single Function Mode is required to flash the"
1155 " firmware\n");
1156 return -EINVAL;
1157 }
1158
1159 if (netif_running(dev)) {
1160 printk(KERN_INFO "Interface %s must be down to flash the "
1161 "firmware\n", dev->name);
1162 return -EBUSY;
1163 }
1164
1165 return vxge_fw_upgrade(vdev, parms->data, 1);
1166}
1167
1122static const struct ethtool_ops vxge_ethtool_ops = { 1168static const struct ethtool_ops vxge_ethtool_ops = {
1123 .get_settings = vxge_ethtool_gset, 1169 .get_settings = vxge_ethtool_gset,
1124 .set_settings = vxge_ethtool_sset, 1170 .set_settings = vxge_ethtool_sset,
@@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1131 .get_rx_csum = vxge_get_rx_csum, 1177 .get_rx_csum = vxge_get_rx_csum,
1132 .set_rx_csum = vxge_set_rx_csum, 1178 .set_rx_csum = vxge_set_rx_csum,
1133 .get_tx_csum = ethtool_op_get_tx_csum, 1179 .get_tx_csum = ethtool_op_get_tx_csum,
1134 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1180 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1135 .get_sg = ethtool_op_get_sg, 1181 .get_sg = ethtool_op_get_sg,
1136 .set_sg = ethtool_op_set_sg, 1182 .set_sg = ethtool_op_set_sg,
1137 .get_tso = ethtool_op_get_tso, 1183 .get_tso = ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1140 .phys_id = vxge_ethtool_idnic, 1186 .phys_id = vxge_ethtool_idnic,
1141 .get_sset_count = vxge_ethtool_get_sset_count, 1187 .get_sset_count = vxge_ethtool_get_sset_count,
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1188 .get_ethtool_stats = vxge_get_ethtool_stats,
1189 .set_flags = vxge_set_flags,
1190 .flash_device = vxge_fw_flash,
1143}; 1191};
1144 1192
1145void vxge_initialize_ethtool_ops(struct net_device *ndev) 1193void vxge_initialize_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..537ad874f11c 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
50#include <net/ip.h> 50#include <net/ip.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/etherdevice.h> 52#include <linux/etherdevice.h>
53#include <linux/firmware.h>
54#include <linux/net_tstamp.h>
53#include "vxge-main.h" 55#include "vxge-main.h"
54#include "vxge-reg.h" 56#include "vxge-reg.h"
55 57
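
The new <linux/firmware.h> include indicates the firmware-flash paths load images through the kernel's firmware loader. A hedged sketch of that standard request/release pattern; the wrapper name is illustrative, while request_firmware()/release_firmware() are the real APIs:

    #include <linux/firmware.h>
    #include <linux/pci.h>

    static int load_fw_sketch(struct pci_dev *pdev, const char *name)
    {
        const struct firmware *fw;
        int ret = request_firmware(&fw, name, &pdev->dev);

        if (ret)
            return ret;  /* e.g. image missing from /lib/firmware */
        /* ... hand fw->data / fw->size to the flashing routine ... */
        release_firmware(fw);
        return 0;
    }
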
@@ -82,16 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
82 84
83static struct vxge_drv_config *driver_config; 85static struct vxge_drv_config *driver_config;
84 86
85static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
86 struct macInfo *mac);
87static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
88 struct macInfo *mac);
89static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94
95static inline int is_vxge_card_up(struct vxgedev *vdev) 87static inline int is_vxge_card_up(struct vxgedev *vdev)
96{ 88{
97 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); 89 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -148,11 +140,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
148 * This function is called during interrupt context to notify link up state 140 * This function is called during interrupt context to notify link up state
149 * change. 141 * change.
150 */ 142 */
151static void 143static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
152vxge_callback_link_up(struct __vxge_hw_device *hldev)
153{ 144{
154 struct net_device *dev = hldev->ndev; 145 struct net_device *dev = hldev->ndev;
155 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 146 struct vxgedev *vdev = netdev_priv(dev);
156 147
157 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 148 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
158 vdev->ndev->name, __func__, __LINE__); 149 vdev->ndev->name, __func__, __LINE__);
@@ -172,11 +163,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
172 * This function is called during interrupt context to notify link down state 163 * This function is called during interrupt context to notify link down state
173 * change. 164 * change.
174 */ 165 */
175static void 166static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
176vxge_callback_link_down(struct __vxge_hw_device *hldev)
177{ 167{
178 struct net_device *dev = hldev->ndev; 168 struct net_device *dev = hldev->ndev;
179 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = netdev_priv(dev);
180 170
181 vxge_debug_entryexit(VXGE_TRACE, 171 vxge_debug_entryexit(VXGE_TRACE,
182 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 172 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -195,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
195 * 185 *
196 * Allocate SKB. 186 * Allocate SKB.
197 */ 187 */
198static struct sk_buff* 188static struct sk_buff *
199vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) 189vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
200{ 190{
201 struct net_device *dev; 191 struct net_device *dev;
@@ -369,7 +359,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
369 u8 t_code, void *userdata) 359 u8 t_code, void *userdata)
370{ 360{
371 struct vxge_ring *ring = (struct vxge_ring *)userdata; 361 struct vxge_ring *ring = (struct vxge_ring *)userdata;
372 struct net_device *dev = ring->ndev; 362 struct net_device *dev = ring->ndev;
373 unsigned int dma_sizes; 363 unsigned int dma_sizes;
374 void *first_dtr = NULL; 364 void *first_dtr = NULL;
375 int dtr_cnt = 0; 365 int dtr_cnt = 0;
@@ -413,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
413 403
414 prefetch((char *)skb + L1_CACHE_BYTES); 404 prefetch((char *)skb + L1_CACHE_BYTES);
415 if (unlikely(t_code)) { 405 if (unlikely(t_code)) {
416
417 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != 406 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
418 VXGE_HW_OK) { 407 VXGE_HW_OK) {
419 408
@@ -436,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
436 } 425 }
437 426
438 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { 427 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
439
440 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { 428 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
441
442 if (!vxge_rx_map(dtr, ring)) { 429 if (!vxge_rx_map(dtr, ring)) {
443 skb_put(skb, pkt_length); 430 skb_put(skb, pkt_length);
444 431
@@ -513,6 +500,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
513 else 500 else
514 skb_checksum_none_assert(skb); 501 skb_checksum_none_assert(skb);
515 502
503
504 if (ring->rx_hwts) {
505 struct skb_shared_hwtstamps *skb_hwts;
506 u32 ns = *(u32 *)(skb->head + pkt_length);
507
508 skb_hwts = skb_hwtstamps(skb);
509 skb_hwts->hwtstamp = ns_to_ktime(ns);
510 skb_hwts->syststamp.tv64 = 0;
511 }
512
513 /* rth_hash_type and rth_it_hit are non-zero regardless of
514 * whether rss is enabled. Only the rth_value is zero/non-zero
515 * if rss is disabled/enabled, so key off of that.
516 */
517 if (ext_info.rth_value)
518 skb->rxhash = ext_info.rth_value;
519
516 vxge_rx_complete(ring, skb, ext_info.vlan, 520 vxge_rx_complete(ring, skb, ext_info.vlan,
517 pkt_length, &ext_info); 521 pkt_length, &ext_info);
518 522
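
The receive path now lifts a 32-bit nanosecond stamp that the adapter appends past the payload (possible because FCS stripping is disabled, per the comment in vxge_timestamp_config below) into skb_shared_hwtstamps. Note that a u32 of nanoseconds wraps roughly every 4.29 s (2^32 ns), so the stamp is only meaningful relative to nearby packets. A condensed sketch mirroring the hunk above:

    #include <linux/skbuff.h>
    #include <linux/ktime.h>

    static void stamp_rx_skb(struct sk_buff *skb, u32 pkt_length)
    {
        u32 ns = *(u32 *)(skb->head + pkt_length); /* stamp rides after payload */

        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); /* raw ns -> ktime_t */
    }
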
@@ -660,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
660 return FALSE; 664 return FALSE;
661} 665}
662 666
667static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
668{
669 struct vxge_mac_addrs *new_mac_entry;
670 u8 *mac_address = NULL;
671
672 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
673 return TRUE;
674
675 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
676 if (!new_mac_entry) {
677 vxge_debug_mem(VXGE_ERR,
678 "%s: memory allocation failed",
679 VXGE_DRIVER_NAME);
680 return FALSE;
681 }
682
683 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
684
685 /* Copy the new mac address to the list */
686 mac_address = (u8 *)&new_mac_entry->macaddr;
687 memcpy(mac_address, mac->macaddr, ETH_ALEN);
688
689 new_mac_entry->state = mac->state;
690 vpath->mac_addr_cnt++;
691
692 /* Is this a multicast address */
693 if (0x01 & mac->macaddr[0])
694 vpath->mcast_addr_cnt++;
695
696 return TRUE;
697}
698
699/* Add a mac address to DA table */
700static enum vxge_hw_status
701vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
702{
703 enum vxge_hw_status status = VXGE_HW_OK;
704 struct vxge_vpath *vpath;
705 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
706
707 if (0x01 & mac->macaddr[0]) /* multicast address */
708 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
709 else
710 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
711
712 vpath = &vdev->vpaths[mac->vpath_no];
713 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
714 mac->macmask, duplicate_mode);
715 if (status != VXGE_HW_OK) {
716 vxge_debug_init(VXGE_ERR,
717 "DA config add entry failed for vpath:%d",
718 vpath->device_id);
719 } else
720 if (FALSE == vxge_mac_list_add(vpath, mac))
721 status = -EPERM;
722
723 return status;
724}
725
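
The (0x01 & mac->macaddr[0]) tests above check the Ethernet group (multicast) bit, which is the same predicate the kernel's is_multicast_ether_addr() helper expresses. Minimal sketch:

    #include <linux/types.h>
    #include <linux/etherdevice.h>

    static bool mac_is_group(const u8 *addr)
    {
        return is_multicast_ether_addr(addr); /* true iff addr[0] & 0x01 */
    }
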
663static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) 726static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
664{ 727{
665 struct macInfo mac_info; 728 struct macInfo mac_info;
@@ -670,7 +733,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
670 struct vxge_vpath *vpath = NULL; 733 struct vxge_vpath *vpath = NULL;
671 struct __vxge_hw_device *hldev; 734 struct __vxge_hw_device *hldev;
672 735
673 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 736 hldev = pci_get_drvdata(vdev->pdev);
674 737
675 mac_address = (u8 *)&mac_addr; 738 mac_address = (u8 *)&mac_addr;
676 memcpy(mac_address, mac_header, ETH_ALEN); 739 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +832,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
769 return NETDEV_TX_OK; 832 return NETDEV_TX_OK;
770 } 833 }
771 834
772 vdev = (struct vxgedev *)netdev_priv(dev); 835 vdev = netdev_priv(dev);
773 836
774 if (unlikely(!is_vxge_card_up(vdev))) { 837 if (unlikely(!is_vxge_card_up(vdev))) {
775 vxge_debug_tx(VXGE_ERR, 838 vxge_debug_tx(VXGE_ERR,
@@ -1005,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1005 "%s:%d Exiting...", __func__, __LINE__); 1068 "%s:%d Exiting...", __func__, __LINE__);
1006} 1069}
1007 1070
1071static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1072{
1073 struct list_head *entry, *next;
1074 u64 del_mac = 0;
1075 u8 *mac_address = (u8 *) (&del_mac);
1076
1077 /* Copy the mac address to delete from the list */
1078 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1079
1080 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1081 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1082 list_del(entry);
1083 kfree((struct vxge_mac_addrs *)entry);
1084 vpath->mac_addr_cnt--;
1085
1086 /* Is this a multicast address */
1087 if (0x01 & mac->macaddr[0])
1088 vpath->mcast_addr_cnt--;
1089 return TRUE;
1090 }
1091 }
1092
1093 return FALSE;
1094}
1095
1096/* delete a mac address from DA table */
1097static enum vxge_hw_status
1098vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1099{
1100 enum vxge_hw_status status = VXGE_HW_OK;
1101 struct vxge_vpath *vpath;
1102
1103 vpath = &vdev->vpaths[mac->vpath_no];
1104 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1105 mac->macmask);
1106 if (status != VXGE_HW_OK) {
1107 vxge_debug_init(VXGE_ERR,
1108 "DA config delete entry failed for vpath:%d",
1109 vpath->device_id);
1110 } else
1111 vxge_mac_list_del(vpath, mac);
1112 return status;
1113}
1114
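
vxge_mac_list_del() copies the 6-byte MAC into a zeroed u64 so each list entry can be matched with a single integer compare instead of memcmp(); both sides are built the same way, so byte order is irrelevant. The trick in isolation:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/etherdevice.h>

    static bool mac_matches(u64 stored, const u8 *mac)
    {
        u64 key = 0;

        memcpy(&key, mac, ETH_ALEN); /* low 6 bytes; top 2 stay zero */
        return stored == key;        /* one 64-bit compare per entry */
    }
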
1008/** 1115/**
1009 * vxge_set_multicast 1116 * vxge_set_multicast
1010 * @dev: pointer to the device structure 1117 * @dev: pointer to the device structure
@@ -1034,7 +1141,7 @@ static void vxge_set_multicast(struct net_device *dev)
1034 vxge_debug_entryexit(VXGE_TRACE, 1141 vxge_debug_entryexit(VXGE_TRACE,
1035 "%s:%d", __func__, __LINE__); 1142 "%s:%d", __func__, __LINE__);
1036 1143
1037 vdev = (struct vxgedev *)netdev_priv(dev); 1144 vdev = netdev_priv(dev);
1038 hldev = (struct __vxge_hw_device *)vdev->devh; 1145 hldev = (struct __vxge_hw_device *)vdev->devh;
1039 1146
1040 if (unlikely(!is_vxge_card_up(vdev))) 1147 if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1201,7 @@ static void vxge_set_multicast(struct net_device *dev)
1094 /* Delete previous MC's */ 1201 /* Delete previous MC's */
1095 for (i = 0; i < mcast_cnt; i++) { 1202 for (i = 0; i < mcast_cnt; i++) {
1096 list_for_each_safe(entry, next, list_head) { 1203 list_for_each_safe(entry, next, list_head) {
1097 mac_entry = (struct vxge_mac_addrs *) entry; 1204 mac_entry = (struct vxge_mac_addrs *)entry;
1098 /* Copy the mac address to delete */ 1205 /* Copy the mac address to delete */
1099 mac_address = (u8 *)&mac_entry->macaddr; 1206 mac_address = (u8 *)&mac_entry->macaddr;
1100 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1207 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1244,7 @@ _set_all_mcast:
1137 /* Delete previous MC's */ 1244 /* Delete previous MC's */
1138 for (i = 0; i < mcast_cnt; i++) { 1245 for (i = 0; i < mcast_cnt; i++) {
1139 list_for_each_safe(entry, next, list_head) { 1246 list_for_each_safe(entry, next, list_head) {
1140 mac_entry = (struct vxge_mac_addrs *) entry; 1247 mac_entry = (struct vxge_mac_addrs *)entry;
1141 /* Copy the mac address to delete */ 1248 /* Copy the mac address to delete */
1142 mac_address = (u8 *)&mac_entry->macaddr; 1249 mac_address = (u8 *)&mac_entry->macaddr;
1143 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1250 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1291,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1184{ 1291{
1185 struct sockaddr *addr = p; 1292 struct sockaddr *addr = p;
1186 struct vxgedev *vdev; 1293 struct vxgedev *vdev;
1187 struct __vxge_hw_device *hldev; 1294 struct __vxge_hw_device *hldev;
1188 enum vxge_hw_status status = VXGE_HW_OK; 1295 enum vxge_hw_status status = VXGE_HW_OK;
1189 struct macInfo mac_info_new, mac_info_old; 1296 struct macInfo mac_info_new, mac_info_old;
1190 int vpath_idx = 0; 1297 int vpath_idx = 0;
1191 1298
1192 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1299 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1193 1300
1194 vdev = (struct vxgedev *)netdev_priv(dev); 1301 vdev = netdev_priv(dev);
1195 hldev = vdev->devh; 1302 hldev = vdev->devh;
1196 1303
1197 if (!is_valid_ether_addr(addr->sa_data)) 1304 if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1399,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1399static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1293{ 1400{
1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1401 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1402 struct __vxge_hw_device *hldev;
1295 int msix_id; 1403 int msix_id;
1296 1404
1405 hldev = pci_get_drvdata(vdev->pdev);
1406
1407 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1408
1297 vxge_hw_vpath_intr_disable(vpath->handle); 1409 vxge_hw_vpath_intr_disable(vpath->handle);
1298 1410
1299 if (vdev->config.intr_type == INTA) 1411 if (vdev->config.intr_type == INTA)
@@ -1310,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1310 } 1422 }
1311} 1423}
1312 1424
1425/* list all mac addresses from DA table */
1426static enum vxge_hw_status
1427vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1428{
1429 enum vxge_hw_status status = VXGE_HW_OK;
1430 unsigned char macmask[ETH_ALEN];
1431 unsigned char macaddr[ETH_ALEN];
1432
1433 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1434 macaddr, macmask);
1435 if (status != VXGE_HW_OK) {
1436 vxge_debug_init(VXGE_ERR,
1437 "DA config list entry failed for vpath:%d",
1438 vpath->device_id);
1439 return status;
1440 }
1441
1442 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1443 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1444 macaddr, macmask);
1445 if (status != VXGE_HW_OK)
1446 break;
1447 }
1448
1449 return status;
1450}
1451
1452/* Store all mac addresses from the list to the DA table */
1453static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1454{
1455 enum vxge_hw_status status = VXGE_HW_OK;
1456 struct macInfo mac_info;
1457 u8 *mac_address = NULL;
1458 struct list_head *entry, *next;
1459
1460 memset(&mac_info, 0, sizeof(struct macInfo));
1461
1462 if (vpath->is_open) {
1463 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1464 mac_address =
1465 (u8 *)&
1466 ((struct vxge_mac_addrs *)entry)->macaddr;
1467 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1468 ((struct vxge_mac_addrs *)entry)->state =
1469 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1470 /* does this mac address already exist in da table? */
1471 status = vxge_search_mac_addr_in_da_table(vpath,
1472 &mac_info);
1473 if (status != VXGE_HW_OK) {
1474 /* Add this mac address to the DA table */
1475 status = vxge_hw_vpath_mac_addr_add(
1476 vpath->handle, mac_info.macaddr,
1477 mac_info.macmask,
1478 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1479 if (status != VXGE_HW_OK) {
1480 vxge_debug_init(VXGE_ERR,
1481 "DA add entry failed for vpath:%d",
1482 vpath->device_id);
1483 ((struct vxge_mac_addrs *)entry)->state
1484 = VXGE_LL_MAC_ADDR_IN_LIST;
1485 }
1486 }
1487 }
1488 }
1489
1490 return status;
1491}
1492
1493/* Store all vlan ids from the list to the vid table */
1494static enum vxge_hw_status
1495vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1496{
1497 enum vxge_hw_status status = VXGE_HW_OK;
1498 struct vxgedev *vdev = vpath->vdev;
1499 u16 vid;
1500
1501 if (vdev->vlgrp && vpath->is_open) {
1502
1503 for (vid = 0; vid < VLAN_N_VID; vid++) {
1504 if (!vlan_group_get_device(vdev->vlgrp, vid))
1505 continue;
1506 /* Add these vlan to the vid table */
1507 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1508 }
1509 }
1510
1511 return status;
1512}
1513
1313/* 1514/*
1314 * vxge_reset_vpath 1515 * vxge_reset_vpath
1315 * @vdev: pointer to vdev 1516 * @vdev: pointer to vdev
@@ -1405,12 +1606,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1405 } 1606 }
1406 1607
1407 if (event == VXGE_LL_FULL_RESET) { 1608 if (event == VXGE_LL_FULL_RESET) {
1609 netif_carrier_off(vdev->ndev);
1610
1408 /* wait for all the vpath reset to complete */ 1611 /* wait for all the vpath reset to complete */
1409 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 1612 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1410 while (test_bit(vp_id, &vdev->vp_reset)) 1613 while (test_bit(vp_id, &vdev->vp_reset))
1411 msleep(50); 1614 msleep(50);
1412 } 1615 }
1413 1616
1617 netif_carrier_on(vdev->ndev);
1618
1414 /* if execution mode is set to debug, don't reset the adapter */ 1619 /* if execution mode is set to debug, don't reset the adapter */
1415 if (unlikely(vdev->exec_mode)) { 1620 if (unlikely(vdev->exec_mode)) {
1416 vxge_debug_init(VXGE_ERR, 1621 vxge_debug_init(VXGE_ERR,
@@ -1423,6 +1628,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1423 } 1628 }
1424 1629
1425 if (event == VXGE_LL_FULL_RESET) { 1630 if (event == VXGE_LL_FULL_RESET) {
1631 vxge_hw_device_wait_receive_idle(vdev->devh);
1426 vxge_hw_device_intr_disable(vdev->devh); 1632 vxge_hw_device_intr_disable(vdev->devh);
1427 1633
1428 switch (vdev->cric_err_event) { 1634 switch (vdev->cric_err_event) {
@@ -1563,9 +1769,14 @@ out:
1563 * 1769 *
1564 * driver may reset the chip on events of serr, eccerr, etc 1770 * driver may reset the chip on events of serr, eccerr, etc
1565 */ 1771 */
1566static int vxge_reset(struct vxgedev *vdev) 1772static void vxge_reset(struct work_struct *work)
1567{ 1773{
1568 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1774 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1775
1776 if (!netif_running(vdev->ndev))
1777 return;
1778
1779 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1569} 1780}
1570 1781
1571/** 1782/**
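
vxge_reset() now takes a work_struct and recovers its vxgedev with container_of(), so the heavy reset runs in process context where sleeping is allowed. A hedged sketch of the plumbing this implies; the reset_task field name follows the patch, while the init/queue sites are assumed:

    #include <linux/workqueue.h>

    struct vxgedev_sketch {
        struct work_struct reset_task;
        /* ... */
    };

    static void reset_worker(struct work_struct *work)
    {
        struct vxgedev_sketch *vdev =
            container_of(work, struct vxgedev_sketch, reset_task);
        /* may sleep here, unlike the error path that queued the work */
        (void)vdev;
    }

    static void probe_time_init(struct vxgedev_sketch *vdev)
    {
        INIT_WORK(&vdev->reset_task, reset_worker); /* once, at setup */
    }

    static void on_error(struct vxgedev_sketch *vdev)
    {
        schedule_work(&vdev->reset_task); /* defer out of IRQ context */
    }
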
@@ -1608,8 +1819,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1608 int budget_org = budget; 1819 int budget_org = budget;
1609 struct vxge_ring *ring; 1820 struct vxge_ring *ring;
1610 1821
1611 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 1822 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1612 pci_get_drvdata(vdev->pdev);
1613 1823
1614 for (i = 0; i < vdev->no_of_vpath; i++) { 1824 for (i = 0; i < vdev->no_of_vpath; i++) {
1615 ring = &vdev->vpaths[i].ring; 1825 ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1855,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1645 */ 1855 */
1646static void vxge_netpoll(struct net_device *dev) 1856static void vxge_netpoll(struct net_device *dev)
1647{ 1857{
1648 struct __vxge_hw_device *hldev; 1858 struct __vxge_hw_device *hldev;
1649 struct vxgedev *vdev; 1859 struct vxgedev *vdev;
1650 1860
1651 vdev = (struct vxgedev *)netdev_priv(dev); 1861 vdev = netdev_priv(dev);
1652 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 1862 hldev = pci_get_drvdata(vdev->pdev);
1653 1863
1654 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1864 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1655 1865
@@ -1689,15 +1899,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1689 mtable[index] = index % vdev->no_of_vpath; 1899 mtable[index] = index % vdev->no_of_vpath;
1690 } 1900 }
1691 1901
1692 /* Fill RTH hash types */
1693 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1694 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1695 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1696 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1697 hash_types.hash_type_tcpipv6ex_en =
1698 vdev->config.rth_hash_type_tcpipv6ex;
1699 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1700
1701 /* set indirection table, bucket-to-vpath mapping */ 1902 /* set indirection table, bucket-to-vpath mapping */
1702 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, 1903 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1703 vdev->no_of_vpath, 1904 vdev->no_of_vpath,
@@ -1710,19 +1911,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1710 return status; 1911 return status;
1711 } 1912 }
1712 1913
1914 /* Fill RTH hash types */
1915 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1916 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1917 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1918 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1919 hash_types.hash_type_tcpipv6ex_en =
1920 vdev->config.rth_hash_type_tcpipv6ex;
1921 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1922
1713 /* 1923 /*
1714 * Because the itable_set() method uses the active_table field 1924 * Because the itable_set() method uses the active_table field
1715 * for the target virtual path the RTH config should be updated 1925 * for the target virtual path the RTH config should be updated
1716 * for all VPATHs. The h/w only uses the lowest numbered VPATH 1926 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1717 * when steering frames. 1927 * when steering frames.
1718 */ 1928 */
1719 for (index = 0; index < vdev->no_of_vpath; index++) { 1929 for (index = 0; index < vdev->no_of_vpath; index++) {
1720 status = vxge_hw_vpath_rts_rth_set( 1930 status = vxge_hw_vpath_rts_rth_set(
1721 vdev->vpaths[index].handle, 1931 vdev->vpaths[index].handle,
1722 vdev->config.rth_algorithm, 1932 vdev->config.rth_algorithm,
1723 &hash_types, 1933 &hash_types,
1724 vdev->config.rth_bkt_sz); 1934 vdev->config.rth_bkt_sz);
1725
1726 if (status != VXGE_HW_OK) { 1935 if (status != VXGE_HW_OK) {
1727 vxge_debug_init(VXGE_ERR, 1936 vxge_debug_init(VXGE_ERR,
1728 "RTH configuration failed for vpath:%d", 1937 "RTH configuration failed for vpath:%d",
@@ -1734,201 +1943,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1734 return status; 1943 return status;
1735} 1944}
1736 1945
1737static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1738{
1739 struct vxge_mac_addrs *new_mac_entry;
1740 u8 *mac_address = NULL;
1741
1742 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
1743 return TRUE;
1744
1745 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
1746 if (!new_mac_entry) {
1747 vxge_debug_mem(VXGE_ERR,
1748 "%s: memory allocation failed",
1749 VXGE_DRIVER_NAME);
1750 return FALSE;
1751 }
1752
1753 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
1754
1755 /* Copy the new mac address to the list */
1756 mac_address = (u8 *)&new_mac_entry->macaddr;
1757 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1758
1759 new_mac_entry->state = mac->state;
1760 vpath->mac_addr_cnt++;
1761
1762 /* Is this a multicast address */
1763 if (0x01 & mac->macaddr[0])
1764 vpath->mcast_addr_cnt++;
1765
1766 return TRUE;
1767}
1768
1769/* Add a mac address to DA table */
1770static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
1771 struct macInfo *mac)
1772{
1773 enum vxge_hw_status status = VXGE_HW_OK;
1774 struct vxge_vpath *vpath;
1775 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
1776
1777 if (0x01 & mac->macaddr[0]) /* multicast address */
1778 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
1779 else
1780 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
1781
1782 vpath = &vdev->vpaths[mac->vpath_no];
1783 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
1784 mac->macmask, duplicate_mode);
1785 if (status != VXGE_HW_OK) {
1786 vxge_debug_init(VXGE_ERR,
1787 "DA config add entry failed for vpath:%d",
1788 vpath->device_id);
1789 } else
1790 if (FALSE == vxge_mac_list_add(vpath, mac))
1791 status = -EPERM;
1792
1793 return status;
1794}
1795
1796static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1797{
1798 struct list_head *entry, *next;
1799 u64 del_mac = 0;
1800 u8 *mac_address = (u8 *) (&del_mac);
1801
1802 /* Copy the mac address to delete from the list */
1803 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1804
1805 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1806 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1807 list_del(entry);
1808 kfree((struct vxge_mac_addrs *)entry);
1809 vpath->mac_addr_cnt--;
1810
1811 /* Is this a multicast address */
1812 if (0x01 & mac->macaddr[0])
1813 vpath->mcast_addr_cnt--;
1814 return TRUE;
1815 }
1816 }
1817
1818 return FALSE;
1819}
1820/* delete a mac address from DA table */
1821static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
1822 struct macInfo *mac)
1823{
1824 enum vxge_hw_status status = VXGE_HW_OK;
1825 struct vxge_vpath *vpath;
1826
1827 vpath = &vdev->vpaths[mac->vpath_no];
1828 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1829 mac->macmask);
1830 if (status != VXGE_HW_OK) {
1831 vxge_debug_init(VXGE_ERR,
1832 "DA config delete entry failed for vpath:%d",
1833 vpath->device_id);
1834 } else
1835 vxge_mac_list_del(vpath, mac);
1836 return status;
1837}
1838
1839/* list all mac addresses from DA table */
1840enum vxge_hw_status
1841static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1842 struct macInfo *mac)
1843{
1844 enum vxge_hw_status status = VXGE_HW_OK;
1845 unsigned char macmask[ETH_ALEN];
1846 unsigned char macaddr[ETH_ALEN];
1847
1848 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1849 macaddr, macmask);
1850 if (status != VXGE_HW_OK) {
1851 vxge_debug_init(VXGE_ERR,
1852 "DA config list entry failed for vpath:%d",
1853 vpath->device_id);
1854 return status;
1855 }
1856
1857 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1858
1859 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1860 macaddr, macmask);
1861 if (status != VXGE_HW_OK)
1862 break;
1863 }
1864
1865 return status;
1866}
1867
1868/* Store all vlan ids from the list to the vid table */
1869static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1870{
1871 enum vxge_hw_status status = VXGE_HW_OK;
1872 struct vxgedev *vdev = vpath->vdev;
1873 u16 vid;
1874
1875 if (vdev->vlgrp && vpath->is_open) {
1876
1877 for (vid = 0; vid < VLAN_N_VID; vid++) {
1878 if (!vlan_group_get_device(vdev->vlgrp, vid))
1879 continue;
1880 /* Add these vlan to the vid table */
1881 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1882 }
1883 }
1884
1885 return status;
1886}
1887
1888/* Store all mac addresses from the list to the DA table */
1889static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1890{
1891 enum vxge_hw_status status = VXGE_HW_OK;
1892 struct macInfo mac_info;
1893 u8 *mac_address = NULL;
1894 struct list_head *entry, *next;
1895
1896 memset(&mac_info, 0, sizeof(struct macInfo));
1897
1898 if (vpath->is_open) {
1899
1900 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1901 mac_address =
1902 (u8 *)&
1903 ((struct vxge_mac_addrs *)entry)->macaddr;
1904 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1905 ((struct vxge_mac_addrs *)entry)->state =
1906 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1907 /* does this mac address already exist in da table? */
1908 status = vxge_search_mac_addr_in_da_table(vpath,
1909 &mac_info);
1910 if (status != VXGE_HW_OK) {
1911 /* Add this mac address to the DA table */
1912 status = vxge_hw_vpath_mac_addr_add(
1913 vpath->handle, mac_info.macaddr,
1914 mac_info.macmask,
1915 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1916 if (status != VXGE_HW_OK) {
1917 vxge_debug_init(VXGE_ERR,
1918 "DA add entry failed for vpath:%d",
1919 vpath->device_id);
1920 ((struct vxge_mac_addrs *)entry)->state
1921 = VXGE_LL_MAC_ADDR_IN_LIST;
1922 }
1923 }
1924 }
1925 }
1926
1927 return status;
1928}
1929
1930/* reset vpaths */ 1946/* reset vpaths */
1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1947enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1932{ 1948{
1933 enum vxge_hw_status status = VXGE_HW_OK; 1949 enum vxge_hw_status status = VXGE_HW_OK;
1934 struct vxge_vpath *vpath; 1950 struct vxge_vpath *vpath;
@@ -1988,8 +2004,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
1988 2004
1989 for (i = 0; i < vdev->no_of_vpath; i++) { 2005 for (i = 0; i < vdev->no_of_vpath; i++) {
1990 vpath = &vdev->vpaths[i]; 2006 vpath = &vdev->vpaths[i];
1991
1992 vxge_assert(vpath->is_configured); 2007 vxge_assert(vpath->is_configured);
2008
2009 if (!vdev->titan1) {
2010 struct vxge_hw_vp_config *vcfg;
2011 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2012
2013 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2014 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2015 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2016 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2017 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2018 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2019 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2020 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2021 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2022 }
2023
1993 attr.vp_id = vpath->device_id; 2024 attr.vp_id = vpath->device_id;
1994 attr.fifo_attr.callback = vxge_xmit_compl; 2025 attr.fifo_attr.callback = vxge_xmit_compl;
1995 attr.fifo_attr.txdl_term = vxge_tx_term; 2026 attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2004,6 +2035,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2004 2035
2005 vpath->ring.ndev = vdev->ndev; 2036 vpath->ring.ndev = vdev->ndev;
2006 vpath->ring.pdev = vdev->pdev; 2037 vpath->ring.pdev = vdev->pdev;
2038
2007 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); 2039 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2008 if (status == VXGE_HW_OK) { 2040 if (status == VXGE_HW_OK) {
2009 vpath->fifo.handle = 2041 vpath->fifo.handle =
@@ -2024,6 +2056,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2024 vdev->config.fifo_indicate_max_pkts; 2056 vdev->config.fifo_indicate_max_pkts;
2025 vpath->ring.rx_vector_no = 0; 2057 vpath->ring.rx_vector_no = 0;
2026 vpath->ring.rx_csum = vdev->rx_csum; 2058 vpath->ring.rx_csum = vdev->rx_csum;
2059 vpath->ring.rx_hwts = vdev->rx_hwts;
2027 vpath->is_open = 1; 2060 vpath->is_open = 1;
2028 vdev->vp_handles[i] = vpath->handle; 2061 vdev->vp_handles[i] = vpath->handle;
2029 vpath->ring.gro_enable = vdev->config.gro_enable; 2062 vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2031,11 +2064,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2031 vdev->stats.vpaths_open++; 2064 vdev->stats.vpaths_open++;
2032 } else { 2065 } else {
2033 vdev->stats.vpath_open_fail++; 2066 vdev->stats.vpath_open_fail++;
2034 vxge_debug_init(VXGE_ERR, 2067 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2035 "%s: vpath: %d failed to open " 2068 "open with status: %d",
2036 "with status: %d", 2069 vdev->ndev->name, vpath->device_id,
2037 vdev->ndev->name, vpath->device_id, 2070 status);
2038 status);
2039 vxge_close_vpaths(vdev, 0); 2071 vxge_close_vpaths(vdev, 0);
2040 return -EPERM; 2072 return -EPERM;
2041 } 2073 }
@@ -2043,6 +2075,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2043 vp_id = vpath->handle->vpath->vp_id; 2075 vp_id = vpath->handle->vpath->vp_id;
2044 vdev->vpaths_deployed |= vxge_mBIT(vp_id); 2076 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2045 } 2077 }
2078
2046 return VXGE_HW_OK; 2079 return VXGE_HW_OK;
2047} 2080}
2048 2081
@@ -2062,21 +2095,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2062 struct __vxge_hw_device *hldev; 2095 struct __vxge_hw_device *hldev;
2063 u64 reason; 2096 u64 reason;
2064 enum vxge_hw_status status; 2097 enum vxge_hw_status status;
2065 struct vxgedev *vdev = (struct vxgedev *) dev_id;; 2098 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2066 2099
2067 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2100 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2068 2101
2069 dev = vdev->ndev; 2102 dev = vdev->ndev;
2070 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 2103 hldev = pci_get_drvdata(vdev->pdev);
2071 2104
2072 if (pci_channel_offline(vdev->pdev)) 2105 if (pci_channel_offline(vdev->pdev))
2073 return IRQ_NONE; 2106 return IRQ_NONE;
2074 2107
2075 if (unlikely(!is_vxge_card_up(vdev))) 2108 if (unlikely(!is_vxge_card_up(vdev)))
2076 return IRQ_NONE; 2109 return IRQ_HANDLED;
2077 2110
2078 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2111 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2079 &reason);
2080 if (status == VXGE_HW_OK) { 2112 if (status == VXGE_HW_OK) {
2081 vxge_hw_device_mask_all(hldev); 2113 vxge_hw_device_mask_all(hldev);
2082 2114
@@ -2301,8 +2333,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2301 2333
2302static void vxge_rem_isr(struct vxgedev *vdev) 2334static void vxge_rem_isr(struct vxgedev *vdev)
2303{ 2335{
2304 struct __vxge_hw_device *hldev; 2336 struct __vxge_hw_device *hldev;
2305 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2337 hldev = pci_get_drvdata(vdev->pdev);
2306 2338
2307#ifdef CONFIG_PCI_MSI 2339#ifdef CONFIG_PCI_MSI
2308 if (vdev->config.intr_type == MSI_X) { 2340 if (vdev->config.intr_type == MSI_X) {
@@ -2529,8 +2561,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
2529 * Return value: '0' on success and an appropriate (-)ve integer as 2561 * Return value: '0' on success and an appropriate (-)ve integer as
2530 * defined in errno.h file on failure. 2562 * defined in errno.h file on failure.
2531 */ 2563 */
2532static int 2564static int vxge_open(struct net_device *dev)
2533vxge_open(struct net_device *dev)
2534{ 2565{
2535 enum vxge_hw_status status; 2566 enum vxge_hw_status status;
2536 struct vxgedev *vdev; 2567 struct vxgedev *vdev;
@@ -2539,11 +2570,12 @@ vxge_open(struct net_device *dev)
2539 int ret = 0; 2570 int ret = 0;
2540 int i; 2571 int i;
2541 u64 val64, function_mode; 2572 u64 val64, function_mode;
2573
2542 vxge_debug_entryexit(VXGE_TRACE, 2574 vxge_debug_entryexit(VXGE_TRACE,
2543 "%s: %s:%d", dev->name, __func__, __LINE__); 2575 "%s: %s:%d", dev->name, __func__, __LINE__);
2544 2576
2545 vdev = (struct vxgedev *)netdev_priv(dev); 2577 vdev = netdev_priv(dev);
2546 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2578 hldev = pci_get_drvdata(vdev->pdev);
2547 function_mode = vdev->config.device_hw_info.function_mode; 2579 function_mode = vdev->config.device_hw_info.function_mode;
2548 2580
2549 /* make sure you have link off by default every time Nic is 2581 /* make sure you have link off by default every time Nic is
@@ -2598,6 +2630,8 @@ vxge_open(struct net_device *dev)
2598 goto out2; 2630 goto out2;
2599 } 2631 }
2600 } 2632 }
2633 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2634 hldev->config.rth_en ? "enabled" : "disabled");
2601 2635
2602 for (i = 0; i < vdev->no_of_vpath; i++) { 2636 for (i = 0; i < vdev->no_of_vpath; i++) {
2603 vpath = &vdev->vpaths[i]; 2637 vpath = &vdev->vpaths[i];
@@ -2683,9 +2717,10 @@ vxge_open(struct net_device *dev)
2683 vxge_os_timer(vdev->vp_reset_timer, 2717 vxge_os_timer(vdev->vp_reset_timer,
2684 vxge_poll_vp_reset, vdev, (HZ/2)); 2718 vxge_poll_vp_reset, vdev, (HZ/2));
2685 2719
2686 if (vdev->vp_lockup_timer.function == NULL) 2720 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2687 vxge_os_timer(vdev->vp_lockup_timer, 2721 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2688 vxge_poll_vp_lockup, vdev, (HZ/2)); 2722 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2723 HZ / 2);
2689 2724
2690 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2725 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2691 2726
@@ -2767,8 +2802,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2767 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 2802 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2768 dev->name, __func__, __LINE__); 2803 dev->name, __func__, __LINE__);
2769 2804
2770 vdev = (struct vxgedev *)netdev_priv(dev); 2805 vdev = netdev_priv(dev);
2771 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2806 hldev = pci_get_drvdata(vdev->pdev);
2772 2807
2773 if (unlikely(!is_vxge_card_up(vdev))) 2808 if (unlikely(!is_vxge_card_up(vdev)))
2774 return 0; 2809 return 0;
@@ -2778,7 +2813,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2778 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2813 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2779 msleep(50); 2814 msleep(50);
2780 2815
2781 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2782 if (do_io) { 2816 if (do_io) {
2783 /* Put the vpath back in normal mode */ 2817 /* Put the vpath back in normal mode */
2784 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2818 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2789,7 +2823,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2789 struct vxge_hw_mrpcim_reg, 2823 struct vxge_hw_mrpcim_reg,
2790 rts_mgr_cbasin_cfg), 2824 rts_mgr_cbasin_cfg),
2791 &val64); 2825 &val64);
2792
2793 if (status == VXGE_HW_OK) { 2826 if (status == VXGE_HW_OK) {
2794 val64 &= ~vpath_vector; 2827 val64 &= ~vpath_vector;
2795 status = vxge_hw_mgmt_reg_write(vdev->devh, 2828 status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2818,10 +2851,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2818 2851
2819 smp_wmb(); 2852 smp_wmb();
2820 } 2853 }
2821 del_timer_sync(&vdev->vp_lockup_timer); 2854
2855 if (vdev->titan1)
2856 del_timer_sync(&vdev->vp_lockup_timer);
2822 2857
2823 del_timer_sync(&vdev->vp_reset_timer); 2858 del_timer_sync(&vdev->vp_reset_timer);
2824 2859
2860 if (do_io)
2861 vxge_hw_device_wait_receive_idle(hldev);
2862
2863 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2864
2825 /* Disable napi */ 2865 /* Disable napi */
2826 if (vdev->config.intr_type != MSI_X) 2866 if (vdev->config.intr_type != MSI_X)
2827 napi_disable(&vdev->napi); 2867 napi_disable(&vdev->napi);
@@ -2838,8 +2878,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2838 if (do_io) 2878 if (do_io)
2839 vxge_hw_device_intr_disable(vdev->devh); 2879 vxge_hw_device_intr_disable(vdev->devh);
2840 2880
2841 mdelay(1000);
2842
2843 vxge_rem_isr(vdev); 2881 vxge_rem_isr(vdev);
2844 2882
2845 vxge_napi_del_all(vdev); 2883 vxge_napi_del_all(vdev);
@@ -2868,8 +2906,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2868 * Return value: '0' on success and an appropriate (-)ve integer as 2906 * Return value: '0' on success and an appropriate (-)ve integer as
2869 * defined in errno.h file on failure. 2907 * defined in errno.h file on failure.
2870 */ 2908 */
2871static int 2909static int vxge_close(struct net_device *dev)
2872vxge_close(struct net_device *dev)
2873{ 2910{
2874 do_vxge_close(dev, 1); 2911 do_vxge_close(dev, 1);
2875 return 0; 2912 return 0;
@@ -2943,9 +2980,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2943 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 2980 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
2944 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; 2981 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
2945 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; 2982 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
2946 net_stats->rx_dropped += 2983 net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
2947 vdev->vpaths[k].ring.stats.rx_dropped;
2948
2949 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; 2984 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
2950 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; 2985 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
2951 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; 2986 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2954,6 +2989,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2954 return net_stats; 2989 return net_stats;
2955} 2990}
2956 2991
2992static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
2993 int enable)
2994{
2995 enum vxge_hw_status status;
2996 u64 val64;
2997
 2998 /* The Rx timestamp is passed to the driver in the FCS bytes, so
 2999 * FCS stripping by the adapter must be disabled. The driver
 3000 * already refuses to load unless FCS stripping is off (a hardware
 3001 * bug requires this), so nothing more needs to be done here.
 3002 */
3003 if (enable)
3004 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3005 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3006 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3007 else
3008 val64 = 0;
3009
3010 status = vxge_hw_mgmt_reg_write(vdev->devh,
3011 vxge_hw_mgmt_reg_type_mrpcim,
3012 0,
3013 offsetof(struct vxge_hw_mrpcim_reg,
3014 xmac_timestamp),
3015 val64);
3016 vxge_hw_device_flush_io(vdev->devh);
3017 return status;
3018}
3019
3020static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3021{
3022 struct hwtstamp_config config;
3023 enum vxge_hw_status status;
3024 int i;
3025
3026 if (copy_from_user(&config, data, sizeof(config)))
3027 return -EFAULT;
3028
3029 /* reserved for future extensions */
3030 if (config.flags)
3031 return -EINVAL;
3032
3033 /* Transmit HW Timestamp not supported */
3034 switch (config.tx_type) {
3035 case HWTSTAMP_TX_OFF:
3036 break;
3037 case HWTSTAMP_TX_ON:
3038 default:
3039 return -ERANGE;
3040 }
3041
3042 switch (config.rx_filter) {
3043 case HWTSTAMP_FILTER_NONE:
3044 status = vxge_timestamp_config(vdev, 0);
3045 if (status != VXGE_HW_OK)
3046 return -EFAULT;
3047
3048 vdev->rx_hwts = 0;
3049 config.rx_filter = HWTSTAMP_FILTER_NONE;
3050 break;
3051
3052 case HWTSTAMP_FILTER_ALL:
3053 case HWTSTAMP_FILTER_SOME:
3054 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3055 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3056 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3057 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3058 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3059 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3060 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3061 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3062 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3063 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3064 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3065 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3066 status = vxge_timestamp_config(vdev, 1);
3067 if (status != VXGE_HW_OK)
3068 return -EFAULT;
3069
3070 vdev->rx_hwts = 1;
3071 config.rx_filter = HWTSTAMP_FILTER_ALL;
3072 break;
3073
3074 default:
3075 return -ERANGE;
3076 }
3077
3078 for (i = 0; i < vdev->no_of_vpath; i++)
3079 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3080
3081 if (copy_to_user(data, &config, sizeof(config)))
3082 return -EFAULT;
3083
3084 return 0;
3085}
3086
2957/** 3087/**
2958 * vxge_ioctl 3088 * vxge_ioctl
2959 * @dev: Device pointer. 3089 * @dev: Device pointer.
@@ -2966,7 +3096,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2966 */ 3096 */
2967static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3097static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2968{ 3098{
2969 return -EOPNOTSUPP; 3099 struct vxgedev *vdev = netdev_priv(dev);
3100 int ret;
3101
3102 switch (cmd) {
3103 case SIOCSHWTSTAMP:
3104 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3105 if (ret)
3106 return ret;
3107 break;
3108 default:
3109 return -EOPNOTSUPP;
3110 }
3111
3112 return 0;
2970} 3113}
2971 3114
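For reference, userspace reaches the SIOCSHWTSTAMP path above through a
device ioctl. A minimal sketch using the standard linux/net_tstamp.h UAPI
follows; the interface name and the bare-bones error handling are purely
illustrative:

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/net_tstamp.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct hwtstamp_config cfg = {
                        .tx_type   = HWTSTAMP_TX_OFF,     /* vxge rejects Tx stamping */
                        .rx_filter = HWTSTAMP_FILTER_ALL, /* PTP filters are coerced to ALL */
                };
                struct ifreq ifr;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
                ifr.ifr_data = (char *)&cfg;

                if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                        perror("SIOCSHWTSTAMP");
                close(fd);
                return 0;
        }

On success the driver writes the effective configuration back into cfg, so
the caller can observe that, for example, a PTP-specific filter was widened
to HWTSTAMP_FILTER_ALL.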
2972/** 3115/**
@@ -2977,18 +3120,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2977 * This function is triggered if the Tx Queue is stopped 3120 * This function is triggered if the Tx Queue is stopped
2978 * for a pre-defined amount of time when the Interface is still up. 3121 * for a pre-defined amount of time when the Interface is still up.
2979 */ 3122 */
2980static void 3123static void vxge_tx_watchdog(struct net_device *dev)
2981vxge_tx_watchdog(struct net_device *dev)
2982{ 3124{
2983 struct vxgedev *vdev; 3125 struct vxgedev *vdev;
2984 3126
2985 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3127 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2986 3128
2987 vdev = (struct vxgedev *)netdev_priv(dev); 3129 vdev = netdev_priv(dev);
2988 3130
2989 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 3131 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
2990 3132
2991 vxge_reset(vdev); 3133 schedule_work(&vdev->reset_task);
2992 vxge_debug_entryexit(VXGE_TRACE, 3134 vxge_debug_entryexit(VXGE_TRACE,
2993 "%s:%d Exiting...", __func__, __LINE__); 3135 "%s:%d Exiting...", __func__, __LINE__);
2994} 3136}
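The watchdog no longer resets the adapter synchronously; it only schedules
vdev->reset_task, which vxge_device_register() below wires up with
INIT_WORK(). Reduced to its essentials, the pattern looks like this (a
sketch with a hypothetical handler name, not the driver's exact code):

        #include <linux/workqueue.h>

        /* Runs later in process context, where the reset may sleep. */
        static void my_reset_task(struct work_struct *work)
        {
                struct vxgedev *vdev = container_of(work, struct vxgedev,
                                                    reset_task);

                do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
        }

        /* Once, at device-registration time: */
        INIT_WORK(&vdev->reset_task, my_reset_task);

        /* From atomic context, e.g. ndo_tx_timeout: */
        schedule_work(&vdev->reset_task);

This keeps ndo_tx_timeout, which runs under the netdev watchdog, from
sleeping inside the full device reset.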
@@ -3012,7 +3154,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3012 3154
3013 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3155 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3014 3156
3015 vdev = (struct vxgedev *)netdev_priv(dev); 3157 vdev = netdev_priv(dev);
3016 3158
3017 vpath = &vdev->vpaths[0]; 3159 vpath = &vdev->vpaths[0];
3018 if ((NULL == grp) && (vpath->is_open)) { 3160 if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3203,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3061 struct vxge_vpath *vpath; 3203 struct vxge_vpath *vpath;
3062 int vp_id; 3204 int vp_id;
3063 3205
3064 vdev = (struct vxgedev *)netdev_priv(dev); 3206 vdev = netdev_priv(dev);
3065 3207
3066 /* Add these vlan to the vid table */ 3208 /* Add these vlan to the vid table */
3067 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3209 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3230,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3088 3230
3089 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3231 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3090 3232
3091 vdev = (struct vxgedev *)netdev_priv(dev); 3233 vdev = netdev_priv(dev);
3092 3234
3093 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3235 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3094 3236
@@ -3110,21 +3252,31 @@ static const struct net_device_ops vxge_netdev_ops = {
3110 .ndo_start_xmit = vxge_xmit, 3252 .ndo_start_xmit = vxge_xmit,
3111 .ndo_validate_addr = eth_validate_addr, 3253 .ndo_validate_addr = eth_validate_addr,
3112 .ndo_set_multicast_list = vxge_set_multicast, 3254 .ndo_set_multicast_list = vxge_set_multicast,
3113
3114 .ndo_do_ioctl = vxge_ioctl, 3255 .ndo_do_ioctl = vxge_ioctl,
3115
3116 .ndo_set_mac_address = vxge_set_mac_addr, 3256 .ndo_set_mac_address = vxge_set_mac_addr,
3117 .ndo_change_mtu = vxge_change_mtu, 3257 .ndo_change_mtu = vxge_change_mtu,
3118 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3258 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3119 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3259 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3120 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3260 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3121
3122 .ndo_tx_timeout = vxge_tx_watchdog, 3261 .ndo_tx_timeout = vxge_tx_watchdog,
3123#ifdef CONFIG_NET_POLL_CONTROLLER 3262#ifdef CONFIG_NET_POLL_CONTROLLER
3124 .ndo_poll_controller = vxge_netpoll, 3263 .ndo_poll_controller = vxge_netpoll,
3125#endif 3264#endif
3126}; 3265};
3127 3266
3267static int __devinit vxge_device_revision(struct vxgedev *vdev)
3268{
3269 int ret;
3270 u8 revision;
3271
3272 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3273 if (ret)
3274 return -EIO;
3275
3276 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3277 return 0;
3278}
3279
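Caching the revision once lets the rest of the driver branch on silicon
revision without touching config space again. For instance, the
revision-specific interrupt-moderation values added to vxge-main.h below
could be selected along these lines (illustrative fragment only):

        u32 tti_ltimer = vdev->titan1 ? VXGE_TTI_LTIMER_VAL :
                                        VXGE_T1A_TTI_LTIMER_VAL;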
3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3280static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3129 struct vxge_config *config, 3281 struct vxge_config *config,
3130 int high_dma, int no_of_vpath, 3282 int high_dma, int no_of_vpath,
@@ -3163,6 +3315,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3163 vdev->pdev = hldev->pdev; 3315 vdev->pdev = hldev->pdev;
3164 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3316 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3165 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3317 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3318 vdev->rx_hwts = 0;
3319
3320 ret = vxge_device_revision(vdev);
3321 if (ret < 0)
3322 goto _out1;
3166 3323
3167 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3324 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3168 3325
@@ -3175,9 +3332,15 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3175 ndev->netdev_ops = &vxge_netdev_ops; 3332 ndev->netdev_ops = &vxge_netdev_ops;
3176 3333
3177 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3334 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3335 INIT_WORK(&vdev->reset_task, vxge_reset);
3178 3336
3179 vxge_initialize_ethtool_ops(ndev); 3337 vxge_initialize_ethtool_ops(ndev);
3180 3338
3339 if (vdev->config.rth_steering != NO_STEERING) {
3340 ndev->features |= NETIF_F_RXHASH;
3341 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3342 }
3343
3181 /* Allocate memory for vpath */ 3344 /* Allocate memory for vpath */
3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3345 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3183 no_of_vpath, GFP_KERNEL); 3346 no_of_vpath, GFP_KERNEL);
@@ -3191,7 +3354,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3191 3354
3192 ndev->features |= NETIF_F_SG; 3355 ndev->features |= NETIF_F_SG;
3193 3356
3194 ndev->features |= NETIF_F_HW_CSUM; 3357 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3195 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3358 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3196 "%s : checksuming enabled", __func__); 3359 "%s : checksuming enabled", __func__);
3197 3360
@@ -3227,6 +3390,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3227 "%s: Ethernet device registered", 3390 "%s: Ethernet device registered",
3228 ndev->name); 3391 ndev->name);
3229 3392
3393 hldev->ndev = ndev;
3230 *vdev_out = vdev; 3394 *vdev_out = vdev;
3231 3395
3232 /* Resetting the Device stats */ 3396 /* Resetting the Device stats */
@@ -3261,36 +3425,27 @@ _out0:
3261 * 3425 *
3262 * This function will unregister and free network device 3426 * This function will unregister and free network device
3263 */ 3427 */
3264static void 3428static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3266{ 3429{
3267 struct vxgedev *vdev; 3430 struct vxgedev *vdev;
3268 struct net_device *dev; 3431 struct net_device *dev;
3269 char buf[IFNAMSIZ]; 3432 char buf[IFNAMSIZ];
3270#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3271 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3272 u32 level_trace;
3273#endif
3274 3433
3275 dev = hldev->ndev; 3434 dev = hldev->ndev;
3276 vdev = netdev_priv(dev); 3435 vdev = netdev_priv(dev);
3277#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3278 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3279 level_trace = vdev->level_trace;
3280#endif
3281 vxge_debug_entryexit(level_trace,
3282 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3283 3436
3284 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3437 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3438 __func__, __LINE__);
3439
3440 strncpy(buf, dev->name, IFNAMSIZ);
3285 3441
3286 /* in 2.6 will call stop() if device is up */ 3442 /* in 2.6 will call stop() if device is up */
3287 unregister_netdev(dev); 3443 unregister_netdev(dev);
3288 3444
3289 flush_scheduled_work(); 3445 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3290 3446 buf);
3291 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3447 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3292 vxge_debug_entryexit(level_trace, 3448 __func__, __LINE__);
3293 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3294} 3449}
3295 3450
3296/* 3451/*
@@ -3304,7 +3459,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3304 enum vxge_hw_event type, u64 vp_id) 3459 enum vxge_hw_event type, u64 vp_id)
3305{ 3460{
3306 struct net_device *dev = hldev->ndev; 3461 struct net_device *dev = hldev->ndev;
3307 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3462 struct vxgedev *vdev = netdev_priv(dev);
3308 struct vxge_vpath *vpath = NULL; 3463 struct vxge_vpath *vpath = NULL;
3309 int vpath_idx; 3464 int vpath_idx;
3310 3465
@@ -3527,9 +3682,9 @@ static int __devinit vxge_config_vpaths(
3527 device_config->vp_config[i].tti.timer_ac_en = 3682 device_config->vp_config[i].tti.timer_ac_en =
3528 VXGE_HW_TIM_TIMER_AC_ENABLE; 3683 VXGE_HW_TIM_TIMER_AC_ENABLE;
3529 3684
3530 /* For msi-x with napi (each vector 3685 /* For msi-x with napi (each vector has a handler of its own) -
3531 has a handler of its own) - 3686 * Set CI to OFF for all vpaths
3532 Set CI to OFF for all vpaths */ 3687 */
3533 device_config->vp_config[i].tti.timer_ci_en = 3688 device_config->vp_config[i].tti.timer_ci_en =
3534 VXGE_HW_TIM_TIMER_CI_DISABLE; 3689 VXGE_HW_TIM_TIMER_CI_DISABLE;
3535 3690
@@ -3559,10 +3714,13 @@ static int __devinit vxge_config_vpaths(
3559 3714
3560 device_config->vp_config[i].ring.ring_blocks = 3715 device_config->vp_config[i].ring.ring_blocks =
3561 VXGE_HW_DEF_RING_BLOCKS; 3716 VXGE_HW_DEF_RING_BLOCKS;
3717
3562 device_config->vp_config[i].ring.buffer_mode = 3718 device_config->vp_config[i].ring.buffer_mode =
3563 VXGE_HW_RING_RXD_BUFFER_MODE_1; 3719 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3720
3564 device_config->vp_config[i].ring.rxds_limit = 3721 device_config->vp_config[i].ring.rxds_limit =
3565 VXGE_HW_DEF_RING_RXDS_LIMIT; 3722 VXGE_HW_DEF_RING_RXDS_LIMIT;
3723
3566 device_config->vp_config[i].ring.scatter_mode = 3724 device_config->vp_config[i].ring.scatter_mode =
3567 VXGE_HW_RING_SCATTER_MODE_A; 3725 VXGE_HW_RING_SCATTER_MODE_A;
3568 3726
@@ -3642,6 +3800,7 @@ static void __devinit vxge_device_config_init(
3642 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3800 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3643 break; 3801 break;
3644 } 3802 }
3803
3645 /* Timer period between device poll */ 3804 /* Timer period between device poll */
3646 device_config->device_poll_millis = VXGE_TIMER_DELAY; 3805 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3647 3806
@@ -3653,16 +3812,10 @@ static void __devinit vxge_device_config_init(
3653 3812
3654 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", 3813 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3655 __func__); 3814 __func__);
3656 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3657 device_config->dma_blockpool_initial);
3658 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3659 device_config->dma_blockpool_max);
3660 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", 3815 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3661 device_config->intr_mode); 3816 device_config->intr_mode);
3662 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", 3817 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3663 device_config->device_poll_millis); 3818 device_config->device_poll_millis);
3664 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3665 device_config->rts_mac_en);
3666 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", 3819 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3667 device_config->rth_en); 3820 device_config->rth_en);
3668 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", 3821 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3751,9 +3904,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3751 vxge_debug_init(VXGE_TRACE, 3904 vxge_debug_init(VXGE_TRACE,
3752 "%s: MAC Address learning enabled", vdev->ndev->name); 3905 "%s: MAC Address learning enabled", vdev->ndev->name);
3753 3906
3754 vxge_debug_init(VXGE_TRACE,
3755 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3756
3757 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3907 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3758 if (!vxge_bVALn(vpath_mask, i, 1)) 3908 if (!vxge_bVALn(vpath_mask, i, 1))
3759 continue; 3909 continue;
@@ -3766,14 +3916,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3766 ((struct __vxge_hw_device *)(vdev->devh))-> 3916 ((struct __vxge_hw_device *)(vdev->devh))->
3767 config.vp_config[i].rpa_strip_vlan_tag 3917 config.vp_config[i].rpa_strip_vlan_tag
3768 ? "Enabled" : "Disabled"); 3918 ? "Enabled" : "Disabled");
3769 vxge_debug_init(VXGE_TRACE,
3770 "%s: Ring blocks : %d", vdev->ndev->name,
3771 ((struct __vxge_hw_device *)(vdev->devh))->
3772 config.vp_config[i].ring.ring_blocks);
3773 vxge_debug_init(VXGE_TRACE,
3774 "%s: Fifo blocks : %d", vdev->ndev->name,
3775 ((struct __vxge_hw_device *)(vdev->devh))->
3776 config.vp_config[i].fifo.fifo_blocks);
3777 vxge_debug_ll_config(VXGE_TRACE, 3919 vxge_debug_ll_config(VXGE_TRACE,
3778 "%s: Max frags : %d", vdev->ndev->name, 3920 "%s: Max frags : %d", vdev->ndev->name,
3779 ((struct __vxge_hw_device *)(vdev->devh))-> 3921 ((struct __vxge_hw_device *)(vdev->devh))->
@@ -3813,8 +3955,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
3813static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3955static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3814 pci_channel_state_t state) 3956 pci_channel_state_t state)
3815{ 3957{
3816 struct __vxge_hw_device *hldev = 3958 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3817 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3818 struct net_device *netdev = hldev->ndev; 3959 struct net_device *netdev = hldev->ndev;
3819 3960
3820 netif_device_detach(netdev); 3961 netif_device_detach(netdev);
@@ -3843,8 +3984,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3843 */ 3984 */
3844static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 3985static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3845{ 3986{
3846 struct __vxge_hw_device *hldev = 3987 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3847 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3848 struct net_device *netdev = hldev->ndev; 3988 struct net_device *netdev = hldev->ndev;
3849 3989
3850 struct vxgedev *vdev = netdev_priv(netdev); 3990 struct vxgedev *vdev = netdev_priv(netdev);
@@ -3855,7 +3995,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3855 } 3995 }
3856 3996
3857 pci_set_master(pdev); 3997 pci_set_master(pdev);
3858 vxge_reset(vdev); 3998 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
3859 3999
3860 return PCI_ERS_RESULT_RECOVERED; 4000 return PCI_ERS_RESULT_RECOVERED;
3861} 4001}
@@ -3869,8 +4009,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3869 */ 4009 */
3870static void vxge_io_resume(struct pci_dev *pdev) 4010static void vxge_io_resume(struct pci_dev *pdev)
3871{ 4011{
3872 struct __vxge_hw_device *hldev = 4012 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3873 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3874 struct net_device *netdev = hldev->ndev; 4013 struct net_device *netdev = hldev->ndev;
3875 4014
3876 if (netif_running(netdev)) { 4015 if (netif_running(netdev)) {
@@ -3914,6 +4053,156 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3914 return num_functions; 4053 return num_functions;
3915} 4054}
3916 4055
4056int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4057{
4058 struct __vxge_hw_device *hldev = vdev->devh;
4059 u32 maj, min, bld, cmaj, cmin, cbld;
4060 enum vxge_hw_status status;
4061 const struct firmware *fw;
4062 int ret;
4063
4064 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4065 if (ret) {
4066 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4067 VXGE_DRIVER_NAME, fw_name);
4068 goto out;
4069 }
4070
4071 /* Load the new firmware onto the adapter */
4072 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4073 if (status != VXGE_HW_OK) {
4074 vxge_debug_init(VXGE_ERR,
4075 "%s: FW image download to adapter failed '%s'.",
4076 VXGE_DRIVER_NAME, fw_name);
4077 ret = -EIO;
4078 goto out;
4079 }
4080
4081 /* Read the version of the new firmware */
4082 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4083 if (status != VXGE_HW_OK) {
4084 vxge_debug_init(VXGE_ERR,
4085 "%s: Upgrade read version failed '%s'.",
4086 VXGE_DRIVER_NAME, fw_name);
4087 ret = -EIO;
4088 goto out;
4089 }
4090
4091 cmaj = vdev->config.device_hw_info.fw_version.major;
4092 cmin = vdev->config.device_hw_info.fw_version.minor;
4093 cbld = vdev->config.device_hw_info.fw_version.build;
4094 /* It's possible the version in /lib/firmware is not the latest version.
4095 * If so, we could get into a loop of trying to upgrade to the latest
4096 * and flashing the older version.
4097 */
4098 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4099 !override) {
4100 ret = -EINVAL;
4101 goto out;
4102 }
4103
4104 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4105 maj, min, bld);
4106
4107 /* Flash the adapter with the new firmware */
4108 status = vxge_hw_flash_fw(hldev);
4109 if (status != VXGE_HW_OK) {
4110 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4111 VXGE_DRIVER_NAME, fw_name);
4112 ret = -EIO;
4113 goto out;
4114 }
4115
4116 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4117 "hard reset before using, thus requiring a system reboot or a "
4118 "hotplug event.\n");
4119
4120out:
4121 return ret;
4122}
4123
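vxge_fw_upgrade() follows the kernel's standard request_firmware() flow.
A skeleton of that API, with hypothetical device and file names, is shown
below; note that each successful request_firmware() should eventually be
paired with release_firmware() once the image has been consumed:

        #include <linux/firmware.h>

        static int load_blob(struct device *dev)
        {
                const struct firmware *fw;
                int ret;

                ret = request_firmware(&fw, "vendor/image.bin", dev);
                if (ret)
                        return ret;     /* not found under /lib/firmware */

                /* ... push fw->data (fw->size bytes) to the adapter ... */

                release_firmware(fw);   /* always release the blob */
                return 0;
        }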
4124static int vxge_probe_fw_update(struct vxgedev *vdev)
4125{
4126 u32 maj, min, bld;
4127 int ret, gpxe = 0;
4128 char *fw_name;
4129
4130 maj = vdev->config.device_hw_info.fw_version.major;
4131 min = vdev->config.device_hw_info.fw_version.minor;
4132 bld = vdev->config.device_hw_info.fw_version.build;
4133
4134 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4135 return 0;
4136
4137 /* Ignore the build number when determining if the current firmware is
4138 * "too new" to load the driver
4139 */
4140 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4141 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4142 "version, unable to load driver\n",
4143 VXGE_DRIVER_NAME);
4144 return -EINVAL;
4145 }
4146
4147 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4148 * work with this driver.
4149 */
4150 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4151 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4152 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4153 return -EINVAL;
4154 }
4155
4156 /* If file not specified, determine gPXE or not */
4157 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4158 int i;
4159 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4160 if (vdev->devh->eprom_versions[i]) {
4161 gpxe = 1;
4162 break;
4163 }
4164 }
4165 if (gpxe)
4166 fw_name = "vxge/X3fw-pxe.ncf";
4167 else
4168 fw_name = "vxge/X3fw.ncf";
4169
4170 ret = vxge_fw_upgrade(vdev, fw_name, 0);
 4171 /* -EINVAL and -ENOENT are not fatal when flashing firmware on probe,
 4172 * so ignore them; anything else, including success, aborts the probe,
 4173 * since a freshly flashed adapter needs a hard reset before use. */
4174 if (ret != -EINVAL && ret != -ENOENT)
4175 return -EIO;
4176 else
4177 ret = 0;
4178
4179 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4180 VXGE_FW_VER(maj, min, 0)) {
4181 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4182 " be used with this driver.\n"
4183 "Please get the latest version from "
4184 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4185 VXGE_DRIVER_NAME, maj, min, bld);
4186 return -EINVAL;
4187 }
4188
4189 return ret;
4190}
4191
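The version checks above depend on VXGE_FW_VER() packing a
major/minor/build triple into a single integer that compares in release
order. A plausible shape for the macro (an assumption shown only for
illustration; the real definition lives in vxge-main.h) is:

        #define VXGE_FW_VER(maj, min, bld) \
                (((maj) << 16) + ((min) << 8) + (bld))

        /* e.g. 1.8.1 -> 0x010801, so plain integer comparison orders
         * releases, and passing bld == 0 ignores the build number, as
         * the "too new" check above does. */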
4192static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4193{
4194 int pos;
4195 u16 ctrl;
4196
4197 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4198 if (pos) {
4199 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4200 if (ctrl & PCI_SRIOV_CTRL_VFE)
4201 return 1;
4202 }
4203 return 0;
4204}
4205
3917/** 4206/**
3918 * vxge_probe 4207 * vxge_probe
3919 * @pdev : structure containing the PCI related information of the device. 4208 * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4217,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3928static int __devinit 4217static int __devinit
3929vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4218vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3930{ 4219{
3931 struct __vxge_hw_device *hldev; 4220 struct __vxge_hw_device *hldev;
3932 enum vxge_hw_status status; 4221 enum vxge_hw_status status;
3933 int ret; 4222 int ret;
3934 int high_dma = 0; 4223 int high_dma = 0;
@@ -3951,9 +4240,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3951 attr.pdev = pdev; 4240 attr.pdev = pdev;
3952 4241
3953 /* In SRIOV-17 mode, functions of the same adapter 4242 /* In SRIOV-17 mode, functions of the same adapter
3954 * can be deployed on different buses */ 4243 * can be deployed on different buses
3955 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || 4244 */
3956 (device != PCI_SLOT(pdev->devfn)))) 4245 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4246 !pdev->is_virtfn)
3957 new_device = 1; 4247 new_device = 1;
3958 4248
3959 bus = pdev->bus->number; 4249 bus = pdev->bus->number;
@@ -3971,6 +4261,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3971 driver_config->config_dev_cnt = 0; 4261 driver_config->config_dev_cnt = 0;
3972 driver_config->total_dev_cnt = 0; 4262 driver_config->total_dev_cnt = 0;
3973 } 4263 }
4264
3974 /* Now making the CPU based no of vpath calculation 4265 /* Now making the CPU based no of vpath calculation
3975 * applicable for individual functions as well. 4266 * applicable for individual functions as well.
3976 */ 4267 */
@@ -3993,11 +4284,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3993 goto _exit0; 4284 goto _exit0;
3994 } 4285 }
3995 4286
3996 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); 4287 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
3997 if (!ll_config) { 4288 if (!ll_config) {
3998 ret = -ENOMEM; 4289 ret = -ENOMEM;
3999 vxge_debug_init(VXGE_ERR, 4290 vxge_debug_init(VXGE_ERR,
4000 "ll_config : malloc failed %s %d", 4291 "device_config : malloc failed %s %d",
4001 __FILE__, __LINE__); 4292 __FILE__, __LINE__);
4002 goto _exit0; 4293 goto _exit0;
4003 } 4294 }
@@ -4041,7 +4332,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4041 goto _exit1; 4332 goto _exit1;
4042 } 4333 }
4043 4334
4044 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { 4335 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
4045 vxge_debug_init(VXGE_ERR, 4336 vxge_debug_init(VXGE_ERR,
4046 "%s : request regions failed", __func__); 4337 "%s : request regions failed", __func__);
4047 ret = -ENODEV; 4338 ret = -ENODEV;
@@ -4072,16 +4363,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4072 goto _exit3; 4363 goto _exit3;
4073 } 4364 }
4074 4365
4075 if (ll_config->device_hw_info.fw_version.major !=
4076 VXGE_DRIVER_FW_VERSION_MAJOR) {
4077 vxge_debug_init(VXGE_ERR,
4078 "%s: Incorrect firmware version."
4079 "Please upgrade the firmware to version 1.x.x",
4080 VXGE_DRIVER_NAME);
4081 ret = -EINVAL;
4082 goto _exit3;
4083 }
4084
4085 vpath_mask = ll_config->device_hw_info.vpath_mask; 4366 vpath_mask = ll_config->device_hw_info.vpath_mask;
4086 if (vpath_mask == 0) { 4367 if (vpath_mask == 0) {
4087 vxge_debug_ll_config(VXGE_TRACE, 4368 vxge_debug_ll_config(VXGE_TRACE,
@@ -4110,14 +4391,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4110 num_vfs = vxge_get_num_vfs(function_mode) - 1; 4391 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4111 4392
4112 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4393 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4113 if (is_sriov(function_mode) && (max_config_dev > 1) && 4394 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4114 (ll_config->intr_type != INTA) && 4395 (ll_config->intr_type != INTA)) {
4115 (is_privileged == VXGE_HW_OK)) { 4396 ret = pci_enable_sriov(pdev, num_vfs);
4116 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4117 ? (max_config_dev - 1) : num_vfs);
4118 if (ret) 4397 if (ret)
4119 vxge_debug_ll_config(VXGE_ERR, 4398 vxge_debug_ll_config(VXGE_ERR,
4120 "Failed in enabling SRIOV mode: %d\n", ret); 4399 "Failed in enabling SRIOV mode: %d\n", ret);
4400 /* No need to fail out, as an error here is non-fatal */
4121 } 4401 }
4122 4402
4123 /* 4403 /*
@@ -4145,11 +4425,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4145 goto _exit3; 4425 goto _exit3;
4146 } 4426 }
4147 4427
4428 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4429 ll_config->device_hw_info.fw_version.minor,
4430 ll_config->device_hw_info.fw_version.build) >=
4431 VXGE_EPROM_FW_VER) {
4432 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4433
4434 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4435 if (status != VXGE_HW_OK) {
4436 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4437 VXGE_DRIVER_NAME);
4438 /* This is a non-fatal error, continue */
4439 }
4440
4441 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4442 hldev->eprom_versions[i] = img[i].version;
4443 if (!img[i].is_valid)
4444 break;
4445 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4446 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
4447 VXGE_EPROM_IMG_MAJOR(img[i].version),
4448 VXGE_EPROM_IMG_MINOR(img[i].version),
4449 VXGE_EPROM_IMG_FIX(img[i].version),
4450 VXGE_EPROM_IMG_BUILD(img[i].version));
4451 }
4452 }
4453
4148 /* if FCS stripping is not disabled in MAC fail driver load */ 4454 /* if FCS stripping is not disabled in MAC fail driver load */
4149 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4455 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4150 vxge_debug_init(VXGE_ERR, 4456 if (status != VXGE_HW_OK) {
4151 "%s: FCS stripping is not disabled in MAC" 4457 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4152 " failing driver load", VXGE_DRIVER_NAME); 4458 " failing driver load", VXGE_DRIVER_NAME);
4153 ret = -EINVAL; 4459 ret = -EINVAL;
4154 goto _exit4; 4460 goto _exit4;
4155 } 4461 }
@@ -4163,28 +4469,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4163 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4469 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4164 ll_config->addr_learn_en = addr_learn_en; 4470 ll_config->addr_learn_en = addr_learn_en;
4165 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4471 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4166 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4472 ll_config->rth_hash_type_tcpipv4 = 1;
4167 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4473 ll_config->rth_hash_type_ipv4 = 0;
4168 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4474 ll_config->rth_hash_type_tcpipv6 = 0;
4169 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4475 ll_config->rth_hash_type_ipv6 = 0;
4170 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4476 ll_config->rth_hash_type_tcpipv6ex = 0;
4171 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4477 ll_config->rth_hash_type_ipv6ex = 0;
4172 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4478 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4173 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4479 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4174 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4480 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4175 4481
4176 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4482 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4177 &vdev)) { 4483 &vdev);
4484 if (ret) {
4178 ret = -EINVAL; 4485 ret = -EINVAL;
4179 goto _exit4; 4486 goto _exit4;
4180 } 4487 }
4181 4488
4489 ret = vxge_probe_fw_update(vdev);
4490 if (ret)
4491 goto _exit5;
4492
4182 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4493 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4183 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4494 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4184 vxge_hw_device_trace_level_get(hldev)); 4495 vxge_hw_device_trace_level_get(hldev));
4185 4496
4186 /* set private HW device info */ 4497 /* set private HW device info */
4187 hldev->ndev = vdev->ndev;
4188 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4498 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4189 vdev->bar0 = attr.bar0; 4499 vdev->bar0 = attr.bar0;
4190 vdev->max_vpath_supported = max_vpath_supported; 4500 vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4588,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4278 4588
4279 /* Copy the station mac address to the list */ 4589 /* Copy the station mac address to the list */
4280 for (i = 0; i < vdev->no_of_vpath; i++) { 4590 for (i = 0; i < vdev->no_of_vpath; i++) {
4281 entry = (struct vxge_mac_addrs *) 4591 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4282 kzalloc(sizeof(struct vxge_mac_addrs),
4283 GFP_KERNEL);
4284 if (NULL == entry) { 4592 if (NULL == entry) {
4285 vxge_debug_init(VXGE_ERR, 4593 vxge_debug_init(VXGE_ERR,
4286 "%s: mac_addr_list : memory allocation failed", 4594 "%s: mac_addr_list : memory allocation failed",
4287 vdev->ndev->name); 4595 vdev->ndev->name);
4288 ret = -EPERM; 4596 ret = -EPERM;
4289 goto _exit5; 4597 goto _exit6;
4290 } 4598 }
4291 macaddr = (u8 *)&entry->macaddr; 4599 macaddr = (u8 *)&entry->macaddr;
4292 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4600 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4634,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4326 kfree(ll_config); 4634 kfree(ll_config);
4327 return 0; 4635 return 0;
4328 4636
4329_exit5: 4637_exit6:
4330 for (i = 0; i < vdev->no_of_vpath; i++) 4638 for (i = 0; i < vdev->no_of_vpath; i++)
4331 vxge_free_mac_add_list(&vdev->vpaths[i]); 4639 vxge_free_mac_add_list(&vdev->vpaths[i]);
4332 4640_exit5:
4333 vxge_device_unregister(hldev); 4641 vxge_device_unregister(hldev);
4334_exit4: 4642_exit4:
4335 pci_disable_sriov(pdev); 4643 pci_disable_sriov(pdev);
@@ -4337,7 +4645,7 @@ _exit4:
4337_exit3: 4645_exit3:
4338 iounmap(attr.bar0); 4646 iounmap(attr.bar0);
4339_exit2: 4647_exit2:
4340 pci_release_regions(pdev); 4648 pci_release_region(pdev, 0);
4341_exit1: 4649_exit1:
4342 pci_disable_device(pdev); 4650 pci_disable_device(pdev);
4343_exit0: 4651_exit0:
@@ -4354,34 +4662,25 @@ _exit0:
4354 * Description: This function is called by the Pci subsystem to release a 4662 * Description: This function is called by the Pci subsystem to release a
4355 * PCI device and free up all resource held up by the device. 4663 * PCI device and free up all resource held up by the device.
4356 */ 4664 */
4357static void __devexit 4665static void __devexit vxge_remove(struct pci_dev *pdev)
4358vxge_remove(struct pci_dev *pdev)
4359{ 4666{
4360 struct __vxge_hw_device *hldev; 4667 struct __vxge_hw_device *hldev;
4361 struct vxgedev *vdev = NULL; 4668 struct vxgedev *vdev = NULL;
4362 struct net_device *dev; 4669 struct net_device *dev;
4363 int i = 0; 4670 int i = 0;
4364#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4365 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4366 u32 level_trace;
4367#endif
4368 4671
4369 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4672 hldev = pci_get_drvdata(pdev);
4370 4673
4371 if (hldev == NULL) 4674 if (hldev == NULL)
4372 return; 4675 return;
4676
4373 dev = hldev->ndev; 4677 dev = hldev->ndev;
4374 vdev = netdev_priv(dev); 4678 vdev = netdev_priv(dev);
4375 4679
4376#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 4680 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4377 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4378 level_trace = vdev->level_trace;
4379#endif
4380 vxge_debug_entryexit(level_trace,
4381 "%s:%d", __func__, __LINE__);
4382 4681
4383 vxge_debug_init(level_trace, 4682 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4384 "%s : removing PCI device...", __func__); 4683 __func__);
4385 vxge_device_unregister(hldev); 4684 vxge_device_unregister(hldev);
4386 4685
4387 for (i = 0; i < vdev->no_of_vpath; i++) { 4686 for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4394,21 +4693,19 @@ vxge_remove(struct pci_dev *pdev)
4394 4693
4395 iounmap(vdev->bar0); 4694 iounmap(vdev->bar0);
4396 4695
4397 pci_disable_sriov(pdev);
4398
4399 /* we are safe to free it now */ 4696 /* we are safe to free it now */
4400 free_netdev(dev); 4697 free_netdev(dev);
4401 4698
4402 vxge_debug_init(level_trace, 4699 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4403 "%s:%d Device unregistered", __func__, __LINE__); 4700 __func__, __LINE__);
4404 4701
4405 vxge_hw_device_terminate(hldev); 4702 vxge_hw_device_terminate(hldev);
4406 4703
4407 pci_disable_device(pdev); 4704 pci_disable_device(pdev);
4408 pci_release_regions(pdev); 4705 pci_release_region(pdev, 0);
4409 pci_set_drvdata(pdev, NULL); 4706 pci_set_drvdata(pdev, NULL);
4410 vxge_debug_entryexit(level_trace, 4707 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4411 "%s:%d Exiting...", __func__, __LINE__); 4708 __LINE__);
4412} 4709}
4413 4710
4414static struct pci_error_handlers vxge_err_handler = { 4711static struct pci_error_handlers vxge_err_handler = {
@@ -4444,6 +4741,10 @@ vxge_starter(void)
4444 return -ENOMEM; 4741 return -ENOMEM;
4445 4742
4446 ret = pci_register_driver(&vxge_driver); 4743 ret = pci_register_driver(&vxge_driver);
4744 if (ret) {
4745 kfree(driver_config);
4746 goto err;
4747 }
4447 4748
4448 if (driver_config->config_dev_cnt && 4749 if (driver_config->config_dev_cnt &&
4449 (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) 4750 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4451,10 +4752,7 @@ vxge_starter(void)
4451 "%s: Configured %d of %d devices", 4752 "%s: Configured %d of %d devices",
4452 VXGE_DRIVER_NAME, driver_config->config_dev_cnt, 4753 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4453 driver_config->total_dev_cnt); 4754 driver_config->total_dev_cnt);
4454 4755err:
4455 if (ret)
4456 kfree(driver_config);
4457
4458 return ret; 4756 return ret;
4459} 4757}
4460 4758
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..5746fedc356f 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
29 29
30#define PCI_DEVICE_ID_TITAN_WIN 0x5733 30#define PCI_DEVICE_ID_TITAN_WIN 0x5733
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_HW_TITAN1_PCI_REVISION 1
33#define VXGE_HW_TITAN1A_PCI_REVISION 2
34
32#define VXGE_USE_DEFAULT 0xffffffff 35#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 36#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2 37#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
53 56
54#define VXGE_TTI_BTIMER_VAL 250000 57#define VXGE_TTI_BTIMER_VAL 250000
55 58
56#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
57#define VXGE_TTI_RTIMER_VAL 0 60#define VXGE_T1A_TTI_LTIMER_VAL 80
58#define VXGE_RTI_BTIMER_VAL 250 61#define VXGE_TTI_RTIMER_VAL 0
59#define VXGE_RTI_LTIMER_VAL 100 62#define VXGE_T1A_TTI_RTIMER_VAL 400
60#define VXGE_RTI_RTIMER_VAL 0 63#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0
61#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
62#define VXGE_ISR_POLLING_CNT 8 67#define VXGE_ISR_POLLING_CNT 8
63#define VXGE_MAX_CONFIG_DEV 0xFF 68#define VXGE_MAX_CONFIG_DEV 0xFF
@@ -76,14 +81,32 @@
76#define TTI_TX_UFC_B 40 81#define TTI_TX_UFC_B 40
77#define TTI_TX_UFC_C 60 82#define TTI_TX_UFC_C 60
78#define TTI_TX_UFC_D 100 83#define TTI_TX_UFC_D 100
84#define TTI_T1A_TX_UFC_A 30
85#define TTI_T1A_TX_UFC_B 80
86/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
87/* Slope - 93 */
88/* 60 - 9k Mtu, 140 - 1.5k mtu */
89#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
90
91/* Slope - 37 */
92/* 100 - 9k Mtu, 300 - 1.5k mtu */
93#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
94
95
96#define RTI_RX_URANGE_A 5
97#define RTI_RX_URANGE_B 15
98#define RTI_RX_URANGE_C 40
99#define RTI_T1A_RX_URANGE_A 1
100#define RTI_T1A_RX_URANGE_B 20
101#define RTI_T1A_RX_URANGE_C 50
102#define RTI_RX_UFC_A 1
103#define RTI_RX_UFC_B 5
104#define RTI_RX_UFC_C 10
105#define RTI_RX_UFC_D 15
106#define RTI_T1A_RX_UFC_B 20
107#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60
79 109
80#define RTI_RX_URANGE_A 5
81#define RTI_RX_URANGE_B 15
82#define RTI_RX_URANGE_C 40
83#define RTI_RX_UFC_A 1
84#define RTI_RX_UFC_B 5
85#define RTI_RX_UFC_C 10
86#define RTI_RX_UFC_D 15
87 110
88/* Milli secs timer period */ 111/* Milli secs timer period */
89#define VXGE_TIMER_DELAY 10000 112#define VXGE_TIMER_DELAY 10000
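As a sanity check of the TTI_T1A_TX_UFC_C/D slope macros above, the
standalone program below evaluates them at the two MTU endpoints named in
the comments. The 9k maximum MTU is an assumption taken from those
comments, not from the driver headers:

        #include <stdio.h>

        #define MAX_MTU 9000                    /* assumed reference point */
        #define UFC_C(mtu) (60 + ((MAX_MTU - (mtu)) / 93))
        #define UFC_D(mtu) (100 + ((MAX_MTU - (mtu)) / 37))

        int main(void)
        {
                /* prints 60 and 140: the documented 9k/1.5k endpoints */
                printf("UFC_C: %d %d\n", UFC_C(9000), UFC_C(1500));
                /* prints 100 and 302, i.e. roughly the documented 300 */
                printf("UFC_D: %d %d\n", UFC_D(9000), UFC_D(1500));
                return 0;
        }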
@@ -145,15 +168,15 @@ struct vxge_config {
145 168
146 int addr_learn_en; 169 int addr_learn_en;
147 170
148 int rth_steering; 171 u32 rth_steering:2,
149 int rth_algorithm; 172 rth_algorithm:2,
150 int rth_hash_type_tcpipv4; 173 rth_hash_type_tcpipv4:1,
151 int rth_hash_type_ipv4; 174 rth_hash_type_ipv4:1,
152 int rth_hash_type_tcpipv6; 175 rth_hash_type_tcpipv6:1,
153 int rth_hash_type_ipv6; 176 rth_hash_type_ipv6:1,
154 int rth_hash_type_tcpipv6ex; 177 rth_hash_type_tcpipv6ex:1,
155 int rth_hash_type_ipv6ex; 178 rth_hash_type_ipv6ex:1,
156 int rth_bkt_sz; 179 rth_bkt_sz:8;
157 int rth_jhash_golden_ratio; 180 int rth_jhash_golden_ratio;
158 int tx_steering_type; 181 int tx_steering_type;
159 int fifo_indicate_max_pkts; 182 int fifo_indicate_max_pkts;
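Collapsing the rth_* members into bitfields trades nine full ints for a
single 32-bit word. The standalone comparison below (hypothetical struct
names) illustrates the saving:

        #include <stdio.h>

        struct rth_cfg_old {    /* nine ints: 36 bytes */
                int steering, algorithm, tcpipv4, ipv4, tcpipv6,
                    ipv6, tcpipv6ex, ipv6ex, bkt_sz;
        };

        struct rth_cfg_new {    /* 18 bits packed into one u32: 4 bytes */
                unsigned int steering:2, algorithm:2, tcpipv4:1, ipv4:1,
                             tcpipv6:1, ipv6:1, tcpipv6ex:1, ipv6ex:1,
                             bkt_sz:8;
        };

        int main(void)
        {
                printf("old=%zu new=%zu\n", sizeof(struct rth_cfg_old),
                       sizeof(struct rth_cfg_new));     /* old=36 new=4 */
                return 0;
        }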
@@ -248,8 +271,9 @@ struct vxge_ring {
248 */ 271 */
249 int driver_id; 272 int driver_id;
250 273
251 /* copy of the flag indicating whether rx_csum is to be used */ 274 /* copy of the flag indicating whether rx_csum is to be used */
252 u32 rx_csum; 275 u32 rx_csum:1,
276 rx_hwts:1;
253 277
254 int pkts_processed; 278 int pkts_processed;
255 int budget; 279 int budget;
@@ -281,8 +305,8 @@ struct vxge_vpath {
281 int is_configured; 305 int is_configured;
282 int is_open; 306 int is_open;
283 struct vxgedev *vdev; 307 struct vxgedev *vdev;
284 u8 (macaddr)[ETH_ALEN]; 308 u8 macaddr[ETH_ALEN];
285 u8 (macmask)[ETH_ALEN]; 309 u8 macmask[ETH_ALEN];
286 310
287#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 311#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
288 /* mac addresses currently programmed into NIC */ 312 /* mac addresses currently programmed into NIC */
@@ -327,7 +351,9 @@ struct vxgedev {
327 u16 all_multi_flg; 351 u16 all_multi_flg;
328 352
329 /* A flag indicating whether rx_csum is to be used or not. */ 353 /* A flag indicating whether rx_csum is to be used or not. */
330 u32 rx_csum; 354 u32 rx_csum:1,
355 rx_hwts:1,
356 titan1:1;
331 357
332 struct vxge_msix_entry *vxge_entries; 358 struct vxge_msix_entry *vxge_entries;
333 struct msix_entry *entries; 359 struct msix_entry *entries;
@@ -369,6 +395,7 @@ struct vxgedev {
369 u32 level_err; 395 u32 level_err;
370 u32 level_trace; 396 u32 level_trace;
371 char fw_version[VXGE_HW_FW_STRLEN]; 397 char fw_version[VXGE_HW_FW_STRLEN];
398 struct work_struct reset_task;
372}; 399};
373 400
374struct vxge_rx_priv { 401struct vxge_rx_priv {
@@ -387,8 +414,6 @@ struct vxge_tx_priv {
387 static int p = val; \ 414 static int p = val; \
388 module_param(p, int, 0) 415 module_param(p, int, 0)
389 416
390#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
391
392#define vxge_os_timer(timer, handle, arg, exp) do { \ 417#define vxge_os_timer(timer, handle, arg, exp) do { \
393 init_timer(&timer); \ 418 init_timer(&timer); \
394 timer.function = handle; \ 419 timer.function = handle; \
@@ -396,7 +421,10 @@ struct vxge_tx_priv {
396 mod_timer(&timer, (jiffies + exp)); \ 421 mod_timer(&timer, (jiffies + exp)); \
397 } while (0); 422 } while (0);
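For context, the macro is invoked with a timer, a handler, a data word,
and an expiry offset in jiffies, roughly as follows (handler name and
period are illustrative):

        vxge_os_timer(vdev->vp_reset_timer, my_vp_reset_poll,
                      (unsigned long)vdev, HZ / 2);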
398 423
399extern void vxge_initialize_ethtool_ops(struct net_device *ndev); 424void vxge_initialize_ethtool_ops(struct net_device *ndev);
425enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
426int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
427
400/** 428/**
401 * #define VXGE_DEBUG_INIT: debug for initialization functions 429 * #define VXGE_DEBUG_INIT: debug for initialization functions
402 * #define VXGE_DEBUG_TX : debug transmit related functions 430 * #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51 51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
52#define VXGE_HW_ASIC_MODE_RESERVED 0 79#define VXGE_HW_ASIC_MODE_RESERVED 0
53#define VXGE_HW_ASIC_MODE_NO_IOV 1 80#define VXGE_HW_ASIC_MODE_NO_IOV 1
54#define VXGE_HW_ASIC_MODE_SR_IOV 2 81#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
165#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
166#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
167#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
168#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
169#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
170#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
171#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
172#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
173#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
174#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
175#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
176 203
177#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ 204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
437#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ 464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
438 vxge_bVALn(bits, 48, 16) 465 vxge_bVALn(bits, 48, 16)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) 466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
440 468
441#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ 469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
442 vxge_bVALn(bits, 0, 18) 470 vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
3998#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) 4026#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
3999#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) 4027#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
4000#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) 4028#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
4029#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
4001/*0x00a78*/ u64 prc_cfg7; 4030/*0x00a78*/ u64 prc_cfg7;
4002#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) 4031#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
4003#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) 4032#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842..42cc29843ac7 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
17#include "vxge-config.h" 17#include "vxge-config.h"
18#include "vxge-main.h" 18#include "vxge-main.h"
19 19
20static enum vxge_hw_status
21__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
22 u32 vp_id, enum vxge_hw_event type);
23static enum vxge_hw_status
24__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
25 u32 skip_alarms);
26
27/* 20/*
28 * vxge_hw_vpath_intr_enable - Enable vpath interrupts. 21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
29 * @vp: Virtual Path handle. 22 * @vp: Virtual Path handle.
@@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
419} 412}
420 413
421/** 414/**
415 * __vxge_hw_device_handle_error - Handle error
416 * @hldev: HW device
417 * @vp_id: Vpath Id
418 * @type: Error type. Please see enum vxge_hw_event{}
419 *
420 * Handle error.
421 */
422static enum vxge_hw_status
423__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
424 enum vxge_hw_event type)
425{
426 switch (type) {
427 case VXGE_HW_EVENT_UNKNOWN:
428 break;
429 case VXGE_HW_EVENT_RESET_START:
430 case VXGE_HW_EVENT_RESET_COMPLETE:
431 case VXGE_HW_EVENT_LINK_DOWN:
432 case VXGE_HW_EVENT_LINK_UP:
433 goto out;
434 case VXGE_HW_EVENT_ALARM_CLEARED:
435 goto out;
436 case VXGE_HW_EVENT_ECCERR:
437 case VXGE_HW_EVENT_MRPCIM_ECCERR:
438 goto out;
439 case VXGE_HW_EVENT_FIFO_ERR:
440 case VXGE_HW_EVENT_VPATH_ERR:
441 case VXGE_HW_EVENT_CRITICAL_ERR:
442 case VXGE_HW_EVENT_SERR:
443 break;
444 case VXGE_HW_EVENT_SRPCIM_SERR:
445 case VXGE_HW_EVENT_MRPCIM_SERR:
446 goto out;
447 case VXGE_HW_EVENT_SLOT_FREEZE:
448 break;
449 default:
450 vxge_assert(0);
451 goto out;
452 }
453
454 /* notify driver */
455 if (hldev->uld_callbacks.crit_err)
456 hldev->uld_callbacks.crit_err(
457 (struct __vxge_hw_device *)hldev,
458 type, vp_id);
459out:
460
461 return VXGE_HW_OK;
462}
463
464/*
465 * __vxge_hw_device_handle_link_down_ind
466 * @hldev: HW device handle.
467 *
468 * Link down indication handler. The function is invoked by HW when
469 * Titan indicates that the link is down.
470 */
471static enum vxge_hw_status
472__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
473{
474 /*
 475 * If the link state is already down, return.
476 */
477 if (hldev->link_state == VXGE_HW_LINK_DOWN)
478 goto exit;
479
480 hldev->link_state = VXGE_HW_LINK_DOWN;
481
482 /* notify driver */
483 if (hldev->uld_callbacks.link_down)
484 hldev->uld_callbacks.link_down(hldev);
485exit:
486 return VXGE_HW_OK;
487}
488
489/*
490 * __vxge_hw_device_handle_link_up_ind
491 * @hldev: HW device handle.
492 *
493 * Link up indication handler. The function is invoked by HW when
 494 * Titan indicates that the link is up for a programmable amount of time.
495 */
496static enum vxge_hw_status
497__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
498{
499 /*
 500 * If the link state is already up, return.
501 */
502 if (hldev->link_state == VXGE_HW_LINK_UP)
503 goto exit;
504
505 hldev->link_state = VXGE_HW_LINK_UP;
506
507 /* notify driver */
508 if (hldev->uld_callbacks.link_up)
509 hldev->uld_callbacks.link_up(hldev);
510exit:
511 return VXGE_HW_OK;
512}
513
514/*
515 * __vxge_hw_vpath_alarm_process - Process Alarms.
516 * @vpath: Virtual Path.
517 * @skip_alarms: Do not clear the alarms
518 *
519 * Process vpath alarms.
520 *
521 */
522static enum vxge_hw_status
523__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+						u32 skip_alarms)
+{
+	u64 val64;
+	u64 alarm_status;
+	u64 pic_status;
+	struct __vxge_hw_device *hldev = NULL;
+	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+	u64 mask64;
+	struct vxge_hw_vpath_stats_sw_info *sw_stats;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath == NULL) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out2;
+	}
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+			alarm_event);
+		goto out;
+	}
+
+	sw_stats = vpath->sw_stats;
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		sw_stats->error_stats.unknown_alarms++;
+
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+				(!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+				((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+				(!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+				))) {
+				sw_stats->error_stats.network_sustained_fault++;
+
+				writeq(
+					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_down_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+			}
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+				(!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+				((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+				(!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+				))) {
+
+				sw_stats->error_stats.network_sustained_ok++;
+
+				writeq(
+					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_up_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_UP, alarm_event);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+
+			alarm_event = VXGE_HW_SET_LEVEL(
+				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+			if (skip_alarms)
+				return VXGE_HW_OK;
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+		pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+			val64 = readq(&vp_reg->general_errors_reg);
+			mask64 = readq(&vp_reg->general_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+			    ~mask64) {
+				sw_stats->error_stats.ini_serr_det++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_SERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+			    ~mask64) {
+				sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+			    ~mask64)
+				sw_stats->error_stats.statsb_pif_chain_error++;
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+			    ~mask64)
+				sw_stats->error_stats.statsb_drop_timeout++;
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+			    ~mask64)
+				sw_stats->error_stats.target_illegal_access++;
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->general_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+			val64 = readq(&vp_reg->kdfcctl_errors_reg);
+			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+			    ~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->kdfcctl_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+		val64 = readq(&vp_reg->wrdma_alarm_status);
+
+		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+			val64 = readq(&vp_reg->prc_alarm_reg);
+			mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+			    ~mask64)
+				sw_stats->error_stats.prc_ring_bumps++;
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+			    ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+			    & ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+			    & ~mask64) {
+				sw_stats->error_stats.prc_quanta_size_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->prc_alarm_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+	}
+out:
+	hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+		return VXGE_HW_OK;
+
+	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+	if (alarm_event == VXGE_HW_EVENT_SERR)
+		return VXGE_HW_ERR_CRITICAL;
+
+	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+		VXGE_HW_ERR_SLOT_FREEZE :
+		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+		VXGE_HW_ERR_VPATH;
+}
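[Editor's note: the alarm handler moved in above decodes each alarm register the same way: "val64 & BIT" tests a hardware status bit, "& ~mask64" discards bits the driver has masked off, and VXGE_HW_SET_LEVEL() keeps only the most severe event seen so far. A minimal standalone sketch of that decode-and-escalate pattern follows; the names are simplified stand-ins for the driver's macros, not the real register layout.]

#include <stdio.h>
#include <stdint.h>

enum event { EV_UNKNOWN, EV_ALARM_CLEARED, EV_FIFO_ERR, EV_CRITICAL };

/* keep the more severe of the current and the new event */
static enum event set_level(enum event ev, enum event cur)
{
	return ev > cur ? ev : cur;
}

#define FIFO0_OVERFLOW (1ULL << 3)	/* hypothetical alarm bit */

int main(void)
{
	uint64_t status = FIFO0_OVERFLOW;	/* pretend register read */
	uint64_t mask = 0;			/* nothing masked */
	enum event alarm = EV_UNKNOWN;

	if ((status & FIFO0_OVERFLOW) & ~mask)
		alarm = set_level(EV_FIFO_ERR, alarm);

	printf("alarm level: %d\n", alarm);	/* prints 2 (EV_FIFO_ERR) */
	return 0;
}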
+
+/**
  * vxge_hw_device_begin_irq - Begin IRQ processing.
  * @hldev: HW device handle.
  * @skip_alarms: Do not clear the alarms
@@ -513,108 +884,6 @@ exit:
 	return ret;
 }
 
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_UP)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_UP;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_up)
-		hldev->uld_callbacks.link_up(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_DOWN)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_DOWN;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_down)
-		hldev->uld_callbacks.link_down(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(
-		struct __vxge_hw_device *hldev,
-		u32 vp_id,
-		enum vxge_hw_event type)
-{
-	switch (type) {
-	case VXGE_HW_EVENT_UNKNOWN:
-		break;
-	case VXGE_HW_EVENT_RESET_START:
-	case VXGE_HW_EVENT_RESET_COMPLETE:
-	case VXGE_HW_EVENT_LINK_DOWN:
-	case VXGE_HW_EVENT_LINK_UP:
-		goto out;
-	case VXGE_HW_EVENT_ALARM_CLEARED:
-		goto out;
-	case VXGE_HW_EVENT_ECCERR:
-	case VXGE_HW_EVENT_MRPCIM_ECCERR:
-		goto out;
-	case VXGE_HW_EVENT_FIFO_ERR:
-	case VXGE_HW_EVENT_VPATH_ERR:
-	case VXGE_HW_EVENT_CRITICAL_ERR:
-	case VXGE_HW_EVENT_SERR:
-		break;
-	case VXGE_HW_EVENT_SRPCIM_SERR:
-	case VXGE_HW_EVENT_MRPCIM_SERR:
-		goto out;
-	case VXGE_HW_EVENT_SLOT_FREEZE:
-		break;
-	default:
-		vxge_assert(0);
-		goto out;
-	}
-
-	/* notify driver */
-	if (hldev->uld_callbacks.crit_err)
-		hldev->uld_callbacks.crit_err(
-			(struct __vxge_hw_device *)hldev,
-			type, vp_id);
-out:
-
-	return VXGE_HW_OK;
-}
-
 /**
  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  * condition that has caused the Tx and RX interrupt.
@@ -699,8 +968,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
-				void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
 {
 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-	struct __vxge_hw_channel *channel;
-
-	channel = &ring->channel;
-
 	wmb();
 	vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
@@ -1868,284 +2133,6 @@ exit:
 }
 
 /*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-			u32 skip_alarms)
-{
-	u64 val64;
-	u64 alarm_status;
-	u64 pic_status;
-	struct __vxge_hw_device *hldev = NULL;
-	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-	u64 mask64;
-	struct vxge_hw_vpath_stats_sw_info *sw_stats;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath == NULL) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out2;
-	}
-
-	hldev = vpath->hldev;
-	vp_reg = vpath->vp_reg;
-	alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-	if (alarm_status == VXGE_HW_ALL_FOXES) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-			alarm_event);
-		goto out;
-	}
-
-	sw_stats = vpath->sw_stats;
-
-	if (alarm_status & ~(
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-		sw_stats->error_stats.unknown_alarms++;
-
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out;
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-		val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-		if (val64 &
-		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-			if (((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-				(!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-				((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-				(!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-				))) {
-				sw_stats->error_stats.network_sustained_fault++;
-
-				writeq(
-					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-					&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_down_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-			}
-
-			if (((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-				(!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-				((val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-				(!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-				))) {
-
-				sw_stats->error_stats.network_sustained_ok++;
-
-				writeq(
-					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-					&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_up_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_UP, alarm_event);
-			}
-
-			writeq(VXGE_HW_INTR_MASK_ALL,
-				&vp_reg->asic_ntwk_vp_err_reg);
-
-			alarm_event = VXGE_HW_SET_LEVEL(
-				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
-			if (skip_alarms)
-				return VXGE_HW_OK;
-		}
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-		pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-			val64 = readq(&vp_reg->general_errors_reg);
-			mask64 = readq(&vp_reg->general_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-			    ~mask64) {
-				sw_stats->error_stats.ini_serr_det++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_SERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-			    ~mask64) {
-				sw_stats->error_stats.dblgen_fifo0_overflow++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-			    ~mask64)
-				sw_stats->error_stats.statsb_pif_chain_error++;
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-			    ~mask64)
-				sw_stats->error_stats.statsb_drop_timeout++;
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-			    ~mask64)
-				sw_stats->error_stats.target_illegal_access++;
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->general_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
-			val64 = readq(&vp_reg->kdfcctl_errors_reg);
-			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-			    ~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->kdfcctl_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
-		val64 = readq(&vp_reg->wrdma_alarm_status);
-
-		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
-			val64 = readq(&vp_reg->prc_alarm_reg);
-			mask64 = readq(&vp_reg->prc_alarm_mask);
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
-			    ~mask64)
-				sw_stats->error_stats.prc_ring_bumps++;
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-			    ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-			    & ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-			    & ~mask64) {
-				sw_stats->error_stats.prc_quanta_size_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->prc_alarm_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-	}
-out:
-	hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
-		return VXGE_HW_OK;
-
-	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
-	if (alarm_event == VXGE_HW_EVENT_SERR)
-		return VXGE_HW_ERR_CRITICAL;
-
-	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-		VXGE_HW_ERR_SLOT_FREEZE :
-		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-		VXGE_HW_ERR_VPATH;
-}
-
-/*
  * vxge_hw_vpath_alarm_process - Process Alarms.
  * @vpath: Virtual Path.
  * @skip_alarms: Do not clear the alarms
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d0..8c3103fb6442 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
 	VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
 };
 
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
-	VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
-	VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
-	VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
-	VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
-};
-
 enum vxge_hw_status vxge_hw_ring_rxd_reserve(
 	struct __vxge_hw_ring *ring_handle,
 	void **rxdh);
@@ -2109,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };
 
-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
 struct vxge_hw_mempool_cbs {
 	void (*item_func_alloc)(
 		struct vxge_hw_mempool *mempoolh,
@@ -2186,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_add(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN],
+	u8 *macaddr,
+	u8 *macaddr_mask,
 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get_next(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_delete(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_vid_add(
@@ -2313,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 
 int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
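[Editor's note: the prototype change above, from "u8 (macaddr)[ETH_ALEN]" to "u8 *macaddr", is purely cosmetic: in C an array-typed function parameter is adjusted to a pointer, so both declarations compile to the same thing. A quick demonstration with sizeof:]

#include <stdio.h>

#define ETH_ALEN 6

static void takes_array(unsigned char mac[ETH_ALEN])
{
	/* despite the declaration, sizeof(mac) is the pointer size, not 6 */
	printf("inside:  %zu\n", sizeof(mac));
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0 };

	printf("outside: %zu\n", sizeof(mac));	/* 6 */
	takes_array(mac);			/* pointer size, e.g. 8 */
	return 0;
}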
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..ad2f99b9bcf3 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
 #define VXGE_VERSION_H
 
 #define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "9"
-#define VXGE_VERSION_BUILD "20840"
+#define VXGE_VERSION_MINOR "5"
+#define VXGE_VERSION_FIX "1"
+#define VXGE_VERSION_BUILD "22082"
 #define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR	1
+#define VXGE_DEAD_FW_VER_MINOR	4
+#define VXGE_DEAD_FW_VER_BUILD	4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+				     VXGE_DEAD_FW_VER_MINOR, \
+				     VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR	1
+#define VXGE_EPROM_FW_VER_MINOR	6
+#define VXGE_EPROM_FW_VER_BUILD	1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+				      VXGE_EPROM_FW_VER_MINOR, \
+				      VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR	1
+#define VXGE_CERT_FW_VER_MINOR	8
+#define VXGE_CERT_FW_VER_BUILD	1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+				     VXGE_CERT_FW_VER_MINOR, \
+				     VXGE_CERT_FW_VER_BUILD)
+
 #endif
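[Editor's note: the VXGE_FW_VER macro added above packs (major, minor, build) into one integer so firmware versions can be compared with ordinary integer operators. A userspace check of the arithmetic, using the macro exactly as defined in the hunk; note the packing only stays well-ordered while minor and build remain below 256:]

#include <stdio.h>

#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
	int dead = VXGE_FW_VER(1, 4, 4);	/* 0x010404 */
	int cert = VXGE_FW_VER(1, 8, 1);	/* 0x010801 */

	printf("dead=0x%06x cert=0x%06x\n", dead, cert);
	printf("cert newer than dead: %s\n", cert > dead ? "yes" : "no");
	return 0;
}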
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..cf05504d9511 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
 static int x25_asy_close(struct net_device *dev)
 {
 	struct x25_asy *sl = netdev_priv(dev);
-	int err;
 
 	spin_lock(&sl->lock);
 	if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
 	netif_stop_queue(dev);
 	sl->rcount = 0;
 	sl->xleft = 0;
-	err = lapb_unregister(dev);
-	if (err != LAPB_OK)
-		printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
-			err);
 	spin_unlock(&sl->lock);
 	return 0;
 }
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
 static void x25_asy_close_tty(struct tty_struct *tty)
 {
 	struct x25_asy *sl = tty->disc_data;
+	int err;
 
 	/* First make sure we're connected. */
 	if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
 	dev_close(sl->dev);
 	rtnl_unlock();
 
+	err = lapb_unregister(sl->dev);
+	if (err != LAPB_OK)
+		printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+			err);
+
 	tty->disc_data = NULL;
 	sl->tty = NULL;
 	x25_asy_free(sl);
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index cdedab46ba21..f0603327aafa 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@ MODULE_PARM_DESC(barkers,
 	 "signal; values are appended to a list--setting one value "
 	 "as zero cleans the existing list and starts a new one.");
 
-static
-struct i2400m_work *__i2400m_work_setup(
-	struct i2400m *i2400m, void (*fn)(struct work_struct *),
-	gfp_t gfp_flags, const void *pl, size_t pl_size)
-{
-	struct i2400m_work *iw;
-
-	iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
-	if (iw == NULL)
-		return NULL;
-	iw->i2400m = i2400m_get(i2400m);
-	iw->pl_size = pl_size;
-	memcpy(iw->pl, pl, pl_size);
-	INIT_WORK(&iw->ws, fn);
-	return iw;
-}
-
-
-/*
- * Schedule i2400m's specific work on the system's queue.
- *
- * Used for a few cases where we really need it; otherwise, identical
- * to i2400m_queue_work().
- *
- * Returns < 0 errno code on error, 1 if ok.
- *
- * If it returns zero, something really bad happened, as it means the
- * works struct was already queued, but we have just allocated it, so
- * it should not happen.
- */
-static int i2400m_schedule_work(struct i2400m *i2400m,
-			void (*fn)(struct work_struct *), gfp_t gfp_flags,
-			const void *pl, size_t pl_size)
-{
-	int result;
-	struct i2400m_work *iw;
-
-	result = -ENOMEM;
-	iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
-	if (iw != NULL) {
-		result = schedule_work(&iw->ws);
-		if (WARN_ON(result == 0))
-			result = -ENXIO;
-	}
-	return result;
-}
-
-
 /*
  * WiMAX stack operation: relay a message from user space
 *
@@ -648,17 +600,11 @@ EXPORT_SYMBOL_GPL(i2400m_post_reset);
 static
 void __i2400m_dev_reset_handle(struct work_struct *ws)
 {
-	int result;
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	const char *reason;
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+	const char *reason = i2400m->reset_reason;
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
-
-	if (WARN_ON(iw->pl_size != sizeof(reason)))
-		reason = "SW BUG: reason n/a";
-	else
-		memcpy(&reason, iw->pl, sizeof(reason));
+	int result;
 
 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
 
@@ -733,8 +679,6 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 	}
 	}
 out:
-	i2400m_put(i2400m);
-	kfree(iw);
 	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
 		ws, i2400m, reason);
 }
@@ -754,8 +698,8 @@ out:
  */
 int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
-	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
-			GFP_ATOMIC, &reason, sizeof(reason));
+	i2400m->reset_reason = reason;
+	return schedule_work(&i2400m->reset_ws);
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
@@ -768,14 +712,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 static
 void __i2400m_error_recovery(struct work_struct *ws)
 {
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
 
 	i2400m_reset(i2400m, I2400M_RT_BUS);
-
-	i2400m_put(i2400m);
-	kfree(iw);
-	return;
 }
 
 /*
@@ -805,18 +744,10 @@ void __i2400m_error_recovery(struct work_struct *ws)
  */
 void i2400m_error_recovery(struct i2400m *i2400m)
 {
-	struct device *dev = i2400m_dev(i2400m);
-
-	if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
-		if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
-			GFP_ATOMIC, NULL, 0) < 0) {
-			dev_err(dev, "run out of memory for "
-				"scheduling an error recovery ?\n");
-			atomic_dec(&i2400m->error_recovery);
-		}
-	} else
+	if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+		schedule_work(&i2400m->recovery_ws);
+	else
 		atomic_dec(&i2400m->error_recovery);
-	return;
 }
 EXPORT_SYMBOL_GPL(i2400m_error_recovery);
 
@@ -886,6 +817,10 @@ void i2400m_init(struct i2400m *i2400m)
 
 	mutex_init(&i2400m->init_mutex);
 	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+	INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+	INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
 	atomic_set(&i2400m->bus_reset_retries, 0);
 
 	i2400m->alive = 0;
@@ -1040,6 +975,9 @@ void i2400m_release(struct i2400m *i2400m)
 
 	i2400m_dev_stop(i2400m);
 
+	cancel_work_sync(&i2400m->reset_ws);
+	cancel_work_sync(&i2400m->recovery_ws);
+
 	i2400m_debugfs_rm(i2400m);
 	sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
 			   &i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@ module_init(i2400m_driver_init);
 static
 void __exit i2400m_driver_exit(void)
 {
-	/* for scheds i2400m_dev_reset_handle() */
-	flush_scheduled_work();
 	i2400m_barker_db_exit();
 }
 module_exit(i2400m_driver_exit);
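[Editor's note: the refactor above replaces a kmalloc'd struct i2400m_work (payload buffer, extra device reference, kfree in the handler) with work_structs embedded directly in struct i2400m: the handler recovers the device with container_of(), and teardown becomes two targeted cancel_work_sync() calls instead of a global flush_scheduled_work(). A minimal, buildable kernel-module sketch of that pattern follows; the demo_* names are invented for illustration.]

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_dev {
	const char *reason;		/* parameter for the handler */
	struct work_struct reset_ws;	/* embedded, no allocation needed */
};

static struct demo_dev demo;

static void demo_reset_handler(struct work_struct *ws)
{
	/* recover the enclosing object from the embedded member */
	struct demo_dev *dev = container_of(ws, struct demo_dev, reset_ws);

	pr_info("reset requested: %s\n", dev->reason);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.reset_ws, demo_reset_handler);
	demo.reason = "example";
	schedule_work(&demo.reset_ws);	/* callable from atomic context too */
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancel/wait on just our work item, not the whole shared queue */
	cancel_work_sync(&demo.reset_ws);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

[One trade-off visible in the diff: with a single embedded work item, back-to-back i2400m_dev_reset_handle() calls coalesce and only the latest reset_reason is seen, which the driver appears to accept since the handler resets the whole device anyway.]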
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 59ac7705e76e..17ecaa41a807 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -632,6 +632,11 @@ struct i2400m {
 	struct work_struct wake_tx_ws;
 	struct sk_buff *wake_tx_skb;
 
+	struct work_struct reset_ws;
+	const char *reset_reason;
+
+	struct work_struct recovery_ws;
+
 	struct dentry *debugfs_dentry;
 	const char *fw_name; /* name of the current firmware image */
 	unsigned long fw_version; /* version of the firmware interface */
@@ -896,20 +901,6 @@ struct device *i2400m_dev(struct i2400m *i2400m)
 	return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-/*
- * Helper for scheduling simple work functions
- *
- * This struct can get any kind of payload attached (normally in the
- * form of a struct where you pack the stuff you want to pass to the
- * _work function).
- */
-struct i2400m_work {
-	struct work_struct ws;
-	struct i2400m *i2400m;
-	size_t pl_size;
-	u8 pl[0];
-};
-
 extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
 				   char *, size_t);
 extern int i2400m_msg_size_check(struct i2400m *,
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 9bfc26e1bc6b..be428cae28d8 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -590,7 +590,6 @@ module_init(i2400ms_driver_init);
 static
 void __exit i2400ms_driver_exit(void)
 {
-	flush_scheduled_work(); /* for the stuff we schedule */
 	sdio_unregister_driver(&i2400m_sdio_driver);
 }
 module_exit(i2400ms_driver_exit);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d3365ac85dde..10e3ab352175 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -780,7 +780,6 @@ module_init(i2400mu_driver_init);
 static
 void __exit i2400mu_driver_exit(void)
 {
-	flush_scheduled_work(); /* for the stuff we schedule from sysfs.c */
 	usb_deregister(&i2400mu_driver);
 }
 module_exit(i2400mu_driver_exit);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1f5aa51b9cef..a16b3dae5b34 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -62,6 +62,7 @@
 static int ar9003_hw_power_interpolate(int32_t x,
 		int32_t *px, int32_t *py, u_int16_t np);
 
+
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dbb986946e1a..18d63f57777d 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -858,7 +858,10 @@ void hostap_free_data(struct ap_data *ap)
 		return;
 	}
 
+	flush_work_sync(&ap->add_sta_proc_queue);
+
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	flush_work_sync(&ap->wds_oper_queue);
 	if (ap->crypt)
 		ap->crypt->deinit(ap->crypt_priv);
 	ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b7cb165d612b..a8bddd81b4d1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3317,7 +3317,13 @@ static void prism2_free_local_data(struct net_device *dev)
 
 	unregister_netdev(local->dev);
 
-	flush_scheduled_work();
+	flush_work_sync(&local->reset_queue);
+	flush_work_sync(&local->set_multicast_list_queue);
+	flush_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+	flush_work_sync(&local->info_queue);
+#endif
+	flush_work_sync(&local->comms_qual_update);
 
 	lib80211_crypt_info_free(&local->crypt_info);
 
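[Editor's note: flush_scheduled_work() waits for every item on the shared system workqueue, which can deadlock if the caller holds a lock that some unrelated queued item also wants; the replacement above waits only on the driver's own work items. A compressed kernel-style sketch of the per-item idiom, with placeholder mydev_* names; flush_work_sync() is the API of this kernel generation, later kernels folded it into plain flush_work():]

#include <linux/workqueue.h>

struct mydev {
	struct work_struct reset_work;	/* hypothetical driver work items */
	struct work_struct stats_work;
};

static void mydev_teardown(struct mydev *dev)
{
	/*
	 * Wait only for our own pending work instead of draining the
	 * entire shared system workqueue with flush_scheduled_work().
	 */
	flush_work_sync(&dev->reset_work);
	flush_work_sync(&dev->stats_work);
}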
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ade30251608e..6f383cd684b0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -209,9 +209,6 @@ config RT2X00_LIB_LEDS
 	boolean
 	default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
-	depends on RT2X00_LIB=y && LEDS_CLASS=m
-
 config RT2X00_LIB_DEBUGFS
 	bool "Ralink debugfs support"
 	depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 30f8d404958b..6a9b66051cf7 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
 
 	/* Allocate a single memory block for values and addresses. */
 	count16 = 2*count;
+	/* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
 	a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
 			GFP_KERNEL);
 	if (!a16) {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca68..de6c3086d232 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -515,7 +515,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
  */
 static int xemaclite_set_mac_address(struct net_device *dev, void *address)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sockaddr *addr = address;
 
 	if (netif_running(dev))
@@ -534,7 +534,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
  */
 static void xemaclite_tx_timeout(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
 
 	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
  */
 static void xemaclite_tx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	dev->stats.tx_packets++;
 	if (lp->deferred_skb) {
@@ -605,7 +605,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
  */
 static void xemaclite_rx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *skb;
 	unsigned int align;
 	u32 len;
@@ -661,7 +661,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
 	bool tx_complete = 0;
 	struct net_device *dev = dev_id;
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	void __iomem *base_addr = lp->base_addr;
 	u32 tx_status;
 
@@ -918,7 +918,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
  */
 static int xemaclite_open(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	int retval;
 
 	/* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@ static int xemaclite_open(struct net_device *dev)
  */
 static int xemaclite_close(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 	xemaclite_disable_interrupts(lp);
@@ -1001,21 +1001,6 @@ static int xemaclite_close(struct net_device *dev)
 }
 
 /**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev: Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return: Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
-/**
  * xemaclite_send - Transmit a frame
  * @orig_skb: Pointer to the socket buffer to be transmitted
  * @dev: Pointer to the network device
@@ -1031,7 +1016,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
  */
 static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *new_skb;
 	unsigned int len;
 	unsigned long flags;
@@ -1068,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 static void xemaclite_remove_ndev(struct net_device *ndev)
 {
 	if (ndev) {
-		struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+		struct net_local *lp = netdev_priv(ndev);
 
 		if (lp->base_addr)
 			iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1230,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
 	struct device *dev = &of_dev->dev;
 	struct net_device *ndev = dev_get_drvdata(dev);
 
-	struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+	struct net_local *lp = netdev_priv(ndev);
 
 	/* Un-register the mii_bus, if configured */
 	if (lp->has_mdio) {
@@ -1285,7 +1270,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
 	.ndo_start_xmit = xemaclite_send,
 	.ndo_set_mac_address = xemaclite_set_mac_address,
 	.ndo_tx_timeout = xemaclite_tx_timeout,
-	.ndo_get_stats = xemaclite_get_stats,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xemaclite_poll_controller,
 #endif
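[Editor's note: every hunk in this file drops the same no-op cast. netdev_priv() returns void *, and in C a void * converts implicitly to any object-pointer type, so the cast adds noise without adding safety. A two-line illustration:]

#include <stdlib.h>

struct net_local { int dummy; };

int main(void)
{
	void *priv = malloc(sizeof(struct net_local));
	/* no cast needed in C: void * converts implicitly */
	struct net_local *lp = priv;

	free(lp);
	return 0;
}

[This is C-specific; C++ would require the cast, which is one reason the cast style lingers in C code.]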
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
 #define TX_BUF_SIZE 8192
 #define DMA_BUF_SIZE (RX_BUF_SIZE + 16)	/* 8k + 16 bytes for trailers */
 
-#define TX_TIMEOUT	10
+#define TX_TIMEOUT	(HZ/10)
 
 struct znet_private {
 	int rx_dma, tx_dma;
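[Editor's note: the watchdog timeout a driver stores in net_device is measured in jiffies, so a bare 10 means ten timer ticks, a real-time duration that varies with the kernel's HZ setting. Writing it as HZ/10 pins the timeout to 100 ms on any configuration, which is what the one-token change above fixes. A quick userspace check of the arithmetic:]

#include <stdio.h>

/* jiffies-per-second values seen on common kernel configs */
static const int hz_values[] = { 100, 250, 1000 };

int main(void)
{
	for (int i = 0; i < 3; i++) {
		int hz = hz_values[i];

		/* old: 10 jiffies; new: HZ/10 jiffies */
		printf("HZ=%4d  old=10 jiffies -> %4d ms,  new=HZ/10 -> %d ms\n",
		       hz, 10 * 1000 / hz, (hz / 10) * 1000 / hz);
	}
	return 0;
}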
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c4985326e..3a5a6fcc0ead 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <xen/xenbus.h>
 #include <xen/interface/io/pciif.h>
 #include <asm/xen/pci.h>
 #include <linux/interrupt.h>
@@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd,
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
-		dev_err(&pcidev->dev,
-			"device or driver is NULL\n");
+		dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+		if (pcidev)
+			pci_dev_put(pcidev);
 		return result;
 	}
 	pdrv = pcidev->driver;
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 68cf0c99138a..7b5080c45569 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void)
 
 	list_for_each_entry(port, &rio_mports, node) {
 		if (!request_mem_region(port->iores.start,
-					port->iores.end - port->iores.start,
+					resource_size(&port->iores),
 					port->name)) {
 			printk(KERN_ERR
 			       "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
-			       (u64)port->iores.start, (u64)port->iores.end - 1);
+			       (u64)port->iores.start, (u64)port->iores.end);
 			rc = -ENOMEM;
 			goto out;
 		}
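[Editor's note: struct resource describes an inclusive [start, end] range, so its length is end - start + 1; the old code's end - start under-requested the region by one byte, and resource_size() encapsulates the +1. A userspace mimic of that helper; the struct here is a stripped-down stand-in, not the kernel definition:]

#include <stdio.h>

/* stripped-down stand-in for the kernel's struct resource */
struct resource {
	unsigned long long start;
	unsigned long long end;	/* inclusive! */
};

/* same arithmetic as the kernel helper: inclusive range length */
static unsigned long long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource iores = { .start = 0x1000, .end = 0x1fff };

	printf("wrong: %llu bytes\n", iores.end - iores.start);	/* 4095 */
	printf("right: %llu bytes\n", resource_size(&iores));	/* 4096 */
	return 0;
}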
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 359d1e04626c..f0d638922644 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -35,7 +35,7 @@
 
 #ifdef CONFIG_SH_SECUREEDGE5410
 #include <asm/rtc.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
 
 #define RTC_RESET	0x1000
 #define RTC_IODATA	0x0800
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0f19d540b655..c9f13b9ea339 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1188,7 +1188,8 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
 	spin_lock_irqsave(&card->ipm_lock, flags);
 	list_for_each(l, &card->ipm_list) {
 		ipm = list_entry(l, struct lcs_ipm_list, list);
-		for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
+		for (im4 = rcu_dereference(in4_dev->mc_list);
+		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
 			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
 			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
 			     (memcmp(buf, &ipm->ipm.mac_addr,
@@ -1233,7 +1234,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
 	unsigned long flags;
 
 	LCS_DBF_TEXT(4, trace, "setmclst");
-	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
 		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
 		ipm = lcs_check_addr_entry(card, im4, buf);
 		if (ipm != NULL)
@@ -1269,10 +1271,10 @@ lcs_register_mc_addresses(void *data)
 	in4_dev = in_dev_get(card->dev);
 	if (in4_dev == NULL)
 		goto out;
-	read_lock(&in4_dev->mc_list_lock);
+	rcu_read_lock();
 	lcs_remove_mc_addresses(card,in4_dev);
 	lcs_set_mc_addresses(card, in4_dev);
-	read_unlock(&in4_dev->mc_list_lock);
+	rcu_read_unlock();
 	in_dev_put(in4_dev);
 
 	netif_carrier_off(card->dev);
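[Editor's note: this hunk tracks the ipv4 multicast list's conversion from rwlock protection to RCU: readers bracket the walk with rcu_read_lock()/rcu_read_unlock() and fetch each pointer through rcu_dereference(), so writers can update the list without blocking them. A condensed kernel-style sketch of the read side, using the post-conversion field names shown above; error handling omitted:]

#include <linux/rcupdate.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>

/* walk the ipv4 multicast list the RCU way, as in the hunks above */
static void walk_mc_list(struct in_device *in4_dev)
{
	struct ip_mc_list *im4;

	rcu_read_lock();
	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
	     im4 = rcu_dereference(im4->next_rcu)) {
		/* im4 stays valid until rcu_read_unlock() */
		pr_debug("group %pI4\n", &im4->multiaddr);
	}
	rcu_read_unlock();
}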
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6be43eb126b4..f47a714538db 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -440,7 +440,6 @@ struct qeth_qdio_out_q {
 	 * index of buffer to be filled by driver; state EMPTY or PACKING
 	 */
 	int next_buf_to_fill;
-	int sync_iqdio_error;
 	/*
 	 * number of buffers that are currently filled (PRIMED)
 	 * -> these buffers are hardware-owned
@@ -695,14 +694,6 @@ struct qeth_mc_mac {
 	int is_vmac;
 };
 
-struct qeth_skb_data {
-	__u32 magic;
-	int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
 struct qeth_rx {
 	int b_count;
 	int b_index;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 764267062601..b7d9dc0adc62 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -877,8 +877,8 @@ out:
 	return;
 }
 
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf)
 {
 	int i;
 	struct sk_buff *skb;
@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	if (buf->buffer->element[0].flags & 0x40)
 		atomic_dec(&queue->set_pci_flags_count);
 
-	if (!qeth_skip_skb) {
+	skb = skb_dequeue(&buf->skb_list);
+	while (skb) {
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
-		while (skb) {
-			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
-			skb = skb_dequeue(&buf->skb_list);
-		}
 	}
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf)
-{
-	__qeth_clear_output_buffer(queue, buf, 0);
-}
-
 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;
@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		}
 	}
 
-	queue->sync_iqdio_error = 0;
 	queue->card->dev->trans_start = jiffies;
 	if (queue->card->options.performance_stats) {
 		queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2840,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		queue->card->perf_stats.outbound_do_qdio_time +=
 			qeth_get_micros() -
 			queue->card->perf_stats.outbound_do_qdio_start_time;
-	if (rc > 0) {
-		if (!(rc & QDIO_ERROR_SIGA_BUSY))
-			queue->sync_iqdio_error = rc & 3;
-	}
+	atomic_add(count, &queue->used_buffers);
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
@@ -2866,7 +2854,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		qeth_schedule_recovery(queue->card);
 		return;
 	}
-	atomic_add(count, &queue->used_buffers);
 	if (queue->card->options.performance_stats)
 		queue->card->perf_stats.bufs_sent += count;
 }
@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (card->dev)
+	if (card->dev && (card->dev->flags & IFF_UP))
 		napi_schedule(&card->napi);
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
 	struct qeth_qdio_out_buffer *buffer;
 	int i;
-	unsigned qeth_send_err;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
 		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
-		qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
-		__qeth_clear_output_buffer(queue, buffer,
-			(qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+		qeth_handle_send_error(card, buffer, qdio_error);
+		qeth_clear_output_buffer(queue, buffer);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */
@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 		int offset, int hd_len)
 {
 	struct qeth_qdio_out_buffer *buffer;
-	struct sk_buff *skb1;
-	struct qeth_skb_data *retry_ctrl;
 	int index;
-	int rc;
 
 	/* spin until we get the queue ... */
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
 	qeth_flush_buffers(queue, index, 1);
-	if (queue->sync_iqdio_error == 2) {
-		skb1 = skb_dequeue(&buffer->skb_list);
-		while (skb1) {
-			atomic_dec(&skb1->users);
-			skb1 = skb_dequeue(&buffer->skb_list);
-		}
-		retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
-		if (retry_ctrl->magic != QETH_SKB_MAGIC) {
-			retry_ctrl->magic = QETH_SKB_MAGIC;
-			retry_ctrl->count = 0;
-		}
-		if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
-			retry_ctrl->count++;
-			rc = dev_queue_xmit(skb);
-		} else {
-			dev_kfree_skb_any(skb);
-			QETH_CARD_TEXT(card, 2, "qrdrop");
-		}
-	}
 	return 0;
 out:
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e37dd8c4bf4e..07d588867b57 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -333,7 +333,7 @@ struct qeth_arp_query_data {
 	__u16 request_bits;
 	__u16 reply_bits;
 	__u32 no_entries;
-	char data;
+	char data; /* only for replies */
 } __attribute__((packed));
 
 /* used as parameter for arp_query reply */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 42fa783a70c8..b5e967cf7e2d 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -372,7 +372,7 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
372 i = simple_strtoul(buf, &tmp, 16); 372 i = simple_strtoul(buf, &tmp, 16);
373 if ((i == 0) || (i == 1)) { 373 if ((i == 0) || (i == 1)) {
374 if (i == card->options.performance_stats) 374 if (i == card->options.performance_stats)
375 goto out;; 375 goto out;
376 card->options.performance_stats = i; 376 card->options.performance_stats = i;
377 if (i == 0) 377 if (i == 0)
378 memset(&card->perf_stats, 0, 378 memset(&card->perf_stats, 0,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 847e8797073c..7a7a1b664781 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,8 +849,6 @@ static int qeth_l2_open(struct net_device *dev)
849 card->state = CARD_STATE_UP; 849 card->state = CARD_STATE_UP;
850 netif_start_queue(dev); 850 netif_start_queue(dev);
851 851
852 if (!card->lan_online && netif_carrier_ok(dev))
853 netif_carrier_off(dev);
854 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { 852 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
855 napi_enable(&card->napi); 853 napi_enable(&card->napi);
856 napi_schedule(&card->napi); 854 napi_schedule(&card->napi);
@@ -1013,13 +1011,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1013 dev_warn(&card->gdev->dev, 1011 dev_warn(&card->gdev->dev,
1014 "The LAN is offline\n"); 1012 "The LAN is offline\n");
1015 card->lan_online = 0; 1013 card->lan_online = 0;
1016 goto out; 1014 goto contin;
1017 } 1015 }
1018 rc = -ENODEV; 1016 rc = -ENODEV;
1019 goto out_remove; 1017 goto out_remove;
1020 } else 1018 } else
1021 card->lan_online = 1; 1019 card->lan_online = 1;
1022 1020
1021contin:
1023 if ((card->info.type == QETH_CARD_TYPE_OSD) || 1022 if ((card->info.type == QETH_CARD_TYPE_OSD) ||
1024 (card->info.type == QETH_CARD_TYPE_OSX)) 1023 (card->info.type == QETH_CARD_TYPE_OSX))
1025 /* configure isolation level */ 1024 /* configure isolation level */
@@ -1038,7 +1037,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1038 goto out_remove; 1037 goto out_remove;
1039 } 1038 }
1040 card->state = CARD_STATE_SOFTSETUP; 1039 card->state = CARD_STATE_SOFTSETUP;
1041 netif_carrier_on(card->dev); 1040 if (card->lan_online)
1041 netif_carrier_on(card->dev);
1042 else
1043 netif_carrier_off(card->dev);
1042 1044
1043 qeth_set_allowed_threads(card, 0xffffffff, 0); 1045 qeth_set_allowed_threads(card, 0xffffffff, 0);
1044 if (recover_flag == CARD_STATE_RECOVER) { 1046 if (recover_flag == CARD_STATE_RECOVER) {
@@ -1055,7 +1057,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1055 } 1057 }
1056 /* let user_space know that device is online */ 1058 /* let user_space know that device is online */
1057 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1059 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1058out:
1059 mutex_unlock(&card->conf_mutex); 1060 mutex_unlock(&card->conf_mutex);
1060 mutex_unlock(&card->discipline_mutex); 1061 mutex_unlock(&card->discipline_mutex);
1061 return 0; 1062 return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 74d1401a5d5e..e227e465bfc4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -30,6 +30,7 @@
30 30
31#include "qeth_l3.h" 31#include "qeth_l3.h"
32 32
33
33static int qeth_l3_set_offline(struct ccwgroup_device *); 34static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *); 35static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *); 36static int qeth_l3_stop(struct net_device *);
@@ -455,8 +456,11 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
455 QETH_CARD_TEXT(card, 2, "sdiplist"); 456 QETH_CARD_TEXT(card, 2, "sdiplist");
456 QETH_CARD_HEX(card, 2, &card, sizeof(void *)); 457 QETH_CARD_HEX(card, 2, &card, sizeof(void *));
457 458
458 if (card->options.sniffer) 459 if ((card->state != CARD_STATE_UP &&
460 card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
459 return; 461 return;
462 }
463
460 spin_lock_irqsave(&card->ip_lock, flags); 464 spin_lock_irqsave(&card->ip_lock, flags);
461 tbd_list = card->ip_tbd_list; 465 tbd_list = card->ip_tbd_list;
462 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); 466 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -1796,7 +1800,8 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
1796 char buf[MAX_ADDR_LEN]; 1800 char buf[MAX_ADDR_LEN];
1797 1801
1798 QETH_CARD_TEXT(card, 4, "addmc"); 1802 QETH_CARD_TEXT(card, 4, "addmc");
1799 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1803 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1804 im4 = rcu_dereference(im4->next_rcu)) {
1800 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); 1805 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
1801 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1806 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1802 if (!ipm) 1807 if (!ipm)
@@ -1828,9 +1833,9 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1828 in_dev = in_dev_get(netdev); 1833 in_dev = in_dev_get(netdev);
1829 if (!in_dev) 1834 if (!in_dev)
1830 continue; 1835 continue;
1831 read_lock(&in_dev->mc_list_lock); 1836 rcu_read_lock();
1832 qeth_l3_add_mc(card, in_dev); 1837 qeth_l3_add_mc(card, in_dev);
1833 read_unlock(&in_dev->mc_list_lock); 1838 rcu_read_unlock();
1834 in_dev_put(in_dev); 1839 in_dev_put(in_dev);
1835 } 1840 }
1836} 1841}
@@ -1843,10 +1848,10 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
1843 in4_dev = in_dev_get(card->dev); 1848 in4_dev = in_dev_get(card->dev);
1844 if (in4_dev == NULL) 1849 if (in4_dev == NULL)
1845 return; 1850 return;
1846 read_lock(&in4_dev->mc_list_lock); 1851 rcu_read_lock();
1847 qeth_l3_add_mc(card, in4_dev); 1852 qeth_l3_add_mc(card, in4_dev);
1848 qeth_l3_add_vlan_mc(card); 1853 qeth_l3_add_vlan_mc(card);
1849 read_unlock(&in4_dev->mc_list_lock); 1854 rcu_read_unlock();
1850 in_dev_put(in4_dev); 1855 in_dev_put(in4_dev);
1851} 1856}
1852 1857
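
The two hunks above drop the in_dev->mc_list_lock rwlock in favor of RCU read-side protection, loading each list pointer through rcu_dereference(). A self-contained sketch of the same pattern, with a hypothetical node type standing in for the real ip_mc_list:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>

	struct mc_node {
		unsigned int addr;
		struct mc_node __rcu *next;
	};

	/* Walk an RCU-protected singly linked list the way qeth_l3_add_mc()
	 * now walks in4_dev->mc_list: readers hold rcu_read_lock() instead
	 * of the old read_lock(), and every pointer load goes through
	 * rcu_dereference() so it cannot be reordered past its use. */
	static void walk_mc_list(struct mc_node __rcu *head)
	{
		struct mc_node *n;

		rcu_read_lock();
		for (n = rcu_dereference(head); n != NULL;
		     n = rcu_dereference(n->next))
			pr_debug("mc addr %u\n", n->addr);
		rcu_read_unlock();
	}
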
@@ -2454,22 +2459,46 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
2454 return rc; 2459 return rc;
2455} 2460}
2456 2461
2457static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 2462static __u32 get_arp_entry_size(struct qeth_card *card,
2458 struct qeth_arp_query_data *qdata, int entry_size, 2463 struct qeth_arp_query_data *qdata,
2459 int uentry_size) 2464 struct qeth_arp_entrytype *type, __u8 strip_entries)
2460{ 2465{
2461 char *entry_ptr; 2466 __u32 rc;
2462 char *uentry_ptr; 2467 __u8 is_hsi;
2463 int i;
2464 2468
2465 entry_ptr = (char *)&qdata->data; 2469 is_hsi = qdata->reply_bits == 5;
2466 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset); 2470 if (type->ip == QETHARP_IP_ADDR_V4) {
2467 for (i = 0; i < qdata->no_entries; ++i) { 2471 QETH_CARD_TEXT(card, 4, "arpev4");
2468 /* strip off 32 bytes "media specific information" */ 2472 if (strip_entries) {
2469 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32); 2473 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
2470 entry_ptr += entry_size; 2474 sizeof(struct qeth_arp_qi_entry7_short);
2471 uentry_ptr += uentry_size; 2475 } else {
2476 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
2477 sizeof(struct qeth_arp_qi_entry7);
2478 }
2479 } else if (type->ip == QETHARP_IP_ADDR_V6) {
2480 QETH_CARD_TEXT(card, 4, "arpev6");
2481 if (strip_entries) {
2482 rc = is_hsi ?
2483 sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
2484 sizeof(struct qeth_arp_qi_entry7_short_ipv6);
2485 } else {
2486 rc = is_hsi ?
2487 sizeof(struct qeth_arp_qi_entry5_ipv6) :
2488 sizeof(struct qeth_arp_qi_entry7_ipv6);
2489 }
2490 } else {
2491 QETH_CARD_TEXT(card, 4, "arpinv");
2492 rc = 0;
2472 } 2493 }
2494
2495 return rc;
2496}
2497
2498static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
2499{
2500 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
2501 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
2473} 2502}
2474 2503
2475static int qeth_l3_arp_query_cb(struct qeth_card *card, 2504static int qeth_l3_arp_query_cb(struct qeth_card *card,
@@ -2478,72 +2507,77 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
2478 struct qeth_ipa_cmd *cmd; 2507 struct qeth_ipa_cmd *cmd;
2479 struct qeth_arp_query_data *qdata; 2508 struct qeth_arp_query_data *qdata;
2480 struct qeth_arp_query_info *qinfo; 2509 struct qeth_arp_query_info *qinfo;
2481 int entry_size;
2482 int uentry_size;
2483 int i; 2510 int i;
2511 int e;
2512 int entrybytes_done;
2513 int stripped_bytes;
2514 __u8 do_strip_entries;
2484 2515
2485 QETH_CARD_TEXT(card, 4, "arpquecb"); 2516 QETH_CARD_TEXT(card, 3, "arpquecb");
2486 2517
2487 qinfo = (struct qeth_arp_query_info *) reply->param; 2518 qinfo = (struct qeth_arp_query_info *) reply->param;
2488 cmd = (struct qeth_ipa_cmd *) data; 2519 cmd = (struct qeth_ipa_cmd *) data;
2520 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
2489 if (cmd->hdr.return_code) { 2521 if (cmd->hdr.return_code) {
2490 QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code); 2522 QETH_CARD_TEXT(card, 4, "arpcberr");
2523 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
2491 return 0; 2524 return 0;
2492 } 2525 }
2493 if (cmd->data.setassparms.hdr.return_code) { 2526 if (cmd->data.setassparms.hdr.return_code) {
2494 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 2527 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
2495 QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code); 2528 QETH_CARD_TEXT(card, 4, "setaperr");
2529 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
2496 return 0; 2530 return 0;
2497 } 2531 }
2498 qdata = &cmd->data.setassparms.data.query_arp; 2532 qdata = &cmd->data.setassparms.data.query_arp;
2499 switch (qdata->reply_bits) {
2500 case 5:
2501 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
2502 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2503 uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
2504 break;
2505 case 7:
2506 /* fall through to default */
2507 default:
2508 /* tr is the same as eth -> entry7 */
2509 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
2510 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2511 uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
2512 break;
2513 }
2514 /* check if there is enough room in userspace */
2515 if ((qinfo->udata_len - qinfo->udata_offset) <
2516 qdata->no_entries * uentry_size){
2517 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
2518 cmd->hdr.return_code = -ENOMEM;
2519 goto out_error;
2520 }
2521 QETH_CARD_TEXT_(card, 4, "anore%i",
2522 cmd->data.setassparms.hdr.number_of_replies);
2523 QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
2524 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); 2533 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
2525 2534
2526 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { 2535 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
2527 /* strip off "media specific information" */ 2536 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
2528 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size, 2537 entrybytes_done = 0;
2529 uentry_size); 2538 for (e = 0; e < qdata->no_entries; ++e) {
2530 } else 2539 char *cur_entry;
2531 /*copy entries to user buffer*/ 2540 __u32 esize;
2532 memcpy(qinfo->udata + qinfo->udata_offset, 2541 struct qeth_arp_entrytype *etype;
2533 (char *)&qdata->data, qdata->no_entries*uentry_size); 2542
2543 cur_entry = &qdata->data + entrybytes_done;
2544 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
2545 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
2546 QETH_CARD_TEXT(card, 4, "pmis");
2547 QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
2548 break;
2549 }
2550 esize = get_arp_entry_size(card, qdata, etype,
2551 do_strip_entries);
2552 QETH_CARD_TEXT_(card, 5, "esz%i", esize);
2553 if (!esize)
2554 break;
2555
2556 if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
2557 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
2558 cmd->hdr.return_code = -ENOMEM;
2559 goto out_error;
2560 }
2534 2561
2535 qinfo->no_entries += qdata->no_entries; 2562 memcpy(qinfo->udata + qinfo->udata_offset,
2536 qinfo->udata_offset += (qdata->no_entries*uentry_size); 2563 &qdata->data + entrybytes_done + stripped_bytes,
2564 esize);
2565 entrybytes_done += esize + stripped_bytes;
2566 qinfo->udata_offset += esize;
2567 ++qinfo->no_entries;
2568 }
2537 /* check if all replies received ... */ 2569 /* check if all replies received ... */
2538 if (cmd->data.setassparms.hdr.seq_no < 2570 if (cmd->data.setassparms.hdr.seq_no <
2539 cmd->data.setassparms.hdr.number_of_replies) 2571 cmd->data.setassparms.hdr.number_of_replies)
2540 return 1; 2572 return 1;
2573 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
2541 memcpy(qinfo->udata, &qinfo->no_entries, 4); 2574 memcpy(qinfo->udata, &qinfo->no_entries, 4);
2542 /* keep STRIP_ENTRIES flag so the user program can distinguish 2575 /* keep STRIP_ENTRIES flag so the user program can distinguish
2543 * stripped entries from normal ones */ 2576 * stripped entries from normal ones */
2544 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2577 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
2545 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 2578 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
2546 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 2579 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
2580 QETH_CARD_TEXT_(card, 4, "rc%i", 0);
2547 return 0; 2581 return 0;
2548out_error: 2582out_error:
2549 i = 0; 2583 i = 0;
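
The rewritten callback above no longer assumes one fixed entry size per reply; it walks the entries individually, deriving each size from the entry's protocol and the strip flag, and aborts with -ENOMEM before the user buffer would overflow. The loop shape in isolation, with a hypothetical record_size() standing in for get_arp_entry_size():

	#include <linux/string.h>

	/* Copy variable-size records into a bounded buffer, mirroring the
	 * per-entry loop in qeth_l3_arp_query_cb(); returns how many
	 * records fit, stopping cleanly before any overflow. */
	static int copy_records(char *dst, size_t dst_len, size_t *dst_off,
				const char *src, int nr, size_t strip)
	{
		size_t src_done = 0;
		int copied;

		for (copied = 0; copied < nr; copied++) {
			size_t esize = record_size(src + src_done);

			if (!esize)
				break;	/* unrecognized record type */
			if (dst_len - *dst_off < esize)
				break;	/* destination full */
			/* skip the stripped prefix, keep the payload */
			memcpy(dst + *dst_off, src + src_done + strip, esize);
			src_done += esize + strip;
			*dst_off += esize;
		}
		return copied;
	}
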
@@ -2566,45 +2600,86 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
2566 reply_cb, reply_param); 2600 reply_cb, reply_param);
2567} 2601}
2568 2602
2569static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 2603static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
2604 enum qeth_prot_versions prot,
2605 struct qeth_arp_query_info *qinfo)
2570{ 2606{
2571 struct qeth_cmd_buffer *iob; 2607 struct qeth_cmd_buffer *iob;
2572 struct qeth_arp_query_info qinfo = {0, }; 2608 struct qeth_ipa_cmd *cmd;
2573 int tmp; 2609 int tmp;
2574 int rc; 2610 int rc;
2575 2611
2612 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
2613
2614 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
2615 IPA_CMD_ASS_ARP_QUERY_INFO,
2616 sizeof(struct qeth_arp_query_data) - sizeof(char),
2617 prot);
2618 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2619 cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
2620 cmd->data.setassparms.data.query_arp.reply_bits = 0;
2621 cmd->data.setassparms.data.query_arp.no_entries = 0;
2622 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2623 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2624 qeth_l3_arp_query_cb, (void *)qinfo);
2625 if (rc) {
2626 tmp = rc;
2627 QETH_DBF_MESSAGE(2,
2628 "Error while querying ARP cache on %s: %s "
2629 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2630 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2631 }
2632
2633 return rc;
2634}
2635
2636static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
2637{
2638 struct qeth_arp_query_info qinfo = {0, };
2639 int rc;
2640
2576 QETH_CARD_TEXT(card, 3, "arpquery"); 2641 QETH_CARD_TEXT(card, 3, "arpquery");
2577 2642
2578 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 2643 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
2579 IPA_ARP_PROCESSING)) { 2644 IPA_ARP_PROCESSING)) {
2580 return -EOPNOTSUPP; 2645 QETH_CARD_TEXT(card, 3, "arpqnsup");
2646 rc = -EOPNOTSUPP;
2647 goto out;
2581 } 2648 }
2582 /* get size of userspace buffer and mask_bits -> 6 bytes */ 2649 /* get size of userspace buffer and mask_bits -> 6 bytes */
2583 if (copy_from_user(&qinfo, udata, 6)) 2650 if (copy_from_user(&qinfo, udata, 6)) {
2584 return -EFAULT; 2651 rc = -EFAULT;
2652 goto out;
2653 }
2585 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 2654 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
2586 if (!qinfo.udata) 2655 if (!qinfo.udata) {
2587 return -ENOMEM; 2656 rc = -ENOMEM;
2657 goto out;
2658 }
2588 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 2659 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
2589 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2660 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
2590 IPA_CMD_ASS_ARP_QUERY_INFO,
2591 sizeof(int), QETH_PROT_IPV4);
2592
2593 rc = qeth_l3_send_ipa_arp_cmd(card, iob,
2594 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
2595 qeth_l3_arp_query_cb, (void *)&qinfo);
2596 if (rc) { 2661 if (rc) {
2597 tmp = rc;
2598 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
2599 "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
2600 qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
2601 if (copy_to_user(udata, qinfo.udata, 4)) 2662 if (copy_to_user(udata, qinfo.udata, 4))
2602 rc = -EFAULT; 2663 rc = -EFAULT;
2664 goto free_and_out;
2603 } else { 2665 } else {
2604 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 2666#ifdef CONFIG_QETH_IPV6
2667 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
2668 /* fails in case of GuestLAN QDIO mode */
2669 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
2670 &qinfo);
2671 }
2672#endif
2673 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
2674 QETH_CARD_TEXT(card, 4, "qactf");
2605 rc = -EFAULT; 2675 rc = -EFAULT;
2676 goto free_and_out;
2677 }
2678 QETH_CARD_TEXT_(card, 4, "qacts");
2606 } 2679 }
2680free_and_out:
2607 kfree(qinfo.udata); 2681 kfree(qinfo.udata);
2682out:
2608 return rc; 2683 return rc;
2609} 2684}
2610 2685
@@ -2938,6 +3013,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
2938 3013
2939 /*fix header to TSO values ...*/ 3014 /*fix header to TSO values ...*/
2940 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 3015 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
3016 hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
2941 /*set values which are fix for the first approach ...*/ 3017 /*set values which are fix for the first approach ...*/
2942 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 3018 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2943 hdr->ext.imb_hdr_no = 1; 3019 hdr->ext.imb_hdr_no = 1;
@@ -3039,7 +3115,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3039 skb_pull(new_skb, ETH_HLEN); 3115 skb_pull(new_skb, ETH_HLEN);
3040 } 3116 }
3041 3117
3042 if (ipv == 6 && card->vlangrp && 3118 if (ipv != 4 && card->vlangrp &&
3043 vlan_tx_tag_present(new_skb)) { 3119 vlan_tx_tag_present(new_skb)) {
3044 skb_push(new_skb, VLAN_HLEN); 3120 skb_push(new_skb, VLAN_HLEN);
3045 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); 3121 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
@@ -3176,8 +3252,6 @@ static int qeth_l3_open(struct net_device *dev)
3176 card->state = CARD_STATE_UP; 3252 card->state = CARD_STATE_UP;
3177 netif_start_queue(dev); 3253 netif_start_queue(dev);
3178 3254
3179 if (!card->lan_online && netif_carrier_ok(dev))
3180 netif_carrier_off(dev);
3181 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { 3255 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
3182 napi_enable(&card->napi); 3256 napi_enable(&card->napi);
3183 napi_schedule(&card->napi); 3257 napi_schedule(&card->napi);
@@ -3449,13 +3523,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3449 dev_warn(&card->gdev->dev, 3523 dev_warn(&card->gdev->dev,
3450 "The LAN is offline\n"); 3524 "The LAN is offline\n");
3451 card->lan_online = 0; 3525 card->lan_online = 0;
3452 goto out; 3526 goto contin;
3453 } 3527 }
3454 rc = -ENODEV; 3528 rc = -ENODEV;
3455 goto out_remove; 3529 goto out_remove;
3456 } else 3530 } else
3457 card->lan_online = 1; 3531 card->lan_online = 1;
3458 3532
3533contin:
3459 rc = qeth_l3_setadapter_parms(card); 3534 rc = qeth_l3_setadapter_parms(card);
3460 if (rc) 3535 if (rc)
3461 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3536 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -3480,10 +3555,13 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3480 goto out_remove; 3555 goto out_remove;
3481 } 3556 }
3482 card->state = CARD_STATE_SOFTSETUP; 3557 card->state = CARD_STATE_SOFTSETUP;
3483 netif_carrier_on(card->dev);
3484 3558
3485 qeth_set_allowed_threads(card, 0xffffffff, 0); 3559 qeth_set_allowed_threads(card, 0xffffffff, 0);
3486 qeth_l3_set_ip_addr_list(card); 3560 qeth_l3_set_ip_addr_list(card);
3561 if (card->lan_online)
3562 netif_carrier_on(card->dev);
3563 else
3564 netif_carrier_off(card->dev);
3487 if (recover_flag == CARD_STATE_RECOVER) { 3565 if (recover_flag == CARD_STATE_RECOVER) {
3488 if (recovery_mode) 3566 if (recovery_mode)
3489 qeth_l3_open(card->dev); 3567 qeth_l3_open(card->dev);
@@ -3496,7 +3574,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3496 } 3574 }
3497 /* let user_space know that device is online */ 3575 /* let user_space know that device is online */
3498 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3576 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
3499out:
3500 mutex_unlock(&card->conf_mutex); 3577 mutex_unlock(&card->conf_mutex);
3501 mutex_unlock(&card->discipline_mutex); 3578 mutex_unlock(&card->discipline_mutex);
3502 return 0; 3579 return 0;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb83bb0..f3cf924a2cd9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
320 "changed. The Linux SCSI layer does not " 320 "changed. The Linux SCSI layer does not "
321 "automatically adjust these parameters.\n"); 321 "automatically adjust these parameters.\n");
322 322
323 if (scmd->request->cmd_flags & REQ_HARDBARRIER) 323 /*
324 /* 324 * Pass the UA upwards for a determination in the completion
325 * barrier requests should always retry on UA 325 * functions.
326 * otherwise block will get a spurious error 326 */
327 */ 327 return SUCCESS;
328 return NEEDS_RETRY;
329 else
330 /*
331 * for normal (non barrier) commands, pass the
332 * UA upwards for a determination in the
333 * completion functions
334 */
335 return SUCCESS;
336 328
337 /* these three are not supported */ 329 /* these three are not supported */
338 case COPY_ABORTED: 330 case COPY_ABORTED:
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b7aa93..dd5e1ac22251 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2343,8 +2343,11 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
2343 2343
2344 /* 2344 /*
2345 * CTS flow control flag and modem status interrupts 2345 * CTS flow control flag and modem status interrupts
2346 * Only disable MSI if no threads are waiting in
2347 * serial_core::uart_wait_modem_status
2346 */ 2348 */
2347 up->ier &= ~UART_IER_MSI; 2349 if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
2350 up->ier &= ~UART_IER_MSI;
2348 if (!(up->bugs & UART_BUG_NOMSR) && 2351 if (!(up->bugs & UART_BUG_NOMSR) &&
2349 UART_ENABLE_MS(&up->port, termios->c_cflag)) 2352 UART_ENABLE_MS(&up->port, termios->c_cflag))
2350 up->ier |= UART_IER_MSI; 2353 up->ier |= UART_IER_MSI;
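
The 8250 change above stops masking UART_IER_MSI while a thread sleeps in uart_wait_modem_status(): clearing the bit there would suppress the very interrupt the TIOCMIWAIT sleeper is waiting for. Both halves of that pattern as a sketch, where msr_event stands in for the driver's real wake condition:

	#include <linux/serial_reg.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(delta_msr_wait);
	static int msr_event;		/* set by the (hypothetical) ISR */

	/* Waiter side, what TIOCMIWAIT boils down to: sleep until the
	 * interrupt handler flags a modem-status change. */
	static int wait_for_msr_change(void)
	{
		return wait_event_interruptible(delta_msr_wait, msr_event);
	}

	/* Masking side, the fix above: leave the modem-status interrupt
	 * enabled while anyone is queued, or the sleeper never wakes. */
	static void maybe_mask_msi(unsigned char *ier)
	{
		if (!waitqueue_active(&delta_msr_wait))
			*ier &= ~UART_IER_MSI;
	}
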
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d35a0aa..842e3b2a02b1 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = {
2285 2285
2286static const struct pci_device_id softmodem_blacklist[] = { 2286static const struct pci_device_id softmodem_blacklist[] = {
2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ 2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
2288 { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
2289 { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
2288}; 2290};
2289 2291
2290/* 2292/*
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2863 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 2865 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
2864 0, 0, 2866 0, 0,
2865 pbn_b0_4_1152000 }, 2867 pbn_b0_4_1152000 },
2868 { PCI_VENDOR_ID_OXSEMI, 0x9505,
2869 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2870 pbn_b0_bt_2_921600 },
2866 2871
2867 /* 2872 /*
2868 * The below card is a little controversial since it is the 2873 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b18eab..19cac9f610fd 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
23#include <linux/tty.h> 23#include <linux/tty.h>
24#include <linux/tty_flip.h> 24#include <linux/tty_flip.h>
25#include <linux/serial_core.h> 25#include <linux/serial_core.h>
26#include <linux/dma-mapping.h>
26 27
27#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 28#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
28 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 29 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
33#include <asm/gpio.h> 34#include <asm/gpio.h>
34#include <mach/bfin_serial_5xx.h> 35#include <mach/bfin_serial_5xx.h>
35 36
36#ifdef CONFIG_SERIAL_BFIN_DMA 37#include <asm/dma.h>
37#include <linux/dma-mapping.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#endif
42 41
43#ifdef CONFIG_SERIAL_BFIN_MODULE 42#ifdef CONFIG_SERIAL_BFIN_MODULE
44# undef CONFIG_EARLY_PRINTK 43# undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
360 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); 359 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
361 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 360 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
362 uart->port.icount.tx++; 361 uart->port.icount.tx++;
363 SSYNC();
364 } 362 }
365 363
366 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 364 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port)
688 686
689# ifdef CONFIG_BF54x 687# ifdef CONFIG_BF54x
690 { 688 {
689 /*
690 * UART2 and UART3 on BF548 share interrupt PINs and DMA
691 * controllers with SPORT2 and SPORT3. UART rx and tx
 692 * interrupts are generated in PIO mode only when their peripheral
 693 * mapping registers are configured properly, which means the
 694 * corresponding DMA channels must be requested in PIO mode as well.
695 */
691 unsigned uart_dma_ch_rx, uart_dma_ch_tx; 696 unsigned uart_dma_ch_rx, uart_dma_ch_tx;
692 697
693 switch (uart->port.irq) { 698 switch (uart->port.irq) {
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port)
734 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 739 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
735 IRQF_DISABLED, "BFIN_UART_CTS", uart)) { 740 IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
736 uart->cts_pin = -1; 741 uart->cts_pin = -1;
737 pr_info("Unable to attach BlackFin UART CTS interrupt.\ 742 pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
738 So, disable it.\n");
739 } 743 }
740 } 744 }
741 if (uart->rts_pin >= 0) { 745 if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port)
747 if (request_irq(uart->status_irq, 751 if (request_irq(uart->status_irq,
748 bfin_serial_mctrl_cts_int, 752 bfin_serial_mctrl_cts_int,
749 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { 753 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
750 pr_info("Unable to attach BlackFin UART Modem \ 754 pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
751 Status interrupt.\n");
752 } 755 }
753 756
754 /* CTS RTS PINs are negative assertive. */ 757 /* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
846 if (termios->c_cflag & CMSPAR) 849 if (termios->c_cflag & CMSPAR)
847 lcr |= STP; 850 lcr |= STP;
848 851
852 spin_lock_irqsave(&uart->port.lock, flags);
853
849 port->read_status_mask = OE; 854 port->read_status_mask = OE;
850 if (termios->c_iflag & INPCK) 855 if (termios->c_iflag & INPCK)
851 port->read_status_mask |= (FE | PE); 856 port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
875 if (termios->c_line != N_IRDA) 880 if (termios->c_line != N_IRDA)
876 quot -= ANOMALY_05000230; 881 quot -= ANOMALY_05000230;
877 882
878 spin_lock_irqsave(&uart->port.lock, flags);
879
880 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); 883 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
881 884
882 /* Disable UART */ 885 /* Disable UART */
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port,
1321 struct bfin_serial_port *uart; 1324 struct bfin_serial_port *uart;
1322 struct ktermios t; 1325 struct ktermios t;
1323 1326
1327#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1328 /*
1329 * If we are using early serial, don't let the normal console rewind
 1330 * the log buffer, since that causes things to be printed multiple times
1331 */
1332 bfin_serial_console.flags &= ~CON_PRINTBUFFER;
1333#endif
1334
1324 if (port == -1 || port >= nr_active_ports) 1335 if (port == -1 || port >= nr_active_ports)
1325 port = 0; 1336 port = 0;
1326 bfin_serial_init_ports(); 1337 bfin_serial_init_ports();
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c9a416..3374618300af 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/console.h> 19#include <linux/console.h>
20#include <linux/vt_kern.h> 20#include <linux/vt_kern.h>
21#include <linux/input.h>
21 22
22#define MAX_CONFIG_LEN 40 23#define MAX_CONFIG_LEN 40
23 24
@@ -37,6 +38,61 @@ static struct tty_driver *kgdb_tty_driver;
37static int kgdb_tty_line; 38static int kgdb_tty_line;
38 39
39#ifdef CONFIG_KDB_KEYBOARD 40#ifdef CONFIG_KDB_KEYBOARD
41static int kgdboc_reset_connect(struct input_handler *handler,
42 struct input_dev *dev,
43 const struct input_device_id *id)
44{
45 input_reset_device(dev);
46
 47 /* Return an error - we do not want to bind, just to reset */
48 return -ENODEV;
49}
50
51static void kgdboc_reset_disconnect(struct input_handle *handle)
52{
53 /* We do not expect anyone to actually bind to us */
54 BUG();
55}
56
57static const struct input_device_id kgdboc_reset_ids[] = {
58 {
59 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
60 .evbit = { BIT_MASK(EV_KEY) },
61 },
62 { }
63};
64
65static struct input_handler kgdboc_reset_handler = {
66 .connect = kgdboc_reset_connect,
67 .disconnect = kgdboc_reset_disconnect,
68 .name = "kgdboc_reset",
69 .id_table = kgdboc_reset_ids,
70};
71
72static DEFINE_MUTEX(kgdboc_reset_mutex);
73
74static void kgdboc_restore_input_helper(struct work_struct *dummy)
75{
76 /*
77 * We need to take a mutex to prevent several instances of
78 * this work running on different CPUs so they don't try
79 * to register again already registered handler.
80 */
81 mutex_lock(&kgdboc_reset_mutex);
82
83 if (input_register_handler(&kgdboc_reset_handler) == 0)
84 input_unregister_handler(&kgdboc_reset_handler);
85
86 mutex_unlock(&kgdboc_reset_mutex);
87}
88
89static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
90
91static void kgdboc_restore_input(void)
92{
93 schedule_work(&kgdboc_restore_input_work);
94}
95
40static int kgdboc_register_kbd(char **cptr) 96static int kgdboc_register_kbd(char **cptr)
41{ 97{
42 if (strncmp(*cptr, "kbd", 3) == 0) { 98 if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@ static void kgdboc_unregister_kbd(void)
64 i--; 120 i--;
65 } 121 }
66 } 122 }
123 flush_work_sync(&kgdboc_restore_input_work);
67} 124}
68#else /* ! CONFIG_KDB_KEYBOARD */ 125#else /* ! CONFIG_KDB_KEYBOARD */
69#define kgdboc_register_kbd(x) 0 126#define kgdboc_register_kbd(x) 0
70#define kgdboc_unregister_kbd() 127#define kgdboc_unregister_kbd()
128#define kgdboc_restore_input()
71#endif /* ! CONFIG_KDB_KEYBOARD */ 129#endif /* ! CONFIG_KDB_KEYBOARD */
72 130
73static int kgdboc_option_setup(char *opt) 131static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@ static void kgdboc_post_exp_handler(void)
231 dbg_restore_graphics = 0; 289 dbg_restore_graphics = 0;
232 con_debug_leave(); 290 con_debug_leave();
233 } 291 }
292 kgdboc_restore_input();
234} 293}
235 294
236static struct kgdb_io kgdboc_io_ops = { 295static struct kgdb_io kgdboc_io_ops = {
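
The kgdboc additions above reset keyboard state on debugger exit with a deliberately unbindable input handler: connect() resets each matching device and then returns -ENODEV, and registering/unregistering the handler from a work item visits every input device outside the exception context. The trick reduced to its core:

	#include <linux/bitops.h>
	#include <linux/input.h>

	static int reset_connect(struct input_handler *handler,
				 struct input_dev *dev,
				 const struct input_device_id *id)
	{
		input_reset_device(dev);	/* release stuck keys, resync LEDs */
		return -ENODEV;			/* decline the bind on purpose */
	}

	static void reset_disconnect(struct input_handle *handle)
	{
		/* never bound, so never called */
	}

	static const struct input_device_id reset_ids[] = {
		{ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		  .evbit = { BIT_MASK(EV_KEY) } },
		{ }
	};

	static struct input_handler reset_handler = {
		.connect    = reset_connect,
		.disconnect = reset_disconnect,
		.name       = "reset_sketch",
		.id_table   = reset_ids,
	};

	/* Registering walks all input devices and calls connect() on each
	 * match; unregistering right away leaves nothing bound, so the net
	 * effect is simply "reset every keyboard". */
	static void reset_all_keyboards(void)
	{
		if (input_register_handler(&reset_handler) == 0)
			input_unregister_handler(&reset_handler);
	}
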
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index fd0d1b98901c..09615b51d591 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -90,8 +90,8 @@ struct clk_rate_round_data {
90static long clk_rate_round_helper(struct clk_rate_round_data *rounder) 90static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
91{ 91{
92 unsigned long rate_error, rate_error_prev = ~0UL; 92 unsigned long rate_error, rate_error_prev = ~0UL;
93 unsigned long rate_best_fit = rounder->rate;
94 unsigned long highest, lowest, freq; 93 unsigned long highest, lowest, freq;
94 long rate_best_fit = -ENOENT;
95 int i; 95 int i;
96 96
97 highest = 0; 97 highest = 0;
@@ -146,7 +146,7 @@ long clk_rate_table_round(struct clk *clk,
146 }; 146 };
147 147
148 if (clk->nr_freqs < 1) 148 if (clk->nr_freqs < 1)
149 return 0; 149 return -ENOSYS;
150 150
151 return clk_rate_round_helper(&table_round); 151 return clk_rate_round_helper(&table_round);
152} 152}
@@ -541,6 +541,98 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
541} 541}
542EXPORT_SYMBOL_GPL(clk_round_rate); 542EXPORT_SYMBOL_GPL(clk_round_rate);
543 543
544long clk_round_parent(struct clk *clk, unsigned long target,
545 unsigned long *best_freq, unsigned long *parent_freq,
546 unsigned int div_min, unsigned int div_max)
547{
548 struct cpufreq_frequency_table *freq, *best = NULL;
549 unsigned long error = ULONG_MAX, freq_high, freq_low, div;
550 struct clk *parent = clk_get_parent(clk);
551
552 if (!parent) {
553 *parent_freq = 0;
554 *best_freq = clk_round_rate(clk, target);
555 return abs(target - *best_freq);
556 }
557
558 for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
559 freq++) {
560 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
561 continue;
562
563 if (unlikely(freq->frequency / target <= div_min - 1)) {
564 unsigned long freq_max;
565
566 freq_max = (freq->frequency + div_min / 2) / div_min;
567 if (error > target - freq_max) {
568 error = target - freq_max;
569 best = freq;
570 if (best_freq)
571 *best_freq = freq_max;
572 }
573
574 pr_debug("too low freq %lu, error %lu\n", freq->frequency,
575 target - freq_max);
576
577 if (!error)
578 break;
579
580 continue;
581 }
582
583 if (unlikely(freq->frequency / target >= div_max)) {
584 unsigned long freq_min;
585
586 freq_min = (freq->frequency + div_max / 2) / div_max;
587 if (error > freq_min - target) {
588 error = freq_min - target;
589 best = freq;
590 if (best_freq)
591 *best_freq = freq_min;
592 }
593
594 pr_debug("too high freq %lu, error %lu\n", freq->frequency,
595 freq_min - target);
596
597 if (!error)
598 break;
599
600 continue;
601 }
602
603 div = freq->frequency / target;
604 freq_high = freq->frequency / div;
605 freq_low = freq->frequency / (div + 1);
606
607 if (freq_high - target < error) {
608 error = freq_high - target;
609 best = freq;
610 if (best_freq)
611 *best_freq = freq_high;
612 }
613
614 if (target - freq_low < error) {
615 error = target - freq_low;
616 best = freq;
617 if (best_freq)
618 *best_freq = freq_low;
619 }
620
621 pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
622 freq->frequency, div, freq_high, div + 1, freq_low,
623 *best_freq, best->frequency);
624
625 if (!error)
626 break;
627 }
628
629 if (parent_freq)
630 *parent_freq = best->frequency;
631
632 return error;
633}
634EXPORT_SYMBOL_GPL(clk_round_parent);
635
544#ifdef CONFIG_PM 636#ifdef CONFIG_PM
545static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) 637static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
546{ 638{
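
clk_round_parent(), added above, scans the parent clock's cpufreq frequency table for the parent rate and integer divider (within div_min..div_max) that land closest to a target child rate, reporting the best child rate, the chosen parent rate, and the remaining error. A hedged usage sketch, where child_clk is a hypothetical struct clk whose parent has a populated freq_table:

	unsigned long best = 0, parent_rate = 0;
	long err;

	/* Ask for the closest achievable rate to 48 MHz using integer
	 * dividers 1..32 off the parent's table entries. */
	err = clk_round_parent(child_clk, 48000000, &best, &parent_rate,
			       1, 32);
	if (err >= 0)
		pr_info("child %lu Hz from parent %lu Hz (error %ld Hz)\n",
			best, parent_rate, err);
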
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 873a99ff8f64..e5e9e6735f7d 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -79,7 +79,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
79 * Register the IRQ position with the global IRQ map, then insert 79 * Register the IRQ position with the global IRQ map, then insert
80 * it in to the radix tree. 80 * it in to the radix tree.
81 */ 81 */
82 irq_reserve_irqs(irq, 1); 82 irq_reserve_irq(irq);
83 83
84 raw_spin_lock_irqsave(&intc_big_lock, flags); 84 raw_spin_lock_irqsave(&intc_big_lock, flags);
85 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq)); 85 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
index 4187cce20ffd..a3677c9dfe36 100644
--- a/drivers/sh/intc/dynamic.c
+++ b/drivers/sh/intc/dynamic.c
@@ -60,5 +60,5 @@ void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
60 int i; 60 int i;
61 61
62 for (i = 0; i < nr_vecs; i++) 62 for (i = 0; i < nr_vecs; i++)
63 irq_reserve_irqs(evt2irq(vectors[i].vect), 1); 63 irq_reserve_irq(evt2irq(vectors[i].vect));
64} 64}
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf48b74c..8a5caa30b85f 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN
102 102
103config ATH6KL_CFG80211 103config ATH6KL_CFG80211
104 bool "CFG80211 support" 104 bool "CFG80211 support"
105 depends on ATH6K_LEGACY 105 depends on ATH6K_LEGACY && CFG80211
106 help 106 help
107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. 107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
108 108
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
index 22c6c6659f5b..ee8b47746a15 100644
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
@@ -285,9 +285,9 @@ A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_I
285 do { 285 do {
286 286
287 /* check if host supports scatter requests and it meets our requirements */ 287 /* check if host supports scatter requests and it meets our requirements */
288 if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 288 if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
289 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n", 289 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
290 device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ)); 290 device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
291 status = A_ENOTSUP; 291 status = A_ENOTSUP;
292 break; 292 break;
293 } 293 }
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c16735..a659f7047373 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A
1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { 1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
1127 A_UINT32 param; 1127 A_UINT32 param;
1128 1128
1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size); 1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
1130 1130
1131 if (status != A_OK) { 1131 if (status != A_OK) {
1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); 1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
3030 A_UINT8 csumDest=0; 3030 A_UINT8 csumDest=0;
3031 A_UINT8 csum=skb->ip_summed; 3031 A_UINT8 csum=skb->ip_summed;
3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){ 3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){
3033 csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR); 3033 csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
3034 sizeof(ATH_LLC_SNAP_HDR));
3034 csumDest=skb->csum_offset+csumStart; 3035 csumDest=skb->csum_offset+csumStart;
3035 } 3036 }
3036#endif 3037#endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29eeb4d..7269d0a1d618 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
808 808
809static int 809static int
810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
811 A_UINT8 key_index, const A_UINT8 *mac_addr, 811 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
812 struct key_params *params) 812 struct key_params *params)
813{ 813{
814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
901 901
902static int 902static int
903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
904 A_UINT8 key_index, const A_UINT8 *mac_addr) 904 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
905{ 905{
906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
907 907
@@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
936 936
937static int 937static int
938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, 938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
939 A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie, 939 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
940 void *cookie,
940 void (*callback)(void *cookie, struct key_params*)) 941 void (*callback)(void *cookie, struct key_params*))
941{ 942{
942 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 943 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa8669585..b68a7e5173be 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if)
165 batman_if->net_dev->dev_addr, ETH_ALEN); 165 batman_if->net_dev->dev_addr, ETH_ALEN);
166} 166}
167 167
168static void check_known_mac_addr(uint8_t *addr) 168static void check_known_mac_addr(struct net_device *net_dev)
169{ 169{
170 struct batman_if *batman_if; 170 struct batman_if *batman_if;
171 171
@@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr)
175 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 175 (batman_if->if_status != IF_TO_BE_ACTIVATED))
176 continue; 176 continue;
177 177
178 if (!compare_orig(batman_if->net_dev->dev_addr, addr)) 178 if (batman_if->net_dev == net_dev)
179 continue;
180
181 if (!compare_orig(batman_if->net_dev->dev_addr,
182 net_dev->dev_addr))
179 continue; 183 continue;
180 184
181 pr_warning("The newly added mac address (%pM) already exists " 185 pr_warning("The newly added mac address (%pM) already exists "
182 "on: %s\n", addr, batman_if->net_dev->name); 186 "on: %s\n", net_dev->dev_addr,
187 batman_if->net_dev->name);
183 pr_warning("It is strongly recommended to keep mac addresses " 188 pr_warning("It is strongly recommended to keep mac addresses "
184 "unique to avoid problems!\n"); 189 "unique to avoid problems!\n");
185 } 190 }
@@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
430 atomic_set(&batman_if->refcnt, 0); 435 atomic_set(&batman_if->refcnt, 0);
431 hardif_hold(batman_if); 436 hardif_hold(batman_if);
432 437
433 check_known_mac_addr(batman_if->net_dev->dev_addr); 438 check_known_mac_addr(batman_if->net_dev);
434 439
435 spin_lock(&if_list_lock); 440 spin_lock(&if_list_lock);
436 list_add_tail_rcu(&batman_if->list, &if_list); 441 list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@ static int hard_if_event(struct notifier_block *this,
515 goto out; 520 goto out;
516 } 521 }
517 522
518 check_known_mac_addr(batman_if->net_dev->dev_addr); 523 check_known_mac_addr(batman_if->net_dev);
519 update_mac_addresses(batman_if); 524 update_mac_addresses(batman_if);
520 525
521 bat_priv = netdev_priv(batman_if->soft_iface); 526 bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 90102631330b..657b69e6b957 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
1000 1000
1001/* find a suitable router for this originator, and use 1001/* find a suitable router for this originator, and use
1002 * bonding if possible. */ 1002 * bonding if possible. */
1003struct neigh_node *find_router(struct orig_node *orig_node, 1003struct neigh_node *find_router(struct bat_priv *bat_priv,
1004 struct orig_node *orig_node,
1004 struct batman_if *recv_if) 1005 struct batman_if *recv_if)
1005{ 1006{
1006 struct bat_priv *bat_priv;
1007 struct orig_node *primary_orig_node; 1007 struct orig_node *primary_orig_node;
1008 struct orig_node *router_orig; 1008 struct orig_node *router_orig;
1009 struct neigh_node *router, *first_candidate, *best_router; 1009 struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node,
1019 /* without bonding, the first node should 1019 /* without bonding, the first node should
1020 * always choose the default router. */ 1020 * always choose the default router. */
1021 1021
1022 if (!recv_if)
1023 return orig_node->router;
1024
1025 bat_priv = netdev_priv(recv_if->soft_iface);
1026 bonding_enabled = atomic_read(&bat_priv->bonding_enabled); 1022 bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
1027 1023
1028 if (!bonding_enabled) 1024 if ((!recv_if) && (!bonding_enabled))
1029 return orig_node->router; 1025 return orig_node->router;
1030 1026
1031 router_orig = orig_node->router->orig_node; 1027 router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb,
1154 orig_node = ((struct orig_node *) 1150 orig_node = ((struct orig_node *)
1155 hash_find(bat_priv->orig_hash, unicast_packet->dest)); 1151 hash_find(bat_priv->orig_hash, unicast_packet->dest));
1156 1152
1157 router = find_router(orig_node, recv_if); 1153 router = find_router(bat_priv, orig_node, recv_if);
1158 1154
1159 if (!router) { 1155 if (!router) {
1160 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); 1156 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99df3706..92674c8d9c03 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); 38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); 39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); 40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
41struct neigh_node *find_router(struct orig_node *orig_node, 41struct neigh_node *find_router(struct bat_priv *bat_priv,
42 struct batman_if *recv_if); 42 struct orig_node *orig_node, struct batman_if *recv_if);
43void update_bonding_candidates(struct bat_priv *bat_priv, 43void update_bonding_candidates(struct bat_priv *bat_priv,
44 struct orig_node *orig_node); 44 struct orig_node *orig_node);
45 45
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d69c03..0459413ff67f 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
224 if (!orig_node) 224 if (!orig_node)
225 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 225 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
226 226
227 router = find_router(orig_node, NULL); 227 router = find_router(bat_priv, orig_node, NULL);
228 228
229 if (!router) 229 if (!router)
230 goto unlock; 230 goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe24d999..fead9c56162e 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1001 } 1001 }
1002#endif 1002#endif
1003 case IOCTL_BE_BUCKET_SIZE: 1003 case IOCTL_BE_BUCKET_SIZE:
1004 Adapter->BEBucketSize = *(PULONG)arg; 1004 Status = 0;
1005 Status = STATUS_SUCCESS; 1005 if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
1006 Status = -EFAULT;
1006 break; 1007 break;
1007 1008
1008 case IOCTL_RTPS_BUCKET_SIZE: 1009 case IOCTL_RTPS_BUCKET_SIZE:
1009 Adapter->rtPSBucketSize = *(PULONG)arg; 1010 Status = 0;
1010 Status = STATUS_SUCCESS; 1011 if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
1012 Status = -EFAULT;
1011 break; 1013 break;
1012 case IOCTL_CHIP_RESET: 1014 case IOCTL_CHIP_RESET:
1013 { 1015 {
@@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1028 case IOCTL_QOS_THRESHOLD: 1030 case IOCTL_QOS_THRESHOLD:
1029 { 1031 {
1030 USHORT uiLoopIndex; 1032 USHORT uiLoopIndex;
1031 for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++) 1033
1032 { 1034 Status = 0;
1033 Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg; 1035 for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
1036 if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
1037 (unsigned long __user *)arg)) {
1038 Status = -EFAULT;
1039 break;
1040 }
1034 } 1041 }
1035 Status = STATUS_SUCCESS;
1036 break; 1042 break;
1037 } 1043 }
1038 1044
@@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1093 } 1099 }
1094 case IOCTL_BCM_GET_CURRENT_STATUS: 1100 case IOCTL_BCM_GET_CURRENT_STATUS:
1095 { 1101 {
1096 LINK_STATE *plink_state = NULL; 1102 LINK_STATE plink_state;
1103
1097 /* Copy Ioctl Buffer structure */ 1104 /* Copy Ioctl Buffer structure */
1098 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) 1105 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
1099 { 1106 {
@@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1101 Status = -EFAULT; 1108 Status = -EFAULT;
1102 break; 1109 break;
1103 } 1110 }
1104 plink_state = (LINK_STATE*)arg; 1111 if (IoBuffer.OutputLength != sizeof(plink_state)) {
1105 plink_state->bIdleMode = (UCHAR)Adapter->IdleMode; 1112 Status = -EINVAL;
1106 plink_state->bShutdownMode = Adapter->bShutStatus; 1113 break;
1107 plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus; 1114 }
1108 if(copy_to_user(IoBuffer.OutputBuffer, 1115
1109 (PUCHAR)plink_state, (UINT)IoBuffer.OutputLength)) 1116 if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
1110 { 1117 Status = -EFAULT;
1118 break;
1119 }
1120 plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
1121 plink_state.bShutdownMode = Adapter->bShutStatus;
1122 plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
1123 if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
1111 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); 1124 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
1112 Status = -EFAULT; 1125 Status = -EFAULT;
1113 break; 1126 break;
@@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1331 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status); 1344 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
1332 return -EFAULT; 1345 return -EFAULT;
1333 } 1346 }
1334 uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */ 1347 if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
1348 return -EFAULT;
1349
1335 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) 1350 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
1336 { 1351 {
1337 1352
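
The Bcmchar hunks above replace direct dereferences of the raw ioctl argument (*(PULONG)arg) with get_user() and bounded copy_from_user()/copy_to_user() calls, so a bad userspace pointer faults safely instead of crashing the kernel. The pattern for a scalar argument, roughly:

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	static long sketch_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
	{
		unsigned long val;

		/* Never dereference a user pointer directly; get_user()
		 * validates the address and returns -EFAULT on failure. */
		if (get_user(val, (unsigned long __user *)arg))
			return -EFAULT;
		/* ... operate on val ... */
		return 0;
	}
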
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb9b116..c8f1cf1b4409 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@ Contact Info:
90============= 90=============
91Brett Rudley brudley@broadcom.com 91Brett Rudley brudley@broadcom.com
92Henry Ptasinski henryp@broadcom.com 92Henry Ptasinski henryp@broadcom.com
93Nohee Ko noheek@broadcom.com 93Dowan Kim dowan@broadcom.com
94 94
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d300b531..dbf904184899 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@ Contact
45===== 45=====
46Brett Rudley <brudley@broadcom.com> 46Brett Rudley <brudley@broadcom.com>
47Henry Ptasinski <henryp@broadcom.com> 47Henry Ptasinski <henryp@broadcom.com>
48Nohee Ko <noheek@broadcom.com> 48Dowan Kim <dowan@broadcom.com>
49 49
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5f7492..9335f02029aa 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
2222 ASSERT(net); 2222 ASSERT(net);
2223 2223
2224 ASSERT(!net->netdev_ops); 2224 ASSERT(!net->netdev_ops);
2225 net->netdev_ops = &dhd_ops_virt;
2226
2227 net->netdev_ops = &dhd_ops_pri; 2225 net->netdev_ops = &dhd_ops_pri;
2228 2226
2229 /* 2227 /*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488d9c72..ea0825238d53 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
 				   struct net_device *dev,
 				   u8 key_idx);
 static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr,
+			       u8 key_idx, bool pairwise, const u8 *mac_addr,
 			       struct key_params *params);
 static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr);
+			       u8 key_idx, bool pairwise, const u8 *mac_addr);
 static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr,
+			       u8 key_idx, bool pairwise, const u8 *mac_addr,
 			       void *cookie, void (*callback) (void *cookie,
 							       struct
 							       key_params *
@@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr,
 		    struct key_params *params)
 {
 	struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr)
+		    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
 	struct wl_wsec_key key;
 	s32 err = 0;
@@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr, void *cookie,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
 		    void (*callback) (void *cookie, struct key_params * params))
 {
 	struct key_params params;
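Note on the brcmfmac hunks above: they track a cfg80211 interface change in which the add_key/del_key/get_key callbacks gained an explicit bool pairwise argument instead of leaving drivers to infer the key type from mac_addr. A hedged stub of the new callback shape (the body and the example_ prefix are illustrative, not driver code):

	static s32 example_add_key(struct wiphy *wiphy, struct net_device *dev,
				   u8 key_idx, bool pairwise, const u8 *mac_addr,
				   struct key_params *params)
	{
		/* pairwise now states the key type explicitly */
		if (pairwise && mac_addr == NULL)
			return -EINVAL;	/* a pairwise key needs a peer address */
		return 0;
	}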
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c8cb9a..0e740b8dafc3 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file)
 		goto oops;
 	}
 
-	err = -EINTR;
-	if(signal_pending(current))
-		goto oops;
-
 	/* Set ownership of /proc/cpia/videoX to current user */
 	if(cam->proc_entry)
-		cam->proc_entry->uid = current_uid();
+		cam->proc_entry->uid = current_euid();
 
 	/* set mark for loading first frame uncompressed */
 	cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487531c2..20d509836d9e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
 	pid = kernel_thread (exec_mknod, (void *)info, 0);
 
 	// initialize application information
-	info->appcnt = 0;
 
 //	if (ft1000_flarion_cnt == 0) {
 //
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478d5542..a99e900ec4c9 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context)
 			 recvlen, requestid);
 
 		icmsghdrp = (struct icmsg_hdr *)&buf[
-			sizeof(struct vmbuspipe_hdr)];
-
-		icmsghdrp = (struct icmsg_hdr *)&buf[
 			sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cba8307..9618c7997461 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	int retval, i;
 	struct stream_info *stream;
 	struct snd_sst_mmap_buff_entry *buf_entry;
+	struct snd_sst_mmap_buff_entry *tmp_buf;
 
 	pr_debug("sst:called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
-	BUG_ON(!mmap_buf);
 
 	stream = &sst_drv_ctx->streams[str_id];
 	if (stream->mmapped != true)
@@ -262,14 +262,24 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 
+	tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
+	if (!tmp_buf)
+		return -ENOMEM;
+	if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
+			mmap_buf->entries * sizeof(*tmp_buf))) {
+		retval = -EFAULT;
+		goto out_free;
+	}
+
 	pr_debug("sst:new buffers count %d status %d\n",
 		mmap_buf->entries, stream->status);
-	buf_entry = mmap_buf->buff;
+	buf_entry = tmp_buf;
 	for (i = 0; i < mmap_buf->entries; i++) {
-		BUG_ON(!buf_entry);
 		bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
-		if (!bufs)
-			return -ENOMEM;
+		if (!bufs) {
+			retval = -ENOMEM;
+			goto out_free;
+		}
 		bufs->size = buf_entry->size;
 		bufs->offset = buf_entry->offset;
 		bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 			if (sst_play_frame(str_id) < 0) {
 				pr_warn("sst: play frames fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
 				pr_warn("sst: capture frame fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		}
 	}
@@ -314,6 +326,9 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
 	pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+
+out_free:
+	kfree(tmp_buf);
 	return retval;
 }
 
@@ -377,7 +392,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
 {
 	struct sst_stream_bufs *stream_bufs;
 	unsigned long index, mmap_len;
-	unsigned char *bufp;
+	unsigned char __user *bufp;
 	unsigned long size, copied_size;
 	int retval = 0, add_to_list = 0;
 	static int sent_offset;
@@ -512,9 +527,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
 		/* copy to user */
 		list_for_each_entry_safe(entry, _entry,
 				copy_to_list, node) {
-			if (copy_to_user((void *)
-					iovec[entry->iov_index].iov_base +
-					entry->iov_offset,
+			if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
 					kbufs->addr + entry->offset,
 					entry->size)) {
 				/* Clean up the list and return error */
@@ -590,7 +603,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
 		buf, (int) count, (int) stream->status);
 
 	stream->buf_type = SST_BUF_USER_STATIC;
-	iovec.iov_base = (void *)buf;
+	iovec.iov_base = buf;
 	iovec.iov_len = count;
 	nr_segs = 1;
 
@@ -838,7 +851,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
-		struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
+		struct snd_sst_params str_param;
 
 		pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			break;
 		}
 
+		if (copy_from_user(&str_param, (void __user *)arg,
+				sizeof(str_param))) {
+			retval = -EFAULT;
+			break;
+		}
+
 		if (!str_id) {
 
-			retval = sst_get_stream(str_param);
+			retval = sst_get_stream(&str_param);
 			if (retval > 0) {
 				struct stream_info *str_info;
+				char __user *dest;
+
 				sst_drv_ctx->stream_cnt++;
 				data->str_id = retval;
 				str_info = &sst_drv_ctx->streams[retval];
 				str_info->src = SST_DRV;
-				retval = copy_to_user(&str_param->stream_id,
-						&retval, sizeof(__u32));
+				dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
+				retval = copy_to_user(dest, &retval, sizeof(__u32));
 				if (retval)
 					retval = -EFAULT;
 			} else {
@@ -866,16 +887,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		} else {
 			pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
 			/* allocated set params only */
-			retval = sst_set_stream_param(str_id, str_param);
+			retval = sst_set_stream_param(str_id, &str_param);
 			/* Block the call for reply */
 			if (!retval) {
 				int sfreq = 0, word_size = 0, num_channel = 0;
-				sfreq = str_param->sparams.uc.pcm_params.sfreq;
-				word_size = str_param->sparams.
-						uc.pcm_params.pcm_wd_sz;
-				num_channel = str_param->
-					sparams.uc.pcm_params.num_chan;
-				if (str_param->ops == STREAM_OPS_CAPTURE) {
+				sfreq = str_param.sparams.uc.pcm_params.sfreq;
+				word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
+				num_channel = str_param.sparams.uc.pcm_params.num_chan;
+				if (str_param.ops == STREAM_OPS_CAPTURE) {
 					sst_drv_ctx->scard_ops->\
 						set_pcm_audio_params(sfreq,
 							word_size, num_channel);
@@ -885,41 +904,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_SET_VOL): {
-		struct snd_sst_vol *set_vol;
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
+		struct snd_sst_vol set_vol;
+
+		if (copy_from_user(&set_vol, (void __user *)arg,
+				sizeof(set_vol))) {
+			pr_debug("sst: copy failed\n");
+			retval = -EFAULT;
+			break;
+		}
 		pr_debug("sst: SET_VOLUME recieved for %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+				set_vol.stream_id);
+		if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
-		if (!set_vol) {
-			pr_debug("sst: mem allocation failed\n");
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
-			pr_debug("sst: copy failed\n");
-			retval = -EFAULT;
-			break;
-		}
-		retval = sst_set_vol(set_vol);
-		kfree(set_vol);
+		retval = sst_set_vol(&set_vol);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_GET_VOL): {
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
 		struct snd_sst_vol get_vol;
+
+		if (copy_from_user(&get_vol, (void __user *)arg,
+				sizeof(get_vol))) {
+			retval = -EFAULT;
+			break;
+		}
 		pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+				get_vol.stream_id);
+		if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		get_vol.stream_id = rec_vol->stream_id;
 		retval = sst_get_vol(&get_vol);
 		if (retval) {
 			retval = -EIO;
@@ -928,7 +945,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
 				get_vol.stream_id, get_vol.volume,
 				get_vol.ramp_duration, get_vol.ramp_type);
-		if (copy_to_user((struct snd_sst_vol *)arg,
+		if (copy_to_user((struct snd_sst_vol __user *)arg,
 				&get_vol, sizeof(get_vol))) {
 			retval = -EFAULT;
 			break;
@@ -938,25 +955,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 	}
 
 	case _IOC_NR(SNDRV_SST_MUTE): {
-		struct snd_sst_mute *set_mute;
-		struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
-				rec_mute->stream_id);
-		if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
-			retval = -EPERM;
-			break;
-		}
-		set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
-		if (!set_mute) {
-			retval = -ENOMEM;
+		struct snd_sst_mute set_mute;
+
+		if (copy_from_user(&set_mute, (void __user *)arg,
+				sizeof(set_mute))) {
+			retval = -EFAULT;
 			break;
 		}
-		if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) {
-			retval = -EFAULT;
+		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
+				set_mute.stream_id);
+		if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
+			retval = -EPERM;
 			break;
 		}
-		retval = sst_set_mute(set_mute);
-		kfree(set_mute);
+		retval = sst_set_mute(&set_mute);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			retval = -EIO;
 			break;
 		}
-		if (copy_to_user((struct snd_sst_get_stream_params *)arg,
+		if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
 				&get_params, sizeof(get_params))) {
 			retval = -EFAULT;
 			break;
@@ -983,16 +995,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 	}
 
 	case _IOC_NR(SNDRV_SST_MMAP_PLAY):
-	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE):
+	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
+		struct snd_sst_mmap_buffs mmap_buf;
+
 		pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = intel_sst_mmap_play_capture(str_id,
-				(struct snd_sst_mmap_buffs *)arg);
+		if (copy_from_user(&mmap_buf, (void __user *)arg,
+				sizeof(mmap_buf))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
 		break;
-
+	}
 	case _IOC_NR(SNDRV_SST_STREAM_DROP):
 		pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
-		unsigned long long *ms = (unsigned long long *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 		unsigned long long time, freq, mod;
 
@@ -1013,14 +1030,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-				+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		time = tstamp.samples_rendered;
 		freq = (unsigned long long) tstamp.sampling_frequency;
 		time = time * 1000; /* converting it to ms */
 		mod = do_div(time, freq);
-		if (copy_to_user(ms, &time, sizeof(*ms)))
+		if (copy_to_user((void __user *)arg, &time,
+				sizeof(unsigned long long)))
 			retval = -EFAULT;
 		break;
 	}
@@ -1065,92 +1082,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 	}
 
 	case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
-		struct snd_sst_target_device *target_device;
+		struct snd_sst_target_device target_device;
 
 		pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
-		target_device = (struct snd_sst_target_device *)arg;
-		BUG_ON(!target_device);
+		if (copy_from_user(&target_device, (void __user *)arg,
+				sizeof(target_device))) {
+			retval = -EFAULT;
+			break;
+		}
 		if (minor != AM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = sst_target_device_select(target_device);
+		retval = sst_target_device_select(&target_device);
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
-		struct snd_sst_driver_info *info =
-			(struct snd_sst_driver_info *)arg;
+		struct snd_sst_driver_info info;
 
 		pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
-		info->version = SST_VERSION_NUM;
+		info.version = SST_VERSION_NUM;
 		/* hard coding, shud get sumhow later */
-		info->active_pcm_streams = sst_drv_ctx->stream_cnt -
+		info.active_pcm_streams = sst_drv_ctx->stream_cnt -
 						sst_drv_ctx->encoded_cnt;
-		info->active_enc_streams = sst_drv_ctx->encoded_cnt;
-		info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
-		info->max_enc_streams = MAX_ENC_STREAM;
-		info->buf_per_stream = sst_drv_ctx->mmap_len;
+		info.active_enc_streams = sst_drv_ctx->encoded_cnt;
+		info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
+		info.max_enc_streams = MAX_ENC_STREAM;
+		info.buf_per_stream = sst_drv_ctx->mmap_len;
+		if (copy_to_user((void __user *)arg, &info,
+				sizeof(info)))
+			retval = -EFAULT;
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
-		struct snd_sst_dbufs *param =
-			(struct snd_sst_dbufs *)arg, dbufs_local;
-		int i;
+		struct snd_sst_dbufs param;
+		struct snd_sst_dbufs dbufs_local;
 		struct snd_sst_buffs ibufs, obufs;
-		struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries],
-				obuf_temp[param->obufs->entries];
+		struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
+		char __user *dest;
 
 		pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		if (!param) {
-			retval = -EINVAL;
+		if (copy_from_user(&param, (void __user *)arg,
+				sizeof(param))) {
+			retval = -EFAULT;
 			break;
 		}
 
-		dbufs_local.input_bytes_consumed = param->input_bytes_consumed;
+		dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
 		dbufs_local.output_bytes_produced =
-				param->output_bytes_produced;
-		dbufs_local.ibufs = &ibufs;
-		dbufs_local.obufs = &obufs;
-		dbufs_local.ibufs->entries = param->ibufs->entries;
-		dbufs_local.ibufs->type = param->ibufs->type;
-		dbufs_local.obufs->entries = param->obufs->entries;
-		dbufs_local.obufs->type = param->obufs->type;
-
-		dbufs_local.ibufs->buff_entry = ibuf_temp;
-		for (i = 0; i < dbufs_local.ibufs->entries; i++) {
-			ibuf_temp[i].buffer =
-				param->ibufs->buff_entry[i].buffer;
-			ibuf_temp[i].size =
-				param->ibufs->buff_entry[i].size;
+				param.output_bytes_produced;
+
+		if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
+			retval = -EFAULT;
+			break;
 		}
-		dbufs_local.obufs->buff_entry = obuf_temp;
-		for (i = 0; i < dbufs_local.obufs->entries; i++) {
-			obuf_temp[i].buffer =
-				param->obufs->buff_entry[i].buffer;
-			obuf_temp[i].size =
-				param->obufs->buff_entry[i].size;
+
+		ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
+		obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
+		if (!ibuf_tmp || !obuf_tmp) {
+			retval = -ENOMEM;
+			goto free_iobufs;
+		}
+
+		if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
+				ibufs.entries * sizeof(*ibuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
 		}
+		ibufs.buff_entry = ibuf_tmp;
+		dbufs_local.ibufs = &ibufs;
+
+		if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
+				obufs.entries * sizeof(*obuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
+		}
+		obufs.buff_entry = obuf_tmp;
+		dbufs_local.obufs = &obufs;
+
 		retval = sst_decode(str_id, &dbufs_local);
-		if (retval)
+		if (retval) {
 			retval = -EAGAIN;
-		if (copy_to_user(&param->input_bytes_consumed,
+			goto free_iobufs;
+		}
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
+		if (copy_to_user(dest,
 				&dbufs_local.input_bytes_consumed,
 				sizeof(unsigned long long))) {
 			retval = -EFAULT;
-			break;
+			goto free_iobufs;
 		}
-		if (copy_to_user(&param->output_bytes_produced,
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
+		if (copy_to_user(dest,
 				&dbufs_local.output_bytes_produced,
 				sizeof(unsigned long long))) {
 			retval = -EFAULT;
-			break;
+			goto free_iobufs;
 		}
+free_iobufs:
+		kfree(ibuf_tmp);
+		kfree(obuf_tmp);
 		break;
 	}
 
@@ -1164,7 +1207,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
-		unsigned long long *bytes = (unsigned long long *)arg;
+		unsigned long long __user *bytes = (unsigned long long __user *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 
 		pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-				+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		if (copy_to_user(bytes, &tstamp.bytes_processed,
 				sizeof(*bytes)))
@@ -1197,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			kfree(fw_info);
 			break;
 		}
-		if (copy_to_user((struct snd_sst_dbufs *)arg,
+		if (copy_to_user((struct snd_sst_dbufs __user *)arg,
 				fw_info, sizeof(*fw_info))) {
 			kfree(fw_info);
 			retval = -EFAULT;
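Note on the intel_sst hunks above: they all apply one rule; never operate on an ioctl's user pointer in place. The argument is copied into a kernel-side struct (or a kcalloc'd bounce buffer for arrays), the driver works on that copy, and results go back out through copy_to_user(). A minimal sketch under assumed, illustrative names (example_args, example_ioctl_body, and the 1024 bound are not the driver's own):

	struct example_args {
		unsigned int entries;
		void __user *buff;	/* user array of 'entries' u32 records */
	};

	static long example_ioctl_body(void __user *argp)
	{
		struct example_args args;
		u32 *tmp;
		long ret = 0;

		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		if (args.entries > 1024)	/* bound user-controlled sizes */
			return -EINVAL;
		tmp = kcalloc(args.entries, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		if (copy_from_user(tmp, args.buff, args.entries * sizeof(*tmp)))
			ret = -EFAULT;
		/* ... operate on the kernel-side copy here ... */
		kfree(tmp);
		return ret;
	}

kcalloc() also guards the size multiplication against overflow, which the removed variable-length stack arrays could not do.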
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c851e4a..bf0ead78bfae 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@ struct stream_info {
 	spinlock_t pcm_lock;
 	bool mmapped;
 	unsigned int sg_index; /*  current buf Index  */
-	unsigned char *cur_ptr; /*  Current static bufs  */
-	struct snd_sst_buf_entry *buf_entry;
+	unsigned char __user *cur_ptr; /*  Current static bufs  */
+	struct snd_sst_buf_entry __user *buf_entry;
 	struct sst_block data_blk; /* stream ops block */
 	struct sst_block ctrl_blk; /* stream control cmd block */
 	enum snd_sst_buf_type buf_type;
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805844f2..978bf87ff13d 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us)
 	int result;
 	BYTE	MiscReg03 = 0;
 
-	printk("--- Initial Nedia ---\n");
+	printk("--- Init Media ---\n");
 	result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
 	if (result != USB_STOR_XFER_GOOD)
 	{
@@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x01;
 	bcb->Flags = 0x80;
@@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us)
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->Flags = 0x80;
 	bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us)
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us)
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us)
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
 		break;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x800;
 	bcb->Flags =0x00;
@@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
 
 	//printk("transport --- ENE_Read_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x80;
@@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
 
 	//printk("transport --- ENE_Write_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x00;
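Note on the keucr hunks (this file and the ones that follow): every one fixes the same defect. bcb is a pointer, so memset(bcb, 0, sizeof(bcb)) cleared only sizeof(void *) bytes, 4 or 8, leaving the rest of the command wrapper as stale garbage. An illustration of the pitfall (get_wrapper is a hypothetical accessor):

	struct bulk_cb_wrap *bcb = get_wrapper();

	memset(bcb, 0, sizeof(bcb));			/* WRONG: size of the pointer itself */
	memset(bcb, 0, sizeof(*bcb));			/* right: size of the pointed-to struct */
	memset(bcb, 0, sizeof(struct bulk_cb_wrap));	/* equivalent, the form the patch uses */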
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9da87d..9a3fdb4e4fe4 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200*len;
 	bcb->Flags = 0x00;
@@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Page Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags = 0x80;
@@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr)
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE
 	//printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4 * blen;
 	bcb->Flags = 0x80;
@@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType
 	BYTE ExtBuf[4];
 
 	//printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags = 0x80;
@@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c629935..cb92d25acee0 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
 	}
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags = 0x80;
@@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
 			blkno = phyblk * 0x20 + PageNum;
 
 			// set up the command wrapper
-			memset(bcb, 0, sizeof(bcb));
+			memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 			bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 			bcb->DataTransferLength = 0x200 * len;
 			bcb->Flags = 0x80;
@@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
 	}
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f850ebe..d646507a3611 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@ int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
 		bnByte = bn;
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags = 0x80;
@@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
 		bnByte = bn;
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b65988636..1b52535a388f 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x10;
 	bcb->Flags = 0x80;
@@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200*count;
 	bcb->Flags = 0x80;
@@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x10;
 	bcb->Flags = 0x80;
@@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
 	WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200*count;
 	bcb->Flags = 0x00;
@@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x00;
@@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
 	addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr=addr*(WORD)Ssfdc.MaxSectors;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags = 0x80;
@@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x10;
 	bcb->Flags = 0x80;
@@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x10;
 	bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df643ab0..111160cce441 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
 	us->current_urb->error_count = 0;
 	us->current_urb->status = 0;
 
-//	us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
+	us->current_urb->transfer_flags = 0;
 	if (us->current_urb->transfer_buffer == us->iobuf)
 		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	us->current_urb->transfer_dma = us->iobuf_dma;
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff82fd2..a99879bada42 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv,
 	for (i = 8; i < 14; i++)
 		mic_iv[i] = pn_vector[13 - i];	/* mic_iv[8:13] = PN[5:0] */
 #endif
-	i = (payload_length / 256);
-	i = (payload_length % 256);
 	mic_iv[14] = (unsigned char)(payload_length / 256);
 	mic_iv[15] = (unsigned char)(payload_length % 256);
 
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074a9083..ddacfc6c4861 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
 	{USB_DEVICE(0x14B2, 0x3C07)},	/* AL */
 	{USB_DEVICE(0x050D, 0x8053)},	/* Belkin */
 	{USB_DEVICE(0x050D, 0x825B)},	/* Belkin */
+	{USB_DEVICE(0x050D, 0x935A)},	/* Belkin F6D4050 v1 */
 	{USB_DEVICE(0x050D, 0x935B)},	/* Belkin F6D4050 v2 */
 	{USB_DEVICE(0x14B2, 0x3C23)},	/* Airlink */
 	{USB_DEVICE(0x14B2, 0x3C27)},	/* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194b5cbb..b1786dcac245 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@ static void rtl8192_rx(struct net_device *dev)
 		}
 	}
 
+	pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
+		priv->rxbuffersize, PCI_DMA_FROMDEVICE);
+
 	skb = new_skb;
 	priv->rx_buf[priv->rx_idx] = skb;
 	*((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
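Note on the rtl8192e hunk: pci_map_single() and pci_unmap_single() must pair one to one. The old receive path mapped a fresh skb each time it recycled a ring slot but never unmapped the outgoing one, leaking a DMA mapping per received frame. The shape of the invariant, with hypothetical names (swap_rx_buffer is illustrative):

	static void swap_rx_buffer(struct pci_dev *pdev, struct sk_buff **slot,
				   struct sk_buff *new_skb, unsigned int bufsize)
	{
		/* undo the old streaming mapping before the slot is reused */
		pci_unmap_single(pdev, *((dma_addr_t *)(*slot)->cb),
				 bufsize, PCI_DMA_FROMDEVICE);
		*slot = new_skb;
		*((dma_addr_t *)new_skb->cb) = pci_map_single(pdev,
				skb_tail_pointer(new_skb), bufsize,
				PCI_DMA_FROMDEVICE);
	}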
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
index bbf3d9c4abb0..097e82bc7a63 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -766,7 +766,7 @@ static int solo_enc_open(struct file *file)
 				    &solo_enc->lock,
 				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 				    V4L2_FIELD_INTERLACED,
-				    sizeof(struct videobuf_buffer), fh);
+				    sizeof(struct videobuf_buffer), fh, NULL);
 
 	spin_unlock(&solo_enc->lock);
 
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
index 9731fa02b5e8..6ffd21de837d 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -437,7 +437,7 @@ static int solo_v4l2_open(struct file *file)
 				    &solo_dev->pdev->dev, &fh->slock,
 				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 				    SOLO_DISP_PIX_FIELD,
-				    sizeof(struct videobuf_buffer), fh);
+				    sizeof(struct videobuf_buffer), fh, NULL);
 
 	return 0;
 }
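Note on the solo6x10 hunks: the videobuf queue-init helpers grew a trailing ext_lock argument in this cycle, and passing NULL keeps the driver's existing self-managed locking. Assuming the sg variant (videobuf_queue_sg_init) is the helper at these call sites, which the surrounding arguments suggest, the call shape is roughly:

	videobuf_queue_sg_init(&fh->vidq, &qops,	/* qops: the driver's ops table */
			       &dev, &lock,
			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
			       V4L2_FIELD_INTERLACED,
			       sizeof(struct videobuf_buffer), fh,
			       NULL);	/* ext_lock == NULL: driver-managed locking */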
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824e7ebc..807dd7eb748f 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file,
 	case VIDIOCGCAP:
 	{
 		struct video_capability b;
+		memset(&b, 0, sizeof(b));
 		strcpy(b.name, saa->video_dev.name);
 		b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
 			VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file,
 	case VIDIOCGWIN:
 	{
 		struct video_window vw;
+		memset(&vw, 0, sizeof(vw));
 		vw.x = saa->win.x;
 		vw.y = saa->win.y;
 		vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file,
 	case VIDIOCGFBUF:
 	{
 		struct video_buffer v;
+		memset(&v, 0, sizeof(v));
 		v.base = (void *)saa->win.vidadr;
 		v.height = saa->win.sheight;
 		v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file,
 	case VIDIOCGAUDIO:
 	{
 		struct video_audio v;
+		memset(&v, 0, sizeof(v));
 		v = saa->audio_dev;
 		v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
 		v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file,
 	case VIDIOCGUNIT:
 	{
 		struct video_unit vu;
+		memset(&vu, 0, sizeof(vu));
 		vu.video = saa->video_dev.minor;
 		vu.vbi = VIDEO_NO_UNIT;
 		vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@ static int saa_open(struct file *file)
 
 	saa->user++;
 	if (saa->user > 1) {
+		saa->user--;
 		unlock_kernel();
 		return 0;	/* device open already, don't reset */
 	}
@@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
 	if (retval < 0) {
 		dev_err(&pdev->dev, "%d: error in registering video device!\n",
 			num);
-		goto errio;
+		goto errirq;
 	}
 
 	return 0;
+
+errirq:
+	free_irq(saa->irq, saa);
 errio:
 	iounmap(saa->saa7146_mem);
 err:
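Note on the stradis hunks: each of these VIDIOCG* cases ends by copying the whole struct back with copy_to_user(), so any field or padding byte the handler did not set would leak kernel stack contents to user space. Zeroing the struct first closes that. The shape of the fix, with argp and win_x standing in for the real ioctl argument and driver state:

	struct video_window vw;

	memset(&vw, 0, sizeof(vw));	/* no uninitialized bytes escape */
	vw.x = win_x;
	if (copy_to_user(argp, &vw, sizeof(vw)))
		return -EFAULT;

The saa_open hunk is separate: the user count must be dropped again on the early-return path, or a second open would permanently inflate it.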
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d464143c..93de4f2e8bf8 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 7 depends on ARCH_OMAP3
8 select OMAP_MBOX_FWK 8 select OMAP_MBOX_FWK
9 select OMAP_IOMMU
10 help 9 help
11 DSP/BIOS Bridge is designed for platforms that contain a GPP and 10 DSP/BIOS Bridge is designed for platforms that contain a GPP and
12 one or more attached DSPs. The GPP is considered the master or 11 one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2935c5..41c644c3318f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
2 2
3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
5 core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ 5 core/tiomap3430_pwr.o core/tiomap_io.o \
6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ 7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
8 pmgr/cmm.o pmgr/dbll.o 8 pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ 9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ 10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
11 rmgr/nldr.o rmgr/drv_interface.o 11 rmgr/nldr.o rmgr/drv_interface.o
12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ 12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
13 dynload/tramp.o 13 dynload/tramp.o
14libhw = hw/hw_mmu.o
14 15
15bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ 16bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
16 $(libdload) 17 $(libdload) $(libhw)
17 18
18#Machine dependent 19#Machine dependent
19ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ 20ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae263387a87..16723cd34831 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
27struct deh_mgr { 27struct deh_mgr {
28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 struct bridge_dev_context *hbridge_context; /* Bridge context. */
29 struct ntfy_object *ntfy_obj; /* NTFY object */ 29 struct ntfy_object *ntfy_obj; /* NTFY object */
30};
31 30
32int mmu_fault_isr(struct iommu *mmu); 31 /* MMU Fault DPC */
32 struct tasklet_struct dpc_tasklet;
33};
33 34
34#endif /* _DEH_ */ 35#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c1cb98..1c1f157e167a 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
-#include <dspbridge/dsp-mmu.h>
 #include <dspbridge/devdefs.h>
+#include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
 #include <dspbridge/sync.h>
 #include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
 
 #define CLEAR_BIT_INDEX(reg, index)   (reg &= ~(1 << (index)))
 
-struct shm_segs {
-	u32 seg0_da;
-	u32 seg0_pa;
-	u32 seg0_va;
-	u32 seg0_size;
-	u32 seg1_da;
-	u32 seg1_pa;
-	u32 seg1_va;
-	u32 seg1_size;
-};
-
-
 /* This Bridge driver's device context: */
 struct bridge_dev_context {
 	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;	/* API mem map'd registers */
+	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;	/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
 	u32 dw_public_rhea;	/* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
 	u32 dw_internal_size;	/* Internal memory size */
 
 	struct omap_mbox *mbox;	/* Mail box handle */
-	struct iommu *dsp_mmu;	/* iommu for iva2 handler */
-	struct shm_segs sh_s;
+
 	struct cfg_hostres *resources;	/* Host Resources */
 
 	/*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
 
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
 
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95adc8ff..000000000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * dsp-mmu.c
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * DSP iommu.
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <dspbridge/host_os.h>
-#include <plat/dmtimer.h>
-#include <dspbridge/dbdefs.h>
-#include <dspbridge/dev.h>
-#include <dspbridge/io_sm.h>
-#include <dspbridge/dspdeh.h>
-#include "_tiomap.h"
-
-#include <dspbridge/dsp-mmu.h>
-
-#define MMU_CNTL_TWL_EN		(1 << 2)
-
-static struct tasklet_struct mmu_tasklet;
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
-{
-	void *dummy_addr;
-	u32 fa, tmp;
-	struct iotlb_entry e;
-	struct iommu *mmu = dev_context->dsp_mmu;
-	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
-
-	/*
-	 * Before acking the MMU fault, let's make sure MMU can only
-	 * access entry #0. Then add a new entry so that the DSP OS
-	 * can continue in order to dump the stack.
-	 */
-	tmp = iommu_read_reg(mmu, MMU_CNTL);
-	tmp &= ~MMU_CNTL_TWL_EN;
-	iommu_write_reg(mmu, tmp, MMU_CNTL);
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-	e.da = fa & PAGE_MASK;
-	e.pa = virt_to_phys(dummy_addr);
-	e.valid = 1;
-	e.prsvd = 1;
-	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
-	e.endian = MMU_RAM_ENDIAN_LITTLE;
-	e.elsz = MMU_RAM_ELSZ_32;
-	e.mixed = 0;
-
-	load_iotlb_entry(mmu, &e);
-
-	dsp_clk_enable(DSP_CLK_GPT8);
-
-	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
-
-	/* Clear MMU interrupt */
-	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
-	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
-
-	dump_dsp_stack(dev_context);
-	dsp_clk_disable(DSP_CLK_GPT8);
-
-	iopgtable_clear_entry(mmu, fa);
-	free_page((unsigned long)dummy_addr);
-}
-#endif
-
-
-static void fault_tasklet(unsigned long data)
-{
-	struct iommu *mmu = (struct iommu *)data;
-	struct bridge_dev_context *dev_ctx;
-	struct deh_mgr *dm;
-	u32 fa;
-	dev_get_deh_mgr(dev_get_first(), &dm);
-	dev_get_bridge_context(dev_get_first(), &dev_ctx);
-
-	if (!dm || !dev_ctx)
-		return;
-
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-	print_dsp_trace_buffer(dev_ctx);
-	dump_dl_modules(dev_ctx);
-	mmu_fault_print_stack(dev_ctx);
-#endif
-
-	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
-}
-
-/*
- *  ======== mmu_fault_isr ========
- *      ISR to be triggered by a DSP MMU fault interrupt.
- */
-static int mmu_fault_callback(struct iommu *mmu)
-{
-	if (!mmu)
-		return -EPERM;
-
-	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
-	tasklet_schedule(&mmu_tasklet);
-	return 0;
-}
-
-/**
- * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
- *
- * This function initialize dsp mmu module and returns a struct iommu
- * handle to use it for dsp maps.
- *
- */
-struct iommu *dsp_mmu_init()
-{
-	struct iommu *mmu;
-
-	mmu = iommu_get("iva2");
-
-	if (!IS_ERR(mmu)) {
-		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
-		mmu->isr = mmu_fault_callback;
-	}
-
-	return mmu;
-}
-
-/**
- * dsp_mmu_exit() - destroy dsp mmu module
- * @mmu:	Pointer to iommu handle.
- *
- * This function destroys dsp mmu module.
- *
- */
-void dsp_mmu_exit(struct iommu *mmu)
-{
-	if (mmu)
-		iommu_put(mmu);
-	tasklet_kill(&mmu_tasklet);
-}
-
-/**
- * user_va2_pa() - get physical address from userspace address.
- * @mm:		mm_struct Pointer of the process.
- * @address:	Virtual user space address.
- *
- */
-static u32 user_va2_pa(struct mm_struct *mm, u32 address)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	pgd = pgd_offset(mm, address);
-	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
-		pmd = pmd_offset(pgd, address);
-		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
-			ptep = pte_offset_map(pmd, address);
-			if (ptep) {
-				pte = *ptep;
-				if (pte_present(pte))
-					return pte & PAGE_MASK;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * get_io_pages() - pin and get pages of io user's buffer.
- * @mm:		mm_struct Pointer of the process.
- * @uva:	Virtual user space address.
- * @pages	Pages to be pined.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- */
-static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
-			struct page **usr_pgs)
-{
-	u32 pa;
-	int i;
-	struct page *pg;
-
-	for (i = 0; i < pages; i++) {
-		pa = user_va2_pa(mm, uva);
-
-		if (!pfn_valid(__phys_to_pfn(pa)))
-			break;
-
-		pg = phys_to_page(pa);
-		usr_pgs[i] = pg;
-		get_page(pg);
-	}
-	return i;
-}
-
-/**
- * user_to_dsp_map() - maps user to dsp virtual address
- * @mmu:	Pointer to iommu handle.
- * @uva:	Virtual user space address.
- * @da		DSP address
- * @size	Buffer size to map.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- * This function maps a user space buffer into DSP virtual address.
- *
- */
-u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
-		struct page **usr_pgs)
-{
-	int res, w;
-	unsigned pages;
-	int i;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	if (!size || !usr_pgs)
-		return -EINVAL;
-
-	pages = size / PG_SIZE4K;
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, uva);
-	while (vma && (uva + size > vma->vm_end))
-		vma = find_vma(mm, vma->vm_end + 1);
-
-	if (!vma) {
-		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
-						__func__, uva, size);
-		up_read(&mm->mmap_sem);
-		return -EINVAL;
-	}
-	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
-		w = 1;
-
-	if (vma->vm_flags & VM_IO)
-		i = get_io_pages(mm, uva, pages, usr_pgs);
-	else
-		i = get_user_pages(current, mm, uva, pages, w, 1,
-							usr_pgs, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (i < 0)
-		return i;
-
-	if (i < pages) {
-		res = -EFAULT;
-		goto err_pages;
-	}
-
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt) {
-		res = -ENOMEM;
-		goto err_pages;
-	}
-
-	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
-
-	if (res < 0)
-		goto err_sg;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
-
-	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-
-	if (!IS_ERR_VALUE(da))
-		return da;
-	res = (int)da;
-
-	sg_free_table(sgt);
-err_sg:
-	kfree(sgt);
-	i = pages;
-err_pages:
-	while (i--)
-		put_page(usr_pgs[i]);
-	return res;
-}
-
-/**
- * user_to_dsp_unmap() - unmaps DSP virtual buffer.
- * @mmu:	Pointer to iommu handle.
- * @da		DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-int user_to_dsp_unmap(struct iommu *mmu, u32 da)
-{
-	unsigned i;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	sgt = iommu_vunmap(mmu, da);
-	if (!sgt)
-		return -EFAULT;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		put_page(sg_page(sg));
-	sg_free_table(sgt);
-	kfree(sgt);
-
-	return 0;
-}
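
The file deleted above was the bridge's entire glue to the generic OMAP iommu layer: user buffers were pinned page by page, described by an sg_table, and installed with a single iommu_vmap() call. A condensed sketch of that pattern, reusing the same iovmm calls the removed code depended on (error handling trimmed; assumes size is a whole number of 4 KB pages):

/* Sketch only: the pin -> sg_table -> iommu_vmap flow removed above. */
static u32 map_user_buf_sketch(struct iommu *mmu, u32 uva, u32 da,
			       unsigned int pages, struct page **pgs)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i;

	/* 1. Pin the user pages so they stay resident while mapped. */
	down_read(&current->mm->mmap_sem);
	i = get_user_pages(current, current->mm, uva, pages,
			   1 /* write */, 1 /* force */, pgs, NULL);
	up_read(&current->mm->mmap_sem);
	if (i < (int)pages)
		return 0;

	/* 2. Describe the (possibly discontiguous) pages in an sg_table. */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	sg_alloc_table(sgt, pages, GFP_KERNEL);
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, pgs[i], PAGE_SIZE, 0);

	/* 3. One iovmm call installs every IOMMU entry for the buffer. */
	return iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
}

The hw_mmu code reinstated in the files below does the same job by writing the OMAP3 MMU's L1/L2 page tables directly, one PTE at a time.
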
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194badaba0ed..571864555ddd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
 #include <dspbridge/ntfy.h>
 #include <dspbridge/sync.h>
 
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /* Bridge Driver */
 #include <dspbridge/dspdeh.h>
 #include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
-	struct shm_segs *sm_sg;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
 	struct cfg_hostres *host_res;
 	struct bridge_dev_context *pbridge_context;
+	u32 map_attrs;
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
+	u32 pa_curr = 0;
+	u32 va_curr = 0;
+	u32 gpp_va_curr = 0;
+	u32 num_bytes = 0;
+	u32 all_bits = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
 
 	status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
 	if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	}
-	sm_sg = &pbridge_context->sh_s;
-
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
 		status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	if (status)
 		goto func_end;
 
-	sm_sg->seg1_pa = ul_gpp_pa;
-	sm_sg->seg1_da = ul_dyn_ext_base;
-	sm_sg->seg1_va = ul_gpp_va;
-	sm_sg->seg1_size = ul_seg1_size;
-	sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_da = ul_dsp_va;
-	sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_size = ul_seg_size;
+	pa_curr = ul_gpp_pa;
+	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+	gpp_va_curr = ul_gpp_va;
+	num_bytes = ul_seg1_size;
+
+	/*
+	 * Try to fit into TLB entries. If not possible, push them to page
+	 * tables. It is quite possible that if sections are not on
+	 * bigger page boundary, we may end up making several small pages.
+	 * So, push them onto page tables, if that is the case.
+	 */
+	map_attrs = 0x00000000;
+	map_attrs = DSP_MAPLITTLEENDIAN;
+	map_attrs |= DSP_MAPPHYSICALADDR;
+	map_attrs |= DSP_MAPELEMSIZE32;
+	map_attrs |= DSP_MAPDONOTLOCK;
+
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+			"num_bytes %x\n", all_bits, pa_curr, va_curr,
+			num_bytes);
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				if (status)
+					goto func_end;
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				gpp_va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/*
+				 * Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size.
+				 */
+				break;
+			}
+		}
+	}
+	pa_curr += ul_pad_size;
+	va_curr += ul_pad_size;
+	gpp_va_curr += ul_pad_size;
+
+	/* Configure the TLB entries for the next cacheable segment */
+	num_bytes = ul_seg_size;
+	va_curr = ul_dsp_va * hio_mgr->word_size;
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+			va_curr, num_bytes);
+		for (i = 0; i < 4; i++) {
+			if (!(num_bytes >= page_size[i]) ||
+			    !((all_bits & (page_size[i] - 1)) == 0))
+				continue;
+			if (ndx < MAX_LOCK_TLB_ENTRIES) {
+				/*
+				 * This is the physical address written to
+				 * DSP MMU.
+				 */
+				ae_proc[ndx].ul_gpp_pa = pa_curr;
+				/*
+				 * This is the virtual uncached ioremapped
+				 * address!!!
+				 */
+				ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+				ae_proc[ndx].ul_dsp_va =
+				    va_curr / hio_mgr->word_size;
+				ae_proc[ndx].ul_size = page_size[i];
+				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+				dev_dbg(bridge, "shm MMU TLB entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					ae_proc[ndx].ul_gpp_pa,
+					ae_proc[ndx].ul_gpp_va,
+					ae_proc[ndx].ul_dsp_va *
+					hio_mgr->word_size, page_size[i]);
+				ndx++;
+			} else {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				dev_dbg(bridge,
+					"shm MMU PTE entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					ae_proc[ndx].ul_gpp_pa,
+					ae_proc[ndx].ul_gpp_va,
+					ae_proc[ndx].ul_dsp_va *
+					hio_mgr->word_size, page_size[i]);
+				if (status)
+					goto func_end;
+			}
+			pa_curr += page_size[i];
+			va_curr += page_size[i];
+			gpp_va_curr += page_size[i];
+			num_bytes -= page_size[i];
+			/*
+			 * Don't try smaller sizes. Hopefully we have reached
+			 * an address aligned to a bigger page size.
+			 */
+			break;
+		}
+	}
 
 	/*
 	 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
509 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
510 ae_proc[ndx].ul_dsp_va); 635 ae_proc[ndx].ul_dsp_va);
511 ndx++; 636 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
512 } 645 }
513 } 646 }
514 if (status) 647 if (status)
515 goto func_end; 648 goto func_end;
516 } 649 }
517 650
651 map_attrs = 0x00000000;
652 map_attrs = DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
657 /* Map the L4 peripherals */
658 i = 0;
659 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663 map_attrs, NULL);
664 if (status)
665 goto func_end;
666 i++;
667 }
668
518 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
519 ae_proc[i].ul_dsp_va = 0; 670 ae_proc[i].ul_dsp_va = 0;
520 ae_proc[i].ul_gpp_pa = 0; 671 ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		status = -EFAULT;
 		goto func_end;
 	} else {
-		if (sm_sg->seg0_da > ul_shm_base) {
+		if (ae_proc[0].ul_dsp_va > ul_shm_base) {
 			status = -EPERM;
 			goto func_end;
 		}
 		/* ul_shm_base may not be at ul_dsp_va address */
-		ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+		ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
 		    hio_mgr->word_size;
 		/*
 		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 			goto func_end;
 		}
 		/* Register SM */
-		status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+		status =
+		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
 	}
 
 	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
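
The mapping loops reinstated above always pick the largest DSP MMU page size (16 MB, 1 MB, 64 KB, 4 KB) that still fits: a size is usable only when the remaining byte count covers it and both the physical and DSP-virtual addresses are aligned to it, which is why the code tests pa_curr | va_curr against page_size[i] - 1 -- the OR has a low bit set exactly when either address is misaligned. A standalone sketch of that selection (pick_page_size() is an illustrative helper, not a driver function):

/* Greedy page-size choice, as in the bridge_io_on_loaded() hunks above. */
static const u32 mmu_page_sizes[] = {
	0x1000000,	/* 16 MB */
	0x100000,	/* 1 MB */
	0x10000,	/* 64 KB */
	0x1000,		/* 4 KB */
};

static u32 pick_page_size(u32 pa, u32 va, u32 rem_bytes)
{
	u32 all_bits = pa | va;	/* alignment of both addresses at once */
	int i;

	for (i = 0; i < 4; i++)
		if (rem_bytes >= mmu_page_sizes[i] &&
		    !(all_bits & (mmu_page_sizes[i] - 1)))
			return mmu_page_sizes[i];	/* largest size that fits */
	return 0;	/* unreachable when pa, va and rem_bytes are 4 KB aligned */
}

Each mapped chunk advances both addresses by the chosen size, so once a run of small pages reaches a larger alignment boundary the loop automatically switches back to the bigger page size.
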
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d3..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
 #include <dspbridge/host_os.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <plat/control.h>
 
 /* ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/sync.h>
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /* ----------------------------------- Link Driver */
 #include <dspbridge/dspdefs.h>
 #include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
 /* ----------------------------------- Platform Manager */
 #include <dspbridge/dev.h>
 #include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/wdt.h>
 
 /* ----------------------------------- Local */
@@ -67,6 +71,20 @@
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
 #define PAGES_II_LVL_TABLE   512
+#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
+
+/*
+ * This is a totally ugly layer violation, but needed until
+ * omap_ctrl_set_dsp_boot*() are provided.
+ */
+#define OMAP3_IVA2_BOOTMOD_IDLE 1
+#define OMAP2_CONTROL_GENERAL 0x270
+#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
+#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+
+#define OMAP343X_CTRL_REGADDR(reg) \
+	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+
 
 /* Forward Declarations: */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 				    u8 *host_buff, u32 dsp_addr,
 				    u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes);
 static int bridge_dev_create(struct bridge_dev_context
 					**dev_cntxt,
 					struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs);
+
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
 
+/* ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+	spinlock_t pg_lock;	/* Critical section object handle */
+
+	u32 l1_base_pa;		/* Physical address of the L1 PT */
+	u32 l1_base_va;		/* Virtual address of the L1 PT */
+	u32 l1_size;		/* Size of the L1 PT */
+	u32 l1_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L1 table.
+	 * May not be aligned */
+
+	u32 l2_base_pa;		/* Physical address of the L2 PT */
+	u32 l2_base_va;		/* Virtual address of the L2 PT */
+	u32 l2_size;		/* Size of the L2 PT */
+	u32 l2_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L2 table.
+	 * May not be aligned */
+
+	u32 l2_num_pages;	/* Number of allocated L2 PT */
+	/* Array [l2_num_pages] of L2 PT info structs */
+	struct page_info *pg_info;
+};
+
 /*
  *  This Bridge driver's function interface table.
  */
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
 	bridge_brd_set_state,
 	bridge_brd_mem_copy,
 	bridge_brd_mem_write,
+	bridge_brd_mem_map,
+	bridge_brd_mem_un_map,
 	/* The following CHNL functions are provided by chnl_io.lib: */
 	bridge_chnl_create,
 	bridge_chnl_destroy,
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
 	bridge_msg_set_queue_id,
 };
 
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
+	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+	    dev_context->dw_brd_state == BRD_HIBERNATION)
+		wake_dsp(dev_context, NULL);
+
+	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+	pr_emerg("Bad page state in process '%s'\n"
+		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		 "Backtrace:\n",
+		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
+		 (unsigned long)pg->flags, pg->mapping,
+		 page_mapcount(pg), page_count(pg));
+	dump_stack();
+}
+
 /*
  *  ======== bridge_drv_entry ========
  *  purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 	}
-
+	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 
 	/* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct iommu *mmu = NULL;
-	struct shm_segs *sm_sg;
-	int l4_i = 0, tlb_i = 0;
-	u32 sg0_da = 0, sg1_da = 0;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
 	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
 	/* Offset of shm_base_virt from tlb_base_virt */
 	u32 ul_shm_offset_virt;
+	s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
 	struct cfg_hostres *resources = NULL;
 	u32 temp;
 	u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	ul_shm_base_virt *= DSPWORDSIZE;
 	DBC_ASSERT(ul_shm_base_virt != 0);
 	/* DSP Virtual address */
-	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
 	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 	ul_shm_offset_virt =
 	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 	/* Kernel logical address */
-	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
 
 	DBC_ASSERT(ul_shm_base != 0);
 	/* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 						OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
-
 	if (!status) {
+		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
+		 * IVA2 SYSC register */
+		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+		udelay(100);
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		mmu = dev_context->dsp_mmu;
-		if (mmu)
-			dsp_mmu_exit(mmu);
-		mmu = dsp_mmu_init();
-		if (IS_ERR(mmu)) {
-			dev_err(bridge, "dsp_mmu_init failed!\n");
-			dev_context->dsp_mmu = NULL;
-			status = (int)mmu;
-		}
-	}
-	if (!status) {
-		dev_context->dsp_mmu = mmu;
-		sm_sg = &dev_context->sh_s;
-		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
-			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg0_da)) {
-			status = (int)sg0_da;
-			sg0_da = 0;
-		}
-	}
-	if (!status) {
-		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
-			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg1_da)) {
-			status = (int)sg1_da;
-			sg1_da = 0;
-		}
-	}
-	if (!status) {
-		u32 da;
-		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
-			if (!tlb[tlb_i].ul_gpp_pa)
+		udelay(100);
+
+		/* Disbale the DSP MMU */
+		hw_mmu_disable(resources->dw_dmmu_base);
+		/* Disable TWL */
+		hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+		/* Only make TLB entry if both addresses are non-zero */
+		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+		     entry_ndx++) {
+			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+			struct hw_mmu_map_attrs_t map_attrs = {
+				.endianism = e->endianism,
+				.element_size = e->elem_size,
+				.mixed_size = e->mixed_mode,
+			};
+
+			if (!e->ul_gpp_pa || !e->ul_dsp_va)
 				continue;
 
-			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
-				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
-				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
-
-			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
-				tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-		}
-	}
-	if (!status) {
-		u32 da;
-		l4_i = 0;
-		while (l4_peripheral_table[l4_i].phys_addr) {
-			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
-				dsp_virt_addr, l4_peripheral_table[l4_i].
-				phys_addr, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-			l4_i++;
+			dev_dbg(bridge,
+				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+				itmp_entry_ndx,
+				e->ul_gpp_pa,
+				e->ul_dsp_va,
+				e->ul_size);
+
+			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+				       e->ul_gpp_pa,
+				       e->ul_dsp_va,
+				       e->ul_size,
+				       itmp_entry_ndx,
+				       &map_attrs, 1, 1);
+
+			itmp_entry_ndx++;
 		}
 	}
 
 	/* Lock the above TLB entries and get the BIOS and load monitor timer
 	 * information */
 	if (!status) {
+		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_ttb_set(resources->dw_dmmu_base,
+			       dev_context->pt_attrs->l1_base_pa);
+		hw_mmu_twl_enable(resources->dw_dmmu_base);
+		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
+		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+		temp = (temp & 0xFFFFFFEF) | 0x11;
+		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+		/* Let the DSP MMU run */
+		hw_mmu_enable(resources->dw_dmmu_base);
+
 		/* Enable the BIOS clock */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_LOADMON_GPTIMER,
 				     &ul_load_monitor_timer);
+	}
 
+	if (!status) {
 		if (ul_load_monitor_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_load_monitor_timer;
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 			dev_dbg(bridge, "Not able to get the symbol for Load "
 				"Monitor Timer\n");
 		}
+	}
 
+	if (!status) {
 		if (ul_bios_gp_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_bios_gp_timer;
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 			dev_dbg(bridge,
 				"Not able to get the symbol for BIOS Timer\n");
 		}
+	}
 
+	if (!status) {
 		/* Set the DSP clock rate */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
 		/* Let DSP go */
 		dev_dbg(bridge, "%s Unreset\n", __func__);
+		/* Enable DSP MMU Interrupts */
+		hw_mmu_event_enable(resources->dw_dmmu_base,
+				    HW_MMU_ALL_INTERRUPTS);
 		/* release the RST1, DSP starts executing now .. */
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
 			/* update board state */
 			dev_context->dw_brd_state = BRD_RUNNING;
-			return 0;
+			/* (void)chnlsm_enable_interrupt(dev_context); */
 		} else {
 			dev_context->dw_brd_state = BRD_UNKNOWN;
 		}
 	}
-
-	while (tlb_i--) {
-		if (!tlb[tlb_i].ul_gpp_pa)
-			continue;
-		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
-	}
-	while (l4_i--)
-		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
-	if (sg0_da)
-		iommu_kunmap(mmu, sg0_da);
-	if (sg1_da)
-		iommu_kunmap(mmu, sg1_da);
 	return status;
 }
 
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
-	int i;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	struct omap_dsp_platform_data *pdata =
 	    omap_dspbridge_dev->dev.platform_data;
 
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
 
 	dsp_wdt_enable(false);
 
-	/* Reset DSP */
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
-		OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-
+	/* This is a good place to clear the MMU page tables as well */
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+		memset((u8 *) pt_attrs->pg_info, 0x00,
+		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+	}
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
 		omap_mbox_put(dev_context->mbox);
 		dev_context->mbox = NULL;
 	}
-	if (dev_context->dsp_mmu) {
-		pr_err("Proc stop mmu if statement\n");
-		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
-			if (!tlb[i].ul_gpp_pa)
-				continue;
-			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
-		}
-		i = 0;
-		while (l4_peripheral_table[i].phys_addr) {
-			iommu_kunmap(dev_context->dsp_mmu,
-				l4_peripheral_table[i].dsp_virt_addr);
-			i++;
-		}
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
-		dsp_mmu_exit(dev_context->dsp_mmu);
-		dev_context->dsp_mmu = NULL;
-	}
-	/* Reset IVA IOMMU*/
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
-		OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	/* Reset IVA2 clocks*/
+	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	dsp_clock_disable_all(dev_context->dsp_per_clks);
 	dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
+	struct pg_table_attrs *pt_attrs;
+	u32 pg_tbl_pa;
+	u32 pg_tbl_va;
+	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
 
+	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+	if (pt_attrs != NULL) {
+		/* Assuming that we use only DSP's memory map
+		 * until 0x4000:0000 , we would need only 1024
+		 * L1 enties i.e L1 size = 4K */
+		pt_attrs->l1_size = 0x1000;
+		align_size = pt_attrs->l1_size;
+		/* Align sizes are expected to be power of 2 */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+						     align_size, &pg_tbl_pa);
+
+		/* Check if the PA is aligned for us */
+		if ((pg_tbl_pa) & (align_size - 1)) {
+			/* PA not aligned to page table size ,
+			 * try with more allocation and align */
+			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+					  pt_attrs->l1_size);
+			/* we like to get aligned on L1 table size */
+			pg_tbl_va =
+			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+						     align_size, &pg_tbl_pa);
+			/* We should be able to get aligned table now */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+			/* Align the PA to the next 'align' boundary */
+			pt_attrs->l1_base_pa =
+			    ((pg_tbl_pa) +
+			     (align_size - 1)) & (~(align_size - 1));
+			pt_attrs->l1_base_va =
+			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+		} else {
+			/* We got aligned PA, cool */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+			pt_attrs->l1_base_pa = pg_tbl_pa;
+			pt_attrs->l1_base_va = pg_tbl_va;
+		}
+		if (pt_attrs->l1_base_va)
+			memset((u8 *) pt_attrs->l1_base_va, 0x00,
+			       pt_attrs->l1_size);
+
+		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
+		 * L4 pages */
+		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+		    pt_attrs->l2_num_pages;
+		align_size = 4;	/* Make it u32 aligned */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+						     align_size, &pg_tbl_pa);
+		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+		pt_attrs->l2_base_pa = pg_tbl_pa;
+		pt_attrs->l2_base_va = pg_tbl_va;
+
+		if (pt_attrs->l2_base_va)
+			memset((u8 *) pt_attrs->l2_base_va, 0x00,
+			       pt_attrs->l2_size);
+
+		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+					    sizeof(struct page_info), GFP_KERNEL);
+		dev_dbg(bridge,
+			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
+			"%x, size %x\n", pt_attrs->l1_base_pa,
+			pt_attrs->l1_base_va, pt_attrs->l1_size,
+			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+			pt_attrs->l2_size);
+		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+	}
+	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+		dev_context->pt_attrs = pt_attrs;
+	else
+		status = -ENOMEM;
+
 	if (!status) {
+		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+		/* Set the Clock Divisor for the DSP module */
+		udelay(5);
+		/* MMU address is obtained from the host
+		 * resources struct */
+		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+	}
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
 		/* Return ptr to our device state to the DSP API for storage */
 		*dev_cntxt = dev_context;
 	} else {
+		if (pt_attrs != NULL) {
+			kfree(pt_attrs->pg_info);
+
+			if (pt_attrs->l2_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l2_tbl_alloc_va,
+						  pt_attrs->l2_tbl_alloc_pa,
+						  pt_attrs->l2_tbl_alloc_sz);
+			}
+			if (pt_attrs->l1_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l1_tbl_alloc_va,
+						  pt_attrs->l1_tbl_alloc_pa,
+						  pt_attrs->l1_tbl_alloc_sz);
+			}
+		}
+		kfree(pt_attrs);
 		kfree(dev_context);
 	}
 func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
+	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		kfree(pt_attrs->pg_info);
+
+		if (pt_attrs->l2_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+					  pt_attrs->l2_tbl_alloc_pa,
+					  pt_attrs->l2_tbl_alloc_sz);
+		}
+		if (pt_attrs->l1_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+					  pt_attrs->l1_tbl_alloc_pa,
+					  pt_attrs->l1_tbl_alloc_sz);
+		}
+		kfree(pt_attrs);
+
+	}
 
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 			iounmap((void *)host_res->dw_mem_base[3]);
 		if (host_res->dw_mem_base[4])
 			iounmap((void *)host_res->dw_mem_base[4]);
+		if (host_res->dw_dmmu_base)
+			iounmap(host_res->dw_dmmu_base);
 		if (host_res->dw_per_base)
 			iounmap(host_res->dw_per_base);
 		if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 		host_res->dw_mem_base[2] = (u32) NULL;
 		host_res->dw_mem_base[3] = (u32) NULL;
 		host_res->dw_mem_base[4] = (u32) NULL;
+		host_res->dw_dmmu_base = NULL;
 		host_res->dw_sys_ctrl_base = NULL;
 
 		kfree(host_res);
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
928} 1128}
929 1129
930/* 1130/*
1131 * ======== bridge_brd_mem_map ========
1132 * This function maps MPU buffer to the DSP address space. It performs
1133 * linear to physical address translation if required. It translates each
1134 * page since linear addresses can be physically non-contiguous
1135 * All address & size arguments are assumed to be page aligned (in proc.c)
1136 *
1137 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1138 */
1139static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1140 u32 ul_mpu_addr, u32 virt_addr,
1141 u32 ul_num_bytes, u32 ul_map_attr,
1142 struct page **mapped_pages)
1143{
1144 u32 attrs;
1145 int status = 0;
1146 struct bridge_dev_context *dev_context = dev_ctxt;
1147 struct hw_mmu_map_attrs_t hw_attrs;
1148 struct vm_area_struct *vma;
1149 struct mm_struct *mm = current->mm;
1150 u32 write = 0;
1151 u32 num_usr_pgs = 0;
1152 struct page *mapped_page, *pg;
1153 s32 pg_num;
1154 u32 va = virt_addr;
1155 struct task_struct *curr_task = current;
1156 u32 pg_i = 0;
1157 u32 mpu_addr, pa;
1158
1159 dev_dbg(bridge,
1160 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1161 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1162 ul_map_attr);
1163 if (ul_num_bytes == 0)
1164 return -EINVAL;
1165
1166 if (ul_map_attr & DSP_MAP_DIR_MASK) {
1167 attrs = ul_map_attr;
1168 } else {
1169 /* Assign default attributes */
1170 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1171 }
1172 /* Take mapping properties */
1173 if (attrs & DSP_MAPBIGENDIAN)
1174 hw_attrs.endianism = HW_BIG_ENDIAN;
1175 else
1176 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1177
1178 hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1179 ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1180 /* Ignore element_size if mixed_size is enabled */
1181 if (hw_attrs.mixed_size == 0) {
1182 if (attrs & DSP_MAPELEMSIZE8) {
1183 /* Size is 8 bit */
1184 hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1185 } else if (attrs & DSP_MAPELEMSIZE16) {
1186 /* Size is 16 bit */
1187 hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1188 } else if (attrs & DSP_MAPELEMSIZE32) {
1189 /* Size is 32 bit */
1190 hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1191 } else if (attrs & DSP_MAPELEMSIZE64) {
1192 /* Size is 64 bit */
1193 hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1194 } else {
1195 /*
1196 * Mixedsize isn't enabled, so size can't be
1197 * zero here
1198 */
1199 return -EINVAL;
1200 }
1201 }
1202 if (attrs & DSP_MAPDONOTLOCK)
1203 hw_attrs.donotlockmpupage = 1;
1204 else
1205 hw_attrs.donotlockmpupage = 0;
1206
1207 if (attrs & DSP_MAPVMALLOCADDR) {
1208 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1209 ul_num_bytes, &hw_attrs);
1210 }
1211 /*
1212 * Do OS-specific user-va to pa translation.
1213 * Combine physically contiguous regions to reduce TLBs.
1214 * Pass the translated pa to pte_update.
1215 */
1216 if ((attrs & DSP_MAPPHYSICALADDR)) {
1217 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1218 ul_num_bytes, &hw_attrs);
1219 goto func_cont;
1220 }
1221
1222 /*
1223 * Important Note: ul_mpu_addr is mapped from user application process
1224 * to current process - it must lie completely within the current
1225 * virtual memory address space in order to be of use to us here!
1226 */
1227 down_read(&mm->mmap_sem);
1228 vma = find_vma(mm, ul_mpu_addr);
1229 if (vma)
1230 dev_dbg(bridge,
1231 "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1232 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1233 ul_num_bytes, vma->vm_start, vma->vm_end,
1234 vma->vm_flags);
1235
1236 /*
1237 * It is observed that under some circumstances, the user buffer is
1238 * spread across several VMAs. So loop through and check if the entire
1239 * user buffer is covered
1240 */
1241 while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1242 /* jump to the next VMA region */
1243 vma = find_vma(mm, vma->vm_end + 1);
1244 dev_dbg(bridge,
1245 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1246 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1247 ul_num_bytes, vma->vm_start, vma->vm_end,
1248 vma->vm_flags);
1249 }
1250 if (!vma) {
1251 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1252 __func__, ul_mpu_addr, ul_num_bytes);
1253 status = -EINVAL;
1254 up_read(&mm->mmap_sem);
1255 goto func_cont;
1256 }
1257
1258 if (vma->vm_flags & VM_IO) {
1259 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1260 mpu_addr = ul_mpu_addr;
1261
1262 /* Get the physical addresses for user buffer */
1263 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1264 pa = user_va2_pa(mm, mpu_addr);
1265 if (!pa) {
1266 status = -EPERM;
1267 pr_err("DSPBRIDGE: VM_IO mapping physical"
1268 "address is invalid\n");
1269 break;
1270 }
1271 if (pfn_valid(__phys_to_pfn(pa))) {
1272 pg = PHYS_TO_PAGE(pa);
1273 get_page(pg);
1274 if (page_count(pg) < 1) {
1275 pr_err("Bad page in VM_IO buffer\n");
1276 bad_page_dump(pa, pg);
1277 }
1278 }
1279 status = pte_set(dev_context->pt_attrs, pa,
1280 va, HW_PAGE_SIZE4KB, &hw_attrs);
1281 if (status)
1282 break;
1283
1284 va += HW_PAGE_SIZE4KB;
1285 mpu_addr += HW_PAGE_SIZE4KB;
1286 pa += HW_PAGE_SIZE4KB;
1287 }
1288 } else {
1289 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1290 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1291 write = 1;
1292
1293 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1294 pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1295 write, 1, &mapped_page, NULL);
1296 if (pg_num > 0) {
1297 if (page_count(mapped_page) < 1) {
1298 pr_err("Bad page count after doing"
1299 "get_user_pages on"
1300 "user buffer\n");
1301 bad_page_dump(page_to_phys(mapped_page),
1302 mapped_page);
1303 }
1304 status = pte_set(dev_context->pt_attrs,
1305 page_to_phys(mapped_page), va,
1306 HW_PAGE_SIZE4KB, &hw_attrs);
1307 if (status)
1308 break;
1309
1310 if (mapped_pages)
1311 mapped_pages[pg_i] = mapped_page;
1312
1313 va += HW_PAGE_SIZE4KB;
1314 ul_mpu_addr += HW_PAGE_SIZE4KB;
1315 } else {
1316 pr_err("DSPBRIDGE: get_user_pages FAILED,"
1317 "MPU addr = 0x%x,"
1318 "vma->vm_flags = 0x%lx,"
1319 "get_user_pages Err"
1320 "Value = %d, Buffer"
1321 "size=0x%x\n", ul_mpu_addr,
1322 vma->vm_flags, pg_num, ul_num_bytes);
1323 status = -EPERM;
1324 break;
1325 }
1326 }
1327 }
1328 up_read(&mm->mmap_sem);
1329func_cont:
1330 if (status) {
1331 /*
1332 * Roll out the mapped pages incase it failed in middle of
1333 * mapping
1334 */
1335 if (pg_i) {
1336 bridge_brd_mem_un_map(dev_context, virt_addr,
1337 (pg_i * PG_SIZE4K));
1338 }
1339 status = -EPERM;
1340 }
1341 /*
1342 * In any case, flush the TLB
1343 * This is called from here instead from pte_update to avoid unnecessary
1344 * repetition while mapping non-contiguous physical regions of a virtual
1345 * region
1346 */
1347 flush_all(dev_context);
1348 dev_dbg(bridge, "%s status %x\n", __func__, status);
1349 return status;
1350}
1351
1352/*
1353 * ======== bridge_brd_mem_un_map ========
1354 * Invalidate the PTEs for the DSP VA block to be unmapped.
1355 *
1356 * PTEs of a mapped memory block are contiguous in any page table
1357 * So, instead of looking up the PTE address for every 4K block,
1358 * we clear consecutive PTEs until we unmap all the bytes
1359 */
1360static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1361 u32 virt_addr, u32 ul_num_bytes)
1362{
1363 u32 l1_base_va;
1364 u32 l2_base_va;
1365 u32 l2_base_pa;
1366 u32 l2_page_num;
1367 u32 pte_val;
1368 u32 pte_size;
1369 u32 pte_count;
1370 u32 pte_addr_l1;
1371 u32 pte_addr_l2 = 0;
1372 u32 rem_bytes;
1373 u32 rem_bytes_l2;
1374 u32 va_curr;
1375 struct page *pg = NULL;
1376 int status = 0;
1377 struct bridge_dev_context *dev_context = dev_ctxt;
1378 struct pg_table_attrs *pt = dev_context->pt_attrs;
1379 u32 temp;
1380 u32 paddr;
1381 u32 numof4k_pages = 0;
1382
1383 va_curr = virt_addr;
1384 rem_bytes = ul_num_bytes;
1385 rem_bytes_l2 = 0;
1386 l1_base_va = pt->l1_base_va;
1387 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1388 dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1389 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1390 ul_num_bytes, l1_base_va, pte_addr_l1);
1391
1392 while (rem_bytes && !status) {
1393 u32 va_curr_orig = va_curr;
1394 /* Find whether the L1 PTE points to a valid L2 PT */
1395 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1396 pte_val = *(u32 *) pte_addr_l1;
1397 pte_size = hw_mmu_pte_size_l1(pte_val);
1398
1399 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1400 goto skip_coarse_page;
1401
1402 /*
1403 * Get the L2 PA from the L1 PTE, and find
1404 * corresponding L2 VA
1405 */
1406 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1407 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1408 l2_page_num =
1409 (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1410 /*
1411 * Find the L2 PTE address from which we will start
1412 * clearing, the number of PTEs to be cleared on this
1413 * page, and the size of VA space that needs to be
1414 * cleared on this L2 page
1415 */
1416 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1417 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1418 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1419 if (rem_bytes < (pte_count * PG_SIZE4K))
1420 pte_count = rem_bytes / PG_SIZE4K;
1421 rem_bytes_l2 = pte_count * PG_SIZE4K;
1422
1423 /*
1424 * Unmap the VA space on this L2 PT. A quicker way
1425 * would be to clear pte_count entries starting from
1426 * pte_addr_l2. However, the code below checks that we don't
1427 * clear invalid entries, or clear less than 64KB for a 64KB
1428 * entry. Similar checking is done for L1 PTEs
1429 * below
1430 */
1431 while (rem_bytes_l2 && !status) {
1432 pte_val = *(u32 *) pte_addr_l2;
1433 pte_size = hw_mmu_pte_size_l2(pte_val);
1434 /* va_curr aligned to pte_size? */
1435 if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1436 va_curr & (pte_size - 1)) {
1437 status = -EPERM;
1438 break;
1439 }
1440
1441 /* Collect Physical addresses from VA */
1442 paddr = (pte_val & ~(pte_size - 1));
1443 if (pte_size == HW_PAGE_SIZE64KB)
1444 numof4k_pages = 16;
1445 else
1446 numof4k_pages = 1;
1447 temp = 0;
1448 while (temp++ < numof4k_pages) {
1449 if (!pfn_valid(__phys_to_pfn(paddr))) {
1450 paddr += HW_PAGE_SIZE4KB;
1451 continue;
1452 }
1453 pg = PHYS_TO_PAGE(paddr);
1454 if (page_count(pg) < 1) {
1455 pr_info("DSPBRIDGE: UNMAP function: "
1456 "COUNT 0 FOR PA 0x%x, size = "
1457 "0x%x\n", paddr, ul_num_bytes);
1458 bad_page_dump(paddr, pg);
1459 } else {
1460 set_page_dirty(pg);
1461 page_cache_release(pg);
1462 }
1463 paddr += HW_PAGE_SIZE4KB;
1464 }
1465 if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1466 status = -EPERM;
1467 goto EXIT_LOOP;
1468 }
1469
1470 status = 0;
1471 rem_bytes_l2 -= pte_size;
1472 va_curr += pte_size;
1473 pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1474 }
1475 spin_lock(&pt->pg_lock);
1476 if (rem_bytes_l2 == 0) {
1477 pt->pg_info[l2_page_num].num_entries -= pte_count;
1478 if (pt->pg_info[l2_page_num].num_entries == 0) {
1479 /*
1480 * Clear the L1 PTE pointing to the L2 PT
1481 */
1482 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1483 HW_MMU_COARSE_PAGE_SIZE))
1484 status = 0;
1485 else {
1486 status = -EPERM;
1487 spin_unlock(&pt->pg_lock);
1488 goto EXIT_LOOP;
1489 }
1490 }
1491 rem_bytes -= pte_count * PG_SIZE4K;
1492 } else
1493 status = -EPERM;
1494
1495 spin_unlock(&pt->pg_lock);
1496 continue;
1497skip_coarse_page:
1498 /* va_curr aligned to pte_size? */
1499 /* pte_size = 1 MB or 16 MB */
1500 if (pte_size == 0 || rem_bytes < pte_size ||
1501 va_curr & (pte_size - 1)) {
1502 status = -EPERM;
1503 break;
1504 }
1505
1506 if (pte_size == HW_PAGE_SIZE1MB)
1507 numof4k_pages = 256;
1508 else
1509 numof4k_pages = 4096;
1510 temp = 0;
1511 /* Collect Physical addresses from VA */
1512 paddr = (pte_val & ~(pte_size - 1));
1513 while (temp++ < numof4k_pages) {
1514 if (pfn_valid(__phys_to_pfn(paddr))) {
1515 pg = PHYS_TO_PAGE(paddr);
1516 if (page_count(pg) < 1) {
1517 pr_info("DSPBRIDGE: UNMAP function: "
1518 "COUNT 0 FOR PA 0x%x, size = "
1519 "0x%x\n", paddr, ul_num_bytes);
1520 bad_page_dump(paddr, pg);
1521 } else {
1522 set_page_dirty(pg);
1523 page_cache_release(pg);
1524 }
1525 }
1526 paddr += HW_PAGE_SIZE4KB;
1527 }
1528 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1529 status = 0;
1530 rem_bytes -= pte_size;
1531 va_curr += pte_size;
1532 } else {
1533 status = -EPERM;
1534 goto EXIT_LOOP;
1535 }
1536 }
1537 /*
1538 * It is better to flush the TLB here, so that any stale
1539 * entries are flushed
1540 */
1541EXIT_LOOP:
1542 flush_all(dev_context);
1543 dev_dbg(bridge,
1544 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1545 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1546 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1547 return status;
1548}
1549
1550/*
1551 * ======== user_va2_pa ========
1552 * Purpose:
1553 * This function walks through the page tables to convert a userland
1554 * virtual address to physical address
1555 */
1556static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1557{
1558 pgd_t *pgd;
1559 pmd_t *pmd;
1560 pte_t *ptep, pte;
1561
1562 pgd = pgd_offset(mm, address);
1563 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1564 pmd = pmd_offset(pgd, address);
1565 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1566 ptep = pte_offset_map(pmd, address);
1567 if (ptep) {
1568 pte = *ptep;
1569 if (pte_present(pte))
1570 return pte & PAGE_MASK;
1571 }
1572 }
1573 }
1574
1575 return 0;
1576}
1577
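One caveat worth noting: pte_offset_map() creates a temporary kernel mapping on highmem configurations and is conventionally paired with pte_unmap(). A sketch of the balanced form of the innermost block above (behavior otherwise unchanged):

	ptep = pte_offset_map(pmd, address);
	if (ptep) {
		pte = *ptep;
		pte_unmap(ptep);	/* drop the temporary mapping */
		if (pte_present(pte))
			return pte & PAGE_MASK;
	}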
1578/*
1579 * ======== pte_update ========
1580 * This function calculates the optimum page-aligned addresses and sizes
1581 * Caller must pass page-aligned values
1582 */
1583static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1584 u32 va, u32 size,
1585 struct hw_mmu_map_attrs_t *map_attrs)
1586{
1587 u32 i;
1588 u32 all_bits;
1589 u32 pa_curr = pa;
1590 u32 va_curr = va;
1591 u32 num_bytes = size;
1592 struct bridge_dev_context *dev_context = dev_ctxt;
1593 int status = 0;
1594 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1595 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1596 };
1597
1598 while (num_bytes && !status) {
1599 /* Find the maximum page size with which both PA and VA are
1600 * aligned */
1601 all_bits = pa_curr | va_curr;
1602
1603 for (i = 0; i < 4; i++) {
1604 if ((num_bytes >= page_size[i]) && ((all_bits &
1605 (page_size[i] -
1606 1)) == 0)) {
1607 status =
1608 pte_set(dev_context->pt_attrs, pa_curr,
1609 va_curr, page_size[i], map_attrs);
1610 pa_curr += page_size[i];
1611 va_curr += page_size[i];
1612 num_bytes -= page_size[i];
1613 /* Don't try smaller sizes. Hopefully we have
1614 * reached an address aligned to a bigger page
1615 * size */
1616 break;
1617 }
1618 }
1619 }
1620
1621 return status;
1622}
1623
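The loop above always tries the largest hardware page first and falls back until both addresses and the remaining length qualify. A self-contained user-space sketch of the same selection logic, with the page_size[] values from hw_defs.h and sample addresses chosen purely for illustration:

	#include <stdio.h>

	/* page sizes as defined in hw_defs.h: 16MB, 1MB, 64KB, 4KB */
	static const unsigned page_size[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };

	int main(void)
	{
		unsigned pa = 0x80110000, va = 0x60110000, num_bytes = 0x32000;

		while (num_bytes) {
			unsigned all_bits = pa | va;
			int i;

			for (i = 0; i < 4; i++) {
				if (num_bytes >= page_size[i] &&
				    (all_bits & (page_size[i] - 1)) == 0) {
					/* prints three 64KB chunks, then two 4KB chunks */
					printf("map 0x%x bytes at pa 0x%x\n",
					       page_size[i], pa);
					pa += page_size[i];
					va += page_size[i];
					num_bytes -= page_size[i];
					break;
				}
			}
		}
		return 0;
	}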
1624/*
1625 * ======== pte_set ========
1626 * This function calculates PTE address (MPU virtual) to be updated
1627 * It also manages the L2 page tables
1628 */
1629static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1630 u32 size, struct hw_mmu_map_attrs_t *attrs)
1631{
1632 u32 i;
1633 u32 pte_val;
1634 u32 pte_addr_l1;
1635 u32 pte_size;
1636 /* Base address of the PT that will be updated */
1637 u32 pg_tbl_va;
1638 u32 l1_base_va;
1639 /* The compiler warns that the next three variables might be used
1640 * uninitialized in this function. They are not, but initialize
1641 * them anyway to silence the warning. */
1642 u32 l2_base_va = 0;
1643 u32 l2_base_pa = 0;
1644 u32 l2_page_num = 0;
1645 int status = 0;
1646
1647 l1_base_va = pt->l1_base_va;
1648 pg_tbl_va = l1_base_va;
1649 if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1650 /* Find whether the L1 PTE points to a valid L2 PT */
1651 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1652 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1653 pte_val = *(u32 *) pte_addr_l1;
1654 pte_size = hw_mmu_pte_size_l1(pte_val);
1655 } else {
1656 return -EPERM;
1657 }
1658 spin_lock(&pt->pg_lock);
1659 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1660 /* Get the L2 PA from the L1 PTE, and find
1661 * corresponding L2 VA */
1662 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1663 l2_base_va =
1664 l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1665 l2_page_num =
1666 (l2_base_pa -
1667 pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1668 } else if (pte_size == 0) {
1669 /* L1 PTE is invalid. Allocate a L2 PT and
1670 * point the L1 PTE to it */
1671 /* Find a free L2 PT. */
1672 for (i = 0; (i < pt->l2_num_pages) &&
1673 (pt->pg_info[i].num_entries != 0); i++)
1674 ;
1675 if (i < pt->l2_num_pages) {
1676 l2_page_num = i;
1677 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1678 HW_MMU_COARSE_PAGE_SIZE);
1679 l2_base_va = pt->l2_base_va + (l2_page_num *
1680 HW_MMU_COARSE_PAGE_SIZE);
1681 /* Endianness attributes are ignored for
1682 * HW_MMU_COARSE_PAGE_SIZE */
1683 status =
1684 hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1685 HW_MMU_COARSE_PAGE_SIZE,
1686 attrs);
1687 } else {
1688 status = -ENOMEM;
1689 }
1690 } else {
1691 /* Found valid L1 PTE of another size.
1692 * Should not overwrite it. */
1693 status = -EPERM;
1694 }
1695 if (!status) {
1696 pg_tbl_va = l2_base_va;
1697 if (size == HW_PAGE_SIZE64KB)
1698 pt->pg_info[l2_page_num].num_entries += 16;
1699 else
1700 pt->pg_info[l2_page_num].num_entries++;
1701 dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1702 "%x, num_entries %x\n", l2_base_va,
1703 l2_base_pa, l2_page_num,
1704 pt->pg_info[l2_page_num].num_entries);
1705 }
1706 spin_unlock(&pt->pg_lock);
1707 }
1708 if (!status) {
1709 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1710 pg_tbl_va, pa, va, size);
1711 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1712 "mixed_size %x\n", attrs->endianism,
1713 attrs->element_size, attrs->mixed_size);
1714 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1715 }
1716
1717 return status;
1718}
1719
1720/* Memory map kernel VA -- memory allocated with vmalloc */
1721static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1722 u32 ul_mpu_addr, u32 virt_addr,
1723 u32 ul_num_bytes,
1724 struct hw_mmu_map_attrs_t *hw_attrs)
1725{
1726 int status = 0;
1727 struct page *page[1];
1728 u32 i;
1729 u32 pa_curr;
1730 u32 pa_next;
1731 u32 va_curr;
1732 u32 size_curr;
1733 u32 num_pages;
1734 u32 pa;
1735 u32 num_of4k_pages;
1736 u32 temp = 0;
1737
1738 /*
1739 * Do Kernel va to pa translation.
1740 * Combine physically contiguous regions to reduce TLBs.
1741 * Pass the translated pa to pte_update.
1742 */
1743 num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
1744 i = 0;
1745 va_curr = ul_mpu_addr;
1746 page[0] = vmalloc_to_page((void *)va_curr);
1747 pa_next = page_to_phys(page[0]);
1748 while (!status && (i < num_pages)) {
1749 /*
1750 * Reuse pa_next from the previous iteration to avoid
1751 * an extra va2pa call
1752 */
1753 pa_curr = pa_next;
1754 size_curr = PAGE_SIZE;
1755 /*
1756 * If the next page is physically contiguous,
1757 * map it with the current one by increasing
1758 * the size of the region to be mapped
1759 */
1760 while (++i < num_pages) {
1761 page[0] =
1762 vmalloc_to_page((void *)(va_curr + size_curr));
1763 pa_next = page_to_phys(page[0]);
1764
1765 if (pa_next == (pa_curr + size_curr))
1766 size_curr += PAGE_SIZE;
1767 else
1768 break;
1769
1770 }
1771 if (pa_next == 0) {
1772 status = -ENOMEM;
1773 break;
1774 }
1775 pa = pa_curr;
1776 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1777 while (temp++ < num_of4k_pages) {
1778 get_page(PHYS_TO_PAGE(pa));
1779 pa += HW_PAGE_SIZE4KB;
1780 }
1781 status = pte_update(dev_context, pa_curr, virt_addr +
1782 (va_curr - ul_mpu_addr), size_curr,
1783 hw_attrs);
1784 va_curr += size_curr;
1785 }
1786 /*
1787 * In any case, flush the TLB
1788 * This is done here instead of in pte_update to avoid unnecessary
1789 * repetition while mapping non-contiguous physical regions of a virtual
1790 * region
1791 */
1792 flush_all(dev_context);
1793 dev_dbg(bridge, "%s status %x\n", __func__, status);
1794 return status;
1795}
1796
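The inner while loop above coalesces physically contiguous vmalloc pages so that pte_update() sees fewer, larger regions. The same run-length grouping, demonstrated in user space on a plain array of per-page physical addresses (the values are hypothetical):

	#include <stdio.h>

	#define PAGE_SZ 0x1000u

	int main(void)
	{
		/* physical address of each virtually consecutive page */
		unsigned pa[] = { 0x81000000, 0x81001000, 0x81002000,
				  0x87650000, 0x87651000 };
		unsigned n = sizeof(pa) / sizeof(pa[0]), i = 0;

		while (i < n) {
			unsigned pa_curr = pa[i];
			unsigned size_curr = PAGE_SZ;

			/* grow the region while the next page is contiguous */
			while (++i < n && pa[i] == pa_curr + size_curr)
				size_curr += PAGE_SZ;

			/* two regions result: 0x3000 bytes, then 0x2000 bytes */
			printf("region: pa 0x%x, size 0x%x\n", pa_curr, size_curr);
		}
		return 0;
	}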
1797/*
931 * ======== wait_for_start ======== 1798 * ======== wait_for_start ========
932 * Wait for the signal from DSP that it has started, or time out. 1799 * Wait for the signal from DSP that it has started, or time out.
933 */ 1800 */
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd5e757..fb9026e1403c 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
31#include <dspbridge/dev.h> 31#include <dspbridge/dev.h>
32#include <dspbridge/iodefs.h> 32#include <dspbridge/iodefs.h>
33 33
34/* ------------------------------------ Hardware Abstraction Layer */
35#include <hw_defs.h>
36#include <hw_mmu.h>
37
34#include <dspbridge/pwr_sh.h> 38#include <dspbridge/pwr_sh.h>
35 39
36/* ----------------------------------- Bridge Driver */ 40/* ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02549e4..ba2961049dad 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
134 134
135 if (!status) { 135 if (!status) {
136 ul_tlb_base_virt = 136 ul_tlb_base_virt =
137 dev_context->sh_s.seg0_da * DSPWORDSIZE; 137 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; 139 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].ul_gpp_va;
140 141
141 if (!trace_read) { 142 if (!trace_read) {
142 ul_shm_offset_virt = 143 ul_shm_offset_virt =
143 ul_shm_base_virt - ul_tlb_base_virt; 144 ul_shm_base_virt - ul_tlb_base_virt;
144 ul_shm_offset_virt += 145 ul_shm_offset_virt +=
145 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + 146 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
146 1, PAGE_SIZE * 16); 147 1, HW_PAGE_SIZE64KB);
147 dw_ext_prog_virt_mem -= ul_shm_offset_virt; 148 dw_ext_prog_virt_mem -= ul_shm_offset_virt;
148 dw_ext_prog_virt_mem += 149 dw_ext_prog_virt_mem +=
149 (ul_ext_base - ul_dyn_ext_base); 150 (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
317 ret = -EPERM; 318 ret = -EPERM;
318 319
319 if (!ret) { 320 if (!ret) {
320 ul_tlb_base_virt = dev_context->sh_s.seg0_da * 321 ul_tlb_base_virt =
321 DSPWORDSIZE; 322 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
322
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 324
325 if (symbols_reloaded) { 325 if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
337 ul_shm_base_virt - ul_tlb_base_virt; 337 ul_shm_base_virt - ul_tlb_base_virt;
338 if (trace_load) { 338 if (trace_load) {
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->sh_s.seg0_va; 340 dev_context->atlb_entry[0].ul_gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
393 omap_dspbridge_dev->dev.platform_data; 393 omap_dspbridge_dev->dev.platform_data;
394 struct cfg_hostres *resources = dev_context->resources; 394 struct cfg_hostres *resources = dev_context->resources;
395 int status = 0; 395 int status = 0;
396 u32 temp;
396 397
397 if (!dev_context->mbox) 398 if (!dev_context->mbox)
398 return 0; 399 return 0;
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
436 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
437 438
438 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
439 iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); 440 temp = readl(resources->dw_dmmu_base + 0x10);
440 441
441 dev_context->dw_brd_state = BRD_RUNNING; 442 dev_context->dw_brd_state = BRD_RUNNING;
442 } else if (dev_context->dw_brd_state == BRD_RETENTION) { 443 } else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c73914..3430418190da 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
31#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
32#include <dspbridge/wdt.h> 32#include <dspbridge/wdt.h>
33 33
34static u32 fault_addr;
35
36static void mmu_fault_dpc(unsigned long data)
37{
38 struct deh_mgr *deh = (void *)data;
39
40 if (!deh)
41 return;
42
43 bridge_deh_notify(deh, DSP_MMUFAULT, 0);
44}
45
46static irqreturn_t mmu_fault_isr(int irq, void *data)
47{
48 struct deh_mgr *deh = data;
49 struct cfg_hostres *resources;
50 u32 event;
51
52 if (!deh)
53 return IRQ_HANDLED;
54
55 resources = deh->hbridge_context->resources;
56 if (!resources) {
57 dev_dbg(bridge, "%s: Failed to get Host Resources\n",
58 __func__);
59 return IRQ_HANDLED;
60 }
61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr);
67 /*
68 * Schedule a DPC directly. In the future, it may be
69 * necessary to check if DSP MMU fault is intended for
70 * Bridge.
71 */
72 tasklet_schedule(&deh->dpc_tasklet);
73
74 /* Disable the MMU events; otherwise, once the fault is
75 * cleared it will start raising interrupts again */
76 hw_mmu_event_disable(resources->dw_dmmu_base,
77 HW_MMU_TRANSLATION_FAULT);
78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base,
80 HW_MMU_ALL_INTERRUPTS);
81 }
82 return IRQ_HANDLED;
83}
84
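mmu_fault_isr() follows the usual top-half/bottom-half split: the hard-IRQ handler only captures the fault address and masks further events, while bridge_deh_notify() runs later from the tasklet. The pattern, reduced to a generic sketch (the irq number and names are placeholders, not part of this driver):

	#include <linux/interrupt.h>

	static struct tasklet_struct dpc;

	static void dpc_fn(unsigned long data)
	{
		/* bottom half: free to take locks, notify, log, ... */
	}

	static irqreturn_t isr(int irq, void *data)
	{
		/* top half: capture state, silence the source, return fast */
		tasklet_schedule(&dpc);	/* dpc_fn() runs soon, outside hard-IRQ */
		return IRQ_HANDLED;
	}

	/* setup, e.g. at probe time:
	 *	tasklet_init(&dpc, dpc_fn, 0);
	 *	request_irq(MY_IRQ, isr, 0, "my-dev", NULL);
	 */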
34int bridge_deh_create(struct deh_mgr **ret_deh, 85int bridge_deh_create(struct deh_mgr **ret_deh,
35 struct dev_object *hdev_obj) 86 struct dev_object *hdev_obj)
36{ 87{
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
58 } 109 }
59 ntfy_init(deh->ntfy_obj); 110 ntfy_init(deh->ntfy_obj);
60 111
112 /* Create a MMUfault DPC */
113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
114
61 /* Fill in context structure */ 115 /* Fill in context structure */
62 deh->hbridge_context = hbridge_context; 116 deh->hbridge_context = hbridge_context;
63 117
118 /* Install ISR function for DSP MMU fault */
119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
120 "DspBridge\tiommu fault", deh);
121 if (status < 0)
122 goto err;
123
64 *ret_deh = deh; 124 *ret_deh = deh;
65 return 0; 125 return 0;
66 126
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
80 ntfy_delete(deh->ntfy_obj); 140 ntfy_delete(deh->ntfy_obj);
81 kfree(deh->ntfy_obj); 141 kfree(deh->ntfy_obj);
82 } 142 }
143 /* Disable DSP MMU fault */
144 free_irq(INT_DSP_MMU_IRQ, deh);
145
146 /* Free DPC object */
147 tasklet_kill(&deh->dpc_tasklet);
83 148
84 /* Deallocate the DEH manager object */ 149 /* Deallocate the DEH manager object */
85 kfree(deh); 150 kfree(deh);
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
101 return ntfy_unregister(deh->ntfy_obj, hnotification); 166 return ntfy_unregister(deh->ntfy_obj, hnotification);
102} 167}
103 168
169#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
170static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
171{
172 struct cfg_hostres *resources;
173 struct hw_mmu_map_attrs_t map_attrs = {
174 .endianism = HW_LITTLE_ENDIAN,
175 .element_size = HW_ELEM_SIZE16BIT,
176 .mixed_size = HW_MMU_CPUES,
177 };
178 void *dummy_va_addr;
179
180 resources = dev_context->resources;
181 dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
182
183 /*
184 * Before acking the MMU fault, let's make sure the MMU can only
185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack.
187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
190
191 hw_mmu_tlb_add(resources->dw_dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET);
195
196 dsp_clk_enable(DSP_CLK_GPT8);
197
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199
200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base,
202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8);
205
206 hw_mmu_disable(resources->dw_dmmu_base);
207 free_page((unsigned long)dummy_va_addr);
208}
209#endif
210
104static inline const char *event_to_string(int event) 211static inline const char *event_to_string(int event)
105{ 212{
106 switch (event) { 213 switch (event) {
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
133#endif 240#endif
134 break; 241 break;
135 case DSP_MMUFAULT: 242 case DSP_MMUFAULT:
136 dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); 243 dev_err(bridge, "%s: %s, addr=0x%x", __func__,
244 str, fault_addr);
245#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
246 print_dsp_trace_buffer(dev_context);
247 dump_dl_modules(dev_context);
248 mmu_fault_print_stack(dev_context);
249#endif
137 break; 250 break;
138 default: 251 default:
139 dev_err(bridge, "%s: %s", __func__, str); 252 dev_err(bridge, "%s: %s", __func__, str);
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 000000000000..e48d7f67c60a
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
1/*
2 * EasiGlobal.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _EASIGLOBAL_H
18#define _EASIGLOBAL_H
19#include <linux/types.h>
20
21/*
22 * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
23 *
24 * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
25 */
26
27#define READ_ONLY 1
28#define WRITE_ONLY 2
29#define READ_WRITE 3
30
31/*
32 * MACRO: _DEBUG_LEVEL1_EASI
33 *
34 * DESCRIPTION: A MACRO which can be used to indicate that a particular beach
35 * register access function was called.
36 *
37 * NOTE: We currently don't use this functionality.
38 */
39#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
40
41#endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 000000000000..1cefca321d71
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
1/*
2 * MMUAccInt.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_ACC_INT_H
18#define _MMU_ACC_INT_H
19
20/* Mappings of level 1 EASI function numbers to function names */
21
22#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
23#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
24#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
25#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
26#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
27#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
28#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
29#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
30#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
31#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
32#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
33#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
34#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
35#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
36#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
37#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
38#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
39#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
40#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
41#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
42#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
43#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
44#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
45
46/* Register offset address definitions */
47#define MMU_MMU_SYSCONFIG_OFFSET 0x10
48#define MMU_MMU_IRQSTATUS_OFFSET 0x18
49#define MMU_MMU_IRQENABLE_OFFSET 0x1c
50#define MMU_MMU_WALKING_ST_OFFSET 0x40
51#define MMU_MMU_CNTL_OFFSET 0x44
52#define MMU_MMU_FAULT_AD_OFFSET 0x48
53#define MMU_MMU_TTB_OFFSET 0x4c
54#define MMU_MMU_LOCK_OFFSET 0x50
55#define MMU_MMU_LD_TLB_OFFSET 0x54
56#define MMU_MMU_CAM_OFFSET 0x58
57#define MMU_MMU_RAM_OFFSET 0x5c
58#define MMU_MMU_GFLUSH_OFFSET 0x60
59#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
60/* Bitfield mask and offset declarations */
61#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
62#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
63#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
64#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
65#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
66#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
67#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
68#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
69#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
70#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
71#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
72#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
73#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
74#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
75
76#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 000000000000..ab1a16da731c
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
1/*
2 * MMURegAcM.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_REG_ACM_H
18#define _MMU_REG_ACM_H
19
20#include <linux/io.h>
21#include <EasiGlobal.h>
22
23#include "MMUAccInt.h"
24
25#if defined(USE_LEVEL_1_MACROS)
26
27#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
28 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
29 __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
30
31#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
32{\
33 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
34 register u32 data = __raw_readl((base_address)+offset);\
35 register u32 new_value = (value);\
36 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
37 data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
38 new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
39 new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
40 new_value |= data;\
41 __raw_writel(new_value, base_address+offset);\
42}
43
44#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
45{\
46 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
47 register u32 data = __raw_readl((base_address)+offset);\
48 register u32 new_value = (value);\
49 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
50 data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
51 new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
52 new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
53 new_value |= data;\
54 __raw_writel(new_value, base_address+offset);\
55}
56
57#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
58 (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
59 __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
60
61#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
62{\
63 const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
64 register u32 new_value = (value);\
65 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
66 __raw_writel(new_value, (base_address)+offset);\
67}
68
69#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
70 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
71 __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
72
73#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
74{\
75 const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
76 register u32 new_value = (value);\
77 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
78 __raw_writel(new_value, (base_address)+offset);\
79}
80
81#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
82 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
83 (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
84 & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
85 MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
86
87#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
88 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
89 (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
90 MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
91 MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
92
93#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
94{\
95 const u32 offset = MMU_MMU_CNTL_OFFSET;\
96 register u32 data = __raw_readl((base_address)+offset);\
97 register u32 new_value = (value);\
98 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
99 data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
100 new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
101 new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
102 new_value |= data;\
103 __raw_writel(new_value, base_address+offset);\
104}
105
106#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
107{\
108 const u32 offset = MMU_MMU_CNTL_OFFSET;\
109 register u32 data = __raw_readl((base_address)+offset);\
110 register u32 new_value = (value);\
111 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
112 data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
113 new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
114 new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
115 new_value |= data;\
116 __raw_writel(new_value, base_address+offset);\
117}
118
119#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
120 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
121 __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
122
123#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
124{\
125 const u32 offset = MMU_MMU_TTB_OFFSET;\
126 register u32 new_value = (value);\
127 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
128 __raw_writel(new_value, (base_address)+offset);\
129}
130
131#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
132 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
133 __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
134
135#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
136{\
137 const u32 offset = MMU_MMU_LOCK_OFFSET;\
138 register u32 new_value = (value);\
139 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
140 __raw_writel(new_value, (base_address)+offset);\
141}
142
143#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
144 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
145 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
146 MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
147 MMU_MMU_LOCK_BASE_VALUE_OFFSET))
148
149#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
150{\
151 const u32 offset = MMU_MMU_LOCK_OFFSET;\
152 register u32 data = __raw_readl((base_address)+offset);\
153 register u32 new_value = (value);\
154 _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
155 data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
156 new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
157 new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
158 new_value |= data;\
159 __raw_writel(new_value, base_address+offset);\
160}
161
162#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
163 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
164 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
165 MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
166 MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
167
168#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
169{\
170 const u32 offset = MMU_MMU_LOCK_OFFSET;\
171 register u32 data = __raw_readl((base_address)+offset);\
172 register u32 new_value = (value);\
173 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
174 data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
175 new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
176 new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
177 new_value |= data;\
178 __raw_writel(new_value, base_address+offset);\
179}
180
181#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
182 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
183 (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
184 (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
185 MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
186
187#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
188 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
189 __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
190
191#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
192{\
193 const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
194 register u32 new_value = (value);\
195 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
196 __raw_writel(new_value, (base_address)+offset);\
197}
198
199#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
200{\
201 const u32 offset = MMU_MMU_CAM_OFFSET;\
202 register u32 new_value = (value);\
203 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
204 __raw_writel(new_value, (base_address)+offset);\
205}
206
207#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
208{\
209 const u32 offset = MMU_MMU_RAM_OFFSET;\
210 register u32 new_value = (value);\
211 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
212 __raw_writel(new_value, (base_address)+offset);\
213}
214
215#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
216{\
217 const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
218 register u32 new_value = (value);\
219 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
220 __raw_writel(new_value, (base_address)+offset);\
221}
222
223#endif /* USE_LEVEL_1_MACROS */
224
225#endif /* _MMU_REG_ACM_H */
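Each *_WRITE32 macro above instantiates one read-modify-write idiom: read the register, clear the target field with its mask, shift the new value into position, and write the result back. An equivalent plain helper, shown for illustration only (not part of the driver):

	static inline void mmu_field_write(void __iomem *base, u32 offset,
					   u32 mask, u32 shift, u32 value)
	{
		u32 reg = __raw_readl(base + offset);

		reg &= ~mask;			/* clear the field */
		reg |= (value << shift) & mask;	/* insert the new value */
		__raw_writel(reg, base + offset);
	}

	/* e.g. MMUMMU_CNTLTWL_ENABLE_WRITE32(base, HW_SET) is equivalent to
	 *	mmu_field_write(base, MMU_MMU_CNTL_OFFSET,
	 *			MMU_MMU_CNTL_TWL_ENABLE_MASK,
	 *			MMU_MMU_CNTL_TWL_ENABLE_OFFSET, HW_SET);
	 */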
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 000000000000..d5266d4c163f
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
1/*
2 * hw_defs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global HW definitions
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_DEFS_H
20#define _HW_DEFS_H
21
22/* Page size */
23#define HW_PAGE_SIZE4KB 0x1000
24#define HW_PAGE_SIZE64KB 0x10000
25#define HW_PAGE_SIZE1MB 0x100000
26#define HW_PAGE_SIZE16MB 0x1000000
27
28/* hw_status: return type for HW API */
29typedef long hw_status;
30
31/* Macro used to set and clear any bit */
32#define HW_CLEAR 0
33#define HW_SET 1
34
35/* hw_endianism_t: Enumerated Type used to specify the endianism
36 * Do NOT change these values. They are used as bit fields. */
37enum hw_endianism_t {
38 HW_LITTLE_ENDIAN,
39 HW_BIG_ENDIAN
40};
41
42/* hw_element_size_t: Enumerated Type used to specify the element size
43 * Do NOT change these values. They are used as bit fields. */
44enum hw_element_size_t {
45 HW_ELEM_SIZE8BIT,
46 HW_ELEM_SIZE16BIT,
47 HW_ELEM_SIZE32BIT,
48 HW_ELEM_SIZE64BIT
49};
50
51/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
52enum hw_idle_mode_t {
53 HW_FORCE_IDLE,
54 HW_NO_IDLE,
55 HW_SMART_IDLE
56};
57
58#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 000000000000..014f5d5293ae
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
1/*
2 * hw_mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * API definitions to setup MMU TLB and PTE
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/io.h>
20#include "MMURegAcM.h"
21#include <hw_defs.h>
22#include <hw_mmu.h>
23#include <linux/types.h>
24#include <linux/err.h>
25
26#define MMU_BASE_VAL_MASK 0xFC00
27#define MMU_PAGE_MAX 3
28#define MMU_ELEMENTSIZE_MAX 3
29#define MMU_ADDR_MASK 0xFFFFF000
30#define MMU_TTB_MASK 0xFFFFC000
31#define MMU_SECTION_ADDR_MASK 0xFFF00000
32#define MMU_SSECTION_ADDR_MASK 0xFF000000
33#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
34#define MMU_LARGE_PAGE_MASK 0xFFFF0000
35#define MMU_SMALL_PAGE_MASK 0xFFFFF000
36
37#define MMU_LOAD_TLB 0x00000001
38#define MMU_GFLUSH 0x60
39
40/*
41 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
42 */
43enum hw_mmu_page_size_t {
44 HW_MMU_SECTION,
45 HW_MMU_LARGE_PAGE,
46 HW_MMU_SMALL_PAGE,
47 HW_MMU_SUPERSECTION
48};
49
50/*
51 * FUNCTION : mmu_flush_entry
52 *
53 * INPUTS:
54 *
55 * Identifier : base_address
56 * Type : const u32
57 * Description : Base Address of instance of MMU module
58 *
59 * RETURNS:
60 *
61 * Type : hw_status
62 * Description : 0 -- No errors occurred
63 * RET_BAD_NULL_PARAM -- A Pointer
64 * Parameter was set to NULL
65 *
66 * PURPOSE: : Flush the TLB entry pointed by the
67 * lock counter register
68 * even if this entry is set protected
69 *
70 * METHOD: : Check the Input parameter and Flush a
71 * single entry in the TLB.
72 */
73static hw_status mmu_flush_entry(const void __iomem *base_address);
74
75/*
76 * FUNCTION : mmu_set_cam_entry
77 *
78 * INPUTS:
79 *
80 * Identifier : base_address
81 * Type : const u32
82 * Description : Base Address of instance of MMU module
83 *
84 * Identifier : page_sz
85 * Type : const u32
86 * Description : It indicates the page size
87 *
88 * Identifier : preserved_bit
89 * Type : const u32
90 * Description : It indicates the TLB entry is preserved entry
91 * or not
92 *
93 * Identifier : valid_bit
94 * Type : const u32
95 * Description : It indicates the TLB entry is valid entry or not
96 *
97 *
98 * Identifier : virtual_addr_tag
99 * Type : const u32
100 * Description : virtual Address
101 *
102 * RETURNS:
103 *
104 * Type : hw_status
105 * Description : 0 -- No errors occurred
106 * RET_BAD_NULL_PARAM -- A Pointer Parameter
107 * was set to NULL
108 * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
109 * of Range
110 *
111 * PURPOSE: : Set MMU_CAM reg
112 *
113 * METHOD: : Check the Input parameters and set the CAM entry.
114 */
115static hw_status mmu_set_cam_entry(const void __iomem *base_address,
116 const u32 page_sz,
117 const u32 preserved_bit,
118 const u32 valid_bit,
119 const u32 virtual_addr_tag);
120
121/*
122 * FUNCTION : mmu_set_ram_entry
123 *
124 * INPUTS:
125 *
126 * Identifier : base_address
127 * Type : const u32
128 * Description : Base Address of instance of MMU module
129 *
130 * Identifier : physical_addr
131 * Type : const u32
132 * Description : Physical Address to which the corresponding
133 * virtual address should point
134 *
135 * Identifier : endianism
136 * Type : hw_endianism_t
137 * Description : endianism for the given page
138 *
139 * Identifier : element_size
140 * Type : hw_element_size_t
141 * Description : The element size ( 8,16, 32 or 64 bit)
142 *
143 * Identifier : mixed_size
144 * Type : hw_mmu_mixed_size_t
145 * Description : Element Size to follow CPU or TLB
146 *
147 * RETURNS:
148 *
149 * Type : hw_status
150 * Description : 0 -- No errors occurred
151 * RET_BAD_NULL_PARAM -- A Pointer Parameter
152 * was set to NULL
153 * RET_PARAM_OUT_OF_RANGE -- Input Parameter
154 * out of Range
155 *
156 * PURPOSE: : Set MMU_CAM reg
157 *
158 * METHOD: : Check the Input parameters and set the RAM entry.
159 */
160static hw_status mmu_set_ram_entry(const void __iomem *base_address,
161 const u32 physical_addr,
162 enum hw_endianism_t endianism,
163 enum hw_element_size_t element_size,
164 enum hw_mmu_mixed_size_t mixed_size);
165
166/* HW FUNCTIONS */
167
168hw_status hw_mmu_enable(const void __iomem *base_address)
169{
170 hw_status status = 0;
171
172 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
173
174 return status;
175}
176
177hw_status hw_mmu_disable(const void __iomem *base_address)
178{
179 hw_status status = 0;
180
181 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
182
183 return status;
184}
185
186hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187 u32 num_locked_entries)
188{
189 hw_status status = 0;
190
191 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
192
193 return status;
194}
195
196hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197 u32 victim_entry_num)
198{
199 hw_status status = 0;
200
201 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
202
203 return status;
204}
205
206hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
207{
208 hw_status status = 0;
209
210 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
211
212 return status;
213}
214
215hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
216{
217 hw_status status = 0;
218 u32 irq_reg;
219
220 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
221
222 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
223
224 return status;
225}
226
227hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
228{
229 hw_status status = 0;
230 u32 irq_reg;
231
232 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
233
234 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
235
236 return status;
237}
238
239hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
240{
241 hw_status status = 0;
242
243 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
244
245 return status;
246}
247
248hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
249{
250 hw_status status = 0;
251
252 /* read values from register */
253 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
254
255 return status;
256}
257
258hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
259{
260 hw_status status = 0;
261 u32 load_ttb;
262
263 load_ttb = ttb_phys_addr & ~0x7FUL;
264 /* write values to register */
265 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
266
267 return status;
268}
269
270hw_status hw_mmu_twl_enable(const void __iomem *base_address)
271{
272 hw_status status = 0;
273
274 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
275
276 return status;
277}
278
279hw_status hw_mmu_twl_disable(const void __iomem *base_address)
280{
281 hw_status status = 0;
282
283 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
284
285 return status;
286}
287
288hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
289 u32 page_sz)
290{
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
294
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
299
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
303
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
307
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
311
312 default:
313 return -EINVAL;
314 }
315
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
318
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
320
321 mmu_flush_entry(base_address);
322
323 return status;
324}
325
326hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr,
328 u32 virtual_addr,
329 u32 page_sz,
330 u32 entry_num,
331 struct hw_mmu_map_attrs_t *map_attrs,
332 s8 preserved_bit, s8 valid_bit)
333{
334 hw_status status = 0;
335 u32 lock_reg;
336 u32 virtual_addr_tag;
337 enum hw_mmu_page_size_t mmu_pg_size;
338
339 /* Check the input parameters */
340 switch (page_sz) {
341 case HW_PAGE_SIZE4KB:
342 mmu_pg_size = HW_MMU_SMALL_PAGE;
343 break;
344
345 case HW_PAGE_SIZE64KB:
346 mmu_pg_size = HW_MMU_LARGE_PAGE;
347 break;
348
349 case HW_PAGE_SIZE1MB:
350 mmu_pg_size = HW_MMU_SECTION;
351 break;
352
353 case HW_PAGE_SIZE16MB:
354 mmu_pg_size = HW_MMU_SUPERSECTION;
355 break;
356
357 default:
358 return -EINVAL;
359 }
360
361 lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
362
363 /* Generate the 20-bit tag from virtual address */
364 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
365
366 /* Write the fields in the CAM Entry Register */
367 mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
368 virtual_addr_tag);
369
370 /* Write the different fields of the RAM Entry Register */
371 /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
372 mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373 map_attrs->element_size, map_attrs->mixed_size);
374
375 /* Update the MMU Lock Register */
376 /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
378
379 /* Enable loading of an entry into the TLB by writing 1
380 into the LD_TLB register */
381 MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
382
383 MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
384
385 return status;
386}
387
388hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
389 u32 physical_addr,
390 u32 virtual_addr,
391 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
392{
393 hw_status status = 0;
394 u32 pte_addr, pte_val;
395 s32 num_entries = 1;
396
397 switch (page_sz) {
398 case HW_PAGE_SIZE4KB:
399 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
400 virtual_addr &
401 MMU_SMALL_PAGE_MASK);
402 pte_val =
403 ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) | (map_attrs->
405 element_size << 4) |
406 (map_attrs->mixed_size << 11) | 2);
407 break;
408
409 case HW_PAGE_SIZE64KB:
410 num_entries = 16;
411 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
412 virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val =
415 ((physical_addr & MMU_LARGE_PAGE_MASK) |
416 (map_attrs->endianism << 9) | (map_attrs->
417 element_size << 4) |
418 (map_attrs->mixed_size << 11) | 1);
419 break;
420
421 case HW_PAGE_SIZE1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
423 virtual_addr &
424 MMU_SECTION_ADDR_MASK);
425 pte_val =
426 ((((physical_addr & MMU_SECTION_ADDR_MASK) |
427 (map_attrs->endianism << 15) | (map_attrs->
428 element_size << 10) |
429 (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
430 break;
431
432 case HW_PAGE_SIZE16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
435 virtual_addr &
436 MMU_SSECTION_ADDR_MASK);
437 pte_val =
438 (((physical_addr & MMU_SSECTION_ADDR_MASK) |
439 (map_attrs->endianism << 15) | (map_attrs->
440 element_size << 10) |
441 (map_attrs->mixed_size << 17)
442 ) | 0x40000 | 0x2);
443 break;
444
445 case HW_MMU_COARSE_PAGE_SIZE:
446 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
447 virtual_addr &
448 MMU_SECTION_ADDR_MASK);
449 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
450 break;
451
452 default:
453 return -EINVAL;
454 }
455
456 while (--num_entries >= 0)
457 ((u32 *) pte_addr)[num_entries] = pte_val;
458
459 return status;
460}
461
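For a concrete sense of the descriptor encodings above: a 4KB small-page PTE for physical_addr = 0x87000000 with HW_LITTLE_ENDIAN (0), HW_ELEM_SIZE16BIT (1) and HW_MMU_CPUES (1) works out to 0x87000000 | (0 << 9) | (1 << 4) | (1 << 11) | 2 = 0x87000812. A tiny user-space check of that arithmetic (the physical address is a sample value):

	#include <stdio.h>

	int main(void)
	{
		unsigned pa = 0x87000000, endianism = 0, elem_size = 1, mixed = 1;
		unsigned pte_val = (pa & 0xFFFFF000) | (endianism << 9) |
				   (elem_size << 4) | (mixed << 11) | 2;

		printf("small-page PTE = 0x%08x\n", pte_val);	/* 0x87000812 */
		return 0;
	}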
462hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
463{
464 hw_status status = 0;
465 u32 pte_addr;
466 s32 num_entries = 1;
467
468 switch (page_size) {
469 case HW_PAGE_SIZE4KB:
470 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
471 virtual_addr &
472 MMU_SMALL_PAGE_MASK);
473 break;
474
475 case HW_PAGE_SIZE64KB:
476 num_entries = 16;
477 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
478 virtual_addr &
479 MMU_LARGE_PAGE_MASK);
480 break;
481
482 case HW_PAGE_SIZE1MB:
483 case HW_MMU_COARSE_PAGE_SIZE:
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr &
486 MMU_SECTION_ADDR_MASK);
487 break;
488
489 case HW_PAGE_SIZE16MB:
490 num_entries = 16;
491 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
492 virtual_addr &
493 MMU_SSECTION_ADDR_MASK);
494 break;
495
496 default:
497 return -EINVAL;
498 }
499
500 while (--num_entries >= 0)
501 ((u32 *) pte_addr)[num_entries] = 0;
502
503 return status;
504}
505
506/* mmu_flush_entry */
507static hw_status mmu_flush_entry(const void __iomem *base_address)
508{
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
511
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
514
515 return status;
516}
517
518/* mmu_set_cam_entry */
519static hw_status mmu_set_cam_entry(const void __iomem *base_address,
520 const u32 page_sz,
521 const u32 preserved_bit,
522 const u32 valid_bit,
523 const u32 virtual_addr_tag)
524{
525 hw_status status = 0;
526 u32 mmu_cam_reg;
527
528 mmu_cam_reg = (virtual_addr_tag << 12);
529 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530 (preserved_bit << 3);
531
532 /* write values to register */
533 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
534
535 return status;
536}
537
538/* mmu_set_ram_entry */
539static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540 const u32 physical_addr,
541 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size,
543 enum hw_mmu_mixed_size_t mixed_size)
544{
545 hw_status status = 0;
546 u32 mmu_ram_reg;
547
548 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
550 (mixed_size << 6));
551
552 /* write values to register */
553 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
554
555 return status;
556
557}
558
559void hw_mmu_tlb_flush_all(const void __iomem *base)
560{
561 __raw_writeb(1, base + MMU_GFLUSH);
562}
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 000000000000..1458a2c6027b
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
1/*
2 * hw_mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * MMU types and API declarations
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_MMU_H
20#define _HW_MMU_H
21
22#include <linux/types.h>
23
24/* Bitmasks for interrupt sources */
25#define HW_MMU_TRANSLATION_FAULT 0x2
26#define HW_MMU_ALL_INTERRUPTS 0x1F
27
28#define HW_MMU_COARSE_PAGE_SIZE 0x400
29
30/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
31 CPU/TLB Element size */
32enum hw_mmu_mixed_size_t {
33 HW_MMU_TLBES,
34 HW_MMU_CPUES
35};
36
37/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
38struct hw_mmu_map_attrs_t {
39 enum hw_endianism_t endianism;
40 enum hw_element_size_t element_size;
41 enum hw_mmu_mixed_size_t mixed_size;
42 bool donotlockmpupage;
43};
44
45extern hw_status hw_mmu_enable(const void __iomem *base_address);
46
47extern hw_status hw_mmu_disable(const void __iomem *base_address);
48
49extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
50 u32 num_locked_entries);
51
52extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
53 u32 victim_entry_num);
54
55/* For MMU faults */
56extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
57 u32 irq_mask);
58
59extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
60 u32 irq_mask);
61
62extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
63 u32 irq_mask);
64
65extern hw_status hw_mmu_event_status(const void __iomem *base_address,
66 u32 *irq_mask);
67
68extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
69 u32 *addr);
70
71/* Set the TT base address */
72extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
73 u32 ttb_phys_addr);
74
75extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
76
77extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
78
79extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
80 u32 virtual_addr, u32 page_sz);
81
82extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
83 u32 physical_addr,
84 u32 virtual_addr,
85 u32 page_sz,
86 u32 entry_num,
87 struct hw_mmu_map_attrs_t *map_attrs,
88 s8 preserved_bit, s8 valid_bit);
89
90/* For PTEs */
91extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
92 u32 physical_addr,
93 u32 virtual_addr,
94 u32 page_sz,
95 struct hw_mmu_map_attrs_t *map_attrs);
96
97extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
98 u32 virtual_addr, u32 page_size);
99
100void hw_mmu_tlb_flush_all(const void __iomem *base);
101
102static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
103{
104 u32 pte_addr;
105 u32 va31_to20;
106
107 va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
108 va31_to20 &= 0xFFFFFFFCUL;
109 pte_addr = l1_base + va31_to20;
110
111 return pte_addr;
112}
113
114static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
115{
116 u32 pte_addr;
117
118 pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
119
120 return pte_addr;
121}
122
123static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
124{
125 u32 pte_coarse;
126
127 pte_coarse = pte_val & 0xFFFFFC00;
128
129 return pte_coarse;
130}
131
132static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
133{
134 u32 pte_size = 0;
135
136 if ((pte_val & 0x3) == 0x1) {
137 /* Points to L2 PT */
138 pte_size = HW_MMU_COARSE_PAGE_SIZE;
139 }
140
141 if ((pte_val & 0x3) == 0x2) {
142 if (pte_val & (1 << 18))
143 pte_size = HW_PAGE_SIZE16MB;
144 else
145 pte_size = HW_PAGE_SIZE1MB;
146 }
147
148 return pte_size;
149}
150
151static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
152{
153 u32 pte_size = 0;
154
155 if (pte_val & 0x2)
156 pte_size = HW_PAGE_SIZE4KB;
157 else if (pte_val & 0x1)
158 pte_size = HW_PAGE_SIZE64KB;
159
160 return pte_size;
161}
162
163#endif /* _HW_MMU_H */
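The address arithmetic in the two pte_addr helpers, worked through for va = 0x40021000 with sample table bases: the L1 index is va >> 20 = 0x400, so pte_addr_l1 = 0xC0000000 + 0x400 * 4 = 0xC0001000; the L2 index is bits 19:12 = 0x21, so pte_addr_l2 = 0xC0010000 + 0x21 * 4 = 0xC0010084. A user-space copy of the helpers confirms this:

	#include <stdio.h>

	/* user-space copies of the inline helpers above */
	static unsigned pte_addr_l1(unsigned l1_base, unsigned va)
	{
		return l1_base + ((va >> (20 - 2)) & 0xFFFFFFFCu);
	}

	static unsigned pte_addr_l2(unsigned l2_base, unsigned va)
	{
		return (l2_base & 0xFFFFFC00u) | ((va >> 10) & 0x3FCu);
	}

	int main(void)
	{
		unsigned va = 0x40021000;

		printf("L1 PTE at 0x%08x\n", pte_addr_l1(0xC0000000u, va));
		printf("L2 PTE at 0x%08x\n", pte_addr_l2(0xC0010000u, va));
		return 0;	/* prints 0xC0001000 and 0xC0010084 */
	}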
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cca34c7..38122dbf877a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@ struct cfg_hostres {
68 void __iomem *dw_per_base; 68 void __iomem *dw_per_base;
69 u32 dw_per_pm_base; 69 u32 dw_per_pm_base;
70 u32 dw_core_pm_base; 70 u32 dw_core_pm_base;
71 void __iomem *dw_dmmu_base;
71 void __iomem *dw_sys_ctrl_base; 72 void __iomem *dw_sys_ctrl_base;
72}; 73};
73 74
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f57429..357458fadd2a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
27#include <dspbridge/nodedefs.h> 27#include <dspbridge/nodedefs.h>
28#include <dspbridge/dispdefs.h> 28#include <dspbridge/dispdefs.h>
29#include <dspbridge/dspdefs.h> 29#include <dspbridge/dspdefs.h>
30#include <dspbridge/dmm.h>
30#include <dspbridge/host_os.h> 31#include <dspbridge/host_os.h>
31 32
32/* ----------------------------------- This */ 33/* ----------------------------------- This */
@@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
233 struct cmm_object **mgr); 234 struct cmm_object **mgr);
234 235
235/* 236/*
237 * ======== dev_get_dmm_mgr ========
238 * Purpose:
239 * Retrieve the handle to the dynamic memory manager created for this
240 * device.
241 * Parameters:
242 * hdev_obj: Handle to device object created with
243 * dev_create_device().
244 * *mgr: Ptr to location to store handle.
245 * Returns:
246 * 0: Success.
247 * -EFAULT: Invalid hdev_obj.
248 * Requires:
249 * mgr != NULL.
250 * DEV Initialized.
251 * Ensures:
252 * 0: *mgr contains a handle to a dynamic memory manager object,
253 * or NULL.
254 * else: *mgr is NULL.
255 */
256extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
257 struct dmm_object **mgr);
258
259/*
236 * ======== dev_get_cod_mgr ======== 260 * ======== dev_get_cod_mgr ========
237 * Purpose: 261 * Purpose:
238 * Retrieve the COD manager created for this device. 262 * Retrieve the COD manager created for this device.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 000000000000..6c58335c5f60
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
1/*
2 * dmm.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Mapping (DMM) module manages the DSP virtual address
7 * space that can be directly mapped to any MPU buffer or memory region.
8 *
9 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 *
11 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
17 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 */
19
20#ifndef DMM_
21#define DMM_
22
23#include <dspbridge/dbdefs.h>
24
25struct dmm_object;
26
27/* DMM attributes used in dmm_create() */
28struct dmm_mgrattrs {
29 u32 reserved;
30};
31
32#define DMMPOOLSIZE 0x4000000
33
34/*
35 * ======== dmm_get_handle ========
36 * Purpose:
37 * Return the dynamic memory manager object for this device.
38 * This is typically called from the client process.
39 */
40
41extern int dmm_get_handle(void *hprocessor,
42 struct dmm_object **dmm_manager);
43
44extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
45 u32 size, u32 *prsv_addr);
46
47extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
48 u32 rsv_addr);
49
50extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
51 u32 size);
52
53extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
54 u32 addr, u32 *psize);
55
56extern int dmm_destroy(struct dmm_object *dmm_mgr);
57
58extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
59
60extern int dmm_create(struct dmm_object **dmm_manager,
61 struct dev_object *hdev_obj,
62 const struct dmm_mgrattrs *mgr_attrts);
63
64extern bool dmm_init(void);
65
66extern void dmm_exit(void);
67
68extern int dmm_create_tables(struct dmm_object *dmm_mgr,
69 u32 addr, u32 size);
70
71#ifdef DSP_DMM_DEBUG
72u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
73#endif
74
75#endif /* DMM_ */
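
Read together, the prototypes above imply a fixed lifecycle. A hedged sketch of the intended call order, with error handling omitted and hdev_obj/dsp_ext_end as placeholders:

/* Sketch only: placeholder handles and addresses, no error handling. */
struct dmm_object *mgr;
u32 rsv_addr, unmapped;

dmm_create(&mgr, hdev_obj, NULL);		/* one manager per device */
dmm_create_tables(mgr, dsp_ext_end, DMMPOOLSIZE); /* seed the free pool */

dmm_reserve_memory(mgr, 0x100000, &rsv_addr);	/* carve out DSP VA */
dmm_map_memory(mgr, rsv_addr, 0x100000);	/* mark the chunk mapped */
/* ... the MPU pages are actually wired up by the bridge MMU code ... */
dmm_un_map_memory(mgr, rsv_addr, &unmapped);
dmm_un_reserve_memory(mgr, rsv_addr);

dmm_destroy(mgr);				/* frees the tables too */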
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b5c6f2..c1f363ec9afa 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@ struct dmm_map_object {
108 struct bridge_dma_map_info dma_info; 108 struct bridge_dma_map_info dma_info;
109}; 109};
110 110
111/* Used for DMM reserved memory accounting */
112struct dmm_rsv_object {
113 struct list_head link;
114 u32 dsp_reserved_addr;
115};
116
111/* New structure (member of process context) abstracts DMM resource info */ 117/* New structure (member of process context) abstracts DMM resource info */
112struct dspheap_res_object { 118struct dspheap_res_object {
113 s32 heap_allocated; /* DMM status */ 119 s32 heap_allocated; /* DMM status */
@@ -159,6 +165,10 @@ struct process_context {
159 struct list_head dmm_map_list; 165 struct list_head dmm_map_list;
160 spinlock_t dmm_map_lock; 166 spinlock_t dmm_map_lock;
161 167
168 /* DMM reserved memory resources */
169 struct list_head dmm_rsv_list;
170 spinlock_t dmm_rsv_lock;
171
162 /* DSP Heap resources */ 172 /* DSP Heap resources */
163 struct dspheap_res_object *pdspheap_list; 173 struct dspheap_res_object *pdspheap_list;
164 174
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4cc0734..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * dsp-mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2005-2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _DSP_MMU_
20#define _DSP_MMU_
21
22#include <plat/iommu.h>
23#include <plat/iovmm.h>
24
25/**
26 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
27 *
28 * This function initialize dsp mmu module and returns a struct iommu
29 * handle to use it for dsp maps.
30 *
31 */
32struct iommu *dsp_mmu_init(void);
33
34/**
35 * dsp_mmu_exit() - destroy dsp mmu module
36 * @mmu: Pointer to iommu handle.
37 *
38 * This function destroys dsp mmu module.
39 *
40 */
41void dsp_mmu_exit(struct iommu *mmu);
42
43/**
44 * user_to_dsp_map() - maps user to dsp virtual address
45 * @mmu: Pointer to iommu handle.
46 * @uva: Virtual user space address.
47 * @da DSP address
48 * @size Buffer size to map.
49 * @usr_pgs struct page array pointer where the user pages will be stored
50 *
51 * This function maps a user space buffer into DSP virtual address.
52 *
53 */
54u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
55 struct page **usr_pgs);
56
57/**
58 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
59 * @mmu: Pointer to iommu handle.
60 * @da DSP address
61 *
62 * This function unmaps a user space buffer into DSP virtual address.
63 *
64 */
65int user_to_dsp_unmap(struct iommu *mmu, u32 da);
66
67#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 615363474810..0ae7d1646a1b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
162 u32 mem_type); 162 u32 mem_type);
163 163
164/* 164/*
165 * ======== bridge_brd_mem_map ========
166 * Purpose:
167 * Map an MPU memory region into DSP/IVA memory space
168 * Parameters:
169 * dev_ctxt: Handle to Bridge driver defined device info.
170 * ul_mpu_addr: MPU memory region start address.
171 * virt_addr: DSP/IVA memory region u8 address.
172 * ul_num_bytes: Number of bytes to map.
173 * map_attrs: Mapping attributes (e.g. endianness).
174 * Returns:
175 * 0: Success.
176 * -EPERM: Other, unspecified error.
177 * Requires:
178 * dev_ctxt != NULL;
179 * Ensures:
180 */
181typedef int(*fxn_brd_memmap) (struct bridge_dev_context
182 * dev_ctxt, u32 ul_mpu_addr,
183 u32 virt_addr, u32 ul_num_bytes,
184 u32 map_attr,
185 struct page **mapped_pages);
186
187/*
188 * ======== bridge_brd_mem_un_map ========
189 * Purpose:
190 * Unmap an MPU memory region from DSP/IVA memory space
191 * Parameters:
192 * dev_ctxt: Handle to Bridge driver defined device info.
193 * virt_addr: DSP/IVA memory region u8 address.
194 * ul_num_bytes: Number of bytes to unmap.
195 * Returns:
196 * 0: Success.
197 * -EPERM: Other, unspecified error.
198 * Requires:
199 * dev_ctxt != NULL;
200 * Ensures:
201 */
202typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
203 * dev_ctxt,
204 u32 virt_addr, u32 ul_num_bytes);
205
206/*
165 * ======== bridge_brd_stop ======== 207 * ======== bridge_brd_stop ========
166 * Purpose: 208 * Purpose:
167 * Bring board to the BRD_STOPPED state. 209 * Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@ struct bridge_drv_interface {
951 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ 993 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
952 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ 994 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
953 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ 995 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
996 fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
997 fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem from DSP mem */
954 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ 998 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
955 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ 999 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
956 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ 1000 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
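
The two new slots are filled the same way as the existing ones: the bridge implementation exports its map/unmap routines through the interface table that store_interface_fxns() later copies. A sketch, assuming the OMAP callbacks are named bridge_brd_mem_map/bridge_brd_mem_un_map:

/* Sketch: the callback names are assumed; the slot names are from above. */
static struct bridge_drv_interface drv_interface_fxns = {
	/* ... existing entries ... */
	.pfn_brd_mem_map = bridge_brd_mem_map,
	.pfn_brd_mem_un_map = bridge_brd_mem_un_map,
	/* ... */
};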
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad180108ada..41e0594dff34 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
19#ifndef DSPIOCTL_ 19#ifndef DSPIOCTL_
20#define DSPIOCTL_ 20#define DSPIOCTL_
21 21
22/* ------------------------------------ Hardware Abstraction Layer */
23#include <hw_defs.h>
24#include <hw_mmu.h>
25
22/* 26/*
23 * Any IOCTLS at or above this value are reserved for standard Bridge driver 27 * Any IOCTLS at or above this value are reserved for standard Bridge driver
24 * interfaces. 28 * interfaces.
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
61 /* GPP virtual address. __va does not work for ioremapped addresses */ 65 /* GPP virtual address. __va does not work for ioremapped addresses */
62 u32 ul_gpp_va; 66 u32 ul_gpp_va;
63 u32 ul_size; /* Size of the mapped memory in bytes */ 67 u32 ul_size; /* Size of the mapped memory in bytes */
68 enum hw_endianism_t endianism;
69 enum hw_mmu_mixed_size_t mixed_mode;
70 enum hw_element_size_t elem_size;
64}; 71};
65 72
66#endif /* DSPIOCTL_ */ 73#endif /* DSPIOCTL_ */
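
With the three new fields, each TLB-entry descriptor carries the attributes the restored HAL needs when programming an entry. An illustrative initializer; the values are placeholders and the enum constants (HW_LITTLE_ENDIAN, HW_MMU_CPUES, HW_ELEM_SIZE16BIT) are assumed to come from hw_defs.h/hw_mmu.h:

/* Illustrative values only. */
struct bridge_ioctl_extproc entry = {
	.ul_gpp_va	= 0,			/* __va invalid for ioremap */
	.ul_size	= 0x100000,		/* 1 MB window */
	.endianism	= HW_LITTLE_ENDIAN,
	.mixed_mode	= HW_MMU_CPUES,		/* CPU element-size rules */
	.elem_size	= HW_ELEM_SIZE16BIT,
};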
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab6b5bf..5e09fd165d9d 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor,
551 struct process_context *pr_ctxt); 551 struct process_context *pr_ctxt);
552 552
553/* 553/*
554 * ======== proc_reserve_memory ========
555 * Purpose:
556 * Reserve a virtually contiguous region of DSP address space.
557 * Parameters:
558 * hprocessor : The processor handle.
559 * ul_size : Size of the address space to reserve.
560 * pp_rsv_addr : Ptr to DSP side reserved u8 address.
561 * Returns:
562 * 0 : Success.
563 * -EFAULT : Invalid processor handle.
564 * -EPERM : General failure.
565 * -ENOMEM : Cannot reserve chunk of this size.
566 * Requires:
567 * pp_rsv_addr is not NULL
568 * PROC Initialized.
569 * Ensures:
570 * Details:
571 */
572extern int proc_reserve_memory(void *hprocessor,
573 u32 ul_size, void **pp_rsv_addr,
574 struct process_context *pr_ctxt);
575
576/*
554 * ======== proc_un_map ======== 577 * ======== proc_un_map ========
555 * Purpose: 578 * Purpose:
556 * Removes a MPU buffer mapping from the DSP address space. 579 * Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
572extern int proc_un_map(void *hprocessor, void *map_addr, 595extern int proc_un_map(void *hprocessor, void *map_addr,
573 struct process_context *pr_ctxt); 596 struct process_context *pr_ctxt);
574 597
598/*
599 * ======== proc_un_reserve_memory ========
600 * Purpose:
601 * Frees a previously reserved region of DSP address space.
602 * Parameters:
603 * hprocessor : The processor handle.
604 * prsv_addr : Ptr to DSP side reserved u8 address.
605 * Returns:
606 * 0 : Success.
607 * -EFAULT : Invalid processor handle.
608 * -EPERM : General failure.
609 * -ENOENT : Cannot find a reserved region starting with this
610 * : address.
611 * Requires:
612 * prsv_addr is not NULL
613 * PROC Initialized.
614 * Ensures:
615 * Details:
616 */
617extern int proc_un_reserve_memory(void *hprocessor,
618 void *prsv_addr,
619 struct process_context *pr_ctxt);
620
575#endif /* PROC_ */ 621#endif /* PROC_ */
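
proc_reserve_memory() and proc_un_reserve_memory() bracket the existing map calls, so a full round trip from a process context looks roughly as follows (a sketch; the buffer, size, and attribute values are placeholders):

/* Sketch of the expected pairing; error handling omitted. */
void *rsv, *dsp_va;

proc_reserve_memory(hprocessor, size + PAGE_SIZE, &rsv, pr_ctxt);
proc_map(hprocessor, mpu_buf, size, rsv, &dsp_va, map_attrs, pr_ctxt);
/* ... DSP-side code works on dsp_va ... */
proc_un_map(hprocessor, dsp_va, pr_ctxt);
proc_un_reserve_memory(hprocessor, rsv, pr_ctxt);

node_allocate() in rmgr/node.c below follows exactly this order for the node heap.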
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267ef0e2..132e960967b9 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
34#include <dspbridge/cod.h> 34#include <dspbridge/cod.h>
35#include <dspbridge/drv.h> 35#include <dspbridge/drv.h>
36#include <dspbridge/proc.h> 36#include <dspbridge/proc.h>
37#include <dspbridge/dmm.h>
37 38
38/* ----------------------------------- Resource Manager */ 39/* ----------------------------------- Resource Manager */
39#include <dspbridge/mgr.h> 40#include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@ struct dev_object {
74 struct msg_mgr *hmsg_mgr; /* Message manager. */ 75 struct msg_mgr *hmsg_mgr; /* Message manager. */
75 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 76 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
76 struct cmm_object *hcmm_mgr; /* SM memory manager. */ 77 struct cmm_object *hcmm_mgr; /* SM memory manager. */
78 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
77 struct ldr_module *module_obj; /* Bridge Module handle. */ 79 struct ldr_module *module_obj; /* Bridge Module handle. */
78 u32 word_size; /* DSP word size: quick access. */ 80 u32 word_size; /* DSP word size: quick access. */
79 struct drv_object *hdrv_obj; /* Driver Object */ 81 struct drv_object *hdrv_obj; /* Driver Object */
@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
248 /* Instantiate the DEH module */ 250 /* Instantiate the DEH module */
249 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 251 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
250 } 252 }
253 /* Create DMM mgr. */
254 status = dmm_create(&dev_obj->dmm_mgr,
255 (struct dev_object *)dev_obj, NULL);
251 } 256 }
252 /* Add the new DEV_Object to the global list: */ 257 /* Add the new DEV_Object to the global list: */
253 if (!status) { 258 if (!status) {
@@ -273,6 +278,8 @@ leave:
273 kfree(dev_obj->proc_list); 278 kfree(dev_obj->proc_list);
274 if (dev_obj->cod_mgr) 279 if (dev_obj->cod_mgr)
275 cod_delete(dev_obj->cod_mgr); 280 cod_delete(dev_obj->cod_mgr);
281 if (dev_obj->dmm_mgr)
282 dmm_destroy(dev_obj->dmm_mgr);
276 kfree(dev_obj); 283 kfree(dev_obj);
277 } 284 }
278 285
@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
382 dev_obj->hcmm_mgr = NULL; 389 dev_obj->hcmm_mgr = NULL;
383 } 390 }
384 391
392 if (dev_obj->dmm_mgr) {
393 dmm_destroy(dev_obj->dmm_mgr);
394 dev_obj->dmm_mgr = NULL;
395 }
396
385 /* Call the driver's bridge_dev_destroy() function: */ 397 /* Call the driver's bridge_dev_destroy() function: */
386 /* Require of DevDestroy */ 398 /* Require of DevDestroy */
387 if (dev_obj->hbridge_context) { 399 if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
462} 474}
463 475
464/* 476/*
477 * ======== dev_get_dmm_mgr ========
478 * Purpose:
479 * Retrieve the handle to the dynamic memory manager created for this
480 * device.
481 */
482int dev_get_dmm_mgr(struct dev_object *hdev_obj,
483 struct dmm_object **mgr)
484{
485 int status = 0;
486 struct dev_object *dev_obj = hdev_obj;
487
488 DBC_REQUIRE(refs > 0);
489 DBC_REQUIRE(mgr != NULL);
490
491 if (hdev_obj) {
492 *mgr = dev_obj->dmm_mgr;
493 } else {
494 *mgr = NULL;
495 status = -EFAULT;
496 }
497
498 DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
499 return status;
500}
501
502/*
465 * ======== dev_get_cod_mgr ======== 503 * ======== dev_get_cod_mgr ========
466 * Purpose: 504 * Purpose:
467 * Retrieve the COD manager created for this device. 505 * Retrieve the COD manager created for this device.
@@ -713,8 +751,10 @@ void dev_exit(void)
713 751
714 refs--; 752 refs--;
715 753
716 if (refs == 0) 754 if (refs == 0) {
717 cmm_exit(); 755 cmm_exit();
756 dmm_exit();
757 }
718 758
719 DBC_ENSURE(refs >= 0); 759 DBC_ENSURE(refs >= 0);
720} 760}
@@ -726,12 +766,25 @@ void dev_exit(void)
726 */ 766 */
727bool dev_init(void) 767bool dev_init(void)
728{ 768{
729 bool ret = true; 769 bool cmm_ret, dmm_ret, ret = true;
730 770
731 DBC_REQUIRE(refs >= 0); 771 DBC_REQUIRE(refs >= 0);
732 772
733 if (refs == 0) 773 if (refs == 0) {
734 ret = cmm_init(); 774 cmm_ret = cmm_init();
775 dmm_ret = dmm_init();
776
777 ret = cmm_ret && dmm_ret;
778
779 if (!ret) {
780 if (cmm_ret)
781 cmm_exit();
782
783 if (dmm_ret)
784 dmm_exit();
785
786 }
787 }
735 788
736 if (ret) 789 if (ret)
737 refs++; 790 refs++;
@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
1065 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1118 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
1066 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1119 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
1067 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1120 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
1121 STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
1122 STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
1068 STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1123 STORE_FXN(fxn_chnl_create, pfn_chnl_create);
1069 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1124 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
1070 STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1125 STORE_FXN(fxn_chnl_open, pfn_chnl_open);
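
dev_init() now brings up two sub-modules and must not leave one initialized when the other fails; the rollback shape it uses is worth seeing in isolation (a sketch with hypothetical sub-modules a and b):

/* Sketch of the init/rollback idiom used by dev_init() above. */
bool mod_init(void)
{
	bool a_ok = a_init();
	bool b_ok = b_init();

	if (a_ok && b_ok)
		return true;	/* only now does the refcount advance */

	if (a_ok)
		a_exit();	/* undo whichever half succeeded */
	if (b_ok)
		b_exit();
	return false;
}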
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 000000000000..8685233d7627
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
1/*
2 * dmm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region
8 *
9 * Notes:
10 * Region: Generic memory entity having a start address and a size
11 * Chunk: Reserved region
12 *
13 * Copyright (C) 2005-2006 Texas Instruments, Inc.
14 *
15 * This package is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
21 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 */
23#include <linux/types.h>
24
25/* ----------------------------------- Host OS */
26#include <dspbridge/host_os.h>
27
28/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h>
30
31/* ----------------------------------- Trace & Debug */
32#include <dspbridge/dbc.h>
33
34/* ----------------------------------- OS Adaptation Layer */
35#include <dspbridge/sync.h>
36
37/* ----------------------------------- Platform Manager */
38#include <dspbridge/dev.h>
39#include <dspbridge/proc.h>
40
41/* ----------------------------------- This */
42#include <dspbridge/dmm.h>
43
44/* ----------------------------------- Defines, Data Structures, Typedefs */
45#define DMM_ADDR_VIRTUAL(a) \
46 (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
47 dyn_mem_map_beg)
48#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
49
50/* DMM Mgr */
51struct dmm_object {
52 /* Dmm Lock is used to serialize access mem manager for
53 * multi-threads. */
54 spinlock_t dmm_lock; /* Lock to access dmm mgr */
55};
56
57/* ----------------------------------- Globals */
58static u32 refs; /* module reference count */
59struct map_page {
60 u32 region_size:15;
61 u32 mapped_size:15;
62 u32 reserved:1;
63 u32 mapped:1;
64};
65
66/* Create the free list */
67static struct map_page *virtual_mapping_table;
68static u32 free_region; /* The index of free region */
69static u32 free_size;
70static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
71static u32 table_size; /* The size of virt and phys pages tables */
72
73/* ----------------------------------- Function Prototypes */
74static struct map_page *get_region(u32 addr);
75static struct map_page *get_free_region(u32 len);
76static struct map_page *get_mapped_region(u32 addrs);
77
78/* ======== dmm_create_tables ========
79 * Purpose:
80 * Create table to hold the information of physical address
81 * the buffer pages that is passed by the user, and the table
82 * to hold the information of the virtual memory that is reserved
83 * for DSP.
84 */
85int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
86{
87 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
88 int status = 0;
89
90 status = dmm_delete_tables(dmm_obj);
91 if (!status) {
92 dyn_mem_map_beg = addr;
93 table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
94 /* Create the free list */
95 virtual_mapping_table = __vmalloc(table_size *
96 sizeof(struct map_page), GFP_KERNEL |
97 __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
98 if (virtual_mapping_table == NULL)
99 status = -ENOMEM;
100 else {
101 /* On successful allocation,
102 * all entries are zero ('free') */
103 free_region = 0;
104 free_size = table_size * PG_SIZE4K;
105 virtual_mapping_table[0].region_size = table_size;
106 }
107 }
108
109 if (status)
110 pr_err("%s: failure, status 0x%x\n", __func__, status);
111
112 return status;
113}
114
115/*
116 * ======== dmm_create ========
117 * Purpose:
118 * Create a dynamic memory manager object.
119 */
120int dmm_create(struct dmm_object **dmm_manager,
121 struct dev_object *hdev_obj,
122 const struct dmm_mgrattrs *mgr_attrts)
123{
124 struct dmm_object *dmm_obj = NULL;
125 int status = 0;
126 DBC_REQUIRE(refs > 0);
127 DBC_REQUIRE(dmm_manager != NULL);
128
129 *dmm_manager = NULL;
130 /* create, zero, and tag a dmm mgr object */
131 dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
132 if (dmm_obj != NULL) {
133 spin_lock_init(&dmm_obj->dmm_lock);
134 *dmm_manager = dmm_obj;
135 } else {
136 status = -ENOMEM;
137 }
138
139 return status;
140}
141
142/*
143 * ======== dmm_destroy ========
144 * Purpose:
145 * Release the dynamic memory manager resources.
146 */
147int dmm_destroy(struct dmm_object *dmm_mgr)
148{
149 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
150 int status = 0;
151
152 DBC_REQUIRE(refs > 0);
153 if (dmm_mgr) {
154 status = dmm_delete_tables(dmm_obj);
155 if (!status)
156 kfree(dmm_obj);
157 } else
158 status = -EFAULT;
159
160 return status;
161}
162
163/*
164 * ======== dmm_delete_tables ========
165 * Purpose:
166 * Delete DMM Tables.
167 */
168int dmm_delete_tables(struct dmm_object *dmm_mgr)
169{
170 int status = 0;
171
172 DBC_REQUIRE(refs > 0);
173 /* Delete all DMM tables */
174 if (dmm_mgr)
175 vfree(virtual_mapping_table);
176 else
177 status = -EFAULT;
178 return status;
179}
180
181/*
182 * ======== dmm_exit ========
183 * Purpose:
184 * Discontinue usage of module; free resources when reference count
185 * reaches 0.
186 */
187void dmm_exit(void)
188{
189 DBC_REQUIRE(refs > 0);
190
191 refs--;
192}
193
194/*
195 * ======== dmm_get_handle ========
196 * Purpose:
197 * Return the dynamic memory manager object for this device.
198 * This is typically called from the client process.
199 */
200int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
201{
202 int status = 0;
203 struct dev_object *hdev_obj;
204
205 DBC_REQUIRE(refs > 0);
206 DBC_REQUIRE(dmm_manager != NULL);
207 if (hprocessor != NULL)
208 status = proc_get_dev_object(hprocessor, &hdev_obj);
209 else
210 hdev_obj = dev_get_first(); /* default */
211
212 if (!status)
213 status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
214
215 return status;
216}
217
218/*
219 * ======== dmm_init ========
220 * Purpose:
221 * Initializes private state of DMM module.
222 */
223bool dmm_init(void)
224{
225 bool ret = true;
226
227 DBC_REQUIRE(refs >= 0);
228
229 if (ret)
230 refs++;
231
232 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
233
234 virtual_mapping_table = NULL;
235 table_size = 0;
236
237 return ret;
238}
239
240/*
241 * ======== dmm_map_memory ========
242 * Purpose:
243 * Add a mapping block to the reserved chunk. DMM assumes that this block
244 * will be mapped in the DSP/IVA's address space. DMM returns an error if a
245 * mapping overlaps another one. This function stores the info that will be
246 * required later while unmapping the block.
247 */
248int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
249{
250 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
251 struct map_page *chunk;
252 int status = 0;
253
254 spin_lock(&dmm_obj->dmm_lock);
255 /* Find the Reserved memory chunk containing the DSP block to
256 * be mapped */
257 chunk = (struct map_page *)get_region(addr);
258 if (chunk != NULL) {
259 /* Mark the region 'mapped', leave the 'reserved' info as-is */
260 chunk->mapped = true;
261 chunk->mapped_size = (size / PG_SIZE4K);
262 } else
263 status = -ENOENT;
264 spin_unlock(&dmm_obj->dmm_lock);
265
266 dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
267 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
268
269 return status;
270}
271
272/*
273 * ======== dmm_reserve_memory ========
274 * Purpose:
275 * Reserve a chunk of virtually contiguous DSP/IVA address space.
276 */
277int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
278 u32 *prsv_addr)
279{
280 int status = 0;
281 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
282 struct map_page *node;
283 u32 rsv_addr = 0;
284 u32 rsv_size = 0;
285
286 spin_lock(&dmm_obj->dmm_lock);
287
288 /* Try to get a DSP chunk from the free list */
289 node = get_free_region(size);
290 if (node != NULL) {
291 /* DSP chunk of given size is available. */
292 rsv_addr = DMM_ADDR_VIRTUAL(node);
293 /* Calculate the number of entries to use */
294 rsv_size = size / PG_SIZE4K;
295 if (rsv_size < node->region_size) {
296 /* Mark remainder of free region */
297 node[rsv_size].mapped = false;
298 node[rsv_size].reserved = false;
299 node[rsv_size].region_size =
300 node->region_size - rsv_size;
301 node[rsv_size].mapped_size = 0;
302 }
303 /* get_free_region returns a first-fit chunk, but we only use
304 * what was requested. */
305 node->mapped = false;
306 node->reserved = true;
307 node->region_size = rsv_size;
308 node->mapped_size = 0;
309 /* Return the chunk's starting address */
310 *prsv_addr = rsv_addr;
311 } else
312 /* DSP chunk of given size is not available */
313 status = -ENOMEM;
314
315 spin_unlock(&dmm_obj->dmm_lock);
316
317 dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
318 "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
319 prsv_addr, status, rsv_addr, rsv_size);
320
321 return status;
322}
323
324/*
325 * ======== dmm_un_map_memory ========
326 * Purpose:
327 * Remove the mapped block from the reserved chunk.
328 */
329int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
330{
331 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
332 struct map_page *chunk;
333 int status = 0;
334
335 spin_lock(&dmm_obj->dmm_lock);
336 chunk = get_mapped_region(addr);
337 if (chunk == NULL)
338 status = -ENOENT;
339
340 if (!status) {
341 /* Unmap the region */
342 *psize = chunk->mapped_size * PG_SIZE4K;
343 chunk->mapped = false;
344 chunk->mapped_size = 0;
345 }
346 spin_unlock(&dmm_obj->dmm_lock);
347
348 dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
349 "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
350
351 return status;
352}
353
354/*
355 * ======== dmm_un_reserve_memory ========
356 * Purpose:
357 * Free a chunk of reserved DSP/IVA address space.
358 */
359int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
360{
361 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
362 struct map_page *chunk;
363 u32 i;
364 int status = 0;
365 u32 chunk_size;
366
367 spin_lock(&dmm_obj->dmm_lock);
368
369 /* Find the chunk containing the reserved address */
370 chunk = get_mapped_region(rsv_addr);
371 if (chunk == NULL)
372 status = -ENOENT;
373
374 if (!status) {
375 /* Free all the mapped pages for this reserved region */
376 i = 0;
377 while (i < chunk->region_size) {
378 if (chunk[i].mapped) {
379 /* Remove mapping from the page tables. */
380 chunk_size = chunk[i].mapped_size;
381 /* Clear the mapping flags */
382 chunk[i].mapped = false;
383 chunk[i].mapped_size = 0;
384 i += chunk_size;
385 } else
386 i++;
387 }
388 /* Clear the flags (mark the region 'free') */
389 chunk->reserved = false;
390 /* NOTE: We do NOT coalesce free regions here.
391 * Free regions are coalesced in get_free_region(), as it
392 * traverses the whole mapping table
393 */
394 }
395 spin_unlock(&dmm_obj->dmm_lock);
396
397 dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
398 __func__, dmm_mgr, rsv_addr, status, chunk);
399
400 return status;
401}
402
403/*
404 * ======== get_region ========
405 * Purpose:
406 * Returns the region containing the specified address
407 */
408static struct map_page *get_region(u32 addr)
409{
410 struct map_page *curr_region = NULL;
411 u32 i = 0;
412
413 if (virtual_mapping_table != NULL) {
414 /* find page mapped by this address */
415 i = DMM_ADDR_TO_INDEX(addr);
416 if (i < table_size)
417 curr_region = virtual_mapping_table + i;
418 }
419
420 dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
421 __func__, curr_region, free_region, free_size);
422 return curr_region;
423}
424
425/*
426 * ======== get_free_region ========
427 * Purpose:
428 * Returns the requested free region
429 */
430static struct map_page *get_free_region(u32 len)
431{
432 struct map_page *curr_region = NULL;
433 u32 i = 0;
434 u32 region_size = 0;
435 u32 next_i = 0;
436
437 if (virtual_mapping_table == NULL)
438 return curr_region;
439 if (len > free_size) {
440 /* Find the largest free region
441 * (coalesce during the traversal) */
442 while (i < table_size) {
443 region_size = virtual_mapping_table[i].region_size;
444 next_i = i + region_size;
445 if (virtual_mapping_table[i].reserved == false) {
446 /* Coalesce, if possible */
447 if (next_i < table_size &&
448 virtual_mapping_table[next_i].reserved
449 == false) {
450 virtual_mapping_table[i].region_size +=
451 virtual_mapping_table
452 [next_i].region_size;
453 continue;
454 }
455 region_size *= PG_SIZE4K;
456 if (region_size > free_size) {
457 free_region = i;
458 free_size = region_size;
459 }
460 }
461 i = next_i;
462 }
463 }
464 if (len <= free_size) {
465 curr_region = virtual_mapping_table + free_region;
466 free_region += (len / PG_SIZE4K);
467 free_size -= len;
468 }
469 return curr_region;
470}
471
472/*
473 * ======== get_mapped_region ========
474 * Purpose:
475 * Returns the requested mapped region
476 */
477static struct map_page *get_mapped_region(u32 addrs)
478{
479 u32 i = 0;
480 struct map_page *curr_region = NULL;
481
482 if (virtual_mapping_table == NULL)
483 return curr_region;
484
485 i = DMM_ADDR_TO_INDEX(addrs);
486 if (i < table_size && (virtual_mapping_table[i].mapped ||
487 virtual_mapping_table[i].reserved))
488 curr_region = virtual_mapping_table + i;
489 return curr_region;
490}
491
492#ifdef DSP_DMM_DEBUG
493u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
494{
495 struct map_page *curr_node = NULL;
496 u32 i;
497 u32 freemem = 0;
498 u32 bigsize = 0;
499
500 spin_lock(&dmm_mgr->dmm_lock);
501
502 if (virtual_mapping_table != NULL) {
503 for (i = 0; i < table_size; i +=
504 virtual_mapping_table[i].region_size) {
505 curr_node = virtual_mapping_table + i;
506 if (curr_node->reserved) {
507 /*printk("RESERVED size = 0x%x, "
508 "Map size = 0x%x\n",
509 (curr_node->region_size * PG_SIZE4K),
510 (curr_node->mapped == false) ? 0 :
511 (curr_node->mapped_size * PG_SIZE4K));
512 */
513 } else {
514/* printk("UNRESERVED size = 0x%x\n",
515 (curr_node->region_size * PG_SIZE4K));
516 */
517 freemem += (curr_node->region_size * PG_SIZE4K);
518 if (curr_node->region_size > bigsize)
519 bigsize = curr_node->region_size;
520 }
521 }
522 }
523 spin_unlock(&dmm_mgr->dmm_lock);
524 printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
525 freemem / (1024 * 1024));
526 printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
527 (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
528 printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
529 (bigsize * PG_SIZE4K / (1024 * 1024)));
530
531 return 0;
532}
533#endif
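
The two macros at the top of dmm.c are inverses over 4 KB pages: DMM_ADDR_TO_INDEX() turns a DSP virtual address into a slot in virtual_mapping_table, and DMM_ADDR_VIRTUAL() turns a slot pointer back into an address. A worked check under assumed values:

/* Assuming dyn_mem_map_beg = 0x20000000 and PG_SIZE4K = 0x1000:
 *
 *   DMM_ADDR_TO_INDEX(0x20005000)
 *     = (0x20005000 - 0x20000000) / 0x1000 = 5
 *
 *   DMM_ADDR_VIRTUAL(&virtual_mapping_table[5])
 *     = 5 * 0x1000 + 0x20000000 = 0x20005000
 *
 * so each map_page entry accounts for exactly one 4 KB page of DSP VA.
 */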
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551ce4d78..86ca785f1913 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
993/* 993/*
994 * ======== procwrap_reserve_memory ======== 994 * ======== procwrap_reserve_memory ========
995 */ 995 */
996u32 __deprecated procwrap_reserve_memory(union trapped_args *args, 996u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
997 void *pr_ctxt)
998{ 997{
999 return 0; 998 int status;
999 void *prsv_addr;
1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1001
1002 if ((args->args_proc_rsvmem.ul_size <= 0) ||
1003 (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
1004 return -EINVAL;
1005
1006 status = proc_reserve_memory(hprocessor,
1007 args->args_proc_rsvmem.ul_size, &prsv_addr,
1008 pr_ctxt);
1009 if (!status) {
1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
1011 status = -EINVAL;
1012 proc_un_reserve_memory(args->args_proc_rsvmem.
1013 hprocessor, prsv_addr, pr_ctxt);
1014 }
1015 }
1016 return status;
1000} 1017}
1001 1018
1002/* 1019/*
@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1025/* 1042/*
1026 * ======== procwrap_un_reserve_memory ======== 1043 * ======== procwrap_un_reserve_memory ========
1027 */ 1044 */
1028u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, 1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
1029 void *pr_ctxt)
1030{ 1046{
1031 return 0; 1047 int status;
1048 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1049
1050 status = proc_un_reserve_memory(hprocessor,
1051 args->args_proc_unrsvmem.prsv_addr,
1052 pr_ctxt);
1053 return status;
1032} 1054}
1033 1055
1034/* 1056/*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc168516e5..81b1b9013550 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
146 struct process_context *ctxt = (struct process_context *)process_ctxt; 146 struct process_context *ctxt = (struct process_context *)process_ctxt;
147 int status = 0; 147 int status = 0;
148 struct dmm_map_object *temp_map, *map_obj; 148 struct dmm_map_object *temp_map, *map_obj;
149 struct dmm_rsv_object *temp_rsv, *rsv_obj;
149 150
150 /* Free DMM mapped memory resources */ 151 /* Free DMM mapped memory resources */
151 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { 152 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
155 pr_err("%s: proc_un_map failed!" 156 pr_err("%s: proc_un_map failed!"
156 " status = 0x%xn", __func__, status); 157 " status = 0x%xn", __func__, status);
157 } 158 }
159
160 /* Free DMM reserved memory resources */
161 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
162 status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
163 rsv_obj->dsp_reserved_addr,
164 ctxt);
165 if (status)
166 pr_err("%s: proc_un_reserve_memory failed!"
167 " status = 0x%xn", __func__, status);
168 }
158 return status; 169 return status;
159} 170}
160 171
@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
732 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); 743 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
733 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 744 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
734 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 745 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
746 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
735 747
736 /* for 24xx base port is not mapping the memory for DSP 748 /* for 24xx base port is not mapping the memory for DSP
737 * internal memory TODO Do a ioremap here */ 749 * internal memory TODO Do a ioremap here */
@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
785 OMAP_PER_PRM_SIZE); 797 OMAP_PER_PRM_SIZE);
786 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 798 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
787 OMAP_CORE_PRM_SIZE); 799 OMAP_CORE_PRM_SIZE);
800 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
801 OMAP_DMMU_SIZE);
788 802
789 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 803 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
790 host_res->dw_mem_base[0]); 804 host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
796 host_res->dw_mem_base[3]); 810 host_res->dw_mem_base[3]);
797 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 811 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
798 host_res->dw_mem_base[4]); 812 host_res->dw_mem_base[4]);
813 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
799 814
800 shm_size = drv_datap->shm_size; 815 shm_size = drv_datap->shm_size;
801 if (shm_size >= 0x10000) { 816 if (shm_size >= 0x10000) {
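
The new cleanup loop in drv_remove_all_dmm_res_elements() must use list_for_each_entry_safe() because proc_un_reserve_memory() deletes the matching dmm_rsv_object from the very list being walked; the _safe variant caches the next pointer before the loop body runs. The general shape, as a sketch:

/* Sketch: deleting during iteration needs the _safe variant. */
struct dmm_rsv_object *obj, *tmp;

list_for_each_entry_safe(obj, tmp, &ctxt->dmm_rsv_list, link) {
	/* plain list_for_each_entry would read freed memory here */
	list_del(&obj->link);
	kfree(obj);
}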
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43fec044..324fcdffb3b3 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
509 pr_ctxt->res_state = PROC_RES_ALLOCATED; 509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
510 spin_lock_init(&pr_ctxt->dmm_map_lock); 510 spin_lock_init(&pr_ctxt->dmm_map_lock);
511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); 511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
512 514
513 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
514 if (pr_ctxt->node_id) { 516 if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247f527a..1562f3c1281c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/nodepriv.h> 57#include <dspbridge/nodepriv.h>
58#include <dspbridge/node.h> 58#include <dspbridge/node.h>
59#include <dspbridge/dmm.h>
59 60
60/* Static/Dynamic Loader includes */ 61/* Static/Dynamic Loader includes */
61#include <dspbridge/dbll.h> 62#include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
316 u32 mapped_addr = 0; 317 u32 mapped_addr = 0;
317 u32 map_attrs = 0x0; 318 u32 map_attrs = 0x0;
318 struct dsp_processorstate proc_state; 319 struct dsp_processorstate proc_state;
320#ifdef DSP_DMM_DEBUG
321 struct dmm_object *dmm_mgr;
322 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
323#endif
319 324
320 void *node_res; 325 void *node_res;
321 326
@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
425 if (status) 430 if (status)
426 goto func_cont; 431 goto func_cont;
427 432
433 status = proc_reserve_memory(hprocessor,
434 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr),
438 pr_ctxt);
439 if (status) {
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
441 __func__, status);
442 goto func_cont;
443 }
444#ifdef DSP_DMM_DEBUG
445 status = dmm_get_handle(p_proc_object, &dmm_mgr);
446 if (!dmm_mgr) {
447 status = DSP_EHANDLE;
448 goto func_cont;
449 }
450
451 dmm_mem_map_dump(dmm_mgr);
452#endif
453
428 map_attrs |= DSP_MAPLITTLEENDIAN; 454 map_attrs |= DSP_MAPLITTLEENDIAN;
429 map_attrs |= DSP_MAPELEMSIZE32; 455 map_attrs |= DSP_MAPELEMSIZE32;
430 map_attrs |= DSP_MAPVIRTUALADDR; 456 map_attrs |= DSP_MAPVIRTUALADDR;
431 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, 457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
432 pnode->create_args.asa.task_arg_obj.heap_size, 458 pnode->create_args.asa.task_arg_obj.heap_size,
433 NULL, (void **)&mapped_addr, map_attrs, 459 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
434 pr_ctxt); 461 pr_ctxt);
435 if (status) 462 if (status)
436 pr_err("%s: Failed to map memory for Heap: 0x%x\n", 463 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
2484 struct stream_chnl stream; 2511 struct stream_chnl stream;
2485 struct node_msgargs node_msg_args; 2512 struct node_msgargs node_msg_args;
2486 struct node_taskargs task_arg_obj; 2513 struct node_taskargs task_arg_obj;
2487 2514#ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor;
2518#endif
2488 int status; 2519 int status;
2489 if (!hnode) 2520 if (!hnode)
2490 goto func_end; 2521 goto func_end;
@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
2545 status = proc_un_map(hnode->hprocessor, (void *) 2576 status = proc_un_map(hnode->hprocessor, (void *)
2546 task_arg_obj.udsp_heap_addr, 2577 task_arg_obj.udsp_heap_addr,
2547 pr_ctxt); 2578 pr_ctxt);
2579
2580 status = proc_un_reserve_memory(hnode->hprocessor,
2581 (void *)
2582 task_arg_obj.
2583 udsp_heap_res_addr,
2584 pr_ctxt);
2585#ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2587 if (dmm_mgr)
2588 dmm_mem_map_dump(dmm_mgr);
2589 else
2590 status = DSP_EHANDLE;
2591#endif
2548 } 2592 }
2549 } 2593 }
2550 if (node_type != NODE_MESSAGE) { 2594 if (node_type != NODE_MESSAGE) {
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02efedf..b47d7aa747b1 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
39#include <dspbridge/cod.h> 39#include <dspbridge/cod.h>
40#include <dspbridge/dev.h> 40#include <dspbridge/dev.h>
41#include <dspbridge/procpriv.h> 41#include <dspbridge/procpriv.h>
42#include <dspbridge/dmm.h>
42 43
43/* ----------------------------------- Resource Manager */ 44/* ----------------------------------- Resource Manager */
44#include <dspbridge/mgr.h> 45#include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
51#include <dspbridge/msg.h> 52#include <dspbridge/msg.h>
52#include <dspbridge/dspioctl.h> 53#include <dspbridge/dspioctl.h>
53#include <dspbridge/drv.h> 54#include <dspbridge/drv.h>
54#include <_tiomap.h>
55 55
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/proc.h> 57#include <dspbridge/proc.h>
@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
151 return map_obj; 151 return map_obj;
152} 152}
153 153
154static int match_exact_map_obj(struct dmm_map_object *map_obj,
155 u32 dsp_addr, u32 size)
156{
157 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
158 pr_err("%s: addr matches (0x%x) but size doesn't (0x%x != 0x%x)\n",
159 __func__, dsp_addr, map_obj->size, size);
160
161 return map_obj->dsp_addr == dsp_addr &&
162 map_obj->size == size;
163}
164
154static void remove_mapping_information(struct process_context *pr_ctxt, 165static void remove_mapping_information(struct process_context *pr_ctxt,
155 u32 dsp_addr) 166 u32 dsp_addr, u32 size)
156{ 167{
157 struct dmm_map_object *map_obj; 168 struct dmm_map_object *map_obj;
158 169
159 pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); 170 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
171 dsp_addr, size);
160 172
161 spin_lock(&pr_ctxt->dmm_map_lock); 173 spin_lock(&pr_ctxt->dmm_map_lock);
162 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { 174 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
163 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", 175 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
164 __func__, 176 __func__,
165 map_obj->mpu_addr, 177 map_obj->mpu_addr,
166 map_obj->dsp_addr); 178 map_obj->dsp_addr,
179 map_obj->size);
167 180
168 if (map_obj->dsp_addr == dsp_addr) { 181 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
169 pr_debug("%s: match, deleting map info\n", __func__); 182 pr_debug("%s: match, deleting map info\n", __func__);
170 list_del(&map_obj->link); 183 list_del(&map_obj->link);
171 kfree(map_obj->dma_info.sg); 184 kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1077 s32 cnew_envp; /* " " in new_envp[] */ 1090 s32 cnew_envp; /* " " in new_envp[] */
1078 s32 nproc_id = 0; /* Anticipate MP version. */ 1091 s32 nproc_id = 0; /* Anticipate MP version. */
1079 struct dcd_manager *hdcd_handle; 1092 struct dcd_manager *hdcd_handle;
1093 struct dmm_object *dmm_mgr;
1080 u32 dw_ext_end; 1094 u32 dw_ext_end;
1081 u32 proc_id; 1095 u32 proc_id;
1082 int brd_state; 1096 int brd_state;
@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
1267 if (!status) 1281 if (!status)
1268 status = cod_get_sym_value(cod_mgr, EXTEND, 1282 status = cod_get_sym_value(cod_mgr, EXTEND,
1269 &dw_ext_end); 1283 &dw_ext_end);
1284
1285 /* Reset DMM structs and add an initial free chunk */
1286 if (!status) {
1287 status =
1288 dev_get_dmm_mgr(p_proc_object->hdev_obj,
1289 &dmm_mgr);
1290 if (dmm_mgr) {
1291 /* Set dw_ext_end to DMM START u8
1292 * address */
1293 dw_ext_end =
1294 (dw_ext_end + 1) * DSPWORDSIZE;
1295 /* DMM memory is from EXT_END */
1296 status = dmm_create_tables(dmm_mgr,
1297 dw_ext_end,
1298 DMMPOOLSIZE);
1299 } else {
1300 status = -EFAULT;
1301 }
1302 }
1270 } 1303 }
1271 } 1304 }
1272 /* Restore the original argv[0] */ 1305 /* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1319{ 1352{
1320 u32 va_align; 1353 u32 va_align;
1321 u32 pa_align; 1354 u32 pa_align;
1355 struct dmm_object *dmm_mgr;
1322 u32 size_align; 1356 u32 size_align;
1323 int status = 0; 1357 int status = 0;
1324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1358 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1325 struct dmm_map_object *map_obj; 1359 struct dmm_map_object *map_obj;
1360 u32 tmp_addr = 0;
1326 1361
1327#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK 1362#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1328 if ((ul_map_attr & BUFMODE_MASK) != RBUF) { 1363 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1347 } 1382 }
1348 /* Critical section */ 1383 /* Critical section */
1349 mutex_lock(&proc_lock); 1384 mutex_lock(&proc_lock);
1385 dmm_get_handle(p_proc_object, &dmm_mgr);
1386 if (dmm_mgr)
1387 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1388 else
1389 status = -EFAULT;
1350 1390
1351 /* Add mapping to the page tables. */ 1391 /* Add mapping to the page tables. */
1352 if (!status) { 1392 if (!status) {
1393
1394 /* Mapped address = MSB of VA | LSB of PA */
1395 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1353 /* mapped memory resource tracking */ 1396 /* mapped memory resource tracking */
1354 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, 1397 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1355 size_align); 1398 size_align);
1356 if (!map_obj) { 1399 if (!map_obj)
1357 status = -ENOMEM; 1400 status = -ENOMEM;
1358 } else { 1401 else
1359 va_align = user_to_dsp_map( 1402 status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
1360 p_proc_object->hbridge_context->dsp_mmu, 1403 (p_proc_object->hbridge_context, pa_align, va_align,
1361 pa_align, va_align, size_align, 1404 size_align, ul_map_attr, map_obj->pages);
1362 map_obj->pages);
1363 if (IS_ERR_VALUE(va_align))
1364 status = (int)va_align;
1365 }
1366 } 1405 }
1367 if (!status) { 1406 if (!status) {
1368 /* Mapped address = MSB of VA | LSB of PA */ 1407 /* Mapped address = MSB of VA | LSB of PA */
1369 map_obj->dsp_addr = (va_align | 1408 *pp_map_addr = (void *) tmp_addr;
1370 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1371 *pp_map_addr = (void *)map_obj->dsp_addr;
1372 } else { 1409 } else {
1373 remove_mapping_information(pr_ctxt, va_align); 1410 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1411 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1374 } 1412 }
1375 mutex_unlock(&proc_lock); 1413 mutex_unlock(&proc_lock);
1376 1414
@@ -1463,6 +1501,55 @@ func_end:
1463} 1501}
1464 1502
1465/* 1503/*
1504 * ======== proc_reserve_memory ========
1505 * Purpose:
1506 * Reserve a virtually contiguous region of DSP address space.
1507 */
1508int proc_reserve_memory(void *hprocessor, u32 ul_size,
1509 void **pp_rsv_addr,
1510 struct process_context *pr_ctxt)
1511{
1512 struct dmm_object *dmm_mgr;
1513 int status = 0;
1514 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1515 struct dmm_rsv_object *rsv_obj;
1516
1517 if (!p_proc_object) {
1518 status = -EFAULT;
1519 goto func_end;
1520 }
1521
1522 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1523 if (!dmm_mgr) {
1524 status = -EFAULT;
1525 goto func_end;
1526 }
1527
1528 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1529 if (status != 0)
1530 goto func_end;
1531
1532 /*
1533 * A successful reserve should be followed by insertion of rsv_obj
1534 * into dmm_rsv_list, so that reserved memory resource tracking
1535 * remains up to date
1536 */
1537 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1538 if (rsv_obj) {
1539 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1540 spin_lock(&pr_ctxt->dmm_rsv_lock);
1541 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1542 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1543 }
1544
1545func_end:
1546 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1547 "status 0x%x\n", __func__, hprocessor,
1548 ul_size, pp_rsv_addr, status);
1549 return status;
1550}
1551
1552/*
1466 * ======== proc_start ======== 1553 * ======== proc_start ========
1467 * Purpose: 1554 * Purpose:
1468 * Start a processor running. 1555 * Start a processor running.
@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
1610{ 1697{
1611 int status = 0; 1698 int status = 0;
1612 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1699 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1700 struct dmm_object *dmm_mgr;
1613 u32 va_align; 1701 u32 va_align;
1702 u32 size_align;
1614 1703
1615 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); 1704 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1616 if (!p_proc_object) { 1705 if (!p_proc_object) {
@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
1618 goto func_end; 1707 goto func_end;
1619 } 1708 }
1620 1709
1710 status = dmm_get_handle(hprocessor, &dmm_mgr);
1711 if (!dmm_mgr) {
1712 status = -EFAULT;
1713 goto func_end;
1714 }
1715
1621 /* Critical section */ 1716 /* Critical section */
1622 mutex_lock(&proc_lock); 1717 mutex_lock(&proc_lock);
1718 /*
1719 * Update DMM structures. Get the size to unmap.
1720 * This function returns error if the VA is not mapped
1721 */
1722 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1623 /* Remove mapping from the page tables. */ 1723 /* Remove mapping from the page tables. */
1624 status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, 1724 if (!status) {
1625 va_align); 1725 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
1726 (p_proc_object->hbridge_context, va_align, size_align);
1727 }
1626 1728
1627 mutex_unlock(&proc_lock); 1729 mutex_unlock(&proc_lock);
1628 if (status) 1730 if (status)
@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
1633 * from dmm_map_list, so that mapped memory resource tracking 1735 * from dmm_map_list, so that mapped memory resource tracking
1634 * remains uptodate 1736 * remains uptodate
1635 */ 1737 */
1636 remove_mapping_information(pr_ctxt, (u32) map_addr); 1738 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1637 1739
1638func_end: 1740func_end:
1639 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", 1741 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@ func_end:
1642} 1744}
1643 1745
1644/* 1746/*
1747 * ======== proc_un_reserve_memory ========
1748 * Purpose:
1749 * Frees a previously reserved region of DSP address space.
1750 */
1751int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1752 struct process_context *pr_ctxt)
1753{
1754 struct dmm_object *dmm_mgr;
1755 int status = 0;
1756 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1757 struct dmm_rsv_object *rsv_obj;
1758
1759 if (!p_proc_object) {
1760 status = -EFAULT;
1761 goto func_end;
1762 }
1763
1764 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1765 if (!dmm_mgr) {
1766 status = -EFAULT;
1767 goto func_end;
1768 }
1769
1770 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1771 if (status != 0)
1772 goto func_end;
1773
1774 /*
1775 * A successful unreserve should be followed by removal of rsv_obj
1776 * from dmm_rsv_list, so that reserved memory resource tracking
1777 * remains up to date
1778 */
1779 spin_lock(&pr_ctxt->dmm_rsv_lock);
1780 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1781 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1782 list_del(&rsv_obj->link);
1783 kfree(rsv_obj);
1784 break;
1785 }
1786 }
1787 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1788
1789func_end:
1790 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1791 __func__, hprocessor, prsv_addr, status);
1792 return status;
1793}
1794
1795/*
1645 * ======== proc_monitor ======== 1796 * ======== proc_monitor ========
1646 * Purpose: 1797 * Purpose:
1647 * Place the Processor in Monitor State. This is an internal 1798 * Place the Processor in Monitor State. This is an internal
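
The address proc_map() returns keeps the page offset of the original MPU buffer: va_align supplies the high bits and pmpu_addr the low ones ("MSB of VA | LSB of PA"). Numerically, under assumed values:

/* With va_align = 0x20005000, pmpu_addr = 0x4001f234, PG_SIZE4K = 0x1000:
 *
 *   tmp_addr = va_align | ((u32)pmpu_addr & (PG_SIZE4K - 1))
 *            = 0x20005000 | 0x234
 *            = 0x20005234
 *
 * so byte offsets within the buffer survive the mapping unchanged. */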
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e848d297..fed25105970a 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
887 887
888 struct fb_deferred_io *fbdefio; 888 struct fb_deferred_io *fbdefio;
889 889
890 fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); 890 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
891 891
892 if (fbdefio) { 892 if (fbdefio) {
893 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 893 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d9e15b..7cc3d2407d1b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1675 1675
1676 { 1676 {
1677 char essid[IW_ESSID_MAX_SIZE+1]; 1677 char essid[IW_ESSID_MAX_SIZE+1];
1678 if (wrq->u.essid.pointer) 1678 if (wrq->u.essid.pointer) {
1679 rc = iwctl_giwessid(dev, NULL, 1679 rc = iwctl_giwessid(dev, NULL,
1680 &(wrq->u.essid), essid); 1680 &(wrq->u.essid), essid);
1681 if (copy_to_user(wrq->u.essid.pointer, 1681 if (copy_to_user(wrq->u.essid.pointer,
1682 essid, 1682 essid,
1683 wrq->u.essid.length) ) 1683 wrq->u.essid.length) )
1684 rc = -EFAULT; 1684 rc = -EFAULT;
1685 }
1685 } 1686 }
1686 break; 1687 break;
1687 1688
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a2197012065..7777d9a60a52 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle,
1417 */ 1417 */
1418 bus_mask = 0; 1418 bus_mask = 0;
1419 media_mask = 0; 1419 media_mask = 0;
1420 media_mask = 0;
1421 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { 1420 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
1422 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { 1421 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
1423 if (config_p->devices_to_enumerate[bus][device] == 1422 if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5318f2..6a71f52c59b1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@ exit:
139} 139}
140 140
141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, 141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
142 u8 key_index, const u8 *mac_addr, 142 u8 key_index, bool pairwise, const u8 *mac_addr,
143 struct key_params *params) 143 struct key_params *params)
144{ 144{
145 wlandevice_t *wlandev = dev->ml_priv; 145 wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@ exit:
198} 198}
199 199
200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, 200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
201 u8 key_index, const u8 *mac_addr, void *cookie, 201 u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
202 void (*callback)(void *cookie, struct key_params*)) 202 void (*callback)(void *cookie, struct key_params*))
203{ 203{
204 wlandevice_t *wlandev = dev->ml_priv; 204 wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
227} 227}
228 228
229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, 229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
230 u8 key_index, const u8 *mac_addr) 230 u8 key_index, bool pairwise, const u8 *mac_addr)
231{ 231{
232 wlandevice_t *wlandev = dev->ml_priv; 232 wlandevice_t *wlandev = dev->ml_priv;
233 u32 did; 233 u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8429e..b7b4a733b467 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
522 if (copy_to_user(useraddr, &edata, sizeof(edata))) 522 if (copy_to_user(useraddr, &edata, sizeof(edata)))
523 return -EFAULT; 523 return -EFAULT;
524 return 0; 524 return 0;
525 }
526#endif 525#endif
526 }
527 527
528 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
529} 529}
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644
index 000000000000..c43ef48b1a0f
--- /dev/null
+++ b/drivers/tty/Makefile
@@ -0,0 +1,11 @@
1obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
2 tty_buffer.o tty_port.o tty_mutex.o
3obj-$(CONFIG_LEGACY_PTYS) += pty.o
4obj-$(CONFIG_UNIX98_PTYS) += pty.o
5obj-$(CONFIG_AUDIT) += tty_audit.o
6obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
7obj-$(CONFIG_N_HDLC) += n_hdlc.o
8obj-$(CONFIG_N_GSM) += n_gsm.o
9obj-$(CONFIG_R3964) += n_r3964.o
10
11obj-y += vt/
diff --git a/drivers/char/n_gsm.c b/drivers/tty/n_gsm.c
index 04ef3ef0a422..81b46585edf7 100644
--- a/drivers/char/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
716 if (msg->len < 128) 716 if (msg->len < 128)
717 *--dp = (msg->len << 1) | EA; 717 *--dp = (msg->len << 1) | EA;
718 else { 718 else {
719 *--dp = (msg->len >> 6) | EA; 719 *--dp = ((msg->len & 127) << 1) | EA;
720 *--dp = (msg->len & 127) << 1; 720 *--dp = (msg->len >> 6) & 0xfe;
721 } 721 }
722 } 722 }
723 723
@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2375 gsm->mru = c->mru; 2375 gsm->mru = c->mru;
2376 gsm->encoding = c->encapsulation; 2376 gsm->encoding = c->encapsulation;
2377 gsm->adaption = c->adaption; 2377 gsm->adaption = c->adaption;
2378 gsm->n2 = c->n2;
2378 2379
2379 if (c->i == 1) 2380 if (c->i == 1)
2380 gsm->ftype = UIH; 2381 gsm->ftype = UIH;
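
The first n_gsm hunk above corrects the two-octet length encoding for frames of 128 bytes or more: the EA bit belongs on the low-bits octet, and the high bits must be shifted up one position ((len >> 6) & 0xfe is equivalent to (len >> 7) << 1). A standalone sketch of the corrected byte construction; note that the *--dp writes run backwards, so the octet written second ends up first on the wire:

    #include <stdio.h>

    #define EA 0x01 /* extension bit: set on the final length octet */

    int main(void)
    {
            unsigned int len = 200; /* example payload length >= 128 */
            unsigned char buf[2], *dp = buf + 2;

            /* same byte construction as the fixed __gsm_data_queue() */
            *--dp = ((len & 127) << 1) | EA; /* 0x91: low 7 bits, EA set */
            *--dp = (len >> 6) & 0xfe;       /* 0x02: high bits, EA clear */

            printf("%02x %02x\n", buf[0], buf[1]); /* prints: 02 91 */
            return 0;
    }
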
diff --git a/drivers/char/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d32281032c..47d32281032c 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
diff --git a/drivers/char/n_r3964.c b/drivers/tty/n_r3964.c
index 88dda0c45ee0..88dda0c45ee0 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/tty/n_r3964.c
diff --git a/drivers/char/n_tty.c b/drivers/tty/n_tty.c
index 428f4fe0b5f7..428f4fe0b5f7 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/tty/n_tty.c
diff --git a/drivers/char/pty.c b/drivers/tty/pty.c
index 923a48585501..923a48585501 100644
--- a/drivers/char/pty.c
+++ b/drivers/tty/pty.c
diff --git a/drivers/char/sysrq.c b/drivers/tty/sysrq.c
index eaa5d3efa79d..eaa5d3efa79d 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/tty/sysrq.c
diff --git a/drivers/char/tty_audit.c b/drivers/tty/tty_audit.c
index f64582b0f623..f64582b0f623 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/tty/tty_audit.c
diff --git a/drivers/char/tty_buffer.c b/drivers/tty/tty_buffer.c
index cc1e9850d655..d8210ca00720 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0;
417 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
418 int count; 419 int count;
419 char *char_buf; 420 char *char_buf;
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
423 if (!count) { 424 if (!count) {
424 if (head->next == NULL) 425 if (head->next == NULL)
425 break; 426 break;
	427		/*
	428		 * There's a possibility the tty might get a new buffer
	429		 * added during the unlock window below. We could end up
	430		 * spinning in here forever, hogging the CPU completely.
	431		 * To avoid this, let's have a rest each time we process
	432		 * the tail buffer.
	433		 */
434 if (tail == head)
435 seen_tail = 1;
426 tty->buf.head = head->next; 436 tty->buf.head = head->next;
427 tty_buffer_free(tty, head); 437 tty_buffer_free(tty, head);
428 continue; 438 continue;
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
432 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
433 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
434 break; 444 break;
435 if (!tty->receive_room) { 445 if (!tty->receive_room || seen_tail) {
436 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_delayed_work(&tty->buf.work, 1);
437 break; 447 break;
438 } 448 }
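
The flush_to_ldisc() change above guards against a livelock: with the buffer lock dropped inside the loop, a fast producer can keep appending buffers and the worker would never exit. Snapshotting the tail at entry and rescheduling once it has been consumed bounds each pass. A simplified sketch of the pattern, with locking and the tty specifics omitted:

    struct tbuf {
            struct tbuf *next;
    };

    /* Drain at most the buffers that existed when we started; anything
     * appended meanwhile is left for a rescheduled pass. */
    static void drain_once(struct tbuf **head, struct tbuf *tail_at_entry)
    {
            int seen_tail = 0;
            struct tbuf *b;

            while (!seen_tail && (b = *head) != NULL) {
                    if (b == tail_at_entry)
                            seen_tail = 1; /* last buffer present at entry */
                    *head = b->next;
                    /* ... hand b's bytes to the consumer, then free b ... */
            }
    }
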
diff --git a/drivers/char/tty_io.c b/drivers/tty/tty_io.c
index c05c5af5aa04..c05c5af5aa04 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/tty/tty_io.c
diff --git a/drivers/char/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 0c1889971459..0c1889971459 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
diff --git a/drivers/char/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 412f9775d19c..d8e96b005023 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
47 47
48static DEFINE_SPINLOCK(tty_ldisc_lock); 48static DEFINE_SPINLOCK(tty_ldisc_lock);
49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
50static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
50/* Line disc dispatch table */ 51/* Line disc dispatch table */
51static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; 52static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
52 53
@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
83 return; 84 return;
84 } 85 }
85 local_irq_restore(flags); 86 local_irq_restore(flags);
87 wake_up(&tty_ldisc_idle);
86} 88}
87 89
88/** 90/**
@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
531} 533}
532 534
533/** 535/**
536 * tty_ldisc_wait_idle - wait for the ldisc to become idle
537 * @tty: tty to wait for
538 *
539 * Wait for the line discipline to become idle. The discipline must
540 * have been halted for this to guarantee it remains idle.
541 */
542static int tty_ldisc_wait_idle(struct tty_struct *tty)
543{
544 int ret;
545 ret = wait_event_interruptible_timeout(tty_ldisc_idle,
546 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
547 if (ret < 0)
548 return ret;
549 return ret > 0 ? 0 : -EBUSY;
550}
551
552/**
534 * tty_set_ldisc - set line discipline 553 * tty_set_ldisc - set line discipline
535 * @tty: the terminal to set 554 * @tty: the terminal to set
536 * @ldisc: the line discipline 555 * @ldisc: the line discipline
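
The tty_ldisc_wait_idle() helper added above leans on the wait_event_interruptible_timeout() return convention: a negative value means a signal interrupted the wait, zero means the five-second timeout expired with the condition still false, and a positive value means the ldisc went idle in time. Mapped to errnos:

    /* Return-value mapping used by tty_ldisc_wait_idle() above:
     *   < 0   interrupted by a signal -> propagate the error
     *   == 0  timed out, still busy   -> -EBUSY
     *   > 0   became idle in time     -> success
     */
    static int wait_ret_to_errno(long ret)
    {
            if (ret < 0)
                    return ret;
            return ret > 0 ? 0 : -EBUSY;
    }
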
@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
634 653
635 flush_scheduled_work(); 654 flush_scheduled_work();
636 655
656 retval = tty_ldisc_wait_idle(tty);
657
637 tty_lock(); 658 tty_lock();
638 mutex_lock(&tty->ldisc_mutex); 659 mutex_lock(&tty->ldisc_mutex);
660
661 /* handle wait idle failure locked */
662 if (retval) {
663 tty_ldisc_put(new_ldisc);
664 goto enable;
665 }
666
639 if (test_bit(TTY_HUPPED, &tty->flags)) { 667 if (test_bit(TTY_HUPPED, &tty->flags)) {
640 /* We were raced by the hangup method. It will have stomped 668 /* We were raced by the hangup method. It will have stomped
641 the ldisc data and closed the ldisc down */ 669 the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
669 697
670 tty_ldisc_put(o_ldisc); 698 tty_ldisc_put(o_ldisc);
671 699
700enable:
672 /* 701 /*
673 * Allow ldisc referencing to occur again 702 * Allow ldisc referencing to occur again
674 */ 703 */
@@ -714,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty)
714 * state closed 743 * state closed
715 */ 744 */
716 745
717static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) 746static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
718{ 747{
719 struct tty_ldisc *ld; 748 struct tty_ldisc *ld = tty_ldisc_get(ldisc);
749
750 if (IS_ERR(ld))
751 return -1;
720 752
721 tty_ldisc_close(tty, tty->ldisc); 753 tty_ldisc_close(tty, tty->ldisc);
722 tty_ldisc_put(tty->ldisc); 754 tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
724 /* 756 /*
725 * Switch the line discipline back 757 * Switch the line discipline back
726 */ 758 */
727 ld = tty_ldisc_get(ldisc);
728 BUG_ON(IS_ERR(ld));
729 tty_ldisc_assign(tty, ld); 759 tty_ldisc_assign(tty, ld);
730 tty_set_termios_ldisc(tty, ldisc); 760 tty_set_termios_ldisc(tty, ldisc);
761
762 return 0;
731} 763}
732 764
733/** 765/**
@@ -802,13 +834,16 @@ void tty_ldisc_hangup(struct tty_struct *tty)
802 a FIXME */ 834 a FIXME */
803 if (tty->ldisc) { /* Not yet closed */ 835 if (tty->ldisc) { /* Not yet closed */
804 if (reset == 0) { 836 if (reset == 0) {
805 tty_ldisc_reinit(tty, tty->termios->c_line); 837
806 err = tty_ldisc_open(tty, tty->ldisc); 838 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
839 err = tty_ldisc_open(tty, tty->ldisc);
840 else
841 err = 1;
807 } 842 }
808 /* If the re-open fails or we reset then go to N_TTY. The 843 /* If the re-open fails or we reset then go to N_TTY. The
809 N_TTY open cannot fail */ 844 N_TTY open cannot fail */
810 if (reset || err) { 845 if (reset || err) {
811 tty_ldisc_reinit(tty, N_TTY); 846 BUG_ON(tty_ldisc_reinit(tty, N_TTY));
812 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 847 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
813 } 848 }
814 tty_ldisc_enable(tty); 849 tty_ldisc_enable(tty);
diff --git a/drivers/char/tty_mutex.c b/drivers/tty/tty_mutex.c
index 133697540c73..133697540c73 100644
--- a/drivers/char/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
diff --git a/drivers/char/tty_port.c b/drivers/tty/tty_port.c
index 33d37d230f8f..33d37d230f8f 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/tty/tty_port.c
diff --git a/drivers/char/.gitignore b/drivers/tty/vt/.gitignore
index 83683a2d8e6a..83683a2d8e6a 100644
--- a/drivers/char/.gitignore
+++ b/drivers/tty/vt/.gitignore
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
new file mode 100644
index 000000000000..14a51c9960df
--- /dev/null
+++ b/drivers/tty/vt/Makefile
@@ -0,0 +1,34 @@
1#
2# This file contains the font map for the default (hardware) font
3#
4FONTMAPFILE = cp437.uni
5
6obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
7 selection.o keyboard.o
8obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
9obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
10
11# Files generated that shall be removed upon make clean
12clean-files := consolemap_deftbl.c defkeymap.c
13
14quiet_cmd_conmk = CONMK $@
15 cmd_conmk = scripts/conmakehash $< > $@
16
17$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
18 $(call cmd,conmk)
19
20$(obj)/defkeymap.o: $(obj)/defkeymap.c
21
22# Uncomment if you're changing the keymap and have an appropriate
23# loadkeys version for the map. By default, we'll use the shipped
24# versions.
25# GENERATE_KEYMAP := 1
26
27ifdef GENERATE_KEYMAP
28
29$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
30 loadkeys --mktable $< > $@.tmp
31 sed -e 's/^static *//' $@.tmp > $@
32 rm $@.tmp
33
34endif
diff --git a/drivers/char/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80156d4..45d3e80156d4 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
diff --git a/drivers/char/cp437.uni b/drivers/tty/vt/cp437.uni
index bc6163484f62..bc6163484f62 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/tty/vt/cp437.uni
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index d2208dfe3f67..d2208dfe3f67 100644
--- a/drivers/char/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
diff --git a/drivers/char/defkeymap.map b/drivers/tty/vt/defkeymap.map
index 50b30cace261..50b30cace261 100644
--- a/drivers/char/defkeymap.map
+++ b/drivers/tty/vt/defkeymap.map
diff --git a/drivers/char/keyboard.c b/drivers/tty/vt/keyboard.c
index e95d7876ca6b..e95d7876ca6b 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
diff --git a/drivers/char/selection.c b/drivers/tty/vt/selection.c
index ebae344ce910..ebae344ce910 100644
--- a/drivers/char/selection.c
+++ b/drivers/tty/vt/selection.c
diff --git a/drivers/char/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 273ab44cc91d..eab3a1ff99e4 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@ static unsigned int
553vcs_poll(struct file *file, poll_table *wait) 553vcs_poll(struct file *file, poll_table *wait)
554{ 554{
555 struct vcs_poll_data *poll = vcs_poll_data_get(file); 555 struct vcs_poll_data *poll = vcs_poll_data_get(file);
556 int ret = 0; 556 int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
557 557
558 if (poll) { 558 if (poll) {
559 poll_wait(file, &poll->waitq, wait); 559 poll_wait(file, &poll->waitq, wait);
560 if (!poll->seen_last_update) 560 if (poll->seen_last_update)
561 ret = POLLIN | POLLRDNORM; 561 ret = DEFAULT_POLLMASK;
562 } 562 }
563 return ret; 563 return ret;
564} 564}
diff --git a/drivers/char/vt.c b/drivers/tty/vt/vt.c
index a8ec48ed14d9..a8ec48ed14d9 100644
--- a/drivers/char/vt.c
+++ b/drivers/tty/vt/vt.c
diff --git a/drivers/char/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0fb4611..6b68a0fb4611 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6202a5..045bb4b823e1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
965 965
966static int proc_connectinfo(struct dev_state *ps, void __user *arg) 966static int proc_connectinfo(struct dev_state *ps, void __user *arg)
967{ 967{
968 struct usbdevfs_connectinfo ci; 968 struct usbdevfs_connectinfo ci = {
969 .devnum = ps->dev->devnum,
970 .slow = ps->dev->speed == USB_SPEED_LOW
971 };
969 972
970 ci.devnum = ps->dev->devnum;
971 ci.slow = ps->dev->speed == USB_SPEED_LOW;
972 if (copy_to_user(arg, &ci, sizeof(ci))) 973 if (copy_to_user(arg, &ci, sizeof(ci)))
973 return -EFAULT; 974 return -EFAULT;
974 return 0; 975 return 0;
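
The devio change above replaces field-by-field assignment with a designated initializer: any member not named in the initializer list is zero-initialized, so the struct is fully defined before it is copied to user space. (Whether padding bytes are also cleared is compiler-dependent, which is why leak-sensitive paths often prefer an explicit memset, as the iowarrior and sisusb hunks further down do.) A userspace stand-in with a simplified struct, illustrative only:

    /* Simplified stand-in for usbdevfs_connectinfo. */
    struct connectinfo {
            unsigned int devnum;
            unsigned char slow;
    };

    static struct connectinfo make_ci(unsigned int devnum, int low_speed)
    {
            struct connectinfo ci = {
                    .devnum = devnum,
                    .slow = low_speed ? 1 : 0, /* unnamed members become 0 */
            };
            return ci;
    }
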
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca814651..607d0db4a988 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC || ARCH_MXC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 select USB_FSL_MPH_DR_OF 161 select USB_FSL_MPH_DR_OF if OF
162 help 162 help
163 Some of Freescale PowerPC processors have a High Speed 163 Some of Freescale PowerPC processors have a High Speed
164 Dual-Role(DR) USB controller, which supports device mode. 164 Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb2319056..e7e0c69d3b1f 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@ struct goku_udc {
251 got_region:1, 251 got_region:1,
252 req_config:1, 252 req_config:1,
253 configured:1, 253 configured:1,
254 enabled:1; 254 enabled:1,
255 registered:1;
255 256
256 /* pci state used to access those endpoints */ 257 /* pci state used to access those endpoints */
257 struct pci_dev *pdev; 258 struct pci_dev *pdev;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index cb23355f52d3..fbe86ca95802 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -811,7 +811,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
811 INFO(dev, "MAC %pM\n", net->dev_addr); 811 INFO(dev, "MAC %pM\n", net->dev_addr);
812 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 812 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
813 813
814 netif_stop_queue(net);
815 the_dev = dev; 814 the_dev = dev;
816 } 815 }
817 816
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354a4c20..40f7716b31fc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
105 wait_queue_head_t close_wait; /* wait for last close */ 105 wait_queue_head_t close_wait; /* wait for last close */
106 106
107 struct list_head read_pool; 107 struct list_head read_pool;
108 int read_started;
109 int read_allocated;
108 struct list_head read_queue; 110 struct list_head read_queue;
109 unsigned n_read; 111 unsigned n_read;
110 struct tasklet_struct push; 112 struct tasklet_struct push;
111 113
112 struct list_head write_pool; 114 struct list_head write_pool;
115 int write_started;
116 int write_allocated;
113 struct gs_buf port_write_buf; 117 struct gs_buf port_write_buf;
114 wait_queue_head_t drain_wait; /* wait while writes drain */ 118 wait_queue_head_t drain_wait; /* wait while writes drain */
115 119
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
363 struct usb_request *req; 367 struct usb_request *req;
364 int len; 368 int len;
365 369
370 if (port->write_started >= QUEUE_SIZE)
371 break;
372
366 req = list_entry(pool->next, struct usb_request, list); 373 req = list_entry(pool->next, struct usb_request, list);
367 len = gs_send_packet(port, req->buf, in->maxpacket); 374 len = gs_send_packet(port, req->buf, in->maxpacket);
368 if (len == 0) { 375 if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
397 break; 404 break;
398 } 405 }
399 406
407 port->write_started++;
408
400 /* abort immediately after disconnect */ 409 /* abort immediately after disconnect */
401 if (!port->port_usb) 410 if (!port->port_usb)
402 break; 411 break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
418{ 427{
419 struct list_head *pool = &port->read_pool; 428 struct list_head *pool = &port->read_pool;
420 struct usb_ep *out = port->port_usb->out; 429 struct usb_ep *out = port->port_usb->out;
421 unsigned started = 0;
422 430
423 while (!list_empty(pool)) { 431 while (!list_empty(pool)) {
424 struct usb_request *req; 432 struct usb_request *req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
430 if (!tty) 438 if (!tty)
431 break; 439 break;
432 440
441 if (port->read_started >= QUEUE_SIZE)
442 break;
443
433 req = list_entry(pool->next, struct usb_request, list); 444 req = list_entry(pool->next, struct usb_request, list);
434 list_del(&req->list); 445 list_del(&req->list);
435 req->length = out->maxpacket; 446 req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
447 list_add(&req->list, pool); 458 list_add(&req->list, pool);
448 break; 459 break;
449 } 460 }
450 started++; 461 port->read_started++;
451 462
452 /* abort immediately after disconnect */ 463 /* abort immediately after disconnect */
453 if (!port->port_usb) 464 if (!port->port_usb)
454 break; 465 break;
455 } 466 }
456 return started; 467 return port->read_started;
457} 468}
458 469
459/* 470/*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
535 } 546 }
536recycle: 547recycle:
537 list_move(&req->list, &port->read_pool); 548 list_move(&req->list, &port->read_pool);
549 port->read_started--;
538 } 550 }
539 551
540 /* Push from tty to ldisc; without low_latency set this is handled by 552 /* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
587 599
588 spin_lock(&port->port_lock); 600 spin_lock(&port->port_lock);
589 list_add(&req->list, &port->write_pool); 601 list_add(&req->list, &port->write_pool);
602 port->write_started--;
590 603
591 switch (req->status) { 604 switch (req->status) {
592 default: 605 default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
608 spin_unlock(&port->port_lock); 621 spin_unlock(&port->port_lock);
609} 622}
610 623
611static void gs_free_requests(struct usb_ep *ep, struct list_head *head) 624static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 int *allocated)
612{ 626{
613 struct usb_request *req; 627 struct usb_request *req;
614 628
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
616 req = list_entry(head->next, struct usb_request, list); 630 req = list_entry(head->next, struct usb_request, list);
617 list_del(&req->list); 631 list_del(&req->list);
618 gs_free_req(ep, req); 632 gs_free_req(ep, req);
633 if (allocated)
634 (*allocated)--;
619 } 635 }
620} 636}
621 637
622static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, 638static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
623 void (*fn)(struct usb_ep *, struct usb_request *)) 639 void (*fn)(struct usb_ep *, struct usb_request *),
640 int *allocated)
624{ 641{
625 int i; 642 int i;
626 struct usb_request *req; 643 struct usb_request *req;
644 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
627 645
628 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't 646 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
629 * do quite that many this time, don't fail ... we just won't 647 * do quite that many this time, don't fail ... we just won't
630 * be as speedy as we might otherwise be. 648 * be as speedy as we might otherwise be.
631 */ 649 */
632 for (i = 0; i < QUEUE_SIZE; i++) { 650 for (i = 0; i < n; i++) {
633 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); 651 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
634 if (!req) 652 if (!req)
635 return list_empty(head) ? -ENOMEM : 0; 653 return list_empty(head) ? -ENOMEM : 0;
636 req->complete = fn; 654 req->complete = fn;
637 list_add_tail(&req->list, head); 655 list_add_tail(&req->list, head);
656 if (allocated)
657 (*allocated)++;
638 } 658 }
639 return 0; 659 return 0;
640} 660}
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
661 * configurations may use different endpoints with a given port; 681 * configurations may use different endpoints with a given port;
662 * and high speed vs full speed changes packet sizes too. 682 * and high speed vs full speed changes packet sizes too.
663 */ 683 */
664 status = gs_alloc_requests(ep, head, gs_read_complete); 684 status = gs_alloc_requests(ep, head, gs_read_complete,
685 &port->read_allocated);
665 if (status) 686 if (status)
666 return status; 687 return status;
667 688
668 status = gs_alloc_requests(port->port_usb->in, &port->write_pool, 689 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
669 gs_write_complete); 690 gs_write_complete, &port->write_allocated);
670 if (status) { 691 if (status) {
671 gs_free_requests(ep, head); 692 gs_free_requests(ep, head, &port->read_allocated);
672 return status; 693 return status;
673 } 694 }
674 695
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
680 if (started) { 701 if (started) {
681 tty_wakeup(port->port_tty); 702 tty_wakeup(port->port_tty);
682 } else { 703 } else {
683 gs_free_requests(ep, head); 704 gs_free_requests(ep, head, &port->read_allocated);
684 gs_free_requests(port->port_usb->in, &port->write_pool); 705 gs_free_requests(port->port_usb->in, &port->write_pool,
706 &port->write_allocated);
685 status = -EIO; 707 status = -EIO;
686 } 708 }
687 709
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
1315 spin_lock_irqsave(&port->port_lock, flags); 1337 spin_lock_irqsave(&port->port_lock, flags);
1316 if (port->open_count == 0 && !port->openclose) 1338 if (port->open_count == 0 && !port->openclose)
1317 gs_buf_free(&port->port_write_buf); 1339 gs_buf_free(&port->port_write_buf);
1318 gs_free_requests(gser->out, &port->read_pool); 1340 gs_free_requests(gser->out, &port->read_pool, NULL);
1319 gs_free_requests(gser->out, &port->read_queue); 1341 gs_free_requests(gser->out, &port->read_queue, NULL);
1320 gs_free_requests(gser->in, &port->write_pool); 1342 gs_free_requests(gser->in, &port->write_pool, NULL);
1343
1344 port->read_allocated = port->read_started =
1345 port->write_allocated = port->write_started = 0;
1346
1321 spin_unlock_irqrestore(&port->port_lock, flags); 1347 spin_unlock_irqrestore(&port->port_lock, flags);
1322} 1348}
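
The u_serial changes above replace a per-call "started" count with per-port read_started/write_started and read_allocated/write_allocated counters, so gs_alloc_requests() only tops the pool up to QUEUE_SIZE and the number of in-flight requests stays bounded across repeated gs_start_io() calls. The top-up arithmetic in isolation; QUEUE_SIZE and alloc_one() are stand-ins for the driver's queue depth and gs_alloc_req():

    #include <errno.h>
    #include <stdlib.h>

    #define QUEUE_SIZE 16 /* stand-in for the driver's queue depth */

    static void *alloc_one(void)
    {
            return malloc(64); /* stand-in for gs_alloc_req() */
    }

    /* Allocate only what is missing, so the pool never exceeds the cap;
     * a partial failure is fine as long as something is allocated. */
    static int top_up(int *allocated)
    {
            int n = QUEUE_SIZE - *allocated;

            for (int i = 0; i < n; i++) {
                    if (!alloc_one())
                            return *allocated ? 0 : -ENOMEM;
                    (*allocated)++;
            }
            return 0;
    }
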
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c396ca32..6f4f8e6a40c7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@ config USB_EHCI_FSL
122 bool "Support for Freescale on-chip EHCI USB controller" 122 bool "Support for Freescale on-chip EHCI USB controller"
123 depends on USB_EHCI_HCD && FSL_SOC 123 depends on USB_EHCI_HCD && FSL_SOC
124 select USB_EHCI_ROOT_HUB_TT 124 select USB_EHCI_ROOT_HUB_TT
125 select USB_FSL_MPH_DR_OF 125 select USB_FSL_MPH_DR_OF if OF
126 ---help--- 126 ---help---
127 Variation of ARC USB block used in some Freescale chips. 127 Variation of ARC USB block used in some Freescale chips.
128 128
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7c44af..bce85055019a 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@ struct ehci_mxc_priv {
36static int ehci_mxc_setup(struct usb_hcd *hcd) 36static int ehci_mxc_setup(struct usb_hcd *hcd)
37{ 37{
38 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 38 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
39 struct device *dev = hcd->self.controller;
40 struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
39 int retval; 41 int retval;
40 42
41 /* EHCI registers start at offset 0x100 */ 43 /* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
63 65
64 ehci_reset(ehci); 66 ehci_reset(ehci);
65 67
68 /* set up the PORTSCx register */
69 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
70
71 /* is this really needed? */
72 msleep(10);
73
66 ehci_port_power(ehci, 0); 74 ehci_port_power(ehci, 0);
67 return 0; 75 return 0;
68} 76}
@@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
114 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 122 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
115 struct usb_hcd *hcd; 123 struct usb_hcd *hcd;
116 struct resource *res; 124 struct resource *res;
117 int irq, ret, temp; 125 int irq, ret;
118 struct ehci_mxc_priv *priv; 126 struct ehci_mxc_priv *priv;
119 struct device *dev = &pdev->dev; 127 struct device *dev = &pdev->dev;
120 128
@@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
188 clk_enable(priv->ahbclk); 196 clk_enable(priv->ahbclk);
189 } 197 }
190 198
191 /* set up the PORTSCx register */
192 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
193 mdelay(10);
194
195 /* setup specific usb hw */ 199 /* setup specific usb hw */
196 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); 200 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
197 if (ret < 0) 201 if (ret < 0)
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872f3ab9..931d588c3fb5 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
273 }, 273 },
274}; 274};
275 275
276MODULE_ALIAS("platfrom:jz4740-ohci"); 276MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 375664198776..c9078e4e1f4d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
553 /* needed for power consumption */ 553 /* needed for power consumption */
554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; 554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
555 555
556 memset(&info, 0, sizeof(info));
556 /* directly from the descriptor */ 557 /* directly from the descriptor */
557 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); 558 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
558 info.product = dev->product_id; 559 info.product = dev->product_id;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e99a4b4..dd573abd2d1e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3008#else 3008#else
3009 x.sisusb_conactive = 0; 3009 x.sisusb_conactive = 0;
3010#endif 3010#endif
3011 memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
3011 3012
3012 if (copy_to_user((void __user *)arg, &x, sizeof(x))) 3013 if (copy_to_user((void __user *)arg, &x, sizeof(x)))
3013 retval = -EFAULT; 3014 retval = -EFAULT;
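
The iowarrior and sisusb hunks above close the same class of information leak: a stack-allocated struct is copied to user space, so reserved fields and implicit padding must be zeroed first or they carry stale kernel stack bytes. The generic pattern, with a hypothetical struct for illustration:

    #include <string.h>

    struct report {
            int value;
            char reserved[8]; /* must not carry stale stack bytes */
    };

    static void prepare_report(struct report *r, int value)
    {
            memset(r, 0, sizeof(*r)); /* zero everything, padding included */
            r->value = value;
    }
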
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d274363..fcb5206a65bd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
171 } 171 }
172 172
173 /* Start sampling ID pin, when plug is removed from MUSB */ 173 /* Start sampling ID pin, when plug is removed from MUSB */
174 if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 174 if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
176 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
176 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 177 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
177 musb->a_wait_bcon = TIMER_DELAY; 178 musb->a_wait_bcon = TIMER_DELAY;
178 } 179 }
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
323 return -EIO; 324 return -EIO;
324} 325}
325 326
326int __init musb_platform_init(struct musb *musb, void *board_data) 327static void musb_platform_reg_init(struct musb *musb)
327{ 328{
328
329 /*
330 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
331 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
332 * be low for DEVICE mode and high for HOST mode. We set it high
333 * here because we are in host mode
334 */
335
336 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
337 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
338 musb->config->gpio_vrsel);
339 return -ENODEV;
340 }
341 gpio_direction_output(musb->config->gpio_vrsel, 0);
342
343 usb_nop_xceiv_register();
344 musb->xceiv = otg_get_transceiver();
345 if (!musb->xceiv) {
346 gpio_free(musb->config->gpio_vrsel);
347 return -ENODEV;
348 }
349
350 if (ANOMALY_05000346) { 329 if (ANOMALY_05000346) {
351 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 330 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
352 SSYNC(); 331 SSYNC();
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
358 } 337 }
359 338
360 /* Configure PLL oscillator register */ 339 /* Configure PLL oscillator register */
361 bfin_write_USB_PLLOSC_CTRL(0x30a8); 340 bfin_write_USB_PLLOSC_CTRL(0x3080 |
341 ((480/musb->config->clkin) << 1));
362 SSYNC(); 342 SSYNC();
363 343
364 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); 344 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
380 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | 360 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
381 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); 361 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
382 SSYNC(); 362 SSYNC();
363}
364
365int __init musb_platform_init(struct musb *musb, void *board_data)
366{
367
368 /*
369 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
370 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
371 * be low for DEVICE mode and high for HOST mode. We set it high
372 * here because we are in host mode
373 */
374
375 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
376 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n",
377 musb->config->gpio_vrsel);
378 return -ENODEV;
379 }
380 gpio_direction_output(musb->config->gpio_vrsel, 0);
381
382 usb_nop_xceiv_register();
383 musb->xceiv = otg_get_transceiver();
384 if (!musb->xceiv) {
385 gpio_free(musb->config->gpio_vrsel);
386 return -ENODEV;
387 }
388
389 musb_platform_reg_init(musb);
383 390
384 if (is_host_enabled(musb)) { 391 if (is_host_enabled(musb)) {
385 musb->board_set_vbus = bfin_set_vbus; 392 musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
394 return 0; 401 return 0;
395} 402}
396 403
404#ifdef CONFIG_PM
405void musb_platform_save_context(struct musb *musb,
406 struct musb_context_registers *musb_context)
407{
408 if (is_host_active(musb))
409 /*
410 * During hibernate gpio_vrsel will change from high to low
411 * low which will generate wakeup event resume the system
412 * immediately. Set it to 0 before hibernate to avoid this
413 * wakeup event.
414 */
415 gpio_set_value(musb->config->gpio_vrsel, 0);
416}
417
418void musb_platform_restore_context(struct musb *musb,
419 struct musb_context_registers *musb_context)
420{
421 musb_platform_reg_init(musb);
422}
423#endif
424
397int musb_platform_exit(struct musb *musb) 425int musb_platform_exit(struct musb *musb)
398{ 426{
399 gpio_free(musb->config->gpio_vrsel); 427 gpio_free(musb->config->gpio_vrsel);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024c5515..e6669fc3b804 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
552 if (int_usb & MUSB_INTR_SESSREQ) { 552 if (int_usb & MUSB_INTR_SESSREQ) {
553 void __iomem *mbase = musb->mregs; 553 void __iomem *mbase = musb->mregs;
554 554
555 if (devctl & MUSB_DEVCTL_BDEVICE) { 555 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
556 && (devctl & MUSB_DEVCTL_BDEVICE)) {
556 DBG(3, "SessReq while on B state\n"); 557 DBG(3, "SessReq while on B state\n");
557 return IRQ_HANDLED; 558 return IRQ_HANDLED;
558 } 559 }
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev)
1052 clk_put(musb->clock); 1053 clk_put(musb->clock);
1053 spin_unlock_irqrestore(&musb->lock, flags); 1054 spin_unlock_irqrestore(&musb->lock, flags);
1054 1055
1056 if (!is_otg_enabled(musb) && is_host_enabled(musb))
1057 usb_remove_hcd(musb_to_hcd(musb));
1058 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1059 musb_platform_exit(musb);
1060
1055 /* FIXME power down */ 1061 /* FIXME power down */
1056} 1062}
1057 1063
@@ -2244,13 +2250,6 @@ static int __exit musb_remove(struct platform_device *pdev)
2244 */ 2250 */
2245 musb_exit_debugfs(musb); 2251 musb_exit_debugfs(musb);
2246 musb_shutdown(pdev); 2252 musb_shutdown(pdev);
2247#ifdef CONFIG_USB_MUSB_HDRC_HCD
2248 if (musb->board_mode == MUSB_HOST)
2249 usb_remove_hcd(musb_to_hcd(musb));
2250#endif
2251 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2252 musb_platform_exit(musb);
2253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2254 2253
2255 musb_free(musb); 2254 musb_free(musb);
2256 iounmap(ctrl_base); 2255 iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@ static int musb_suspend(struct device *dev)
2411 unsigned long flags; 2410 unsigned long flags;
2412 struct musb *musb = dev_to_musb(&pdev->dev); 2411 struct musb *musb = dev_to_musb(&pdev->dev);
2413 2412
2414 if (!musb->clock)
2415 return 0;
2416
2417 spin_lock_irqsave(&musb->lock, flags); 2413 spin_lock_irqsave(&musb->lock, flags);
2418 2414
2419 if (is_peripheral_active(musb)) { 2415 if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@ static int musb_suspend(struct device *dev)
2428 2424
2429 musb_save_context(musb); 2425 musb_save_context(musb);
2430 2426
2431 if (musb->set_clock) 2427 if (musb->clock) {
2432 musb->set_clock(musb->clock, 0); 2428 if (musb->set_clock)
2433 else 2429 musb->set_clock(musb->clock, 0);
2434 clk_disable(musb->clock); 2430 else
2431 clk_disable(musb->clock);
2432 }
2435 spin_unlock_irqrestore(&musb->lock, flags); 2433 spin_unlock_irqrestore(&musb->lock, flags);
2436 return 0; 2434 return 0;
2437} 2435}
@@ -2441,13 +2439,12 @@ static int musb_resume_noirq(struct device *dev)
2441 struct platform_device *pdev = to_platform_device(dev); 2439 struct platform_device *pdev = to_platform_device(dev);
2442 struct musb *musb = dev_to_musb(&pdev->dev); 2440 struct musb *musb = dev_to_musb(&pdev->dev);
2443 2441
2444 if (!musb->clock) 2442 if (musb->clock) {
2445 return 0; 2443 if (musb->set_clock)
2446 2444 musb->set_clock(musb->clock, 1);
2447 if (musb->set_clock) 2445 else
2448 musb->set_clock(musb->clock, 1); 2446 clk_enable(musb->clock);
2449 else 2447 }
2450 clk_enable(musb->clock);
2451 2448
2452 musb_restore_context(musb); 2449 musb_restore_context(musb);
2453 2450
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5b46a7..febaabcc2b35 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@ struct musb_context_registers {
487}; 487};
488 488
489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
490 defined(CONFIG_ARCH_OMAP4) 490 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
491extern void musb_platform_save_context(struct musb *musb, 491extern void musb_platform_save_context(struct musb *musb,
492 struct musb_context_registers *musb_context); 492 struct musb_context_registers *musb_context);
493extern void musb_platform_restore_context(struct musb *musb, 493extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d815049cbaa..36cfd060dbe5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
644 */ 644 */
645 645
646 csr |= MUSB_RXCSR_DMAENAB; 646 csr |= MUSB_RXCSR_DMAENAB;
647 if (!musb_ep->hb_mult &&
648 musb_ep->hw_ep->rx_double_buffered)
649 csr |= MUSB_RXCSR_AUTOCLEAR;
650#ifdef USE_MODE1 647#ifdef USE_MODE1
648 csr |= MUSB_RXCSR_AUTOCLEAR;
651 /* csr |= MUSB_RXCSR_DMAMODE; */ 649 /* csr |= MUSB_RXCSR_DMAMODE; */
652 650
653 /* this special sequence (enabling and then 651 /* this special sequence (enabling and then
@@ -656,6 +654,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
656 */ 654 */
657 musb_writew(epio, MUSB_RXCSR, 655 musb_writew(epio, MUSB_RXCSR,
658 csr | MUSB_RXCSR_DMAMODE); 656 csr | MUSB_RXCSR_DMAMODE);
657#else
658 if (!musb_ep->hb_mult &&
659 musb_ep->hw_ep->rx_double_buffered)
660 csr |= MUSB_RXCSR_AUTOCLEAR;
659#endif 661#endif
660 musb_writew(epio, MUSB_RXCSR, csr); 662 musb_writew(epio, MUSB_RXCSR, csr);
661 663
@@ -807,7 +809,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
807 809
808#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) 810#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
809 /* Autoclear doesn't clear RxPktRdy for short packets */ 811 /* Autoclear doesn't clear RxPktRdy for short packets */
810 if ((dma->desired_mode == 0) 812 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
811 || (dma->actual_len 813 || (dma->actual_len
812 & (musb_ep->packet_sz - 1))) { 814 & (musb_ep->packet_sz - 1))) {
813 /* ack the read! */ 815 /* ack the read! */
@@ -818,8 +820,16 @@ void musb_g_rx(struct musb *musb, u8 epnum)
818 /* incomplete, and not short? wait for next IN packet */ 820 /* incomplete, and not short? wait for next IN packet */
819 if ((request->actual < request->length) 821 if ((request->actual < request->length)
820 && (musb_ep->dma->actual_len 822 && (musb_ep->dma->actual_len
821 == musb_ep->packet_sz)) 823 == musb_ep->packet_sz)) {
	824			/* In the double-buffered case, continue to unload the
	825			 * FIFO if there is an Rx packet in it.
	826			 */
827 csr = musb_readw(epio, MUSB_RXCSR);
828 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
829 hw_ep->rx_double_buffered)
830 goto exit;
822 return; 831 return;
832 }
823#endif 833#endif
824 musb_g_giveback(musb_ep, request, 0); 834 musb_g_giveback(musb_ep, request, 0);
825 835
@@ -827,7 +837,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
827 if (!request) 837 if (!request)
828 return; 838 return;
829 } 839 }
830 840exit:
831 /* Analyze request */ 841 /* Analyze request */
832 rxstate(musb, to_musb_request(request)); 842 rxstate(musb, to_musb_request(request));
833} 843}
@@ -916,13 +926,9 @@ static int musb_gadget_enable(struct usb_ep *ep,
916 * likewise high bandwidth periodic tx 926 * likewise high bandwidth periodic tx
917 */ 927 */
918 /* Set TXMAXP with the FIFO size of the endpoint 928 /* Set TXMAXP with the FIFO size of the endpoint
919 * to disable double buffering mode. Currently, It seems that double 929 * to disable double buffering mode.
920 * buffering has problem if musb RTL revision number < 2.0.
921 */ 930 */
922 if (musb->hwvers < MUSB_HWVERS_2000) 931 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
923 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
924 else
925 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
926 932
927 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 933 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
928 if (musb_readw(regs, MUSB_TXCSR) 934 if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
958 /* Set RXMAXP with the FIFO size of the endpoint 964 /* Set RXMAXP with the FIFO size of the endpoint
959 * to disable double buffering mode. 965 * to disable double buffering mode.
960 */ 966 */
961 if (musb->hwvers < MUSB_HWVERS_2000) 967 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
962 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
963 else
964 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
965 968
966 /* force shared fifo to OUT-only mode */ 969 /* force shared fifo to OUT-only mode */
967 if (hw_ep->is_shared_fifo) { 970 if (hw_ep->is_shared_fifo) {
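
Both MAXP hunks above drop the RTL-version special case and always program the packet size together with the high-bandwidth multiplier: bits 10:0 carry the payload size and bits 12:11 the multiplier, mirroring the USB 2.0 wMaxPacketSize layout. The packing in isolation, with an assumed multiplier for the demo:

    #include <stdio.h>

    int main(void)
    {
            unsigned short packet_sz = 1024; /* example payload size */
            unsigned short hb_mult = 2;      /* assumed multiplier */
            unsigned short maxp = packet_sz | (hb_mult << 11);

            printf("MAXP = 0x%04x\n", maxp); /* 0x1400 */
            return 0;
    }
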
@@ -1166,8 +1169,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1166 : DMA_FROM_DEVICE); 1169 : DMA_FROM_DEVICE);
1167 request->mapped = 0; 1170 request->mapped = 0;
1168 } 1171 }
1169 } else if (!req->buf) {
1170 return -ENODATA;
1171 } else 1172 } else
1172 request->mapped = 0; 1173 request->mapped = 0;
1173 1174
@@ -1695,8 +1696,10 @@ int __init musb_gadget_setup(struct musb *musb)
1695 musb_platform_try_idle(musb, 0); 1696 musb_platform_try_idle(musb, 0);
1696 1697
1697 status = device_register(&musb->g.dev); 1698 status = device_register(&musb->g.dev);
1698 if (status != 0) 1699 if (status != 0) {
1700 put_device(&musb->g.dev);
1699 the_gadget = NULL; 1701 the_gadget = NULL;
1702 }
1700 return status; 1703 return status;
1701} 1704}
1702 1705
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a60..5a727c5b8676 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
633 return 0; 633 return 0;
634} 634}
635 635
636static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) 636static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
637{ 637{
638 return 0;
638} 639}
639 640
640#endif /* CONFIG_BLACKFIN */ 641#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af5cbdb..563114d613d6 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel,
158 dma_addr_t dma_addr, u32 len) 158 dma_addr_t dma_addr, u32 len)
159{ 159{
160 struct musb_dma_channel *musb_channel = channel->private_data; 160 struct musb_dma_channel *musb_channel = channel->private_data;
161 struct musb_dma_controller *controller = musb_channel->controller;
162 struct musb *musb = controller->private_data;
161 163
162 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", 164 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
163 musb_channel->epnum, 165 musb_channel->epnum,
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel,
167 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
168 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
169 171
172 /*
173 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4 byte boundary.
175 * It ends up masking the last two bits of the address
176 * programmed in DMA_ADDR.
177 *
178 * Fail such DMA transfers, so that the backup PIO mode
179 * can carry out the transfer
180 */
181 if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
182 return false;
183
170 channel->actual_len = 0; 184 channel->actual_len = 0;
171 musb_channel->start_addr = dma_addr; 185 musb_channel->start_addr = dma_addr;
172 musb_channel->len = len; 186 musb_channel->len = len;
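
The musbhsdma hunk above rejects unaligned buffers up front: on RTL 1.8 and later the controller masks the low two bits of DMA_ADDR, so an unaligned transfer would be silently shifted and corrupt data, and returning false lets the caller fall back to PIO. The check in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    static bool dma_addr_ok(unsigned long dma_addr)
    {
            return (dma_addr % 4) == 0; /* equivalently !(dma_addr & 3) */
    }

    int main(void)
    {
            printf("%d %d\n", dma_addr_ok(0x1000), dma_addr_ok(0x1002)); /* 1 0 */
            return 0;
    }
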
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a5847803..76f8b3556672 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@ static struct usb_device_id id_table_combined [] = {
794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
797 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
798 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
797 { }, /* Optional parameter entry */ 799 { }, /* Optional parameter entry */
798 { } /* Terminating entry */ 800 { } /* Terminating entry */
799}; 801};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1fb6a..263f62551197 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,10 @@
1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C 1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D 1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
1103
1104/*
1105 * Milkymist One JTAG/Serial
1106 */
1107#define QIHARDWARE_VID 0x20B7
1108#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1109
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1bcf65..ef2977d3a613 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = {
518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, 518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
521 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 521 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2054b1e25a65..d1268191acbd 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
331 331
332 iu->iu_id = IU_ID_COMMAND; 332 iu->iu_id = IU_ID_COMMAND;
333 iu->tag = cpu_to_be16(stream_id); 333 iu->tag = cpu_to_be16(stream_id);
334 if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) 334 iu->prio_attr = UAS_SIMPLE_TAG;
335 iu->prio_attr = UAS_ORDERED_TAG;
336 else
337 iu->prio_attr = UAS_SIMPLE_TAG;
338 iu->len = len; 335 iu->len = len;
339 int_to_scsilun(sdev->lun, &iu->lun); 336 int_to_scsilun(sdev->lun, &iu->lun);
340 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); 337 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7110cb..e45e673b8770 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab
326 int bit_index; 326 int bit_index;
327 327
328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
329 329 if (!ai)
330 return UWB_RSV_ALLOC_NOT_FOUND;
330 ai->min_mas = rsv->min_mas; 331 ai->min_mas = rsv->min_mas;
331 ai->max_mas = rsv->max_mas; 332 ai->max_mas = rsv->max_mas;
332 ai->max_interval = rsv->max_interval; 333 ai->max_interval = rsv->max_interval;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4b4da5b86ff9..f442668a1e52 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -129,8 +129,9 @@ static void handle_tx(struct vhost_net *net)
129 size_t hdr_size; 129 size_t hdr_size;
130 struct socket *sock; 130 struct socket *sock;
131 131
132 sock = rcu_dereference_check(vq->private_data, 132 /* TODO: check that we are running from vhost_worker?
	133		lockdep_is_held(&vq->mutex));	133	 * Not sure it's worth it, it's straightforward enough. */
134 sock = rcu_dereference_check(vq->private_data, 1);
134 if (!sock) 135 if (!sock)
135 return; 136 return;
136 137
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 3ec24609151e..734c650a47c4 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
502 struct device_attribute *attr, const char *buf, size_t count) 502 struct device_attribute *attr, const char *buf, size_t count)
503{ 503{
504 struct adp8860_bl *data = dev_get_drvdata(dev); 504 struct adp8860_bl *data = dev_get_drvdata(dev);
505 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
506 if (ret)
507 return ret;
505 508
506 strict_strtoul(buf, 10, &data->cached_daylight_max);
507 return adp8860_store(dev, buf, count, ADP8860_BLMX1); 509 return adp8860_store(dev, buf, count, ADP8860_BLMX1);
508} 510}
509static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, 511static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
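
The hunk above stops discarding the strict_strtoul() return value: parse first, bail out on error, and only then act on the cached value. The same parse-then-commit pattern with the userspace strtoul() as a stand-in:

    #include <errno.h>
    #include <stdlib.h>

    static int store_value(const char *buf, unsigned long *cached)
    {
            char *end;
            unsigned long v = strtoul(buf, &end, 10);

            if (end == buf)
                    return -EINVAL; /* reject unparsable input before caching */
            *cached = v;
            return 0;
    }
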
@@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
614 if (val == 0) { 616 if (val == 0) {
615 /* Enable automatic ambient light sensing */ 617 /* Enable automatic ambient light sensing */
616 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 618 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
617 } else if ((val > 0) && (val < 6)) { 619 } else if ((val > 0) && (val <= 3)) {
618 /* Disable automatic ambient light sensing */ 620 /* Disable automatic ambient light sensing */
619 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 621 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
620 622
@@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
622 mutex_lock(&data->lock); 624 mutex_lock(&data->lock);
623 adp8860_read(data->client, ADP8860_CFGR, &reg_val); 625 adp8860_read(data->client, ADP8860_CFGR, &reg_val);
624 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); 626 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
625 reg_val |= val << CFGR_BLV_SHIFT; 627 reg_val |= (val - 1) << CFGR_BLV_SHIFT;
626 adp8860_write(data->client, ADP8860_CFGR, reg_val); 628 adp8860_write(data->client, ADP8860_CFGR, reg_val);
627 mutex_unlock(&data->lock); 629 mutex_unlock(&data->lock);
628 } 630 }
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9093ef0fa869..c67801e57aaf 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
78 const u16 slpin = 0x10; 78 const u16 slpin = 0x10;
79 const u16 disoff = 0x28; 79 const u16 disoff = 0x28;
80 80
81 if (power) { 81 if (power <= FB_BLANK_NORMAL) {
82 if (priv->lcd_on) 82 if (priv->lcd_on)
83 return 0; 83 return 0;
84 84
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index abc43a0eb97d..5d3cf33953ac 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
129 struct spi_device *spi = st->spi; 129 struct spi_device *spi = st->spi;
130 struct lms283gf05_pdata *pdata = spi->dev.platform_data; 130 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
131 131
132 if (power) { 132 if (power <= FB_BLANK_NORMAL) {
133 if (pdata) 133 if (pdata)
134 lms283gf05_reset(pdata->reset_gpio, 134 lms283gf05_reset(pdata->reset_gpio,
135 pdata->reset_inverted); 135 pdata->reset_inverted);
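
Both LCD drivers above had the blank-level test inverted. lcd_device power values follow the FB_BLANK scale from <linux/fb.h>, where FB_BLANK_UNBLANK (0) means fully on and larger values mean progressively deeper blanking, so "panel should be powered" is power <= FB_BLANK_NORMAL rather than a bare truth test on power. A self-contained sketch of the convention:

#include <stdbool.h>
#include <stdio.h>

/* Values as defined in <linux/fb.h>. */
enum {
        FB_BLANK_UNBLANK       = 0,  /* screen on */
        FB_BLANK_NORMAL        = 1,  /* blanked, sync still on */
        FB_BLANK_VSYNC_SUSPEND = 2,
        FB_BLANK_HSYNC_SUSPEND = 3,
        FB_BLANK_POWERDOWN     = 4,  /* screen off */
};

static bool panel_should_be_on(int power)
{
        /* Lower means "more on": UNBLANK and NORMAL keep the panel lit. */
        return power <= FB_BLANK_NORMAL;
}

int main(void)
{
        for (int p = FB_BLANK_UNBLANK; p <= FB_BLANK_POWERDOWN; p++)
                printf("power=%d -> panel %s\n", p,
                       panel_should_be_on(p) ? "on" : "off");
        return 0;
}
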
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9fb533f6373e..1485f7345f49 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
335 }, 335 },
336 .driver_data = (void *)&nvidia_chipset_data, 336 .driver_data = (void *)&nvidia_chipset_data,
337 }, 337 },
338 {
339 .callback = mbp_dmi_match,
340 .ident = "MacBookAir 3,1",
341 .matches = {
342 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
344 },
345 .driver_data = (void *)&nvidia_chipset_data,
346 },
347 {
348 .callback = mbp_dmi_match,
349 .ident = "MacBookAir 3,2",
350 .matches = {
351 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
352 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
353 },
354 .driver_data = (void *)&nvidia_chipset_data,
355 },
338 { } 356 { }
339}; 357};
340 358
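
The two new table entries whitelist the late-2010 MacBook Airs by DMI vendor and product strings. For orientation, a hedged sketch of how such a quirk table is typically declared and consumed at init time; the my_* names are placeholders, not this driver's, and only the MacBookAir3,1 match strings are copied from the patch:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/init.h>

static int my_dmi_match(const struct dmi_system_id *id)
{
        /* id->driver_data would carry the per-model quirk, as above. */
        return 1;
}

static const struct dmi_system_id __initdata my_table[] = {
        {
                .callback = my_dmi_match,
                .ident = "MacBookAir 3,1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
                },
        },
        { } /* terminator */
};

static int __init my_init(void)
{
        /* dmi_check_system() returns the number of matching entries,
         * running each entry's callback; 0 means an unknown system. */
        if (!dmi_check_system(my_table))
                return -ENODEV;
        return 0;
}
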
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 550443518891..21866ec69656 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -25,6 +25,7 @@ struct pwm_bl_data {
25 struct pwm_device *pwm; 25 struct pwm_device *pwm;
26 struct device *dev; 26 struct device *dev;
27 unsigned int period; 27 unsigned int period;
28 unsigned int lth_brightness;
28 int (*notify)(struct device *, 29 int (*notify)(struct device *,
29 int brightness); 30 int brightness);
30}; 31};
@@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
48 pwm_config(pb->pwm, 0, pb->period); 49 pwm_config(pb->pwm, 0, pb->period);
49 pwm_disable(pb->pwm); 50 pwm_disable(pb->pwm);
50 } else { 51 } else {
51 pwm_config(pb->pwm, brightness * pb->period / max, pb->period); 52 brightness = pb->lth_brightness +
53 (brightness * (pb->period - pb->lth_brightness) / max);
54 pwm_config(pb->pwm, brightness, pb->period);
52 pwm_enable(pb->pwm); 55 pwm_enable(pb->pwm);
53 } 56 }
54 return 0; 57 return 0;
@@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
92 95
93 pb->period = data->pwm_period_ns; 96 pb->period = data->pwm_period_ns;
94 pb->notify = data->notify; 97 pb->notify = data->notify;
98 pb->lth_brightness = data->lth_brightness *
99 (data->pwm_period_ns / data->max_brightness);
95 pb->dev = &pdev->dev; 100 pb->dev = &pdev->dev;
96 101
97 pb->pwm = pwm_request(data->pwm_id, "backlight"); 102 pb->pwm = pwm_request(data->pwm_id, "backlight");
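
The new lth_brightness field imposes a floor on the PWM duty cycle: probe pre-scales the platform value to nanoseconds, and update_status() then maps user brightness 0..max onto [lth, period] instead of [0, period], so dim settings cannot fall below the panel's usable threshold. A runnable sketch of the arithmetic (the constants are made up for illustration; note the driver turns the PWM fully off for brightness 0 before ever reaching this path):

#include <stdio.h>

/* Map user brightness (0..max) onto a duty cycle in [lth, period],
 * mirroring pwm_backlight_update_status() after the patch. */
static unsigned int duty_ns(unsigned int brightness, unsigned int max,
                            unsigned int period, unsigned int lth)
{
        return lth + brightness * (period - lth) / max;
}

int main(void)
{
        const unsigned int period = 5000000;           /* 5 ms, 200 Hz */
        const unsigned int max = 255;
        /* As in probe: lth_brightness pre-scaled to nanoseconds. */
        const unsigned int lth = 10 * (period / max);  /* ~4% floor */

        for (unsigned int b = 0; b <= max; b += 85)
                printf("brightness %3u -> %u ns of %u\n",
                       b, duty_ns(b, max, period, lth), period);
        return 0;
}
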
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index a3128c9cb7ad..5927db0da999 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
729 729
730 return strlen(buf); 730 return strlen(buf);
731} 731}
732static DEVICE_ATTR(gamma_table, 0644, 732static DEVICE_ATTR(gamma_table, 0444,
733 s6e63m0_sysfs_show_gamma_table, NULL); 733 s6e63m0_sysfs_show_gamma_table, NULL);
734 734
735static int __init s6e63m0_probe(struct spi_device *spi) 735static int __devinit s6e63m0_probe(struct spi_device *spi)
736{ 736{
737 int ret = 0; 737 int ret = 0;
738 struct s6e63m0 *lcd = NULL; 738 struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); 829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
830 830
831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN); 831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
832 device_remove_file(&spi->dev, &dev_attr_gamma_table);
833 device_remove_file(&spi->dev, &dev_attr_gamma_mode);
834 backlight_device_unregister(lcd->bd);
832 lcd_device_unregister(lcd->ld); 835 lcd_device_unregister(lcd->ld);
833 kfree(lcd); 836 kfree(lcd);
834 837
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f548a8e..321a0c8346e5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@ static void restore_cpu_virqs(unsigned int cpu)
1299 evtchn_to_irq[evtchn] = irq; 1299 evtchn_to_irq[evtchn] = irq;
1300 irq_info[irq] = mk_virq_info(evtchn, virq); 1300 irq_info[irq] = mk_virq_info(evtchn, virq);
1301 bind_evtchn_to_cpu(evtchn, cpu); 1301 bind_evtchn_to_cpu(evtchn, cpu);
1302
1303 /* Ready for use. */
1304 unmask_evtchn(evtchn);
1305 } 1302 }
1306} 1303}
1307 1304
@@ -1327,10 +1324,6 @@ static void restore_cpu_ipis(unsigned int cpu)
1327 evtchn_to_irq[evtchn] = irq; 1324 evtchn_to_irq[evtchn] = irq;
1328 irq_info[irq] = mk_ipi_info(evtchn, ipi); 1325 irq_info[irq] = mk_ipi_info(evtchn, ipi);
1329 bind_evtchn_to_cpu(evtchn, cpu); 1326 bind_evtchn_to_cpu(evtchn, cpu);
1330
1331 /* Ready for use. */
1332 unmask_evtchn(evtchn);
1333
1334 } 1327 }
1335} 1328}
1336 1329
@@ -1390,6 +1383,7 @@ void xen_poll_irq(int irq)
1390void xen_irq_resume(void) 1383void xen_irq_resume(void)
1391{ 1384{
1392 unsigned int cpu, irq, evtchn; 1385 unsigned int cpu, irq, evtchn;
1386 struct irq_desc *desc;
1393 1387
1394 init_evtchn_cpu_bindings(); 1388 init_evtchn_cpu_bindings();
1395 1389
@@ -1408,6 +1402,23 @@ void xen_irq_resume(void)
1408 restore_cpu_virqs(cpu); 1402 restore_cpu_virqs(cpu);
1409 restore_cpu_ipis(cpu); 1403 restore_cpu_ipis(cpu);
1410 } 1404 }
1405
1406 /*
1407 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
1408 * are not handled by the IRQ core.
1409 */
1410 for_each_irq_desc(irq, desc) {
1411 if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
1412 continue;
1413 if (desc->status & IRQ_DISABLED)
1414 continue;
1415
1416 evtchn = evtchn_from_irq(irq);
1417 if (evtchn == -1)
1418 continue;
1419
1420 unmask_evtchn(evtchn);
1421 }
1411} 1422}
1412 1423
1413static struct irq_chip xen_dynamic_chip __read_mostly = { 1424static struct irq_chip xen_dynamic_chip __read_mostly = {
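
With the unconditional unmask dropped from restore_cpu_virqs() and restore_cpu_ipis(), xen_irq_resume() now re-unmasks only event channels whose IRQ carries an IRQF_NO_SUSPEND action and is not disabled, since (per the patch's own comment) the IRQ core does not handle those on resume. Condensed into one sketch, using the same era's desc->status test; evtchn_from_irq() and unmask_evtchn() are events.c's own helpers:

#include <linux/interrupt.h>
#include <linux/irq.h>

extern int evtchn_from_irq(unsigned irq);
extern void unmask_evtchn(int port);

static void unmask_no_suspend_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        int evtchn;

        for_each_irq_desc(irq, desc) {
                /* Only IRQs explicitly marked as active across suspend. */
                if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
                        continue;
                /* Respect the enabled/disabled state set by drivers. */
                if (desc->status & IRQ_DISABLED)
                        continue;

                evtchn = evtchn_from_irq(irq);
                if (evtchn == -1)
                        continue;
                unmask_evtchn(evtchn);
        }
}
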
diff --git a/fs/bio.c b/fs/bio.c
index 8abb2dfb2e7c..4bd454fa844e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
370{ 370{
371 struct bio *bio; 371 struct bio *bio;
372 372
373 if (nr_iovecs > UIO_MAXIOV)
374 return NULL;
375
373 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), 376 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
374 gfp_mask); 377 gfp_mask);
375 if (unlikely(!bio)) 378 if (unlikely(!bio))
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
697static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, 700static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
698 gfp_t gfp_mask) 701 gfp_t gfp_mask)
699{ 702{
700 struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); 703 struct bio_map_data *bmd;
701 704
705 if (iov_count > UIO_MAXIOV)
706 return NULL;
707
708 bmd = kmalloc(sizeof(*bmd), gfp_mask);
702 if (!bmd) 709 if (!bmd)
703 return NULL; 710 return NULL;
704 711
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
827 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; 834 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
828 start = uaddr >> PAGE_SHIFT; 835 start = uaddr >> PAGE_SHIFT;
829 836
837 /*
838 * Overflow, abort
839 */
840 if (end < start)
841 return ERR_PTR(-EINVAL);
842
830 nr_pages += end - start; 843 nr_pages += end - start;
831 len += iov[i].iov_len; 844 len += iov[i].iov_len;
832 } 845 }
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
955 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 968 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
956 unsigned long start = uaddr >> PAGE_SHIFT; 969 unsigned long start = uaddr >> PAGE_SHIFT;
957 970
971 /*
972 * Overflow, abort
973 */
974 if (end < start)
975 return ERR_PTR(-EINVAL);
976
958 nr_pages += end - start; 977 nr_pages += end - start;
959 /* 978 /*
960 * buffer must be aligned to at least hardsector size for now 979 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
982 unsigned long start = uaddr >> PAGE_SHIFT; 1001 unsigned long start = uaddr >> PAGE_SHIFT;
983 const int local_nr_pages = end - start; 1002 const int local_nr_pages = end - start;
984 const int page_limit = cur_page + local_nr_pages; 1003 const int page_limit = cur_page + local_nr_pages;
985 1004
986 ret = get_user_pages_fast(uaddr, local_nr_pages, 1005 ret = get_user_pages_fast(uaddr, local_nr_pages,
987 write_to_vm, &pages[cur_page]); 1006 write_to_vm, &pages[cur_page]);
988 if (ret < local_nr_pages) { 1007 if (ret < local_nr_pages) {
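
The fs/bio.c additions all guard arithmetic on user-controlled iovecs: iovec counts are capped at UIO_MAXIOV before feeding the size calculation to kmalloc(), and the page-range computation rejects ranges where uaddr + len wraps the address space, which would make end land below start and poison nr_pages. A runnable sketch of the wrap check:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Number of pages spanned by [uaddr, uaddr + len), or -1 if the
 * range wraps the address space. */
static long span_pages(unsigned long uaddr, unsigned long len)
{
        unsigned long end   = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = uaddr >> PAGE_SHIFT;

        if (end < start)        /* uaddr + len overflowed */
                return -1;
        return (long)(end - start);
}

int main(void)
{
        printf("%ld\n", span_pages(0x1000, 8192));          /* 2 pages */
        printf("%ld\n", span_pages(ULONG_MAX - 10, 4096));  /* wraps: -1 */
        return 0;
}
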
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 5aff46c61e52..355abcdcda98 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -81,7 +81,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
81 81
82v) mount check for unmatched uids 82v) mount check for unmatched uids
83 83
84w) Add support for new vfs entry points for setlease and fallocate 84w) Add support for new vfs entry point for fallocate
85 85
86x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of 86x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
87processes can proceed better in parallel (on the server) 87processes can proceed better in parallel (on the server)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 525ba59a4105..e9a393c9c2ca 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
15 * the GNU Lesser General Public License for more details. 15 * the GNU Lesser General Public License for more details.
16 * 16 *
17 */ 17 */
18#include <linux/radix-tree.h> 18#include <linux/rbtree.h>
19 19
20#ifndef _CIFS_FS_SB_H 20#ifndef _CIFS_FS_SB_H
21#define _CIFS_FS_SB_H 21#define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ 42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
43 43
44struct cifs_sb_info { 44struct cifs_sb_info {
45 struct radix_tree_root tlink_tree; 45 struct rb_root tlink_tree;
46#define CIFS_TLINK_MASTER_TAG 0 /* is "master" (mount) tcon */
47 spinlock_t tlink_tree_lock; 46 spinlock_t tlink_tree_lock;
47 struct tcon_link *master_tlink;
48 struct nls_table *local_nls; 48 struct nls_table *local_nls;
49 unsigned int rsize; 49 unsigned int rsize;
50 unsigned int wsize; 50 unsigned int wsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 75c4eaa79588..9c3789762ab7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
116 return -ENOMEM; 116 return -ENOMEM;
117 117
118 spin_lock_init(&cifs_sb->tlink_tree_lock); 118 spin_lock_init(&cifs_sb->tlink_tree_lock);
119 INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL); 119 cifs_sb->tlink_tree = RB_ROOT;
120 120
121 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 121 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
122 if (rc) { 122 if (rc) {
@@ -321,8 +321,7 @@ cifs_alloc_inode(struct super_block *sb)
321 /* Until the file is open and we have gotten oplock 321 /* Until the file is open and we have gotten oplock
322 info back from the server, can not assume caching of 322 info back from the server, can not assume caching of
323 file data or metadata */ 323 file data or metadata */
324 cifs_inode->clientCanCacheRead = false; 324 cifs_set_oplock_level(cifs_inode, 0);
325 cifs_inode->clientCanCacheAll = false;
326 cifs_inode->delete_pending = false; 325 cifs_inode->delete_pending = false;
327 cifs_inode->invalid_mapping = false; 326 cifs_inode->invalid_mapping = false;
328 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ 327 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f259e4d7612d..b577bf0a1bb3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -336,7 +336,8 @@ struct cifsTconInfo {
336 * "get" on the container. 336 * "get" on the container.
337 */ 337 */
338struct tcon_link { 338struct tcon_link {
339 unsigned long tl_index; 339 struct rb_node tl_rbnode;
340 uid_t tl_uid;
340 unsigned long tl_flags; 341 unsigned long tl_flags;
341#define TCON_LINK_MASTER 0 342#define TCON_LINK_MASTER 0
342#define TCON_LINK_PENDING 1 343#define TCON_LINK_PENDING 1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index edb6d90efdf2..7ed69b6b5fe6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@ extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
104extern u64 cifs_UnixTimeToNT(struct timespec); 104extern u64 cifs_UnixTimeToNT(struct timespec);
105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
106 int offset); 106 int offset);
107extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
107 108
108extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle, 109extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
109 struct file *file, struct tcon_link *tlink, 110 struct file *file, struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9eb327defa1d..251a17c03545 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -116,6 +116,7 @@ struct smb_vol {
116 116
117static int ipv4_connect(struct TCP_Server_Info *server); 117static int ipv4_connect(struct TCP_Server_Info *server);
118static int ipv6_connect(struct TCP_Server_Info *server); 118static int ipv6_connect(struct TCP_Server_Info *server);
119static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
119static void cifs_prune_tlinks(struct work_struct *work); 120static void cifs_prune_tlinks(struct work_struct *work);
120 121
121/* 122/*
@@ -2900,24 +2901,16 @@ remote_path_check:
2900 goto mount_fail_check; 2901 goto mount_fail_check;
2901 } 2902 }
2902 2903
2903 tlink->tl_index = pSesInfo->linux_uid; 2904 tlink->tl_uid = pSesInfo->linux_uid;
2904 tlink->tl_tcon = tcon; 2905 tlink->tl_tcon = tcon;
2905 tlink->tl_time = jiffies; 2906 tlink->tl_time = jiffies;
2906 set_bit(TCON_LINK_MASTER, &tlink->tl_flags); 2907 set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
2907 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 2908 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
2908 2909
2909 rc = radix_tree_preload(GFP_KERNEL); 2910 cifs_sb->master_tlink = tlink;
2910 if (rc == -ENOMEM) {
2911 kfree(tlink);
2912 goto mount_fail_check;
2913 }
2914
2915 spin_lock(&cifs_sb->tlink_tree_lock); 2911 spin_lock(&cifs_sb->tlink_tree_lock);
2916 radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink); 2912 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
2917 radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
2918 CIFS_TLINK_MASTER_TAG);
2919 spin_unlock(&cifs_sb->tlink_tree_lock); 2913 spin_unlock(&cifs_sb->tlink_tree_lock);
2920 radix_tree_preload_end();
2921 2914
2922 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 2915 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
2923 TLINK_IDLE_EXPIRE); 2916 TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3107int 3100int
3108cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3101cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3109{ 3102{
3110 int i, ret; 3103 struct rb_root *root = &cifs_sb->tlink_tree;
3104 struct rb_node *node;
3105 struct tcon_link *tlink;
3111 char *tmp; 3106 char *tmp;
3112 struct tcon_link *tlink[8];
3113 unsigned long index = 0;
3114 3107
3115 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3108 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3116 3109
3117 do { 3110 spin_lock(&cifs_sb->tlink_tree_lock);
3118 spin_lock(&cifs_sb->tlink_tree_lock); 3111 while ((node = rb_first(root))) {
3119 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree, 3112 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3120 (void **)tlink, index, 3113 cifs_get_tlink(tlink);
3121 ARRAY_SIZE(tlink)); 3114 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3122 /* increment index for next pass */ 3115 rb_erase(node, root);
3123 if (ret > 0)
3124 index = tlink[ret - 1]->tl_index + 1;
3125 for (i = 0; i < ret; i++) {
3126 cifs_get_tlink(tlink[i]);
3127 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
3128 radix_tree_delete(&cifs_sb->tlink_tree,
3129 tlink[i]->tl_index);
3130 }
3131 spin_unlock(&cifs_sb->tlink_tree_lock);
3132 3116
3133 for (i = 0; i < ret; i++) 3117 spin_unlock(&cifs_sb->tlink_tree_lock);
3134 cifs_put_tlink(tlink[i]); 3118 cifs_put_tlink(tlink);
3135 } while (ret != 0); 3119 spin_lock(&cifs_sb->tlink_tree_lock);
3120 }
3121 spin_unlock(&cifs_sb->tlink_tree_lock);
3136 3122
3137 tmp = cifs_sb->prepath; 3123 tmp = cifs_sb->prepath;
3138 cifs_sb->prepathlen = 0; 3124 cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@ out:
3271 return tcon; 3257 return tcon;
3272} 3258}
3273 3259
3274static struct tcon_link * 3260static inline struct tcon_link *
3275cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) 3261cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3276{ 3262{
3277 struct tcon_link *tlink; 3263 return cifs_sb->master_tlink;
3278 unsigned int ret;
3279
3280 spin_lock(&cifs_sb->tlink_tree_lock);
3281 ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
3282 0, 1, CIFS_TLINK_MASTER_TAG);
3283 spin_unlock(&cifs_sb->tlink_tree_lock);
3284
3285 /* the master tcon should always be present */
3286 if (ret == 0)
3287 BUG();
3288
3289 return tlink;
3290} 3264}
3291 3265
3292struct cifsTconInfo * 3266struct cifsTconInfo *
@@ -3302,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
3302 return signal_pending(current) ? -ERESTARTSYS : 0; 3276 return signal_pending(current) ? -ERESTARTSYS : 0;
3303} 3277}
3304 3278
3279/* find and return a tlink with given uid */
3280static struct tcon_link *
3281tlink_rb_search(struct rb_root *root, uid_t uid)
3282{
3283 struct rb_node *node = root->rb_node;
3284 struct tcon_link *tlink;
3285
3286 while (node) {
3287 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3288
3289 if (tlink->tl_uid > uid)
3290 node = node->rb_left;
3291 else if (tlink->tl_uid < uid)
3292 node = node->rb_right;
3293 else
3294 return tlink;
3295 }
3296 return NULL;
3297}
3298
3299/* insert a tcon_link into the tree */
3300static void
3301tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
3302{
3303 struct rb_node **new = &(root->rb_node), *parent = NULL;
3304 struct tcon_link *tlink;
3305
3306 while (*new) {
3307 tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
3308 parent = *new;
3309
3310 if (tlink->tl_uid > new_tlink->tl_uid)
3311 new = &((*new)->rb_left);
3312 else
3313 new = &((*new)->rb_right);
3314 }
3315
3316 rb_link_node(&new_tlink->tl_rbnode, parent, new);
3317 rb_insert_color(&new_tlink->tl_rbnode, root);
3318}
3319
3305/* 3320/*
3306 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the 3321 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
3307 * current task. 3322 * current task.
@@ -3309,7 +3324,7 @@ cifs_sb_tcon_pending_wait(void *unused)
3309 * If the superblock doesn't refer to a multiuser mount, then just return 3324 * If the superblock doesn't refer to a multiuser mount, then just return
3310 * the master tcon for the mount. 3325 * the master tcon for the mount.
3311 * 3326 *
3312 * First, search the radix tree for an existing tcon for this fsuid. If one 3327 * First, search the rbtree for an existing tcon for this fsuid. If one
3313 * exists, then check to see if it's pending construction. If it is then wait 3328 * exists, then check to see if it's pending construction. If it is then wait
3314 * for construction to complete. Once it's no longer pending, check to see if 3329 * for construction to complete. Once it's no longer pending, check to see if
3315 * it failed and either return an error or retry construction, depending on 3330 * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@ struct tcon_link *
3322cifs_sb_tlink(struct cifs_sb_info *cifs_sb) 3337cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
3323{ 3338{
3324 int ret; 3339 int ret;
3325 unsigned long fsuid = (unsigned long) current_fsuid(); 3340 uid_t fsuid = current_fsuid();
3326 struct tcon_link *tlink, *newtlink; 3341 struct tcon_link *tlink, *newtlink;
3327 3342
3328 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 3343 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3329 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 3344 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
3330 3345
3331 spin_lock(&cifs_sb->tlink_tree_lock); 3346 spin_lock(&cifs_sb->tlink_tree_lock);
3332 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid); 3347 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
3333 if (tlink) 3348 if (tlink)
3334 cifs_get_tlink(tlink); 3349 cifs_get_tlink(tlink);
3335 spin_unlock(&cifs_sb->tlink_tree_lock); 3350 spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
3338 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 3353 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3339 if (newtlink == NULL) 3354 if (newtlink == NULL)
3340 return ERR_PTR(-ENOMEM); 3355 return ERR_PTR(-ENOMEM);
3341 newtlink->tl_index = fsuid; 3356 newtlink->tl_uid = fsuid;
3342 newtlink->tl_tcon = ERR_PTR(-EACCES); 3357 newtlink->tl_tcon = ERR_PTR(-EACCES);
3343 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); 3358 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
3344 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); 3359 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
3345 cifs_get_tlink(newtlink); 3360 cifs_get_tlink(newtlink);
3346 3361
3347 ret = radix_tree_preload(GFP_KERNEL);
3348 if (ret != 0) {
3349 kfree(newtlink);
3350 return ERR_PTR(ret);
3351 }
3352
3353 spin_lock(&cifs_sb->tlink_tree_lock); 3362 spin_lock(&cifs_sb->tlink_tree_lock);
3354 /* was one inserted after previous search? */ 3363 /* was one inserted after previous search? */
3355 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid); 3364 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
3356 if (tlink) { 3365 if (tlink) {
3357 cifs_get_tlink(tlink); 3366 cifs_get_tlink(tlink);
3358 spin_unlock(&cifs_sb->tlink_tree_lock); 3367 spin_unlock(&cifs_sb->tlink_tree_lock);
3359 radix_tree_preload_end();
3360 kfree(newtlink); 3368 kfree(newtlink);
3361 goto wait_for_construction; 3369 goto wait_for_construction;
3362 } 3370 }
3363 ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
3364 spin_unlock(&cifs_sb->tlink_tree_lock);
3365 radix_tree_preload_end();
3366 if (ret) {
3367 kfree(newtlink);
3368 return ERR_PTR(ret);
3369 }
3370 tlink = newtlink; 3371 tlink = newtlink;
3372 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3373 spin_unlock(&cifs_sb->tlink_tree_lock);
3371 } else { 3374 } else {
3372wait_for_construction: 3375wait_for_construction:
3373 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, 3376 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
3413{ 3416{
3414 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, 3417 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
3415 prune_tlinks.work); 3418 prune_tlinks.work);
3416 struct tcon_link *tlink[8]; 3419 struct rb_root *root = &cifs_sb->tlink_tree;
3417 unsigned long now = jiffies; 3420 struct rb_node *node = rb_first(root);
3418 unsigned long index = 0; 3421 struct rb_node *tmp;
3419 int i, ret; 3422 struct tcon_link *tlink;
3420 3423
3421 do { 3424 /*
3422 spin_lock(&cifs_sb->tlink_tree_lock); 3425 * Because we drop the spinlock in the loop in order to put the tlink
3423 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree, 3426 * it's not guarded against removal of links from the tree. The only
3424 (void **)tlink, index, 3427 * places that remove entries from the tree are this function and
3425 ARRAY_SIZE(tlink)); 3428 * umounts. Because this function is non-reentrant and is canceled
3426 /* increment index for next pass */ 3429 * before umount can proceed, this is safe.
3427 if (ret > 0) 3430 */
3428 index = tlink[ret - 1]->tl_index + 1; 3431 spin_lock(&cifs_sb->tlink_tree_lock);
3429 for (i = 0; i < ret; i++) { 3432 node = rb_first(root);
3430 if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) || 3433 while (node != NULL) {
3431 atomic_read(&tlink[i]->tl_count) != 0 || 3434 tmp = node;
3432 time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE, 3435 node = rb_next(tmp);
3433 now)) { 3436 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
3434 tlink[i] = NULL; 3437
3435 continue; 3438 if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
3436 } 3439 atomic_read(&tlink->tl_count) != 0 ||
3437 cifs_get_tlink(tlink[i]); 3440 time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
3438 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags); 3441 continue;
3439 radix_tree_delete(&cifs_sb->tlink_tree,
3440 tlink[i]->tl_index);
3441 }
3442 spin_unlock(&cifs_sb->tlink_tree_lock);
3443 3442
3444 for (i = 0; i < ret; i++) { 3443 cifs_get_tlink(tlink);
3445 if (tlink[i] != NULL) 3444 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3446 cifs_put_tlink(tlink[i]); 3445 rb_erase(tmp, root);
3447 } 3446
3448 } while (ret != 0); 3447 spin_unlock(&cifs_sb->tlink_tree_lock);
3448 cifs_put_tlink(tlink);
3449 spin_lock(&cifs_sb->tlink_tree_lock);
3450 }
3451 spin_unlock(&cifs_sb->tlink_tree_lock);
3449 3452
3450 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 3453 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
3451 TLINK_IDLE_EXPIRE); 3454 TLINK_IDLE_EXPIRE);
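
The tlink container changes here from a radix tree keyed on tl_index to an rbtree keyed on the new tl_uid, plus a cached master_tlink pointer, which removes the radix_tree_preload()/preload_end() dance and its insertion failure paths. The pruning loop also shows a reusable idiom: rb_next() is fetched before rb_erase(), and the spinlock is dropped around the final put and retaken, which the new comment justifies by pruning being single-threaded and cancelled before umount. A kernel-style sketch of that shape (struct entry, its busy test, and prune() are illustrative, not cifs code):

#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct entry {
        struct rb_node  node;
        int             busy;
};

static void prune(struct rb_root *root, spinlock_t *lock)
{
        struct rb_node *n, *tmp;

        spin_lock(lock);
        n = rb_first(root);
        while (n) {
                tmp = n;
                n = rb_next(tmp);       /* advance before any erase */
                if (rb_entry(tmp, struct entry, node)->busy)
                        continue;
                rb_erase(tmp, root);

                spin_unlock(lock);      /* the put may sleep */
                /* ...drop the entry's references here... */
                spin_lock(lock);        /* safe only if single-threaded */
        }
        spin_unlock(lock);
}
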
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ae82159cf7fa..06c3e83fa387 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -146,12 +146,7 @@ client_can_cache:
146 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 146 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
147 xid, NULL); 147 xid, NULL);
148 148
149 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 149 cifs_set_oplock_level(pCifsInode, oplock);
150 pCifsInode->clientCanCacheAll = true;
151 pCifsInode->clientCanCacheRead = true;
152 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
153 } else if ((oplock & 0xF) == OPLOCK_READ)
154 pCifsInode->clientCanCacheRead = true;
155 150
156 return rc; 151 return rc;
157} 152}
@@ -253,12 +248,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
253 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList); 248 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
254 spin_unlock(&cifs_file_list_lock); 249 spin_unlock(&cifs_file_list_lock);
255 250
256 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 251 cifs_set_oplock_level(pCifsInode, oplock);
257 pCifsInode->clientCanCacheAll = true;
258 pCifsInode->clientCanCacheRead = true;
259 cFYI(1, "Exclusive Oplock inode %p", inode);
260 } else if ((oplock & 0xF) == OPLOCK_READ)
261 pCifsInode->clientCanCacheRead = true;
262 252
263 file->private_data = pCifsFile; 253 file->private_data = pCifsFile;
264 return pCifsFile; 254 return pCifsFile;
@@ -271,8 +261,9 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
271 */ 261 */
272void cifsFileInfo_put(struct cifsFileInfo *cifs_file) 262void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
273{ 263{
264 struct inode *inode = cifs_file->dentry->d_inode;
274 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink); 265 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
275 struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode); 266 struct cifsInodeInfo *cifsi = CIFS_I(inode);
276 struct cifsLockInfo *li, *tmp; 267 struct cifsLockInfo *li, *tmp;
277 268
278 spin_lock(&cifs_file_list_lock); 269 spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
288 if (list_empty(&cifsi->openFileList)) { 279 if (list_empty(&cifsi->openFileList)) {
289 cFYI(1, "closing last open instance for inode %p", 280 cFYI(1, "closing last open instance for inode %p",
290 cifs_file->dentry->d_inode); 281 cifs_file->dentry->d_inode);
291 cifsi->clientCanCacheRead = false; 282 cifs_set_oplock_level(cifsi, 0);
292 cifsi->clientCanCacheAll = false;
293 } 283 }
294 spin_unlock(&cifs_file_list_lock); 284 spin_unlock(&cifs_file_list_lock);
295 285
@@ -607,8 +597,6 @@ reopen_success:
607 rc = filemap_write_and_wait(inode->i_mapping); 597 rc = filemap_write_and_wait(inode->i_mapping);
608 mapping_set_error(inode->i_mapping, rc); 598 mapping_set_error(inode->i_mapping, rc);
609 599
610 pCifsInode->clientCanCacheAll = false;
611 pCifsInode->clientCanCacheRead = false;
612 if (tcon->unix_ext) 600 if (tcon->unix_ext)
613 rc = cifs_get_inode_info_unix(&inode, 601 rc = cifs_get_inode_info_unix(&inode,
614 full_path, inode->i_sb, xid); 602 full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@ reopen_success:
622 invalidate the current end of file on the server 610 invalidate the current end of file on the server
623 we can not go to the server to get the new inod 611 we can not go to the server to get the new inod
624 info */ 612 info */
625 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 613
626 pCifsInode->clientCanCacheAll = true; 614 cifs_set_oplock_level(pCifsInode, oplock);
627 pCifsInode->clientCanCacheRead = true; 615
628 cFYI(1, "Exclusive Oplock granted on inode %p",
629 pCifsFile->dentry->d_inode);
630 } else if ((oplock & 0xF) == OPLOCK_READ) {
631 pCifsInode->clientCanCacheRead = true;
632 pCifsInode->clientCanCacheAll = false;
633 } else {
634 pCifsInode->clientCanCacheRead = false;
635 pCifsInode->clientCanCacheAll = false;
636 }
637 cifs_relock_file(pCifsFile); 616 cifs_relock_file(pCifsFile);
638 617
639reopen_error_exit: 618reopen_error_exit:
@@ -775,12 +754,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
775 754
776 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 755 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
777 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink); 756 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
778
779 if (file->private_data == NULL) {
780 rc = -EBADF;
781 FreeXid(xid);
782 return rc;
783 }
784 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 757 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
785 758
786 if ((tcon->ses->capabilities & CAP_UNIX) && 759 if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
956ssize_t cifs_user_write(struct file *file, const char __user *write_data, 929ssize_t cifs_user_write(struct file *file, const char __user *write_data,
957 size_t write_size, loff_t *poffset) 930 size_t write_size, loff_t *poffset)
958{ 931{
932 struct inode *inode = file->f_path.dentry->d_inode;
959 int rc = 0; 933 int rc = 0;
960 unsigned int bytes_written = 0; 934 unsigned int bytes_written = 0;
961 unsigned int total_written; 935 unsigned int total_written;
@@ -963,7 +937,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
963 struct cifsTconInfo *pTcon; 937 struct cifsTconInfo *pTcon;
964 int xid, long_op; 938 int xid, long_op;
965 struct cifsFileInfo *open_file; 939 struct cifsFileInfo *open_file;
966 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode); 940 struct cifsInodeInfo *cifsi = CIFS_I(inode);
967 941
968 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 942 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
969 943
@@ -1029,21 +1003,17 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
1029 1003
1030 cifs_stats_bytes_written(pTcon, total_written); 1004 cifs_stats_bytes_written(pTcon, total_written);
1031 1005
1032 /* since the write may have blocked check these pointers again */
1033 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1034 struct inode *inode = file->f_path.dentry->d_inode;
1035/* Do not update local mtime - server will set its actual value on write 1006/* Do not update local mtime - server will set its actual value on write
1036 * inode->i_ctime = inode->i_mtime = 1007 * inode->i_ctime = inode->i_mtime =
1037 * current_fs_time(inode->i_sb);*/ 1008 * current_fs_time(inode->i_sb);*/
1038 if (total_written > 0) { 1009 if (total_written > 0) {
1039 spin_lock(&inode->i_lock); 1010 spin_lock(&inode->i_lock);
1040 if (*poffset > file->f_path.dentry->d_inode->i_size) 1011 if (*poffset > inode->i_size)
1041 i_size_write(file->f_path.dentry->d_inode, 1012 i_size_write(inode, *poffset);
1042 *poffset); 1013 spin_unlock(&inode->i_lock);
1043 spin_unlock(&inode->i_lock);
1044 }
1045 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1046 } 1014 }
1015 mark_inode_dirty_sync(inode);
1016
1047 FreeXid(xid); 1017 FreeXid(xid);
1048 return total_written; 1018 return total_written;
1049} 1019}
@@ -1178,7 +1148,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1178 bool fsuid_only) 1148 bool fsuid_only)
1179{ 1149{
1180 struct cifsFileInfo *open_file; 1150 struct cifsFileInfo *open_file;
1181 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); 1151 struct cifs_sb_info *cifs_sb;
1182 bool any_available = false; 1152 bool any_available = false;
1183 int rc; 1153 int rc;
1184 1154
@@ -1192,6 +1162,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1192 return NULL; 1162 return NULL;
1193 } 1163 }
1194 1164
1165 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1166
1195 /* only filter by fsuid on multiuser mounts */ 1167 /* only filter by fsuid on multiuser mounts */
1196 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 1168 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1197 fsuid_only = false; 1169 fsuid_only = false;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3c3efb..ef3a55bf86b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2177 2177
2178 setattr_copy(inode, attrs); 2178 setattr_copy(inode, attrs);
2179 mark_inode_dirty(inode); 2179 mark_inode_dirty(inode);
2180 return 0;
2181 2180
2182cifs_setattr_exit: 2181cifs_setattr_exit:
2183 kfree(full_path); 2182 kfree(full_path);
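
The one-line deletion above removes a premature return 0 that bypassed the cifs_setattr_exit label, so the success path leaked full_path and skipped the rest of the shared cleanup; success and failure now both fall through to the common exit. The general shape, as a runnable sketch:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int do_setattr(const char *path)
{
        int rc = 0;
        char *full_path = strdup(path);

        if (!full_path)
                return -ENOMEM;

        /* ... work that may set rc ... */
        if (rc)
                goto out;
        /* Pre-patch bug: a "return 0;" here skipped the cleanup below. */
out:
        free(full_path);        /* single exit frees on every path */
        return rc;
}

int main(void) { return do_setattr("/tmp/x") ? 1 : 0; }
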
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 077bf756f342..0c98672d0122 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
38 struct cifs_sb_info *cifs_sb; 38 struct cifs_sb_info *cifs_sb;
39#ifdef CONFIG_CIFS_POSIX 39#ifdef CONFIG_CIFS_POSIX
40 struct cifsFileInfo *pSMBFile = filep->private_data; 40 struct cifsFileInfo *pSMBFile = filep->private_data;
41 struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink); 41 struct cifsTconInfo *tcon;
42 __u64 ExtAttrBits = 0; 42 __u64 ExtAttrBits = 0;
43 __u64 ExtAttrMask = 0; 43 __u64 ExtAttrMask = 0;
44 __u64 caps = le64_to_cpu(tcon->fsUnixInfo.Capability); 44 __u64 caps;
45#endif /* CONFIG_CIFS_POSIX */ 45#endif /* CONFIG_CIFS_POSIX */
46 46
47 xid = GetXid(); 47 xid = GetXid();
@@ -62,9 +62,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
62 break; 62 break;
63#ifdef CONFIG_CIFS_POSIX 63#ifdef CONFIG_CIFS_POSIX
64 case FS_IOC_GETFLAGS: 64 case FS_IOC_GETFLAGS:
65 if (pSMBFile == NULL)
66 break;
67 tcon = tlink_tcon(pSMBFile->tlink);
68 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
65 if (CIFS_UNIX_EXTATTR_CAP & caps) { 69 if (CIFS_UNIX_EXTATTR_CAP & caps) {
66 if (pSMBFile == NULL)
67 break;
68 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid, 70 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
69 &ExtAttrBits, &ExtAttrMask); 71 &ExtAttrBits, &ExtAttrMask);
70 if (rc == 0) 72 if (rc == 0)
@@ -75,13 +77,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
75 break; 77 break;
76 78
77 case FS_IOC_SETFLAGS: 79 case FS_IOC_SETFLAGS:
80 if (pSMBFile == NULL)
81 break;
82 tcon = tlink_tcon(pSMBFile->tlink);
83 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
78 if (CIFS_UNIX_EXTATTR_CAP & caps) { 84 if (CIFS_UNIX_EXTATTR_CAP & caps) {
79 if (get_user(ExtAttrBits, (int __user *)arg)) { 85 if (get_user(ExtAttrBits, (int __user *)arg)) {
80 rc = -EFAULT; 86 rc = -EFAULT;
81 break; 87 break;
82 } 88 }
83 if (pSMBFile == NULL)
84 break;
85 /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, 89 /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
86 extAttrBits, &ExtAttrMask);*/ 90 extAttrBits, &ExtAttrMask);*/
87 } 91 }
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c4e296fe3518..43f10281bc19 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,10 +569,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
569 569
570 cFYI(1, "file id match, oplock break"); 570 cFYI(1, "file id match, oplock break");
571 pCifsInode = CIFS_I(netfile->dentry->d_inode); 571 pCifsInode = CIFS_I(netfile->dentry->d_inode);
572 pCifsInode->clientCanCacheAll = false;
573 if (pSMB->OplockLevel == 0)
574 pCifsInode->clientCanCacheRead = false;
575 572
573 cifs_set_oplock_level(pCifsInode,
574 pSMB->OplockLevel);
576 /* 575 /*
577 * cifs_oplock_break_put() can't be called 576 * cifs_oplock_break_put() can't be called
578 * from here. Get reference after queueing 577 * from here. Get reference after queueing
@@ -722,3 +721,23 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
722 cifs_sb_master_tcon(cifs_sb)->treeName); 721 cifs_sb_master_tcon(cifs_sb)->treeName);
723 } 722 }
724} 723}
724
725void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
726{
727 oplock &= 0xF;
728
729 if (oplock == OPLOCK_EXCLUSIVE) {
730 cinode->clientCanCacheAll = true;
731 cinode->clientCanCacheRead = true;
732 cFYI(1, "Exclusive Oplock granted on inode %p",
733 &cinode->vfs_inode);
734 } else if (oplock == OPLOCK_READ) {
735 cinode->clientCanCacheAll = false;
736 cinode->clientCanCacheRead = true;
737 cFYI(1, "Level II Oplock granted on inode %p",
738 &cinode->vfs_inode);
739 } else {
740 cinode->clientCanCacheAll = false;
741 cinode->clientCanCacheRead = false;
742 }
743}
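
cifs_set_oplock_level() centralizes the oplock-to-cache-flag mapping that was open-coded at the open, reopen, last-close, and oplock-break sites shown in the surrounding hunks. It also appears to tighten the break path: the old code cleared clientCanCacheRead only when OplockLevel was 0, while the helper recomputes both flags from the level every time. Call sites collapse to, for example:

	/* oplock break: downgrade caching to whatever the server granted */
	cifs_set_oplock_level(pCifsInode, pSMB->OplockLevel);

	/* last handle closed: drop all cached state */
	cifs_set_oplock_level(cifsi, 0);
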
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8b5dd6369f82..6a5edea2d70b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@ struct mpage_da_data {
177 177
178struct ext4_io_page { 178struct ext4_io_page {
179 struct page *p_page; 179 struct page *p_page;
180 int p_count; 180 atomic_t p_count;
181}; 181};
182 182
183#define MAX_IO_PAGES 128 183#define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@ struct ext4_inode_info {
858 spinlock_t i_completed_io_lock; 858 spinlock_t i_completed_io_lock;
859 /* current io_end structure for async DIO write*/ 859 /* current io_end structure for async DIO write*/
860 ext4_io_end_t *cur_aio_dio; 860 ext4_io_end_t *cur_aio_dio;
861 atomic_t i_ioend_count; /* Number of outstanding io_end structs */
861 862
862 /* 863 /*
863 * Transactions that contain inode's metadata needed to complete 864 * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
2060/* page-io.c */ 2061/* page-io.c */
2061extern int __init ext4_init_pageio(void); 2062extern int __init ext4_init_pageio(void);
2062extern void ext4_exit_pageio(void); 2063extern void ext4_exit_pageio(void);
2064extern void ext4_ioend_wait(struct inode *);
2063extern void ext4_free_io_end(ext4_io_end_t *io); 2065extern void ext4_free_io_end(ext4_io_end_t *io);
2064extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); 2066extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
2065extern int ext4_end_io_nolock(ext4_io_end_t *io); 2067extern int ext4_end_io_nolock(ext4_io_end_t *io);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 191616470466..bdbe69902207 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -53,6 +53,7 @@
53static inline int ext4_begin_ordered_truncate(struct inode *inode, 53static inline int ext4_begin_ordered_truncate(struct inode *inode,
54 loff_t new_size) 54 loff_t new_size)
55{ 55{
56 trace_ext4_begin_ordered_truncate(inode, new_size);
56 return jbd2_journal_begin_ordered_truncate( 57 return jbd2_journal_begin_ordered_truncate(
57 EXT4_SB(inode->i_sb)->s_journal, 58 EXT4_SB(inode->i_sb)->s_journal,
58 &EXT4_I(inode)->jinode, 59 &EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@ void ext4_evict_inode(struct inode *inode)
178 handle_t *handle; 179 handle_t *handle;
179 int err; 180 int err;
180 181
182 trace_ext4_evict_inode(inode);
181 if (inode->i_nlink) { 183 if (inode->i_nlink) {
182 truncate_inode_pages(&inode->i_data, 0); 184 truncate_inode_pages(&inode->i_data, 0);
183 goto no_delete; 185 goto no_delete;
@@ -5410,9 +5412,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5410 * will return the blocks that include the delayed allocation 5412 * will return the blocks that include the delayed allocation
5411 * blocks for this file. 5413 * blocks for this file.
5412 */ 5414 */
5413 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5414 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 5415 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5415 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5416 5416
5417 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 5417 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5418 return 0; 5418 return 0;
@@ -5649,6 +5649,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5649 int err, ret; 5649 int err, ret;
5650 5650
5651 might_sleep(); 5651 might_sleep();
5652 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5652 err = ext4_reserve_inode_write(handle, inode, &iloc); 5653 err = ext4_reserve_inode_write(handle, inode, &iloc);
5653 if (ext4_handle_valid(handle) && 5654 if (ext4_handle_valid(handle) &&
5654 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5655 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c58eba34724a..5b4d4e3a4d58 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4640,8 +4640,6 @@ do_more:
4640 * with group lock held. generate_buddy look at 4640 * with group lock held. generate_buddy look at
4641 * them with group lock_held 4641 * them with group lock_held
4642 */ 4642 */
4643 if (test_opt(sb, DISCARD))
4644 ext4_issue_discard(sb, block_group, bit, count);
4645 ext4_lock_group(sb, block_group); 4643 ext4_lock_group(sb, block_group);
4646 mb_clear_bits(bitmap_bh->b_data, bit, count); 4644 mb_clear_bits(bitmap_bh->b_data, bit, count);
4647 mb_free_blocks(inode, &e4b, bit, count); 4645 mb_free_blocks(inode, &e4b, bit, count);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 46a7d6a9d976..7f5451cd1d38 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,8 +32,14 @@
32 32
33static struct kmem_cache *io_page_cachep, *io_end_cachep; 33static struct kmem_cache *io_page_cachep, *io_end_cachep;
34 34
35#define WQ_HASH_SZ 37
36#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
37static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
38
35int __init ext4_init_pageio(void) 39int __init ext4_init_pageio(void)
36{ 40{
41 int i;
42
37 io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); 43 io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
38 if (io_page_cachep == NULL) 44 if (io_page_cachep == NULL)
39 return -ENOMEM; 45 return -ENOMEM;
@@ -42,6 +48,8 @@ int __init ext4_init_pageio(void)
42 kmem_cache_destroy(io_page_cachep); 48 kmem_cache_destroy(io_page_cachep);
43 return -ENOMEM; 49 return -ENOMEM;
44 } 50 }
51 for (i = 0; i < WQ_HASH_SZ; i++)
52 init_waitqueue_head(&ioend_wq[i]);
45 53
46 return 0; 54 return 0;
47} 55}
@@ -52,24 +60,37 @@ void ext4_exit_pageio(void)
52 kmem_cache_destroy(io_page_cachep); 60 kmem_cache_destroy(io_page_cachep);
53} 61}
54 62
63void ext4_ioend_wait(struct inode *inode)
64{
65 wait_queue_head_t *wq = to_ioend_wq(inode);
66
67 wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
68}
69
70static void put_io_page(struct ext4_io_page *io_page)
71{
72 if (atomic_dec_and_test(&io_page->p_count)) {
73 end_page_writeback(io_page->p_page);
74 put_page(io_page->p_page);
75 kmem_cache_free(io_page_cachep, io_page);
76 }
77}
78
55void ext4_free_io_end(ext4_io_end_t *io) 79void ext4_free_io_end(ext4_io_end_t *io)
56{ 80{
57 int i; 81 int i;
82 wait_queue_head_t *wq;
58 83
59 BUG_ON(!io); 84 BUG_ON(!io);
60 if (io->page) 85 if (io->page)
61 put_page(io->page); 86 put_page(io->page);
62 for (i = 0; i < io->num_io_pages; i++) { 87 for (i = 0; i < io->num_io_pages; i++)
63 if (--io->pages[i]->p_count == 0) { 88 put_io_page(io->pages[i]);
64 struct page *page = io->pages[i]->p_page;
65
66 end_page_writeback(page);
67 put_page(page);
68 kmem_cache_free(io_page_cachep, io->pages[i]);
69 }
70 }
71 io->num_io_pages = 0; 89 io->num_io_pages = 0;
72 iput(io->inode); 90 wq = to_ioend_wq(io->inode);
91 if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
92 waitqueue_active(wq))
93 wake_up_all(wq);
73 kmem_cache_free(io_end_cachep, io); 94 kmem_cache_free(io_end_cachep, io);
74} 95}
75 96
@@ -142,8 +163,8 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
142 io = kmem_cache_alloc(io_end_cachep, flags); 163 io = kmem_cache_alloc(io_end_cachep, flags);
143 if (io) { 164 if (io) {
144 memset(io, 0, sizeof(*io)); 165 memset(io, 0, sizeof(*io));
145 io->inode = igrab(inode); 166 atomic_inc(&EXT4_I(inode)->i_ioend_count);
146 BUG_ON(!io->inode); 167 io->inode = inode;
147 INIT_WORK(&io->work, ext4_end_io_work); 168 INIT_WORK(&io->work, ext4_end_io_work);
148 INIT_LIST_HEAD(&io->list); 169 INIT_LIST_HEAD(&io->list);
149 } 170 }
@@ -171,35 +192,15 @@ static void ext4_end_bio(struct bio *bio, int error)
171 struct workqueue_struct *wq; 192 struct workqueue_struct *wq;
172 struct inode *inode; 193 struct inode *inode;
173 unsigned long flags; 194 unsigned long flags;
174 ext4_fsblk_t err_block;
175 int i; 195 int i;
176 196
177 BUG_ON(!io_end); 197 BUG_ON(!io_end);
178 inode = io_end->inode;
179 bio->bi_private = NULL; 198 bio->bi_private = NULL;
180 bio->bi_end_io = NULL; 199 bio->bi_end_io = NULL;
181 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 200 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
182 error = 0; 201 error = 0;
183 err_block = bio->bi_sector >> (inode->i_blkbits - 9);
184 bio_put(bio); 202 bio_put(bio);
185 203
186 if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
187 pr_err("sb umounted, discard end_io request for inode %lu\n",
188 io_end->inode->i_ino);
189 ext4_free_io_end(io_end);
190 return;
191 }
192
193 if (error) {
194 io_end->flag |= EXT4_IO_END_ERROR;
195 ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
196 "(offset %llu size %ld starting block %llu)",
197 inode->i_ino,
198 (unsigned long long) io_end->offset,
199 (long) io_end->size,
200 (unsigned long long) err_block);
201 }
202
203 for (i = 0; i < io_end->num_io_pages; i++) { 204 for (i = 0; i < io_end->num_io_pages; i++) {
204 struct page *page = io_end->pages[i]->p_page; 205 struct page *page = io_end->pages[i]->p_page;
205 struct buffer_head *bh, *head; 206 struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
236 } while (bh != head); 237 } while (bh != head);
237 } 238 }
238 239
239 if (--io_end->pages[i]->p_count == 0) { 240 put_io_page(io_end->pages[i]);
240 struct page *page = io_end->pages[i]->p_page;
241
242 end_page_writeback(page);
243 put_page(page);
244 kmem_cache_free(io_page_cachep, io_end->pages[i]);
245 }
246 241
247 /* 242 /*
248 * If this is a partial write which happened to make 243 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@ static void ext4_end_bio(struct bio *bio, int error)
254 if (!partial_write) 249 if (!partial_write)
255 SetPageUptodate(page); 250 SetPageUptodate(page);
256 } 251 }
257
258 io_end->num_io_pages = 0; 252 io_end->num_io_pages = 0;
253 inode = io_end->inode;
254
255 if (error) {
256 io_end->flag |= EXT4_IO_END_ERROR;
257 ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
258 "(offset %llu size %ld starting block %llu)",
259 inode->i_ino,
260 (unsigned long long) io_end->offset,
261 (long) io_end->size,
262 (unsigned long long)
263 bio->bi_sector >> (inode->i_blkbits - 9));
264 }
259 265
260 /* Add the io_end to per-inode completed io list*/ 266 /* Add the io_end to per-inode completed io list*/
261 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 267 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@ static int io_submit_init(struct ext4_io_submit *io,
305 bio->bi_private = io->io_end = io_end; 311 bio->bi_private = io->io_end = io_end;
306 bio->bi_end_io = ext4_end_bio; 312 bio->bi_end_io = ext4_end_bio;
307 313
308 io_end->inode = inode;
309 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); 314 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
310 315
311 io->io_bio = bio; 316 io->io_bio = bio;
@@ -360,7 +365,7 @@ submit_and_retry:
360 if ((io_end->num_io_pages == 0) || 365 if ((io_end->num_io_pages == 0) ||
361 (io_end->pages[io_end->num_io_pages-1] != io_page)) { 366 (io_end->pages[io_end->num_io_pages-1] != io_page)) {
362 io_end->pages[io_end->num_io_pages++] = io_page; 367 io_end->pages[io_end->num_io_pages++] = io_page;
363 io_page->p_count++; 368 atomic_inc(&io_page->p_count);
364 } 369 }
365 return 0; 370 return 0;
366} 371}
@@ -389,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
389 return -ENOMEM; 394 return -ENOMEM;
390 } 395 }
391 io_page->p_page = page; 396 io_page->p_page = page;
392 io_page->p_count = 0; 397 atomic_set(&io_page->p_count, 1);
393 get_page(page); 398 get_page(page);
394 399
395 for (bh = head = page_buffers(page), block_start = 0; 400 for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
421 * PageWriteback bit from the page to prevent the system from 426 * PageWriteback bit from the page to prevent the system from
422 * wedging later on. 427 * wedging later on.
423 */ 428 */
424 if (io_page->p_count == 0) { 429 put_io_page(io_page);
425 put_page(page);
426 end_page_writeback(page);
427 kmem_cache_free(io_page_cachep, io_page);
428 }
429 return ret; 430 return ret;
430} 431}
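
The page-io rework replaces per-io_end igrab()/iput() inode pinning with a per-inode i_ioend_count plus a small fixed hash of wait queues, letting ext4_destroy_inode() block in ext4_ioend_wait() until writeback completions drain; p_count becomes atomic_t because it is now dropped from bio completion context via put_io_page(). A kernel-style sketch of the hashed-waitqueue pattern (the helper names are illustrative; WQ_HASH_SZ 37 and the pointer-mod hash mirror the patch — 37 queues shared by all inodes trades memory for rare spurious wakeups):

#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/wait.h>

#define WQ_HASH_SZ 37
static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
#define to_wq(ptr) (&ioend_wq[(unsigned long)(ptr) % WQ_HASH_SZ])

static void ioend_wq_init(void)
{
        int i;

        for (i = 0; i < WQ_HASH_SZ; i++)
                init_waitqueue_head(&ioend_wq[i]);
}

/* Completion side: drop the count, wake waiters hashed on the inode. */
static void ioend_put(struct inode *inode, atomic_t *count)
{
        wait_queue_head_t *wq = to_wq(inode);

        if (atomic_dec_and_test(count) && waitqueue_active(wq))
                wake_up_all(wq);
}

/* Teardown side: sleep until every outstanding io_end is gone. */
static void ioend_wait(struct inode *inode, atomic_t *count)
{
        wait_event(*to_wq(inode), atomic_read(count) == 0);
}
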
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 40131b777af6..61182fe6254e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -828,12 +828,22 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
828 ei->cur_aio_dio = NULL; 828 ei->cur_aio_dio = NULL;
829 ei->i_sync_tid = 0; 829 ei->i_sync_tid = 0;
830 ei->i_datasync_tid = 0; 830 ei->i_datasync_tid = 0;
831 atomic_set(&ei->i_ioend_count, 0);
831 832
832 return &ei->vfs_inode; 833 return &ei->vfs_inode;
833} 834}
834 835
836static int ext4_drop_inode(struct inode *inode)
837{
838 int drop = generic_drop_inode(inode);
839
840 trace_ext4_drop_inode(inode, drop);
841 return drop;
842}
843
835static void ext4_destroy_inode(struct inode *inode) 844static void ext4_destroy_inode(struct inode *inode)
836{ 845{
846 ext4_ioend_wait(inode);
837 if (!list_empty(&(EXT4_I(inode)->i_orphan))) { 847 if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
838 ext4_msg(inode->i_sb, KERN_ERR, 848 ext4_msg(inode->i_sb, KERN_ERR,
839 "Inode %lu (%p): orphan list check failed!", 849 "Inode %lu (%p): orphan list check failed!",
@@ -1173,6 +1183,7 @@ static const struct super_operations ext4_sops = {
1173 .destroy_inode = ext4_destroy_inode, 1183 .destroy_inode = ext4_destroy_inode,
1174 .write_inode = ext4_write_inode, 1184 .write_inode = ext4_write_inode,
1175 .dirty_inode = ext4_dirty_inode, 1185 .dirty_inode = ext4_dirty_inode,
1186 .drop_inode = ext4_drop_inode,
1176 .evict_inode = ext4_evict_inode, 1187 .evict_inode = ext4_evict_inode,
1177 .put_super = ext4_put_super, 1188 .put_super = ext4_put_super,
1178 .sync_fs = ext4_sync_fs, 1189 .sync_fs = ext4_sync_fs,
@@ -1194,6 +1205,7 @@ static const struct super_operations ext4_nojournal_sops = {
1194 .destroy_inode = ext4_destroy_inode, 1205 .destroy_inode = ext4_destroy_inode,
1195 .write_inode = ext4_write_inode, 1206 .write_inode = ext4_write_inode,
1196 .dirty_inode = ext4_dirty_inode, 1207 .dirty_inode = ext4_dirty_inode,
1208 .drop_inode = ext4_drop_inode,
1197 .evict_inode = ext4_evict_inode, 1209 .evict_inode = ext4_evict_inode,
1198 .write_super = ext4_write_super, 1210 .write_super = ext4_write_super,
1199 .put_super = ext4_put_super, 1211 .put_super = ext4_put_super,
@@ -2699,7 +2711,6 @@ static int ext4_lazyinit_thread(void *arg)
2699 struct ext4_li_request *elr; 2711 struct ext4_li_request *elr;
2700 unsigned long next_wakeup; 2712 unsigned long next_wakeup;
2701 DEFINE_WAIT(wait); 2713 DEFINE_WAIT(wait);
2702 int ret;
2703 2714
2704 BUG_ON(NULL == eli); 2715 BUG_ON(NULL == eli);
2705 2716
@@ -2723,13 +2734,12 @@ cont_thread:
2723 elr = list_entry(pos, struct ext4_li_request, 2734 elr = list_entry(pos, struct ext4_li_request,
2724 lr_request); 2735 lr_request);
2725 2736
2726 if (time_after_eq(jiffies, elr->lr_next_sched)) 2737 if (time_after_eq(jiffies, elr->lr_next_sched)) {
2727 ret = ext4_run_li_request(elr); 2738 if (ext4_run_li_request(elr) != 0) {
2728 2739 /* error, remove the lazy_init job */
2729 if (ret) { 2740 ext4_remove_li_request(elr);
2730 ret = 0; 2741 continue;
2731 ext4_remove_li_request(elr); 2742 }
2732 continue;
2733 } 2743 }
2734 2744
2735 if (time_before(elr->lr_next_sched, next_wakeup)) 2745 if (time_before(elr->lr_next_sched, next_wakeup))
@@ -2740,7 +2750,8 @@ cont_thread:
2740 if (freezing(current)) 2750 if (freezing(current))
2741 refrigerator(); 2751 refrigerator();
2742 2752
2743 if (time_after_eq(jiffies, next_wakeup)) { 2753 if ((time_after_eq(jiffies, next_wakeup)) ||
2754 (MAX_JIFFY_OFFSET == next_wakeup)) {
2744 cond_resched(); 2755 cond_resched();
2745 continue; 2756 continue;
2746 } 2757 }
@@ -3348,6 +3359,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3348 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3359 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3349 spin_lock_init(&sbi->s_next_gen_lock); 3360 spin_lock_init(&sbi->s_next_gen_lock);
3350 3361
3362 err = percpu_counter_init(&sbi->s_freeblocks_counter,
3363 ext4_count_free_blocks(sb));
3364 if (!err) {
3365 err = percpu_counter_init(&sbi->s_freeinodes_counter,
3366 ext4_count_free_inodes(sb));
3367 }
3368 if (!err) {
3369 err = percpu_counter_init(&sbi->s_dirs_counter,
3370 ext4_count_dirs(sb));
3371 }
3372 if (!err) {
3373 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
3374 }
3375 if (err) {
3376 ext4_msg(sb, KERN_ERR, "insufficient memory");
3377 goto failed_mount3;
3378 }
3379
3351 sbi->s_stripe = ext4_get_stripe_size(sbi); 3380 sbi->s_stripe = ext4_get_stripe_size(sbi);
3352 sbi->s_max_writeback_mb_bump = 128; 3381 sbi->s_max_writeback_mb_bump = 128;
3353 3382
@@ -3446,22 +3475,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3446 } 3475 }
3447 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); 3476 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3448 3477
3449no_journal: 3478 /*
3450 err = percpu_counter_init(&sbi->s_freeblocks_counter, 3479 * The journal may have updated the bg summary counts, so we
3451 ext4_count_free_blocks(sb)); 3480 * need to update the global counters.
3452 if (!err) 3481 */
3453 err = percpu_counter_init(&sbi->s_freeinodes_counter, 3482 percpu_counter_set(&sbi->s_freeblocks_counter,
3454 ext4_count_free_inodes(sb)); 3483 ext4_count_free_blocks(sb));
3455 if (!err) 3484 percpu_counter_set(&sbi->s_freeinodes_counter,
3456 err = percpu_counter_init(&sbi->s_dirs_counter, 3485 ext4_count_free_inodes(sb));
3457 ext4_count_dirs(sb)); 3486 percpu_counter_set(&sbi->s_dirs_counter,
3458 if (!err) 3487 ext4_count_dirs(sb));
3459 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); 3488 percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
3460 if (err) {
3461 ext4_msg(sb, KERN_ERR, "insufficient memory");
3462 goto failed_mount_wq;
3463 }
3464 3489
3490no_journal:
3465 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); 3491 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
3466 if (!EXT4_SB(sb)->dio_unwritten_wq) { 3492 if (!EXT4_SB(sb)->dio_unwritten_wq) {
3467 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); 3493 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3611,10 +3637,6 @@ failed_mount_wq:
3611 jbd2_journal_destroy(sbi->s_journal); 3637 jbd2_journal_destroy(sbi->s_journal);
3612 sbi->s_journal = NULL; 3638 sbi->s_journal = NULL;
3613 } 3639 }
3614 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3615 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3616 percpu_counter_destroy(&sbi->s_dirs_counter);
3617 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3618failed_mount3: 3640failed_mount3:
3619 if (sbi->s_flex_groups) { 3641 if (sbi->s_flex_groups) {
3620 if (is_vmalloc_addr(sbi->s_flex_groups)) 3642 if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3622,6 +3644,10 @@ failed_mount3:
3622 else 3644 else
3623 kfree(sbi->s_flex_groups); 3645 kfree(sbi->s_flex_groups);
3624 } 3646 }
3647 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3648 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3649 percpu_counter_destroy(&sbi->s_dirs_counter);
3650 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3625failed_mount2: 3651failed_mount2:
3626 for (i = 0; i < db_count; i++) 3652 for (i = 0; i < db_count; i++)
3627 brelse(sbi->s_group_desc[i]); 3653 brelse(sbi->s_group_desc[i]);
@@ -3949,13 +3975,11 @@ static int ext4_commit_super(struct super_block *sb, int sync)
3949 else 3975 else
3950 es->s_kbytes_written = 3976 es->s_kbytes_written =
3951 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); 3977 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
3952 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter)) 3978 ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
3953 ext4_free_blocks_count_set(es, percpu_counter_sum_positive( 3979 &EXT4_SB(sb)->s_freeblocks_counter));
3954 &EXT4_SB(sb)->s_freeblocks_counter)); 3980 es->s_free_inodes_count =
3955 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter)) 3981 cpu_to_le32(percpu_counter_sum_positive(
3956 es->s_free_inodes_count = 3982 &EXT4_SB(sb)->s_freeinodes_counter));
3957 cpu_to_le32(percpu_counter_sum_positive(
3958 &EXT4_SB(sb)->s_freeinodes_counter));
3959 sb->s_dirt = 0; 3983 sb->s_dirt = 0;
3960 BUFFER_TRACE(sbh, "marking dirty"); 3984 BUFFER_TRACE(sbh, "marking dirty");
3961 mark_buffer_dirty(sbh); 3985 mark_buffer_dirty(sbh);
@@ -4556,12 +4580,10 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
4556 4580
4557static int ext4_quota_off(struct super_block *sb, int type) 4581static int ext4_quota_off(struct super_block *sb, int type)
4558{ 4582{
4559 /* Force all delayed allocation blocks to be allocated */ 4583 /* Force all delayed allocation blocks to be allocated.
4560 if (test_opt(sb, DELALLOC)) { 4584 * Caller already holds s_umount sem */
4561 down_read(&sb->s_umount); 4585 if (test_opt(sb, DELALLOC))
4562 sync_filesystem(sb); 4586 sync_filesystem(sb);
4563 up_read(&sb->s_umount);
4564 }
4565 4587
4566 return dquot_quota_off(sb, type); 4588 return dquot_quota_off(sb, type);
4567} 4589}
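
The ext4 hunks above move the percpu counter setup ahead of journal recovery and then refresh the values once replay has settled the block-group summary counts, which is also why the destroy calls migrate to failed_mount3. A minimal sketch of that lifecycle follows; the helper names are illustrative, and the two-argument percpu_counter_init() matches the API of this era (later kernels add a gfp_t parameter).

    #include <linux/percpu_counter.h>

    /* Hypothetical pair of counters standing in for s_freeblocks_counter
     * and friends; unwind in reverse order on failure, exactly once. */
    static int counters_setup(struct percpu_counter *free_blocks,
                              struct percpu_counter *free_inodes,
                              s64 blocks, s64 inodes)
    {
        int err = percpu_counter_init(free_blocks, blocks);

        if (err)
            return err;
        err = percpu_counter_init(free_inodes, inodes);
        if (err)
            percpu_counter_destroy(free_blocks);
        return err;
    }

    /* After journal replay the on-disk summaries may have changed, so
     * the counters are re-seeded rather than re-initialized: */
    static void counters_refresh(struct percpu_counter *free_blocks, s64 blocks)
    {
        percpu_counter_set(free_blocks, blocks);
    }
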
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1f0a40..a5fe68189eed 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
933 *user = current_user(); 933 *user = current_user();
934 if (user_shm_lock(size, *user)) { 934 if (user_shm_lock(size, *user)) {
935 WARN_ONCE(1, 935 printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
936 "Using mlock ulimits for SHM_HUGETLB deprecated\n");
937 } else { 936 } else {
938 *user = NULL; 937 *user = NULL;
939 return ERR_PTR(-EPERM); 938 return ERR_PTR(-EPERM);
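
The hugetlbfs change swaps WARN_ONCE() for printk_once(): the message still fires at most once, but without the stack trace (and tainting) that WARN produces. A rough, hand-rolled equivalent of what printk_once() expands to, with an illustrative guard-variable name:

    #include <linux/kernel.h>

    static void warn_mlock_ulimit_once(void)
    {
        static bool warned;             /* one static guard per call site */

        if (!warned) {
            warned = true;
            printk(KERN_WARNING
                   "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
        }
    }
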
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb92dcc6..2f7d05c89922 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
111 read_lock(&tasklist_lock); 111 read_lock(&tasklist_lock);
112 switch (which) { 112 switch (which) {
113 case IOPRIO_WHO_PROCESS: 113 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
114 if (!who) 115 if (!who)
115 p = current; 116 p = current;
116 else 117 else
117 p = find_task_by_vpid(who); 118 p = find_task_by_vpid(who);
118 if (p) 119 if (p)
119 ret = set_task_ioprio(p, ioprio); 120 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
120 break; 122 break;
121 case IOPRIO_WHO_PGRP: 123 case IOPRIO_WHO_PGRP:
122 if (!who) 124 if (!who)
@@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
139 break; 141 break;
140 142
141 do_each_thread(g, p) { 143 do_each_thread(g, p) {
142 if (__task_cred(p)->uid != who) 144 int match;
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
143 continue; 150 continue;
144 ret = set_task_ioprio(p, ioprio); 151 ret = set_task_ioprio(p, ioprio);
145 if (ret) 152 if (ret)
@@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
200 read_lock(&tasklist_lock); 207 read_lock(&tasklist_lock);
201 switch (which) { 208 switch (which) {
202 case IOPRIO_WHO_PROCESS: 209 case IOPRIO_WHO_PROCESS:
210 rcu_read_lock();
203 if (!who) 211 if (!who)
204 p = current; 212 p = current;
205 else 213 else
206 p = find_task_by_vpid(who); 214 p = find_task_by_vpid(who);
207 if (p) 215 if (p)
208 ret = get_task_ioprio(p); 216 ret = get_task_ioprio(p);
217 rcu_read_unlock();
209 break; 218 break;
210 case IOPRIO_WHO_PGRP: 219 case IOPRIO_WHO_PGRP:
211 if (!who) 220 if (!who)
@@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
232 break; 241 break;
233 242
234 do_each_thread(g, p) { 243 do_each_thread(g, p) {
235 if (__task_cred(p)->uid != user->uid) 244 int match;
245
246 rcu_read_lock();
247 match = __task_cred(p)->uid == user->uid;
248 rcu_read_unlock();
249 if (!match)
236 continue; 250 continue;
237 tmpio = get_task_ioprio(p); 251 tmpio = get_task_ioprio(p);
238 if (tmpio < 0) 252 if (tmpio < 0)
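
Both syscalls now bracket the task lookup and credential access with rcu_read_lock(), which find_task_by_vpid() and __task_cred() require even while tasklist_lock is read-held. A condensed sketch of the IOPRIO_WHO_PROCESS case (hypothetical helper, error handling trimmed):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/ioprio.h>
    #include <linux/errno.h>

    static int ioprio_set_one(pid_t who, int ioprio)
    {
        struct task_struct *p;
        int ret = -ESRCH;

        rcu_read_lock();
        p = who ? find_task_by_vpid(who) : current;   /* RCU-protected walk */
        if (p)
            ret = set_task_ioprio(p, ioprio);
        rcu_read_unlock();

        return ret;
    }
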
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb6afed..0e62dd35d088 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1504,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp)
1504 1504
1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) 1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1506{ 1506{
1507 struct file_lock *fl; 1507 struct file_lock *fl, *ret;
1508 struct fasync_struct *new; 1508 struct fasync_struct *new;
1509 struct inode *inode = filp->f_path.dentry->d_inode;
1510 int error; 1509 int error;
1511 1510
1512 fl = lease_alloc(filp, arg); 1511 fl = lease_alloc(filp, arg);
@@ -1518,13 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1518 locks_free_lock(fl); 1517 locks_free_lock(fl);
1519 return -ENOMEM; 1518 return -ENOMEM;
1520 } 1519 }
1520 ret = fl;
1521 lock_flocks(); 1521 lock_flocks();
1522 error = __vfs_setlease(filp, arg, &fl); 1522 error = __vfs_setlease(filp, arg, &ret);
1523 if (error) { 1523 if (error) {
1524 unlock_flocks(); 1524 unlock_flocks();
1525 locks_free_lock(fl); 1525 locks_free_lock(fl);
1526 goto out_free_fasync; 1526 goto out_free_fasync;
1527 } 1527 }
1528 if (ret != fl)
1529 locks_free_lock(fl);
1528 1530
1529 /* 1531 /*
1530 * fasync_insert_entry() returns the old entry if any. 1532 * fasync_insert_entry() returns the old entry if any.
@@ -1532,17 +1534,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1532 * inserted it into the fasync list. Clear new so that 1534 * inserted it into the fasync list. Clear new so that
1533 * we don't release it here. 1535 * we don't release it here.
1534 */ 1536 */
1535 if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new)) 1537 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1536 new = NULL; 1538 new = NULL;
1537 1539
1538 if (error < 0) { 1540 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1539 /* remove lease just inserted by setlease */
1540 fl->fl_type = F_UNLCK | F_INPROGRESS;
1541 fl->fl_break_time = jiffies - 10;
1542 time_out_leases(inode);
1543 } else {
1544 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1545 }
1546 unlock_flocks(); 1541 unlock_flocks();
1547 1542
1548out_free_fasync: 1543out_free_fasync:
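
The subtlety in do_fcntl_add_lease() is that __vfs_setlease() takes a struct file_lock ** and may hand back an existing lease instead of consuming the caller's allocation. The ownership rule the patch encodes, reduced to a sketch (fasync handling omitted, and the diff frees the duplicate while still holding lock_flocks()):

    static int add_lease_sketch(struct file *filp, long arg)
    {
        struct file_lock *fl, *ret;
        int error;

        fl = lease_alloc(filp, arg);            /* our candidate lease */
        if (IS_ERR(fl))
            return PTR_ERR(fl);

        ret = fl;
        lock_flocks();
        error = __vfs_setlease(filp, arg, &ret);
        unlock_flocks();
        if (error) {
            locks_free_lock(fl);                /* nothing was installed */
            return error;
        }
        if (ret != fl)                          /* an existing lease was reused */
            locks_free_lock(fl);                /* free the unused candidate */
        return 0;
    }
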
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index cd51a36b37f0..57afd4a6fabb 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -486,7 +486,7 @@ static inline int logfs_get_sb_bdev(struct logfs_super *s,
486 486
487/* dev_mtd.c */ 487/* dev_mtd.c */
488#ifdef CONFIG_MTD 488#ifdef CONFIG_MTD
489int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) 489int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
490#else 490#else
491static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) 491static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
492{ 492{
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6b5105..ad2bfa68d534 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
673 spin_unlock(&clp->cl_lock); 673 spin_unlock(&clp->cl_lock);
674} 674}
675 675
676static void nfsd4_register_conn(struct nfsd4_conn *conn) 676static int nfsd4_register_conn(struct nfsd4_conn *conn)
677{ 677{
678 conn->cn_xpt_user.callback = nfsd4_conn_lost; 678 conn->cn_xpt_user.callback = nfsd4_conn_lost;
679 register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 679 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
680} 680}
681 681
682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) 682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
683{ 683{
684 struct nfsd4_conn *conn; 684 struct nfsd4_conn *conn;
685 u32 flags = NFS4_CDFC4_FORE; 685 u32 flags = NFS4_CDFC4_FORE;
686 int ret;
686 687
687 if (ses->se_flags & SESSION4_BACK_CHAN) 688 if (ses->se_flags & SESSION4_BACK_CHAN)
688 flags |= NFS4_CDFC4_BACK; 689 flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
690 if (!conn) 691 if (!conn)
691 return nfserr_jukebox; 692 return nfserr_jukebox;
692 nfsd4_hash_conn(conn, ses); 693 nfsd4_hash_conn(conn, ses);
693 nfsd4_register_conn(conn); 694 ret = nfsd4_register_conn(conn);
695 if (ret)
696 /* oops; xprt is already down: */
697 nfsd4_conn_lost(&conn->cn_xpt_user);
694 return nfs_ok; 698 return nfs_ok;
695} 699}
696 700
@@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1644{ 1648{
1645 struct nfs4_client *clp = ses->se_client; 1649 struct nfs4_client *clp = ses->se_client;
1646 struct nfsd4_conn *c; 1650 struct nfsd4_conn *c;
1651 int ret;
1647 1652
1648 spin_lock(&clp->cl_lock); 1653 spin_lock(&clp->cl_lock);
1649 c = __nfsd4_find_conn(new->cn_xprt, ses); 1654 c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1654 } 1659 }
1655 __nfsd4_hash_conn(new, ses); 1660 __nfsd4_hash_conn(new, ses);
1656 spin_unlock(&clp->cl_lock); 1661 spin_unlock(&clp->cl_lock);
1657 nfsd4_register_conn(new); 1662 ret = nfsd4_register_conn(new);
1663 if (ret)
1664 /* oops; xprt is already down: */
1665 nfsd4_conn_lost(&new->cn_xpt_user);
1658 return; 1666 return;
1659} 1667}
1660 1668
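
register_xpt_user() can now fail when the transport is already shutting down; both callers respond by invoking the loss callback themselves, so the connection is torn down through the same single path either way. The pattern at its core, folded into a hypothetical helper:

    static void hash_and_register(struct nfsd4_conn *conn,
                                  struct nfsd4_session *ses)
    {
        nfsd4_hash_conn(conn, ses);
        if (nfsd4_register_conn(conn))
            /* xprt already gone: run the teardown callback by hand */
            nfsd4_conn_lost(&conn->cn_xpt_user);
    }
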
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41376e5..911e61f348fc 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -418,7 +418,7 @@ out_no_root:
418static struct dentry *openprom_mount(struct file_system_type *fs_type, 418static struct dentry *openprom_mount(struct file_system_type *fs_type,
419 int flags, const char *dev_name, void *data) 419 int flags, const char *dev_name, void *data)
420{ 420{
421 return mount_single(fs_type, flags, data, openprom_fill_super) 421 return mount_single(fs_type, flags, data, openprom_fill_super);
422} 422}
423 423
424static struct file_system_type openprom_fs_type = { 424static struct file_system_type openprom_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48fffcd7..7d287afccde5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1111,11 +1111,12 @@ xfs_vm_writepage(
1111 uptodate = 0; 1111 uptodate = 0;
1112 1112
1113 /* 1113 /*
1114 * A hole may still be marked uptodate because discard_buffer 1114 * set_page_dirty dirties all buffers in a page, independent
1115 * leaves the flag set. 1115 * of their state. The dirty state however is entirely
1116 * meaningless for holes (!mapped && uptodate), so skip
1117 * buffers covering holes here.
1116 */ 1118 */
1117 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { 1119 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1118 ASSERT(!buffer_dirty(bh));
1119 imap_valid = 0; 1120 imap_valid = 0;
1120 continue; 1121 continue;
1121 } 1122 }

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c07cb57..aa1d353def29 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split(
1781 INIT_LIST_HEAD(list); 1781 INIT_LIST_HEAD(list);
1782 spin_lock(dwlk); 1782 spin_lock(dwlk);
1783 list_for_each_entry_safe(bp, n, dwq, b_list) { 1783 list_for_each_entry_safe(bp, n, dwq, b_list) {
1784 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1785 ASSERT(bp->b_flags & XBF_DELWRI); 1784 ASSERT(bp->b_flags & XBF_DELWRI);
1786 1785
1787 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { 1786 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split(
1795 _XBF_RUN_QUEUES); 1794 _XBF_RUN_QUEUES);
1796 bp->b_flags |= XBF_WRITE; 1795 bp->b_flags |= XBF_WRITE;
1797 list_move_tail(&bp->b_list, list); 1796 list_move_tail(&bp->b_list, list);
1797 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1798 } else 1798 } else
1799 skipped++; 1799 skipped++;
1800 } 1800 }
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6d38e..ad442d9e392e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle(
416 if (IS_ERR(dentry)) 416 if (IS_ERR(dentry))
417 return PTR_ERR(dentry); 417 return PTR_ERR(dentry);
418 418
419 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); 419 kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
420 if (!kbuf) 420 if (!kbuf)
421 goto out_dput; 421 goto out_dput;
422 422
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107efc0c61..94d5fd6a2973 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -762,7 +762,8 @@ xfs_setup_inode(
762 inode->i_state = I_NEW; 762 inode->i_state = I_NEW;
763 763
764 inode_sb_list_add(inode); 764 inode_sb_list_add(inode);
765 insert_inode_hash(inode); 765 /* make the inode look hashed for the writeback code */
766 hlist_add_fake(&inode->i_hash);
766 767
767 inode->i_mode = ip->i_d.di_mode; 768 inode->i_mode = ip->i_d.di_mode;
768 inode->i_nlink = ip->i_d.di_nlink; 769 inode->i_nlink = ip->i_d.di_nlink;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78fe6ae4..064f964d4f3c 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -353,9 +353,6 @@ xfs_parseargs(
353 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 353 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { 354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
355 mp->m_flags |= XFS_MOUNT_DELAYLOG; 355 mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 cmn_err(CE_WARN,
357 "Enabling EXPERIMENTAL delayed logging feature "
358 "- use at your own risk.\n");
359 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
360 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
361 } else if (!strcmp(this_char, "ihashsize")) { 358 } else if (!strcmp(this_char, "ihashsize")) {
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d33254981d..afb0d7cfad1c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -853,6 +853,7 @@ restart:
853 if (trylock) { 853 if (trylock) {
854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { 854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
855 skipped++; 855 skipped++;
856 xfs_perag_put(pag);
856 continue; 857 continue;
857 } 858 }
858 first_index = pag->pag_ici_reclaim_cursor; 859 first_index = pag->pag_ici_reclaim_cursor;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dce5699..9124425b7f2f 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -744,9 +744,15 @@ xfs_filestream_new_ag(
744 * If the file's parent directory is known, take its iolock in exclusive 744 * If the file's parent directory is known, take its iolock in exclusive
745 * mode to prevent two sibling files from racing each other to migrate 745 * mode to prevent two sibling files from racing each other to migrate
746 * themselves and their parent to different AGs. 746 * themselves and their parent to different AGs.
747 *
748 * Note that we lock the parent directory iolock inside the child
749 * iolock here. That's fine as we never hold both parent and child
750 * iolock in any other place. This is different from the ilock,
751 * which requires locking of the child after the parent for namespace
752 * operations.
747 */ 753 */
748 if (pip) 754 if (pip)
749 xfs_ilock(pip, XFS_IOLOCK_EXCL); 755 xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
750 756
751 /* 757 /*
752 * A new AG needs to be found for the file. If the file's parent 758 * A new AG needs to be found for the file. If the file's parent
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab5a399..19e9dfa1c254 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -275,6 +275,7 @@ xfs_free_perag(
275 pag = radix_tree_delete(&mp->m_perag_tree, agno); 275 pag = radix_tree_delete(&mp->m_perag_tree, agno);
276 spin_unlock(&mp->m_perag_lock); 276 spin_unlock(&mp->m_perag_lock);
277 ASSERT(pag); 277 ASSERT(pag);
278 ASSERT(atomic_read(&pag->pag_ref) == 0);
278 call_rcu(&pag->rcu_head, __xfs_free_perag); 279 call_rcu(&pag->rcu_head, __xfs_free_perag);
279 } 280 }
280} 281}
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b113bd6..9bb6eda4cd21 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) 346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
347#define xfs_trans_apply_dquot_deltas(tp) 347#define xfs_trans_apply_dquot_deltas(tp)
348#define xfs_trans_unreserve_and_mod_dquots(tp) 348#define xfs_trans_unreserve_and_mod_dquots(tp)
349#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) 349static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
350#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) 350 struct xfs_inode *ip, long nblks, long ninos, uint flags)
351{
352 return 0;
353}
354static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
355 struct xfs_mount *mp, struct xfs_dquot *udqp,
 356 struct xfs_dquot *gdqp, long nblks, long ninos, uint flags)
357{
358 return 0;
359}
351#define xfs_qm_vop_create_dqattach(tp, ip, u, g) 360#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
352#define xfs_qm_vop_rename_dqattach(it) (0) 361#define xfs_qm_vop_rename_dqattach(it) (0)
353#define xfs_qm_vop_chown(tp, ip, old, new) (NULL) 362#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
357#define xfs_qm_dqdetach(ip) 366#define xfs_qm_dqdetach(ip)
358#define xfs_qm_dqrele(d) 367#define xfs_qm_dqrele(d)
359#define xfs_qm_statvfs(ip, s) 368#define xfs_qm_statvfs(ip, s)
360#define xfs_qm_sync(mp, fl) (0) 369static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
370{
371 return 0;
372}
361#define xfs_qm_newmount(mp, a, b) (0) 373#define xfs_qm_newmount(mp, a, b) (0)
362#define xfs_qm_mount_quotas(mp) 374#define xfs_qm_mount_quotas(mp)
363#define xfs_qm_unmount(mp) 375#define xfs_qm_unmount(mp)
364#define xfs_qm_unmount_quotas(mp) (0) 376#define xfs_qm_unmount_quotas(mp)
365#endif /* CONFIG_XFS_QUOTA */ 377#endif /* CONFIG_XFS_QUOTA */
366 378
367#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ 379#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
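
Replacing `#define foo(a, b) (0)` stubs with static inlines matters for !CONFIG_XFS_QUOTA builds: a macro that drops its arguments leaves the caller's variables looking unused (triggering warnings) and skips all type checking, while an empty inline keeps both and still compiles away. A generic illustration, not tied to the XFS names:

    /* Macro stub: 'q' vanishes at preprocessing time, so a caller whose
     * only use of 'q' is this call gets an unused-variable warning. */
    #define quota_reserve_macro(q, nblks)   (0)

    struct quota;       /* opaque for the sketch */

    /* Inline stub: arguments are type-checked and count as used; the
     * compiler still optimizes the call away entirely. */
    static inline int quota_reserve_inline(struct quota *q, long nblks)
    {
        return 0;
    }
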
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
index 47e64170305d..bd8cad21998e 100644
--- a/include/asm-generic/stat.h
+++ b/include/asm-generic/stat.h
@@ -33,18 +33,18 @@ struct stat {
33 int st_blksize; /* Optimal block size for I/O. */ 33 int st_blksize; /* Optimal block size for I/O. */
34 int __pad2; 34 int __pad2;
35 long st_blocks; /* Number 512-byte blocks allocated. */ 35 long st_blocks; /* Number 512-byte blocks allocated. */
36 int st_atime; /* Time of last access. */ 36 long st_atime; /* Time of last access. */
37 unsigned int st_atime_nsec; 37 unsigned long st_atime_nsec;
38 int st_mtime; /* Time of last modification. */ 38 long st_mtime; /* Time of last modification. */
39 unsigned int st_mtime_nsec; 39 unsigned long st_mtime_nsec;
40 int st_ctime; /* Time of last status change. */ 40 long st_ctime; /* Time of last status change. */
41 unsigned int st_ctime_nsec; 41 unsigned long st_ctime_nsec;
42 unsigned int __unused4; 42 unsigned int __unused4;
43 unsigned int __unused5; 43 unsigned int __unused5;
44}; 44};
45 45
46#if __BITS_PER_LONG != 64
47/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ 46/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
47#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
48struct stat64 { 48struct stat64 {
49 unsigned long long st_dev; /* Device. */ 49 unsigned long long st_dev; /* Device. */
50 unsigned long long st_ino; /* File serial number. */ 50 unsigned long long st_ino; /* File serial number. */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5afa5b52063e..beafc156a535 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
432 * together with the @destroy function, 432 * together with the @destroy function,
433 * enables driver-specific objects derived from a ttm_buffer_object. 433 * enables driver-specific objects derived from a ttm_buffer_object.
434 * On successful return, the object kref and list_kref are set to 1. 434 * On successful return, the object kref and list_kref are set to 1.
435 * If a failure occurs, the function will call the @destroy function, or
436 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
437 * illegal and will likely cause memory corruption.
438 *
435 * Returns 439 * Returns
436 * -ENOMEM: Out of memory. 440 * -ENOMEM: Out of memory.
437 * -EINVAL: Invalid placement flags. 441 * -EINVAL: Invalid placement flags.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d01b4ddbdc56..8e0c848326b6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -206,14 +206,84 @@ struct ttm_tt {
206struct ttm_mem_type_manager; 206struct ttm_mem_type_manager;
207 207
208struct ttm_mem_type_manager_func { 208struct ttm_mem_type_manager_func {
209 /**
210 * struct ttm_mem_type_manager member init
211 *
212 * @man: Pointer to a memory type manager.
213 * @p_size: Implementation dependent, but typically the size of the
214 * range to be managed in pages.
215 *
216 * Called to initialize a private range manager. The function is
217 * expected to initialize the man::priv member.
218 * Returns 0 on success, negative error code on failure.
219 */
209 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); 220 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
221
222 /**
223 * struct ttm_mem_type_manager member takedown
224 *
225 * @man: Pointer to a memory type manager.
226 *
227 * Called to undo the setup done in init. All allocated resources
228 * should be freed.
229 */
210 int (*takedown)(struct ttm_mem_type_manager *man); 230 int (*takedown)(struct ttm_mem_type_manager *man);
231
232 /**
233 * struct ttm_mem_type_manager member get_node
234 *
235 * @man: Pointer to a memory type manager.
236 * @bo: Pointer to the buffer object we're allocating space for.
237 * @placement: Placement details.
238 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
239 *
240 * This function should allocate space in the memory type managed
241 * by @man. Placement details if
242 * applicable are given by @placement. If successful,
243 * @mem::mm_node should be set to a non-null value, and
244 * @mem::start should be set to a value identifying the beginning
245 * of the range allocated, and the function should return zero.
 246 * If the memory region cannot accommodate the buffer object, @mem::mm_node
247 * should be set to NULL, and the function should return 0.
 248 * If a system error occurred, preventing the request from being fulfilled,
249 * the function should return a negative error code.
250 *
251 * Note that @mem::mm_node will only be dereferenced by
252 * struct ttm_mem_type_manager functions and optionally by the driver,
253 * which has knowledge of the underlying type.
254 *
255 * This function may not be called from within atomic context, so
 256 * an implementation must protect any data structures managing the
 257 * space, using either a mutex or a spinlock.
258 */
211 int (*get_node)(struct ttm_mem_type_manager *man, 259 int (*get_node)(struct ttm_mem_type_manager *man,
212 struct ttm_buffer_object *bo, 260 struct ttm_buffer_object *bo,
213 struct ttm_placement *placement, 261 struct ttm_placement *placement,
214 struct ttm_mem_reg *mem); 262 struct ttm_mem_reg *mem);
263
264 /**
265 * struct ttm_mem_type_manager member put_node
266 *
267 * @man: Pointer to a memory type manager.
268 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
269 *
270 * This function frees memory type resources previously allocated
271 * and that are identified by @mem::mm_node and @mem::start. May not
272 * be called from within atomic context.
273 */
215 void (*put_node)(struct ttm_mem_type_manager *man, 274 void (*put_node)(struct ttm_mem_type_manager *man,
216 struct ttm_mem_reg *mem); 275 struct ttm_mem_reg *mem);
276
277 /**
278 * struct ttm_mem_type_manager member debug
279 *
280 * @man: Pointer to a memory type manager.
281 * @prefix: Prefix to be used in printout to identify the caller.
282 *
283 * This function is called to print out the state of the memory
284 * type manager to aid debugging of out-of-memory conditions.
285 * It may not be called from within atomic context.
286 */
217 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); 287 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
218}; 288};
219 289
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager {
231 uint64_t size; 301 uint64_t size;
232 uint32_t available_caching; 302 uint32_t available_caching;
233 uint32_t default_caching; 303 uint32_t default_caching;
304 const struct ttm_mem_type_manager_func *func;
305 void *priv;
234 306
235 /* 307 /*
236 * Protected by the bdev->lru_lock. 308 * Protected by the global->lru_lock.
237 * TODO: Consider one lru_lock per ttm_mem_type_manager.
238 * Plays ill with list removal, though.
239 */ 309 */
240 const struct ttm_mem_type_manager_func *func; 310
241 void *priv;
242 struct list_head lru; 311 struct list_head lru;
243}; 312};
244 313
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 000000000000..96c038e43d66
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,37 @@
1#ifndef _LINUX_ATOMIC_H
2#define _LINUX_ATOMIC_H
3#include <asm/atomic.h>
4
5/**
6 * atomic_inc_not_zero_hint - increment if not null
7 * @v: pointer of type atomic_t
8 * @hint: probable value of the atomic before the increment
9 *
 10 * This version of atomic_inc_not_zero() gives a hint of the probable
 11 * value of the atomic. This helps the processor avoid reading the memory
 12 * before doing the atomic read/modify/write cycle, lowering the
 13 * number of bus transactions on some arches.
14 *
15 * Returns: 0 if increment was not done, 1 otherwise.
16 */
17#ifndef atomic_inc_not_zero_hint
18static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
19{
20 int val, c = hint;
21
22 /* sanity test, should be removed by compiler if hint is a constant */
23 if (!hint)
24 return atomic_inc_not_zero(v);
25
26 do {
27 val = atomic_cmpxchg(v, c, c + 1);
28 if (val == c)
29 return 1;
30 c = val;
31 } while (c);
32
33 return 0;
34}
35#endif
36
37#endif /* _LINUX_ATOMIC_H */
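
A usage sketch for the new helper: on lookup paths where most objects sit at a known refcount, seeding the cmpxchg loop with that value saves the initial read. The struct and function names here are hypothetical.

    #include <linux/atomic.h>

    struct obj {
        atomic_t refcnt;            /* 0 means the object is being freed */
    };

    static struct obj *obj_tryget(struct obj *o)
    {
        /* most lookups hit unshared objects, so hint a refcount of 1 */
        if (!atomic_inc_not_zero_hint(&o->refcnt, 1))
            return NULL;            /* lost the race with teardown */
        return o;
    }
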
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ba679992d39b..35dcdb3589bc 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -66,10 +66,6 @@
66#define bio_offset(bio) bio_iovec((bio))->bv_offset 66#define bio_offset(bio) bio_iovec((bio))->bv_offset
67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
68#define bio_sectors(bio) ((bio)->bi_size >> 9) 68#define bio_sectors(bio) ((bio)->bi_size >> 9)
69#define bio_empty_barrier(bio) \
70 ((bio->bi_rw & REQ_HARDBARRIER) && \
71 !bio_has_data(bio) && \
72 !(bio->bi_rw & REQ_DISCARD))
73 69
74static inline unsigned int bio_cur_bytes(struct bio *bio) 70static inline unsigned int bio_cur_bytes(struct bio *bio)
75{ 71{
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0437ab6bb54c..46ad5197537a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -122,7 +122,6 @@ enum rq_flag_bits {
122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
124 124
125 __REQ_HARDBARRIER, /* may not be passed by drive either */
126 __REQ_SYNC, /* request is sync (sync write or read) */ 125 __REQ_SYNC, /* request is sync (sync write or read) */
127 __REQ_META, /* metadata io request */ 126 __REQ_META, /* metadata io request */
128 __REQ_DISCARD, /* request to discard sectors */ 127 __REQ_DISCARD, /* request to discard sectors */
@@ -159,7 +158,6 @@ enum rq_flag_bits {
159#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 158#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
160#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 159#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
161#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 160#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
162#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
163#define REQ_SYNC (1 << __REQ_SYNC) 161#define REQ_SYNC (1 << __REQ_SYNC)
164#define REQ_META (1 << __REQ_META) 162#define REQ_META (1 << __REQ_META)
165#define REQ_DISCARD (1 << __REQ_DISCARD) 163#define REQ_DISCARD (1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@ enum rq_flag_bits {
168#define REQ_FAILFAST_MASK \ 166#define REQ_FAILFAST_MASK \
169 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
170#define REQ_COMMON_MASK \ 168#define REQ_COMMON_MASK \
171 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ 169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
172 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
173#define REQ_CLONE_MASK REQ_COMMON_MASK 171#define REQ_CLONE_MASK REQ_COMMON_MASK
174 172
175#define REQ_UNPLUG (1 << __REQ_UNPLUG) 173#define REQ_UNPLUG (1 << __REQ_UNPLUG)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a599077d..aae86fd10c4f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
552 * it already be started by driver. 552 * it already be started by driver.
553 */ 553 */
554#define RQ_NOMERGE_FLAGS \ 554#define RQ_NOMERGE_FLAGS \
555 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ 555 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
556 REQ_FLUSH | REQ_FUA)
557#define rq_mergeable(rq) \ 556#define rq_mergeable(rq) \
558 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 557 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
559 (((rq)->cmd_flags & REQ_DISCARD) || \ 558 (((rq)->cmd_flags & REQ_DISCARD) || \
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 749f01ccd26e..010e2d87ed75 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -197,6 +197,21 @@ enum dccp_feature_numbers {
197 DCCPF_MAX_CCID_SPECIFIC = 255, 197 DCCPF_MAX_CCID_SPECIFIC = 255,
198}; 198};
199 199
200/* DCCP socket control message types for cmsg */
201enum dccp_cmsg_type {
202 DCCP_SCM_PRIORITY = 1,
203 DCCP_SCM_QPOLICY_MAX = 0xFFFF,
204 /* ^-- Up to here reserved exclusively for qpolicy parameters */
205 DCCP_SCM_MAX
206};
207
208/* DCCP priorities for outgoing/queued packets */
209enum dccp_packet_dequeueing_policy {
210 DCCPQ_POLICY_SIMPLE,
211 DCCPQ_POLICY_PRIO,
212 DCCPQ_POLICY_MAX
213};
214
200/* DCCP socket options */ 215/* DCCP socket options */
201#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */ 216#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */
202#define DCCP_SOCKOPT_SERVICE 2 217#define DCCP_SOCKOPT_SERVICE 2
@@ -210,6 +225,8 @@ enum dccp_feature_numbers {
210#define DCCP_SOCKOPT_CCID 13 225#define DCCP_SOCKOPT_CCID 13
211#define DCCP_SOCKOPT_TX_CCID 14 226#define DCCP_SOCKOPT_TX_CCID 14
212#define DCCP_SOCKOPT_RX_CCID 15 227#define DCCP_SOCKOPT_RX_CCID 15
228#define DCCP_SOCKOPT_QPOLICY_ID 16
229#define DCCP_SOCKOPT_QPOLICY_TXQLEN 17
213#define DCCP_SOCKOPT_CCID_RX_INFO 128 230#define DCCP_SOCKOPT_CCID_RX_INFO 128
214#define DCCP_SOCKOPT_CCID_TX_INFO 192 231#define DCCP_SOCKOPT_CCID_TX_INFO 192
215 232
@@ -458,10 +475,13 @@ struct dccp_ackvec;
458 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) 475 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
459 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) 476 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
460 * @dccps_options_received - parsed set of retrieved options 477 * @dccps_options_received - parsed set of retrieved options
478 * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
479 * @dccps_tx_qlen - maximum length of the TX queue
461 * @dccps_role - role of this sock, one of %dccp_role 480 * @dccps_role - role of this sock, one of %dccp_role
462 * @dccps_hc_rx_insert_options - receiver wants to add options when acking 481 * @dccps_hc_rx_insert_options - receiver wants to add options when acking
463 * @dccps_hc_tx_insert_options - sender wants to add options when sending 482 * @dccps_hc_tx_insert_options - sender wants to add options when sending
464 * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) 483 * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
484 * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
465 * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets 485 * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
466 * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing) 486 * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
467 * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) 487 * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
@@ -499,10 +519,13 @@ struct dccp_sock {
499 struct ccid *dccps_hc_rx_ccid; 519 struct ccid *dccps_hc_rx_ccid;
500 struct ccid *dccps_hc_tx_ccid; 520 struct ccid *dccps_hc_tx_ccid;
501 struct dccp_options_received dccps_options_received; 521 struct dccp_options_received dccps_options_received;
522 __u8 dccps_qpolicy;
523 __u32 dccps_tx_qlen;
502 enum dccp_role dccps_role:2; 524 enum dccp_role dccps_role:2;
503 __u8 dccps_hc_rx_insert_options:1; 525 __u8 dccps_hc_rx_insert_options:1;
504 __u8 dccps_hc_tx_insert_options:1; 526 __u8 dccps_hc_tx_insert_options:1;
505 __u8 dccps_server_timewait:1; 527 __u8 dccps_server_timewait:1;
528 __u8 dccps_sync_scheduled:1;
506 struct tasklet_struct dccps_xmitlet; 529 struct tasklet_struct dccps_xmitlet;
507 struct timer_list dccps_xmit_timer; 530 struct timer_list dccps_xmit_timer;
508}; 531};
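
From userspace, the new qpolicy knobs are driven with setsockopt() plus a per-packet cmsg. A sketch, assuming a kernel with this patch and a <linux/dccp.h> exporting the new constants (error handling trimmed; SOL_DCCP is 269 if the libc headers lack it):

    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <stdint.h>
    #include <string.h>
    #include <linux/dccp.h>

    #ifndef SOL_DCCP
    #define SOL_DCCP 269
    #endif

    static int enable_prio_policy(int sk, uint32_t qlen)
    {
        int id = DCCPQ_POLICY_PRIO;

        if (setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
                       &id, sizeof(id)) < 0)
            return -1;
        /* bound the TX queue the policy manages */
        return setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
                          &qlen, sizeof(qlen));
    }

    static ssize_t send_with_prio(int sk, const void *buf, size_t len,
                                  uint32_t prio)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        char cbuf[CMSG_SPACE(sizeof(prio))];
        struct msghdr msg = {
            .msg_iov = &iov,        .msg_iovlen = 1,
            .msg_control = cbuf,    .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_DCCP;
        cmsg->cmsg_type = DCCP_SCM_PRIORITY;    /* per-packet priority */
        cmsg->cmsg_len = CMSG_LEN(sizeof(prio));
        memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

        return sendmsg(sk, &msg, 0);
    }
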
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9b2a0158f399..ef44c7a0638c 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.9rc2" 56#define REL_VERSION "8.3.9"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 95 59#define PRO_VERSION_MAX 95
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 6628a507fd3b..1908929204a9 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -691,7 +691,9 @@ struct ethtool_ops {
691#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ 691#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
692#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ 692#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
693#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ 693#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */
694#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ 694/* Get link status for host, i.e. whether the interface *and* the
695 * physical port (if there is one) are up (ethtool_value). */
696#define ETHTOOL_GLINK 0x0000000a
695#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ 697#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
696#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ 698#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
697#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ 699#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 69b43dbea6c6..45266b75409a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,54 +91,6 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
91#define BPF_TAX 0x00 91#define BPF_TAX 0x00
92#define BPF_TXA 0x80 92#define BPF_TXA 0x80
93 93
94enum {
95 BPF_S_RET_K = 0,
96 BPF_S_RET_A,
97 BPF_S_ALU_ADD_K,
98 BPF_S_ALU_ADD_X,
99 BPF_S_ALU_SUB_K,
100 BPF_S_ALU_SUB_X,
101 BPF_S_ALU_MUL_K,
102 BPF_S_ALU_MUL_X,
103 BPF_S_ALU_DIV_X,
104 BPF_S_ALU_AND_K,
105 BPF_S_ALU_AND_X,
106 BPF_S_ALU_OR_K,
107 BPF_S_ALU_OR_X,
108 BPF_S_ALU_LSH_K,
109 BPF_S_ALU_LSH_X,
110 BPF_S_ALU_RSH_K,
111 BPF_S_ALU_RSH_X,
112 BPF_S_ALU_NEG,
113 BPF_S_LD_W_ABS,
114 BPF_S_LD_H_ABS,
115 BPF_S_LD_B_ABS,
116 BPF_S_LD_W_LEN,
117 BPF_S_LD_W_IND,
118 BPF_S_LD_H_IND,
119 BPF_S_LD_B_IND,
120 BPF_S_LD_IMM,
121 BPF_S_LDX_W_LEN,
122 BPF_S_LDX_B_MSH,
123 BPF_S_LDX_IMM,
124 BPF_S_MISC_TAX,
125 BPF_S_MISC_TXA,
126 BPF_S_ALU_DIV_K,
127 BPF_S_LD_MEM,
128 BPF_S_LDX_MEM,
129 BPF_S_ST,
130 BPF_S_STX,
131 BPF_S_JMP_JA,
132 BPF_S_JMP_JEQ_K,
133 BPF_S_JMP_JEQ_X,
134 BPF_S_JMP_JGE_K,
135 BPF_S_JMP_JGE_X,
136 BPF_S_JMP_JGT_K,
137 BPF_S_JMP_JGT_X,
138 BPF_S_JMP_JSET_K,
139 BPF_S_JMP_JSET_X,
140};
141
142#ifndef BPF_MAXINSNS 94#ifndef BPF_MAXINSNS
143#define BPF_MAXINSNS 4096 95#define BPF_MAXINSNS 4096
144#endif 96#endif
@@ -172,7 +124,9 @@ enum {
172#define SKF_AD_MARK 20 124#define SKF_AD_MARK 20
173#define SKF_AD_QUEUE 24 125#define SKF_AD_QUEUE 24
174#define SKF_AD_HATYPE 28 126#define SKF_AD_HATYPE 28
175#define SKF_AD_MAX 32 127#define SKF_AD_RXHASH 32
128#define SKF_AD_CPU 36
129#define SKF_AD_MAX 40
176#define SKF_NET_OFF (-0x100000) 130#define SKF_NET_OFF (-0x100000)
177#define SKF_LL_OFF (-0x200000) 131#define SKF_LL_OFF (-0x200000)
178 132
@@ -194,8 +148,8 @@ struct sk_buff;
194struct sock; 148struct sock;
195 149
196extern int sk_filter(struct sock *sk, struct sk_buff *skb); 150extern int sk_filter(struct sock *sk, struct sk_buff *skb);
197extern unsigned int sk_run_filter(struct sk_buff *skb, 151extern unsigned int sk_run_filter(const struct sk_buff *skb,
198 struct sock_filter *filter, int flen); 152 const struct sock_filter *filter);
199extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 153extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
200extern int sk_detach_filter(struct sock *sk); 154extern int sk_detach_filter(struct sock *sk);
201extern int sk_chk_filter(struct sock_filter *filter, int flen); 155extern int sk_chk_filter(struct sock_filter *filter, int flen);
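
The new ancillary offsets are reachable from classic BPF via loads at SKF_AD_OFF + SKF_AD_*. A userspace sketch that accepts only packets processed on CPU 0, assuming a kernel with this change:

    #include <sys/socket.h>
    #include <linux/filter.h>

    static struct sock_filter code[] = {
        /* A <- cpu number handling this packet */
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),   /* cpu == 0 ? */
        BPF_STMT(BPF_RET | BPF_K, 0xffff),              /* yes: accept */
        BPF_STMT(BPF_RET | BPF_K, 0),                   /* no: drop */
    };

    static int attach_cpu0_filter(int fd)
    {
        struct sock_fprog prog = {
            .len    = sizeof(code) / sizeof(code[0]),
            .filter = code,
        };

        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                          &prog, sizeof(prog));
    }
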
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8a389b608ce3..41cb31f14ee3 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -96,11 +96,15 @@
96 */ 96 */
97#define in_nmi() (preempt_count() & NMI_MASK) 97#define in_nmi() (preempt_count() & NMI_MASK)
98 98
99#if defined(CONFIG_PREEMPT) 99#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
100# define PREEMPT_INATOMIC_BASE kernel_locked() 100# define PREEMPT_INATOMIC_BASE kernel_locked()
101# define PREEMPT_CHECK_OFFSET 1
102#else 101#else
103# define PREEMPT_INATOMIC_BASE 0 102# define PREEMPT_INATOMIC_BASE 0
103#endif
104
105#if defined(CONFIG_PREEMPT)
106# define PREEMPT_CHECK_OFFSET 1
107#else
104# define PREEMPT_CHECK_OFFSET 0 108# define PREEMPT_CHECK_OFFSET 0
105#endif 109#endif
106 110
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e9138198e823..b676c585574e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,6 +5,7 @@
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/hardirq.h>
8 9
9#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
10 11
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 3c5d6b6e765c..cec17cf6cac2 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller 2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2010 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -77,13 +77,26 @@
77 /* Configuration Register1 */ 77 /* Configuration Register1 */
78#define ADP5588_AUTO_INC (1 << 7) 78#define ADP5588_AUTO_INC (1 << 7)
79#define ADP5588_GPIEM_CFG (1 << 6) 79#define ADP5588_GPIEM_CFG (1 << 6)
80#define ADP5588_OVR_FLOW_M (1 << 5)
80#define ADP5588_INT_CFG (1 << 4) 81#define ADP5588_INT_CFG (1 << 4)
82#define ADP5588_OVR_FLOW_IEN (1 << 3)
83#define ADP5588_K_LCK_IM (1 << 2)
81#define ADP5588_GPI_IEN (1 << 1) 84#define ADP5588_GPI_IEN (1 << 1)
85#define ADP5588_KE_IEN (1 << 0)
82 86
83/* Interrupt Status Register */ 87/* Interrupt Status Register */
88#define ADP5588_CMP2_INT (1 << 5)
89#define ADP5588_CMP1_INT (1 << 4)
90#define ADP5588_OVR_FLOW_INT (1 << 3)
91#define ADP5588_K_LCK_INT (1 << 2)
84#define ADP5588_GPI_INT (1 << 1) 92#define ADP5588_GPI_INT (1 << 1)
85#define ADP5588_KE_INT (1 << 0) 93#define ADP5588_KE_INT (1 << 0)
86 94
95/* Key Lock and Event Counter Register */
96#define ADP5588_K_LCK_EN (1 << 6)
97#define ADP5588_LCK21 0x30
98#define ADP5588_KEC 0xF
99
87#define ADP5588_MAXGPIO 18 100#define ADP5588_MAXGPIO 18
88#define ADP5588_BANK(offs) ((offs) >> 3) 101#define ADP5588_BANK(offs) ((offs) >> 3)
89#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) 102#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0d241a5c4909..f7e73c338c40 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -102,7 +102,9 @@ struct __fdb_entry {
102#include <linux/netdevice.h> 102#include <linux/netdevice.h>
103 103
104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
105extern int (*br_should_route_hook)(struct sk_buff *skb); 105
106typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
107extern br_should_route_hook_t __rcu *br_should_route_hook;
106 108
107#endif 109#endif
108 110
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 2fc66dd783ee..6485d2a89bec 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -80,6 +80,24 @@ struct rtnl_link_ifmap {
80 __u8 port; 80 __u8 port;
81}; 81};
82 82
83/*
84 * IFLA_AF_SPEC
85 * Contains nested attributes for address family specific attributes.
 86 * Each address family may create an attribute with the address family
87 * number as type and create its own attribute structure in it.
88 *
89 * Example:
90 * [IFLA_AF_SPEC] = {
91 * [AF_INET] = {
92 * [IFLA_INET_CONF] = ...,
93 * },
94 * [AF_INET6] = {
95 * [IFLA_INET6_FLAGS] = ...,
96 * [IFLA_INET6_CONF] = ...,
97 * }
98 * }
99 */
100
83enum { 101enum {
84 IFLA_UNSPEC, 102 IFLA_UNSPEC,
85 IFLA_ADDRESS, 103 IFLA_ADDRESS,
@@ -116,6 +134,7 @@ enum {
116 IFLA_STATS64, 134 IFLA_STATS64,
117 IFLA_VF_PORTS, 135 IFLA_VF_PORTS,
118 IFLA_PORT_SELF, 136 IFLA_PORT_SELF,
137 IFLA_AF_SPEC,
119 __IFLA_MAX 138 __IFLA_MAX
120}; 139};
121 140
@@ -128,6 +147,14 @@ enum {
128#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) 147#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
129#endif 148#endif
130 149
150enum {
151 IFLA_INET_UNSPEC,
152 IFLA_INET_CONF,
153 __IFLA_INET_MAX,
154};
155
156#define IFLA_INET_MAX (__IFLA_INET_MAX - 1)
157
131/* ifi_flags. 158/* ifi_flags.
132 159
133 IFF_* flags. 160 IFF_* flags.
@@ -232,6 +259,7 @@ enum macvlan_mode {
232 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */ 259 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
233 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ 260 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
234 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ 261 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
262 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
235}; 263};
236 264
237/* SR-IOV virtual function management section */ 265/* SR-IOV virtual function management section */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 8a2fd66a8b5f..e28b2e4959d4 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -25,19 +25,25 @@ struct macvlan_port;
25struct macvtap_queue; 25struct macvtap_queue;
26 26
27/** 27/**
28 * struct macvlan_rx_stats - MACVLAN percpu rx stats 28 * struct macvlan_pcpu_stats - MACVLAN percpu stats
29 * @rx_packets: number of received packets 29 * @rx_packets: number of received packets
30 * @rx_bytes: number of received bytes 30 * @rx_bytes: number of received bytes
31 * @rx_multicast: number of received multicast packets 31 * @rx_multicast: number of received multicast packets
32 * @tx_packets: number of transmitted packets
33 * @tx_bytes: number of transmitted bytes
32 * @syncp: synchronization point for 64bit counters 34 * @syncp: synchronization point for 64bit counters
33 * @rx_errors: number of errors 35 * @rx_errors: number of rx errors
36 * @tx_dropped: number of tx dropped packets
34 */ 37 */
35struct macvlan_rx_stats { 38struct macvlan_pcpu_stats {
36 u64 rx_packets; 39 u64 rx_packets;
37 u64 rx_bytes; 40 u64 rx_bytes;
38 u64 rx_multicast; 41 u64 rx_multicast;
42 u64 tx_packets;
43 u64 tx_bytes;
39 struct u64_stats_sync syncp; 44 struct u64_stats_sync syncp;
40 unsigned long rx_errors; 45 u32 rx_errors;
46 u32 tx_dropped;
41}; 47};
42 48
43/* 49/*
@@ -52,7 +58,7 @@ struct macvlan_dev {
52 struct hlist_node hlist; 58 struct hlist_node hlist;
53 struct macvlan_port *port; 59 struct macvlan_port *port;
54 struct net_device *lowerdev; 60 struct net_device *lowerdev;
55 struct macvlan_rx_stats __percpu *rx_stats; 61 struct macvlan_pcpu_stats __percpu *pcpu_stats;
56 enum macvlan_mode mode; 62 enum macvlan_mode mode;
57 int (*receive)(struct sk_buff *skb); 63 int (*receive)(struct sk_buff *skb);
58 int (*forward)(struct net_device *dev, struct sk_buff *skb); 64 int (*forward)(struct net_device *dev, struct sk_buff *skb);
@@ -64,18 +70,18 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
64 unsigned int len, bool success, 70 unsigned int len, bool success,
65 bool multicast) 71 bool multicast)
66{ 72{
67 struct macvlan_rx_stats *rx_stats;
68
69 rx_stats = this_cpu_ptr(vlan->rx_stats);
70 if (likely(success)) { 73 if (likely(success)) {
71 u64_stats_update_begin(&rx_stats->syncp); 74 struct macvlan_pcpu_stats *pcpu_stats;
72 rx_stats->rx_packets++;; 75
73 rx_stats->rx_bytes += len; 76 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
77 u64_stats_update_begin(&pcpu_stats->syncp);
78 pcpu_stats->rx_packets++;
79 pcpu_stats->rx_bytes += len;
74 if (multicast) 80 if (multicast)
75 rx_stats->rx_multicast++; 81 pcpu_stats->rx_multicast++;
76 u64_stats_update_end(&rx_stats->syncp); 82 u64_stats_update_end(&pcpu_stats->syncp);
77 } else { 83 } else {
78 rx_stats->rx_errors++; 84 this_cpu_inc(vlan->pcpu_stats->rx_errors);
79 } 85 }
80} 86}
81 87
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c2f3a72712ce..635e1faec412 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,6 +339,31 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
339 } 339 }
340} 340}
341 341
342/**
343 * vlan_get_protocol - get protocol EtherType.
344 * @skb: skbuff to query
345 *
346 * Returns the EtherType of the packet, regardless of whether it is
347 * vlan encapsulated (normal or hardware accelerated) or not.
348 */
349static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
350{
351 __be16 protocol = 0;
352
353 if (vlan_tx_tag_present(skb) ||
354 skb->protocol != cpu_to_be16(ETH_P_8021Q))
355 protocol = skb->protocol;
356 else {
357 __be16 proto, *protop;
358 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
359 h_vlan_encapsulated_proto),
360 sizeof(proto), &proto);
361 if (likely(protop))
362 protocol = *protop;
363 }
364
365 return protocol;
366}
342#endif /* __KERNEL__ */ 367#endif /* __KERNEL__ */
343 368
344/* VLAN IOCTLs are found in sockios.h */ 369/* VLAN IOCTLs are found in sockios.h */
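
A typical consumer of the new helper is a transmit path that needs the L3 EtherType for checksum or TSO setup, regardless of whether the tag is hardware-accelerated or carried in-band. Sketch (the function name is illustrative):

    #include <linux/if_vlan.h>
    #include <linux/if_ether.h>

    static bool skb_is_ipv4(const struct sk_buff *skb)
    {
        /* sees through both accelerated and in-band 802.1Q tags */
        return vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP);
    }
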
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 93fc2449af10..c4987f265109 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -167,10 +167,10 @@ struct ip_sf_socklist {
167 */ 167 */
168 168
169struct ip_mc_socklist { 169struct ip_mc_socklist {
170 struct ip_mc_socklist *next; 170 struct ip_mc_socklist __rcu *next_rcu;
171 struct ip_mreqn multi; 171 struct ip_mreqn multi;
172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
173 struct ip_sf_socklist *sflist; 173 struct ip_sf_socklist __rcu *sflist;
174 struct rcu_head rcu; 174 struct rcu_head rcu;
175}; 175};
176 176
@@ -186,11 +186,14 @@ struct ip_sf_list {
186struct ip_mc_list { 186struct ip_mc_list {
187 struct in_device *interface; 187 struct in_device *interface;
188 __be32 multiaddr; 188 __be32 multiaddr;
189 unsigned int sfmode;
189 struct ip_sf_list *sources; 190 struct ip_sf_list *sources;
190 struct ip_sf_list *tomb; 191 struct ip_sf_list *tomb;
191 unsigned int sfmode;
192 unsigned long sfcount[2]; 192 unsigned long sfcount[2];
193 struct ip_mc_list *next; 193 union {
194 struct ip_mc_list *next;
195 struct ip_mc_list __rcu *next_rcu;
196 };
194 struct timer_list timer; 197 struct timer_list timer;
195 int users; 198 int users;
196 atomic_t refcnt; 199 atomic_t refcnt;
@@ -201,6 +204,7 @@ struct ip_mc_list {
201 char loaded; 204 char loaded;
202 unsigned char gsquery; /* check source marks? */ 205 unsigned char gsquery; /* check source marks? */
203 unsigned char crcount; 206 unsigned char crcount;
207 struct rcu_head rcu;
204}; 208};
205 209
206/* V3 exponential field decoding */ 210/* V3 exponential field decoding */
@@ -234,7 +238,7 @@ extern void ip_mc_unmap(struct in_device *);
234extern void ip_mc_remap(struct in_device *); 238extern void ip_mc_remap(struct in_device *);
235extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); 239extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
236extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); 240extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
237extern void ip_mc_rejoin_group(struct ip_mc_list *im); 241extern void ip_mc_rejoin_groups(struct in_device *in_dev);
238 242
239#endif 243#endif
240#endif 244#endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ccd5b07d678d..ae8fdc54e0c0 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -41,10 +41,12 @@ enum
41 __IPV4_DEVCONF_MAX 41 __IPV4_DEVCONF_MAX
42}; 42};
43 43
44#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
45
44struct ipv4_devconf { 46struct ipv4_devconf {
45 void *sysctl; 47 void *sysctl;
46 int data[__IPV4_DEVCONF_MAX - 1]; 48 int data[IPV4_DEVCONF_MAX];
47 DECLARE_BITMAP(state, __IPV4_DEVCONF_MAX - 1); 49 DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
48}; 50};
49 51
50struct in_device { 52struct in_device {
@@ -52,9 +54,8 @@ struct in_device {
52 atomic_t refcnt; 54 atomic_t refcnt;
53 int dead; 55 int dead;
54 struct in_ifaddr *ifa_list; /* IP ifaddr chain */ 56 struct in_ifaddr *ifa_list; /* IP ifaddr chain */
55 rwlock_t mc_list_lock; 57 struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
56 struct ip_mc_list *mc_list; /* IP multicast filter chain */ 58 int mc_count; /* Number of installed mcasts */
57 int mc_count; /* Number of installed mcasts */
58 spinlock_t mc_tomb_lock; 59 spinlock_t mc_tomb_lock;
59 struct ip_mc_list *mc_tomb; 60 struct ip_mc_list *mc_tomb;
60 unsigned long mr_v1_seen; 61 unsigned long mr_v1_seen;
@@ -91,7 +92,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
91 92
92static inline void ipv4_devconf_setall(struct in_device *in_dev) 93static inline void ipv4_devconf_setall(struct in_device *in_dev)
93{ 94{
94 bitmap_fill(in_dev->cnf.state, __IPV4_DEVCONF_MAX - 1); 95 bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
95} 96}
96 97
97#define IN_DEV_CONF_GET(in_dev, attr) \ 98#define IN_DEV_CONF_GET(in_dev, attr) \
@@ -221,7 +222,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev)
221 222
222static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) 223static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
223{ 224{
224 return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held()); 225 return rtnl_dereference(dev->ip_ptr);
225} 226}
226 227
227extern void in_dev_finish_destroy(struct in_device *idev); 228extern void in_dev_finish_destroy(struct in_device *idev);
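
__in_dev_get_rtnl() is now expressed with rtnl_dereference(), which asserts RTNL via lockdep instead of requiring an RCU read-side section. The two access idioms for the newly __rcu-annotated dev->ip_ptr, side by side (a sketch with illustrative function names):

    #include <linux/rtnetlink.h>
    #include <linux/inetdevice.h>

    /* Reader: any context, under rcu_read_lock() */
    static bool dev_has_in_device(struct net_device *dev)
    {
        bool ret;

        rcu_read_lock();
        ret = rcu_dereference(dev->ip_ptr) != NULL;
        rcu_read_unlock();
        return ret;
    }

    /* Writer/configurer: RTNL held, no RCU section needed */
    static struct in_device *in_dev_rtnl(struct net_device *dev)
    {
        ASSERT_RTNL();
        return rtnl_dereference(dev->ip_ptr);
    }
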
diff --git a/include/linux/input.h b/include/linux/input.h
index 51af441f3a21..6ef44465db8d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1406,6 +1406,8 @@ static inline void input_set_drvdata(struct input_dev *dev, void *data)
1406int __must_check input_register_device(struct input_dev *); 1406int __must_check input_register_device(struct input_dev *);
1407void input_unregister_device(struct input_dev *); 1407void input_unregister_device(struct input_dev *);
1408 1408
1409void input_reset_device(struct input_dev *);
1410
1409int __must_check input_register_handler(struct input_handler *); 1411int __must_check input_register_handler(struct input_handler *);
1410void input_unregister_handler(struct input_handler *); 1412void input_unregister_handler(struct input_handler *);
1411 1413
@@ -1421,7 +1423,7 @@ void input_release_device(struct input_handle *);
1421int input_open_device(struct input_handle *); 1423int input_open_device(struct input_handle *);
1422void input_close_device(struct input_handle *); 1424void input_close_device(struct input_handle *);
1423 1425
1424int input_flush_device(struct input_handle* handle, struct file* file); 1426int input_flush_device(struct input_handle *handle, struct file *file);
1425 1427
1426void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); 1428void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
1427void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); 1429void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 3e70b21884a9..b2eee896dcbc 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -76,7 +76,6 @@ int put_io_context(struct io_context *ioc);
76void exit_io_context(struct task_struct *task); 76void exit_io_context(struct task_struct *task);
77struct io_context *get_io_context(gfp_t gfp_flags, int node); 77struct io_context *get_io_context(gfp_t gfp_flags, int node);
78struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 78struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
79void copy_io_context(struct io_context **pdst, struct io_context **psrc);
80#else 79#else
81static inline void exit_io_context(struct task_struct *task) 80static inline void exit_io_context(struct task_struct *task)
82{ 81{
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8e429d0e0405..0c997767429a 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -364,7 +364,7 @@ struct ipv6_pinfo {
364 364
365 __u32 dst_cookie; 365 __u32 dst_cookie;
366 366
367 struct ipv6_mc_socklist *ipv6_mc_list; 367 struct ipv6_mc_socklist __rcu *ipv6_mc_list;
368 struct ipv6_ac_socklist *ipv6_ac_list; 368 struct ipv6_ac_socklist *ipv6_ac_list;
369 struct ipv6_fl_socklist *ipv6_fl_list; 369 struct ipv6_fl_socklist *ipv6_fl_list;
370 370
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ced1159fa4f2..47cb09edec1a 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -3,129 +3,156 @@
3 3
4/* jhash.h: Jenkins hash support. 4/* jhash.h: Jenkins hash support.
5 * 5 *
 6 * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)                6 * Copyright (C) 2006 Bob Jenkins (bob_jenkins@burtleburtle.net)
7 * 7 *
8 * http://burtleburtle.net/bob/hash/ 8 * http://burtleburtle.net/bob/hash/
9 * 9 *
10 * These are the credits from Bob's sources: 10 * These are the credits from Bob's sources:
11 * 11 *
12 * lookup2.c, by Bob Jenkins, December 1996, Public Domain. 12 * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
13 * hash(), hash2(), hash3, and mix() are externally useful functions.
14 * Routines to test the hash are included if SELF_TEST is defined.
15 * You can use this free for any purpose. It has no warranty.
16 * 13 *
17 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 14 * These are functions for producing 32-bit hashes for hash table lookup.
15 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
16 * are externally useful functions. Routines to test the hash are included
17 * if SELF_TEST is defined. You can use this free for any purpose. It's in
18 * the public domain. It has no warranty.
19 *
20 * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
18 * 21 *
19 * I've modified Bob's hash to be useful in the Linux kernel, and 22 * I've modified Bob's hash to be useful in the Linux kernel, and
20 * any bugs present are surely my fault. -DaveM 23 * any bugs present are my fault.
24 * Jozsef
21 */ 25 */
26#include <linux/bitops.h>
27#include <linux/unaligned/packed_struct.h>
28
                                                                                 29/* Best hash sizes are powers of two */
                                                                                 30#define jhash_size(n)   ((u32)1<<(n))
                                                                                 31/* Mask the hash value, i.e. (value & jhash_mask(n)) instead of (value % n) */
32#define jhash_mask(n) (jhash_size(n)-1)
33
34/* __jhash_mix -- mix 3 32-bit values reversibly. */
35#define __jhash_mix(a, b, c) \
36{ \
37 a -= c; a ^= rol32(c, 4); c += b; \
38 b -= a; b ^= rol32(a, 6); a += c; \
39 c -= b; c ^= rol32(b, 8); b += a; \
40 a -= c; a ^= rol32(c, 16); c += b; \
41 b -= a; b ^= rol32(a, 19); a += c; \
42 c -= b; c ^= rol32(b, 4); b += a; \
43}
22 44
23/* NOTE: Arguments are modified. */ 45/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
24#define __jhash_mix(a, b, c) \ 46#define __jhash_final(a, b, c) \
25{ \ 47{ \
26 a -= b; a -= c; a ^= (c>>13); \ 48 c ^= b; c -= rol32(b, 14); \
27 b -= c; b -= a; b ^= (a<<8); \ 49 a ^= c; a -= rol32(c, 11); \
28 c -= a; c -= b; c ^= (b>>13); \ 50 b ^= a; b -= rol32(a, 25); \
29 a -= b; a -= c; a ^= (c>>12); \ 51 c ^= b; c -= rol32(b, 16); \
30 b -= c; b -= a; b ^= (a<<16); \ 52 a ^= c; a -= rol32(c, 4); \
31 c -= a; c -= b; c ^= (b>>5); \ 53 b ^= a; b -= rol32(a, 14); \
32 a -= b; a -= c; a ^= (c>>3); \ 54 c ^= b; c -= rol32(b, 24); \
33 b -= c; b -= a; b ^= (a<<10); \
34 c -= a; c -= b; c ^= (b>>15); \
35} 55}
36 56
37/* The golden ration: an arbitrary value */ 57/* An arbitrary initial parameter */
38#define JHASH_GOLDEN_RATIO 0x9e3779b9 58#define JHASH_INITVAL 0xdeadbeef
39 59
40/* The most generic version, hashes an arbitrary sequence 60/* jhash - hash an arbitrary key
41 * of bytes. No alignment or length assumptions are made about 61 * @k: sequence of bytes as key
42 * the input key. 62 * @length: the length of the key
63 * @initval: the previous hash, or an arbitray value
64 *
65 * The generic version, hashes an arbitrary sequence of bytes.
66 * No alignment or length assumptions are made about the input key.
67 *
68 * Returns the hash value of the key. The result depends on endianness.
43 */ 69 */
44static inline u32 jhash(const void *key, u32 length, u32 initval) 70static inline u32 jhash(const void *key, u32 length, u32 initval)
45{ 71{
46 u32 a, b, c, len; 72 u32 a, b, c;
47 const u8 *k = key; 73 const u8 *k = key;
48 74
49 len = length; 75 /* Set up the internal state */
50 a = b = JHASH_GOLDEN_RATIO; 76 a = b = c = JHASH_INITVAL + length + initval;
51 c = initval;
52
53 while (len >= 12) {
54 a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
55 b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
56 c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
57
58 __jhash_mix(a,b,c);
59 77
78 /* All but the last block: affect some 32 bits of (a,b,c) */
79 while (length > 12) {
80 a += __get_unaligned_cpu32(k);
81 b += __get_unaligned_cpu32(k + 4);
82 c += __get_unaligned_cpu32(k + 8);
83 __jhash_mix(a, b, c);
84 length -= 12;
60 k += 12; 85 k += 12;
61 len -= 12;
62 } 86 }
63 87 /* Last block: affect all 32 bits of (c) */
64 c += length; 88 /* All the case statements fall through */
65 switch (len) { 89 switch (length) {
66 case 11: c += ((u32)k[10]<<24); 90 case 12: c += (u32)k[11]<<24;
67 case 10: c += ((u32)k[9]<<16); 91 case 11: c += (u32)k[10]<<16;
68 case 9 : c += ((u32)k[8]<<8); 92 case 10: c += (u32)k[9]<<8;
69 case 8 : b += ((u32)k[7]<<24); 93 case 9: c += k[8];
70 case 7 : b += ((u32)k[6]<<16); 94 case 8: b += (u32)k[7]<<24;
71 case 6 : b += ((u32)k[5]<<8); 95 case 7: b += (u32)k[6]<<16;
72 case 5 : b += k[4]; 96 case 6: b += (u32)k[5]<<8;
73 case 4 : a += ((u32)k[3]<<24); 97 case 5: b += k[4];
74 case 3 : a += ((u32)k[2]<<16); 98 case 4: a += (u32)k[3]<<24;
75 case 2 : a += ((u32)k[1]<<8); 99 case 3: a += (u32)k[2]<<16;
76 case 1 : a += k[0]; 100 case 2: a += (u32)k[1]<<8;
77 }; 101 case 1: a += k[0];
78 102 __jhash_final(a, b, c);
79 __jhash_mix(a,b,c); 103 case 0: /* Nothing left to add */
104 break;
105 }
80 106
81 return c; 107 return c;
82} 108}
83 109
84/* A special optimized version that handles 1 or more of u32s. 110/* jhash2 - hash an array of u32's
85 * The length parameter here is the number of u32s in the key. 111 * @k: the key which must be an array of u32's
112 * @length: the number of u32's in the key
113 * @initval: the previous hash, or an arbitray value
114 *
115 * Returns the hash value of the key.
86 */ 116 */
87static inline u32 jhash2(const u32 *k, u32 length, u32 initval) 117static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
88{ 118{
89 u32 a, b, c, len; 119 u32 a, b, c;
90 120
91 a = b = JHASH_GOLDEN_RATIO; 121 /* Set up the internal state */
92 c = initval; 122 a = b = c = JHASH_INITVAL + (length<<2) + initval;
93 len = length;
94 123
95 while (len >= 3) { 124 /* Handle most of the key */
125 while (length > 3) {
96 a += k[0]; 126 a += k[0];
97 b += k[1]; 127 b += k[1];
98 c += k[2]; 128 c += k[2];
99 __jhash_mix(a, b, c); 129 __jhash_mix(a, b, c);
100 k += 3; len -= 3; 130 length -= 3;
131 k += 3;
101 } 132 }
102 133
103 c += length * 4; 134 /* Handle the last 3 u32's: all the case statements fall through */
104 135 switch (length) {
105 switch (len) { 136 case 3: c += k[2];
106 case 2 : b += k[1]; 137 case 2: b += k[1];
107 case 1 : a += k[0]; 138 case 1: a += k[0];
108 }; 139 __jhash_final(a, b, c);
109 140 case 0: /* Nothing left to add */
110 __jhash_mix(a,b,c); 141 break;
142 }
111 143
112 return c; 144 return c;
113} 145}
114 146
115 147
116/* A special ultra-optimized versions that knows they are hashing exactly 148/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
117 * 3, 2 or 1 word(s).
118 *
119 * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
120 * done at the end is not done here.
121 */
122static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) 149static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
123{ 150{
124 a += JHASH_GOLDEN_RATIO; 151 a += JHASH_INITVAL;
125 b += JHASH_GOLDEN_RATIO; 152 b += JHASH_INITVAL;
126 c += initval; 153 c += initval;
127 154
128 __jhash_mix(a, b, c); 155 __jhash_final(a, b, c);
129 156
130 return c; 157 return c;
131} 158}
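
The rewritten jhash.h above replaces Bob Jenkins' lookup2 with lookup3: rol32()-based three-way mixing plus a separate __jhash_final() pass, seeded with JHASH_INITVAL, the length and the caller's initval. Below is a minimal userspace sketch of the caller-side pattern; rol32(), __jhash_final() and the jhash_3words()/jhash_mask() bodies are copied from the hunk so the snippet compiles standalone, and the key values in main() are purely illustrative.

/* gcc -std=gnu99 jhash_demo.c */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static inline u32 rol32(u32 word, unsigned int shift)
{
        return (word << shift) | (word >> (32 - shift));
}

/* Copied from the new jhash.h */
#define JHASH_INITVAL   0xdeadbeef
#define jhash_size(n)   ((u32)1 << (n))
#define jhash_mask(n)   (jhash_size(n) - 1)

#define __jhash_final(a, b, c) \
{ \
        c ^= b; c -= rol32(b, 14); \
        a ^= c; a -= rol32(c, 11); \
        b ^= a; b -= rol32(a, 25); \
        c ^= b; c -= rol32(b, 16); \
        a ^= c; a -= rol32(c, 4);  \
        b ^= a; b -= rol32(a, 14); \
        c ^= b; c -= rol32(b, 24); \
}

static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
        a += JHASH_INITVAL;
        b += JHASH_INITVAL;
        c += initval;
        __jhash_final(a, b, c);
        return c;
}

int main(void)
{
        /* e.g. hash a (saddr, daddr, port) tuple into a 1024-bucket table */
        u32 hash = jhash_3words(0xc0a80001, 0xc0a80002, 80, 42);

        printf("bucket = %u of %u\n", hash & jhash_mask(10), jhash_size(10));
        return 0;
}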
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 450092c1e35f..fc3da9e4da19 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -60,7 +60,7 @@ extern const char linux_proc_banner[];
60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
61#define roundup(x, y) ( \ 61#define roundup(x, y) ( \
62{ \ 62{ \
63 typeof(y) __y = y; \ 63 const typeof(y) __y = y; \
64 (((x) + (__y - 1)) / __y) * __y; \ 64 (((x) + (__y - 1)) / __y) * __y; \
65} \ 65} \
66) 66)
@@ -293,6 +293,7 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
293 unsigned int interval_msec); 293 unsigned int interval_msec);
294 294
295extern int printk_delay_msec; 295extern int printk_delay_msec;
296extern int dmesg_restrict;
296 297
297/* 298/*
298 * Print a one-time message (analogous to WARN_ONCE() et al): 299 * Print a one-time message (analogous to WARN_ONCE() et al):
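
The roundup() hunk above only adds const to the typeof temporary; the temporary already ensures y is evaluated once, and the const qualifier plausibly lets the compiler treat __y as a constant and fold the division when y is a compile-time constant. A standalone sketch of the macro's behaviour (GNU C statement expression, as in the kernel):

/* gcc roundup_demo.c */
#include <stdio.h>

#define roundup(x, y) ( \
{ \
        const typeof(y) __y = y; \
        (((x) + (__y - 1)) / __y) * __y; \
} \
)

int main(void)
{
        printf("%d\n", roundup(53, 16));  /* 64: smallest multiple of 16 >= 53 */
        printf("%d\n", roundup(64, 16));  /* 64: already aligned */
        return 0;
}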
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644
index 000000000000..38368d785f08
--- /dev/null
+++ b/include/linux/leds-lp5521.h
@@ -0,0 +1,47 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5521_H
24#define __LINUX_LP5521_H
25
26/* See Documentation/leds/leds-lp5521.txt */
27
28struct lp5521_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5521_CLOCK_AUTO 0
35#define LP5521_CLOCK_INT 1
36#define LP5521_CLOCK_EXT 2
37
38struct lp5521_platform_data {
39 struct lp5521_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5521_H */
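
A hypothetical board-file sketch of how the new lp5521_platform_data might be filled in. The struct definitions are copied from the header above so the snippet compiles standalone; the board_lp5521_* names and the current values are illustrative assumptions, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char u8;

struct lp5521_led_config {
        u8 chan_nr;
        u8 led_current;         /* mA x10, 0 if led is not connected */
        u8 max_current;
};

#define LP5521_CLOCK_AUTO       0

struct lp5521_platform_data {
        struct lp5521_led_config *led_config;
        u8 num_channels;
        u8 clock_mode;
        int (*setup_resources)(void);
        void (*release_resources)(void);
        void (*enable)(bool state);
};

/* Two channels wired, one left unconnected (led_current = 0). */
static struct lp5521_led_config board_lp5521_leds[] = {
        { .chan_nr = 0, .led_current = 50, .max_current = 130 },
        { .chan_nr = 1, .led_current = 0,  .max_current = 130 },
};

static struct lp5521_platform_data board_lp5521_pdata = {
        .led_config     = board_lp5521_leds,
        .num_channels   = 2,
        .clock_mode     = LP5521_CLOCK_AUTO,
};

int main(void)
{
        printf("channel 0 current: %.1f mA\n",
               board_lp5521_pdata.led_config[0].led_current / 10.0);
        return 0;
}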
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644
index 000000000000..796747637b80
--- /dev/null
+++ b/include/linux/leds-lp5523.h
@@ -0,0 +1,47 @@
1/*
2 * LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5523_H
24#define __LINUX_LP5523_H
25
26/* See Documentation/leds/leds-lp5523.txt */
27
28struct lp5523_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5523_CLOCK_AUTO 0
35#define LP5523_CLOCK_INT 1
36#define LP5523_CLOCK_EXT 2
37
38struct lp5523_platform_data {
39 struct lp5523_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ba6986a11663..0f19df9e37b0 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/timer.h>
18 19
19struct device; 20struct device;
20/* 21/*
@@ -45,10 +46,14 @@ struct led_classdev {
45 /* Get LED brightness level */ 46 /* Get LED brightness level */
46 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); 47 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
47 48
48 /* Activate hardware accelerated blink, delays are in 49 /*
49 * miliseconds and if none is provided then a sensible default 50 * Activate hardware accelerated blink, delays are in milliseconds
50 * should be chosen. The call can adjust the timings if it can't 51 * and if both are zero then a sensible default should be chosen.
 51      * match the values specified exactly. */                                52      * The call should adjust the timings in that case, and also when
                                                                                 53      * it cannot match the values specified exactly.
54 * Deactivate blinking again when the brightness is set to a fixed
55 * value via the brightness_set() callback.
56 */
52 int (*blink_set)(struct led_classdev *led_cdev, 57 int (*blink_set)(struct led_classdev *led_cdev,
53 unsigned long *delay_on, 58 unsigned long *delay_on,
54 unsigned long *delay_off); 59 unsigned long *delay_off);
@@ -57,6 +62,10 @@ struct led_classdev {
57 struct list_head node; /* LED Device list */ 62 struct list_head node; /* LED Device list */
58 const char *default_trigger; /* Trigger to use */ 63 const char *default_trigger; /* Trigger to use */
59 64
65 unsigned long blink_delay_on, blink_delay_off;
66 struct timer_list blink_timer;
67 int blink_brightness;
68
60#ifdef CONFIG_LEDS_TRIGGERS 69#ifdef CONFIG_LEDS_TRIGGERS
61 /* Protects the trigger data below */ 70 /* Protects the trigger data below */
62 struct rw_semaphore trigger_lock; 71 struct rw_semaphore trigger_lock;
@@ -73,6 +82,36 @@ extern void led_classdev_unregister(struct led_classdev *led_cdev);
73extern void led_classdev_suspend(struct led_classdev *led_cdev); 82extern void led_classdev_suspend(struct led_classdev *led_cdev);
74extern void led_classdev_resume(struct led_classdev *led_cdev); 83extern void led_classdev_resume(struct led_classdev *led_cdev);
75 84
85/**
86 * led_blink_set - set blinking with software fallback
87 * @led_cdev: the LED to start blinking
88 * @delay_on: the time it should be on (in ms)
                                                                                 89 * @delay_off: the time it should be off (in ms)
90 *
91 * This function makes the LED blink, attempting to use the
92 * hardware acceleration if possible, but falling back to
93 * software blinking if there is no hardware blinking or if
94 * the LED refuses the passed values.
95 *
96 * Note that if software blinking is active, simply calling
97 * led_cdev->brightness_set() will not stop the blinking,
                                                                                 98 * use led_brightness_set() instead.
99 */
100extern void led_blink_set(struct led_classdev *led_cdev,
101 unsigned long *delay_on,
102 unsigned long *delay_off);
103/**
104 * led_brightness_set - set LED brightness
105 * @led_cdev: the LED to set
106 * @brightness: the brightness to set it to
107 *
108 * Set an LED's brightness, and, if necessary, cancel the
109 * software blink timer that implements blinking when the
110 * hardware doesn't.
111 */
112extern void led_brightness_set(struct led_classdev *led_cdev,
113 enum led_brightness brightness);
114
76/* 115/*
77 * LED Triggers 116 * LED Triggers
78 */ 117 */
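
A toy userspace model of the led_blink_set() semantics documented above: try the driver's blink_set() hook first, and fall back to software blinking when the hook is missing or rejects the requested values. The names mirror the header, but the bodies are illustrative stand-ins, not the kernel implementation (which arms blink_timer instead of printing).

#include <stddef.h>
#include <stdio.h>

struct led_classdev {
        const char *name;
        int (*blink_set)(struct led_classdev *led_cdev,
                         unsigned long *delay_on, unsigned long *delay_off);
};

static void led_blink_set(struct led_classdev *led_cdev,
                          unsigned long *delay_on, unsigned long *delay_off)
{
        /* Both zero: the documented "choose a sensible default" case. */
        if (!*delay_on && !*delay_off)
                *delay_on = *delay_off = 500;

        if (led_cdev->blink_set &&
            !led_cdev->blink_set(led_cdev, delay_on, delay_off)) {
                printf("%s: hardware blink %lu ms on / %lu ms off\n",
                       led_cdev->name, *delay_on, *delay_off);
                return;
        }
        printf("%s: software blink fallback\n", led_cdev->name);
}

/* Toy hardware that only supports symmetric blinking. */
static int hw_blink(struct led_classdev *led_cdev,
                    unsigned long *on, unsigned long *off)
{
        return (*on == *off) ? 0 : -1;
}

int main(void)
{
        struct led_classdev hw = { "led0", hw_blink };
        struct led_classdev sw = { "led1", NULL };
        unsigned long on = 0, off = 0;

        led_blink_set(&hw, &on, &off);  /* defaults to 500/500, hw accepts */
        on = 100; off = 900;
        led_blink_set(&sw, &on, &off);  /* no hook: software fallback */
        return 0;
}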
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 1ff81b51b656..dd3c34ebca9a 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -11,6 +11,7 @@
11#define MARVELL_PHY_ID_88E1118 0x01410e10 11#define MARVELL_PHY_ID_88E1118 0x01410e10
12#define MARVELL_PHY_ID_88E1121R 0x01410cb0 12#define MARVELL_PHY_ID_88E1121R 0x01410cb0
13#define MARVELL_PHY_ID_88E1145 0x01410cd0 13#define MARVELL_PHY_ID_88E1145 0x01410cd0
14#define MARVELL_PHY_ID_88E1149R 0x01410e50
14#define MARVELL_PHY_ID_88E1240 0x01410e30 15#define MARVELL_PHY_ID_88E1240 0x01410e30
15#define MARVELL_PHY_ID_88E1318S 0x01410e90 16#define MARVELL_PHY_ID_88E1318S 0x01410e90
16 17
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index c779b49a1fda..b1494aced217 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -55,6 +55,7 @@
55#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ 55#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
56#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ 56#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
57#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ 57#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
58#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
58 59
59/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ 60/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
60#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ 61#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -235,6 +236,10 @@
235#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ 236#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
236#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ 237#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
237 238
239/* AN EEE Advertisement register. */
240#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */
241#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */
242
238/* LASI RX_ALARM control/status registers. */ 243/* LASI RX_ALARM control/status registers. */
239#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ 244#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
240#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */ 245#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index d19e2114fd86..5c99da1078aa 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -59,19 +59,19 @@ struct sh_mmcif_plat_data {
59#define MMCIF_CE_HOST_STS2 0x0000004C 59#define MMCIF_CE_HOST_STS2 0x0000004C
60#define MMCIF_CE_VERSION 0x0000007C 60#define MMCIF_CE_VERSION 0x0000007C
61 61
62extern inline u32 sh_mmcif_readl(void __iomem *addr, int reg) 62static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
63{ 63{
64 return readl(addr + reg); 64 return readl(addr + reg);
65} 65}
66 66
67extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) 67static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
68{ 68{
69 writel(val, addr + reg); 69 writel(val, addr + reg);
70} 70}
71 71
72#define SH_MMCIF_BBS 512 /* boot block size */ 72#define SH_MMCIF_BBS 512 /* boot block size */
73 73
74extern inline void sh_mmcif_boot_cmd_send(void __iomem *base, 74static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
75 unsigned long cmd, unsigned long arg) 75 unsigned long cmd, unsigned long arg)
76{ 76{
77 sh_mmcif_writel(base, MMCIF_CE_INT, 0); 77 sh_mmcif_writel(base, MMCIF_CE_INT, 0);
@@ -79,7 +79,7 @@ extern inline void sh_mmcif_boot_cmd_send(void __iomem *base,
79 sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); 79 sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
80} 80}
81 81
82extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) 82static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
83{ 83{
84 unsigned long tmp; 84 unsigned long tmp;
85 int cnt; 85 int cnt;
@@ -95,14 +95,14 @@ extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
95 return -1; 95 return -1;
96} 96}
97 97
98extern inline int sh_mmcif_boot_cmd(void __iomem *base, 98static inline int sh_mmcif_boot_cmd(void __iomem *base,
99 unsigned long cmd, unsigned long arg) 99 unsigned long cmd, unsigned long arg)
100{ 100{
101 sh_mmcif_boot_cmd_send(base, cmd, arg); 101 sh_mmcif_boot_cmd_send(base, cmd, arg);
102 return sh_mmcif_boot_cmd_poll(base, 0x00010000); 102 return sh_mmcif_boot_cmd_poll(base, 0x00010000);
103} 103}
104 104
105extern inline int sh_mmcif_boot_do_read_single(void __iomem *base, 105static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
106 unsigned int block_nr, 106 unsigned int block_nr,
107 unsigned long *buf) 107 unsigned long *buf)
108{ 108{
@@ -125,7 +125,7 @@ extern inline int sh_mmcif_boot_do_read_single(void __iomem *base,
125 return 0; 125 return 0;
126} 126}
127 127
128extern inline int sh_mmcif_boot_do_read(void __iomem *base, 128static inline int sh_mmcif_boot_do_read(void __iomem *base,
129 unsigned long first_block, 129 unsigned long first_block,
130 unsigned long nr_blocks, 130 unsigned long nr_blocks,
131 void *buf) 131 void *buf)
@@ -143,7 +143,7 @@ extern inline int sh_mmcif_boot_do_read(void __iomem *base,
143 return ret; 143 return ret;
144} 144}
145 145
146extern inline void sh_mmcif_boot_init(void __iomem *base) 146static inline void sh_mmcif_boot_init(void __iomem *base)
147{ 147{
148 unsigned long tmp; 148 unsigned long tmp;
149 149
@@ -177,7 +177,7 @@ extern inline void sh_mmcif_boot_init(void __iomem *base)
177 sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); 177 sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
178} 178}
179 179
180extern inline void sh_mmcif_boot_slurp(void __iomem *base, 180static inline void sh_mmcif_boot_slurp(void __iomem *base,
181 unsigned char *buf, 181 unsigned char *buf,
182 unsigned long no_bytes) 182 unsigned long no_bytes)
183{ 183{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 072652d94d9f..d31bc3c94717 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
493enum netdev_queue_state_t { 493enum netdev_queue_state_t {
494 __QUEUE_STATE_XOFF, 494 __QUEUE_STATE_XOFF,
495 __QUEUE_STATE_FROZEN, 495 __QUEUE_STATE_FROZEN,
496#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
497 (1 << __QUEUE_STATE_FROZEN))
496}; 498};
497 499
498struct netdev_queue { 500struct netdev_queue {
@@ -503,6 +505,12 @@ struct netdev_queue {
503 struct Qdisc *qdisc; 505 struct Qdisc *qdisc;
504 unsigned long state; 506 unsigned long state;
505 struct Qdisc *qdisc_sleeping; 507 struct Qdisc *qdisc_sleeping;
508#ifdef CONFIG_RPS
509 struct kobject kobj;
510#endif
511#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
512 int numa_node;
513#endif
506/* 514/*
507 * write mostly part 515 * write mostly part
508 */ 516 */
@@ -517,6 +525,22 @@ struct netdev_queue {
517 u64 tx_dropped; 525 u64 tx_dropped;
518} ____cacheline_aligned_in_smp; 526} ____cacheline_aligned_in_smp;
519 527
528static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
529{
530#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
531 return q->numa_node;
532#else
533 return -1;
534#endif
535}
536
537static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
538{
539#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
540 q->numa_node = node;
541#endif
542}
543
520#ifdef CONFIG_RPS 544#ifdef CONFIG_RPS
521/* 545/*
522 * This structure holds an RPS map which can be of variable length. The 546 * This structure holds an RPS map which can be of variable length. The
@@ -592,11 +616,36 @@ struct netdev_rx_queue {
592 struct rps_map __rcu *rps_map; 616 struct rps_map __rcu *rps_map;
593 struct rps_dev_flow_table __rcu *rps_flow_table; 617 struct rps_dev_flow_table __rcu *rps_flow_table;
594 struct kobject kobj; 618 struct kobject kobj;
595 struct netdev_rx_queue *first; 619 struct net_device *dev;
596 atomic_t count;
597} ____cacheline_aligned_in_smp; 620} ____cacheline_aligned_in_smp;
598#endif /* CONFIG_RPS */ 621#endif /* CONFIG_RPS */
599 622
623#ifdef CONFIG_XPS
624/*
625 * This structure holds an XPS map which can be of variable length. The
626 * map is an array of queues.
627 */
628struct xps_map {
629 unsigned int len;
630 unsigned int alloc_len;
631 struct rcu_head rcu;
632 u16 queues[0];
633};
634#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
635#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
636 / sizeof(u16))
637
638/*
639 * This structure holds all XPS maps for device. Maps are indexed by CPU.
640 */
641struct xps_dev_maps {
642 struct rcu_head rcu;
643 struct xps_map __rcu *cpu_map[0];
644};
645#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
646 (nr_cpu_ids * sizeof(struct xps_map *)))
647#endif /* CONFIG_XPS */
648
600/* 649/*
601 * This structure defines the management hooks for network devices. 650 * This structure defines the management hooks for network devices.
602 * The following hooks can be defined; unless noted otherwise, they are 651 * The following hooks can be defined; unless noted otherwise, they are
@@ -951,7 +1000,7 @@ struct net_device {
951#endif 1000#endif
952 void *atalk_ptr; /* AppleTalk link */ 1001 void *atalk_ptr; /* AppleTalk link */
953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1002 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
954 void *dn_ptr; /* DECnet specific data */ 1003 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
955 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 1004 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
956 void *ec_ptr; /* Econet specific data */ 1005 void *ec_ptr; /* Econet specific data */
957 void *ax25_ptr; /* AX.25 specific data */ 1006 void *ax25_ptr; /* AX.25 specific data */
@@ -995,8 +1044,8 @@ struct net_device {
995 unsigned int real_num_rx_queues; 1044 unsigned int real_num_rx_queues;
996#endif 1045#endif
997 1046
998 rx_handler_func_t *rx_handler; 1047 rx_handler_func_t __rcu *rx_handler;
999 void *rx_handler_data; 1048 void __rcu *rx_handler_data;
1000 1049
1001 struct netdev_queue __rcu *ingress_queue; 1050 struct netdev_queue __rcu *ingress_queue;
1002 1051
@@ -1017,6 +1066,10 @@ struct net_device {
1017 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1066 unsigned long tx_queue_len; /* Max frames per queue allowed */
1018 spinlock_t tx_global_lock; 1067 spinlock_t tx_global_lock;
1019 1068
1069#ifdef CONFIG_XPS
1070 struct xps_dev_maps __rcu *xps_maps;
1071#endif
1072
1020 /* These may be needed for future network-power-down code. */ 1073 /* These may be needed for future network-power-down code. */
1021 1074
1022 /* 1075 /*
@@ -1307,7 +1360,8 @@ static inline struct net_device *first_net_device(struct net *net)
1307 1360
1308extern int netdev_boot_setup_check(struct net_device *dev); 1361extern int netdev_boot_setup_check(struct net_device *dev);
1309extern unsigned long netdev_boot_base(const char *prefix, int unit); 1362extern unsigned long netdev_boot_base(const char *prefix, int unit);
1310extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr); 1363extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1364 const char *hwaddr);
1311extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1365extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1312extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1366extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1313extern void dev_add_pack(struct packet_type *pt); 1367extern void dev_add_pack(struct packet_type *pt);
@@ -1554,6 +1608,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
1554 1608
1555static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 1609static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1556{ 1610{
1611 if (WARN_ON(!dev_queue)) {
1612 printk(KERN_INFO "netif_stop_queue() cannot be called before "
1613 "register_netdev()");
1614 return;
1615 }
1557 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1616 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1558} 1617}
1559 1618
@@ -1595,9 +1654,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
1595 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 1654 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1596} 1655}
1597 1656
1598static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) 1657static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
1599{ 1658{
1600 return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); 1659 return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
1601} 1660}
1602 1661
1603/** 1662/**
@@ -2234,6 +2293,8 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
2234void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2293void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2235 struct net_device *dev); 2294 struct net_device *dev);
2236 2295
2296int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
2297
2237static inline int net_gso_ok(int features, int gso_type) 2298static inline int net_gso_ok(int features, int gso_type)
2238{ 2299{
2239 int feature = gso_type << NETIF_F_GSO_SHIFT; 2300 int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2249,10 +2310,7 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
2249static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2310static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
2250{ 2311{
2251 if (skb_is_gso(skb)) { 2312 if (skb_is_gso(skb)) {
2252 int features = dev->features; 2313 int features = netif_get_vlan_features(skb, dev);
2253
2254 if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
2255 features &= dev->vlan_features;
2256 2314
2257 return (!skb_gso_ok(skb, features) || 2315 return (!skb_gso_ok(skb, features) ||
2258 unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 2316 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
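
The QUEUE_STATE_XOFF_OR_FROZEN addition lets netif_tx_queue_frozen_or_stopped() check both queue-state bits with a single mask test instead of two test_bit() calls. A standalone sketch of that test, with the bit layout copied from the hunk:

#include <stdio.h>

enum netdev_queue_state_t {
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
                                    (1 << __QUEUE_STATE_FROZEN))
};

int main(void)
{
        unsigned long state = 1UL << __QUEUE_STATE_FROZEN;

        if (state & QUEUE_STATE_XOFF_OR_FROZEN)
                printf("queue stopped or frozen: hold back transmissions\n");
        return 0;
}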
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 89341c32631a..1893837b3966 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -33,6 +33,8 @@
33 33
34#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE) 34#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
35 35
36#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
37
36/* only for userspace compatibility */ 38/* only for userspace compatibility */
37#ifndef __KERNEL__ 39#ifndef __KERNEL__
38/* Generic cache responses from hook functions. 40/* Generic cache responses from hook functions.
@@ -215,7 +217,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
215 int ret; 217 int ret;
216 218
217 if (!cond || 219 if (!cond ||
218 (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) 220 ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
219 ret = okfn(skb); 221 ret = okfn(skb);
220 return ret; 222 return ret;
221} 223}
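
The NF_HOOK_COND change is a one-parenthesis precedence fix: assignment binds more loosely than ==, so the old form stored the result of the comparison in ret rather than the hook verdict itself. A standalone demonstration, with fake_hook() standing in for nf_hook_thresh():

#include <stdio.h>

static int fake_hook(void)
{
        return 2;               /* some verdict other than 1 */
}

int main(void)
{
        int ret;

        if ((ret = fake_hook() == 1))   /* old: parses as ret = (2 == 1) */
                ;
        printf("buggy form: ret = %d\n", ret);  /* 0 - verdict lost */

        if (((ret = fake_hook()) == 1)) /* fixed: assign first, then compare */
                ;
        printf("fixed form: ret = %d\n", ret);  /* 2 - verdict preserved */
        return 0;
}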
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22a8323..40150f345982 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@ struct perf_event {
747 u64 tstamp_running; 747 u64 tstamp_running;
748 u64 tstamp_stopped; 748 u64 tstamp_stopped;
749 749
750 /*
751 * timestamp shadows the actual context timing but it can
752 * be safely used in NMI interrupt context. It reflects the
753 * context time as it was when the event was last scheduled in.
754 *
755 * ctx_time already accounts for ctx->timestamp. Therefore to
756 * compute ctx_time for a sample, simply add perf_clock().
757 */
758 u64 shadow_ctx_time;
759
750 struct perf_event_attr attr; 760 struct perf_event_attr attr;
751 struct hw_perf_event hw; 761 struct hw_perf_event hw;
752 762
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 01b3d759f1fc..e031e1a486d9 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,6 +8,7 @@ struct platform_pwm_backlight_data {
8 int pwm_id; 8 int pwm_id;
9 unsigned int max_brightness; 9 unsigned int max_brightness;
10 unsigned int dft_brightness; 10 unsigned int dft_brightness;
11 unsigned int lth_brightness;
11 unsigned int pwm_period_ns; 12 unsigned int pwm_period_ns;
12 int (*init)(struct device *dev); 13 int (*init)(struct device *dev);
13 int (*notify)(struct device *dev, int brightness); 14 int (*notify)(struct device *dev, int brightness);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index a39cbed9ee17..ab2baa5c4884 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -34,19 +34,13 @@
34 * needed for RCU lookups (because root->height is unreliable). The only 34 * needed for RCU lookups (because root->height is unreliable). The only
35 * time callers need worry about this is when doing a lookup_slot under 35 * time callers need worry about this is when doing a lookup_slot under
36 * RCU. 36 * RCU.
37 *
38 * Indirect pointer in fact is also used to tag the last pointer of a node
39 * when it is shrunk, before we rcu free the node. See shrink code for
40 * details.
37 */ 41 */
38#define RADIX_TREE_INDIRECT_PTR 1 42#define RADIX_TREE_INDIRECT_PTR 1
39#define RADIX_TREE_RETRY ((void *)-1UL)
40
41static inline void *radix_tree_ptr_to_indirect(void *ptr)
42{
43 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
44}
45 43
46static inline void *radix_tree_indirect_to_ptr(void *ptr)
47{
48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
49}
50#define radix_tree_indirect_to_ptr(ptr) \ 44#define radix_tree_indirect_to_ptr(ptr) \
51 radix_tree_indirect_to_ptr((void __force *)(ptr)) 45 radix_tree_indirect_to_ptr((void __force *)(ptr))
52 46
@@ -140,16 +134,29 @@ do { \
140 * removed. 134 * removed.
141 * 135 *
142 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read 136 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
143 * locked across slot lookup and dereference. More likely, will be used with 137 * locked across slot lookup and dereference. Not required if write lock is
144 * radix_tree_replace_slot(), as well, so caller will hold tree write locked. 138 * held (ie. items cannot be concurrently inserted).
139 *
140 * radix_tree_deref_retry must be used to confirm validity of the pointer if
141 * only the read lock is held.
145 */ 142 */
146static inline void *radix_tree_deref_slot(void **pslot) 143static inline void *radix_tree_deref_slot(void **pslot)
147{ 144{
148 void *ret = rcu_dereference(*pslot); 145 return rcu_dereference(*pslot);
149 if (unlikely(radix_tree_is_indirect_ptr(ret)))
150 ret = RADIX_TREE_RETRY;
151 return ret;
152} 146}
147
148/**
149 * radix_tree_deref_retry - check radix_tree_deref_slot
150 * @arg: pointer returned by radix_tree_deref_slot
151 * Returns: 0 if retry is not required, otherwise retry is required
152 *
153 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
154 */
155static inline int radix_tree_deref_retry(void *arg)
156{
157 return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
158}
159
153/** 160/**
154 * radix_tree_replace_slot - replace item in a slot 161 * radix_tree_replace_slot - replace item in a slot
155 * @pslot: pointer to slot, returned by radix_tree_lookup_slot 162 * @pslot: pointer to slot, returned by radix_tree_lookup_slot
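
The radix-tree hunk moves the indirect-pointer check out of radix_tree_deref_slot() into a new radix_tree_deref_retry() that callers must invoke themselves when they hold only the read lock. A sketch of the resulting lookup loop; the helper body is copied from the hunk, while the surrounding loop mirrors how an RCU reader such as a page-cache lookup would use it, with a plain pointer standing in for the slot.

#include <stdio.h>

#define RADIX_TREE_INDIRECT_PTR 1

static inline int radix_tree_deref_retry(void *arg)
{
        return (unsigned long)arg & RADIX_TREE_INDIRECT_PTR;
}

int main(void)
{
        int item = 42;
        void *slot = &item;     /* stands in for rcu_dereference(*pslot) */
        void *entry;

        do {
                /* Re-read the slot each time; a tagged value means the
                 * tree root was shrunk under us and the lookup must be
                 * restarted. */
                entry = slot;
        } while (radix_tree_deref_retry(entry));

        printf("found %d\n", *(int *)entry);
        return 0;
}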
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 88d36f9145ba..d01c96c1966e 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -2,6 +2,7 @@
2#define _LINUX_RESOURCE_H 2#define _LINUX_RESOURCE_H
3 3
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/types.h>
5 6
6/* 7/*
7 * Resource control/accounting header file for linux 8 * Resource control/accounting header file for linux
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d42f274418b8..bbad657a3725 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -6,7 +6,6 @@
6#include <linux/if_link.h> 6#include <linux/if_link.h>
7#include <linux/if_addr.h> 7#include <linux/if_addr.h>
8#include <linux/neighbour.h> 8#include <linux/neighbour.h>
9#include <linux/netdevice.h>
10 9
11/* rtnetlink families. Values up to 127 are reserved for real address 10/* rtnetlink families. Values up to 127 are reserved for real address
12 * families, values above 128 may be used arbitrarily. 11 * families, values above 128 may be used arbitrarily.
@@ -606,6 +605,7 @@ struct tcamsg {
606#ifdef __KERNEL__ 605#ifdef __KERNEL__
607 606
608#include <linux/mutex.h> 607#include <linux/mutex.h>
608#include <linux/netdevice.h>
609 609
610static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) 610static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
611{ 611{
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 4dca992f3093..cea0c38e7a63 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -122,6 +122,10 @@ int clk_rate_table_find(struct clk *clk,
122long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, 122long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
123 unsigned int div_max, unsigned long rate); 123 unsigned int div_max, unsigned long rate);
124 124
125long clk_round_parent(struct clk *clk, unsigned long target,
126 unsigned long *best_freq, unsigned long *parent_freq,
127 unsigned int div_min, unsigned int div_max);
128
125#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ 129#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \
126{ \ 130{ \
127 .parent = _parent, \ 131 .parent = _parent, \
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 864bd56bd3b0..4d9dcd138315 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -5,7 +5,6 @@ struct sh_timer_config {
5 char *name; 5 char *name;
6 long channel_offset; 6 long channel_offset;
7 int timer_bit; 7 int timer_bit;
8 char *clk;
9 unsigned long clockevent_rating; 8 unsigned long clockevent_rating;
10 unsigned long clocksource_rating; 9 unsigned long clocksource_rating;
11}; 10};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e6ba898de61c..19f37a6ee6c4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -386,9 +386,10 @@ struct sk_buff {
386#else 386#else
387 __u8 deliver_no_wcard:1; 387 __u8 deliver_no_wcard:1;
388#endif 388#endif
389 __u8 ooo_okay:1;
389 kmemcheck_bitfield_end(flags2); 390 kmemcheck_bitfield_end(flags2);
390 391
391 /* 0/14 bit hole */ 392 /* 0/13 bit hole */
392 393
393#ifdef CONFIG_NET_DMA 394#ifdef CONFIG_NET_DMA
394 dma_cookie_t dma_cookie; 395 dma_cookie_t dma_cookie;
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index ebb0c80ffd6e..12b2b18e50c1 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -230,6 +230,7 @@ enum
230 LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ 230 LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
231 LINUX_MIB_TCPDEFERACCEPTDROP, 231 LINUX_MIB_TCPDEFERACCEPTDROP,
232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
233 __LINUX_MIB_MAX 234 __LINUX_MIB_MAX
234}; 235};
235 236
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d66c61774d95..e10352915698 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -40,9 +40,9 @@ struct plat_stmmacenet_data {
40 int pmt; 40 int pmt;
41 void (*fix_mac_speed)(void *priv, unsigned int speed); 41 void (*fix_mac_speed)(void *priv, unsigned int speed);
42 void (*bus_setup)(void __iomem *ioaddr); 42 void (*bus_setup)(void __iomem *ioaddr);
43#ifdef CONFIG_STM_DRIVERS 43 int (*init)(struct platform_device *pdev);
44 struct stm_pad_config *pad_config; 44 void (*exit)(struct platform_device *pdev);
45#endif 45 void *custom_cfg;
46 void *bsp_priv; 46 void *bsp_priv;
47}; 47};
48 48
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index bbdb680ffbe9..aea0d438e3c7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -82,18 +82,28 @@ struct svc_xprt {
82 struct net *xpt_net; 82 struct net *xpt_net;
83}; 83};
84 84
85static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 85static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
86{ 86{
87 spin_lock(&xpt->xpt_lock); 87 spin_lock(&xpt->xpt_lock);
88 list_add(&u->list, &xpt->xpt_users); 88 list_del_init(&u->list);
89 spin_unlock(&xpt->xpt_lock); 89 spin_unlock(&xpt->xpt_lock);
90} 90}
91 91
92static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 92static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
93{ 93{
94 spin_lock(&xpt->xpt_lock); 94 spin_lock(&xpt->xpt_lock);
95 list_del_init(&u->list); 95 if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
96 /*
97 * The connection is about to be deleted soon (or,
98 * worse, may already be deleted--in which case we've
99 * already notified the xpt_users).
100 */
101 spin_unlock(&xpt->xpt_lock);
102 return -ENOTCONN;
103 }
104 list_add(&u->list, &xpt->xpt_users);
96 spin_unlock(&xpt->xpt_lock); 105 spin_unlock(&xpt->xpt_lock);
106 return 0;
97} 107}
98 108
99int svc_reg_xprt_class(struct svc_xprt_class *); 109int svc_reg_xprt_class(struct svc_xprt_class *);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a754748dd5f..c7ea9bc8897c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
50#define N_V253 19 /* Codec control over voice modem */ 50#define N_V253 19 /* Codec control over voice modem */
51#define N_CAIF 20 /* CAIF protocol for talking to modems */ 51#define N_CAIF 20 /* CAIF protocol for talking to modems */
52#define N_GSM0710 21 /* GSM 0710 Mux */ 52#define N_GSM0710 21 /* GSM 0710 Mux */
53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ 53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
54 54
55/* 55/*
56 * This character is the same as _POSIX_VDISABLE: it cannot be used as 56 * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab222bb..24300d8a1bc1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@ struct usbdrv_wrap {
797 * @disconnect: Called when the interface is no longer accessible, usually 797 * @disconnect: Called when the interface is no longer accessible, usually
798 * because its device has been (or is being) disconnected or the 798 * because its device has been (or is being) disconnected or the
799 * driver module is being unloaded. 799 * driver module is being unloaded.
800 * @ioctl: Used for drivers that want to talk to userspace through 800 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
801 * the "usbfs" filesystem. This lets devices provide ways to 801 * the "usbfs" filesystem. This lets devices provide ways to
802 * expose information to user space regardless of where they 802 * expose information to user space regardless of where they
803 * do (or don't) show up otherwise in the filesystem. 803 * do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d506ed..2387f9fc8138 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@ struct musb_hdrc_config {
89 /* A GPIO controlling VRSEL in Blackfin */ 89 /* A GPIO controlling VRSEL in Blackfin */
90 unsigned int gpio_vrsel; 90 unsigned int gpio_vrsel;
91 unsigned int gpio_vrsel_active; 91 unsigned int gpio_vrsel_active;
                                                                                 92     /* musb CLKIN on Blackfin, in MHz */
93 unsigned char clkin;
92#endif 94#endif
93 95
94}; 96};
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 7ae27a473818..44842c8d38c0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -97,6 +97,12 @@ struct driver_info {
97 97
98#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ 98#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */
99 99
100/*
                                                                                101 * Indicates to usbnet that the USB driver accumulates multiple IP packets.
                                                                                102 * Affects statistics (counters) and short packet handling.
103 */
104#define FLAG_MULTI_PACKET 0x1000
105
100 /* init device ... can sleep, or cause probe() failure */ 106 /* init device ... can sleep, or cause probe() failure */
101 int (*bind)(struct usbnet *, struct usb_interface *); 107 int (*bind)(struct usbnet *, struct usb_interface *);
102 108
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index b971e3848493..930fdd2de79c 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -283,6 +283,7 @@ enum xfrm_attr_type_t {
283 XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */ 283 XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */
284 XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */ 284 XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
285 XFRMA_MARK, /* struct xfrm_mark */ 285 XFRMA_MARK, /* struct xfrm_mark */
286 XFRMA_TFCPAD, /* __u32 */
286 __XFRMA_MAX 287 __XFRMA_MAX
287 288
288#define XFRMA_MAX (__XFRMA_MAX - 1) 289#define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a9441249306c..23710aa6a181 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,6 @@
1#ifndef _ADDRCONF_H 1#ifndef _ADDRCONF_H
2#define _ADDRCONF_H 2#define _ADDRCONF_H
3 3
4#define RETRANS_TIMER HZ
5
6#define MAX_RTR_SOLICITATIONS 3 4#define MAX_RTR_SOLICITATIONS 3
7#define RTR_SOLICITATION_INTERVAL (4*HZ) 5#define RTR_SOLICITATION_INTERVAL (4*HZ)
8 6
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 90c9e2872f27..18e5c3f67580 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp);
10extern void unix_notinflight(struct file *fp); 10extern void unix_notinflight(struct file *fp);
11extern void unix_gc(void); 11extern void unix_gc(void);
12extern void wait_for_unix_gc(void); 12extern void wait_for_unix_gc(void);
13extern struct sock *unix_get_socket(struct file *filp);
13 14
14#define UNIX_HASH_SIZE 256 15#define UNIX_HASH_SIZE 256
15 16
@@ -56,6 +57,7 @@ struct unix_sock {
56 spinlock_t lock; 57 spinlock_t lock;
57 unsigned int gc_candidate : 1; 58 unsigned int gc_candidate : 1;
58 unsigned int gc_maybe_cycle : 1; 59 unsigned int gc_maybe_cycle : 1;
60 unsigned char recursion_level;
59 struct socket_wq peer_wq; 61 struct socket_wq peer_wq;
60}; 62};
61#define unix_sk(__sk) ((struct unix_sock *)__sk) 63#define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 6da573c75d54..8eff83b95366 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -28,7 +28,7 @@ struct caif_param {
28 * @sockaddr: Socket address to connect. 28 * @sockaddr: Socket address to connect.
29 * @priority: Priority of the connection. 29 * @priority: Priority of the connection.
30 * @link_selector: Link selector (high bandwidth or low latency) 30 * @link_selector: Link selector (high bandwidth or low latency)
31 * @link_name: Name of the CAIF Link Layer to use. 31 * @ifindex: kernel index of the interface.
32 * @param: Connect Request parameters (CAIF_SO_REQ_PARAM). 32 * @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
33 * 33 *
34 * This struct is used when connecting a CAIF channel. 34 * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@ struct caif_connect_request {
39 struct sockaddr_caif sockaddr; 39 struct sockaddr_caif sockaddr;
40 enum caif_channel_priority priority; 40 enum caif_channel_priority priority;
41 enum caif_link_selector link_selector; 41 enum caif_link_selector link_selector;
42 char link_name[16]; 42 int ifindex;
43 struct caif_param param; 43 struct caif_param param;
44}; 44};
45 45
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index ce4570dff020..87c3d11b8e55 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -121,6 +121,8 @@ struct cfspi {
121 wait_queue_head_t wait; 121 wait_queue_head_t wait;
122 spinlock_t lock; 122 spinlock_t lock;
123 bool flow_stop; 123 bool flow_stop;
124 bool slave;
125 bool slave_talked;
124#ifdef CONFIG_DEBUG_FS 126#ifdef CONFIG_DEBUG_FS
125 enum cfspi_state dbg_state; 127 enum cfspi_state dbg_state;
126 u16 pcmd; 128 u16 pcmd;
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index bd646faffa47..f688478bfb84 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -139,10 +139,10 @@ struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
139 enum cfcnfg_phy_preference phy_pref); 139 enum cfcnfg_phy_preference phy_pref);
140 140
141/** 141/**
142 * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer 142 * cfcnfg_get_id_from_ifi() - Get the Physical Identifier of ifindex,
                                                                                143 * which matches the CAIF physical id with the kernel interface id.
143 * @cnfg: Configuration object 144 * @cnfg: Configuration object
144 * @name: Name of the Physical Layer (Caif Link Layer) 145 * @ifi: ifindex obtained from socket.c bindtodevice.
145 */ 146 */
146int cfcnfg_get_named(struct cfcnfg *cnfg, char *name); 147int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
147
148#endif /* CFCNFG_H_ */ 148#endif /* CFCNFG_H_ */
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
index 9402543fc20d..e54f6396fa4c 100644
--- a/include/net/caif/cfctrl.h
+++ b/include/net/caif/cfctrl.h
@@ -51,7 +51,7 @@ struct cfctrl_rsp {
51 void (*restart_rsp)(void); 51 void (*restart_rsp)(void);
52 void (*radioset_rsp)(void); 52 void (*radioset_rsp)(void);
53 void (*reject_rsp)(struct cflayer *layer, u8 linkid, 53 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
54 struct cflayer *client_layer);; 54 struct cflayer *client_layer);
55}; 55};
56 56
57/* Link Setup Parameters for CAIF-Links. */ 57/* Link Setup Parameters for CAIF-Links. */
diff --git a/include/net/dn.h b/include/net/dn.h
index e5469f7b67a3..a514a3cf4573 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -225,7 +225,7 @@ extern int decnet_di_count;
225extern int decnet_dr_count; 225extern int decnet_dr_count;
226extern int decnet_no_fc_max_cwnd; 226extern int decnet_no_fc_max_cwnd;
227 227
228extern int sysctl_decnet_mem[3]; 228extern long sysctl_decnet_mem[3];
229extern int sysctl_decnet_wmem[3]; 229extern int sysctl_decnet_wmem[3];
230extern int sysctl_decnet_rmem[3]; 230extern int sysctl_decnet_rmem[3];
231 231
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 0916bbf3bdff..b9e32db03f20 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -5,13 +5,14 @@
5struct dn_dev; 5struct dn_dev;
6 6
7struct dn_ifaddr { 7struct dn_ifaddr {
8 struct dn_ifaddr *ifa_next; 8 struct dn_ifaddr __rcu *ifa_next;
9 struct dn_dev *ifa_dev; 9 struct dn_dev *ifa_dev;
10 __le16 ifa_local; 10 __le16 ifa_local;
11 __le16 ifa_address; 11 __le16 ifa_address;
12 __u8 ifa_flags; 12 __u8 ifa_flags;
13 __u8 ifa_scope; 13 __u8 ifa_scope;
14 char ifa_label[IFNAMSIZ]; 14 char ifa_label[IFNAMSIZ];
15 struct rcu_head rcu;
15}; 16};
16 17
17#define DN_DEV_S_RU 0 /* Run - working normally */ 18#define DN_DEV_S_RU 0 /* Run - working normally */
@@ -83,7 +84,7 @@ struct dn_dev_parms {
83 84
84 85
85struct dn_dev { 86struct dn_dev {
86 struct dn_ifaddr *ifa_list; 87 struct dn_ifaddr __rcu *ifa_list;
87 struct net_device *dev; 88 struct net_device *dev;
88 struct dn_dev_parms parms; 89 struct dn_dev_parms parms;
89 char use_long; 90 char use_long;
@@ -171,19 +172,27 @@ extern int unregister_dnaddr_notifier(struct notifier_block *nb);
171 172
172static inline int dn_dev_islocal(struct net_device *dev, __le16 addr) 173static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
173{ 174{
174 struct dn_dev *dn_db = dev->dn_ptr; 175 struct dn_dev *dn_db;
175 struct dn_ifaddr *ifa; 176 struct dn_ifaddr *ifa;
177 int res = 0;
176 178
179 rcu_read_lock();
180 dn_db = rcu_dereference(dev->dn_ptr);
177 if (dn_db == NULL) { 181 if (dn_db == NULL) {
178 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n"); 182 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
179 return 0; 183 goto out;
180 } 184 }
181 185
182 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) 186 for (ifa = rcu_dereference(dn_db->ifa_list);
183 if ((addr ^ ifa->ifa_local) == 0) 187 ifa != NULL;
184 return 1; 188 ifa = rcu_dereference(ifa->ifa_next))
185 189 if ((addr ^ ifa->ifa_local) == 0) {
186 return 0; 190 res = 1;
191 break;
192 }
193out:
194 rcu_read_unlock();
195 return res;
187} 196}
188 197
189#endif /* _NET_DN_DEV_H */ 198#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index ccadab3aa3f6..9b185df265fb 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -80,6 +80,16 @@ struct dn_route {
80 unsigned rt_type; 80 unsigned rt_type;
81}; 81};
82 82
83static inline bool dn_is_input_route(struct dn_route *rt)
84{
85 return rt->fl.iif != 0;
86}
87
88static inline bool dn_is_output_route(struct dn_route *rt)
89{
90 return rt->fl.iif == 0;
91}
92
83extern void dn_route_init(void); 93extern void dn_route_init(void);
84extern void dn_route_cleanup(void); 94extern void dn_route_cleanup(void);
85 95
diff --git a/include/net/dst.h b/include/net/dst.h
index ffe9cb719c0e..755ac6c1aa03 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -70,7 +70,7 @@ struct dst_entry {
70 70
71 struct dst_ops *ops; 71 struct dst_ops *ops;
72 72
73 u32 metrics[RTAX_MAX]; 73 u32 _metrics[RTAX_MAX];
74 74
75#ifdef CONFIG_NET_CLS_ROUTE 75#ifdef CONFIG_NET_CLS_ROUTE
76 __u32 tclassid; 76 __u32 tclassid;
@@ -94,19 +94,46 @@ struct dst_entry {
94 int __use; 94 int __use;
95 unsigned long lastuse; 95 unsigned long lastuse;
96 union { 96 union {
97 struct dst_entry *next; 97 struct dst_entry *next;
98 struct rtable __rcu *rt_next; 98 struct rtable __rcu *rt_next;
99 struct rt6_info *rt6_next; 99 struct rt6_info *rt6_next;
100 struct dn_route *dn_next; 100 struct dn_route __rcu *dn_next;
101 }; 101 };
102}; 102};
103 103
104#ifdef __KERNEL__ 104#ifdef __KERNEL__
105 105
106static inline u32 106static inline u32
107dst_metric(const struct dst_entry *dst, int metric) 107dst_metric_raw(const struct dst_entry *dst, const int metric)
108{ 108{
109 return dst->metrics[metric-1]; 109 return dst->_metrics[metric-1];
110}
111
112static inline u32
113dst_metric(const struct dst_entry *dst, const int metric)
114{
115 WARN_ON_ONCE(metric == RTAX_HOPLIMIT);
116 return dst_metric_raw(dst, metric);
117}
118
119static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
120{
121 dst->_metrics[metric-1] = val;
122}
123
124static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
125{
126 memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
127}
128
129static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
130{
131 dst_import_metrics(dest, src->_metrics);
132}
133
134static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
135{
136 return dst->_metrics;
110} 137}
111 138
112static inline u32 139static inline u32
@@ -134,7 +161,7 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
134static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric, 161static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
135 unsigned long rtt) 162 unsigned long rtt)
136{ 163{
137 dst->metrics[metric-1] = jiffies_to_msecs(rtt); 164 dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
138} 165}
139 166
140static inline u32 167static inline u32
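
[Editor's note] With the array renamed to _metrics, every reader and writer is funneled through the accessors above. dst_metric() now warns on RTAX_HOPLIMIT because a stored value of 0 means "use the default TTL" and must be read through the raw accessor plus a fallback (see ip4_dst_hoplimit(), added to include/net/route.h later in this diff). A short usage sketch, assuming a cached route entry in hand:

/* Sketch: metric access after this patch, reads and writes via helpers. */
static void example_clamp_mtu(struct dst_entry *dst, u32 mtu)
{
        if (dst_metric(dst, RTAX_MTU) > mtu)        /* read via accessor */
                dst_metric_set(dst, RTAX_MTU, mtu); /* write via accessor */
}
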
diff --git a/include/net/flow.h b/include/net/flow.h
index 0ac3fb5e0973..7196e6864b8d 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -67,6 +67,7 @@ struct flowi {
67 } dnports; 67 } dnports;
68 68
69 __be32 spi; 69 __be32 spi;
70 __be32 gre_key;
70 71
71 struct { 72 struct {
72 __u8 type; 73 __u8 type;
@@ -78,6 +79,7 @@ struct flowi {
78#define fl_icmp_code uli_u.icmpt.code 79#define fl_icmp_code uli_u.icmpt.code
79#define fl_ipsec_spi uli_u.spi 80#define fl_ipsec_spi uli_u.spi
80#define fl_mh_type uli_u.mht.type 81#define fl_mh_type uli_u.mht.type
82#define fl_gre_key uli_u.gre_key
81 __u32 secid; /* used by xfrm; see secid.txt */ 83 __u32 secid; /* used by xfrm; see secid.txt */
82} __attribute__((__aligned__(BITS_PER_LONG/8))); 84} __attribute__((__aligned__(BITS_PER_LONG/8)));
83 85
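
[Editor's note] The new gre_key member (and its fl_gre_key alias) lets GRE keys take part in flow matching, alongside the existing fl_ipsec_spi. A sketch of filling it in when building a lookup key; the code that consumes the key is outside this section:

/* Sketch: build a flow key for a keyed GRE tunnel lookup. */
static void example_gre_flow(struct flowi *fl, __be32 key)
{
        memset(fl, 0, sizeof(*fl));
        fl->proto      = IPPROTO_GRE;
        fl->fl_gre_key = key;   /* network byte order, like fl_ipsec_spi */
}
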
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f95ff8d9aa47..04977eefb0ee 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -89,10 +89,11 @@ struct ip6_sf_socklist {
89struct ipv6_mc_socklist { 89struct ipv6_mc_socklist {
90 struct in6_addr addr; 90 struct in6_addr addr;
91 int ifindex; 91 int ifindex;
92 struct ipv6_mc_socklist *next; 92 struct ipv6_mc_socklist __rcu *next;
93 rwlock_t sflock; 93 rwlock_t sflock;
94 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 94 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
95 struct ip6_sf_socklist *sflist; 95 struct ip6_sf_socklist *sflist;
96 struct rcu_head rcu;
96}; 97};
97 98
98struct ip6_sf_list { 99struct ip6_sf_list {
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index aae08f686633..ff013505236b 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -25,6 +25,9 @@ struct sockaddr;
25extern int inet6_csk_bind_conflict(const struct sock *sk, 25extern int inet6_csk_bind_conflict(const struct sock *sk,
26 const struct inet_bind_bucket *tb); 26 const struct inet_bind_bucket *tb);
27 27
28extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
29 const struct request_sock *req);
30
28extern struct request_sock *inet6_csk_search_req(const struct sock *sk, 31extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
29 struct request_sock ***prevp, 32 struct request_sock ***prevp,
30 const __be16 rport, 33 const __be16 rport,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e4f494b42e06..6c93a56cc958 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,7 @@ struct inet_connection_sock_af_ops {
43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
44 struct request_sock *req, 44 struct request_sock *req,
45 struct dst_entry *dst); 45 struct dst_entry *dst);
46 int (*remember_stamp)(struct sock *sk); 46 struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
47 u16 net_header_len; 47 u16 net_header_len;
48 u16 sockaddr_len; 48 u16 sockaddr_len;
49 int (*setsockopt)(struct sock *sk, int level, int optname, 49 int (*setsockopt)(struct sock *sk, int level, int optname,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1989cfd7405f..8181498fa96c 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -116,8 +116,9 @@ struct inet_sock {
116 struct ipv6_pinfo *pinet6; 116 struct ipv6_pinfo *pinet6;
117#endif 117#endif
118 /* Socket demultiplex comparisons on incoming packets. */ 118 /* Socket demultiplex comparisons on incoming packets. */
119 __be32 inet_daddr; 119#define inet_daddr sk.__sk_common.skc_daddr
120 __be32 inet_rcv_saddr; 120#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
121
121 __be16 inet_dport; 122 __be16 inet_dport;
122 __u16 inet_num; 123 __u16 inet_num;
123 __be32 inet_saddr; 124 __be32 inet_saddr;
@@ -141,7 +142,7 @@ struct inet_sock {
141 nodefrag:1; 142 nodefrag:1;
142 int mc_index; 143 int mc_index;
143 __be32 mc_addr; 144 __be32 mc_addr;
144 struct ip_mc_socklist *mc_list; 145 struct ip_mc_socklist __rcu *mc_list;
145 struct { 146 struct {
146 unsigned int flags; 147 unsigned int flags;
147 unsigned int fragsize; 148 unsigned int fragsize;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index a066fdd50da6..17404b5388a7 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -88,12 +88,6 @@ extern void inet_twdr_hangman(unsigned long data);
88extern void inet_twdr_twkill_work(struct work_struct *work); 88extern void inet_twdr_twkill_work(struct work_struct *work);
89extern void inet_twdr_twcal_tick(unsigned long data); 89extern void inet_twdr_twcal_tick(unsigned long data);
90 90
91#if (BITS_PER_LONG == 64)
92#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
93#else
94#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
95#endif
96
97struct inet_bind_bucket; 91struct inet_bind_bucket;
98 92
99/* 93/*
@@ -117,15 +111,15 @@ struct inet_timewait_sock {
117#define tw_hash __tw_common.skc_hash 111#define tw_hash __tw_common.skc_hash
118#define tw_prot __tw_common.skc_prot 112#define tw_prot __tw_common.skc_prot
119#define tw_net __tw_common.skc_net 113#define tw_net __tw_common.skc_net
114#define tw_daddr __tw_common.skc_daddr
115#define tw_rcv_saddr __tw_common.skc_rcv_saddr
120 int tw_timeout; 116 int tw_timeout;
121 volatile unsigned char tw_substate; 117 volatile unsigned char tw_substate;
122 /* 3 bits hole, try to pack */
123 unsigned char tw_rcv_wscale; 118 unsigned char tw_rcv_wscale;
119
124 /* Socket demultiplex comparisons on incoming packets. */ 120 /* Socket demultiplex comparisons on incoming packets. */
125 /* these five are in inet_sock */ 121 /* these three are in inet_sock */
126 __be16 tw_sport; 122 __be16 tw_sport;
127 __be32 tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
128 __be32 tw_rcv_saddr;
129 __be16 tw_dport; 123 __be16 tw_dport;
130 __u16 tw_num; 124 __u16 tw_num;
131 kmemcheck_bitfield_begin(flags); 125 kmemcheck_bitfield_begin(flags);
@@ -191,10 +185,10 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
191 return (struct inet_timewait_sock *)sk; 185 return (struct inet_timewait_sock *)sk;
192} 186}
193 187
194static inline __be32 inet_rcv_saddr(const struct sock *sk) 188static inline __be32 sk_rcv_saddr(const struct sock *sk)
195{ 189{
196 return likely(sk->sk_state != TCP_TIME_WAIT) ? 190/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
197 inet_sk(sk)->inet_rcv_saddr : inet_twsk(sk)->tw_rcv_saddr; 191 return sk->__sk_common.skc_rcv_saddr;
198} 192}
199 193
200extern void inet_twsk_put(struct inet_timewait_sock *tw); 194extern void inet_twsk_put(struct inet_timewait_sock *tw);
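
[Editor's note] Moving daddr/rcv_saddr into sock_common is what allows the rename above: sk_rcv_saddr() needs no TCP_TIME_WAIT branch because full and timewait sockets now share the field. Callers become branch-free, e.g. (sketch):

/* Sketch: an address comparison now valid for established and TIME_WAIT
 * sockets alike, with no sk->sk_state check first. */
static bool example_same_local_addr(const struct sock *a, const struct sock *b)
{
        return sk_rcv_saddr(a) == sk_rcv_saddr(b);
}
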
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index fe239bfe5f7f..599d96e74114 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,12 +11,21 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/jiffies.h> 12#include <linux/jiffies.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <net/ipv6.h>
14#include <asm/atomic.h> 15#include <asm/atomic.h>
15 16
17struct inetpeer_addr {
18 union {
19 __be32 a4;
20 __be32 a6[4];
21 };
22 __u16 family;
23};
24
16struct inet_peer { 25struct inet_peer {
17 /* group together avl_left,avl_right,v4daddr to speedup lookups */ 26 /* group together avl_left,avl_right,v4daddr to speedup lookups */
18 struct inet_peer __rcu *avl_left, *avl_right; 27 struct inet_peer __rcu *avl_left, *avl_right;
19 __be32 v4daddr; /* peer's address */ 28 struct inetpeer_addr daddr;
20 __u32 avl_height; 29 __u32 avl_height;
21 struct list_head unused; 30 struct list_head unused;
22 __u32 dtime; /* the time of last use of not 31 __u32 dtime; /* the time of last use of not
@@ -26,7 +35,6 @@ struct inet_peer {
26 * Once inet_peer is queued for deletion (refcnt == -1), following fields 35 * Once inet_peer is queued for deletion (refcnt == -1), following fields
27 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp 36 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
28 * We can share memory with rcu_head to keep inet_peer small 37 * We can share memory with rcu_head to keep inet_peer small
29 * (less then 64 bytes)
30 */ 38 */
31 union { 39 union {
32 struct { 40 struct {
@@ -42,7 +50,25 @@ struct inet_peer {
42void inet_initpeers(void) __init; 50void inet_initpeers(void) __init;
43 51
44/* can be called with or without local BH being disabled */ 52/* can be called with or without local BH being disabled */
45struct inet_peer *inet_getpeer(__be32 daddr, int create); 53struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
54
55static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
56{
57 struct inetpeer_addr daddr;
58
59 daddr.a4 = v4daddr;
60 daddr.family = AF_INET;
61 return inet_getpeer(&daddr, create);
62}
63
64static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int create)
65{
66 struct inetpeer_addr daddr;
67
68 ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
69 daddr.family = AF_INET6;
70 return inet_getpeer(&daddr, create);
71}
46 72
47/* can be called from BH context or outside */ 73/* can be called from BH context or outside */
48extern void inet_putpeer(struct inet_peer *p); 74extern void inet_putpeer(struct inet_peer *p);
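
[Editor's note] inet_getpeer() now keys on the family-tagged inetpeer_addr; the two wrappers above keep callers short. Reference counting is unchanged, so a peer obtained with create != 0 still pairs with inet_putpeer(). Usage sketch (the address argument is whatever destination the caller tracks):

/* Sketch: look up (or create) per-destination state for an IPv4 peer. */
static void example_touch_peer(__be32 v4daddr)
{
        struct inet_peer *peer = inet_getpeer_v4(v4daddr, 1);

        if (peer) {
                /* ... update per-destination state on the peer ... */
                inet_putpeer(peer);
        }
}
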
diff --git a/include/net/ip.h b/include/net/ip.h
index 86e2b182a0c0..67fac78a186b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -201,7 +201,6 @@ static inline int inet_is_reserved_local_port(int port)
201 return test_bit(port, sysctl_local_reserved_ports); 201 return test_bit(port, sysctl_local_reserved_ports);
202} 202}
203 203
204extern int sysctl_ip_default_ttl;
205extern int sysctl_ip_nonlocal_bind; 204extern int sysctl_ip_nonlocal_bind;
206 205
207extern struct ctl_path net_core_path[]; 206extern struct ctl_path net_core_path[];
@@ -428,15 +427,6 @@ extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
428extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, 427extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
429 u32 info); 428 u32 info);
430 429
431/* sysctl helpers - any sysctl which holds a value that ends up being
432 * fed into the routing cache should use these handlers.
433 */
434int ipv4_doint_and_flush(ctl_table *ctl, int write,
435 void __user *buffer,
436 size_t *lenp, loff_t *ppos);
437int ipv4_doint_and_flush_strategy(ctl_table *table,
438 void __user *oldval, size_t __user *oldlenp,
439 void __user *newval, size_t newlen);
440#ifdef CONFIG_PROC_FS 430#ifdef CONFIG_PROC_FS
441extern int ip_misc_proc_init(void); 431extern int ip_misc_proc_init(void);
442#endif 432#endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 062a823d311c..708ff7cb8806 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -21,6 +21,7 @@
21#include <net/dst.h> 21#include <net/dst.h>
22#include <net/flow.h> 22#include <net/flow.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24#include <net/inetpeer.h>
24 25
25#ifdef CONFIG_IPV6_MULTIPLE_TABLES 26#ifdef CONFIG_IPV6_MULTIPLE_TABLES
26#define FIB6_TABLE_HASHSZ 256 27#define FIB6_TABLE_HASHSZ 256
@@ -109,6 +110,7 @@ struct rt6_info {
109 u32 rt6i_metric; 110 u32 rt6i_metric;
110 111
111 struct inet6_dev *rt6i_idev; 112 struct inet6_dev *rt6i_idev;
113 struct inet_peer *rt6i_peer;
112 114
113#ifdef CONFIG_XFRM 115#ifdef CONFIG_XFRM
114 u32 rt6i_flow_cache_genid; 116 u32 rt6i_flow_cache_genid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 278312c95f96..e06e0ca1e91b 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -3,7 +3,6 @@
3 3
4#define IP6_RT_PRIO_USER 1024 4#define IP6_RT_PRIO_USER 1024
5#define IP6_RT_PRIO_ADDRCONF 256 5#define IP6_RT_PRIO_ADDRCONF 256
6#define IP6_RT_PRIO_KERN 512
7 6
8struct route_info { 7struct route_info {
9 __u8 type; 8 __u8 type;
@@ -56,6 +55,18 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
56 return (flags >> 3) & 7; 55 return (flags >> 3) & 7;
57} 56}
58 57
58extern void rt6_bind_peer(struct rt6_info *rt,
59 int create);
60
61static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
62{
63 if (rt->rt6i_peer)
64 return rt->rt6i_peer;
65
66 rt6_bind_peer(rt, 0);
67 return rt->rt6i_peer;
68}
69
59extern void ip6_route_input(struct sk_buff *skb); 70extern void ip6_route_input(struct sk_buff *skb);
60 71
61extern struct dst_entry * ip6_route_output(struct net *net, 72extern struct dst_entry * ip6_route_output(struct net *net,
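
[Editor's note] rt6_get_peer() gives IPv6 routes the same lazy inet_peer binding that IPv4's rtable already carries (rt6i_peer was added to rt6_info in the ip6_fib.h hunk above). Caller sketch; since the bind uses create == 0, it can still come back empty:

/* Sketch: lazily bind and use the peer attached to an IPv6 route. */
static void example_use_v6_peer(struct rt6_info *rt)
{
        struct inet_peer *peer = rt6_get_peer(rt);

        if (!peer)
                return;         /* no peer bound and none created */
        /* ... per-destination state (e.g. fragment IDs) lives here ... */
}
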
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 895997bc2ead..e0e594f8e9d9 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -42,9 +42,6 @@ enum {
42#define ND_REACHABLE_TIME (30*HZ) 42#define ND_REACHABLE_TIME (30*HZ)
43#define ND_RETRANS_TIMER HZ 43#define ND_RETRANS_TIMER HZ
44 44
45#define ND_MIN_RANDOM_FACTOR (1/2)
46#define ND_MAX_RANDOM_FACTOR (3/2)
47
48#ifdef __KERNEL__ 45#ifdef __KERNEL__
49 46
50#include <linux/compiler.h> 47#include <linux/compiler.h>
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 55590ab16b3e..4014b623880c 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -96,16 +96,16 @@ struct neighbour {
96 struct neigh_parms *parms; 96 struct neigh_parms *parms;
97 unsigned long confirmed; 97 unsigned long confirmed;
98 unsigned long updated; 98 unsigned long updated;
99 __u8 flags; 99 rwlock_t lock;
100 __u8 nud_state;
101 __u8 type;
102 __u8 dead;
103 atomic_t refcnt; 100 atomic_t refcnt;
104 struct sk_buff_head arp_queue; 101 struct sk_buff_head arp_queue;
105 struct timer_list timer; 102 struct timer_list timer;
106 unsigned long used; 103 unsigned long used;
107 atomic_t probes; 104 atomic_t probes;
108 rwlock_t lock; 105 __u8 flags;
106 __u8 nud_state;
107 __u8 type;
108 __u8 dead;
109 seqlock_t ha_lock; 109 seqlock_t ha_lock;
110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; 110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
111 struct hh_cache *hh; 111 struct hh_cache *hh;
@@ -303,7 +303,7 @@ static inline void neigh_confirm(struct neighbour *neigh)
303 303
304static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) 304static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
305{ 305{
306 unsigned long now = ACCESS_ONCE(jiffies); 306 unsigned long now = jiffies;
307 307
308 if (neigh->used != now) 308 if (neigh->used != now)
309 neigh->used = now; 309 neigh->used = now;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f3b201d335b3..373f1a900cf4 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -225,13 +225,15 @@ extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
225 u32 pid, unsigned int group, int report, 225 u32 pid, unsigned int group, int report,
226 gfp_t flags); 226 gfp_t flags);
227 227
228extern int nla_validate(struct nlattr *head, int len, int maxtype, 228extern int nla_validate(const struct nlattr *head,
229 int len, int maxtype,
229 const struct nla_policy *policy); 230 const struct nla_policy *policy);
230extern int nla_parse(struct nlattr *tb[], int maxtype, 231extern int nla_parse(struct nlattr **tb, int maxtype,
231 struct nlattr *head, int len, 232 const struct nlattr *head, int len,
232 const struct nla_policy *policy); 233 const struct nla_policy *policy);
233extern int nla_policy_len(const struct nla_policy *, int); 234extern int nla_policy_len(const struct nla_policy *, int);
234extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype); 235extern struct nlattr * nla_find(const struct nlattr *head,
236 int len, int attrtype);
235extern size_t nla_strlcpy(char *dst, const struct nlattr *nla, 237extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
236 size_t dstsize); 238 size_t dstsize);
237extern int nla_memcpy(void *dest, const struct nlattr *src, int count); 239extern int nla_memcpy(void *dest, const struct nlattr *src, int count);
@@ -346,7 +348,8 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
346 * Returns the next netlink message in the message stream and 348 * Returns the next netlink message in the message stream and
347 * decrements remaining by the size of the current message. 349 * decrements remaining by the size of the current message.
348 */ 350 */
349static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining) 351static inline struct nlmsghdr *
352nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
350{ 353{
351 int totlen = NLMSG_ALIGN(nlh->nlmsg_len); 354 int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
352 355
@@ -384,7 +387,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
384 * 387 *
385 * Returns the first attribute which matches the specified type. 388 * Returns the first attribute which matches the specified type.
386 */ 389 */
387static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh, 390static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
388 int hdrlen, int attrtype) 391 int hdrlen, int attrtype)
389{ 392{
390 return nla_find(nlmsg_attrdata(nlh, hdrlen), 393 return nla_find(nlmsg_attrdata(nlh, hdrlen),
@@ -398,7 +401,8 @@ static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
398 * @maxtype: maximum attribute type to be expected 401 * @maxtype: maximum attribute type to be expected
399 * @policy: validation policy 402 * @policy: validation policy
400 */ 403 */
401static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype, 404static inline int nlmsg_validate(const struct nlmsghdr *nlh,
405 int hdrlen, int maxtype,
402 const struct nla_policy *policy) 406 const struct nla_policy *policy)
403{ 407{
404 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) 408 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
@@ -727,7 +731,8 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
727 * 731 *
728 * Returns the first attribute which matches the specified type. 732 * Returns the first attribute which matches the specified type.
729 */ 733 */
730static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype) 734static inline struct nlattr *
735nla_find_nested(const struct nlattr *nla, int attrtype)
731{ 736{
732 return nla_find(nla_data(nla), nla_len(nla), attrtype); 737 return nla_find(nla_data(nla), nla_len(nla), attrtype);
733} 738}
@@ -1032,7 +1037,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
1032 * 1037 *
1033 * Returns 0 on success or a negative error code. 1038 * Returns 0 on success or a negative error code.
1034 */ 1039 */
1035static inline int nla_validate_nested(struct nlattr *start, int maxtype, 1040static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
1036 const struct nla_policy *policy) 1041 const struct nla_policy *policy)
1037{ 1042{
1038 return nla_validate(nla_data(start), nla_len(start), maxtype, policy); 1043 return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
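
[Editor's note] The constification above is mechanical but useful: parse and validate helpers can now be fed attribute streams reached through const pointers. Sketch of a receive-path caller (the empty policy table is a placeholder, not part of this patch):

static const struct nla_policy example_policy[IFLA_MAX + 1]; /* placeholder */

/* Sketch: nlh can now stay const all the way down into nla_parse(). */
static int example_parse(const struct nlmsghdr *nlh)
{
        struct nlattr *tb[IFLA_MAX + 1];

        return nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                           example_policy);
}
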
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 81a31c0db3e7..3419bf5cd154 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -30,7 +30,7 @@ struct net_generic {
30 void *ptr[0]; 30 void *ptr[0];
31}; 31};
32 32
33static inline void *net_generic(struct net *net, int id) 33static inline void *net_generic(const struct net *net, int id)
34{ 34{
35 struct net_generic *ng; 35 struct net_generic *ng;
36 void *ptr; 36 void *ptr;
diff --git a/include/net/route.h b/include/net/route.h
index 7e5e73bfa4de..27002362944a 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -55,8 +55,6 @@ struct rtable {
55 /* Cache lookup keys */ 55 /* Cache lookup keys */
56 struct flowi fl; 56 struct flowi fl;
57 57
58 struct in_device *idev;
59
60 int rt_genid; 58 int rt_genid;
61 unsigned rt_flags; 59 unsigned rt_flags;
62 __u16 rt_type; 60 __u16 rt_type;
@@ -73,6 +71,16 @@ struct rtable {
73 struct inet_peer *peer; /* long-living peer info */ 71 struct inet_peer *peer; /* long-living peer info */
74}; 72};
75 73
74static inline bool rt_is_input_route(struct rtable *rt)
75{
76 return rt->fl.iif != 0;
77}
78
79static inline bool rt_is_output_route(struct rtable *rt)
80{
81 return rt->fl.iif == 0;
82}
83
76struct ip_rt_acct { 84struct ip_rt_acct {
77 __u32 o_bytes; 85 __u32 o_bytes;
78 __u32 o_packets; 86 __u32 o_packets;
@@ -161,14 +169,12 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
161{ 169{
162 struct flowi fl = { .oif = oif, 170 struct flowi fl = { .oif = oif,
163 .mark = sk->sk_mark, 171 .mark = sk->sk_mark,
164 .nl_u = { .ip4_u = { .daddr = dst, 172 .fl4_dst = dst,
165 .saddr = src, 173 .fl4_src = src,
166 .tos = tos } }, 174 .fl4_tos = tos,
167 .proto = protocol, 175 .proto = protocol,
168 .uli_u = { .ports = 176 .fl_ip_sport = sport,
169 { .sport = sport, 177 .fl_ip_dport = dport };
170 .dport = dport } } };
171
172 int err; 178 int err;
173 struct net *net = sock_net(sk); 179 struct net *net = sock_net(sk);
174 180
@@ -225,4 +231,15 @@ static inline int inet_iif(const struct sk_buff *skb)
225 return skb_rtable(skb)->rt_iif; 231 return skb_rtable(skb)->rt_iif;
226} 232}
227 233
234extern int sysctl_ip_default_ttl;
235
236static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
237{
238 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
239
240 if (hoplimit == 0)
241 hoplimit = sysctl_ip_default_ttl;
242 return hoplimit;
243}
244
228#endif /* _ROUTE_H */ 245#endif /* _ROUTE_H */
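
[Editor's note] Two things land here: rt_is_input_route()/rt_is_output_route() replace open-coded fl.iif tests, and the hop-limit default moves behind ip4_dst_hoplimit(), which is why sysctl_ip_default_ttl migrated from ip.h earlier in this diff and why dst_metric() now warns on RTAX_HOPLIMIT. A caller sketch (rt->dst is the dst_entry embedded in rtable):

/* Sketch: pick a TTL for a packet leaving via this route. */
static u8 example_pick_ttl(struct rtable *rt)
{
        if (rt_is_input_route(rt))      /* was: rt->fl.iif != 0 */
                pr_debug("route was created on the input path\n");

        return ip4_dst_hoplimit(&rt->dst); /* 0 metric -> sysctl default */
}
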
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e013c68bfb00..4093ca78cf60 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -83,6 +83,41 @@ extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
83extern int rtnl_link_register(struct rtnl_link_ops *ops); 83extern int rtnl_link_register(struct rtnl_link_ops *ops);
84extern void rtnl_link_unregister(struct rtnl_link_ops *ops); 84extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
85 85
86/**
87 * struct rtnl_af_ops - rtnetlink address family operations
88 *
89 * @list: Used internally
90 * @family: Address family
91 * @fill_link_af: Function to fill IFLA_AF_SPEC with address family
92 * specific netlink attributes.
93 * @get_link_af_size: Function to calculate size of address family specific
 94 * netlink attributes, exclusive of the container attribute.
 95 * @validate_link_af: Validate an IFLA_AF_SPEC attribute; must check attr
 96 * for invalid configuration settings.
 97 * @set_link_af: Function to parse an IFLA_AF_SPEC attribute and modify
98 * net_device accordingly.
99 */
100struct rtnl_af_ops {
101 struct list_head list;
102 int family;
103
104 int (*fill_link_af)(struct sk_buff *skb,
105 const struct net_device *dev);
106 size_t (*get_link_af_size)(const struct net_device *dev);
107
108 int (*validate_link_af)(const struct net_device *dev,
109 const struct nlattr *attr);
110 int (*set_link_af)(struct net_device *dev,
111 const struct nlattr *attr);
112};
113
114extern int __rtnl_af_register(struct rtnl_af_ops *ops);
115extern void __rtnl_af_unregister(struct rtnl_af_ops *ops);
116
117extern int rtnl_af_register(struct rtnl_af_ops *ops);
118extern void rtnl_af_unregister(struct rtnl_af_ops *ops);
119
120
86extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); 121extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
87extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net, 122extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
88 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]); 123 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
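
[Editor's note] A minimal sketch of wiring up the new per-family hook. The structure and the register call come from this patch; the stub callbacks are placeholders (a real user, such as IPv6, would dump and apply its per-device configuration here):

static int example_fill_link_af(struct sk_buff *skb,
                                const struct net_device *dev)
{
        return 0;               /* nothing to dump in this sketch */
}

static size_t example_get_link_af_size(const struct net_device *dev)
{
        return 0;               /* matching: no attributes */
}

static struct rtnl_af_ops example_af_ops = {
        .family           = AF_INET6,
        .fill_link_af     = example_fill_link_af,
        .get_link_af_size = example_get_link_af_size,
};

static int __init example_af_init(void)
{
        return rtnl_af_register(&example_af_ops);
}
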
diff --git a/include/net/scm.h b/include/net/scm.h
index 31656506d967..745460fa2f02 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -10,11 +10,12 @@
10/* Well, we should have at least one descriptor open 10/* Well, we should have at least one descriptor open
11 * to accept passed FDs 8) 11 * to accept passed FDs 8)
12 */ 12 */
13#define SCM_MAX_FD 255 13#define SCM_MAX_FD 253
14 14
15struct scm_fp_list { 15struct scm_fp_list {
16 struct list_head list; 16 struct list_head list;
17 int count; 17 short count;
18 short max;
18 struct file *fp[SCM_MAX_FD]; 19 struct file *fp[SCM_MAX_FD];
19}; 20};
20 21
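
[Editor's note] The hunk itself does not say why 253; the arithmetic below is the editor's inference from the structure layout and should be read as such:

/* On 64-bit, sizeof(struct scm_fp_list) with SCM_MAX_FD == 253:
 *      16 (list_head) + 2 + 2 (count, max) + 4 (padding) + 253 * 8 (fp)
 *      = 2048 bytes, i.e. exactly one kmalloc-2048 object.
 * With the old value of 255 it spilled into the next slab size.
 */
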
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 2c55a7ea20af..c01dc99def07 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -111,9 +111,6 @@ typedef enum {
111 SCTP_CMD_LAST 111 SCTP_CMD_LAST
112} sctp_verb_t; 112} sctp_verb_t;
113 113
114#define SCTP_CMD_MAX (SCTP_CMD_LAST - 1)
115#define SCTP_CMD_NUM_VERBS (SCTP_CMD_MAX + 1)
116
117/* How many commands can you put in an sctp_cmd_seq_t? 114/* How many commands can you put in an sctp_cmd_seq_t?
118 * This is a rather arbitrary number, ideally derived from a careful 115 * This is a rather arbitrary number, ideally derived from a careful
119 * analysis of the state functions, but in reality just taken from 116 * analysis of the state functions, but in reality just taken from
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 63908840eef0..c70d8ccc55cb 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -61,7 +61,6 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
61 * symbols. CIDs are dense through SCTP_CID_BASE_MAX. 61 * symbols. CIDs are dense through SCTP_CID_BASE_MAX.
62 */ 62 */
63#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE 63#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE
64#define SCTP_CID_MAX SCTP_CID_ASCONF_ACK
65 64
66#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1) 65#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
67 66
@@ -86,9 +85,6 @@ typedef enum {
86 85
87} sctp_event_t; 86} sctp_event_t;
88 87
89#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE
90#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1)
91
92/* As a convenience for the state machine, we append SCTP_EVENT_* and 88/* As a convenience for the state machine, we append SCTP_EVENT_* and
93 * SCTP_ULP_* to the list of possible chunks. 89 * SCTP_ULP_* to the list of possible chunks.
94 */ 90 */
@@ -162,9 +158,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
162 - (unsigned long)(c->chunk_hdr)\ 158 - (unsigned long)(c->chunk_hdr)\
163 - sizeof(sctp_data_chunk_t))) 159 - sizeof(sctp_data_chunk_t)))
164 160
165#define SCTP_MAX_ERROR_CAUSE SCTP_ERROR_NONEXIST_IP
166#define SCTP_NUM_ERROR_CAUSE 10
167
168/* Internal error codes */ 161/* Internal error codes */
169typedef enum { 162typedef enum {
170 163
@@ -266,7 +259,6 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
266#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG 259#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
267#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL 260#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
268#define SCTP_TSN_MAP_SIZE 4096 261#define SCTP_TSN_MAP_SIZE 4096
269#define SCTP_TSN_MAX_GAP 65535
270 262
271/* We will not record more than this many duplicate TSNs between two 263/* We will not record more than this many duplicate TSNs between two
272 * SACKs. The minimum PMTU is 576. Remove all the headers and there 264 * SACKs. The minimum PMTU is 576. Remove all the headers and there
@@ -301,9 +293,6 @@ enum { SCTP_MAX_GABS = 16 };
301 293
302#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */ 294#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */
303 295
304#define SCTP_DEF_MAX_INIT 6
305#define SCTP_DEF_MAX_SEND 10
306
307#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */ 296#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */
308 297
309#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */ 298#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */
@@ -317,9 +306,6 @@ enum { SCTP_MAX_GABS = 16 };
317 */ 306 */
318#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */ 307#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
319#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */ 308#define SCTP_HOW_MANY_SECRETS 2 /* How many secrets I keep */
320#define SCTP_HOW_LONG_COOKIE_LIVE 3600 /* How many seconds the current
321 * secret will live?
322 */
323#define SCTP_SECRET_SIZE 32 /* Number of octets in 256 bits. */ 309#define SCTP_SECRET_SIZE 32 /* Number of octets in 256 bits. */
324 310
325#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */ 311#define SCTP_SIGNATURE_SIZE 20 /* size of a SHA-1 signature */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 69fef4fb79c0..cc9185ca8fd1 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -261,8 +261,6 @@ extern struct sctp_globals {
261#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize) 261#define sctp_assoc_hashsize (sctp_globals.assoc_hashsize)
262#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) 262#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
263#define sctp_port_hashsize (sctp_globals.port_hashsize) 263#define sctp_port_hashsize (sctp_globals.port_hashsize)
264#define sctp_port_rover (sctp_globals.port_rover)
265#define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
266#define sctp_port_hashtable (sctp_globals.port_hashtable) 264#define sctp_port_hashtable (sctp_globals.port_hashtable)
267#define sctp_local_addr_list (sctp_globals.local_addr_list) 265#define sctp_local_addr_list (sctp_globals.local_addr_list)
268#define sctp_local_addr_lock (sctp_globals.addr_list_lock) 266#define sctp_local_addr_lock (sctp_globals.addr_list_lock)
diff --git a/include/net/snmp.h b/include/net/snmp.h
index a0e61806d480..762e2abce889 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -60,9 +60,7 @@ struct ipstats_mib {
60}; 60};
61 61
62/* ICMP */ 62/* ICMP */
63#define ICMP_MIB_DUMMY __ICMP_MIB_MAX 63#define ICMP_MIB_MAX __ICMP_MIB_MAX
64#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
65
66struct icmp_mib { 64struct icmp_mib {
67 unsigned long mibs[ICMP_MIB_MAX]; 65 unsigned long mibs[ICMP_MIB_MAX];
68}; 66};
diff --git a/include/net/sock.h b/include/net/sock.h
index c7a736228ca2..82e86034702f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,7 @@
57#include <linux/rculist_nulls.h> 57#include <linux/rculist_nulls.h>
58#include <linux/poll.h> 58#include <linux/poll.h>
59 59
60#include <asm/atomic.h> 60#include <linux/atomic.h>
61#include <net/dst.h> 61#include <net/dst.h>
62#include <net/checksum.h> 62#include <net/checksum.h>
63 63
@@ -105,10 +105,8 @@ struct net;
105 105
106/** 106/**
107 * struct sock_common - minimal network layer representation of sockets 107 * struct sock_common - minimal network layer representation of sockets
108 * @skc_node: main hash linkage for various protocol lookup tables 108 * @skc_daddr: Foreign IPv4 addr
109 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol 109 * @skc_rcv_saddr: Bound local IPv4 addr
110 * @skc_refcnt: reference count
111 * @skc_tx_queue_mapping: tx queue number for this connection
112 * @skc_hash: hash value used with various protocol lookup tables 110 * @skc_hash: hash value used with various protocol lookup tables
113 * @skc_u16hashes: two u16 hash values used by UDP lookup tables 111 * @skc_u16hashes: two u16 hash values used by UDP lookup tables
114 * @skc_family: network address family 112 * @skc_family: network address family
@@ -119,20 +117,20 @@ struct net;
119 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol 117 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
120 * @skc_prot: protocol handlers inside a network family 118 * @skc_prot: protocol handlers inside a network family
121 * @skc_net: reference to the network namespace of this socket 119 * @skc_net: reference to the network namespace of this socket
120 * @skc_node: main hash linkage for various protocol lookup tables
121 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
122 * @skc_tx_queue_mapping: tx queue number for this connection
123 * @skc_refcnt: reference count
122 * 124 *
123 * This is the minimal network layer representation of sockets, the header 125 * This is the minimal network layer representation of sockets, the header
124 * for struct sock and struct inet_timewait_sock. 126 * for struct sock and struct inet_timewait_sock.
125 */ 127 */
126struct sock_common { 128struct sock_common {
127 /* 129 /* skc_daddr and skc_rcv_saddr must be grouped:
128 * first fields are not copied in sock_copy() 130 * cf. INET_MATCH() and INET_TW_MATCH()
129 */ 131 */
130 union { 132 __be32 skc_daddr;
131 struct hlist_node skc_node; 133 __be32 skc_rcv_saddr;
132 struct hlist_nulls_node skc_nulls_node;
133 };
134 atomic_t skc_refcnt;
135 int skc_tx_queue_mapping;
136 134
137 union { 135 union {
138 unsigned int skc_hash; 136 unsigned int skc_hash;
@@ -150,6 +148,18 @@ struct sock_common {
150#ifdef CONFIG_NET_NS 148#ifdef CONFIG_NET_NS
151 struct net *skc_net; 149 struct net *skc_net;
152#endif 150#endif
151 /*
152 * fields between dontcopy_begin/dontcopy_end
153 * are not copied in sock_copy()
154 */
155 int skc_dontcopy_begin[0];
156 union {
157 struct hlist_node skc_node;
158 struct hlist_nulls_node skc_nulls_node;
159 };
160 int skc_tx_queue_mapping;
161 atomic_t skc_refcnt;
162 int skc_dontcopy_end[0];
153}; 163};
154 164
155/** 165/**
@@ -232,7 +242,8 @@ struct sock {
232#define sk_refcnt __sk_common.skc_refcnt 242#define sk_refcnt __sk_common.skc_refcnt
233#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping 243#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
234 244
235#define sk_copy_start __sk_common.skc_hash 245#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
246#define sk_dontcopy_end __sk_common.skc_dontcopy_end
236#define sk_hash __sk_common.skc_hash 247#define sk_hash __sk_common.skc_hash
237#define sk_family __sk_common.skc_family 248#define sk_family __sk_common.skc_family
238#define sk_state __sk_common.skc_state 249#define sk_state __sk_common.skc_state
@@ -241,59 +252,67 @@ struct sock {
241#define sk_bind_node __sk_common.skc_bind_node 252#define sk_bind_node __sk_common.skc_bind_node
242#define sk_prot __sk_common.skc_prot 253#define sk_prot __sk_common.skc_prot
243#define sk_net __sk_common.skc_net 254#define sk_net __sk_common.skc_net
244 kmemcheck_bitfield_begin(flags);
245 unsigned int sk_shutdown : 2,
246 sk_no_check : 2,
247 sk_userlocks : 4,
248 sk_protocol : 8,
249 sk_type : 16;
250 kmemcheck_bitfield_end(flags);
251 int sk_rcvbuf;
252 socket_lock_t sk_lock; 255 socket_lock_t sk_lock;
256 struct sk_buff_head sk_receive_queue;
253 /* 257 /*
254 * The backlog queue is special, it is always used with 258 * The backlog queue is special, it is always used with
255 * the per-socket spinlock held and requires low latency 259 * the per-socket spinlock held and requires low latency
256 * access. Therefore we special case its implementation. 260 * access. Therefore we special case its implementation.
261 * Note: rmem_alloc is in this structure to fill a hole
262 * on 64-bit arches, not because it's logically part of
263 * backlog.
257 */ 264 */
258 struct { 265 struct {
259 struct sk_buff *head; 266 atomic_t rmem_alloc;
260 struct sk_buff *tail; 267 int len;
261 int len; 268 struct sk_buff *head;
269 struct sk_buff *tail;
262 } sk_backlog; 270 } sk_backlog;
271#define sk_rmem_alloc sk_backlog.rmem_alloc
272 int sk_forward_alloc;
273#ifdef CONFIG_RPS
274 __u32 sk_rxhash;
275#endif
276 atomic_t sk_drops;
277 int sk_rcvbuf;
278
279 struct sk_filter __rcu *sk_filter;
263 struct socket_wq *sk_wq; 280 struct socket_wq *sk_wq;
264 struct dst_entry *sk_dst_cache; 281
282#ifdef CONFIG_NET_DMA
283 struct sk_buff_head sk_async_wait_queue;
284#endif
285
265#ifdef CONFIG_XFRM 286#ifdef CONFIG_XFRM
266 struct xfrm_policy *sk_policy[2]; 287 struct xfrm_policy *sk_policy[2];
267#endif 288#endif
289 unsigned long sk_flags;
290 struct dst_entry *sk_dst_cache;
268 spinlock_t sk_dst_lock; 291 spinlock_t sk_dst_lock;
269 atomic_t sk_rmem_alloc;
270 atomic_t sk_wmem_alloc; 292 atomic_t sk_wmem_alloc;
271 atomic_t sk_omem_alloc; 293 atomic_t sk_omem_alloc;
272 int sk_sndbuf; 294 int sk_sndbuf;
273 struct sk_buff_head sk_receive_queue;
274 struct sk_buff_head sk_write_queue; 295 struct sk_buff_head sk_write_queue;
275#ifdef CONFIG_NET_DMA 296 kmemcheck_bitfield_begin(flags);
276 struct sk_buff_head sk_async_wait_queue; 297 unsigned int sk_shutdown : 2,
277#endif 298 sk_no_check : 2,
299 sk_userlocks : 4,
300 sk_protocol : 8,
301 sk_type : 16;
302 kmemcheck_bitfield_end(flags);
278 int sk_wmem_queued; 303 int sk_wmem_queued;
279 int sk_forward_alloc;
280 gfp_t sk_allocation; 304 gfp_t sk_allocation;
281 int sk_route_caps; 305 int sk_route_caps;
282 int sk_route_nocaps; 306 int sk_route_nocaps;
283 int sk_gso_type; 307 int sk_gso_type;
284 unsigned int sk_gso_max_size; 308 unsigned int sk_gso_max_size;
285 int sk_rcvlowat; 309 int sk_rcvlowat;
286#ifdef CONFIG_RPS
287 __u32 sk_rxhash;
288#endif
289 unsigned long sk_flags;
290 unsigned long sk_lingertime; 310 unsigned long sk_lingertime;
291 struct sk_buff_head sk_error_queue; 311 struct sk_buff_head sk_error_queue;
292 struct proto *sk_prot_creator; 312 struct proto *sk_prot_creator;
293 rwlock_t sk_callback_lock; 313 rwlock_t sk_callback_lock;
294 int sk_err, 314 int sk_err,
295 sk_err_soft; 315 sk_err_soft;
296 atomic_t sk_drops;
297 unsigned short sk_ack_backlog; 316 unsigned short sk_ack_backlog;
298 unsigned short sk_max_ack_backlog; 317 unsigned short sk_max_ack_backlog;
299 __u32 sk_priority; 318 __u32 sk_priority;
@@ -301,7 +320,6 @@ struct sock {
301 const struct cred *sk_peer_cred; 320 const struct cred *sk_peer_cred;
302 long sk_rcvtimeo; 321 long sk_rcvtimeo;
303 long sk_sndtimeo; 322 long sk_sndtimeo;
304 struct sk_filter __rcu *sk_filter;
305 void *sk_protinfo; 323 void *sk_protinfo;
306 struct timer_list sk_timer; 324 struct timer_list sk_timer;
307 ktime_t sk_stamp; 325 ktime_t sk_stamp;
@@ -509,9 +527,6 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
509#define sk_nulls_for_each_from(__sk, node) \ 527#define sk_nulls_for_each_from(__sk, node) \
510 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 528 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
511 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 529 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
512#define sk_for_each_continue(__sk, node) \
513 if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
514 hlist_for_each_entry_continue(__sk, node, sk_node)
515#define sk_for_each_safe(__sk, node, tmp, list) \ 530#define sk_for_each_safe(__sk, node, tmp, list) \
516 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node) 531 hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
517#define sk_for_each_bound(__sk, node, list) \ 532#define sk_for_each_bound(__sk, node, list) \
@@ -762,7 +777,7 @@ struct proto {
762 777
763 /* Memory pressure */ 778 /* Memory pressure */
764 void (*enter_memory_pressure)(struct sock *sk); 779 void (*enter_memory_pressure)(struct sock *sk);
765 atomic_t *memory_allocated; /* Current allocated memory. */ 780 atomic_long_t *memory_allocated; /* Current allocated memory. */
766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 781 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
767 /* 782 /*
768 * Pressure flag: try to collapse. 783 * Pressure flag: try to collapse.
@@ -771,7 +786,7 @@ struct proto {
771 * is strict, actions are advisory and have some latency. 786 * is strict, actions are advisory and have some latency.
772 */ 787 */
773 int *memory_pressure; 788 int *memory_pressure;
774 int *sysctl_mem; 789 long *sysctl_mem;
775 int *sysctl_wmem; 790 int *sysctl_wmem;
776 int *sysctl_rmem; 791 int *sysctl_rmem;
777 int max_header; 792 int max_header;
@@ -1155,6 +1170,8 @@ extern void sk_common_release(struct sock *sk);
1155/* Initialise core socket variables */ 1170/* Initialise core socket variables */
1156extern void sock_init_data(struct socket *sock, struct sock *sk); 1171extern void sock_init_data(struct socket *sock, struct sock *sk);
1157 1172
1173extern void sk_filter_release_rcu(struct rcu_head *rcu);
1174
1158/** 1175/**
1159 * sk_filter_release - release a socket filter 1176 * sk_filter_release - release a socket filter
1160 * @fp: filter to remove 1177 * @fp: filter to remove
@@ -1165,7 +1182,7 @@ extern void sock_init_data(struct socket *sock, struct sock *sk);
1165static inline void sk_filter_release(struct sk_filter *fp) 1182static inline void sk_filter_release(struct sk_filter *fp)
1166{ 1183{
1167 if (atomic_dec_and_test(&fp->refcnt)) 1184 if (atomic_dec_and_test(&fp->refcnt))
1168 kfree(fp); 1185 call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
1169} 1186}
1170 1187
1171static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1188static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
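
[Editor's note] The skc_dontcopy_begin/end zero-length markers replace the old "first fields are not copied" comment with something sock_copy() can compute offsets from. A sketch of the idea; the real sock_copy() lives in net/core/sock.c and also preserves the security pointer, so this shows only the offset arithmetic:

/* Sketch: clone a socket, skipping the [dontcopy_begin, dontcopy_end)
 * window that holds hash linkage, tx queue mapping and the refcount. */
static void sock_copy_sketch(struct sock *nsk, const struct sock *osk)
{
        memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
        memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
               osk->sk_prot->obj_size -
               offsetof(struct sock, sk_dontcopy_end));
}
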
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4fee0424af7e..3f227baee4be 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -100,12 +100,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
100#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a 100#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
101 * connection: ~180sec is RFC minimum */ 101 * connection: ~180sec is RFC minimum */
102 102
103
104#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
105 * socket. 7 is ~50sec-16min.
106 */
107
108
109#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT 103#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
110 * state, about 60 seconds */ 104 * state, about 60 seconds */
111#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN 105#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
@@ -224,7 +218,7 @@ extern int sysctl_tcp_fack;
224extern int sysctl_tcp_reordering; 218extern int sysctl_tcp_reordering;
225extern int sysctl_tcp_ecn; 219extern int sysctl_tcp_ecn;
226extern int sysctl_tcp_dsack; 220extern int sysctl_tcp_dsack;
227extern int sysctl_tcp_mem[3]; 221extern long sysctl_tcp_mem[3];
228extern int sysctl_tcp_wmem[3]; 222extern int sysctl_tcp_wmem[3];
229extern int sysctl_tcp_rmem[3]; 223extern int sysctl_tcp_rmem[3];
230extern int sysctl_tcp_app_win; 224extern int sysctl_tcp_app_win;
@@ -247,7 +241,7 @@ extern int sysctl_tcp_cookie_size;
247extern int sysctl_tcp_thin_linear_timeouts; 241extern int sysctl_tcp_thin_linear_timeouts;
248extern int sysctl_tcp_thin_dupack; 242extern int sysctl_tcp_thin_dupack;
249 243
250extern atomic_t tcp_memory_allocated; 244extern atomic_long_t tcp_memory_allocated;
251extern struct percpu_counter tcp_sockets_allocated; 245extern struct percpu_counter tcp_sockets_allocated;
252extern int tcp_memory_pressure; 246extern int tcp_memory_pressure;
253 247
@@ -280,7 +274,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
280 } 274 }
281 275
282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 276 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
283 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 277 atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
284 return true; 278 return true;
285 return false; 279 return false;
286} 280}
@@ -312,7 +306,8 @@ extern void tcp_shutdown (struct sock *sk, int how);
312 306
313extern int tcp_v4_rcv(struct sk_buff *skb); 307extern int tcp_v4_rcv(struct sk_buff *skb);
314 308
315extern int tcp_v4_remember_stamp(struct sock *sk); 309extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
310extern void *tcp_v4_tw_get_peer(struct sock *sk);
316extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); 311extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
317extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 312extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
318 size_t size); 313 size_t size);
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 97c3b14da55d..053b3cf2c66a 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -21,6 +21,7 @@ struct timewait_sock_ops {
21 int (*twsk_unique)(struct sock *sk, 21 int (*twsk_unique)(struct sock *sk,
22 struct sock *sktw, void *twp); 22 struct sock *sktw, void *twp);
23 void (*twsk_destructor)(struct sock *sk); 23 void (*twsk_destructor)(struct sock *sk);
24 void *(*twsk_getpeer)(struct sock *sk);
24}; 25};
25 26
26static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) 27static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -39,4 +40,11 @@ static inline void twsk_destructor(struct sock *sk)
39 sk->sk_prot->twsk_prot->twsk_destructor(sk); 40 sk->sk_prot->twsk_prot->twsk_destructor(sk);
40} 41}
41 42
43static inline void *twsk_getpeer(struct sock *sk)
44{
45 if (sk->sk_prot->twsk_prot->twsk_getpeer)
46 return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
47 return NULL;
48}
49
42#endif /* _TIMEWAIT_SOCK_H */ 50#endif /* _TIMEWAIT_SOCK_H */
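
[Editor's note] twsk_getpeer() is the timewait-side counterpart of the new get_peer op in inet_connection_sock_af_ops; tcp.h above declares tcp_v4_tw_get_peer() as the IPv4 instance. Hook-up sketch (field subset only; a real ops table also carries the slab fields):

static struct timewait_sock_ops example_tw_ops = {
        .twsk_obj_size = sizeof(struct inet_timewait_sock),
        .twsk_getpeer  = tcp_v4_tw_get_peer,
};
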
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
deleted file mode 100644
index 1e0645e1eed2..000000000000
--- a/include/net/tipc/tipc.h
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * include/net/tipc/tipc.h: Main include file for TIPC users
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005,2010 Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_H_
38#define _NET_TIPC_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc.h>
43#include <linux/skbuff.h>
44
45/*
46 * Native API
47 */
48
49/*
50 * TIPC operating mode routines
51 */
52
53#define TIPC_NOT_RUNNING 0
54#define TIPC_NODE_MODE 1
55#define TIPC_NET_MODE 2
56
57typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
58
59int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
60
61void tipc_detach(unsigned int userref);
62
63/*
64 * TIPC port manipulation routines
65 */
66
67typedef void (*tipc_msg_err_event) (void *usr_handle,
68 u32 portref,
69 struct sk_buff **buf,
70 unsigned char const *data,
71 unsigned int size,
72 int reason,
73 struct tipc_portid const *attmpt_destid);
74
75typedef void (*tipc_named_msg_err_event) (void *usr_handle,
76 u32 portref,
77 struct sk_buff **buf,
78 unsigned char const *data,
79 unsigned int size,
80 int reason,
81 struct tipc_name_seq const *attmpt_dest);
82
83typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
84 u32 portref,
85 struct sk_buff **buf,
86 unsigned char const *data,
87 unsigned int size,
88 int reason);
89
90typedef void (*tipc_msg_event) (void *usr_handle,
91 u32 portref,
92 struct sk_buff **buf,
93 unsigned char const *data,
94 unsigned int size,
95 unsigned int importance,
96 struct tipc_portid const *origin);
97
98typedef void (*tipc_named_msg_event) (void *usr_handle,
99 u32 portref,
100 struct sk_buff **buf,
101 unsigned char const *data,
102 unsigned int size,
103 unsigned int importance,
104 struct tipc_portid const *orig,
105 struct tipc_name_seq const *dest);
106
107typedef void (*tipc_conn_msg_event) (void *usr_handle,
108 u32 portref,
109 struct sk_buff **buf,
110 unsigned char const *data,
111 unsigned int size);
112
113typedef void (*tipc_continue_event) (void *usr_handle,
114 u32 portref);
115
116int tipc_createport(unsigned int tipc_user,
117 void *usr_handle,
118 unsigned int importance,
119 tipc_msg_err_event error_cb,
120 tipc_named_msg_err_event named_error_cb,
121 tipc_conn_shutdown_event conn_error_cb,
122 tipc_msg_event message_cb,
123 tipc_named_msg_event named_message_cb,
124 tipc_conn_msg_event conn_message_cb,
125 tipc_continue_event continue_event_cb,
126 u32 *portref);
127
128int tipc_deleteport(u32 portref);
129
130int tipc_ownidentity(u32 portref, struct tipc_portid *port);
131
132int tipc_portimportance(u32 portref, unsigned int *importance);
133int tipc_set_portimportance(u32 portref, unsigned int importance);
134
135int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
136int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
137
138int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
139int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
140
141int tipc_publish(u32 portref, unsigned int scope,
142 struct tipc_name_seq const *name_seq);
143int tipc_withdraw(u32 portref, unsigned int scope,
144 struct tipc_name_seq const *name_seq);
145
146int tipc_connect2port(u32 portref, struct tipc_portid const *port);
147
148int tipc_disconnect(u32 portref);
149
150int tipc_shutdown(u32 ref);
151
152/*
153 * TIPC messaging routines
154 */
155
156#define TIPC_PORT_IMPORTANCE 100 /* send using current port setting */
157
158
159int tipc_send(u32 portref,
160 unsigned int num_sect,
161 struct iovec const *msg_sect);
162
163int tipc_send2name(u32 portref,
164 struct tipc_name const *name,
165 u32 domain,
166 unsigned int num_sect,
167 struct iovec const *msg_sect);
168
169int tipc_send2port(u32 portref,
170 struct tipc_portid const *dest,
171 unsigned int num_sect,
172 struct iovec const *msg_sect);
173
174int tipc_send_buf2port(u32 portref,
175 struct tipc_portid const *dest,
176 struct sk_buff *buf,
177 unsigned int dsz);
178
179int tipc_multicast(u32 portref,
180 struct tipc_name_seq const *seq,
181 u32 domain, /* currently unused */
182 unsigned int section_count,
183 struct iovec const *msg);
184#endif
185
186#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
deleted file mode 100644
index ee2f304e4919..000000000000
--- a/include/net/tipc/tipc_bearer.h
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_BEARER_H_
38#define _NET_TIPC_BEARER_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc_config.h>
43#include <linux/skbuff.h>
44#include <linux/spinlock.h>
45
46/*
47 * Identifiers of supported TIPC media types
48 */
49
50#define TIPC_MEDIA_TYPE_ETH 1
51
52/*
53 * Destination address structure used by TIPC bearers when sending messages
54 *
55 * IMPORTANT: The fields of this structure MUST be stored using the specified
56 * byte order indicated below, as the structure is exchanged between nodes
57 * as part of a link setup process.
58 */
59
60struct tipc_media_addr {
61 __be32 type; /* bearer type (network byte order) */
62 union {
63 __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
64#if 0
65 /* Prototypes for other possible bearer types */
66
67 struct {
68 __u16 sin_family;
69 __u16 sin_port;
70 struct {
71 __u32 s_addr;
72 } sin_addr;
73 char pad[4];
74 } addr_in; /* IP-based bearer */
75 __u16 sock_descr; /* generic socket bearer */
76#endif
77 } dev_addr;
78};
79
80/**
81 * struct tipc_bearer - TIPC bearer info available to privileged users
82 * @usr_handle: pointer to additional user-defined information about bearer
83 * @mtu: max packet size bearer can support
84 * @blocked: non-zero if bearer is blocked
85 * @lock: spinlock for controlling access to bearer
86 * @addr: media-specific address associated with bearer
87 * @name: bearer name (format = media:interface)
88 *
89 * Note: TIPC initializes "name" and "lock" fields; user is responsible for
90 * initialization all other fields when a bearer is enabled.
91 */
92
93struct tipc_bearer {
94 void *usr_handle;
95 u32 mtu;
96 int blocked;
97 spinlock_t lock;
98 struct tipc_media_addr addr;
99 char name[TIPC_MAX_BEARER_NAME];
100};
101
102/*
103 * TIPC routines available to supported media types
104 */
105
106int tipc_register_media(u32 media_type,
107 char *media_name,
108 int (*enable)(struct tipc_bearer *),
109 void (*disable)(struct tipc_bearer *),
110 int (*send_msg)(struct sk_buff *,
111 struct tipc_bearer *,
112 struct tipc_media_addr *),
113 char *(*addr2str)(struct tipc_media_addr *a,
114 char *str_buf,
115 int str_size),
116 struct tipc_media_addr *bcast_addr,
117 const u32 bearer_priority,
118 const u32 link_tolerance, /* [ms] */
119 const u32 send_window_limit);
120
121void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
122
123int tipc_block_bearer(const char *name);
124void tipc_continue(struct tipc_bearer *tb_ptr);
125
126int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
127int tipc_disable_bearer(const char *name);
128
129/*
130 * Routines made available to TIPC by supported media types
131 */
132
133int tipc_eth_media_start(void);
134void tipc_eth_media_stop(void);
135
136#endif
137
138#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
deleted file mode 100644
index ffe50b4e7b93..000000000000
--- a/include/net/tipc/tipc_msg.h
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_MSG_H_
38#define _NET_TIPC_MSG_H_
39
40#ifdef __KERNEL__
41
42struct tipc_msg {
43 __be32 hdr[15];
44};
45
46
47/*
48 TIPC user data message header format, version 2:
49
50
51 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
52 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
53 w0:|vers | user |hdr sz |n|d|s|-| message size |
54 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
55 w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
56 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
57 w2:| link level ack no | broadcast/link level seq no |
58 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
59 w3:| previous node |
60 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
61 w4:| originating port |
62 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
63 w5:| destination port |
64 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
65 w6:| originating node |
66 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
67 w7:| destination node |
68 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
69 w8:| name type / transport sequence number |
70 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
71 w9:| name instance/multicast lower bound |
72 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
73 wA:| multicast upper bound |
74 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
75 / /
76 \ options \
77 / /
78 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
79
80*/
81
82#define TIPC_CONN_MSG 0
83#define TIPC_MCAST_MSG 1
84#define TIPC_NAMED_MSG 2
85#define TIPC_DIRECT_MSG 3
86
87
88static inline u32 msg_word(struct tipc_msg *m, u32 pos)
89{
90 return ntohl(m->hdr[pos]);
91}
92
93static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
94{
95 return (msg_word(m, w) >> pos) & mask;
96}
97
98static inline u32 msg_importance(struct tipc_msg *m)
99{
100 return msg_bits(m, 0, 25, 0xf);
101}
102
103static inline u32 msg_hdr_sz(struct tipc_msg *m)
104{
105 return msg_bits(m, 0, 21, 0xf) << 2;
106}
107
108static inline int msg_short(struct tipc_msg *m)
109{
110 return msg_hdr_sz(m) == 24;
111}
112
113static inline u32 msg_size(struct tipc_msg *m)
114{
115 return msg_bits(m, 0, 0, 0x1ffff);
116}
117
118static inline u32 msg_data_sz(struct tipc_msg *m)
119{
120 return msg_size(m) - msg_hdr_sz(m);
121}
122
123static inline unchar *msg_data(struct tipc_msg *m)
124{
125 return ((unchar *)m) + msg_hdr_sz(m);
126}
127
128static inline u32 msg_type(struct tipc_msg *m)
129{
130 return msg_bits(m, 1, 29, 0x7);
131}
132
133static inline u32 msg_named(struct tipc_msg *m)
134{
135 return msg_type(m) == TIPC_NAMED_MSG;
136}
137
138static inline u32 msg_mcast(struct tipc_msg *m)
139{
140 return msg_type(m) == TIPC_MCAST_MSG;
141}
142
143static inline u32 msg_connected(struct tipc_msg *m)
144{
145 return msg_type(m) == TIPC_CONN_MSG;
146}
147
148static inline u32 msg_errcode(struct tipc_msg *m)
149{
150 return msg_bits(m, 1, 25, 0xf);
151}
152
153static inline u32 msg_prevnode(struct tipc_msg *m)
154{
155 return msg_word(m, 3);
156}
157
158static inline u32 msg_origport(struct tipc_msg *m)
159{
160 return msg_word(m, 4);
161}
162
163static inline u32 msg_destport(struct tipc_msg *m)
164{
165 return msg_word(m, 5);
166}
167
168static inline u32 msg_mc_netid(struct tipc_msg *m)
169{
170 return msg_word(m, 5);
171}
172
173static inline u32 msg_orignode(struct tipc_msg *m)
174{
175 if (likely(msg_short(m)))
176 return msg_prevnode(m);
177 return msg_word(m, 6);
178}
179
180static inline u32 msg_destnode(struct tipc_msg *m)
181{
182 return msg_word(m, 7);
183}
184
185static inline u32 msg_nametype(struct tipc_msg *m)
186{
187 return msg_word(m, 8);
188}
189
190static inline u32 msg_nameinst(struct tipc_msg *m)
191{
192 return msg_word(m, 9);
193}
194
195static inline u32 msg_namelower(struct tipc_msg *m)
196{
197 return msg_nameinst(m);
198}
199
200static inline u32 msg_nameupper(struct tipc_msg *m)
201{
202 return msg_word(m, 10);
203}
204
205#endif
206
207#endif
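
All of the accessors above funnel through msg_bits(), which shifts a 32-bit header word right to the field's bit position and masks off its width; msg_hdr_sz() additionally multiplies the 4-bit "hdr sz" field by four to convert words to bytes. A standalone sketch of the same decoding for word 0 (the sample word value is invented to match the layout diagram):

  #include <stdio.h>

  static unsigned int msg_bits(unsigned int w, unsigned int pos, unsigned int mask)
  {
          return (w >> pos) & mask;
  }

  int main(void)
  {
          /* Hypothetical word 0: vers=2, hdr sz=6 words (24 bytes), size=100 */
          unsigned int w0 = (2u << 29) | (6u << 21) | 100u;

          unsigned int vers   = msg_bits(w0, 29, 0x7);
          unsigned int hdr_sz = msg_bits(w0, 21, 0xf) << 2;   /* words -> bytes */
          unsigned int size   = msg_bits(w0, 0, 0x1ffff);

          printf("vers=%u hdr_sz=%u data_sz=%u\n", vers, hdr_sz, size - hdr_sz);
          return 0;
  }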
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
deleted file mode 100644
index 1893aaf49426..000000000000
--- a/include/net/tipc/tipc_port.h
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
3 *
4 * Copyright (c) 1994-2007, Ericsson AB
5 * Copyright (c) 2005-2008, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _NET_TIPC_PORT_H_
38#define _NET_TIPC_PORT_H_
39
40#ifdef __KERNEL__
41
42#include <linux/tipc.h>
43#include <linux/skbuff.h>
44#include <net/tipc/tipc_msg.h>
45
46#define TIPC_FLOW_CONTROL_WIN 512
47
48/**
49 * struct tipc_port - native TIPC port info available to privileged users
50 * @usr_handle: pointer to additional user-defined information about port
51 * @lock: pointer to spinlock for controlling access to port
52 * @connected: non-zero if port is currently connected to a peer port
53 * @conn_type: TIPC type used when connection was established
54 * @conn_instance: TIPC instance used when connection was established
55 * @conn_unacked: number of unacknowledged messages received from peer port
56 * @published: non-zero if port has one or more associated names
57 * @congested: non-zero if cannot send because of link or port congestion
58 * @max_pkt: maximum packet size "hint" used when building messages sent by port
59 * @ref: unique reference to port in TIPC object registry
60 * @phdr: preformatted message header used when sending messages
61 */
62
63struct tipc_port {
64 void *usr_handle;
65 spinlock_t *lock;
66 int connected;
67 u32 conn_type;
68 u32 conn_instance;
69 u32 conn_unacked;
70 int published;
71 u32 congested;
72 u32 max_pkt;
73 u32 ref;
74 struct tipc_msg phdr;
75};
76
77
78struct tipc_port *tipc_createport_raw(void *usr_handle,
79 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
80 void (*wakeup)(struct tipc_port *),
81 const u32 importance);
82
83int tipc_reject_msg(struct sk_buff *buf, u32 err);
84
85int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
86
 87void tipc_acknowledge(u32 port_ref, u32 ack);
88
89struct tipc_port *tipc_get_port(const u32 ref);
90
91/*
92 * The following routines require that the port be locked on entry
93 */
94
95int tipc_disconnect_port(struct tipc_port *tp_ptr);
96
97
98#endif
99
100#endif
101
diff --git a/include/net/udp.h b/include/net/udp.h
index 200b82848c9a..bb967dd59bf7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -105,10 +105,10 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
105 105
106extern struct proto udp_prot; 106extern struct proto udp_prot;
107 107
108extern atomic_t udp_memory_allocated; 108extern atomic_long_t udp_memory_allocated;
109 109
110/* sysctl variables for udp */ 110/* sysctl variables for udp */
111extern int sysctl_udp_mem[3]; 111extern long sysctl_udp_mem[3];
112extern int sysctl_udp_rmem_min; 112extern int sysctl_udp_rmem_min;
113extern int sysctl_udp_wmem_min; 113extern int sysctl_udp_wmem_min;
114 114
diff --git a/include/net/x25.h b/include/net/x25.h
index 1479cb4a41fc..a06119a05129 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -315,6 +315,8 @@ extern struct list_head x25_route_list;
315extern rwlock_t x25_route_list_lock; 315extern rwlock_t x25_route_list_lock;
316extern struct list_head x25_forward_list; 316extern struct list_head x25_forward_list;
317extern rwlock_t x25_forward_list_lock; 317extern rwlock_t x25_forward_list_lock;
318extern struct list_head x25_neigh_list;
319extern rwlock_t x25_neigh_list_lock;
318 320
319extern int x25_proc_init(void); 321extern int x25_proc_init(void);
320extern void x25_proc_exit(void); 322extern void x25_proc_exit(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index bcfb6b24b019..b9f385da758e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -143,6 +143,7 @@ struct xfrm_state {
143 struct xfrm_id id; 143 struct xfrm_id id;
144 struct xfrm_selector sel; 144 struct xfrm_selector sel;
145 struct xfrm_mark mark; 145 struct xfrm_mark mark;
146 u32 tfcpad;
146 147
147 u32 genid; 148 u32 genid;
148 149
@@ -805,6 +806,9 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
805 case IPPROTO_MH: 806 case IPPROTO_MH:
806 port = htons(fl->fl_mh_type); 807 port = htons(fl->fl_mh_type);
807 break; 808 break;
809 case IPPROTO_GRE:
810 port = htons(ntohl(fl->fl_gre_key) >> 16);
811 break;
808 default: 812 default:
809 port = 0; /*XXX*/ 813 port = 0; /*XXX*/
810 } 814 }
@@ -826,6 +830,9 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
826 case IPPROTO_ICMPV6: 830 case IPPROTO_ICMPV6:
827 port = htons(fl->fl_icmp_code); 831 port = htons(fl->fl_icmp_code);
828 break; 832 break;
833 case IPPROTO_GRE:
834 port = htons(ntohl(fl->fl_gre_key) & 0xffff);
835 break;
829 default: 836 default:
830 port = 0; /*XXX*/ 837 port = 0; /*XXX*/
831 } 838 }
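
The new IPPROTO_GRE cases treat the 32-bit GRE key as a pair of 16-bit pseudo-ports: the upper half plays the role of the source port and the lower half the destination port, so key-based IPsec policies can reuse the existing port selectors. A small sketch of that split (the key value is arbitrary):

  #include <stdio.h>
  #include <arpa/inet.h>

  int main(void)
  {
          unsigned int gre_key = htonl(0x12345678);   /* key as seen on the wire */

          unsigned short sport = htons(ntohl(gre_key) >> 16);     /* 0x1234 */
          unsigned short dport = htons(ntohl(gre_key) & 0xffff);  /* 0x5678 */

          printf("sport=0x%04x dport=0x%04x\n", ntohs(sport), ntohs(dport));
          return 0;
  }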
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 289010d3270b..e5e345fb2a5c 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -98,6 +98,103 @@ TRACE_EVENT(ext4_allocate_inode,
98 (unsigned long) __entry->dir, __entry->mode) 98 (unsigned long) __entry->dir, __entry->mode)
99); 99);
100 100
101TRACE_EVENT(ext4_evict_inode,
102 TP_PROTO(struct inode *inode),
103
104 TP_ARGS(inode),
105
106 TP_STRUCT__entry(
107 __field( int, dev_major )
108 __field( int, dev_minor )
109 __field( ino_t, ino )
110 __field( int, nlink )
111 ),
112
113 TP_fast_assign(
114 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
115 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
116 __entry->ino = inode->i_ino;
117 __entry->nlink = inode->i_nlink;
118 ),
119
120 TP_printk("dev %d,%d ino %lu nlink %d",
121 __entry->dev_major, __entry->dev_minor,
122 (unsigned long) __entry->ino, __entry->nlink)
123);
124
125TRACE_EVENT(ext4_drop_inode,
126 TP_PROTO(struct inode *inode, int drop),
127
128 TP_ARGS(inode, drop),
129
130 TP_STRUCT__entry(
131 __field( int, dev_major )
132 __field( int, dev_minor )
133 __field( ino_t, ino )
134 __field( int, drop )
135 ),
136
137 TP_fast_assign(
138 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
139 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
140 __entry->ino = inode->i_ino;
141 __entry->drop = drop;
142 ),
143
144 TP_printk("dev %d,%d ino %lu drop %d",
145 __entry->dev_major, __entry->dev_minor,
146 (unsigned long) __entry->ino, __entry->drop)
147);
148
149TRACE_EVENT(ext4_mark_inode_dirty,
150 TP_PROTO(struct inode *inode, unsigned long IP),
151
152 TP_ARGS(inode, IP),
153
154 TP_STRUCT__entry(
155 __field( int, dev_major )
156 __field( int, dev_minor )
157 __field( ino_t, ino )
158 __field(unsigned long, ip )
159 ),
160
161 TP_fast_assign(
162 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
163 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
164 __entry->ino = inode->i_ino;
165 __entry->ip = IP;
166 ),
167
168 TP_printk("dev %d,%d ino %lu caller %pF",
169 __entry->dev_major, __entry->dev_minor,
170 (unsigned long) __entry->ino, (void *)__entry->ip)
171);
172
173TRACE_EVENT(ext4_begin_ordered_truncate,
174 TP_PROTO(struct inode *inode, loff_t new_size),
175
176 TP_ARGS(inode, new_size),
177
178 TP_STRUCT__entry(
179 __field( int, dev_major )
180 __field( int, dev_minor )
181 __field( ino_t, ino )
182 __field( loff_t, new_size )
183 ),
184
185 TP_fast_assign(
186 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
187 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
188 __entry->ino = inode->i_ino;
189 __entry->new_size = new_size;
190 ),
191
192 TP_printk("dev %d,%d ino %lu new_size %lld",
193 __entry->dev_major, __entry->dev_minor,
194 (unsigned long) __entry->ino,
195 (long long) __entry->new_size)
196);
197
101DECLARE_EVENT_CLASS(ext4__write_begin, 198DECLARE_EVENT_CLASS(ext4__write_begin,
102 199
103 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 200 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
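
Each TRACE_EVENT above follows the same split: TP_fast_assign copies raw values into a compact record on the hot path, and TP_printk formats them only when the trace buffer is read. A schematic userspace analogue of that two-phase design (illustrative names only, not the tracing API):

  #include <stdio.h>

  /* Phase 1: the hot path stores raw values only. */
  struct evict_inode_record {
          int dev_major, dev_minor;
          unsigned long ino;
          int nlink;
  };

  static void record_evict(struct evict_inode_record *r,
                           int major, int minor, unsigned long ino, int nlink)
  {
          r->dev_major = major;   /* cheap stores, no formatting */
          r->dev_minor = minor;
          r->ino = ino;
          r->nlink = nlink;
  }

  /* Phase 2: formatting happens only when the record is read back. */
  static void print_evict(const struct evict_inode_record *r)
  {
          printf("dev %d,%d ino %lu nlink %d\n",
                 r->dev_major, r->dev_minor, r->ino, r->nlink);
  }

  int main(void)
  {
          struct evict_inode_record r;

          record_evict(&r, 8, 1, 12345, 0);
          print_evict(&r);
          return 0;
  }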
diff --git a/kernel/exit.c b/kernel/exit.c
index b194febf5799..21aa7b3001fb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -96,6 +96,14 @@ static void __exit_signal(struct task_struct *tsk)
96 sig->tty = NULL; 96 sig->tty = NULL;
97 } else { 97 } else {
98 /* 98 /*
99 * This can only happen if the caller is de_thread().
 100 * FIXME: this is a temporary hack; we should teach
101 * posix-cpu-timers to handle this case correctly.
102 */
103 if (unlikely(has_group_leader_pid(tsk)))
104 posix_cpu_timers_exit_group(tsk);
105
106 /*
99 * If there is any task waiting for the group exit 107 * If there is any task waiting for the group exit
100 * then notify it: 108 * then notify it:
101 */ 109 */
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb306d415..17110a4a4fc2 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
194 194
195 account_global_scheduler_latency(tsk, &lat); 195 account_global_scheduler_latency(tsk, &lat);
196 196
197 /* 197 for (i = 0; i < tsk->latency_record_count; i++) {
198 * short term hack; if we're > 32 we stop; future we recycle:
199 */
200 tsk->latency_record_count++;
201 if (tsk->latency_record_count >= LT_SAVECOUNT)
202 goto out_unlock;
203
204 for (i = 0; i < LT_SAVECOUNT; i++) {
205 struct latency_record *mylat; 198 struct latency_record *mylat;
206 int same = 1; 199 int same = 1;
207 200
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
227 } 220 }
228 } 221 }
229 222
223 /*
 224 * short-term hack: if we already have 32 records we stop; in future we should recycle:
225 */
226 if (tsk->latency_record_count >= LT_SAVECOUNT)
227 goto out_unlock;
228
230 /* Allocated a new one: */ 229 /* Allocated a new one: */
231 i = tsk->latency_record_count; 230 i = tsk->latency_record_count++;
232 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); 231 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
233 232
234out_unlock: 233out_unlock:
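
The reordered logic first tries to merge the new sample into an existing record with a matching backtrace, and only when no match is found does it check the LT_SAVECOUNT cap and append. The same find-or-append shape in miniature (stand-in types; the cap mirrors the kernel's LT_SAVECOUNT of 32):

  #include <stdio.h>
  #include <string.h>

  #define SAVECOUNT 32

  struct record { char key[16]; unsigned long count; };

  static struct record records[SAVECOUNT];
  static int record_count;

  static void account(const char *key)
  {
          int i;

          /* First pass: merge into an existing record if the key matches. */
          for (i = 0; i < record_count; i++) {
                  if (strcmp(records[i].key, key) == 0) {
                          records[i].count++;
                          return;
                  }
          }

          /* No match: append, unless the table is already full. */
          if (record_count >= SAVECOUNT)
                  return;

          i = record_count++;
          strncpy(records[i].key, key, sizeof(records[i].key) - 1);
          records[i].count = 1;
  }

  int main(void)
  {
          account("do_fsync"); account("do_fsync"); account("poll");
          printf("records=%d first_count=%lu\n", record_count, records[0].count);
          return 0;
  }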
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f4982..cb6c0d2af68f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event,
674 674
675 event->tstamp_running += ctx->time - event->tstamp_stopped; 675 event->tstamp_running += ctx->time - event->tstamp_stopped;
676 676
677 event->shadow_ctx_time = ctx->time - ctx->timestamp;
678
677 if (!is_software_event(event)) 679 if (!is_software_event(event))
678 cpuctx->active_oncpu++; 680 cpuctx->active_oncpu++;
679 ctx->nr_active++; 681 ctx->nr_active++;
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3396} 3398}
3397 3399
3398static void perf_output_read_one(struct perf_output_handle *handle, 3400static void perf_output_read_one(struct perf_output_handle *handle,
3399 struct perf_event *event) 3401 struct perf_event *event,
3402 u64 enabled, u64 running)
3400{ 3403{
3401 u64 read_format = event->attr.read_format; 3404 u64 read_format = event->attr.read_format;
3402 u64 values[4]; 3405 u64 values[4];
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3404 3407
3405 values[n++] = perf_event_count(event); 3408 values[n++] = perf_event_count(event);
3406 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3409 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3407 values[n++] = event->total_time_enabled + 3410 values[n++] = enabled +
3408 atomic64_read(&event->child_total_time_enabled); 3411 atomic64_read(&event->child_total_time_enabled);
3409 } 3412 }
3410 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3413 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3411 values[n++] = event->total_time_running + 3414 values[n++] = running +
3412 atomic64_read(&event->child_total_time_running); 3415 atomic64_read(&event->child_total_time_running);
3413 } 3416 }
3414 if (read_format & PERF_FORMAT_ID) 3417 if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3421 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 3424 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3422 */ 3425 */
3423static void perf_output_read_group(struct perf_output_handle *handle, 3426static void perf_output_read_group(struct perf_output_handle *handle,
3424 struct perf_event *event) 3427 struct perf_event *event,
3428 u64 enabled, u64 running)
3425{ 3429{
3426 struct perf_event *leader = event->group_leader, *sub; 3430 struct perf_event *leader = event->group_leader, *sub;
3427 u64 read_format = event->attr.read_format; 3431 u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3431 values[n++] = 1 + leader->nr_siblings; 3435 values[n++] = 1 + leader->nr_siblings;
3432 3436
3433 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3437 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3434 values[n++] = leader->total_time_enabled; 3438 values[n++] = enabled;
3435 3439
3436 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3440 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3437 values[n++] = leader->total_time_running; 3441 values[n++] = running;
3438 3442
3439 if (leader != event) 3443 if (leader != event)
3440 leader->pmu->read(leader); 3444 leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3459 } 3463 }
3460} 3464}
3461 3465
3466#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3467 PERF_FORMAT_TOTAL_TIME_RUNNING)
3468
3462static void perf_output_read(struct perf_output_handle *handle, 3469static void perf_output_read(struct perf_output_handle *handle,
3463 struct perf_event *event) 3470 struct perf_event *event)
3464{ 3471{
3472 u64 enabled = 0, running = 0, now, ctx_time;
3473 u64 read_format = event->attr.read_format;
3474
3475 /*
3476 * compute total_time_enabled, total_time_running
3477 * based on snapshot values taken when the event
3478 * was last scheduled in.
3479 *
 3480 * we cannot simply call update_context_time()
 3481 * because of locking issues, as we are called in
3482 * NMI context
3483 */
3484 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3485 now = perf_clock();
3486 ctx_time = event->shadow_ctx_time + now;
3487 enabled = ctx_time - event->tstamp_enabled;
3488 running = ctx_time - event->tstamp_running;
3489 }
3490
3465 if (event->attr.read_format & PERF_FORMAT_GROUP) 3491 if (event->attr.read_format & PERF_FORMAT_GROUP)
3466 perf_output_read_group(handle, event); 3492 perf_output_read_group(handle, event, enabled, running);
3467 else 3493 else
3468 perf_output_read_one(handle, event); 3494 perf_output_read_one(handle, event, enabled, running);
3469} 3495}
3470 3496
3471void perf_output_sample(struct perf_output_handle *handle, 3497void perf_output_sample(struct perf_output_handle *handle,
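
The shadow_ctx_time trick lets the NMI-safe output path reconstruct context time without taking locks: at schedule-in the event records ctx->time - ctx->timestamp, and later the current context time can be recovered as shadow_ctx_time + now, since both sides advance with the same clock. A simplified sketch of the arithmetic with plain integers (all timestamp values are hypothetical):

  #include <stdio.h>

  typedef unsigned long long u64;

  int main(void)
  {
          /* Snapshot taken when the event was scheduled in */
          u64 ctx_time_then = 1000, timestamp_then = 400;
          u64 shadow_ctx_time = ctx_time_then - timestamp_then;   /* 600 */

          /* Later, in NMI context, only the clock is available */
          u64 now = 900;
          u64 ctx_time = shadow_ctx_time + now;                   /* 1500 */

          u64 tstamp_enabled = 1100, tstamp_running = 1200;
          printf("enabled=%llu running=%llu\n",
                 ctx_time - tstamp_enabled, ctx_time - tstamp_running);
          return 0;
  }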
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee8c377..38e7d5868d60 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void)
261} 261}
262#endif 262#endif
263 263
264#ifdef CONFIG_SECURITY_DMESG_RESTRICT
265int dmesg_restrict = 1;
266#else
267int dmesg_restrict;
268#endif
269
264int do_syslog(int type, char __user *buf, int len, bool from_file) 270int do_syslog(int type, char __user *buf, int len, bool from_file)
265{ 271{
266 unsigned i, j, limit, count; 272 unsigned i, j, limit, count;
diff --git a/kernel/range.c b/kernel/range.c
index 471b66acabb5..37fa9b99ad58 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2)
119 119
120int clean_sort_range(struct range *range, int az) 120int clean_sort_range(struct range *range, int az)
121{ 121{
122 int i, j, k = az - 1, nr_range = 0; 122 int i, j, k = az - 1, nr_range = az;
123 123
124 for (i = 0; i < k; i++) { 124 for (i = 0; i < k; i++) {
125 if (range[i].end) 125 if (range[i].end)
diff --git a/kernel/relay.c b/kernel/relay.c
index c7cf397fb929..859ea5a9605f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -70,17 +70,10 @@ static const struct vm_operations_struct relay_file_mmap_ops = {
70 */ 70 */
71static struct page **relay_alloc_page_array(unsigned int n_pages) 71static struct page **relay_alloc_page_array(unsigned int n_pages)
72{ 72{
73 struct page **array; 73 const size_t pa_size = n_pages * sizeof(struct page *);
74 size_t pa_size = n_pages * sizeof(struct page *); 74 if (pa_size > PAGE_SIZE)
75 75 return vzalloc(pa_size);
76 if (pa_size > PAGE_SIZE) { 76 return kzalloc(pa_size, GFP_KERNEL);
77 array = vmalloc(pa_size);
78 if (array)
79 memset(array, 0, pa_size);
80 } else {
81 array = kzalloc(pa_size, GFP_KERNEL);
82 }
83 return array;
84} 77}
85 78
86/* 79/*
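
The rewrite collapses vmalloc()+memset() into a single vzalloc() call, the same simplification calloc() offers over malloc()+memset() in userspace. A tiny sketch of the before/after shape:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(void)
  {
          size_t n = 1024;

          /* Before: allocate, then zero by hand. */
          int *a = malloc(n * sizeof(*a));
          if (a)
                  memset(a, 0, n * sizeof(*a));

          /* After: one call that returns zeroed memory. */
          int *b = calloc(n, sizeof(*b));

          printf("a[0]=%d b[0]=%d\n", a ? a[0] : -1, b ? b[0] : -1);
          free(a);
          free(b);
          return 0;
  }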
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1edb799f..b65bf634035e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -704,6 +704,15 @@ static struct ctl_table kern_table[] = {
704 }, 704 },
705#endif 705#endif
706 { 706 {
707 .procname = "dmesg_restrict",
708 .data = &dmesg_restrict,
709 .maxlen = sizeof(int),
710 .mode = 0644,
711 .proc_handler = proc_dointvec_minmax,
712 .extra1 = &zero,
713 .extra2 = &one,
714 },
715 {
707 .procname = "ngroups_max", 716 .procname = "ngroups_max",
708 .data = &ngroups_max, 717 .data = &ngroups_max,
709 .maxlen = sizeof (int), 718 .maxlen = sizeof (int),
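
dmesg_restrict is a 0/1 knob exposed as /proc/sys/kernel/dmesg_restrict; when set, unprivileged callers of syslog(2) are refused, and CONFIG_SECURITY_DMESG_RESTRICT only chooses the default. A small userspace sketch that reads the current setting from procfs (assumes a Linux system where the file exists):

  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/proc/sys/kernel/dmesg_restrict", "r");
          int val;

          if (!f) {
                  perror("dmesg_restrict");
                  return 1;
          }
          if (fscanf(f, "%d", &val) != 1)
                  val = -1;
          fclose(f);

          printf("dmesg_restrict=%d (%s)\n", val,
                 val == 1 ? "unprivileged readers refused" : "unrestricted");
          return 0;
  }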
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed66724..7b8ec0281548 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
169 BLK_TC_ACT(BLK_TC_WRITE) }; 169 BLK_TC_ACT(BLK_TC_WRITE) };
170 170
171#define BLK_TC_HARDBARRIER BLK_TC_BARRIER
172#define BLK_TC_RAHEAD BLK_TC_AHEAD 171#define BLK_TC_RAHEAD BLK_TC_AHEAD
173 172
174/* The ilog2() calls fall out because they're constant */ 173/* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
196 return; 195 return;
197 196
198 what |= ddir_act[rw & WRITE]; 197 what |= ddir_act[rw & WRITE];
199 what |= MASK_TC_BIT(rw, HARDBARRIER);
200 what |= MASK_TC_BIT(rw, SYNC); 198 what |= MASK_TC_BIT(rw, SYNC);
201 what |= MASK_TC_BIT(rw, RAHEAD); 199 what |= MASK_TC_BIT(rw, RAHEAD);
202 what |= MASK_TC_BIT(rw, META); 200 what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1807 1805
1808 if (rw & REQ_RAHEAD) 1806 if (rw & REQ_RAHEAD)
1809 rwbs[i++] = 'A'; 1807 rwbs[i++] = 'A';
1810 if (rw & REQ_HARDBARRIER)
1811 rwbs[i++] = 'B';
1812 if (rw & REQ_SYNC) 1808 if (rw & REQ_SYNC)
1813 rwbs[i++] = 'S'; 1809 rwbs[i++] = 'S';
1814 if (rw & REQ_META) 1810 if (rw & REQ_META)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba687a6d8..6e3c41a4024c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
43static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 43static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
44#endif 44#endif
45 45
46static int __initdata no_watchdog; 46static int no_watchdog;
47 47
48 48
49/* boot commands */ 49/* boot commands */
diff --git a/lib/nlattr.c b/lib/nlattr.c
index c4706eb98d3d..00e8a02681a6 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -15,7 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <net/netlink.h> 16#include <net/netlink.h>
17 17
18static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = { 18static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
19 [NLA_U8] = sizeof(u8), 19 [NLA_U8] = sizeof(u8),
20 [NLA_U16] = sizeof(u16), 20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32), 21 [NLA_U32] = sizeof(u32),
@@ -23,7 +23,7 @@ static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
23 [NLA_NESTED] = NLA_HDRLEN, 23 [NLA_NESTED] = NLA_HDRLEN,
24}; 24};
25 25
26static int validate_nla(struct nlattr *nla, int maxtype, 26static int validate_nla(const struct nlattr *nla, int maxtype,
27 const struct nla_policy *policy) 27 const struct nla_policy *policy)
28{ 28{
29 const struct nla_policy *pt; 29 const struct nla_policy *pt;
@@ -115,10 +115,10 @@ static int validate_nla(struct nlattr *nla, int maxtype,
115 * 115 *
116 * Returns 0 on success or a negative error code. 116 * Returns 0 on success or a negative error code.
117 */ 117 */
118int nla_validate(struct nlattr *head, int len, int maxtype, 118int nla_validate(const struct nlattr *head, int len, int maxtype,
119 const struct nla_policy *policy) 119 const struct nla_policy *policy)
120{ 120{
121 struct nlattr *nla; 121 const struct nlattr *nla;
122 int rem, err; 122 int rem, err;
123 123
124 nla_for_each_attr(nla, head, len, rem) { 124 nla_for_each_attr(nla, head, len, rem) {
@@ -173,10 +173,10 @@ nla_policy_len(const struct nla_policy *p, int n)
173 * 173 *
174 * Returns 0 on success or a negative error code. 174 * Returns 0 on success or a negative error code.
175 */ 175 */
176int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, 176int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
177 const struct nla_policy *policy) 177 int len, const struct nla_policy *policy)
178{ 178{
179 struct nlattr *nla; 179 const struct nlattr *nla;
180 int rem, err; 180 int rem, err;
181 181
182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -191,7 +191,7 @@ int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
191 goto errout; 191 goto errout;
192 } 192 }
193 193
194 tb[type] = nla; 194 tb[type] = (struct nlattr *)nla;
195 } 195 }
196 } 196 }
197 197
@@ -212,14 +212,14 @@ errout:
212 * 212 *
213 * Returns the first attribute in the stream matching the specified type. 213 * Returns the first attribute in the stream matching the specified type.
214 */ 214 */
215struct nlattr *nla_find(struct nlattr *head, int len, int attrtype) 215struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
216{ 216{
217 struct nlattr *nla; 217 const struct nlattr *nla;
218 int rem; 218 int rem;
219 219
220 nla_for_each_attr(nla, head, len, rem) 220 nla_for_each_attr(nla, head, len, rem)
221 if (nla_type(nla) == attrtype) 221 if (nla_type(nla) == attrtype)
222 return nla; 222 return (struct nlattr *)nla;
223 223
224 return NULL; 224 return NULL;
225} 225}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4c24f..5086bb962b4d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@ struct radix_tree_preload {
82}; 82};
83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
84 84
85static inline void *ptr_to_indirect(void *ptr)
86{
87 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
88}
89
90static inline void *indirect_to_ptr(void *ptr)
91{
92 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
93}
94
85static inline gfp_t root_gfp_mask(struct radix_tree_root *root) 95static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
86{ 96{
87 return root->gfp_mask & __GFP_BITS_MASK; 97 return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
265 return -ENOMEM; 275 return -ENOMEM;
266 276
267 /* Increase the height. */ 277 /* Increase the height. */
268 node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); 278 node->slots[0] = indirect_to_ptr(root->rnode);
269 279
270 /* Propagate the aggregated tag info into the new root */ 280 /* Propagate the aggregated tag info into the new root */
271 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 281 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
276 newheight = root->height+1; 286 newheight = root->height+1;
277 node->height = newheight; 287 node->height = newheight;
278 node->count = 1; 288 node->count = 1;
279 node = radix_tree_ptr_to_indirect(node); 289 node = ptr_to_indirect(node);
280 rcu_assign_pointer(root->rnode, node); 290 rcu_assign_pointer(root->rnode, node);
281 root->height = newheight; 291 root->height = newheight;
282 } while (height > root->height); 292 } while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
309 return error; 319 return error;
310 } 320 }
311 321
312 slot = radix_tree_indirect_to_ptr(root->rnode); 322 slot = indirect_to_ptr(root->rnode);
313 323
314 height = root->height; 324 height = root->height;
315 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 325 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
325 rcu_assign_pointer(node->slots[offset], slot); 335 rcu_assign_pointer(node->slots[offset], slot);
326 node->count++; 336 node->count++;
327 } else 337 } else
328 rcu_assign_pointer(root->rnode, 338 rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
329 radix_tree_ptr_to_indirect(slot));
330 } 339 }
331 340
332 /* Go a level down */ 341 /* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
374 return NULL; 383 return NULL;
375 return is_slot ? (void *)&root->rnode : node; 384 return is_slot ? (void *)&root->rnode : node;
376 } 385 }
377 node = radix_tree_indirect_to_ptr(node); 386 node = indirect_to_ptr(node);
378 387
379 height = node->height; 388 height = node->height;
380 if (index > radix_tree_maxindex(height)) 389 if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
393 height--; 402 height--;
394 } while (height > 0); 403 } while (height > 0);
395 404
396 return is_slot ? (void *)slot:node; 405 return is_slot ? (void *)slot : indirect_to_ptr(node);
397} 406}
398 407
399/** 408/**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
455 height = root->height; 464 height = root->height;
456 BUG_ON(index > radix_tree_maxindex(height)); 465 BUG_ON(index > radix_tree_maxindex(height));
457 466
458 slot = radix_tree_indirect_to_ptr(root->rnode); 467 slot = indirect_to_ptr(root->rnode);
459 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 468 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
460 469
461 while (height > 0) { 470 while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
509 518
510 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 519 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
511 pathp->node = NULL; 520 pathp->node = NULL;
512 slot = radix_tree_indirect_to_ptr(root->rnode); 521 slot = indirect_to_ptr(root->rnode);
513 522
514 while (height > 0) { 523 while (height > 0) {
515 int offset; 524 int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
579 588
580 if (!radix_tree_is_indirect_ptr(node)) 589 if (!radix_tree_is_indirect_ptr(node))
581 return (index == 0); 590 return (index == 0);
582 node = radix_tree_indirect_to_ptr(node); 591 node = indirect_to_ptr(node);
583 592
584 height = node->height; 593 height = node->height;
585 if (index > radix_tree_maxindex(height)) 594 if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
666 } 675 }
667 676
668 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 677 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
669 slot = radix_tree_indirect_to_ptr(root->rnode); 678 slot = indirect_to_ptr(root->rnode);
670 679
671 /* 680 /*
672 * we fill the path from (root->height - 2) to 0, leaving the index at 681 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
897 results[0] = node; 906 results[0] = node;
898 return 1; 907 return 1;
899 } 908 }
900 node = radix_tree_indirect_to_ptr(node); 909 node = indirect_to_ptr(node);
901 910
902 max_index = radix_tree_maxindex(node->height); 911 max_index = radix_tree_maxindex(node->height);
903 912
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
916 slot = *(((void ***)results)[ret + i]); 925 slot = *(((void ***)results)[ret + i]);
917 if (!slot) 926 if (!slot)
918 continue; 927 continue;
919 results[ret + nr_found] = rcu_dereference_raw(slot); 928 results[ret + nr_found] =
929 indirect_to_ptr(rcu_dereference_raw(slot));
920 nr_found++; 930 nr_found++;
921 } 931 }
922 ret += nr_found; 932 ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
965 results[0] = (void **)&root->rnode; 975 results[0] = (void **)&root->rnode;
966 return 1; 976 return 1;
967 } 977 }
968 node = radix_tree_indirect_to_ptr(node); 978 node = indirect_to_ptr(node);
969 979
970 max_index = radix_tree_maxindex(node->height); 980 max_index = radix_tree_maxindex(node->height);
971 981
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1090 results[0] = node; 1100 results[0] = node;
1091 return 1; 1101 return 1;
1092 } 1102 }
1093 node = radix_tree_indirect_to_ptr(node); 1103 node = indirect_to_ptr(node);
1094 1104
1095 max_index = radix_tree_maxindex(node->height); 1105 max_index = radix_tree_maxindex(node->height);
1096 1106
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1109 slot = *(((void ***)results)[ret + i]); 1119 slot = *(((void ***)results)[ret + i]);
1110 if (!slot) 1120 if (!slot)
1111 continue; 1121 continue;
1112 results[ret + nr_found] = rcu_dereference_raw(slot); 1122 results[ret + nr_found] =
1123 indirect_to_ptr(rcu_dereference_raw(slot));
1113 nr_found++; 1124 nr_found++;
1114 } 1125 }
1115 ret += nr_found; 1126 ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1159 results[0] = (void **)&root->rnode; 1170 results[0] = (void **)&root->rnode;
1160 return 1; 1171 return 1;
1161 } 1172 }
1162 node = radix_tree_indirect_to_ptr(node); 1173 node = indirect_to_ptr(node);
1163 1174
1164 max_index = radix_tree_maxindex(node->height); 1175 max_index = radix_tree_maxindex(node->height);
1165 1176
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1195 void *newptr; 1206 void *newptr;
1196 1207
1197 BUG_ON(!radix_tree_is_indirect_ptr(to_free)); 1208 BUG_ON(!radix_tree_is_indirect_ptr(to_free));
1198 to_free = radix_tree_indirect_to_ptr(to_free); 1209 to_free = indirect_to_ptr(to_free);
1199 1210
1200 /* 1211 /*
1201 * The candidate node has more than one child, or its child 1212 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1208 1219
1209 /* 1220 /*
1210 * We don't need rcu_assign_pointer(), since we are simply 1221 * We don't need rcu_assign_pointer(), since we are simply
1211 * moving the node from one part of the tree to another. If 1222 * moving the node from one part of the tree to another: if it
1212 * it was safe to dereference the old pointer to it 1223 * was safe to dereference the old pointer to it
1213 * (to_free->slots[0]), it will be safe to dereference the new 1224 * (to_free->slots[0]), it will be safe to dereference the new
1214 * one (root->rnode). 1225 * one (root->rnode) as far as dependent read barriers go.
1215 */ 1226 */
1216 newptr = to_free->slots[0]; 1227 newptr = to_free->slots[0];
1217 if (root->height > 1) 1228 if (root->height > 1)
1218 newptr = radix_tree_ptr_to_indirect(newptr); 1229 newptr = ptr_to_indirect(newptr);
1219 root->rnode = newptr; 1230 root->rnode = newptr;
1220 root->height--; 1231 root->height--;
1232
1233 /*
1234 * We have a dilemma here. The node's slot[0] must not be
1235 * NULLed in case there are concurrent lookups expecting to
 1236 * find the item. However, if this was a bottom-level node,
 1237 * its slot pointer may still be visible to callers that
 1238 * dereference it. If the item corresponding to
1239 * slot[0] is subsequently deleted, these callers would expect
1240 * their slot to become empty sooner or later.
1241 *
1242 * For example, lockless pagecache will look up a slot, deref
 1243 * the page pointer, and if the page has a zero refcount it means it
1244 * was concurrently deleted from pagecache so try the deref
1245 * again. Fortunately there is already a requirement for logic
1246 * to retry the entire slot lookup -- the indirect pointer
1247 * problem (replacing direct root node with an indirect pointer
1248 * also results in a stale slot). So tag the slot as indirect
1249 * to force callers to retry.
1250 */
1251 if (root->height == 0)
1252 *((unsigned long *)&to_free->slots[0]) |=
1253 RADIX_TREE_INDIRECT_PTR;
1254
1221 radix_tree_node_free(to_free); 1255 radix_tree_node_free(to_free);
1222 } 1256 }
1223} 1257}
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1254 root->rnode = NULL; 1288 root->rnode = NULL;
1255 goto out; 1289 goto out;
1256 } 1290 }
1257 slot = radix_tree_indirect_to_ptr(slot); 1291 slot = indirect_to_ptr(slot);
1258 1292
1259 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 1293 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
1260 pathp->node = NULL; 1294 pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1296 radix_tree_node_free(to_free); 1330 radix_tree_node_free(to_free);
1297 1331
1298 if (pathp->node->count) { 1332 if (pathp->node->count) {
1299 if (pathp->node == 1333 if (pathp->node == indirect_to_ptr(root->rnode))
1300 radix_tree_indirect_to_ptr(root->rnode))
1301 radix_tree_shrink(root); 1334 radix_tree_shrink(root);
1302 goto out; 1335 goto out;
1303 } 1336 }
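
ptr_to_indirect()/indirect_to_ptr() rely on node pointers being at least word-aligned, which leaves bit 0 free to mark a pointer as indirect (RADIX_TREE_INDIRECT_PTR is that bit in the kernel). A standalone illustration of the same low-bit tagging (the node type here is a stand-in):

  #include <stdio.h>
  #include <stdlib.h>

  #define INDIRECT_PTR 1UL

  static void *ptr_to_indirect(void *ptr)
  {
          return (void *)((unsigned long)ptr | INDIRECT_PTR);
  }

  static void *indirect_to_ptr(void *ptr)
  {
          return (void *)((unsigned long)ptr & ~INDIRECT_PTR);
  }

  static int is_indirect_ptr(void *ptr)
  {
          return (unsigned long)ptr & INDIRECT_PTR;
  }

  int main(void)
  {
          int *node = malloc(sizeof(*node));  /* malloc'd => aligned, bit 0 clear */
          void *tagged = ptr_to_indirect(node);

          printf("tagged: %d, restored == node: %d\n",
                 is_indirect_ptr(tagged),
                 indirect_to_ptr(tagged) == (void *)node);
          free(node);
          return 0;
  }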
diff --git a/mm/filemap.c b/mm/filemap.c
index 75572b5f2374..ea89840fc65f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -644,7 +644,9 @@ repeat:
644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); 644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
645 if (pagep) { 645 if (pagep) {
646 page = radix_tree_deref_slot(pagep); 646 page = radix_tree_deref_slot(pagep);
647 if (unlikely(!page || page == RADIX_TREE_RETRY)) 647 if (unlikely(!page))
648 goto out;
649 if (radix_tree_deref_retry(page))
648 goto repeat; 650 goto repeat;
649 651
650 if (!page_cache_get_speculative(page)) 652 if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@ repeat:
660 goto repeat; 662 goto repeat;
661 } 663 }
662 } 664 }
665out:
663 rcu_read_unlock(); 666 rcu_read_unlock();
664 667
665 return page; 668 return page;
@@ -777,12 +780,11 @@ repeat:
777 page = radix_tree_deref_slot((void **)pages[i]); 780 page = radix_tree_deref_slot((void **)pages[i]);
778 if (unlikely(!page)) 781 if (unlikely(!page))
779 continue; 782 continue;
780 /* 783 if (radix_tree_deref_retry(page)) {
781 * this can only trigger if nr_found == 1, making livelock 784 if (ret)
782 * a non issue. 785 start = pages[ret-1]->index;
783 */
784 if (unlikely(page == RADIX_TREE_RETRY))
785 goto restart; 786 goto restart;
787 }
786 788
787 if (!page_cache_get_speculative(page)) 789 if (!page_cache_get_speculative(page))
788 goto repeat; 790 goto repeat;
@@ -830,11 +832,7 @@ repeat:
830 page = radix_tree_deref_slot((void **)pages[i]); 832 page = radix_tree_deref_slot((void **)pages[i]);
831 if (unlikely(!page)) 833 if (unlikely(!page))
832 continue; 834 continue;
833 /* 835 if (radix_tree_deref_retry(page))
834 * this can only trigger if nr_found == 1, making livelock
835 * a non issue.
836 */
837 if (unlikely(page == RADIX_TREE_RETRY))
838 goto restart; 836 goto restart;
839 837
840 if (page->mapping == NULL || page->index != index) 838 if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@ repeat:
887 page = radix_tree_deref_slot((void **)pages[i]); 885 page = radix_tree_deref_slot((void **)pages[i]);
888 if (unlikely(!page)) 886 if (unlikely(!page))
889 continue; 887 continue;
890 /* 888 if (radix_tree_deref_retry(page))
891 * this can only trigger if nr_found == 1, making livelock
892 * a non issue.
893 */
894 if (unlikely(page == RADIX_TREE_RETRY))
895 goto restart; 889 goto restart;
896 890
897 if (!page_cache_get_speculative(page)) 891 if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@ find_page:
1029 goto page_not_up_to_date; 1023 goto page_not_up_to_date;
1030 if (!trylock_page(page)) 1024 if (!trylock_page(page))
1031 goto page_not_up_to_date; 1025 goto page_not_up_to_date;
1026 /* Did it get truncated before we got the lock? */
1027 if (!page->mapping)
1028 goto page_not_up_to_date_locked;
1032 if (!mapping->a_ops->is_partially_uptodate(page, 1029 if (!mapping->a_ops->is_partially_uptodate(page,
1033 desc, offset)) 1030 desc, offset))
1034 goto page_not_up_to_date_locked; 1031 goto page_not_up_to_date_locked;
@@ -1563,8 +1560,10 @@ retry_find:
1563 goto no_cached_page; 1560 goto no_cached_page;
1564 } 1561 }
1565 1562
1566 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) 1563 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1564 page_cache_release(page);
1567 return ret | VM_FAULT_RETRY; 1565 return ret | VM_FAULT_RETRY;
1566 }
1568 1567
1569 /* Did it get truncated? */ 1568 /* Did it get truncated? */
1570 if (unlikely(page->mapping != mapping)) { 1569 if (unlikely(page->mapping != mapping)) {
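
radix_tree_deref_retry() replaces the old RADIX_TREE_RETRY sentinel check with a test of the indirect bit, so lockless lookups restart whenever they race with a tree shrink or a concurrent delete. The control flow reduces to a classic optimistic-read loop; a schematic sketch (these helpers are stand-ins for the kernel primitives, with the races stubbed out):

  #include <stdio.h>

  /* Stand-ins for the primitives used by find_get_page() */
  static void *lookup_slot(void) { static int page = 42; return &page; }
  static int deref_retry(void *p) { (void)p; return 0; }   /* no race here */
  static int get_speculative(void *p) { (void)p; return 1; }

  static void *find_page(void)
  {
          void *page;

  repeat:
          page = lookup_slot();
          if (!page)
                  return NULL;            /* no page at this index */
          if (deref_retry(page))
                  goto repeat;            /* raced with a tree shrink */
          if (!get_speculative(page))
                  goto repeat;            /* refcount hit zero under us */
          return page;
  }

  int main(void)
  {
          printf("found page: %s\n", find_page() ? "yes" : "no");
          return 0;
  }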
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a99cfaf0a19..2efa8ea07ff7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4208,15 +4208,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4208 4208
4209 memset(mem, 0, size); 4209 memset(mem, 0, size);
4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4211 if (!mem->stat) { 4211 if (!mem->stat)
4212 if (size < PAGE_SIZE) 4212 goto out_free;
4213 kfree(mem);
4214 else
4215 vfree(mem);
4216 mem = NULL;
4217 }
4218 spin_lock_init(&mem->pcp_counter_lock); 4213 spin_lock_init(&mem->pcp_counter_lock);
4219 return mem; 4214 return mem;
4215
4216out_free:
4217 if (size < PAGE_SIZE)
4218 kfree(mem);
4219 else
4220 vfree(mem);
4221 return NULL;
4220} 4222}
4221 4223
4222/* 4224/*
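
The mem_cgroup_alloc() change replaces an inline error branch with the kernel's usual goto-based unwind, keeping the success path straight-line and the cleanup in one place. The same shape in a small userspace sketch (names are illustrative):

  #include <stdio.h>
  #include <stdlib.h>

  struct thing {
          int *stat;
  };

  static struct thing *thing_alloc(void)
  {
          struct thing *t = calloc(1, sizeof(*t));

          if (!t)
                  return NULL;

          t->stat = calloc(16, sizeof(*t->stat));
          if (!t->stat)
                  goto out_free;          /* unwind in reverse order */

          return t;                       /* success path stays straight-line */

  out_free:
          free(t);
          return NULL;
  }

  int main(void)
  {
          struct thing *t = thing_alloc();

          printf("alloc %s\n", t ? "ok" : "failed");
          if (t) {
                  free(t->stat);
                  free(t);
          }
          return 0;
  }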
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2d1bf7cf8851..4c5133873097 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,7 @@ success:
211 mmu_notifier_invalidate_range_end(mm, start, end); 211 mmu_notifier_invalidate_range_end(mm, start, end);
212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); 212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
213 vm_stat_account(mm, newflags, vma->vm_file, nrpages); 213 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
214 perf_event_mmap(vma);
214 return 0; 215 return 0;
215 216
216fail: 217fail:
@@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
299 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); 300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
300 if (error) 301 if (error)
301 goto out; 302 goto out;
302 perf_event_mmap(vma);
303 nstart = tmp; 303 nstart = tmp;
304 304
305 if (nstart < prev->vm_end) 305 if (nstart < prev->vm_end)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc21312..d31d7ce52c0e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -913,7 +913,7 @@ keep_lumpy:
913 * back off and wait for congestion to clear because further reclaim 913 * back off and wait for congestion to clear because further reclaim
914 * will encounter the same problem 914 * will encounter the same problem
915 */ 915 */
916 if (nr_dirty == nr_congested) 916 if (nr_dirty == nr_congested && nr_dirty != 0)
917 zone_set_flag(zone, ZONE_CONGESTED); 917 zone_set_flag(zone, ZONE_CONGESTED);
918 918
919 free_page_list(&free_pages); 919 free_page_list(&free_pages);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cd2e42be7b68..42eac4d33216 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -949,7 +949,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
949 v[PGPGIN] /= 2; /* sectors -> kbytes */ 949 v[PGPGIN] /= 2; /* sectors -> kbytes */
950 v[PGPGOUT] /= 2; 950 v[PGPGOUT] /= 2;
951#endif 951#endif
952 return m->private + *pos; 952 return (unsigned long *)m->private + *pos;
953} 953}
954 954
955static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 955static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 52077ca22072..6e64f7c6a2e9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -272,13 +272,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
272 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); 272 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
273 } 273 }
274 274
275 new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name, 275 new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
276 vlan_setup, real_dev->num_tx_queues);
277 276
278 if (new_dev == NULL) 277 if (new_dev == NULL)
279 return -ENOBUFS; 278 return -ENOBUFS;
280 279
281 netif_copy_real_num_queues(new_dev, real_dev);
282 dev_net_set(new_dev, net); 280 dev_net_set(new_dev, net);
283 /* need 4 bytes for extra VLAN header info, 281 /* need 4 bytes for extra VLAN header info,
284 * hope the underlying device can handle it. 282 * hope the underlying device can handle it.
@@ -334,12 +332,15 @@ static void vlan_transfer_features(struct net_device *dev,
334 vlandev->features &= ~dev->vlan_features; 332 vlandev->features &= ~dev->vlan_features;
335 vlandev->features |= dev->features & dev->vlan_features; 333 vlandev->features |= dev->features & dev->vlan_features;
336 vlandev->gso_max_size = dev->gso_max_size; 334 vlandev->gso_max_size = dev->gso_max_size;
335
336 if (dev->features & NETIF_F_HW_VLAN_TX)
337 vlandev->hard_header_len = dev->hard_header_len;
338 else
339 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
340
337#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 341#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
338 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 342 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
339#endif 343#endif
340 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
341 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
342
343 if (old_features != vlandev->features) 344 if (old_features != vlandev->features)
344 netdev_features_change(vlandev); 345 netdev_features_change(vlandev);
345} 346}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index db01b3181fdc..5687c9b95f33 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -19,19 +19,25 @@ struct vlan_priority_tci_mapping {
19 19
20 20
21/** 21/**
22 * struct vlan_rx_stats - VLAN percpu rx stats 22 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
23 * @rx_packets: number of received packets 23 * @rx_packets: number of received packets
24 * @rx_bytes: number of received bytes 24 * @rx_bytes: number of received bytes
25 * @rx_multicast: number of received multicast packets 25 * @rx_multicast: number of received multicast packets
26 * @tx_packets: number of transmitted packets
27 * @tx_bytes: number of transmitted bytes
26 * @syncp: synchronization point for 64bit counters 28 * @syncp: synchronization point for 64bit counters
27 * @rx_errors: number of errors 29 * @rx_errors: number of rx errors
30 * @tx_dropped: number of tx drops
28 */ 31 */
29struct vlan_rx_stats { 32struct vlan_pcpu_stats {
30 u64 rx_packets; 33 u64 rx_packets;
31 u64 rx_bytes; 34 u64 rx_bytes;
32 u64 rx_multicast; 35 u64 rx_multicast;
36 u64 tx_packets;
37 u64 tx_bytes;
33 struct u64_stats_sync syncp; 38 struct u64_stats_sync syncp;
34 unsigned long rx_errors; 39 u32 rx_errors;
40 u32 tx_dropped;
35}; 41};
36 42
37/** 43/**
@@ -45,9 +51,7 @@ struct vlan_rx_stats {
45 * @real_dev: underlying netdevice 51 * @real_dev: underlying netdevice
46 * @real_dev_addr: address of underlying netdevice 52 * @real_dev_addr: address of underlying netdevice
47 * @dent: proc dir entry 53 * @dent: proc dir entry
48 * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX 54 * @vlan_pcpu_stats: ptr to percpu rx stats
49 * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
50 * @vlan_rx_stats: ptr to percpu rx stats
51 */ 55 */
52struct vlan_dev_info { 56struct vlan_dev_info {
53 unsigned int nr_ingress_mappings; 57 unsigned int nr_ingress_mappings;
@@ -62,9 +66,7 @@ struct vlan_dev_info {
62 unsigned char real_dev_addr[ETH_ALEN]; 66 unsigned char real_dev_addr[ETH_ALEN];
63 67
64 struct proc_dir_entry *dent; 68 struct proc_dir_entry *dent;
65 unsigned long cnt_inc_headroom_on_tx; 69 struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
66 unsigned long cnt_encap_on_xmit;
67 struct vlan_rx_stats __percpu *vlan_rx_stats;
68}; 70};
69 71
70static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 72static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 69b2f79800a5..ce8e3ab3e7a5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
9 struct sk_buff *skb = *skbp; 9 struct sk_buff *skb = *skbp;
10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; 10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
11 struct net_device *vlan_dev; 11 struct net_device *vlan_dev;
12 struct vlan_rx_stats *rx_stats; 12 struct vlan_pcpu_stats *rx_stats;
13 13
14 vlan_dev = vlan_find_dev(skb->dev, vlan_id); 14 vlan_dev = vlan_find_dev(skb->dev, vlan_id);
15 if (!vlan_dev) { 15 if (!vlan_dev) {
@@ -26,7 +26,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); 26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
27 skb->vlan_tci = 0; 27 skb->vlan_tci = 0;
28 28
29 rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats); 29 rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
30 30
31 u64_stats_update_begin(&rx_stats->syncp); 31 u64_stats_update_begin(&rx_stats->syncp);
32 rx_stats->rx_packets++; 32 rx_stats->rx_packets++;
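
The per-CPU rx/tx counters are bracketed by u64_stats_update_begin()/u64_stats_update_end(), which on 32-bit SMP degenerate to a seqcount so readers can detect a torn 64-bit update (on 64-bit kernels they compile away entirely). A userspace sketch of that seqlock-style protocol, with memory barriers omitted for brevity and a single writer assumed, as with per-CPU stats:

  #include <stdio.h>

  typedef unsigned long long u64;

  struct pcpu_stats {
          unsigned int seq;       /* odd while an update is in flight */
          u64 tx_packets;
          u64 tx_bytes;
  };

  static void update_begin(struct pcpu_stats *s) { s->seq++; }
  static void update_end(struct pcpu_stats *s)   { s->seq++; }

  /* Reader retries until it sees an even, unchanged sequence number. */
  static u64 read_tx_bytes(const struct pcpu_stats *s)
  {
          unsigned int start;
          u64 val;

          do {
                  start = s->seq;
                  val = s->tx_bytes;
          } while ((start & 1) || start != s->seq);
          return val;
  }

  int main(void)
  {
          struct pcpu_stats s = { 0, 0, 0 };

          update_begin(&s);
          s.tx_packets++;
          s.tx_bytes += 1500;
          update_end(&s);

          printf("tx_bytes=%llu\n", read_tx_bytes(&s));
          return 0;
  }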
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 14e3d1fa07a0..be737539f34d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
141 struct packet_type *ptype, struct net_device *orig_dev) 141 struct packet_type *ptype, struct net_device *orig_dev)
142{ 142{
143 struct vlan_hdr *vhdr; 143 struct vlan_hdr *vhdr;
144 struct vlan_rx_stats *rx_stats; 144 struct vlan_pcpu_stats *rx_stats;
145 struct net_device *vlan_dev; 145 struct net_device *vlan_dev;
146 u16 vlan_id; 146 u16 vlan_id;
147 u16 vlan_tci; 147 u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
177 } else { 177 } else {
178 skb->dev = vlan_dev; 178 skb->dev = vlan_dev;
179 179
180 rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats); 180 rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
181 181
182 u64_stats_update_begin(&rx_stats->syncp); 182 u64_stats_update_begin(&rx_stats->syncp);
183 rx_stats->rx_packets++; 183 rx_stats->rx_packets++;
@@ -274,9 +274,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
274 u16 vlan_tci = 0; 274 u16 vlan_tci = 0;
275 int rc; 275 int rc;
276 276
277 if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
278 return -ENOSPC;
279
280 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) { 277 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
281 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); 278 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
282 279
@@ -313,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
313static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, 310static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
314 struct net_device *dev) 311 struct net_device *dev)
315{ 312{
316 int i = skb_get_queue_mapping(skb);
317 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
318 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 313 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
319 unsigned int len; 314 unsigned int len;
320 int ret; 315 int ret;
@@ -326,71 +321,31 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
326 */ 321 */
327 if (veth->h_vlan_proto != htons(ETH_P_8021Q) || 322 if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
328 vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { 323 vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
329 unsigned int orig_headroom = skb_headroom(skb);
330 u16 vlan_tci; 324 u16 vlan_tci;
331
332 vlan_dev_info(dev)->cnt_encap_on_xmit++;
333
334 vlan_tci = vlan_dev_info(dev)->vlan_id; 325 vlan_tci = vlan_dev_info(dev)->vlan_id;
335 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); 326 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
336 skb = __vlan_put_tag(skb, vlan_tci); 327 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
337 if (!skb) {
338 txq->tx_dropped++;
339 return NETDEV_TX_OK;
340 }
341
342 if (orig_headroom < VLAN_HLEN)
343 vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
344 } 328 }
345 329
346
347 skb_set_dev(skb, vlan_dev_info(dev)->real_dev); 330 skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
348 len = skb->len; 331 len = skb->len;
349 ret = dev_queue_xmit(skb); 332 ret = dev_queue_xmit(skb);
350 333
351 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 334 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
352 txq->tx_packets++; 335 struct vlan_pcpu_stats *stats;
353 txq->tx_bytes += len;
354 } else
355 txq->tx_dropped++;
356 336
357 return ret; 337 stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
358} 338 u64_stats_update_begin(&stats->syncp);
359 339 stats->tx_packets++;
360static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, 340 stats->tx_bytes += len;
361 struct net_device *dev) 341 u64_stats_update_end(&stats->syncp);
362{ 342 } else {
363 int i = skb_get_queue_mapping(skb); 343 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
364 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 344 }
365 u16 vlan_tci;
366 unsigned int len;
367 int ret;
368
369 vlan_tci = vlan_dev_info(dev)->vlan_id;
370 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
371 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
372
373 skb->dev = vlan_dev_info(dev)->real_dev;
374 len = skb->len;
375 ret = dev_queue_xmit(skb);
376
377 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
378 txq->tx_packets++;
379 txq->tx_bytes += len;
380 } else
381 txq->tx_dropped++;
382 345
383 return ret; 346 return ret;
384} 347}
385 348
386static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
387{
388 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
389 const struct net_device_ops *ops = rdev->netdev_ops;
390
391 return ops->ndo_select_queue(rdev, skb);
392}
393
394static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 349static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
395{ 350{
396 /* TODO: gotta make sure the underlying layer can handle it, 351 /* TODO: gotta make sure the underlying layer can handle it,
@@ -719,8 +674,7 @@ static const struct header_ops vlan_header_ops = {
719 .parse = eth_header_parse, 674 .parse = eth_header_parse,
720}; 675};
721 676
722static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops, 677static const struct net_device_ops vlan_netdev_ops;
723 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
724 678
725static int vlan_dev_init(struct net_device *dev) 679static int vlan_dev_init(struct net_device *dev)
726{ 680{
@@ -738,6 +692,7 @@ static int vlan_dev_init(struct net_device *dev)
738 (1<<__LINK_STATE_PRESENT); 692 (1<<__LINK_STATE_PRESENT);
739 693
740 dev->features |= real_dev->features & real_dev->vlan_features; 694 dev->features |= real_dev->features & real_dev->vlan_features;
695 dev->features |= NETIF_F_LLTX;
741 dev->gso_max_size = real_dev->gso_max_size; 696 dev->gso_max_size = real_dev->gso_max_size;
742 697
743 /* ipv6 shared card related stuff */ 698 /* ipv6 shared card related stuff */
@@ -755,26 +710,20 @@ static int vlan_dev_init(struct net_device *dev)
755 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 710 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
756 dev->header_ops = real_dev->header_ops; 711 dev->header_ops = real_dev->header_ops;
757 dev->hard_header_len = real_dev->hard_header_len; 712 dev->hard_header_len = real_dev->hard_header_len;
758 if (real_dev->netdev_ops->ndo_select_queue)
759 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
760 else
761 dev->netdev_ops = &vlan_netdev_accel_ops;
762 } else { 713 } else {
763 dev->header_ops = &vlan_header_ops; 714 dev->header_ops = &vlan_header_ops;
764 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 715 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
765 if (real_dev->netdev_ops->ndo_select_queue)
766 dev->netdev_ops = &vlan_netdev_ops_sq;
767 else
768 dev->netdev_ops = &vlan_netdev_ops;
769 } 716 }
770 717
718 dev->netdev_ops = &vlan_netdev_ops;
719
771 if (is_vlan_dev(real_dev)) 720 if (is_vlan_dev(real_dev))
772 subclass = 1; 721 subclass = 1;
773 722
774 vlan_dev_set_lockdep_class(dev, subclass); 723 vlan_dev_set_lockdep_class(dev, subclass);
775 724
776 vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats); 725 vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
777 if (!vlan_dev_info(dev)->vlan_rx_stats) 726 if (!vlan_dev_info(dev)->vlan_pcpu_stats)
778 return -ENOMEM; 727 return -ENOMEM;
779 728
780 return 0; 729 return 0;
@@ -786,8 +735,8 @@ static void vlan_dev_uninit(struct net_device *dev)
786 struct vlan_dev_info *vlan = vlan_dev_info(dev); 735 struct vlan_dev_info *vlan = vlan_dev_info(dev);
787 int i; 736 int i;
788 737
789 free_percpu(vlan->vlan_rx_stats); 738 free_percpu(vlan->vlan_pcpu_stats);
790 vlan->vlan_rx_stats = NULL; 739 vlan->vlan_pcpu_stats = NULL;
791 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 740 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
792 while ((pm = vlan->egress_priority_map[i]) != NULL) { 741 while ((pm = vlan->egress_priority_map[i]) != NULL) {
793 vlan->egress_priority_map[i] = pm->next; 742 vlan->egress_priority_map[i] = pm->next;
@@ -825,33 +774,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
825 774
826static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 775static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
827{ 776{
828 dev_txq_stats_fold(dev, stats);
829 777
830 if (vlan_dev_info(dev)->vlan_rx_stats) { 778 if (vlan_dev_info(dev)->vlan_pcpu_stats) {
831 struct vlan_rx_stats *p, accum = {0}; 779 struct vlan_pcpu_stats *p;
780 u32 rx_errors = 0, tx_dropped = 0;
832 int i; 781 int i;
833 782
834 for_each_possible_cpu(i) { 783 for_each_possible_cpu(i) {
835 u64 rxpackets, rxbytes, rxmulticast; 784 u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
836 unsigned int start; 785 unsigned int start;
837 786
838 p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i); 787 p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
839 do { 788 do {
840 start = u64_stats_fetch_begin_bh(&p->syncp); 789 start = u64_stats_fetch_begin_bh(&p->syncp);
841 rxpackets = p->rx_packets; 790 rxpackets = p->rx_packets;
842 rxbytes = p->rx_bytes; 791 rxbytes = p->rx_bytes;
843 rxmulticast = p->rx_multicast; 792 rxmulticast = p->rx_multicast;
793 txpackets = p->tx_packets;
794 txbytes = p->tx_bytes;
844 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 795 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
845 accum.rx_packets += rxpackets; 796
846 accum.rx_bytes += rxbytes; 797 stats->rx_packets += rxpackets;
847 accum.rx_multicast += rxmulticast; 798 stats->rx_bytes += rxbytes;
848 /* rx_errors is ulong, not protected by syncp */ 799 stats->multicast += rxmulticast;
849 accum.rx_errors += p->rx_errors; 800 stats->tx_packets += txpackets;
801 stats->tx_bytes += txbytes;
802 /* rx_errors & tx_dropped are u32 */
803 rx_errors += p->rx_errors;
804 tx_dropped += p->tx_dropped;
850 } 805 }
851 stats->rx_packets = accum.rx_packets; 806 stats->rx_errors = rx_errors;
852 stats->rx_bytes = accum.rx_bytes; 807 stats->tx_dropped = tx_dropped;
853 stats->rx_errors = accum.rx_errors;
854 stats->multicast = accum.rx_multicast;
855 } 808 }
856 return stats; 809 return stats;
857} 810}
@@ -908,80 +861,6 @@ static const struct net_device_ops vlan_netdev_ops = {
908#endif 861#endif
909}; 862};
910 863
911static const struct net_device_ops vlan_netdev_accel_ops = {
912 .ndo_change_mtu = vlan_dev_change_mtu,
913 .ndo_init = vlan_dev_init,
914 .ndo_uninit = vlan_dev_uninit,
915 .ndo_open = vlan_dev_open,
916 .ndo_stop = vlan_dev_stop,
917 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
918 .ndo_validate_addr = eth_validate_addr,
919 .ndo_set_mac_address = vlan_dev_set_mac_address,
920 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
921 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
922 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
923 .ndo_do_ioctl = vlan_dev_ioctl,
924 .ndo_neigh_setup = vlan_dev_neigh_setup,
925 .ndo_get_stats64 = vlan_dev_get_stats64,
926#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
927 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
928 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
929 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
930 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
931 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
932#endif
933};
934
935static const struct net_device_ops vlan_netdev_ops_sq = {
936 .ndo_select_queue = vlan_dev_select_queue,
937 .ndo_change_mtu = vlan_dev_change_mtu,
938 .ndo_init = vlan_dev_init,
939 .ndo_uninit = vlan_dev_uninit,
940 .ndo_open = vlan_dev_open,
941 .ndo_stop = vlan_dev_stop,
942 .ndo_start_xmit = vlan_dev_hard_start_xmit,
943 .ndo_validate_addr = eth_validate_addr,
944 .ndo_set_mac_address = vlan_dev_set_mac_address,
945 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
946 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
947 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
948 .ndo_do_ioctl = vlan_dev_ioctl,
949 .ndo_neigh_setup = vlan_dev_neigh_setup,
950 .ndo_get_stats64 = vlan_dev_get_stats64,
951#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
952 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
953 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
954 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
955 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
956 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
957#endif
958};
959
960static const struct net_device_ops vlan_netdev_accel_ops_sq = {
961 .ndo_select_queue = vlan_dev_select_queue,
962 .ndo_change_mtu = vlan_dev_change_mtu,
963 .ndo_init = vlan_dev_init,
964 .ndo_uninit = vlan_dev_uninit,
965 .ndo_open = vlan_dev_open,
966 .ndo_stop = vlan_dev_stop,
967 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
968 .ndo_validate_addr = eth_validate_addr,
969 .ndo_set_mac_address = vlan_dev_set_mac_address,
970 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
971 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
972 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
973 .ndo_do_ioctl = vlan_dev_ioctl,
974 .ndo_neigh_setup = vlan_dev_neigh_setup,
975 .ndo_get_stats64 = vlan_dev_get_stats64,
976#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
977 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
978 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
979 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
980 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
981 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
982#endif
983};
984
985void vlan_setup(struct net_device *dev) 864void vlan_setup(struct net_device *dev)
986{ 865{
987 ether_setup(dev); 866 ether_setup(dev);
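
The vlan_dev.c hunks above fold the old per-txq counters and the rx-only vlan_rx_stats into a single per-CPU vlan_pcpu_stats guarded by a u64_stats_sync sequence counter; NETIF_F_LLTX can then be set because the xmit path no longer needs the tx queue lock for accounting. Below is a minimal sketch of the writer/reader pattern in kernel C: the struct layout mirrors the diff, while the helpers tx_account() and tx_bytes_total() are illustrative names, not functions from the patch.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;	/* u32, bumped outside syncp */
	u32			tx_dropped;
};

/* Writer (per-packet fast path): only local-CPU state is touched. */
static void tx_account(struct vlan_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct vlan_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: the retry loop yields a consistent 64-bit snapshot on 32-bit SMP. */
static u64 tx_bytes_total(struct vlan_pcpu_stats __percpu *pcpu)
{
	u64 total = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct vlan_pcpu_stats *p = per_cpu_ptr(pcpu, i);
		unsigned int start;
		u64 bytes;

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		total += bytes;
	}
	return total;
}

The storage itself comes from alloc_percpu(struct vlan_pcpu_stats) in vlan_dev_init() and is released with free_percpu() in vlan_dev_uninit(), exactly as in the hunks above.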
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index ddc105734af7..be9a5c19a775 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -101,25 +101,6 @@ static int vlan_changelink(struct net_device *dev,
101 return 0; 101 return 0;
102} 102}
103 103
104static int vlan_get_tx_queues(struct net *net,
105 struct nlattr *tb[],
106 unsigned int *num_tx_queues,
107 unsigned int *real_num_tx_queues)
108{
109 struct net_device *real_dev;
110
111 if (!tb[IFLA_LINK])
112 return -EINVAL;
113
114 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
115 if (!real_dev)
116 return -ENODEV;
117
118 *num_tx_queues = real_dev->num_tx_queues;
119 *real_num_tx_queues = real_dev->real_num_tx_queues;
120 return 0;
121}
122
123static int vlan_newlink(struct net *src_net, struct net_device *dev, 104static int vlan_newlink(struct net *src_net, struct net_device *dev,
124 struct nlattr *tb[], struct nlattr *data[]) 105 struct nlattr *tb[], struct nlattr *data[])
125{ 106{
@@ -237,7 +218,6 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
237 .maxtype = IFLA_VLAN_MAX, 218 .maxtype = IFLA_VLAN_MAX,
238 .policy = vlan_policy, 219 .policy = vlan_policy,
239 .priv_size = sizeof(struct vlan_dev_info), 220 .priv_size = sizeof(struct vlan_dev_info),
240 .get_tx_queues = vlan_get_tx_queues,
241 .setup = vlan_setup, 221 .setup = vlan_setup,
242 .validate = vlan_validate, 222 .validate = vlan_validate,
243 .newlink = vlan_newlink, 223 .newlink = vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 80e280f56686..d1314cf18adf 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -280,7 +280,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
280 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); 280 const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
281 struct rtnl_link_stats64 temp; 281 struct rtnl_link_stats64 temp;
282 const struct rtnl_link_stats64 *stats; 282 const struct rtnl_link_stats64 *stats;
283 static const char fmt[] = "%30s %12lu\n";
284 static const char fmt64[] = "%30s %12llu\n"; 283 static const char fmt64[] = "%30s %12llu\n";
285 int i; 284 int i;
286 285
@@ -299,10 +298,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
299 seq_puts(seq, "\n"); 298 seq_puts(seq, "\n");
300 seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); 299 seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
301 seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); 300 seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
302 seq_printf(seq, fmt, "total headroom inc",
303 dev_info->cnt_inc_headroom_on_tx);
304 seq_printf(seq, fmt, "total encap on xmit",
305 dev_info->cnt_encap_on_xmit);
306 seq_printf(seq, "Device: %s", dev_info->real_dev->name); 301 seq_printf(seq, "Device: %s", dev_info->real_dev->name);
307 /* now show all PRIORITY mappings relating to this VLAN */ 302 /* now show all PRIORITY mappings relating to this VLAN */
308 seq_printf(seq, "\nINGRESS priority mappings: " 303 seq_printf(seq, "\nINGRESS priority mappings: "
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 45c15f491401..798beac7f100 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -27,31 +27,16 @@
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/kernel.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31#include <linux/slab.h> 32#include <linux/slab.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/stddef.h>
33#include <linux/types.h> 35#include <linux/types.h>
34#include <net/9p/9p.h> 36#include <net/9p/9p.h>
35#include <net/9p/client.h> 37#include <net/9p/client.h>
36#include "protocol.h" 38#include "protocol.h"
37 39
38#ifndef MIN
39#define MIN(a, b) (((a) < (b)) ? (a) : (b))
40#endif
41
42#ifndef MAX
43#define MAX(a, b) (((a) > (b)) ? (a) : (b))
44#endif
45
46#ifndef offset_of
47#define offset_of(type, memb) \
48 ((unsigned long)(&((type *)0)->memb))
49#endif
50#ifndef container_of
51#define container_of(obj, type, memb) \
52 ((type *)(((char *)obj) - offset_of(type, memb)))
53#endif
54
55static int 40static int
56p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); 41p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
57 42
@@ -104,7 +89,7 @@ EXPORT_SYMBOL(p9stat_free);
104 89
105static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) 90static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
106{ 91{
107 size_t len = MIN(pdu->size - pdu->offset, size); 92 size_t len = min(pdu->size - pdu->offset, size);
108 memcpy(data, &pdu->sdata[pdu->offset], len); 93 memcpy(data, &pdu->sdata[pdu->offset], len);
109 pdu->offset += len; 94 pdu->offset += len;
110 return size - len; 95 return size - len;
@@ -112,7 +97,7 @@ static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
112 97
113static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) 98static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
114{ 99{
115 size_t len = MIN(pdu->capacity - pdu->size, size); 100 size_t len = min(pdu->capacity - pdu->size, size);
116 memcpy(&pdu->sdata[pdu->size], data, len); 101 memcpy(&pdu->sdata[pdu->size], data, len);
117 pdu->size += len; 102 pdu->size += len;
118 return size - len; 103 return size - len;
@@ -121,7 +106,7 @@ static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
121static size_t 106static size_t
122pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) 107pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
123{ 108{
124 size_t len = MIN(pdu->capacity - pdu->size, size); 109 size_t len = min(pdu->capacity - pdu->size, size);
125 if (copy_from_user(&pdu->sdata[pdu->size], udata, len)) 110 if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
126 len = 0; 111 len = 0;
127 112
@@ -201,7 +186,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
201 if (errcode) 186 if (errcode)
202 break; 187 break;
203 188
204 size = MAX(len, 0); 189 size = max_t(int16_t, len, 0);
205 190
206 *sptr = kmalloc(size + 1, GFP_KERNEL); 191 *sptr = kmalloc(size + 1, GFP_KERNEL);
207 if (*sptr == NULL) { 192 if (*sptr == NULL) {
@@ -256,8 +241,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
256 p9pdu_readf(pdu, proto_version, "d", count); 241 p9pdu_readf(pdu, proto_version, "d", count);
257 if (!errcode) { 242 if (!errcode) {
258 *count = 243 *count =
259 MIN(*count, 244 min_t(int32_t, *count,
260 pdu->size - pdu->offset); 245 pdu->size - pdu->offset);
261 *data = &pdu->sdata[pdu->offset]; 246 *data = &pdu->sdata[pdu->offset];
262 } 247 }
263 } 248 }
@@ -421,7 +406,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
421 const char *sptr = va_arg(ap, const char *); 406 const char *sptr = va_arg(ap, const char *);
422 int16_t len = 0; 407 int16_t len = 0;
423 if (sptr) 408 if (sptr)
424 len = MIN(strlen(sptr), USHRT_MAX); 409 len = min_t(int16_t, strlen(sptr), USHRT_MAX);
425 410
426 errcode = p9pdu_writef(pdu, proto_version, 411 errcode = p9pdu_writef(pdu, proto_version,
427 "w", len); 412 "w", len);
diff --git a/net/Kconfig b/net/Kconfig
index 55fd82e9ffd9..126c2af0fc1f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,11 @@ config RPS
220 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 220 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
221 default y 221 default y
222 222
223config XPS
224 boolean
225 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
226 default y
227
223menu "Network testing" 228menu "Network testing"
224 229
225config NET_PKTGEN 230config NET_PKTGEN
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index ad2b232a2055..fce2eae8d476 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -97,7 +97,7 @@ static LIST_HEAD(br2684_devs);
97 97
98static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev) 98static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
99{ 99{
100 return (struct br2684_dev *)netdev_priv(net_dev); 100 return netdev_priv(net_dev);
101} 101}
102 102
103static inline struct net_device *list_entry_brdev(const struct list_head *le) 103static inline struct net_device *list_entry_brdev(const struct list_head *le)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ff956d1115bc..d257da50fcfb 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -502,7 +502,8 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
502 struct atmarp_entry *entry; 502 struct atmarp_entry *entry;
503 int error; 503 int error;
504 struct clip_vcc *clip_vcc; 504 struct clip_vcc *clip_vcc;
505 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1}} }; 505 struct flowi fl = { .fl4_dst = ip,
506 .fl4_tos = 1 };
506 struct rtable *rt; 507 struct rtable *rt;
507 508
508 if (vcc->push != clip_push) { 509 if (vcc->push != clip_push) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 181d70c73d70..179e04bc99dd 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -816,8 +816,7 @@ static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
816 if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) 816 if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
817 return -EINVAL; 817 return -EINVAL;
818 vcc->proto_data = dev_lec[arg]; 818 vcc->proto_data = dev_lec[arg];
819 return lec_mcast_make((struct lec_priv *)netdev_priv(dev_lec[arg]), 819 return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
820 vcc);
821} 820}
822 821
823/* Initialize device. */ 822/* Initialize device. */
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa9..bb86d2932394 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1392 ax25_cb *ax25; 1392 ax25_cb *ax25;
1393 int err = 0; 1393 int err = 0;
1394 1394
 1395 memset(fsa, 0, sizeof(*fsa));
1395 lock_sock(sk); 1396 lock_sock(sk);
1396 ax25 = ax25_sk(sk); 1397 ax25 = ax25_sk(sk);
1397 1398
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1403 1404
1404 fsa->fsa_ax25.sax25_family = AF_AX25; 1405 fsa->fsa_ax25.sax25_family = AF_AX25;
1405 fsa->fsa_ax25.sax25_call = ax25->dest_addr; 1406 fsa->fsa_ax25.sax25_call = ax25->dest_addr;
1406 fsa->fsa_ax25.sax25_ndigis = 0;
1407 1407
1408 if (ax25->digipeat != NULL) { 1408 if (ax25->digipeat != NULL) {
1409 ndigi = ax25->digipeat->ndigi; 1409 ndigi = ax25->digipeat->ndigi;
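
The ax25_getname() change above is a standard stack-infoleak fix: zeroing the whole sockaddr once, before anything else, covers structure padding and every field that is only conditionally written (sax25_ndigis no longer needs its own assignment). Sketched in isolation; the function name is illustrative and the types come from the ax25 headers:

#include <linux/string.h>
#include <net/ax25.h>

static void fill_ax25_name(struct full_sockaddr_ax25 *fsa, const ax25_cb *ax25)
{
	memset(fsa, 0, sizeof(*fsa));	/* padding and unset fields read as 0 */
	fsa->fsa_ax25.sax25_family = AF_AX25;
	fsa->fsa_ax25.sax25_call = ax25->dest_addr;
	/* digipeater fields are filled in only when ax25->digipeat is set */
}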
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index d1e433f7d673..7ca1f46a471a 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 10obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 11obj-$(CONFIG_BT_HIDP) += hidp/
12 12
13bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o 13bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index c8436fa31344..84bbb82599b2 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,8 +22,6 @@
22 22
23#include "br_private.h" 23#include "br_private.h"
24 24
25int (*br_should_route_hook)(struct sk_buff *skb);
26
27static const struct stp_proto br_stp_proto = { 25static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv, 26 .rcv = br_stp_rcv,
29}; 27};
@@ -102,8 +100,6 @@ static void __exit br_deinit(void)
102 br_fdb_fini(); 100 br_fdb_fini();
103} 101}
104 102
105EXPORT_SYMBOL(br_should_route_hook);
106
107module_init(br_init) 103module_init(br_init)
108module_exit(br_deinit) 104module_exit(br_deinit)
109MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 17cb0b633576..556443566e9c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -141,7 +141,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
141 141
142#ifdef CONFIG_BRIDGE_NETFILTER 142#ifdef CONFIG_BRIDGE_NETFILTER
143 /* remember the MTU in the rtable for PMTU */ 143 /* remember the MTU in the rtable for PMTU */
144 br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu; 144 dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
145#endif 145#endif
146 146
147 return 0; 147 return 0;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 90512ccfd3e9..2872393b2939 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -238,15 +238,18 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
238int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) 238int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
239{ 239{
240 struct net_bridge_fdb_entry *fdb; 240 struct net_bridge_fdb_entry *fdb;
241 struct net_bridge_port *port;
241 int ret; 242 int ret;
242 243
243 if (!br_port_exists(dev))
244 return 0;
245
246 rcu_read_lock(); 244 rcu_read_lock();
247 fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr); 245 port = br_port_get_rcu(dev);
248 ret = fdb && fdb->dst->dev != dev && 246 if (!port)
249 fdb->dst->state == BR_STATE_FORWARDING; 247 ret = 0;
248 else {
249 fdb = __br_fdb_get(port->br, addr);
250 ret = fdb && fdb->dst->dev != dev &&
251 fdb->dst->state == BR_STATE_FORWARDING;
252 }
250 rcu_read_unlock(); 253 rcu_read_unlock();
251 254
252 return ret; 255 return ret;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index cbfe87f0f34a..2bd11ec6d166 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -223,7 +223,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
223 struct net_bridge_port_group *p; 223 struct net_bridge_port_group *p;
224 struct hlist_node *rp; 224 struct hlist_node *rp;
225 225
226 rp = rcu_dereference(br->router_list.first); 226 rp = rcu_dereference(hlist_first_rcu(&br->router_list));
227 p = mdst ? rcu_dereference(mdst->ports) : NULL; 227 p = mdst ? rcu_dereference(mdst->ports) : NULL;
228 while (p || rp) { 228 while (p || rp) {
229 struct net_bridge_port *port, *lport, *rport; 229 struct net_bridge_port *port, *lport, *rport;
@@ -242,7 +242,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
242 if ((unsigned long)lport >= (unsigned long)port) 242 if ((unsigned long)lport >= (unsigned long)port)
243 p = rcu_dereference(p->next); 243 p = rcu_dereference(p->next);
244 if ((unsigned long)rport >= (unsigned long)port) 244 if ((unsigned long)rport >= (unsigned long)port)
245 rp = rcu_dereference(rp->next); 245 rp = rcu_dereference(hlist_next_rcu(rp));
246 } 246 }
247 247
248 if (!prev) 248 if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 89ad25a76202..d9d1e2bac1d6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -475,11 +475,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
475{ 475{
476 struct net_bridge_port *p; 476 struct net_bridge_port *p;
477 477
478 if (!br_port_exists(dev)) 478 p = br_port_get_rtnl(dev);
479 return -EINVAL; 479 if (!p || p->br != br)
480
481 p = br_port_get(dev);
482 if (p->br != br)
483 return -EINVAL; 480 return -EINVAL;
484 481
485 del_nbp(p); 482 del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 25207a1f182b..6f6d8e1b776f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -21,6 +21,10 @@
21/* Bridge group multicast address 802.1d (pg 51). */ 21/* Bridge group multicast address 802.1d (pg 51). */
22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
23 23
24/* Hook for brouter */
25br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
26EXPORT_SYMBOL(br_should_route_hook);
27
24static int br_pass_frame_up(struct sk_buff *skb) 28static int br_pass_frame_up(struct sk_buff *skb)
25{ 29{
26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; 30 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -139,7 +143,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
139{ 143{
140 struct net_bridge_port *p; 144 struct net_bridge_port *p;
141 const unsigned char *dest = eth_hdr(skb)->h_dest; 145 const unsigned char *dest = eth_hdr(skb)->h_dest;
142 int (*rhook)(struct sk_buff *skb); 146 br_should_route_hook_t *rhook;
143 147
144 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) 148 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
145 return skb; 149 return skb;
@@ -173,8 +177,8 @@ forward:
173 switch (p->state) { 177 switch (p->state) {
174 case BR_STATE_FORWARDING: 178 case BR_STATE_FORWARDING:
175 rhook = rcu_dereference(br_should_route_hook); 179 rhook = rcu_dereference(br_should_route_hook);
176 if (rhook != NULL) { 180 if (rhook) {
177 if (rhook(skb)) 181 if ((*rhook)(skb))
178 return skb; 182 return skb;
179 dest = eth_hdr(skb)->h_dest; 183 dest = eth_hdr(skb)->h_dest;
180 } 184 }
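
br_should_route_hook also changes shape: instead of a bare function pointer exported from br.c, br_input.c now owns a typed, __rcu-annotated pointer, so sparse can check that readers go through rcu_dereference() and that ebtable_broute (see its hunk further below) registers with rcu_assign_pointer(). The idiom in isolation, assuming kernel context; the route_hook_set()/route_hook_run() helpers are illustrative:

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

typedef int br_should_route_hook_t(struct sk_buff *skb);

static br_should_route_hook_t __rcu *route_hook __read_mostly;

static void route_hook_set(br_should_route_hook_t *fn)
{
	rcu_assign_pointer(route_hook, fn);	/* publish with write barrier */
}

static bool route_hook_run(struct sk_buff *skb)
{
	br_should_route_hook_t *rhook;
	bool ret = false;

	rcu_read_lock();	/* br_handle_frame() already runs under RCU */
	rhook = rcu_dereference(route_hook);
	if (rhook)
		ret = (*rhook)(skb) != 0;
	rcu_read_unlock();
	return ret;
}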
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eb5b256ffc88..85a0398b221e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,6 +33,9 @@
33 33
34#include "br_private.h" 34#include "br_private.h"
35 35
36#define mlock_dereference(X, br) \
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38
36#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
37static inline int ipv6_is_local_multicast(const struct in6_addr *addr) 40static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
38{ 41{
@@ -135,7 +138,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
135struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 138struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
136 struct sk_buff *skb) 139 struct sk_buff *skb)
137{ 140{
138 struct net_bridge_mdb_htable *mdb = br->mdb; 141 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
139 struct br_ip ip; 142 struct br_ip ip;
140 143
141 if (br->multicast_disabled) 144 if (br->multicast_disabled)
@@ -235,7 +238,8 @@ static void br_multicast_group_expired(unsigned long data)
235 if (mp->ports) 238 if (mp->ports)
236 goto out; 239 goto out;
237 240
238 mdb = br->mdb; 241 mdb = mlock_dereference(br->mdb, br);
242
239 hlist_del_rcu(&mp->hlist[mdb->ver]); 243 hlist_del_rcu(&mp->hlist[mdb->ver]);
240 mdb->size--; 244 mdb->size--;
241 245
@@ -249,16 +253,20 @@ out:
249static void br_multicast_del_pg(struct net_bridge *br, 253static void br_multicast_del_pg(struct net_bridge *br,
250 struct net_bridge_port_group *pg) 254 struct net_bridge_port_group *pg)
251{ 255{
252 struct net_bridge_mdb_htable *mdb = br->mdb; 256 struct net_bridge_mdb_htable *mdb;
253 struct net_bridge_mdb_entry *mp; 257 struct net_bridge_mdb_entry *mp;
254 struct net_bridge_port_group *p; 258 struct net_bridge_port_group *p;
255 struct net_bridge_port_group **pp; 259 struct net_bridge_port_group __rcu **pp;
260
261 mdb = mlock_dereference(br->mdb, br);
256 262
257 mp = br_mdb_ip_get(mdb, &pg->addr); 263 mp = br_mdb_ip_get(mdb, &pg->addr);
258 if (WARN_ON(!mp)) 264 if (WARN_ON(!mp))
259 return; 265 return;
260 266
261 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 267 for (pp = &mp->ports;
268 (p = mlock_dereference(*pp, br)) != NULL;
269 pp = &p->next) {
262 if (p != pg) 270 if (p != pg)
263 continue; 271 continue;
264 272
@@ -294,10 +302,10 @@ out:
294 spin_unlock(&br->multicast_lock); 302 spin_unlock(&br->multicast_lock);
295} 303}
296 304
297static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max, 305static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
298 int elasticity) 306 int elasticity)
299{ 307{
300 struct net_bridge_mdb_htable *old = *mdbp; 308 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
301 struct net_bridge_mdb_htable *mdb; 309 struct net_bridge_mdb_htable *mdb;
302 int err; 310 int err;
303 311
@@ -569,7 +577,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
569 struct net_bridge *br, struct net_bridge_port *port, 577 struct net_bridge *br, struct net_bridge_port *port,
570 struct br_ip *group, int hash) 578 struct br_ip *group, int hash)
571{ 579{
572 struct net_bridge_mdb_htable *mdb = br->mdb; 580 struct net_bridge_mdb_htable *mdb;
573 struct net_bridge_mdb_entry *mp; 581 struct net_bridge_mdb_entry *mp;
574 struct hlist_node *p; 582 struct hlist_node *p;
575 unsigned count = 0; 583 unsigned count = 0;
@@ -577,6 +585,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
577 int elasticity; 585 int elasticity;
578 int err; 586 int err;
579 587
588 mdb = rcu_dereference_protected(br->mdb, 1);
580 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { 589 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
581 count++; 590 count++;
582 if (unlikely(br_ip_equal(group, &mp->addr))) 591 if (unlikely(br_ip_equal(group, &mp->addr)))
@@ -642,13 +651,16 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
642 struct net_bridge *br, struct net_bridge_port *port, 651 struct net_bridge *br, struct net_bridge_port *port,
643 struct br_ip *group) 652 struct br_ip *group)
644{ 653{
645 struct net_bridge_mdb_htable *mdb = br->mdb; 654 struct net_bridge_mdb_htable *mdb;
646 struct net_bridge_mdb_entry *mp; 655 struct net_bridge_mdb_entry *mp;
647 int hash; 656 int hash;
657 int err;
648 658
659 mdb = rcu_dereference_protected(br->mdb, 1);
649 if (!mdb) { 660 if (!mdb) {
650 if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0)) 661 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
651 return NULL; 662 if (err)
663 return ERR_PTR(err);
652 goto rehash; 664 goto rehash;
653 } 665 }
654 666
@@ -660,7 +672,7 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
660 672
661 case -EAGAIN: 673 case -EAGAIN:
662rehash: 674rehash:
663 mdb = br->mdb; 675 mdb = rcu_dereference_protected(br->mdb, 1);
664 hash = br_ip_hash(mdb, group); 676 hash = br_ip_hash(mdb, group);
665 break; 677 break;
666 678
@@ -670,7 +682,7 @@ rehash:
670 682
671 mp = kzalloc(sizeof(*mp), GFP_ATOMIC); 683 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
672 if (unlikely(!mp)) 684 if (unlikely(!mp))
673 goto out; 685 return ERR_PTR(-ENOMEM);
674 686
675 mp->br = br; 687 mp->br = br;
676 mp->addr = *group; 688 mp->addr = *group;
@@ -692,7 +704,7 @@ static int br_multicast_add_group(struct net_bridge *br,
692{ 704{
693 struct net_bridge_mdb_entry *mp; 705 struct net_bridge_mdb_entry *mp;
694 struct net_bridge_port_group *p; 706 struct net_bridge_port_group *p;
695 struct net_bridge_port_group **pp; 707 struct net_bridge_port_group __rcu **pp;
696 unsigned long now = jiffies; 708 unsigned long now = jiffies;
697 int err; 709 int err;
698 710
@@ -703,7 +715,7 @@ static int br_multicast_add_group(struct net_bridge *br,
703 715
704 mp = br_multicast_new_group(br, port, group); 716 mp = br_multicast_new_group(br, port, group);
705 err = PTR_ERR(mp); 717 err = PTR_ERR(mp);
706 if (unlikely(IS_ERR(mp) || !mp)) 718 if (IS_ERR(mp))
707 goto err; 719 goto err;
708 720
709 if (!port) { 721 if (!port) {
@@ -712,7 +724,9 @@ static int br_multicast_add_group(struct net_bridge *br,
712 goto out; 724 goto out;
713 } 725 }
714 726
715 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 727 for (pp = &mp->ports;
728 (p = mlock_dereference(*pp, br)) != NULL;
729 pp = &p->next) {
716 if (p->port == port) 730 if (p->port == port)
717 goto found; 731 goto found;
718 if ((unsigned long)p->port < (unsigned long)port) 732 if ((unsigned long)p->port < (unsigned long)port)
@@ -1106,7 +1120,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1106 struct net_bridge_mdb_entry *mp; 1120 struct net_bridge_mdb_entry *mp;
1107 struct igmpv3_query *ih3; 1121 struct igmpv3_query *ih3;
1108 struct net_bridge_port_group *p; 1122 struct net_bridge_port_group *p;
1109 struct net_bridge_port_group **pp; 1123 struct net_bridge_port_group __rcu **pp;
1110 unsigned long max_delay; 1124 unsigned long max_delay;
1111 unsigned long now = jiffies; 1125 unsigned long now = jiffies;
1112 __be32 group; 1126 __be32 group;
@@ -1145,7 +1159,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1145 if (!group) 1159 if (!group)
1146 goto out; 1160 goto out;
1147 1161
1148 mp = br_mdb_ip4_get(br->mdb, group); 1162 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
1149 if (!mp) 1163 if (!mp)
1150 goto out; 1164 goto out;
1151 1165
@@ -1157,7 +1171,9 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1157 try_to_del_timer_sync(&mp->timer) >= 0)) 1171 try_to_del_timer_sync(&mp->timer) >= 0))
1158 mod_timer(&mp->timer, now + max_delay); 1172 mod_timer(&mp->timer, now + max_delay);
1159 1173
1160 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1174 for (pp = &mp->ports;
1175 (p = mlock_dereference(*pp, br)) != NULL;
1176 pp = &p->next) {
1161 if (timer_pending(&p->timer) ? 1177 if (timer_pending(&p->timer) ?
1162 time_after(p->timer.expires, now + max_delay) : 1178 time_after(p->timer.expires, now + max_delay) :
1163 try_to_del_timer_sync(&p->timer) >= 0) 1179 try_to_del_timer_sync(&p->timer) >= 0)
@@ -1178,7 +1194,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1178 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1179 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1180 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
1181 struct net_bridge_port_group *p, **pp; 1197 struct net_bridge_port_group *p;
1198 struct net_bridge_port_group __rcu **pp;
1182 unsigned long max_delay; 1199 unsigned long max_delay;
1183 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1184 struct in6_addr *group = NULL; 1201 struct in6_addr *group = NULL;
@@ -1214,7 +1231,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1214 if (!group) 1231 if (!group)
1215 goto out; 1232 goto out;
1216 1233
1217 mp = br_mdb_ip6_get(br->mdb, group); 1234 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
1218 if (!mp) 1235 if (!mp)
1219 goto out; 1236 goto out;
1220 1237
@@ -1225,7 +1242,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1225 try_to_del_timer_sync(&mp->timer) >= 0)) 1242 try_to_del_timer_sync(&mp->timer) >= 0))
1226 mod_timer(&mp->timer, now + max_delay); 1243 mod_timer(&mp->timer, now + max_delay);
1227 1244
1228 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1245 for (pp = &mp->ports;
1246 (p = mlock_dereference(*pp, br)) != NULL;
1247 pp = &p->next) {
1229 if (timer_pending(&p->timer) ? 1248 if (timer_pending(&p->timer) ?
1230 time_after(p->timer.expires, now + max_delay) : 1249 time_after(p->timer.expires, now + max_delay) :
1231 try_to_del_timer_sync(&p->timer) >= 0) 1250 try_to_del_timer_sync(&p->timer) >= 0)
@@ -1254,7 +1273,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1254 timer_pending(&br->multicast_querier_timer)) 1273 timer_pending(&br->multicast_querier_timer))
1255 goto out; 1274 goto out;
1256 1275
1257 mdb = br->mdb; 1276 mdb = mlock_dereference(br->mdb, br);
1258 mp = br_mdb_ip_get(mdb, group); 1277 mp = br_mdb_ip_get(mdb, group);
1259 if (!mp) 1278 if (!mp)
1260 goto out; 1279 goto out;
@@ -1277,7 +1296,9 @@ static void br_multicast_leave_group(struct net_bridge *br,
1277 goto out; 1296 goto out;
1278 } 1297 }
1279 1298
1280 for (p = mp->ports; p; p = p->next) { 1299 for (p = mlock_dereference(mp->ports, br);
1300 p != NULL;
1301 p = mlock_dereference(p->next, br)) {
1281 if (p->port != port) 1302 if (p->port != port)
1282 continue; 1303 continue;
1283 1304
@@ -1625,7 +1646,7 @@ void br_multicast_stop(struct net_bridge *br)
1625 del_timer_sync(&br->multicast_query_timer); 1646 del_timer_sync(&br->multicast_query_timer);
1626 1647
1627 spin_lock_bh(&br->multicast_lock); 1648 spin_lock_bh(&br->multicast_lock);
1628 mdb = br->mdb; 1649 mdb = mlock_dereference(br->mdb, br);
1629 if (!mdb) 1650 if (!mdb)
1630 goto out; 1651 goto out;
1631 1652
@@ -1729,6 +1750,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1729{ 1750{
1730 struct net_bridge_port *port; 1751 struct net_bridge_port *port;
1731 int err = 0; 1752 int err = 0;
1753 struct net_bridge_mdb_htable *mdb;
1732 1754
1733 spin_lock(&br->multicast_lock); 1755 spin_lock(&br->multicast_lock);
1734 if (br->multicast_disabled == !val) 1756 if (br->multicast_disabled == !val)
@@ -1741,15 +1763,16 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1741 if (!netif_running(br->dev)) 1763 if (!netif_running(br->dev))
1742 goto unlock; 1764 goto unlock;
1743 1765
1744 if (br->mdb) { 1766 mdb = mlock_dereference(br->mdb, br);
1745 if (br->mdb->old) { 1767 if (mdb) {
1768 if (mdb->old) {
1746 err = -EEXIST; 1769 err = -EEXIST;
1747rollback: 1770rollback:
1748 br->multicast_disabled = !!val; 1771 br->multicast_disabled = !!val;
1749 goto unlock; 1772 goto unlock;
1750 } 1773 }
1751 1774
1752 err = br_mdb_rehash(&br->mdb, br->mdb->max, 1775 err = br_mdb_rehash(&br->mdb, mdb->max,
1753 br->hash_elasticity); 1776 br->hash_elasticity);
1754 if (err) 1777 if (err)
1755 goto rollback; 1778 goto rollback;
@@ -1774,6 +1797,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1774{ 1797{
1775 int err = -ENOENT; 1798 int err = -ENOENT;
1776 u32 old; 1799 u32 old;
1800 struct net_bridge_mdb_htable *mdb;
1777 1801
1778 spin_lock(&br->multicast_lock); 1802 spin_lock(&br->multicast_lock);
1779 if (!netif_running(br->dev)) 1803 if (!netif_running(br->dev))
@@ -1782,7 +1806,9 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1782 err = -EINVAL; 1806 err = -EINVAL;
1783 if (!is_power_of_2(val)) 1807 if (!is_power_of_2(val))
1784 goto unlock; 1808 goto unlock;
1785 if (br->mdb && val < br->mdb->size) 1809
1810 mdb = mlock_dereference(br->mdb, br);
1811 if (mdb && val < mdb->size)
1786 goto unlock; 1812 goto unlock;
1787 1813
1788 err = 0; 1814 err = 0;
@@ -1790,8 +1816,8 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1790 old = br->hash_max; 1816 old = br->hash_max;
1791 br->hash_max = val; 1817 br->hash_max = val;
1792 1818
1793 if (br->mdb) { 1819 if (mdb) {
1794 if (br->mdb->old) { 1820 if (mdb->old) {
1795 err = -EEXIST; 1821 err = -EEXIST;
1796rollback: 1822rollback:
1797 br->hash_max = old; 1823 br->hash_max = old;
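
The recurring mlock_dereference() conversions in br_multicast.c are update-side RCU accesses: br->mdb and the port-group chains become __rcu, and writers that already hold multicast_lock use rcu_dereference_protected() with a lockdep_is_held() condition rather than paying rcu_dereference()'s read-side cost or casting the annotation away. A reduced sketch of the same pattern, with made-up names:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct mdb_table { int size; };

struct mdb_owner {
	spinlock_t		 lock;
	struct mdb_table __rcu	*mdb;
};

#define mdb_dereference(ptr, o) \
	rcu_dereference_protected(ptr, lockdep_is_held(&(o)->lock))

static void mdb_shrink(struct mdb_owner *o)
{
	struct mdb_table *t;

	spin_lock(&o->lock);
	t = mdb_dereference(o->mdb, o);	/* lockdep proves writers excluded */
	if (t)
		t->size--;
	spin_unlock(&o->lock);
}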
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 865fd7634b67..4b5b66d07bba 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -124,24 +124,25 @@ void br_netfilter_rtable_init(struct net_bridge *br)
124 atomic_set(&rt->dst.__refcnt, 1); 124 atomic_set(&rt->dst.__refcnt, 1);
125 rt->dst.dev = br->dev; 125 rt->dst.dev = br->dev;
126 rt->dst.path = &rt->dst; 126 rt->dst.path = &rt->dst;
127 rt->dst.metrics[RTAX_MTU - 1] = 1500; 127 dst_metric_set(&rt->dst, RTAX_MTU, 1500);
128 rt->dst.flags = DST_NOXFRM; 128 rt->dst.flags = DST_NOXFRM;
129 rt->dst.ops = &fake_dst_ops; 129 rt->dst.ops = &fake_dst_ops;
130} 130}
131 131
132static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) 132static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
133{ 133{
134 if (!br_port_exists(dev)) 134 struct net_bridge_port *port;
135 return NULL; 135
136 return &br_port_get_rcu(dev)->br->fake_rtable; 136 port = br_port_get_rcu(dev);
137 return port ? &port->br->fake_rtable : NULL;
137} 138}
138 139
139static inline struct net_device *bridge_parent(const struct net_device *dev) 140static inline struct net_device *bridge_parent(const struct net_device *dev)
140{ 141{
141 if (!br_port_exists(dev)) 142 struct net_bridge_port *port;
142 return NULL;
143 143
144 return br_port_get_rcu(dev)->br->dev; 144 port = br_port_get_rcu(dev);
145 return port ? port->br->dev : NULL;
145} 146}
146 147
147static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) 148static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -412,13 +413,8 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
412 if (dnat_took_place(skb)) { 413 if (dnat_took_place(skb)) {
413 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { 414 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
414 struct flowi fl = { 415 struct flowi fl = {
415 .nl_u = { 416 .fl4_dst = iph->daddr,
416 .ip4_u = { 417 .fl4_tos = RT_TOS(iph->tos),
417 .daddr = iph->daddr,
418 .saddr = 0,
419 .tos = RT_TOS(iph->tos) },
420 },
421 .proto = 0,
422 }; 418 };
423 struct in_device *in_dev = __in_dev_get_rcu(dev); 419 struct in_device *in_dev = __in_dev_get_rcu(dev);
424 420
@@ -566,26 +562,26 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
566 u32 pkt_len; 562 u32 pkt_len;
567 563
568 if (skb->len < sizeof(struct ipv6hdr)) 564 if (skb->len < sizeof(struct ipv6hdr))
569 goto inhdr_error; 565 return NF_DROP;
570 566
571 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 567 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
572 goto inhdr_error; 568 return NF_DROP;
573 569
574 hdr = ipv6_hdr(skb); 570 hdr = ipv6_hdr(skb);
575 571
576 if (hdr->version != 6) 572 if (hdr->version != 6)
577 goto inhdr_error; 573 return NF_DROP;
578 574
579 pkt_len = ntohs(hdr->payload_len); 575 pkt_len = ntohs(hdr->payload_len);
580 576
581 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { 577 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
582 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) 578 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
583 goto inhdr_error; 579 return NF_DROP;
584 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) 580 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
585 goto inhdr_error; 581 return NF_DROP;
586 } 582 }
587 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) 583 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
588 goto inhdr_error; 584 return NF_DROP;
589 585
590 nf_bridge_put(skb->nf_bridge); 586 nf_bridge_put(skb->nf_bridge);
591 if (!nf_bridge_alloc(skb)) 587 if (!nf_bridge_alloc(skb))
@@ -598,9 +594,6 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
598 br_nf_pre_routing_finish_ipv6); 594 br_nf_pre_routing_finish_ipv6);
599 595
600 return NF_STOLEN; 596 return NF_STOLEN;
601
602inhdr_error:
603 return NF_DROP;
604} 597}
605 598
606/* Direct IPv6 traffic to br_nf_pre_routing_ipv6. 599/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
@@ -619,11 +612,11 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
619 __u32 len = nf_bridge_encap_header_len(skb); 612 __u32 len = nf_bridge_encap_header_len(skb);
620 613
621 if (unlikely(!pskb_may_pull(skb, len))) 614 if (unlikely(!pskb_may_pull(skb, len)))
622 goto out; 615 return NF_DROP;
623 616
624 p = br_port_get_rcu(in); 617 p = br_port_get_rcu(in);
625 if (p == NULL) 618 if (p == NULL)
626 goto out; 619 return NF_DROP;
627 br = p->br; 620 br = p->br;
628 621
629 if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || 622 if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
@@ -645,8 +638,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
645 nf_bridge_pull_encap_header_rcsum(skb); 638 nf_bridge_pull_encap_header_rcsum(skb);
646 639
647 if (br_parse_ip_options(skb)) 640 if (br_parse_ip_options(skb))
648 /* Drop invalid packet */ 641 return NF_DROP;
649 goto out;
650 642
651 nf_bridge_put(skb->nf_bridge); 643 nf_bridge_put(skb->nf_bridge);
652 if (!nf_bridge_alloc(skb)) 644 if (!nf_bridge_alloc(skb))
@@ -660,9 +652,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
660 br_nf_pre_routing_finish); 652 br_nf_pre_routing_finish);
661 653
662 return NF_STOLEN; 654 return NF_STOLEN;
663
664out:
665 return NF_DROP;
666} 655}
667 656
668 657
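
br_device.c and br_netfilter.c stop writing dst->metrics[RTAX_MTU - 1] directly and go through dst_metric_set(), part of the tree-wide abstraction of dst metrics storage so the underlying representation can change without touching every caller. The swap in miniature, assuming <net/dst.h> context (the wrapper name is illustrative):

#include <net/dst.h>

static void fake_rtable_set_mtu(struct dst_entry *dst, u32 mtu)
{
	/* was: dst->metrics[RTAX_MTU - 1] = mtu; */
	dst_metric_set(dst, RTAX_MTU, mtu);
}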
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4a6a378c84e3..f8bf4c7f842c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -119,11 +119,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
119 119
120 idx = 0; 120 idx = 0;
121 for_each_netdev(net, dev) { 121 for_each_netdev(net, dev) {
122 struct net_bridge_port *port = br_port_get_rtnl(dev);
123
122 /* not a bridge port */ 124 /* not a bridge port */
123 if (!br_port_exists(dev) || idx < cb->args[0]) 125 if (!port || idx < cb->args[0])
124 goto skip; 126 goto skip;
125 127
126 if (br_fill_ifinfo(skb, br_port_get(dev), 128 if (br_fill_ifinfo(skb, port,
127 NETLINK_CB(cb->skb).pid, 129 NETLINK_CB(cb->skb).pid,
128 cb->nlh->nlmsg_seq, RTM_NEWLINK, 130 cb->nlh->nlmsg_seq, RTM_NEWLINK,
129 NLM_F_MULTI) < 0) 131 NLM_F_MULTI) < 0)
@@ -169,9 +171,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
169 if (!dev) 171 if (!dev)
170 return -ENODEV; 172 return -ENODEV;
171 173
172 if (!br_port_exists(dev)) 174 p = br_port_get_rtnl(dev);
175 if (!p)
173 return -EINVAL; 176 return -EINVAL;
174 p = br_port_get(dev);
175 177
176 /* if kernel STP is running, don't allow changes */ 178 /* if kernel STP is running, don't allow changes */
177 if (p->br->stp_enabled == BR_KERNEL_STP) 179 if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 404d4e14c6a7..7d337c9b6082 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,15 +32,15 @@ struct notifier_block br_device_notifier = {
32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) 32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
33{ 33{
34 struct net_device *dev = ptr; 34 struct net_device *dev = ptr;
35 struct net_bridge_port *p = br_port_get(dev); 35 struct net_bridge_port *p;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
39 /* not a port of a bridge */ 39 /* not a port of a bridge */
40 if (!br_port_exists(dev)) 40 p = br_port_get_rtnl(dev);
41 if (!p)
41 return NOTIFY_DONE; 42 return NOTIFY_DONE;
42 43
43 p = br_port_get(dev);
44 br = p->br; 44 br = p->br;
45 45
46 switch (event) { 46 switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 75c90edaf7db..84aac7734bfc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -72,7 +72,7 @@ struct net_bridge_fdb_entry
72 72
73struct net_bridge_port_group { 73struct net_bridge_port_group {
74 struct net_bridge_port *port; 74 struct net_bridge_port *port;
75 struct net_bridge_port_group *next; 75 struct net_bridge_port_group __rcu *next;
76 struct hlist_node mglist; 76 struct hlist_node mglist;
77 struct rcu_head rcu; 77 struct rcu_head rcu;
78 struct timer_list timer; 78 struct timer_list timer;
@@ -86,7 +86,7 @@ struct net_bridge_mdb_entry
86 struct hlist_node hlist[2]; 86 struct hlist_node hlist[2];
87 struct hlist_node mglist; 87 struct hlist_node mglist;
88 struct net_bridge *br; 88 struct net_bridge *br;
89 struct net_bridge_port_group *ports; 89 struct net_bridge_port_group __rcu *ports;
90 struct rcu_head rcu; 90 struct rcu_head rcu;
91 struct timer_list timer; 91 struct timer_list timer;
92 struct timer_list query_timer; 92 struct timer_list query_timer;
@@ -151,11 +151,20 @@ struct net_bridge_port
151#endif 151#endif
152}; 152};
153 153
154#define br_port_get_rcu(dev) \
155 ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
156#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
157#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 154#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
158 155
156static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
157{
158 struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
159 return br_port_exists(dev) ? port : NULL;
160}
161
162static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
163{
164 return br_port_exists(dev) ?
165 rtnl_dereference(dev->rx_handler_data) : NULL;
166}
167
159struct br_cpu_netstats { 168struct br_cpu_netstats {
160 u64 rx_packets; 169 u64 rx_packets;
161 u64 rx_bytes; 170 u64 rx_bytes;
@@ -227,7 +236,7 @@ struct net_bridge
227 unsigned long multicast_startup_query_interval; 236 unsigned long multicast_startup_query_interval;
228 237
229 spinlock_t multicast_lock; 238 spinlock_t multicast_lock;
230 struct net_bridge_mdb_htable *mdb; 239 struct net_bridge_mdb_htable __rcu *mdb;
231 struct hlist_head router_list; 240 struct hlist_head router_list;
232 struct hlist_head mglist; 241 struct hlist_head mglist;
233 242
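
With br_port_get_rcu() and br_port_get_rtnl() returning NULL for devices that are not bridge ports, every caller folds the former two-step "test IFF_BRIDGE_PORT, then dereference rx_handler_data" into a single NULL-checked load, as the br_fdb.c, br_if.c, br_netfilter.c, br_netlink.c and br_notify.c hunks show. A hypothetical caller, for illustration only (assumes br_private.h is in scope):

static struct net_bridge *port_to_bridge_rcu(const struct net_device *dev)
{
	struct net_bridge_port *p = br_port_get_rcu(dev);

	return p ? p->br : NULL;	/* no window between flag test and load */
}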
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 35cf27087b56..3d9a55d3822f 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -141,10 +141,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
141 struct net_bridge *br; 141 struct net_bridge *br;
142 const unsigned char *buf; 142 const unsigned char *buf;
143 143
144 if (!br_port_exists(dev))
145 goto err;
146 p = br_port_get_rcu(dev);
147
148 if (!pskb_may_pull(skb, 4)) 144 if (!pskb_may_pull(skb, 4))
149 goto err; 145 goto err;
150 146
@@ -153,6 +149,10 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) 149 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
154 goto err; 150 goto err;
155 151
152 p = br_port_get_rcu(dev);
153 if (!p)
154 goto err;
155
156 br = p->br; 156 br = p->br;
157 spin_lock(&br->lock); 157 spin_lock(&br->lock);
158 158
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index ae3f106c3908..1bcaf36ad612 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,7 +87,8 @@ static int __init ebtable_broute_init(void)
87 if (ret < 0) 87 if (ret < 0)
88 return ret; 88 return ret;
89 /* see br_input.c */ 89 /* see br_input.c */
90 rcu_assign_pointer(br_should_route_hook, ebt_broute); 90 rcu_assign_pointer(br_should_route_hook,
91 (br_should_route_hook_t *)ebt_broute);
91 return 0; 92 return 0;
92} 93}
93 94
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index a1dcf83f0d58..cbc9f395ab1e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -128,6 +128,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128 const struct net_device *in, const struct net_device *out) 128 const struct net_device *in, const struct net_device *out)
129{ 129{
130 const struct ethhdr *h = eth_hdr(skb); 130 const struct ethhdr *h = eth_hdr(skb);
131 const struct net_bridge_port *p;
131 __be16 ethproto; 132 __be16 ethproto;
132 int verdict, i; 133 int verdict, i;
133 134
@@ -148,13 +149,11 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
148 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT)) 149 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
149 return 1; 150 return 1;
150 /* rcu_read_lock()ed by nf_hook_slow */ 151 /* rcu_read_lock()ed by nf_hook_slow */
151 if (in && br_port_exists(in) && 152 if (in && (p = br_port_get_rcu(in)) != NULL &&
152 FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev), 153 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
153 EBT_ILOGICALIN))
154 return 1; 154 return 1;
155 if (out && br_port_exists(out) && 155 if (out && (p = br_port_get_rcu(out)) != NULL &&
156 FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev), 156 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
157 EBT_ILOGICALOUT))
158 return 1; 157 return 1;
159 158
160 if (e->bitmask & EBT_SOURCEMAC) { 159 if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Makefile b/net/caif/Makefile
index f87481fb0e65..9d38e406e4a4 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,8 +1,6 @@
1ifeq ($(CONFIG_CAIF_DEBUG),y) 1ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
2EXTRA_CFLAGS += -DDEBUG
3endif
4 2
5caif-objs := caif_dev.o \ 3caif-y := caif_dev.o \
6 cfcnfg.o cfmuxl.o cfctrl.o \ 4 cfcnfg.o cfmuxl.o cfctrl.o \
7 cffrml.o cfveil.o cfdbgl.o\ 5 cffrml.o cfveil.o cfdbgl.o\
8 cfserl.o cfdgml.o \ 6 cfserl.o cfdgml.o \
@@ -13,4 +11,4 @@ obj-$(CONFIG_CAIF) += caif.o
13obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o 11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
14obj-$(CONFIG_CAIF) += caif_socket.o 12obj-$(CONFIG_CAIF) += caif_socket.o
15 13
16export-objs := caif.o 14export-y := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae68303d3a..d522d8c1703e 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
16{ 16{
17 struct dev_info *dev_info; 17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref; 18 enum cfcnfg_phy_preference pref;
19 int res;
20
19 memset(l, 0, sizeof(*l)); 21 memset(l, 0, sizeof(*l));
 20 l->priority = s->priority; 22 /* In the CAIF protocol a low value means high priority */
23 l->priority = CAIF_PRIO_MAX - s->priority + 1;
21 24
 22 if (s->link_name[0] != '\0') 25 if (s->ifindex != 0) {
23 l->phyid = cfcnfg_get_named(cnfg, s->link_name); 26 res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
27 if (res < 0)
28 return res;
29 l->phyid = res;
30 }
24 else { 31 else {
25 switch (s->link_selector) { 32 switch (s->link_selector) {
26 case CAIF_LINK_HIGH_BANDW: 33 case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a055d1..a42a408306e4 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
307 307
308 case NETDEV_UNREGISTER: 308 case NETDEV_UNREGISTER:
309 caifd = caif_get(dev); 309 caifd = caif_get(dev);
310 if (caifd == NULL)
311 break;
310 netdev_info(dev, "unregister\n"); 312 netdev_info(dev, "unregister\n");
311 atomic_set(&caifd->state, what); 313 atomic_set(&caifd->state, what);
312 caif_device_destroy(dev); 314 caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd0000f..1bf0cf503796 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock,
716{ 716{
717 struct sock *sk = sock->sk; 717 struct sock *sk = sock->sk;
718 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 718 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
719 int prio, linksel; 719 int linksel;
720 struct ifreq ifreq;
721 720
722 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) 721 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
723 return -ENOPROTOOPT; 722 return -ENOPROTOOPT;
@@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock,
735 release_sock(&cf_sk->sk); 734 release_sock(&cf_sk->sk);
736 return 0; 735 return 0;
737 736
738 case SO_PRIORITY:
739 if (lvl != SOL_SOCKET)
740 goto bad_sol;
741 if (ol < sizeof(int))
742 return -EINVAL;
743 if (copy_from_user(&prio, ov, sizeof(int)))
744 return -EINVAL;
745 lock_sock(&(cf_sk->sk));
746 cf_sk->conn_req.priority = prio;
747 release_sock(&cf_sk->sk);
748 return 0;
749
750 case SO_BINDTODEVICE:
751 if (lvl != SOL_SOCKET)
752 goto bad_sol;
753 if (ol < sizeof(struct ifreq))
754 return -EINVAL;
755 if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
756 return -EFAULT;
757 lock_sock(&(cf_sk->sk));
758 strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
759 sizeof(cf_sk->conn_req.link_name));
760 cf_sk->conn_req.link_name
761 [sizeof(cf_sk->conn_req.link_name)-1] = 0;
762 release_sock(&cf_sk->sk);
763 return 0;
764
765 case CAIFSO_REQ_PARAM: 737 case CAIFSO_REQ_PARAM:
766 if (lvl != SOL_CAIF) 738 if (lvl != SOL_CAIF)
767 goto bad_sol; 739 goto bad_sol;
@@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
880 sock->state = SS_CONNECTING; 852 sock->state = SS_CONNECTING;
881 sk->sk_state = CAIF_CONNECTING; 853 sk->sk_state = CAIF_CONNECTING;
882 854
 855 /* Check priority value coming from socket */
 856 /* if the priority value is out of range it will be adjusted */
857 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
858 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
859 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
860 cf_sk->conn_req.priority = CAIF_PRIO_MIN;
861 else
862 cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
863
 864 /* ifindex = id of the interface. */
865 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
866
883 dbfs_atomic_inc(&cnt.num_connect_req); 867 dbfs_atomic_inc(&cnt.num_connect_req);
884 cf_sk->layer.receive = caif_sktrecv_cb; 868 cf_sk->layer.receive = caif_sktrecv_cb;
885 err = caif_connect_client(&cf_sk->conn_req, 869 err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
905 cf_sk->maxframe = mtu - (headroom + tailroom); 889 cf_sk->maxframe = mtu - (headroom + tailroom);
906 if (cf_sk->maxframe < 1) { 890 if (cf_sk->maxframe < 1) {
907 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); 891 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
892 err = -ENODEV;
908 goto out; 893 goto out;
909 } 894 }
910 895
@@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1142 set_rx_flow_on(cf_sk); 1127 set_rx_flow_on(cf_sk);
1143 1128
1144 /* Set default options on configuration */ 1129 /* Set default options on configuration */
 1145 cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; 1130 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
1146 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1131 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1147 cf_sk->conn_req.protocol = protocol; 1132 cf_sk->conn_req.protocol = protocol;
1148 /* Increase the number of sockets created. */ 1133 /* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd18914..21ede141018a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
173 return NULL; 173 return NULL;
174} 174}
175 175
176int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) 176
177int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
177{ 178{
178 int i; 179 int i;
179 180 for (i = 0; i < MAX_PHY_LAYERS; i++)
180 /* Try to match with specified name */ 181 if (cnfg->phy_layers[i].frm_layer != NULL &&
181 for (i = 0; i < MAX_PHY_LAYERS; i++) { 182 cnfg->phy_layers[i].ifindex == ifi)
182 if (cnfg->phy_layers[i].frm_layer != NULL 183 return i;
183 && strcmp(cnfg->phy_layers[i].phy_layer->name, 184 return -ENODEV;
184 name) == 0)
185 return cnfg->phy_layers[i].frm_layer->id;
186 }
187 return 0;
188} 185}
189 186
190int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) 187int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a109aa..3cd8f978e309 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
361 struct cfctrl_request_info *p, *tmp; 361 struct cfctrl_request_info *p, *tmp;
362 struct cfctrl *ctrl = container_obj(layr); 362 struct cfctrl *ctrl = container_obj(layr);
363 spin_lock(&ctrl->info_list_lock); 363 spin_lock(&ctrl->info_list_lock);
364 pr_warn("enter\n");
365 364
366 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 365 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
367 if (p->client_layer == adap_layer) { 366 if (p->client_layer == adap_layer) {
368 pr_warn("cancel req :%d\n", p->sequence_no); 367 pr_debug("cancel req :%d\n", p->sequence_no);
369 list_del(&p->list); 368 list_del(&p->list);
370 kfree(p); 369 kfree(p);
371 } 370 }
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9ac66f..11a2af4c162a 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
12#include <net/caif/cfsrvl.h> 12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
14 14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16
15static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); 17static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
16static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); 18static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
17 19
@@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
38 40
39static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) 41static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
40{ 42{
43 struct cfsrvl *service = container_obj(layr);
44 struct caif_payload_info *info;
45 int ret;
46
47 if (!cfsrvl_ready(service, &ret))
48 return ret;
49
50 /* Add info for MUX-layer to route the packet out */
51 info = cfpkt_info(pkt);
52 info->channel_id = service->layer.id;
53 info->dev_info = &service->dev_info;
54
41 return layr->dn->transmit(layr->dn, pkt); 55 return layr->dn->transmit(layr->dn, pkt);
42} 56}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481e8d25..e2fb5fa75795 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@ out:
193 193
194static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) 194static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
195{ 195{
196 caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); 196 caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
197 197
198 /* Add info for MUX-layer to route the packet out. */ 198 /* Add info for MUX-layer to route the packet out. */
199 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; 199 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/Makefile b/net/can/Makefile
index 9cd3c4b3abda..2d3894b32742 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -3,10 +3,10 @@
3# 3#
4 4
5obj-$(CONFIG_CAN) += can.o 5obj-$(CONFIG_CAN) += can.o
6can-objs := af_can.o proc.o 6can-y := af_can.o proc.o
7 7
8obj-$(CONFIG_CAN_RAW) += can-raw.o 8obj-$(CONFIG_CAN_RAW) += can-raw.o
9can-raw-objs := raw.o 9can-raw-y := raw.o
10 10
11obj-$(CONFIG_CAN_BCM) += can-bcm.o 11obj-$(CONFIG_CAN_BCM) += can-bcm.o
12can-bcm-objs := bcm.o 12can-bcm-y := bcm.o
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e4be20..6faa8256e10c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
125 struct list_head tx_ops; 125 struct list_head tx_ops;
126 unsigned long dropped_usr_msgs; 126 unsigned long dropped_usr_msgs;
127 struct proc_dir_entry *bcm_proc_read; 127 struct proc_dir_entry *bcm_proc_read;
128 char procname [9]; /* pointer printed in ASCII with \0 */ 128 char procname [20]; /* pointer printed in ASCII with \0 */
129}; 129};
130 130
131static inline struct bcm_sock *bcm_sk(const struct sock *sk) 131static inline struct bcm_sock *bcm_sk(const struct sock *sk)
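
The bump from 9 to 20 bytes is about pointer width: 9 bytes only covers a 32-bit value in hex plus the terminating NUL. A quick standalone check of the worst case for a 64-bit pointer (the hex and decimal renderings below are illustrative, not a claim about bcm.c's exact format string):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned long p = (unsigned long)UINTPTR_MAX;  /* worst-case pointer value */
            char buf[32];

            /* snprintf returns the length the full text needs, minus the NUL */
            printf("hex: %d chars\n", snprintf(buf, sizeof(buf), "%lx", p)); /* 16 on 64-bit */
            printf("dec: %d chars\n", snprintf(buf, sizeof(buf), "%lu", p)); /* 20 on 64-bit */
            return 0;
    }
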
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index aab1cabb8035..e87ef435e11b 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -1,12 +1,9 @@
1# 1#
2# Makefile for CEPH filesystem. 2# Makefile for CEPH filesystem.
3# 3#
4
5ifneq ($(KERNELRELEASE),)
6
7obj-$(CONFIG_CEPH_LIB) += libceph.o 4obj-$(CONFIG_CEPH_LIB) += libceph.o
8 5
9libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ 6libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
10 mon_client.o \ 7 mon_client.o \
11 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ 8 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
12 debugfs.o \ 9 debugfs.o \
@@ -16,22 +13,3 @@ libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
16 ceph_fs.o ceph_strings.o ceph_hash.o \ 13 ceph_fs.o ceph_strings.o ceph_hash.o \
17 pagevec.o 14 pagevec.o
18 15
19else
20#Otherwise we were called directly from the command
21# line; invoke the kernel build system.
22
23KERNELDIR ?= /lib/modules/$(shell uname -r)/build
24PWD := $(shell pwd)
25
26default: all
27
28all:
29 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
30
31modules_install:
32 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
33
34clean:
35 $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
36
37endif
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 53d8abfa25d5..bf3e6a13c215 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
19 if (b->vec.iov_base) { 19 if (b->vec.iov_base) {
20 b->is_vmalloc = false; 20 b->is_vmalloc = false;
21 } else { 21 } else {
22 b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); 22 b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
23 if (!b->vec.iov_base) { 23 if (!b->vec.iov_base) {
24 kfree(b); 24 kfree(b);
25 return NULL; 25 return NULL;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cd1e039c8755..18ac112ea7ae 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -177,7 +177,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
177 * interrupt level will suddenly eat the receive_queue. 177 * interrupt level will suddenly eat the receive_queue.
178 * 178 *
179 * Look at current nfs client by the way... 179 * Look at current nfs client by the way...
180 * However, this function was corrent in any case. 8) 180 * However, this function was correct in any case. 8)
181 */ 181 */
182 unsigned long cpu_flags; 182 unsigned long cpu_flags;
183 183
diff --git a/net/core/dev.c b/net/core/dev.c
index 35dfb8318483..d28b3a023bb2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -743,34 +743,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
743EXPORT_SYMBOL(dev_get_by_index); 743EXPORT_SYMBOL(dev_get_by_index);
744 744
745/** 745/**
746 * dev_getbyhwaddr - find a device by its hardware address 746 * dev_getbyhwaddr_rcu - find a device by its hardware address
747 * @net: the applicable net namespace 747 * @net: the applicable net namespace
748 * @type: media type of device 748 * @type: media type of device
749 * @ha: hardware address 749 * @ha: hardware address
750 * 750 *
751 * Search for an interface by MAC address. Returns NULL if the device 751 * Search for an interface by MAC address. Returns NULL if the device
 752 * is not found or a pointer to the device. The caller must hold the 752 * is not found or a pointer to the device. The caller must hold RCU.
753 * rtnl semaphore. The returned device has not had its ref count increased 753 * The returned device has not had its ref count increased
754 * and the caller must therefore be careful about locking 754 * and the caller must therefore be careful about locking
755 * 755 *
756 * BUGS:
757 * If the API was consistent this would be __dev_get_by_hwaddr
758 */ 756 */
759 757
760struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha) 758struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
759 const char *ha)
761{ 760{
762 struct net_device *dev; 761 struct net_device *dev;
763 762
764 ASSERT_RTNL(); 763 for_each_netdev_rcu(net, dev)
765
766 for_each_netdev(net, dev)
767 if (dev->type == type && 764 if (dev->type == type &&
768 !memcmp(dev->dev_addr, ha, dev->addr_len)) 765 !memcmp(dev->dev_addr, ha, dev->addr_len))
769 return dev; 766 return dev;
770 767
771 return NULL; 768 return NULL;
772} 769}
773EXPORT_SYMBOL(dev_getbyhwaddr); 770EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
774 771
775struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 772struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
776{ 773{
@@ -1557,12 +1554,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1557 */ 1554 */
1558int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 1555int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1559{ 1556{
1557 int rc;
1558
1560 if (txq < 1 || txq > dev->num_tx_queues) 1559 if (txq < 1 || txq > dev->num_tx_queues)
1561 return -EINVAL; 1560 return -EINVAL;
1562 1561
1563 if (dev->reg_state == NETREG_REGISTERED) { 1562 if (dev->reg_state == NETREG_REGISTERED) {
1564 ASSERT_RTNL(); 1563 ASSERT_RTNL();
1565 1564
1565 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1566 txq);
1567 if (rc)
1568 return rc;
1569
1566 if (txq < dev->real_num_tx_queues) 1570 if (txq < dev->real_num_tx_queues)
1567 qdisc_reset_all_tx_gt(dev, txq); 1571 qdisc_reset_all_tx_gt(dev, txq);
1568 } 1572 }
@@ -1794,16 +1798,18 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1794 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1798 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1795 struct packet_type *ptype; 1799 struct packet_type *ptype;
1796 __be16 type = skb->protocol; 1800 __be16 type = skb->protocol;
1801 int vlan_depth = ETH_HLEN;
1797 int err; 1802 int err;
1798 1803
1799 if (type == htons(ETH_P_8021Q)) { 1804 while (type == htons(ETH_P_8021Q)) {
1800 struct vlan_ethhdr *veh; 1805 struct vlan_hdr *vh;
1801 1806
1802 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) 1807 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1803 return ERR_PTR(-EINVAL); 1808 return ERR_PTR(-EINVAL);
1804 1809
1805 veh = (struct vlan_ethhdr *)skb->data; 1810 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1806 type = veh->h_vlan_encapsulated_proto; 1811 type = vh->h_vlan_encapsulated_proto;
1812 vlan_depth += VLAN_HLEN;
1807 } 1813 }
1808 1814
1809 skb_reset_mac_header(skb); 1815 skb_reset_mac_header(skb);
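
Where the old code peeled at most one VLAN tag, the loop above walks an arbitrary stack of 802.1Q headers, advancing vlan_depth by VLAN_HLEN per tag until a non-VLAN EtherType shows up. A standalone rendering of the same walk on a doubly tagged frame (bounds handling simplified relative to pskb_may_pull()):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN    4
    #define ETH_P_8021Q 0x8100

    /* Return the innermost EtherType, walking any number of stacked
     * 802.1Q tags; mirrors the vlan_depth loop above. */
    static uint16_t inner_proto(const uint8_t *frame, size_t len)
    {
            size_t depth = ETH_HLEN;
            uint16_t type;

            memcpy(&type, frame + 12, 2);                 /* outer EtherType */
            type = ntohs(type);
            while (type == ETH_P_8021Q && depth + VLAN_HLEN <= len) {
                    memcpy(&type, frame + depth + 2, 2);  /* encapsulated proto */
                    type = ntohs(type);
                    depth += VLAN_HLEN;
            }
            return type;
    }

    int main(void)
    {
            uint8_t f[ETH_HLEN + 2 * VLAN_HLEN] = {0};

            f[12] = 0x81; f[13] = 0x00;   /* outer type: 802.1Q */
            f[16] = 0x81; f[17] = 0x00;   /* first tag wraps a second tag */
            f[20] = 0x08; f[21] = 0x00;   /* second tag wraps IPv4 */
            printf("0x%04x\n", inner_proto(f, sizeof(f)));   /* prints 0x0800 */
            return 0;
    }
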
@@ -1817,8 +1823,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1817 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) 1823 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1818 dev->ethtool_ops->get_drvinfo(dev, &info); 1824 dev->ethtool_ops->get_drvinfo(dev, &info);
1819 1825
1820 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d " 1826 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
1821 "ip_summed=%d",
1822 info.driver, dev ? dev->features : 0L, 1827 info.driver, dev ? dev->features : 0L,
1823 skb->sk ? skb->sk->sk_route_caps : 0L, 1828 skb->sk ? skb->sk->sk_route_caps : 0L,
1824 skb->len, skb->data_len, skb->ip_summed); 1829 skb->len, skb->data_len, skb->ip_summed);
@@ -1967,6 +1972,23 @@ static inline void skb_orphan_try(struct sk_buff *skb)
1967 } 1972 }
1968} 1973}
1969 1974
1975int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
1976{
1977 __be16 protocol = skb->protocol;
1978
1979 if (protocol == htons(ETH_P_8021Q)) {
1980 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1981 protocol = veh->h_vlan_encapsulated_proto;
1982 } else if (!skb->vlan_tci)
1983 return dev->features;
1984
1985 if (protocol != htons(ETH_P_8021Q))
1986 return dev->features & dev->vlan_features;
1987 else
1988 return 0;
1989}
1990EXPORT_SYMBOL(netif_get_vlan_features);
1991
1970/* 1992/*
1971 * Returns true if either: 1993 * Returns true if either:
1972 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 1994 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
@@ -1977,15 +1999,20 @@ static inline void skb_orphan_try(struct sk_buff *skb)
1977static inline int skb_needs_linearize(struct sk_buff *skb, 1999static inline int skb_needs_linearize(struct sk_buff *skb,
1978 struct net_device *dev) 2000 struct net_device *dev)
1979{ 2001{
1980 int features = dev->features; 2002 if (skb_is_nonlinear(skb)) {
2003 int features = dev->features;
1981 2004
1982 if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb)) 2005 if (vlan_tx_tag_present(skb))
1983 features &= dev->vlan_features; 2006 features &= dev->vlan_features;
2007
2008 return (skb_has_frag_list(skb) &&
2009 !(features & NETIF_F_FRAGLIST)) ||
2010 (skb_shinfo(skb)->nr_frags &&
2011 (!(features & NETIF_F_SG) ||
2012 illegal_highdma(dev, skb)));
2013 }
1984 2014
1985 return skb_is_nonlinear(skb) && 2015 return 0;
1986 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
1987 (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
1988 illegal_highdma(dev, skb))));
1989} 2016}
1990 2017
1991int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2018int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1995,9 +2022,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1995 int rc = NETDEV_TX_OK; 2022 int rc = NETDEV_TX_OK;
1996 2023
1997 if (likely(!skb->next)) { 2024 if (likely(!skb->next)) {
1998 if (!list_empty(&ptype_all))
1999 dev_queue_xmit_nit(skb, dev);
2000
 2001 /* 2025 /*
 2002 * If device doesnt need skb->dst, release it right now while 2026 * If device doesnt need skb->dst, release it right now while
 2003 * its hot in this cpu cache 2027 * its hot in this cpu cache
@@ -2005,6 +2029,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2005 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2029 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2006 skb_dst_drop(skb); 2030 skb_dst_drop(skb);
2007 2031
2032 if (!list_empty(&ptype_all))
2033 dev_queue_xmit_nit(skb, dev);
2034
2008 skb_orphan_try(skb); 2035 skb_orphan_try(skb);
2009 2036
2010 if (vlan_tx_tag_present(skb) && 2037 if (vlan_tx_tag_present(skb) &&
@@ -2119,26 +2146,70 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2119 return queue_index; 2146 return queue_index;
2120} 2147}
2121 2148
2149static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2150{
2151#ifdef CONFIG_XPS
2152 struct xps_dev_maps *dev_maps;
2153 struct xps_map *map;
2154 int queue_index = -1;
2155
2156 rcu_read_lock();
2157 dev_maps = rcu_dereference(dev->xps_maps);
2158 if (dev_maps) {
2159 map = rcu_dereference(
2160 dev_maps->cpu_map[raw_smp_processor_id()]);
2161 if (map) {
2162 if (map->len == 1)
2163 queue_index = map->queues[0];
2164 else {
2165 u32 hash;
2166 if (skb->sk && skb->sk->sk_hash)
2167 hash = skb->sk->sk_hash;
2168 else
2169 hash = (__force u16) skb->protocol ^
2170 skb->rxhash;
2171 hash = jhash_1word(hash, hashrnd);
2172 queue_index = map->queues[
2173 ((u64)hash * map->len) >> 32];
2174 }
2175 if (unlikely(queue_index >= dev->real_num_tx_queues))
2176 queue_index = -1;
2177 }
2178 }
2179 rcu_read_unlock();
2180
2181 return queue_index;
2182#else
2183 return -1;
2184#endif
2185}
2186
2122static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2187static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2123 struct sk_buff *skb) 2188 struct sk_buff *skb)
2124{ 2189{
2125 int queue_index; 2190 int queue_index;
2126 const struct net_device_ops *ops = dev->netdev_ops; 2191 const struct net_device_ops *ops = dev->netdev_ops;
2127 2192
2128 if (ops->ndo_select_queue) { 2193 if (dev->real_num_tx_queues == 1)
2194 queue_index = 0;
2195 else if (ops->ndo_select_queue) {
2129 queue_index = ops->ndo_select_queue(dev, skb); 2196 queue_index = ops->ndo_select_queue(dev, skb);
2130 queue_index = dev_cap_txqueue(dev, queue_index); 2197 queue_index = dev_cap_txqueue(dev, queue_index);
2131 } else { 2198 } else {
2132 struct sock *sk = skb->sk; 2199 struct sock *sk = skb->sk;
2133 queue_index = sk_tx_queue_get(sk); 2200 queue_index = sk_tx_queue_get(sk);
2134 if (queue_index < 0) {
2135 2201
2136 queue_index = 0; 2202 if (queue_index < 0 || skb->ooo_okay ||
2137 if (dev->real_num_tx_queues > 1) 2203 queue_index >= dev->real_num_tx_queues) {
2204 int old_index = queue_index;
2205
2206 queue_index = get_xps_queue(dev, skb);
2207 if (queue_index < 0)
2138 queue_index = skb_tx_hash(dev, skb); 2208 queue_index = skb_tx_hash(dev, skb);
2139 2209
2140 if (sk) { 2210 if (queue_index != old_index && sk) {
2141 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); 2211 struct dst_entry *dst =
2212 rcu_dereference_check(sk->sk_dst_cache, 1);
2142 2213
2143 if (dst && skb_dst(skb) == dst) 2214 if (dst && skb_dst(skb) == dst)
2144 sk_tx_queue_set(sk, queue_index); 2215 sk_tx_queue_set(sk, queue_index);
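
get_xps_queue() picks among map->len queues without a modulo: ((u64)hash * map->len) >> 32 scales a 32-bit hash linearly onto [0, len), which is cheap and bias-free for small len. A standalone arithmetic check:

    #include <stdio.h>
    #include <stdint.h>

    /* Map a 32-bit hash uniformly onto [0, len): (hash * len) / 2^32,
     * computed in 64 bits, as in the XPS hunk above. */
    static uint32_t scale(uint32_t hash, uint32_t len)
    {
            return (uint32_t)(((uint64_t)hash * len) >> 32);
    }

    int main(void)
    {
            printf("%u\n", scale(0x00000000u, 8));  /* 0 */
            printf("%u\n", scale(0x80000000u, 8));  /* 4 */
            printf("%u\n", scale(0xffffffffu, 8));  /* 7 */
            return 0;
    }
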
@@ -4967,10 +5038,13 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
4967 } 5038 }
4968 5039
4969 if (features & NETIF_F_UFO) { 5040 if (features & NETIF_F_UFO) {
4970 if (!(features & NETIF_F_GEN_CSUM)) { 5041 /* maybe split UFO into V4 and V6? */
5042 if (!((features & NETIF_F_GEN_CSUM) ||
5043 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5044 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4971 if (name) 5045 if (name)
4972 printk(KERN_ERR "%s: Dropping NETIF_F_UFO " 5046 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4973 "since no NETIF_F_HW_CSUM feature.\n", 5047 "since no checksum offload features.\n",
4974 name); 5048 name);
4975 features &= ~NETIF_F_UFO; 5049 features &= ~NETIF_F_UFO;
4976 } 5050 }
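
The replacement condition is easy to misread: masking with (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM) alone would be true with either flag set, while comparing the masked value against the full mask demands both. A minimal demonstration with invented flag values:

    #include <stdio.h>

    #define F_IP_CSUM   0x1   /* invented values, for illustration only */
    #define F_IPV6_CSUM 0x2

    int main(void)
    {
            unsigned int features = F_IP_CSUM;   /* only one of the pair set */

            /* true with either bit set: */
            printf("%d\n", !!(features & (F_IP_CSUM | F_IPV6_CSUM)));     /* 1 */
            /* true only with both bits set, as in the hunk above: */
            printf("%d\n", (features & (F_IP_CSUM | F_IPV6_CSUM))
                           == (F_IP_CSUM | F_IPV6_CSUM));                 /* 0 */
            return 0;
    }
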
@@ -5014,9 +5088,9 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5014} 5088}
5015EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5089EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5016 5090
5091#ifdef CONFIG_RPS
5017static int netif_alloc_rx_queues(struct net_device *dev) 5092static int netif_alloc_rx_queues(struct net_device *dev)
5018{ 5093{
5019#ifdef CONFIG_RPS
5020 unsigned int i, count = dev->num_rx_queues; 5094 unsigned int i, count = dev->num_rx_queues;
5021 struct netdev_rx_queue *rx; 5095 struct netdev_rx_queue *rx;
5022 5096
@@ -5029,15 +5103,22 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5029 } 5103 }
5030 dev->_rx = rx; 5104 dev->_rx = rx;
5031 5105
5032 /*
5033 * Set a pointer to first element in the array which holds the
5034 * reference count.
5035 */
5036 for (i = 0; i < count; i++) 5106 for (i = 0; i < count; i++)
5037 rx[i].first = rx; 5107 rx[i].dev = dev;
5038#endif
5039 return 0; 5108 return 0;
5040} 5109}
5110#endif
5111
5112static void netdev_init_one_queue(struct net_device *dev,
5113 struct netdev_queue *queue, void *_unused)
5114{
5115 /* Initialize queue lock */
5116 spin_lock_init(&queue->_xmit_lock);
5117 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5118 queue->xmit_lock_owner = -1;
5119 netdev_queue_numa_node_write(queue, -1);
5120 queue->dev = dev;
5121}
5041 5122
5042static int netif_alloc_netdev_queues(struct net_device *dev) 5123static int netif_alloc_netdev_queues(struct net_device *dev)
5043{ 5124{
@@ -5053,25 +5134,11 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5053 return -ENOMEM; 5134 return -ENOMEM;
5054 } 5135 }
5055 dev->_tx = tx; 5136 dev->_tx = tx;
5056 return 0;
5057}
5058 5137
5059static void netdev_init_one_queue(struct net_device *dev,
5060 struct netdev_queue *queue,
5061 void *_unused)
5062{
5063 queue->dev = dev;
5064
5065 /* Initialize queue lock */
5066 spin_lock_init(&queue->_xmit_lock);
5067 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5068 queue->xmit_lock_owner = -1;
5069}
5070
5071static void netdev_init_queues(struct net_device *dev)
5072{
5073 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5138 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5074 spin_lock_init(&dev->tx_global_lock); 5139 spin_lock_init(&dev->tx_global_lock);
5140
5141 return 0;
5075} 5142}
5076 5143
5077/** 5144/**
@@ -5110,16 +5177,6 @@ int register_netdevice(struct net_device *dev)
5110 5177
5111 dev->iflink = -1; 5178 dev->iflink = -1;
5112 5179
5113 ret = netif_alloc_rx_queues(dev);
5114 if (ret)
5115 goto out;
5116
5117 ret = netif_alloc_netdev_queues(dev);
5118 if (ret)
5119 goto out;
5120
5121 netdev_init_queues(dev);
5122
5123 /* Init, if this function is available */ 5180 /* Init, if this function is available */
5124 if (dev->netdev_ops->ndo_init) { 5181 if (dev->netdev_ops->ndo_init) {
5125 ret = dev->netdev_ops->ndo_init(dev); 5182 ret = dev->netdev_ops->ndo_init(dev);
@@ -5577,10 +5634,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5577 5634
5578 dev->num_tx_queues = queue_count; 5635 dev->num_tx_queues = queue_count;
5579 dev->real_num_tx_queues = queue_count; 5636 dev->real_num_tx_queues = queue_count;
5637 if (netif_alloc_netdev_queues(dev))
5638 goto free_pcpu;
5580 5639
5581#ifdef CONFIG_RPS 5640#ifdef CONFIG_RPS
5582 dev->num_rx_queues = queue_count; 5641 dev->num_rx_queues = queue_count;
5583 dev->real_num_rx_queues = queue_count; 5642 dev->real_num_rx_queues = queue_count;
5643 if (netif_alloc_rx_queues(dev))
5644 goto free_pcpu;
5584#endif 5645#endif
5585 5646
5586 dev->gso_max_size = GSO_MAX_SIZE; 5647 dev->gso_max_size = GSO_MAX_SIZE;
@@ -5597,6 +5658,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5597 5658
5598free_pcpu: 5659free_pcpu:
5599 free_percpu(dev->pcpu_refcnt); 5660 free_percpu(dev->pcpu_refcnt);
5661 kfree(dev->_tx);
5662#ifdef CONFIG_RPS
5663 kfree(dev->_rx);
5664#endif
5665
5600free_p: 5666free_p:
5601 kfree(p); 5667 kfree(p);
5602 return NULL; 5668 return NULL;
@@ -5618,6 +5684,9 @@ void free_netdev(struct net_device *dev)
5618 release_net(dev_net(dev)); 5684 release_net(dev_net(dev));
5619 5685
5620 kfree(dev->_tx); 5686 kfree(dev->_tx);
5687#ifdef CONFIG_RPS
5688 kfree(dev->_rx);
5689#endif
5621 5690
5622 kfree(rcu_dereference_raw(dev->ingress_queue)); 5691 kfree(rcu_dereference_raw(dev->ingress_queue));
5623 5692
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
370 370
371static struct notifier_block dst_dev_notifier = { 371static struct notifier_block dst_dev_notifier = {
372 .notifier_call = dst_dev_event, 372 .notifier_call = dst_dev_event,
373 .priority = -10, /* must be called after other network notifiers */
373}; 374};
374 375
375void __init dst_init(void) 376void __init dst_init(void)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 956a9f4971cb..17741782a345 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -891,6 +891,20 @@ static int ethtool_nway_reset(struct net_device *dev)
891 return dev->ethtool_ops->nway_reset(dev); 891 return dev->ethtool_ops->nway_reset(dev);
892} 892}
893 893
894static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
895{
896 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
897
898 if (!dev->ethtool_ops->get_link)
899 return -EOPNOTSUPP;
900
901 edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
902
903 if (copy_to_user(useraddr, &edata, sizeof(edata)))
904 return -EFAULT;
905 return 0;
906}
907
894static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) 908static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
895{ 909{
896 struct ethtool_eeprom eeprom; 910 struct ethtool_eeprom eeprom;
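
ethtool_get_link() leans on a C99 designated initializer: naming only .cmd zero-fills every other member, so edata.data starts at 0 before the get_link callback runs. A standalone illustration (the 0x0000000a value mirrors ETHTOOL_GLINK as this note understands it):

    #include <stdio.h>

    struct ethtool_value { unsigned int cmd; unsigned int data; };

    int main(void)
    {
            /* a designated initializer zero-fills every unnamed member,
             * so data needs no explicit memset */
            struct ethtool_value edata = { .cmd = 0x0000000a };

            printf("%u %u\n", edata.cmd, edata.data);  /* prints "10 0" */
            return 0;
    }
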
@@ -1171,7 +1185,9 @@ static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
1171 return -EFAULT; 1185 return -EFAULT;
1172 if (edata.data && !(dev->features & NETIF_F_SG)) 1186 if (edata.data && !(dev->features & NETIF_F_SG))
1173 return -EINVAL; 1187 return -EINVAL;
1174 if (edata.data && !(dev->features & NETIF_F_HW_CSUM)) 1188 if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
1189 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
1190 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
1175 return -EINVAL; 1191 return -EINVAL;
1176 return dev->ethtool_ops->set_ufo(dev, edata.data); 1192 return dev->ethtool_ops->set_ufo(dev, edata.data);
1177} 1193}
@@ -1528,8 +1544,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1528 rc = ethtool_nway_reset(dev); 1544 rc = ethtool_nway_reset(dev);
1529 break; 1545 break;
1530 case ETHTOOL_GLINK: 1546 case ETHTOOL_GLINK:
1531 rc = ethtool_get_value(dev, useraddr, ethcmd, 1547 rc = ethtool_get_link(dev, useraddr);
1532 dev->ethtool_ops->get_link);
1533 break; 1548 break;
1534 case ETHTOOL_GEEPROM: 1549 case ETHTOOL_GEEPROM:
1535 rc = ethtool_get_eeprom(dev, useraddr); 1550 rc = ethtool_get_eeprom(dev, useraddr);
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec36b541..e8a6ac411ffb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -37,9 +37,58 @@
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h>
41
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88};
40 89
41/* No hurry in this branch */ 90/* No hurry in this branch */
42static void *__load_pointer(struct sk_buff *skb, int k) 91static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
43{ 92{
44 u8 *ptr = NULL; 93 u8 *ptr = NULL;
45 94
@@ -48,12 +97,12 @@ static void *__load_pointer(struct sk_buff *skb, int k)
48 else if (k >= SKF_LL_OFF) 97 else if (k >= SKF_LL_OFF)
49 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 98 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
50 99
51 if (ptr >= skb->head && ptr < skb_tail_pointer(skb)) 100 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
52 return ptr; 101 return ptr;
53 return NULL; 102 return NULL;
54} 103}
55 104
56static inline void *load_pointer(struct sk_buff *skb, int k, 105static inline void *load_pointer(const struct sk_buff *skb, int k,
57 unsigned int size, void *buffer) 106 unsigned int size, void *buffer)
58{ 107{
59 if (k >= 0) 108 if (k >= 0)
@@ -61,7 +110,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
61 else { 110 else {
62 if (k >= SKF_AD_OFF) 111 if (k >= SKF_AD_OFF)
63 return NULL; 112 return NULL;
64 return __load_pointer(skb, k); 113 return __load_pointer(skb, k, size);
65 } 114 }
66} 115}
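
The added size argument closes an out-of-bounds read: the old test only required ptr to sit below the tail pointer, so a two- or four-byte load starting at the last byte could run past the buffer. A standalone version of the corrected predicate:

    #include <stdio.h>

    /* A load of 'size' bytes at 'ptr' stays within [head, tail) only if
     * ptr >= head && ptr + size <= tail; the old 'ptr < tail' check let
     * a multi-byte read overrun the end. */
    static int in_bounds(const char *head, const char *tail,
                         const char *ptr, unsigned int size)
    {
            return ptr >= head && ptr + size <= tail;
    }

    int main(void)
    {
            char buf[8];

            printf("%d\n", in_bounds(buf, buf + 8, buf + 6, 2));  /* 1 */
            printf("%d\n", in_bounds(buf, buf + 8, buf + 7, 2));  /* 0; the old test passed */
            return 0;
    }
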
67 116
@@ -89,7 +138,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
89 rcu_read_lock_bh(); 138 rcu_read_lock_bh();
90 filter = rcu_dereference_bh(sk->sk_filter); 139 filter = rcu_dereference_bh(sk->sk_filter);
91 if (filter) { 140 if (filter) {
92 unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); 141 unsigned int pkt_len = sk_run_filter(skb, filter->insns);
93 142
94 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 143 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
95 } 144 }
@@ -103,48 +152,52 @@ EXPORT_SYMBOL(sk_filter);
103 * sk_run_filter - run a filter on a socket 152 * sk_run_filter - run a filter on a socket
104 * @skb: buffer to run the filter on 153 * @skb: buffer to run the filter on
105 * @filter: filter to apply 154 * @filter: filter to apply
106 * @flen: length of filter
107 * 155 *
108 * Decode and apply filter instructions to the skb->data. 156 * Decode and apply filter instructions to the skb->data.
109 * Return length to keep, 0 for none. skb is the data we are 157 * Return length to keep, 0 for none. @skb is the data we are
110 * filtering, filter is the array of filter instructions, and 158 * filtering, @filter is the array of filter instructions.
 112 * len is the number of filter blocks in the array. 159 * Because all jumps are guaranteed to be before the last instruction,
 160 * and the last instruction is guaranteed to be a RET, we don't need
 161 * to check flen. (We used to pass the filter length to this function.)
112 */ 162 */
113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 163unsigned int sk_run_filter(const struct sk_buff *skb,
164 const struct sock_filter *fentry)
114{ 165{
115 struct sock_filter *fentry; /* We walk down these */
116 void *ptr; 166 void *ptr;
117 u32 A = 0; /* Accumulator */ 167 u32 A = 0; /* Accumulator */
118 u32 X = 0; /* Index Register */ 168 u32 X = 0; /* Index Register */
119 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ 169 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
120 u32 tmp; 170 u32 tmp;
121 int k; 171 int k;
122 int pc;
123 172
124 /* 173 /*
125 * Process array of filter instructions. 174 * Process array of filter instructions.
126 */ 175 */
127 for (pc = 0; pc < flen; pc++) { 176 for (;; fentry++) {
128 fentry = &filter[pc]; 177#if defined(CONFIG_X86_32)
178#define K (fentry->k)
179#else
180 const u32 K = fentry->k;
181#endif
129 182
130 switch (fentry->code) { 183 switch (fentry->code) {
131 case BPF_S_ALU_ADD_X: 184 case BPF_S_ALU_ADD_X:
132 A += X; 185 A += X;
133 continue; 186 continue;
134 case BPF_S_ALU_ADD_K: 187 case BPF_S_ALU_ADD_K:
135 A += fentry->k; 188 A += K;
136 continue; 189 continue;
137 case BPF_S_ALU_SUB_X: 190 case BPF_S_ALU_SUB_X:
138 A -= X; 191 A -= X;
139 continue; 192 continue;
140 case BPF_S_ALU_SUB_K: 193 case BPF_S_ALU_SUB_K:
141 A -= fentry->k; 194 A -= K;
142 continue; 195 continue;
143 case BPF_S_ALU_MUL_X: 196 case BPF_S_ALU_MUL_X:
144 A *= X; 197 A *= X;
145 continue; 198 continue;
146 case BPF_S_ALU_MUL_K: 199 case BPF_S_ALU_MUL_K:
147 A *= fentry->k; 200 A *= K;
148 continue; 201 continue;
149 case BPF_S_ALU_DIV_X: 202 case BPF_S_ALU_DIV_X:
150 if (X == 0) 203 if (X == 0)
@@ -152,64 +205,64 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
152 A /= X; 205 A /= X;
153 continue; 206 continue;
154 case BPF_S_ALU_DIV_K: 207 case BPF_S_ALU_DIV_K:
155 A /= fentry->k; 208 A = reciprocal_divide(A, K);
156 continue; 209 continue;
157 case BPF_S_ALU_AND_X: 210 case BPF_S_ALU_AND_X:
158 A &= X; 211 A &= X;
159 continue; 212 continue;
160 case BPF_S_ALU_AND_K: 213 case BPF_S_ALU_AND_K:
161 A &= fentry->k; 214 A &= K;
162 continue; 215 continue;
163 case BPF_S_ALU_OR_X: 216 case BPF_S_ALU_OR_X:
164 A |= X; 217 A |= X;
165 continue; 218 continue;
166 case BPF_S_ALU_OR_K: 219 case BPF_S_ALU_OR_K:
167 A |= fentry->k; 220 A |= K;
168 continue; 221 continue;
169 case BPF_S_ALU_LSH_X: 222 case BPF_S_ALU_LSH_X:
170 A <<= X; 223 A <<= X;
171 continue; 224 continue;
172 case BPF_S_ALU_LSH_K: 225 case BPF_S_ALU_LSH_K:
173 A <<= fentry->k; 226 A <<= K;
174 continue; 227 continue;
175 case BPF_S_ALU_RSH_X: 228 case BPF_S_ALU_RSH_X:
176 A >>= X; 229 A >>= X;
177 continue; 230 continue;
178 case BPF_S_ALU_RSH_K: 231 case BPF_S_ALU_RSH_K:
179 A >>= fentry->k; 232 A >>= K;
180 continue; 233 continue;
181 case BPF_S_ALU_NEG: 234 case BPF_S_ALU_NEG:
182 A = -A; 235 A = -A;
183 continue; 236 continue;
184 case BPF_S_JMP_JA: 237 case BPF_S_JMP_JA:
185 pc += fentry->k; 238 fentry += K;
186 continue; 239 continue;
187 case BPF_S_JMP_JGT_K: 240 case BPF_S_JMP_JGT_K:
188 pc += (A > fentry->k) ? fentry->jt : fentry->jf; 241 fentry += (A > K) ? fentry->jt : fentry->jf;
189 continue; 242 continue;
190 case BPF_S_JMP_JGE_K: 243 case BPF_S_JMP_JGE_K:
191 pc += (A >= fentry->k) ? fentry->jt : fentry->jf; 244 fentry += (A >= K) ? fentry->jt : fentry->jf;
192 continue; 245 continue;
193 case BPF_S_JMP_JEQ_K: 246 case BPF_S_JMP_JEQ_K:
194 pc += (A == fentry->k) ? fentry->jt : fentry->jf; 247 fentry += (A == K) ? fentry->jt : fentry->jf;
195 continue; 248 continue;
196 case BPF_S_JMP_JSET_K: 249 case BPF_S_JMP_JSET_K:
197 pc += (A & fentry->k) ? fentry->jt : fentry->jf; 250 fentry += (A & K) ? fentry->jt : fentry->jf;
198 continue; 251 continue;
199 case BPF_S_JMP_JGT_X: 252 case BPF_S_JMP_JGT_X:
200 pc += (A > X) ? fentry->jt : fentry->jf; 253 fentry += (A > X) ? fentry->jt : fentry->jf;
201 continue; 254 continue;
202 case BPF_S_JMP_JGE_X: 255 case BPF_S_JMP_JGE_X:
203 pc += (A >= X) ? fentry->jt : fentry->jf; 256 fentry += (A >= X) ? fentry->jt : fentry->jf;
204 continue; 257 continue;
205 case BPF_S_JMP_JEQ_X: 258 case BPF_S_JMP_JEQ_X:
206 pc += (A == X) ? fentry->jt : fentry->jf; 259 fentry += (A == X) ? fentry->jt : fentry->jf;
207 continue; 260 continue;
208 case BPF_S_JMP_JSET_X: 261 case BPF_S_JMP_JSET_X:
209 pc += (A & X) ? fentry->jt : fentry->jf; 262 fentry += (A & X) ? fentry->jt : fentry->jf;
210 continue; 263 continue;
211 case BPF_S_LD_W_ABS: 264 case BPF_S_LD_W_ABS:
212 k = fentry->k; 265 k = K;
213load_w: 266load_w:
214 ptr = load_pointer(skb, k, 4, &tmp); 267 ptr = load_pointer(skb, k, 4, &tmp);
215 if (ptr != NULL) { 268 if (ptr != NULL) {
@@ -218,7 +271,7 @@ load_w:
218 } 271 }
219 break; 272 break;
220 case BPF_S_LD_H_ABS: 273 case BPF_S_LD_H_ABS:
221 k = fentry->k; 274 k = K;
222load_h: 275load_h:
223 ptr = load_pointer(skb, k, 2, &tmp); 276 ptr = load_pointer(skb, k, 2, &tmp);
224 if (ptr != NULL) { 277 if (ptr != NULL) {
@@ -227,7 +280,7 @@ load_h:
227 } 280 }
228 break; 281 break;
229 case BPF_S_LD_B_ABS: 282 case BPF_S_LD_B_ABS:
230 k = fentry->k; 283 k = K;
231load_b: 284load_b:
232 ptr = load_pointer(skb, k, 1, &tmp); 285 ptr = load_pointer(skb, k, 1, &tmp);
233 if (ptr != NULL) { 286 if (ptr != NULL) {
@@ -242,32 +295,32 @@ load_b:
242 X = skb->len; 295 X = skb->len;
243 continue; 296 continue;
244 case BPF_S_LD_W_IND: 297 case BPF_S_LD_W_IND:
245 k = X + fentry->k; 298 k = X + K;
246 goto load_w; 299 goto load_w;
247 case BPF_S_LD_H_IND: 300 case BPF_S_LD_H_IND:
248 k = X + fentry->k; 301 k = X + K;
249 goto load_h; 302 goto load_h;
250 case BPF_S_LD_B_IND: 303 case BPF_S_LD_B_IND:
251 k = X + fentry->k; 304 k = X + K;
252 goto load_b; 305 goto load_b;
253 case BPF_S_LDX_B_MSH: 306 case BPF_S_LDX_B_MSH:
254 ptr = load_pointer(skb, fentry->k, 1, &tmp); 307 ptr = load_pointer(skb, K, 1, &tmp);
255 if (ptr != NULL) { 308 if (ptr != NULL) {
256 X = (*(u8 *)ptr & 0xf) << 2; 309 X = (*(u8 *)ptr & 0xf) << 2;
257 continue; 310 continue;
258 } 311 }
259 return 0; 312 return 0;
260 case BPF_S_LD_IMM: 313 case BPF_S_LD_IMM:
261 A = fentry->k; 314 A = K;
262 continue; 315 continue;
263 case BPF_S_LDX_IMM: 316 case BPF_S_LDX_IMM:
264 X = fentry->k; 317 X = K;
265 continue; 318 continue;
266 case BPF_S_LD_MEM: 319 case BPF_S_LD_MEM:
267 A = mem[fentry->k]; 320 A = mem[K];
268 continue; 321 continue;
269 case BPF_S_LDX_MEM: 322 case BPF_S_LDX_MEM:
270 X = mem[fentry->k]; 323 X = mem[K];
271 continue; 324 continue;
272 case BPF_S_MISC_TAX: 325 case BPF_S_MISC_TAX:
273 X = A; 326 X = A;
@@ -276,14 +329,14 @@ load_b:
276 A = X; 329 A = X;
277 continue; 330 continue;
278 case BPF_S_RET_K: 331 case BPF_S_RET_K:
279 return fentry->k; 332 return K;
280 case BPF_S_RET_A: 333 case BPF_S_RET_A:
281 return A; 334 return A;
282 case BPF_S_ST: 335 case BPF_S_ST:
283 mem[fentry->k] = A; 336 mem[K] = A;
284 continue; 337 continue;
285 case BPF_S_STX: 338 case BPF_S_STX:
286 mem[fentry->k] = X; 339 mem[K] = X;
287 continue; 340 continue;
288 default: 341 default:
289 WARN_ON(1); 342 WARN_ON(1);
@@ -317,6 +370,12 @@ load_b:
317 return 0; 370 return 0;
318 A = skb->dev->type; 371 A = skb->dev->type;
319 continue; 372 continue;
373 case SKF_AD_RXHASH:
374 A = skb->rxhash;
375 continue;
376 case SKF_AD_CPU:
377 A = raw_smp_processor_id();
378 continue;
320 case SKF_AD_NLATTR: { 379 case SKF_AD_NLATTR: {
321 struct nlattr *nla; 380 struct nlattr *nla;
322 381
@@ -361,6 +420,66 @@ load_b:
361} 420}
362EXPORT_SYMBOL(sk_run_filter); 421EXPORT_SYMBOL(sk_run_filter);
363 422
423/*
 424 * Security:
 425 * A BPF program is able to use 16 cells of memory to store intermediate
 426 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 427 * As we don't want to clear the mem[] array for each packet going through
 428 * sk_run_filter(), we check that a filter loaded by the user never tries
 429 * to read a cell unless it was previously written, and we check all
 430 * branches to be sure a malicious user doesn't try to abuse us.
431 */
432static int check_load_and_stores(struct sock_filter *filter, int flen)
433{
434 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
435 int pc, ret = 0;
436
437 BUILD_BUG_ON(BPF_MEMWORDS > 16);
438 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
439 if (!masks)
440 return -ENOMEM;
441 memset(masks, 0xff, flen * sizeof(*masks));
442
443 for (pc = 0; pc < flen; pc++) {
444 memvalid &= masks[pc];
445
446 switch (filter[pc].code) {
447 case BPF_S_ST:
448 case BPF_S_STX:
449 memvalid |= (1 << filter[pc].k);
450 break;
451 case BPF_S_LD_MEM:
452 case BPF_S_LDX_MEM:
453 if (!(memvalid & (1 << filter[pc].k))) {
454 ret = -EINVAL;
455 goto error;
456 }
457 break;
458 case BPF_S_JMP_JA:
459 /* a jump must set masks on target */
460 masks[pc + 1 + filter[pc].k] &= memvalid;
461 memvalid = ~0;
462 break;
463 case BPF_S_JMP_JEQ_K:
464 case BPF_S_JMP_JEQ_X:
465 case BPF_S_JMP_JGE_K:
466 case BPF_S_JMP_JGE_X:
467 case BPF_S_JMP_JGT_K:
468 case BPF_S_JMP_JGT_X:
469 case BPF_S_JMP_JSET_X:
470 case BPF_S_JMP_JSET_K:
471 /* a jump must set masks on targets */
472 masks[pc + 1 + filter[pc].jt] &= memvalid;
473 masks[pc + 1 + filter[pc].jf] &= memvalid;
474 memvalid = ~0;
475 break;
476 }
477 }
478error:
479 kfree(masks);
480 return ret;
481}
482
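
check_load_and_stores() keeps one bit of state per scratch cell and accepts a load only when every path reaching it has already stored to that cell; the masks[] array is what carries that state across jump targets. The straight-line core of the rule as a standalone toy, with branch handling omitted:

    #include <stdio.h>
    #include <stdint.h>

    enum { ST, LD };
    struct insn { int op; int k; };

    /* Toy memvalid check: bit k set means cell k has been written;
     * a load from an unwritten cell is rejected. */
    static int check(const struct insn *p, int n)
    {
            uint16_t memvalid = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (p[i].op == ST)
                            memvalid |= 1u << p[i].k;
                    else if (!(memvalid & (1u << p[i].k)))
                            return -1;   /* read of an uninitialized cell */
            }
            return 0;
    }

    int main(void)
    {
            struct insn good[] = { {ST, 3}, {LD, 3} };
            struct insn bad[]  = { {LD, 3} };

            printf("%d %d\n", check(good, 2), check(bad, 1));  /* prints "0 -1" */
            return 0;
    }
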
364/** 483/**
365 * sk_chk_filter - verify socket filter code 484 * sk_chk_filter - verify socket filter code
366 * @filter: filter to verify 485 * @filter: filter to verify
@@ -377,7 +496,57 @@ EXPORT_SYMBOL(sk_run_filter);
377 */ 496 */
378int sk_chk_filter(struct sock_filter *filter, int flen) 497int sk_chk_filter(struct sock_filter *filter, int flen)
379{ 498{
380 struct sock_filter *ftest; 499 /*
500 * Valid instructions are initialized to non-0.
501 * Invalid instructions are initialized to 0.
502 */
503 static const u8 codes[] = {
504 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
505 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
506 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
507 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
508 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
509 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
510 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
511 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
512 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
513 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
514 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
515 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
516 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
517 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
518 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
519 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
520 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
521 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
522 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
523 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
524 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
525 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
526 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
527 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
528 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
529 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
530 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
531 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
532 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
533 [BPF_RET|BPF_K] = BPF_S_RET_K,
534 [BPF_RET|BPF_A] = BPF_S_RET_A,
535 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
536 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
537 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
538 [BPF_ST] = BPF_S_ST,
539 [BPF_STX] = BPF_S_STX,
540 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
541 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
542 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
543 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
544 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
545 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
546 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
547 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
548 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
549 };
381 int pc; 550 int pc;
382 551
383 if (flen == 0 || flen > BPF_MAXINSNS) 552 if (flen == 0 || flen > BPF_MAXINSNS)
@@ -385,136 +554,31 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
385 554
386 /* check the filter code now */ 555 /* check the filter code now */
387 for (pc = 0; pc < flen; pc++) { 556 for (pc = 0; pc < flen; pc++) {
388 ftest = &filter[pc]; 557 struct sock_filter *ftest = &filter[pc];
389 558 u16 code = ftest->code;
390 /* Only allow valid instructions */
391 switch (ftest->code) {
392 case BPF_ALU|BPF_ADD|BPF_K:
393 ftest->code = BPF_S_ALU_ADD_K;
394 break;
395 case BPF_ALU|BPF_ADD|BPF_X:
396 ftest->code = BPF_S_ALU_ADD_X;
397 break;
398 case BPF_ALU|BPF_SUB|BPF_K:
399 ftest->code = BPF_S_ALU_SUB_K;
400 break;
401 case BPF_ALU|BPF_SUB|BPF_X:
402 ftest->code = BPF_S_ALU_SUB_X;
403 break;
404 case BPF_ALU|BPF_MUL|BPF_K:
405 ftest->code = BPF_S_ALU_MUL_K;
406 break;
407 case BPF_ALU|BPF_MUL|BPF_X:
408 ftest->code = BPF_S_ALU_MUL_X;
409 break;
410 case BPF_ALU|BPF_DIV|BPF_X:
411 ftest->code = BPF_S_ALU_DIV_X;
412 break;
413 case BPF_ALU|BPF_AND|BPF_K:
414 ftest->code = BPF_S_ALU_AND_K;
415 break;
416 case BPF_ALU|BPF_AND|BPF_X:
417 ftest->code = BPF_S_ALU_AND_X;
418 break;
419 case BPF_ALU|BPF_OR|BPF_K:
420 ftest->code = BPF_S_ALU_OR_K;
421 break;
422 case BPF_ALU|BPF_OR|BPF_X:
423 ftest->code = BPF_S_ALU_OR_X;
424 break;
425 case BPF_ALU|BPF_LSH|BPF_K:
426 ftest->code = BPF_S_ALU_LSH_K;
427 break;
428 case BPF_ALU|BPF_LSH|BPF_X:
429 ftest->code = BPF_S_ALU_LSH_X;
430 break;
431 case BPF_ALU|BPF_RSH|BPF_K:
432 ftest->code = BPF_S_ALU_RSH_K;
433 break;
434 case BPF_ALU|BPF_RSH|BPF_X:
435 ftest->code = BPF_S_ALU_RSH_X;
436 break;
437 case BPF_ALU|BPF_NEG:
438 ftest->code = BPF_S_ALU_NEG;
439 break;
440 case BPF_LD|BPF_W|BPF_ABS:
441 ftest->code = BPF_S_LD_W_ABS;
442 break;
443 case BPF_LD|BPF_H|BPF_ABS:
444 ftest->code = BPF_S_LD_H_ABS;
445 break;
446 case BPF_LD|BPF_B|BPF_ABS:
447 ftest->code = BPF_S_LD_B_ABS;
448 break;
449 case BPF_LD|BPF_W|BPF_LEN:
450 ftest->code = BPF_S_LD_W_LEN;
451 break;
452 case BPF_LD|BPF_W|BPF_IND:
453 ftest->code = BPF_S_LD_W_IND;
454 break;
455 case BPF_LD|BPF_H|BPF_IND:
456 ftest->code = BPF_S_LD_H_IND;
457 break;
458 case BPF_LD|BPF_B|BPF_IND:
459 ftest->code = BPF_S_LD_B_IND;
460 break;
461 case BPF_LD|BPF_IMM:
462 ftest->code = BPF_S_LD_IMM;
463 break;
464 case BPF_LDX|BPF_W|BPF_LEN:
465 ftest->code = BPF_S_LDX_W_LEN;
466 break;
467 case BPF_LDX|BPF_B|BPF_MSH:
468 ftest->code = BPF_S_LDX_B_MSH;
469 break;
470 case BPF_LDX|BPF_IMM:
471 ftest->code = BPF_S_LDX_IMM;
472 break;
473 case BPF_MISC|BPF_TAX:
474 ftest->code = BPF_S_MISC_TAX;
475 break;
476 case BPF_MISC|BPF_TXA:
477 ftest->code = BPF_S_MISC_TXA;
478 break;
479 case BPF_RET|BPF_K:
480 ftest->code = BPF_S_RET_K;
481 break;
482 case BPF_RET|BPF_A:
483 ftest->code = BPF_S_RET_A;
484 break;
485 559
560 if (code >= ARRAY_SIZE(codes))
561 return -EINVAL;
562 code = codes[code];
563 if (!code)
564 return -EINVAL;
486 /* Some instructions need special checks */ 565 /* Some instructions need special checks */
487 566 switch (code) {
567 case BPF_S_ALU_DIV_K:
488 /* check for division by zero */ 568 /* check for division by zero */
489 case BPF_ALU|BPF_DIV|BPF_K:
490 if (ftest->k == 0) 569 if (ftest->k == 0)
491 return -EINVAL; 570 return -EINVAL;
492 ftest->code = BPF_S_ALU_DIV_K; 571 ftest->k = reciprocal_value(ftest->k);
493 break;
494
495 /* check for invalid memory addresses */
496 case BPF_LD|BPF_MEM:
497 if (ftest->k >= BPF_MEMWORDS)
498 return -EINVAL;
499 ftest->code = BPF_S_LD_MEM;
500 break;
501 case BPF_LDX|BPF_MEM:
502 if (ftest->k >= BPF_MEMWORDS)
503 return -EINVAL;
504 ftest->code = BPF_S_LDX_MEM;
505 break;
506 case BPF_ST:
507 if (ftest->k >= BPF_MEMWORDS)
508 return -EINVAL;
509 ftest->code = BPF_S_ST;
510 break; 572 break;
511 case BPF_STX: 573 case BPF_S_LD_MEM:
574 case BPF_S_LDX_MEM:
575 case BPF_S_ST:
576 case BPF_S_STX:
577 /* check for invalid memory addresses */
512 if (ftest->k >= BPF_MEMWORDS) 578 if (ftest->k >= BPF_MEMWORDS)
513 return -EINVAL; 579 return -EINVAL;
514 ftest->code = BPF_S_STX;
515 break; 580 break;
516 581 case BPF_S_JMP_JA:
517 case BPF_JMP|BPF_JA:
518 /* 582 /*
519 * Note, the large ftest->k might cause loops. 583 * Note, the large ftest->k might cause loops.
520 * Compare this with conditional jumps below, 584 * Compare this with conditional jumps below,
@@ -522,40 +586,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
522 */ 586 */
523 if (ftest->k >= (unsigned)(flen-pc-1)) 587 if (ftest->k >= (unsigned)(flen-pc-1))
524 return -EINVAL; 588 return -EINVAL;
525 ftest->code = BPF_S_JMP_JA;
526 break;
527
528 case BPF_JMP|BPF_JEQ|BPF_K:
529 ftest->code = BPF_S_JMP_JEQ_K;
530 break;
531 case BPF_JMP|BPF_JEQ|BPF_X:
532 ftest->code = BPF_S_JMP_JEQ_X;
533 break;
534 case BPF_JMP|BPF_JGE|BPF_K:
535 ftest->code = BPF_S_JMP_JGE_K;
536 break; 589 break;
537 case BPF_JMP|BPF_JGE|BPF_X:
538 ftest->code = BPF_S_JMP_JGE_X;
539 break;
540 case BPF_JMP|BPF_JGT|BPF_K:
541 ftest->code = BPF_S_JMP_JGT_K;
542 break;
543 case BPF_JMP|BPF_JGT|BPF_X:
544 ftest->code = BPF_S_JMP_JGT_X;
545 break;
546 case BPF_JMP|BPF_JSET|BPF_K:
547 ftest->code = BPF_S_JMP_JSET_K;
548 break;
549 case BPF_JMP|BPF_JSET|BPF_X:
550 ftest->code = BPF_S_JMP_JSET_X;
551 break;
552
553 default:
554 return -EINVAL;
555 }
556
557 /* for conditionals both must be safe */
558 switch (ftest->code) {
559 case BPF_S_JMP_JEQ_K: 590 case BPF_S_JMP_JEQ_K:
560 case BPF_S_JMP_JEQ_X: 591 case BPF_S_JMP_JEQ_X:
561 case BPF_S_JMP_JGE_K: 592 case BPF_S_JMP_JGE_K:
@@ -564,42 +595,36 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
564 case BPF_S_JMP_JGT_X: 595 case BPF_S_JMP_JGT_X:
565 case BPF_S_JMP_JSET_X: 596 case BPF_S_JMP_JSET_X:
566 case BPF_S_JMP_JSET_K: 597 case BPF_S_JMP_JSET_K:
598 /* for conditionals both must be safe */
567 if (pc + ftest->jt + 1 >= flen || 599 if (pc + ftest->jt + 1 >= flen ||
568 pc + ftest->jf + 1 >= flen) 600 pc + ftest->jf + 1 >= flen)
569 return -EINVAL; 601 return -EINVAL;
602 break;
570 } 603 }
604 ftest->code = code;
571 } 605 }
572 606
573 /* last instruction must be a RET code */ 607 /* last instruction must be a RET code */
574 switch (filter[flen - 1].code) { 608 switch (filter[flen - 1].code) {
575 case BPF_S_RET_K: 609 case BPF_S_RET_K:
576 case BPF_S_RET_A: 610 case BPF_S_RET_A:
577 return 0; 611 return check_load_and_stores(filter, flen);
578 break; 612 }
579 default: 613 return -EINVAL;
580 return -EINVAL;
581 }
582} 614}
583EXPORT_SYMBOL(sk_chk_filter); 615EXPORT_SYMBOL(sk_chk_filter);
584 616
585/** 617/**
586 * sk_filter_rcu_release: Release a socket filter by rcu_head 618 * sk_filter_release_rcu - Release a socket filter by rcu_head
587 * @rcu: rcu_head that contains the sk_filter to free 619 * @rcu: rcu_head that contains the sk_filter to free
588 */ 620 */
589static void sk_filter_rcu_release(struct rcu_head *rcu) 621void sk_filter_release_rcu(struct rcu_head *rcu)
590{ 622{
591 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 623 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
592 624
593 sk_filter_release(fp); 625 kfree(fp);
594}
595
596static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
597{
598 unsigned int size = sk_filter_len(fp);
599
600 atomic_sub(size, &sk->sk_omem_alloc);
601 call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
602} 626}
627EXPORT_SYMBOL(sk_filter_release_rcu);
603 628
604/** 629/**
605 * sk_attach_filter - attach a socket filter 630 * sk_attach_filter - attach a socket filter
@@ -643,7 +668,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
643 rcu_assign_pointer(sk->sk_filter, fp); 668 rcu_assign_pointer(sk->sk_filter, fp);
644 669
645 if (old_fp) 670 if (old_fp)
646 sk_filter_delayed_uncharge(sk, old_fp); 671 sk_filter_uncharge(sk, old_fp);
647 return 0; 672 return 0;
648} 673}
649EXPORT_SYMBOL_GPL(sk_attach_filter); 674EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -657,7 +682,7 @@ int sk_detach_filter(struct sock *sk)
657 sock_owned_by_user(sk)); 682 sock_owned_by_user(sk));
658 if (filter) { 683 if (filter) {
659 rcu_assign_pointer(sk->sk_filter, NULL); 684 rcu_assign_pointer(sk->sk_filter, NULL);
660 sk_filter_delayed_uncharge(sk, filter); 685 sk_filter_uncharge(sk, filter);
661 ret = 0; 686 ret = 0;
662 } 687 }
663 return ret; 688 return ret;
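
The BPF_S_ALU_DIV_K pair of changes trades a per-packet integer division for a multiply: sk_chk_filter() converts the constant divisor once with reciprocal_value(), and sk_run_filter() then computes A/K via reciprocal_divide(). A standalone sketch in the shape of lib/reciprocal_div.c of this period (exactness across the full u32 range is not claimed for this simplified form):

    #include <stdio.h>
    #include <stdint.h>

    /* Precompute roughly 2^32 / k so that division by the constant k
     * becomes a multiply and a shift. */
    static uint32_t reciprocal_value(uint32_t k)
    {
            uint64_t val = (1ULL << 32) + (k - 1);
            return (uint32_t)(val / k);
    }

    static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);
    }

    int main(void)
    {
            uint32_t k = 7, r = reciprocal_value(k);
            uint32_t a = 1000;

            printf("%u %u\n", a / k, reciprocal_divide(a, r));  /* prints "142 142" */
            return 0;
    }
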
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a5ff5a89f376..85e8b5326dd6 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -706,21 +706,24 @@ static struct attribute *rx_queue_default_attrs[] = {
706static void rx_queue_release(struct kobject *kobj) 706static void rx_queue_release(struct kobject *kobj)
707{ 707{
708 struct netdev_rx_queue *queue = to_rx_queue(kobj); 708 struct netdev_rx_queue *queue = to_rx_queue(kobj);
709 struct netdev_rx_queue *first = queue->first;
710 struct rps_map *map; 709 struct rps_map *map;
711 struct rps_dev_flow_table *flow_table; 710 struct rps_dev_flow_table *flow_table;
712 711
713 712
714 map = rcu_dereference_raw(queue->rps_map); 713 map = rcu_dereference_raw(queue->rps_map);
715 if (map) 714 if (map) {
715 RCU_INIT_POINTER(queue->rps_map, NULL);
716 call_rcu(&map->rcu, rps_map_release); 716 call_rcu(&map->rcu, rps_map_release);
717 }
717 718
718 flow_table = rcu_dereference_raw(queue->rps_flow_table); 719 flow_table = rcu_dereference_raw(queue->rps_flow_table);
719 if (flow_table) 720 if (flow_table) {
721 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
720 call_rcu(&flow_table->rcu, rps_dev_flow_table_release); 722 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
723 }
721 724
722 if (atomic_dec_and_test(&first->count)) 725 memset(kobj, 0, sizeof(*kobj));
723 kfree(first); 726 dev_put(queue->dev);
724} 727}
725 728
726static struct kobj_type rx_queue_ktype = { 729static struct kobj_type rx_queue_ktype = {
@@ -732,7 +735,6 @@ static struct kobj_type rx_queue_ktype = {
732static int rx_queue_add_kobject(struct net_device *net, int index) 735static int rx_queue_add_kobject(struct net_device *net, int index)
733{ 736{
734 struct netdev_rx_queue *queue = net->_rx + index; 737 struct netdev_rx_queue *queue = net->_rx + index;
735 struct netdev_rx_queue *first = queue->first;
736 struct kobject *kobj = &queue->kobj; 738 struct kobject *kobj = &queue->kobj;
737 int error = 0; 739 int error = 0;
738 740
@@ -745,14 +747,16 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
745 } 747 }
746 748
747 kobject_uevent(kobj, KOBJ_ADD); 749 kobject_uevent(kobj, KOBJ_ADD);
748 atomic_inc(&first->count); 750 dev_hold(queue->dev);
749 751
750 return error; 752 return error;
751} 753}
754#endif /* CONFIG_RPS */
752 755
753int 756int
754net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num) 757net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
755{ 758{
759#ifdef CONFIG_RPS
756 int i; 760 int i;
757 int error = 0; 761 int error = 0;
758 762
@@ -768,23 +772,422 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 		kobject_put(&net->_rx[i].kobj);
 
 	return error;
+#else
+	return 0;
+#endif
+}
+
+#ifdef CONFIG_XPS
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct netdev_queue_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct netdev_queue *queue,
+	    struct netdev_queue_attribute *attr, char *buf);
+	ssize_t (*store)(struct netdev_queue *queue,
+	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_netdev_queue_attr(_attr) container_of(_attr,		\
+    struct netdev_queue_attribute, attr)
+
+#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
+
+static ssize_t netdev_queue_attr_show(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
+{
+	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+
+	if (!attribute->show)
+		return -EIO;
+
+	return attribute->show(queue, attribute, buf);
+}
+
+static ssize_t netdev_queue_attr_store(struct kobject *kobj,
+				       struct attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+
+	if (!attribute->store)
+		return -EIO;
+
+	return attribute->store(queue, attribute, buf, count);
+}
+
+static const struct sysfs_ops netdev_queue_sysfs_ops = {
+	.show = netdev_queue_attr_show,
+	.store = netdev_queue_attr_store,
+};
+
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+	struct net_device *dev = queue->dev;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		if (queue == &dev->_tx[i])
+			break;
+
+	BUG_ON(i >= dev->num_tx_queues);
+
+	return i;
 }
 
-static int rx_queue_register_kobjects(struct net_device *net)
+
+static ssize_t show_xps_map(struct netdev_queue *queue,
+			    struct netdev_queue_attribute *attribute, char *buf)
 {
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	cpumask_var_t mask;
+	unsigned long index;
+	size_t len = 0;
+	int i;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	index = get_netdev_queue_index(queue);
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			struct xps_map *map =
+			    rcu_dereference(dev_maps->cpu_map[i]);
+			if (map) {
+				int j;
+				for (j = 0; j < map->len; j++) {
+					if (map->queues[j] == index) {
+						cpumask_set_cpu(i, mask);
+						break;
+					}
+				}
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+	if (PAGE_SIZE - len < 3) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+
+	free_cpumask_var(mask);
+	len += sprintf(buf + len, "\n");
+	return len;
+}
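
With the kobject registered as "tx-%u" inside the per-device "queues" kset (see netdev_queue_add_kobject() further down), the attribute should surface as /sys/class/net/<dev>/queues/tx-N/xps_cpus; the exact path is inferred from the code, not stated in the patch. A userspace sketch that writes a CPU mask and reads it back:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Path assumed from the kset/kobject names above ("queues", "tx-%u"). */
    	const char *path = "/sys/class/net/eth0/queues/tx-0/xps_cpus";
    	char buf[64];
    	ssize_t n;
    	int fd = open(path, O_RDWR);

    	if (fd < 0)
    		return 1;
    	if (write(fd, "f\n", 2) < 0)		/* CPUs 0-3: bitmap 0xf */
    		perror("write");
    	if (lseek(fd, 0, SEEK_SET) == 0 &&
    	    (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
    		buf[n] = '\0';
    		printf("xps_cpus = %s", buf);	/* echoed back by show_xps_map() */
    	}
    	close(fd);
    	return 0;
    }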
+
+static void xps_map_release(struct rcu_head *rcu)
+{
+	struct xps_map *map = container_of(rcu, struct xps_map, rcu);
+
+	kfree(map);
+}
+
+static void xps_dev_maps_release(struct rcu_head *rcu)
+{
+	struct xps_dev_maps *dev_maps =
+	    container_of(rcu, struct xps_dev_maps, rcu);
+
+	kfree(dev_maps);
+}
+
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static ssize_t store_xps_map(struct netdev_queue *queue,
+			     struct netdev_queue_attribute *attribute,
+			     const char *buf, size_t len)
+{
+	struct net_device *dev = queue->dev;
+	cpumask_var_t mask;
+	int err, i, cpu, pos, map_len, alloc_len, need_set;
+	unsigned long index;
+	struct xps_map *map, *new_map;
+	struct xps_dev_maps *dev_maps, *new_dev_maps;
+	int nonempty = 0;
+	int numa_node = -2;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	index = get_netdev_queue_index(queue);
+
+	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+	if (err) {
+		free_cpumask_var(mask);
+		return err;
+	}
+
+	new_dev_maps = kzalloc(max_t(unsigned,
+	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
+	if (!new_dev_maps) {
+		free_cpumask_var(mask);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&xps_map_mutex);
+
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		new_map = map;
+		if (map) {
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+			map_len = map->len;
+			alloc_len = map->alloc_len;
+		} else
+			pos = map_len = alloc_len = 0;
+
+		need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
+#ifdef CONFIG_NUMA
+		if (need_set) {
+			if (numa_node == -2)
+				numa_node = cpu_to_node(cpu);
+			else if (numa_node != cpu_to_node(cpu))
+				numa_node = -1;
+		}
+#endif
+		if (need_set && pos >= map_len) {
+			/* Need to add queue to this CPU's map */
+			if (map_len >= alloc_len) {
+				alloc_len = alloc_len ?
+				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
+				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+						       GFP_KERNEL,
+						       cpu_to_node(cpu));
+				if (!new_map)
+					goto error;
+				new_map->alloc_len = alloc_len;
+				for (i = 0; i < map_len; i++)
+					new_map->queues[i] = map->queues[i];
+				new_map->len = map_len;
+			}
+			new_map->queues[new_map->len++] = index;
+		} else if (!need_set && pos < map_len) {
+			/* Need to remove queue from this CPU's map */
+			if (map_len > 1)
+				new_map->queues[pos] =
+				    new_map->queues[--new_map->len];
+			else
+				new_map = NULL;
+		}
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+	}
+
+	/* Cleanup old maps */
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+			call_rcu(&map->rcu, xps_map_release);
+		if (new_dev_maps->cpu_map[cpu])
+			nonempty = 1;
+	}
+
+	if (nonempty)
+		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+	else {
+		kfree(new_dev_maps);
+		rcu_assign_pointer(dev->xps_maps, NULL);
+	}
+
+	if (dev_maps)
+		call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+
+	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : -1);
+
+	mutex_unlock(&xps_map_mutex);
+
+	free_cpumask_var(mask);
+	return len;
+
+error:
+	mutex_unlock(&xps_map_mutex);
+
+	if (new_dev_maps)
+		for_each_possible_cpu(i)
+			kfree(rcu_dereference_protected(
+				new_dev_maps->cpu_map[i],
+				1));
+	kfree(new_dev_maps);
+	free_cpumask_var(mask);
+	return -ENOMEM;
+}
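
store_xps_map() is an instance of the mutex-serialised RCU update pattern: build a replacement table off to the side, publish it, and defer freeing the old one. The xmap_dereference() macro lets the writer load the current table without rcu_read_lock(), because holding the mutex pins it. Reduced to its skeleton, with hypothetical my_* names:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_table {
    	struct rcu_head rcu;
    	/* ... per-CPU queue map, etc ... */
    };

    static DEFINE_MUTEX(my_mutex);
    static struct my_table __rcu *my_current;

    static void my_table_free_rcu(struct rcu_head *head)
    {
    	kfree(container_of(head, struct my_table, rcu));
    }

    static void my_publish(struct my_table *newt)
    {
    	struct my_table *old;

    	mutex_lock(&my_mutex);
    	/* The mutex pins the table: no rcu_read_lock() needed here. */
    	old = rcu_dereference_protected(my_current,
    					lockdep_is_held(&my_mutex));
    	rcu_assign_pointer(my_current, newt);
    	mutex_unlock(&my_mutex);

    	if (old)
    		call_rcu(&old->rcu, my_table_free_rcu);
    }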
+
+static struct netdev_queue_attribute xps_cpus_attribute =
+    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+
+static struct attribute *netdev_queue_default_attrs[] = {
+	&xps_cpus_attribute.attr,
+	NULL
+};
+
+static void netdev_queue_release(struct kobject *kobj)
+{
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	unsigned long index;
+	int i, pos, nonempty = 0;
+
+	index = get_netdev_queue_index(queue);
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			map = xmap_dereference(dev_maps->cpu_map[i]);
+			if (!map)
+				continue;
+
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+
+			if (pos < map->len) {
+				if (map->len > 1)
+					map->queues[pos] =
+					    map->queues[--map->len];
+				else {
+					RCU_INIT_POINTER(dev_maps->cpu_map[i],
+					    NULL);
+					call_rcu(&map->rcu, xps_map_release);
+					map = NULL;
+				}
+			}
+			if (map)
+				nonempty = 1;
+		}
+
+		if (!nonempty) {
+			RCU_INIT_POINTER(dev->xps_maps, NULL);
+			call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+		}
+	}
+
+	mutex_unlock(&xps_map_mutex);
+
+	memset(kobj, 0, sizeof(*kobj));
+	dev_put(queue->dev);
+}
+
+static struct kobj_type netdev_queue_ktype = {
+	.sysfs_ops = &netdev_queue_sysfs_ops,
+	.release = netdev_queue_release,
+	.default_attrs = netdev_queue_default_attrs,
+};
+
+static int netdev_queue_add_kobject(struct net_device *net, int index)
+{
+	struct netdev_queue *queue = net->_tx + index;
+	struct kobject *kobj = &queue->kobj;
+	int error = 0;
+
+	kobj->kset = net->queues_kset;
+	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+	    "tx-%u", index);
+	if (error) {
+		kobject_put(kobj);
+		return error;
+	}
+
+	kobject_uevent(kobj, KOBJ_ADD);
+	dev_hold(queue->dev);
+
+	return error;
+}
+#endif /* CONFIG_XPS */
+
+int
+netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+{
+#ifdef CONFIG_XPS
+	int i;
+	int error = 0;
+
+	for (i = old_num; i < new_num; i++) {
+		error = netdev_queue_add_kobject(net, i);
+		if (error) {
+			new_num = old_num;
+			break;
+		}
+	}
+
+	while (--i >= new_num)
+		kobject_put(&net->_tx[i].kobj);
+
+	return error;
+#else
+	return 0;
+#endif
+}
+
+static int register_queue_kobjects(struct net_device *net)
+{
+	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
+
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	net->queues_kset = kset_create_and_add("queues",
 	    NULL, &net->dev.kobj);
 	if (!net->queues_kset)
 		return -ENOMEM;
-	return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+#endif
+
+#ifdef CONFIG_RPS
+	real_rx = net->real_num_rx_queues;
+#endif
+	real_tx = net->real_num_tx_queues;
+
+	error = net_rx_queue_update_kobjects(net, 0, real_rx);
+	if (error)
+		goto error;
+	rxq = real_rx;
+
+	error = netdev_queue_update_kobjects(net, 0, real_tx);
+	if (error)
+		goto error;
+	txq = real_tx;
+
+	return 0;
+
+error:
+	netdev_queue_update_kobjects(net, txq, 0);
+	net_rx_queue_update_kobjects(net, rxq, 0);
+	return error;
 }
 
-static void rx_queue_remove_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *net)
 {
-	net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
+	int real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_RPS
+	real_rx = net->real_num_rx_queues;
+#endif
+	real_tx = net->real_num_tx_queues;
+
+	net_rx_queue_update_kobjects(net, real_rx, 0);
+	netdev_queue_update_kobjects(net, real_tx, 0);
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	kset_unregister(net->queues_kset);
+#endif
 }
-#endif /* CONFIG_RPS */
 
 static const void *net_current_ns(void)
 {
@@ -883,9 +1286,7 @@ void netdev_unregister_kobject(struct net_device * net)
 
 	kobject_get(&dev->kobj);
 
-#ifdef CONFIG_RPS
-	rx_queue_remove_kobjects(net);
-#endif
+	remove_queue_kobjects(net);
 
 	device_del(dev);
 }
@@ -924,13 +1325,11 @@ int netdev_register_kobject(struct net_device *net)
 	if (error)
 		return error;
 
-#ifdef CONFIG_RPS
-	error = rx_queue_register_kobjects(net);
+	error = register_queue_kobjects(net);
 	if (error) {
 		device_del(dev);
 		return error;
 	}
-#endif
 
 	return error;
 }
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 778e1571548d..bd7751ec1c4d 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,8 +4,8 @@
 int netdev_kobject_init(void);
 int netdev_register_kobject(struct net_device *);
 void netdev_unregister_kobject(struct net_device *);
-#ifdef CONFIG_RPS
 int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
-#endif
+int netdev_queue_update_kobjects(struct net_device *net,
+				 int old_num, int new_num);
 
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffac3af0..ee38acb6d463 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
 
 	local_irq_save(flags);
 	__netif_tx_lock(txq, smp_processor_id());
-	if (netif_tx_queue_stopped(txq) ||
-	    netif_tx_queue_frozen(txq) ||
+	if (netif_tx_queue_frozen_or_stopped(txq) ||
 	    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 		skb_queue_head(&npinfo->txq, skb);
 		__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fbce4b05a53e..18fe20dacc60 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -378,6 +378,7 @@ struct pktgen_dev {
 
 	u16 queue_map_min;
 	u16 queue_map_max;
+	__u32 skb_priority;	/* skb priority field */
 	int node;		/* Memory node */
 
 #ifdef CONFIG_XFRM
@@ -394,6 +395,8 @@ struct pktgen_hdr {
 	__be32 tv_usec;
 };
 
+static bool pktgen_exiting __read_mostly;
+
 struct pktgen_thread {
 	spinlock_t if_lock;		/* for list of devices */
 	struct list_head if_list;	/* All device here */
@@ -547,6 +550,10 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 		   pkt_dev->queue_map_min,
 		   pkt_dev->queue_map_max);
 
+	if (pkt_dev->skb_priority)
+		seq_printf(seq, "     skb_priority: %u\n",
+			   pkt_dev->skb_priority);
+
 	if (pkt_dev->flags & F_IPV6) {
 		char b1[128], b2[128], b3[128];
 		fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
@@ -887,7 +894,7 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;
 
 	if (debug) {
-		size_t copy = min(count, 1023);
+		size_t copy = min_t(size_t, count, 1023);
 		char tb[copy + 1];
 		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
@@ -1711,6 +1718,18 @@ static ssize_t pktgen_if_write(struct file *file,
 		return count;
 	}
 
+	if (!strcmp(name, "skb_priority")) {
+		len = num_arg(&user_buffer[i], 9, &value);
+		if (len < 0)
+			return len;
+
+		i += len;
+		pkt_dev->skb_priority = value;
+		sprintf(pg_result, "OK: skb_priority=%i",
+			pkt_dev->skb_priority);
+		return count;
+	}
+
 	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
 	return -EINVAL;
 }
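
For completeness, the new parameter is driven like any other pktgen knob: a keyword/value line written to the per-device proc file. A small illustrative writer (the device name eth0 is an assumption; pktgen exposes one file per attached interface under /proc/net/pktgen):

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/proc/net/pktgen/eth0", "w");

    	if (!f)
    		return 1;
    	fprintf(f, "skb_priority 4\n");	/* parsed by pktgen_if_write() above */
    	fclose(f);
    	return 0;
    }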
@@ -2612,8 +2631,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
 
@@ -2641,6 +2660,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
+	prefetchw(skb->data);
 
 	skb_reserve(skb, datalen);
 
2646 2666
@@ -2671,6 +2691,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2671 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2691 skb->transport_header = skb->network_header + sizeof(struct iphdr);
2672 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2692 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
2673 skb_set_queue_mapping(skb, queue_map); 2693 skb_set_queue_mapping(skb, queue_map);
2694 skb->priority = pkt_dev->skb_priority;
2695
2674 iph = ip_hdr(skb); 2696 iph = ip_hdr(skb);
2675 udph = udp_hdr(skb); 2697 udph = udp_hdr(skb);
2676 2698
@@ -2976,8 +2998,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	skb = __netdev_alloc_skb(odev,
 				 pkt_dev->cur_pkt_size + 64
@@ -2986,6 +3008,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
+	prefetchw(skb->data);
 
 	skb_reserve(skb, 16);
 
@@ -3016,6 +3039,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
 	skb_set_queue_mapping(skb, queue_map);
+	skb->priority = pkt_dev->skb_priority;
 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -3431,11 +3455,6 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 
 	remove_proc_entry(t->tsk->comm, pg_proc_dir);
 
-	mutex_lock(&pktgen_thread_lock);
-
-	list_del(&t->th_list);
-
-	mutex_unlock(&pktgen_thread_lock);
 }
 
 static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3510,7 +3529,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
@@ -3582,6 +3601,8 @@ static int pktgen_thread_worker(void *arg)
 		pkt_dev = next_to_run(t);
 
 		if (unlikely(!pkt_dev && t->control == 0)) {
+			if (pktgen_exiting)
+				break;
 			wait_event_interruptible_timeout(t->queue,
 							 t->control != 0,
 							 HZ/10);
@@ -3634,6 +3655,13 @@ static int pktgen_thread_worker(void *arg)
 	pr_debug("%s removing thread\n", t->tsk->comm);
 	pktgen_rem_thread(t);
 
+	/* Wait for kthread_stop */
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
 	return 0;
 }
 
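
The added loop implements the usual contract between a kthread and its creator: the thread must park until kthread_stop() is called, because kthread_stop() expects to find the task still alive when it runs. A minimal sketch of the pairing (names illustrative):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_worker(void *arg)
    {
    	while (!kthread_should_stop()) {
    		/* ... do work ... */
    		schedule_timeout_interruptible(HZ / 10);
    	}
    	return 0;	/* only after kthread_stop() was requested */
    }

    static int my_run_once(void)
    {
    	struct task_struct *tsk = kthread_run(my_worker, NULL, "my_worker");

    	if (IS_ERR(tsk))
    		return PTR_ERR(tsk);
    	/* ... later: */
    	return kthread_stop(tsk);	/* blocks until my_worker() returns */
    }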
@@ -3908,6 +3936,7 @@ static void __exit pg_cleanup(void)
 	struct list_head *q, *n;
 
 	/* Stop all interfaces & threads */
+	pktgen_exiting = true;
 
 	list_for_each_safe(q, n, &pktgen_threads) {
 		t = list_entry(q, struct pktgen_thread, th_list);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 7552495aff7a..182236b2510a 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -33,6 +33,7 @@
  * Note : Don't forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
 		      unsigned int nr_table_entries)
@@ -45,9 +46,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
 	if (lopt_size > PAGE_SIZE)
-		lopt = __vmalloc(lopt_size,
-			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+		lopt = vzalloc(lopt_size);
 	else
 		lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
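
vzalloc(size) is simply shorthand for the removed __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL). A sketch of the resulting pick-by-size idiom, including the matching free (helper names are illustrative):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *table_alloc(size_t size)
    {
    	if (size > PAGE_SIZE)
    		return vzalloc(size);		/* zeroed vmalloc() */
    	return kzalloc(size, GFP_KERNEL);
    }

    static void table_free(void *p)
    {
    	if (is_vmalloc_addr(p))
    		vfree(p);
    	else
    		kfree(p);
    }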
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbdd..750db57f3bb3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,106 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
 	if (!ops)
 		return 0;
 
-	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-	       nlmsg_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
+	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
 
 	if (ops->get_size)
 		/* IFLA_INFO_DATA + nested data */
-		size += nlmsg_total_size(sizeof(struct nlattr)) +
+		size += nla_total_size(sizeof(struct nlattr)) +
 			ops->get_size(dev);
 
 	if (ops->get_xstats_size)
-		size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+		/* IFLA_INFO_XSTATS */
+		size += nla_total_size(ops->get_xstats_size(dev));
+
+	return size;
+}
+
+static LIST_HEAD(rtnl_af_ops);
+
+static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
+{
+	const struct rtnl_af_ops *ops;
+
+	list_for_each_entry(ops, &rtnl_af_ops, list) {
+		if (ops->family == family)
+			return ops;
+	}
+
+	return NULL;
+}
+
+/**
+ * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * The caller must hold the rtnl_mutex.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_af_register(struct rtnl_af_ops *ops)
+{
+	list_add_tail(&ops->list, &rtnl_af_ops);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_register);
+
+/**
+ * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_af_register(struct rtnl_af_ops *ops)
+{
+	int err;
+
+	rtnl_lock();
+	err = __rtnl_af_register(ops);
+	rtnl_unlock();
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtnl_af_register);
+
+/**
+ * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+	list_del(&ops->list);
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
+
+/**
+ * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ */
+void rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+	rtnl_lock();
+	__rtnl_af_unregister(ops);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_af_unregister);
+
+static size_t rtnl_link_get_af_size(const struct net_device *dev)
+{
+	struct rtnl_af_ops *af_ops;
+	size_t size;
+
+	/* IFLA_AF_SPEC */
+	size = nla_total_size(sizeof(struct nlattr));
+
+	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+		if (af_ops->get_link_af_size) {
+			/* AF_* + nested data */
+			size += nla_total_size(sizeof(struct nlattr)) +
+				af_ops->get_link_af_size(dev);
+		}
+	}
 
 	return size;
 }
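
A minimal consumer of the new hooks might look as follows; everything prefixed my_ is hypothetical, AF_INET6 merely stands in for a real address family, and the exact rtnl_af_ops fields are those introduced by this patch:

    #include <net/rtnetlink.h>

    static int my_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
    {
    	/* attribute type 1 is hypothetical */
    	return nla_put_u32(skb, 1, dev->flags) ? -EMSGSIZE : 0;
    }

    static size_t my_get_link_af_size(const struct net_device *dev)
    {
    	return nla_total_size(4);	/* one u32 attribute */
    }

    static struct rtnl_af_ops my_af_ops = {
    	.family		  = AF_INET6,	/* stand-in family */
    	.fill_link_af	  = my_fill_link_af,
    	.get_link_af_size = my_get_link_af_size,
    };

    static int __init my_af_init(void)
    {
    	return rtnl_af_register(&my_af_ops);
    }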
@@ -670,7 +760,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(4) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
-	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -756,7 +847,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	struct nlmsghdr *nlh;
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *stats;
-	struct nlattr *attr;
+	struct nlattr *attr, *af_spec;
+	struct rtnl_af_ops *af_ops;
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
 	if (nlh == NULL)
@@ -865,6 +957,36 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 		goto nla_put_failure;
 	}
 
+	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
+		goto nla_put_failure;
+
+	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+		if (af_ops->fill_link_af) {
+			struct nlattr *af;
+			int err;
+
+			if (!(af = nla_nest_start(skb, af_ops->family)))
+				goto nla_put_failure;
+
+			err = af_ops->fill_link_af(skb, dev);
+
+			/*
+			 * Caller may return ENODATA to indicate that there
+			 * was no data to be dumped. This is not an error, it
+			 * means we should trim the attribute header and
+			 * continue.
+			 */
+			if (err == -ENODATA)
+				nla_nest_cancel(skb, af);
+			else if (err < 0)
+				goto nla_put_failure;
+
+			nla_nest_end(skb, af);
+		}
+	}
+
+	nla_nest_end(skb, af_spec);
+
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
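
The fill loop above leans on the standard netlink nesting idiom: reserve a container attribute, emit children, then either commit the container's length or roll it back. Stripped to its skeleton (MY_ATTR and MY_ATTR_CHILD are hypothetical attribute types):

    #include <net/netlink.h>

    #define MY_ATTR		1	/* hypothetical container attribute */
    #define MY_ATTR_CHILD	2	/* hypothetical child attribute */

    static int my_fill_nested(struct sk_buff *skb)
    {
    	struct nlattr *nest = nla_nest_start(skb, MY_ATTR);

    	if (nest == NULL)
    		return -EMSGSIZE;

    	if (nla_put_u32(skb, MY_ATTR_CHILD, 42)) {
    		nla_nest_cancel(skb, nest);	/* trim the partial container */
    		return -EMSGSIZE;
    	}

    	nla_nest_end(skb, nest);	/* patch the container length */
    	return 0;
    }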
@@ -923,6 +1045,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
+	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -984,6 +1107,28 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 		return -EINVAL;
 	}
 
+	if (tb[IFLA_AF_SPEC]) {
+		struct nlattr *af;
+		int rem, err;
+
+		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+			const struct rtnl_af_ops *af_ops;
+
+			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+				return -EAFNOSUPPORT;
+
+			if (!af_ops->set_link_af)
+				return -EOPNOTSUPP;
+
+			if (af_ops->validate_link_af) {
+				err = af_ops->validate_link_af(dev,
+							tb[IFLA_AF_SPEC]);
+				if (err < 0)
+					return err;
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -1224,6 +1369,24 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 			goto errout;
 		modified = 1;
 	}
+
+	if (tb[IFLA_AF_SPEC]) {
+		struct nlattr *af;
+		int rem;
+
+		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+			const struct rtnl_af_ops *af_ops;
+
+			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+				BUG();
+
+			err = af_ops->set_link_af(dev, af);
+			if (err < 0)
+				goto errout;
+
+			modified = 1;
+		}
+	}
 	err = 0;
 
 errout:
diff --git a/net/core/scm.c b/net/core/scm.c
index 413cab89017d..bbe454450801 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -79,10 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
79 return -ENOMEM; 79 return -ENOMEM;
80 *fplp = fpl; 80 *fplp = fpl;
81 fpl->count = 0; 81 fpl->count = 0;
82 fpl->max = SCM_MAX_FD;
82 } 83 }
83 fpp = &fpl->fp[fpl->count]; 84 fpp = &fpl->fp[fpl->count];
84 85
85 if (fpl->count + num > SCM_MAX_FD) 86 if (fpl->count + num > fpl->max)
86 return -EINVAL; 87 return -EINVAL;
87 88
88 /* 89 /*
@@ -331,11 +332,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
331 if (!fpl) 332 if (!fpl)
332 return NULL; 333 return NULL;
333 334
334 new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); 335 new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
336 GFP_KERNEL);
335 if (new_fpl) { 337 if (new_fpl) {
336 for (i=fpl->count-1; i>=0; i--) 338 for (i = 0; i < fpl->count; i++)
337 get_file(fpl->fp[i]); 339 get_file(fpl->fp[i]);
338 memcpy(new_fpl, fpl, sizeof(*fpl)); 340 new_fpl->max = new_fpl->count;
339 } 341 }
340 return new_fpl; 342 return new_fpl;
341} 343}
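
kmemdup() here replaces a kmalloc()+memcpy() pair and, via offsetof(), copies only the header plus the populated part of the trailing array. A reduced sketch with an illustrative struct:

    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct my_list {
    	short count;		/* used slots */
    	short max;		/* capacity */
    	void *slot[8];
    };

    static struct my_list *my_list_dup(const struct my_list *src)
    {
    	/* header plus the src->count populated slots, nothing more */
    	struct my_list *dup = kmemdup(src,
    				      offsetof(struct my_list, slot[src->count]),
    				      GFP_KERNEL);

    	if (dup)
    		dup->max = dup->count;	/* clamp capacity, as the patch does */
    	return dup;
    }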
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 104f8444754a..8814a9a52f47 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -778,6 +778,28 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
778 778
779 size = SKB_DATA_ALIGN(size); 779 size = SKB_DATA_ALIGN(size);
780 780
781 /* Check if we can avoid taking references on fragments if we own
782 * the last reference on skb->head. (see skb_release_data())
783 */
784 if (!skb->cloned)
785 fastpath = true;
786 else {
787 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
788
789 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
790 }
791
792 if (fastpath &&
793 size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
794 memmove(skb->head + size, skb_shinfo(skb),
795 offsetof(struct skb_shared_info,
796 frags[skb_shinfo(skb)->nr_frags]));
797 memmove(skb->head + nhead, skb->head,
798 skb_tail_pointer(skb) - skb->head);
799 off = nhead;
800 goto adjust_others;
801 }
802
781 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 803 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
782 if (!data) 804 if (!data)
783 goto nodata; 805 goto nodata;
@@ -791,17 +813,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	       skb_shinfo(skb),
 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
-	/* Check if we can avoid taking references on fragments if we own
-	 * the last reference on skb->head. (see skb_release_data())
-	 */
-	if (!skb->cloned)
-		fastpath = true;
-	else {
-		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
-		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
-	}
-
 	if (fastpath) {
 		kfree(skb->head);
 	} else {
@@ -816,6 +827,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	off = (data + nhead) - skb->head;
 
 	skb->head = data;
+adjust_others:
 	skb->data += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	skb->end = size;
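
The new fast path works because kmalloc() rounds allocations up to a slab class and ksize() reports that real usable size, so an enlarged header can often fit into existing slack without reallocating. Illustrative sketch (not from the patch):

    #include <linux/slab.h>

    static void *maybe_grow_inplace(void *p, size_t want)
    {
    	if (p && want <= ksize(p))
    		return p;	/* slab object already has enough slack */
    	return krealloc(p, want, GFP_ATOMIC);
    }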
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed5424e659..bcdb6ff6e621 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -992,17 +992,18 @@ static inline void sock_lock_init(struct sock *sk)
 /*
  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
  * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
  */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
-		     sizeof(osk->sk_tx_queue_mapping));
-	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
-	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
+	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
+
+	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -1653,10 +1654,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 {
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
-	int allocated;
+	long allocated;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_add_return(amt, prot->memory_allocated);
+	allocated = atomic_long_add_return(amt, prot->memory_allocated);
 
 	/* Under limit. */
 	if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1715,7 @@ suppress_allocation:
 
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, prot->memory_allocated);
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1728,12 @@ void __sk_mem_reclaim(struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;
 
-	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
-		   prot->memory_allocated);
+	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+			prot->memory_allocated);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
 	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2453,12 @@ static char proto_method_implemented(const void *method)
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
 		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
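
memory_allocated counts SK_MEM_QUANTUM (page-sized) units, so a 32-bit counter wraps once about 2^31 quanta (roughly 8 TiB at 4 KiB pages) have been accounted, a level that large tcp_mem settings on big machines can reach; hence the conversion to atomic_long_t throughout. A minimal sketch of the wider counter (names illustrative):

    #include <asm/atomic.h>	/* <linux/atomic.h> on later kernels */

    static atomic_long_t my_memory_allocated = ATOMIC_LONG_INIT(0);

    static long my_charge(int pages)
    {
    	/* 64 bits wide on 64-bit kernels, so no wrap at 2^31 quanta */
    	return atomic_long_add_return(pages, &my_memory_allocated);
    }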
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22da85b..b124d28ff1c8 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -26,12 +26,12 @@ static struct sock_filter ptp_filter[] = {
26 PTP_FILTER 26 PTP_FILTER
27}; 27};
28 28
29static unsigned int classify(struct sk_buff *skb) 29static unsigned int classify(const struct sk_buff *skb)
30{ 30{
31 if (likely(skb->dev && 31 if (likely(skb->dev &&
32 skb->dev->phydev && 32 skb->dev->phydev &&
33 skb->dev->phydev->drv)) 33 skb->dev->phydev->drv))
34 return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter)); 34 return sk_run_filter(skb, ptp_filter);
35 else 35 else
36 return PTP_CLASS_NONE; 36 return PTP_CLASS_NONE;
37} 37}
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2991efcc8dea..5c8362b037ed 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
 
-dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
-
+dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o \
+	  qpolicy.o
 #
 # CCID algorithms to be used by dccp.ko
 #
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 92a6fcb40d7d..25b7a8d1ad58 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -1,444 +1,375 @@
 /*
  * net/dccp/ackvec.c
  *
- * An implementation of the DCCP protocol
+ * An implementation of Ack Vectors for the DCCP protocol
+ * Copyright (c) 2007 University of Aberdeen, Scotland, UK
  * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
  * Free Software Foundation; version 2 of the License;
  */
-
-#include "ackvec.h"
 #include "dccp.h"
-
-#include <linux/init.h>
-#include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/skbuff.h>
 #include <linux/slab.h>
 
-#include <net/sock.h>
-
 static struct kmem_cache *dccp_ackvec_slab;
 static struct kmem_cache *dccp_ackvec_record_slab;
 
-static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 {
-	struct dccp_ackvec_record *avr =
-			kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
+	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
 
-	if (avr != NULL)
-		INIT_LIST_HEAD(&avr->avr_node);
-
-	return avr;
+	if (av != NULL) {
+		av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
+		INIT_LIST_HEAD(&av->av_records);
+	}
+	return av;
 }
 
-static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr)
+static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
 {
-	if (unlikely(avr == NULL))
-		return;
-	/* Check if deleting a linked record */
-	WARN_ON(!list_empty(&avr->avr_node));
-	kmem_cache_free(dccp_ackvec_record_slab, avr);
+	struct dccp_ackvec_record *cur, *next;
+
+	list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
+		kmem_cache_free(dccp_ackvec_record_slab, cur);
+	INIT_LIST_HEAD(&av->av_records);
 }
 
-static void dccp_ackvec_insert_avr(struct dccp_ackvec *av,
-				   struct dccp_ackvec_record *avr)
+void dccp_ackvec_free(struct dccp_ackvec *av)
 {
-	/*
-	 * AVRs are sorted by seqno. Since we are sending them in order, we
-	 * just add the AVR at the head of the list.
-	 * -sorbo.
-	 */
-	if (!list_empty(&av->av_records)) {
-		const struct dccp_ackvec_record *head =
-					list_entry(av->av_records.next,
-						   struct dccp_ackvec_record,
-						   avr_node);
-		BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
+	if (likely(av != NULL)) {
+		dccp_ackvec_purge_records(av);
+		kmem_cache_free(dccp_ackvec_slab, av);
 	}
-
-	list_add(&avr->avr_node, &av->av_records);
 }
 
-int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+/**
+ * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
+ * @av:		Ack Vector records to update
+ * @seqno:	Sequence number of the packet carrying the Ack Vector just sent
+ * @nonce_sum:	The sum of all buffer nonces contained in the Ack Vector
+ */
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
-	struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
-	/* Figure out how many options do we need to represent the ackvec */
-	const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
-	u16 len = av->av_vec_len + 2 * nr_opts, i;
-	u32 elapsed_time;
-	const unsigned char *tail, *from;
-	unsigned char *to;
 	struct dccp_ackvec_record *avr;
-	suseconds_t delta;
-
-	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
-		return -1;
-
-	delta = ktime_us_delta(ktime_get_real(), av->av_time);
-	elapsed_time = delta / 10;
 
-	if (elapsed_time != 0 &&
-	    dccp_insert_option_elapsed_time(skb, elapsed_time))
-		return -1;
-
-	avr = dccp_ackvec_record_new();
+	avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
 	if (avr == NULL)
-		return -1;
-
-	DCCP_SKB_CB(skb)->dccpd_opt_len += len;
-
-	to   = skb_push(skb, len);
-	len  = av->av_vec_len;
-	from = av->av_buf + av->av_buf_head;
-	tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
-
-	for (i = 0; i < nr_opts; ++i) {
-		int copylen = len;
-
-		if (len > DCCP_SINGLE_OPT_MAXLEN)
-			copylen = DCCP_SINGLE_OPT_MAXLEN;
-
-		*to++ = DCCPO_ACK_VECTOR_0;
-		*to++ = copylen + 2;
-
-		/* Check if buf_head wraps */
-		if (from + copylen > tail) {
-			const u16 tailsize = tail - from;
-
-			memcpy(to, from, tailsize);
-			to	+= tailsize;
-			len	-= tailsize;
-			copylen	-= tailsize;
-			from	= av->av_buf;
-		}
-
-		memcpy(to, from, copylen);
-		from += copylen;
-		to   += copylen;
-		len  -= copylen;
-	}
+		return -ENOBUFS;
 
+	avr->avr_ack_seqno  = seqno;
+	avr->avr_ack_ptr    = av->av_buf_head;
+	avr->avr_ack_ackno  = av->av_buf_ackno;
+	avr->avr_ack_nonce  = nonce_sum;
+	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
 	/*
-	 * From RFC 4340, A.2:
-	 *
-	 *	For each acknowledgement it sends, the HC-Receiver will add an
-	 *	acknowledgement record.  ack_seqno will equal the HC-Receiver
-	 *	sequence number it used for the ack packet; ack_ptr will equal
-	 *	buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
-	 *	equal buf_nonce.
+	 * When the buffer overflows, we keep no more than one record. This is
+	 * the simplest way of disambiguating sender-Acks dating from before the
+	 * overflow from sender-Acks which refer to after the overflow; a simple
+	 * solution is preferable here since we are handling an exception.
 	 */
-	avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
-	avr->avr_ack_ptr   = av->av_buf_head;
-	avr->avr_ack_ackno = av->av_buf_ackno;
-	avr->avr_ack_nonce = av->av_buf_nonce;
-	avr->avr_sent_len  = av->av_vec_len;
-
-	dccp_ackvec_insert_avr(av, avr);
+	if (av->av_overflow)
+		dccp_ackvec_purge_records(av);
+	/*
+	 * Since GSS is incremented for each packet, the list is automatically
+	 * arranged in descending order of @ack_seqno.
+	 */
+	list_add(&avr->avr_node, &av->av_records);
 
-	dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, "
-		      "ack_ackno=%llu\n",
-		      dccp_role(sk), avr->avr_sent_len,
+	dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
 		      (unsigned long long)avr->avr_ack_seqno,
-		      (unsigned long long)avr->avr_ack_ackno);
+		      (unsigned long long)avr->avr_ack_ackno,
+		      avr->avr_ack_runlen);
 	return 0;
 }
 
-struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
+static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
+						     const u64 ackno)
 {
-	struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
-
-	if (av != NULL) {
-		av->av_buf_head	 = DCCP_MAX_ACKVEC_LEN - 1;
-		av->av_buf_ackno = UINT48_MAX + 1;
-		av->av_buf_nonce = 0;
-		av->av_time	 = ktime_set(0, 0);
-		av->av_vec_len	 = 0;
-		INIT_LIST_HEAD(&av->av_records);
+	struct dccp_ackvec_record *avr;
+	/*
+	 * Exploit that records are inserted in descending order of sequence
+	 * number, start with the oldest record first. If @ackno is `before'
+	 * the earliest ack_ackno, the packet is too old to be considered.
+	 */
+	list_for_each_entry_reverse(avr, av_list, avr_node) {
+		if (avr->avr_ack_seqno == ackno)
+			return avr;
+		if (before48(ackno, avr->avr_ack_seqno))
+			break;
 	}
-
-	return av;
+	return NULL;
 }
 
-void dccp_ackvec_free(struct dccp_ackvec *av)
+/*
+ * Buffer index and length computation using modulo-buffersize arithmetic.
+ * Note that, as pointers move from right to left, head is `before' tail.
+ */
+static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
 {
-	if (unlikely(av == NULL))
-		return;
-
-	if (!list_empty(&av->av_records)) {
-		struct dccp_ackvec_record *avr, *next;
-
-		list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
-			list_del_init(&avr->avr_node);
-			dccp_ackvec_record_delete(avr);
-		}
-	}
-
-	kmem_cache_free(dccp_ackvec_slab, av);
+	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
 }
 
-static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
-				   const u32 index)
+static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
 {
-	return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK;
+	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
 }
 
-static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av,
-				 const u32 index)
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
 {
-	return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK;
+	if (unlikely(av->av_overflow))
+		return DCCPAV_MAX_ACKVEC_LEN;
+	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
 }
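
Since av_buf_head moves downwards, subtraction is expressed as addition of the complement modulo the buffer size. A quick worked check in userspace (a buffer size of 253 is assumed here purely for illustration):

    #include <assert.h>

    #define BUFLEN 253	/* stand-in for DCCPAV_MAX_ACKVEC_LEN */

    static unsigned idx_add(unsigned a, unsigned b) { return (a + b) % BUFLEN; }
    static unsigned idx_sub(unsigned a, unsigned b) { return idx_add(a, BUFLEN - b); }

    int main(void)
    {
    	assert(idx_sub(0, 1) == BUFLEN - 1);	/* head steps below 0 and wraps */
    	assert(idx_sub(10, 3) == 7);
    	assert(idx_add(BUFLEN - 1, 2) == 1);	/* walking past the end wraps */
    	return 0;
    }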
 
-/*
- * If several packets are missing, the HC-Receiver may prefer to enter multiple
- * bytes with run length 0, rather than a single byte with a larger run length;
- * this simplifies table updates if one of the missing packets arrives.
+/**
+ * dccp_ackvec_update_old  -  Update previous state as per RFC 4340, 11.4.1
+ * @av:		non-empty buffer to update
+ * @distance:	negative or zero distance of @seqno from buf_ackno downward
+ * @seqno:	the (old) sequence number whose record is to be updated
+ * @state:	state in which packet carrying @seqno was received
  */
-static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
-						 const unsigned int packets,
-						 const unsigned char state)
+static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
+				   u64 seqno, enum dccp_ackvec_states state)
 {
-	long gap;
-	long new_head;
+	u16 ptr = av->av_buf_head;
 
-	if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
-		return -ENOBUFS;
+	BUG_ON(distance > 0);
+	if (unlikely(dccp_ackvec_is_empty(av)))
+		return;
 
-	gap	 = packets - 1;
-	new_head = av->av_buf_head - packets;
+	do {
+		u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);
 
-	if (new_head < 0) {
-		if (gap > 0) {
-			memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED,
-			       gap + new_head + 1);
-			gap = -new_head;
+		if (distance + runlen >= 0) {
+			/*
+			 * Only update the state if packet has not been received
+			 * yet. This is OK as per the second table in RFC 4340,
+			 * 11.4.1; i.e. here we are using the following table:
+			 *                     RECEIVED
+			 *                      0   1   3
+			 *              S     +---+---+---+
+			 *              T   0 | 0 | 0 | 0 |
+			 *              O     +---+---+---+
+			 *              R   1 | 1 | 1 | 1 |
+			 *              E     +---+---+---+
+			 *              D   3 | 0 | 1 | 3 |
+			 *                    +---+---+---+
+			 * The "Not Received" state was set by reserve_seats().
+			 */
+			if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
+				av->av_buf[ptr] = state;
+			else
+				dccp_pr_debug("Not changing %llu state to %u\n",
+					      (unsigned long long)seqno, state);
+			break;
 		}
-		new_head += DCCP_MAX_ACKVEC_LEN;
-	}
 
-	av->av_buf_head = new_head;
+		distance += runlen + 1;
+		ptr	  = __ackvec_idx_add(ptr, 1);
 
-	if (gap > 0)
-		memset(av->av_buf + av->av_buf_head + 1,
-		       DCCP_ACKVEC_STATE_NOT_RECEIVED, gap);
-
-	av->av_buf[av->av_buf_head] = state;
-	av->av_vec_len += packets;
-	return 0;
+	} while (ptr != av->av_buf_tail);
+}
+
+/* Mark @num entries after buf_head as "Not yet received". */
+static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
+{
+	u16 start = __ackvec_idx_add(av->av_buf_head, 1),
+	    len	  = DCCPAV_MAX_ACKVEC_LEN - start;
+
+	/* check for buffer wrap-around */
+	if (num > len) {
+		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
+		start = 0;
+		num  -= len;
+	}
+	if (num)
+		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
 }
 
232 190
233/* 191/**
234 * Implements the RFC 4340, Appendix A 192 * dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer
193 * @av: container of buffer to update (can be empty or non-empty)
194 * @num_packets: number of packets to register (must be >= 1)
195 * @seqno: sequence number of the first packet in @num_packets
196 * @state: state in which packet carrying @seqno was received
235 */ 197 */
236int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 198static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
237 const u64 ackno, const u8 state) 199 u64 seqno, enum dccp_ackvec_states state)
238{ 200{
239 /* 201 u32 num_cells = num_packets;
240 * Check at the right places if the buffer is full, if it is, tell the
241 * caller to start dropping packets till the HC-Sender acks our ACK
242 * vectors, when we will free up space in av_buf.
243 *
244 * We may well decide to do buffer compression, etc, but for now lets
245 * just drop.
246 *
247 * From Appendix A.1.1 (`New Packets'):
248 *
249 * Of course, the circular buffer may overflow, either when the
250 * HC-Sender is sending data at a very high rate, when the
251 * HC-Receiver's acknowledgements are not reaching the HC-Sender,
252 * or when the HC-Sender is forgetting to acknowledge those acks
253 * (so the HC-Receiver is unable to clean up old state). In this
254 * case, the HC-Receiver should either compress the buffer (by
255 * increasing run lengths when possible), transfer its state to
256 * a larger buffer, or, as a last resort, drop all received
257 * packets, without processing them whatsoever, until its buffer
258 * shrinks again.
259 */
260 202
261 /* See if this is the first ackno being inserted */ 203 if (num_packets > DCCPAV_BURST_THRESH) {
262 if (av->av_vec_len == 0) { 204 u32 lost_packets = num_packets - 1;
263 av->av_buf[av->av_buf_head] = state;
264 av->av_vec_len = 1;
265 } else if (after48(ackno, av->av_buf_ackno)) {
266 const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
267 205
206 DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
268 /* 207 /*
269 * Look if the state of this packet is the same as the 208 * We received 1 packet and have a loss of size "num_packets-1"
270 * previous ackno and if so if we can bump the head len. 209 * which we squeeze into num_cells-1 rather than reserving an
210 * entire byte for each lost packet.
211 * The reason is that the vector grows in O(burst_length); when
212 * it grows too large there will no room left for the payload.
213 * This is a trade-off: if a few packets out of the burst show
214 * up later, their state will not be changed; it is simply too
215 * costly to reshuffle/reallocate/copy the buffer each time.
216 * Should such problems persist, we will need to switch to a
217 * different underlying data structure.
271 */ 218 */
272 if (delta == 1 && 219 for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
273 dccp_ackvec_state(av, av->av_buf_head) == state && 220 u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
274 dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK)
275 av->av_buf[av->av_buf_head]++;
276 else if (dccp_ackvec_set_buf_head_state(av, delta, state))
277 return -ENOBUFS;
278 } else {
279 /*
280 * A.1.2. Old Packets
281 *
282 * When a packet with Sequence Number S <= buf_ackno
283 * arrives, the HC-Receiver will scan the table for
284 * the byte corresponding to S. (Indexing structures
285 * could reduce the complexity of this scan.)
286 */
287 u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
288 u32 index = av->av_buf_head;
289 221
290 while (1) { 222 av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
291 const u8 len = dccp_ackvec_len(av, index); 223 av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
292 const u8 av_state = dccp_ackvec_state(av, index); 224
293 /* 225 lost_packets -= len;
294 * valid packets not yet in av_buf have a reserved
295 * entry, with a len equal to 0.
296 */
297 if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
298 len == 0 && delta == 0) { /* Found our
299 reserved seat! */
300 dccp_pr_debug("Found %llu reserved seat!\n",
301 (unsigned long long)ackno);
302 av->av_buf[index] = state;
303 goto out;
304 }
305 /* len == 0 means one packet */
306 if (delta < len + 1)
307 goto out_duplicate;
308
309 delta -= len + 1;
310 if (++index == DCCP_MAX_ACKVEC_LEN)
311 index = 0;
312 } 226 }
313 } 227 }
314 228
315 av->av_buf_ackno = ackno; 229 if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
316 av->av_time = ktime_get_real(); 230 DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
317out: 231 av->av_overflow = true;
318 return 0; 232 }
233
234 av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
235 if (av->av_overflow)
236 av->av_buf_tail = av->av_buf_head;
319 237
320out_duplicate: 238 av->av_buf[av->av_buf_head] = state;
321 /* Duplicate packet */ 239 av->av_buf_ackno = seqno;
322 dccp_pr_debug("Received a dup or already considered lost " 240
323 "packet: %llu\n", (unsigned long long)ackno); 241 if (num_packets > 1)
324 return -EILSEQ; 242 dccp_ackvec_reserve_seats(av, num_packets - 1);
325} 243}
326 244
327static void dccp_ackvec_throw_record(struct dccp_ackvec *av, 245/**
328 struct dccp_ackvec_record *avr) 246 * dccp_ackvec_input - Register incoming packet in the buffer
247 */
248void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
329{ 249{
330 struct dccp_ackvec_record *next; 250 u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
251 enum dccp_ackvec_states state = DCCPAV_RECEIVED;
331 252
332 /* sort out vector length */ 253 if (dccp_ackvec_is_empty(av)) {
333 if (av->av_buf_head <= avr->avr_ack_ptr) 254 dccp_ackvec_add_new(av, 1, seqno, state);
334 av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head; 255 av->av_tail_ackno = seqno;
335 else
336 av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 -
337 av->av_buf_head + avr->avr_ack_ptr;
338 256
339 /* free records */ 257 } else {
340 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) { 258 s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
341 list_del_init(&avr->avr_node); 259 u8 *current_head = av->av_buf + av->av_buf_head;
342 dccp_ackvec_record_delete(avr);
343 }
344}
345 260
346void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, 261 if (num_packets == 1 &&
347 const u64 ackno) 262 dccp_ackvec_state(current_head) == state &&
348{ 263 dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
349 struct dccp_ackvec_record *avr;
350 264
351 /* 265 *current_head += 1;
352 * If we traverse backwards, it should be faster when we have large 266 av->av_buf_ackno = seqno;
353 * windows. We will be receiving ACKs for stuff we sent a while back 267
354 * -sorbo. 268 } else if (num_packets > 0) {
355 */ 269 dccp_ackvec_add_new(av, num_packets, seqno, state);
356 list_for_each_entry_reverse(avr, &av->av_records, avr_node) { 270 } else {
357 if (ackno == avr->avr_ack_seqno) { 271 dccp_ackvec_update_old(av, num_packets, seqno, state);
358 dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, " 272 }
359 "ack_ackno=%llu, ACKED!\n",
360 dccp_role(sk), 1,
361 (unsigned long long)avr->avr_ack_seqno,
362 (unsigned long long)avr->avr_ack_ackno);
363 dccp_ackvec_throw_record(av, avr);
364 break;
365 } else if (avr->avr_ack_seqno > ackno)
366 break; /* old news */
367 } 273 }
368} 274}
369 275
370static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, 276/**
371 struct sock *sk, u64 *ackno, 277 * dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
372 const unsigned char len, 278 * This routine is called when the peer acknowledges the receipt of Ack Vectors
 373 const unsigned char *vector) 279 * up to and including @ackno. While based on section A.3 of RFC 4340, here
280 * are additional precautions to prevent corrupted buffer state. In particular,
281 * we use tail_ackno to identify outdated records; it always marks the earliest
282 * packet of group (2) in 11.4.2.
283 */
284void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
374{ 285{
375 unsigned char i; 286 struct dccp_ackvec_record *avr, *next;
376 struct dccp_ackvec_record *avr; 287 u8 runlen_now, eff_runlen;
288 s64 delta;
377 289
378 /* Check if we actually sent an ACK vector */ 290 avr = dccp_ackvec_lookup(&av->av_records, ackno);
379 if (list_empty(&av->av_records)) 291 if (avr == NULL)
380 return; 292 return;
293 /*
294 * Deal with outdated acknowledgments: this arises when e.g. there are
295 * several old records and the acks from the peer come in slowly. In
296 * that case we may still have records that pre-date tail_ackno.
297 */
298 delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
299 if (delta < 0)
300 goto free_records;
301 /*
302 * Deal with overlapping Ack Vectors: don't subtract more than the
303 * number of packets between tail_ackno and ack_ackno.
304 */
305 eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;
381 306
382 i = len; 307 runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
383 /* 308 /*
384 * XXX 309 * The run length of Ack Vector cells does not decrease over time. If
385 * I think it might be more efficient to work backwards. See comment on 310 * the run length is the same as at the time the Ack Vector was sent, we
386 * rcv_ackno. -sorbo. 311 * free the ack_ptr cell. That cell can however not be freed if the run
312 * length has increased: in this case we need to move the tail pointer
313 * backwards (towards higher indices), to its next-oldest neighbour.
387 */ 314 */
388 avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node); 315 if (runlen_now > eff_runlen) {
389 while (i--) {
390 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
391 u64 ackno_end_rl;
392 316
393 dccp_set_seqno(&ackno_end_rl, *ackno - rl); 317 av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
318 av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);
394 319
320 /* This move may not have cleared the overflow flag. */
321 if (av->av_overflow)
322 av->av_overflow = (av->av_buf_head == av->av_buf_tail);
323 } else {
324 av->av_buf_tail = avr->avr_ack_ptr;
395 /* 325 /*
396 * If our AVR sequence number is greater than the ack, go 326 * We have made sure that avr points to a valid cell within the
397 * forward in the AVR list until it is not so. 327 * buffer. This cell is either older than head, or equals head
328 * (empty buffer): in both cases we no longer have any overflow.
398 */ 329 */
399 list_for_each_entry_from(avr, &av->av_records, avr_node) { 330 av->av_overflow = 0;
400 if (!after48(avr->avr_ack_seqno, *ackno)) 331 }
401 goto found;
402 }
403 /* End of the av_records list, not found, exit */
404 break;
405found:
406 if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
407 const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
408 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
409 dccp_pr_debug("%s ACK vector 0, len=%d, "
410 "ack_seqno=%llu, ack_ackno=%llu, "
411 "ACKED!\n",
412 dccp_role(sk), len,
413 (unsigned long long)
414 avr->avr_ack_seqno,
415 (unsigned long long)
416 avr->avr_ack_ackno);
417 dccp_ackvec_throw_record(av, avr);
418 break;
419 }
420 /*
421 * If it wasn't received, continue scanning... we might
422 * find another one.
423 */
424 }
425 332
426 dccp_set_seqno(ackno, ackno_end_rl - 1); 333 /*
427 ++vector; 334 * The peer has acknowledged up to and including ack_ackno. Hence the
335 * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
336 */
337 av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);
338
339free_records:
340 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
341 list_del(&avr->avr_node);
342 kmem_cache_free(dccp_ackvec_record_slab, avr);
428 } 343 }
429} 344}
430 345
431int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, 346/*
432 u64 *ackno, const u8 opt, const u8 *value, const u8 len) 347 * Routines to keep track of Ack Vectors received in an skb
348 */
349int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
433{ 350{
434 if (len > DCCP_SINGLE_OPT_MAXLEN) 351 struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);
435 return -1; 352
353 if (new == NULL)
354 return -ENOBUFS;
355 new->vec = vec;
356 new->len = len;
357 new->nonce = nonce;
436 358
437 /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ 359 list_add_tail(&new->node, head);
438 dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk,
439 ackno, len, value);
440 return 0; 360 return 0;
441} 361}
362EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);
363
364void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
365{
366 struct dccp_ackvec_parsed *cur, *next;
367
368 list_for_each_entry_safe(cur, next, parsed_chunks, node)
369 kfree(cur);
370 INIT_LIST_HEAD(parsed_chunks);
371}
372EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
442 373
443int __init dccp_ackvec_init(void) 374int __init dccp_ackvec_init(void)
444{ 375{
@@ -448,10 +379,9 @@ int __init dccp_ackvec_init(void)
448 if (dccp_ackvec_slab == NULL) 379 if (dccp_ackvec_slab == NULL)
449 goto out_err; 380 goto out_err;
450 381
451 dccp_ackvec_record_slab = 382 dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
452 kmem_cache_create("dccp_ackvec_record", 383 sizeof(struct dccp_ackvec_record),
453 sizeof(struct dccp_ackvec_record), 384 0, SLAB_HWCACHE_ALIGN, NULL);
454 0, SLAB_HWCACHE_ALIGN, NULL);
455 if (dccp_ackvec_record_slab == NULL) 385 if (dccp_ackvec_record_slab == NULL)
456 goto out_destroy_slab; 386 goto out_destroy_slab;
457 387
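
A minimal userspace sketch of the burst-loss compression in dccp_ackvec_add_new() above, assuming only the RFC 4340 cell semantics (a cell with run length r covers r+1 consecutive packets in the same state); all identifiers are local to the demo, not kernel symbols:

#include <stdint.h>
#include <stdio.h>

#define MAX_RUNLEN   0x3F	/* 6-bit run length, as DCCPAV_MAX_RUNLEN   */
#define NOT_RECEIVED 0xC0	/* state bits 11------, as DCCPAV_NOT_RECEIVED */

int main(void)
{
	uint8_t buf[8];		/* toy buffer, newest cell first            */
	uint32_t lost = 150;	/* packets missing before the new arrival   */
	unsigned cells = 0;

	/* Each cell absorbs up to MAX_RUNLEN + 1 = 64 losses, so a burst
	 * of N lost packets costs ceil(N / 64) cells instead of N bytes. */
	while (lost > 0) {
		uint8_t run = (lost - 1 > MAX_RUNLEN) ? MAX_RUNLEN : lost - 1;

		buf[cells++] = NOT_RECEIVED | run;
		lost -= run + 1;
	}
	printf("150 lost packets -> %u cells instead of 150 bytes\n", cells);
	return 0;
}
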
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 7ea557b7c6b1..e2ab0627a5ff 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -3,9 +3,9 @@
3/* 3/*
4 * net/dccp/ackvec.h 4 * net/dccp/ackvec.h
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of Ack Vectors for the DCCP protocol
7 * Copyright (c) 2007 University of Aberdeen, Scotland, UK
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as 10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -13,99 +13,124 @@
13 13
14#include <linux/dccp.h> 14#include <linux/dccp.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/ktime.h>
17#include <linux/list.h> 16#include <linux/list.h>
18#include <linux/types.h> 17#include <linux/types.h>
19 18
20/* We can spread an ack vector across multiple options */ 19/*
21#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2) 20 * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
21 * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
 22 * will be sufficient for most cases of low Ack Ratios; using a value of 2 gives
23 * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
24 * The maximum value is bounded by the u16 types for indices and functions.
25 */
26#define DCCPAV_NUM_ACKVECS 2
27#define DCCPAV_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
22 28
23/* Estimated minimum average Ack Vector length - used for updating MPS */ 29/* Estimated minimum average Ack Vector length - used for updating MPS */
24#define DCCPAV_MIN_OPTLEN 16 30#define DCCPAV_MIN_OPTLEN 16
25 31
26#define DCCP_ACKVEC_STATE_RECEIVED 0 32/* Threshold for coping with large bursts of losses */
27#define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) 33#define DCCPAV_BURST_THRESH (DCCPAV_MAX_ACKVEC_LEN / 8)
28#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6)
29 34
30#define DCCP_ACKVEC_STATE_MASK 0xC0 /* 11000000 */ 35enum dccp_ackvec_states {
31#define DCCP_ACKVEC_LEN_MASK 0x3F /* 00111111 */ 36 DCCPAV_RECEIVED = 0x00,
37 DCCPAV_ECN_MARKED = 0x40,
38 DCCPAV_RESERVED = 0x80,
39 DCCPAV_NOT_RECEIVED = 0xC0
40};
41#define DCCPAV_MAX_RUNLEN 0x3F
32 42
33/** struct dccp_ackvec - ack vector 43static inline u8 dccp_ackvec_runlen(const u8 *cell)
34 * 44{
35 * This data structure is the one defined in RFC 4340, Appendix A. 45 return *cell & DCCPAV_MAX_RUNLEN;
36 * 46}
37 * @av_buf_head - circular buffer head 47
38 * @av_buf_tail - circular buffer tail 48static inline u8 dccp_ackvec_state(const u8 *cell)
39 * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the 49{
40 * buffer (i.e. %av_buf_head) 50 return *cell & ~DCCPAV_MAX_RUNLEN;
41 * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked 51}
42 * by the buffer with State 0 52
43 * 53/** struct dccp_ackvec - Ack Vector main data structure
44 * Additionally, the HC-Receiver must keep some information about the
45 * Ack Vectors it has recently sent. For each packet sent carrying an
46 * Ack Vector, it remembers four variables:
47 * 54 *
48 * @av_records - list of dccp_ackvec_record 55 * This implements a fixed-size circular buffer within an array and is largely
49 * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. 56 * based on Appendix A of RFC 4340.
50 * 57 *
51 * @av_time - the time in usecs 58 * @av_buf: circular buffer storage area
52 * @av_buf - circular buffer of acknowledgeable packets 59 * @av_buf_head: head index; begin of live portion in @av_buf
60 * @av_buf_tail: tail index; first index _after_ the live portion in @av_buf
61 * @av_buf_ackno: highest seqno of acknowledgeable packet recorded in @av_buf
62 * @av_tail_ackno: lowest seqno of acknowledgeable packet recorded in @av_buf
63 * @av_buf_nonce: ECN nonce sums, each covering subsequent segments of up to
64 * %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
65 * @av_overflow: if 1 then buf_head == buf_tail indicates buffer wraparound
66 * @av_records: list of %dccp_ackvec_record (Ack Vectors sent previously)
53 */ 67 */
54struct dccp_ackvec { 68struct dccp_ackvec {
55 u64 av_buf_ackno; 69 u8 av_buf[DCCPAV_MAX_ACKVEC_LEN];
56 struct list_head av_records;
57 ktime_t av_time;
58 u16 av_buf_head; 70 u16 av_buf_head;
59 u16 av_vec_len; 71 u16 av_buf_tail;
60 u8 av_buf_nonce; 72 u64 av_buf_ackno:48;
61 u8 av_ack_nonce; 73 u64 av_tail_ackno:48;
62 u8 av_buf[DCCP_MAX_ACKVEC_LEN]; 74 bool av_buf_nonce[DCCPAV_NUM_ACKVECS];
75 u8 av_overflow:1;
76 struct list_head av_records;
63}; 77};
64 78
65/** struct dccp_ackvec_record - ack vector record 79/** struct dccp_ackvec_record - Records information about sent Ack Vectors
66 * 80 *
67 * ACK vector record as defined in Appendix A of spec. 81 * These list entries define the additional information which the HC-Receiver
82 * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
68 * 83 *
69 * The list is sorted by avr_ack_seqno 84 * @avr_node: the list node in @av_records
85 * @avr_ack_seqno: sequence number of the packet the Ack Vector was sent on
86 * @avr_ack_ackno: the Ack number that this record/Ack Vector refers to
87 * @avr_ack_ptr: pointer into @av_buf where this record starts
88 * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
89 * @avr_ack_nonce: the sum of @av_buf_nonce's at the time this record was sent
70 * 90 *
71 * @avr_node - node in av_records 91 * The list as a whole is sorted in descending order by @avr_ack_seqno.
72 * @avr_ack_seqno - sequence number of the packet this record was sent on
73 * @avr_ack_ackno - sequence number being acknowledged
74 * @avr_ack_ptr - pointer into av_buf where this record starts
75 * @avr_ack_nonce - av_ack_nonce at the time this record was sent
 76 * @avr_sent_len - length of the record in av_buf
77 */ 92 */
78struct dccp_ackvec_record { 93struct dccp_ackvec_record {
79 struct list_head avr_node; 94 struct list_head avr_node;
80 u64 avr_ack_seqno; 95 u64 avr_ack_seqno:48;
81 u64 avr_ack_ackno; 96 u64 avr_ack_ackno:48;
82 u16 avr_ack_ptr; 97 u16 avr_ack_ptr;
83 u16 avr_sent_len; 98 u8 avr_ack_runlen;
84 u8 avr_ack_nonce; 99 u8 avr_ack_nonce:1;
85}; 100};
86 101
87struct sock;
88struct sk_buff;
89
90extern int dccp_ackvec_init(void); 102extern int dccp_ackvec_init(void);
91extern void dccp_ackvec_exit(void); 103extern void dccp_ackvec_exit(void);
92 104
93extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority); 105extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
94extern void dccp_ackvec_free(struct dccp_ackvec *av); 106extern void dccp_ackvec_free(struct dccp_ackvec *av);
95 107
96extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 108extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
97 const u64 ackno, const u8 state); 109extern int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
98 110extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
99extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, 111extern u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
100 struct sock *sk, const u64 ackno);
101extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
102 u64 *ackno, const u8 opt,
103 const u8 *value, const u8 len);
104 112
105extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); 113static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
106
107static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
108{ 114{
109 return av->av_vec_len; 115 return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
110} 116}
117
118/**
119 * struct dccp_ackvec_parsed - Record offsets of Ack Vectors in skb
120 * @vec: start of vector (offset into skb)
121 * @len: length of @vec
122 * @nonce: whether @vec had an ECN nonce of 0 or 1
123 * @node: FIFO - arranged in descending order of ack_ackno
124 * This structure is used by CCIDs to access Ack Vectors in a received skb.
125 */
126struct dccp_ackvec_parsed {
127 u8 *vec,
128 len,
129 nonce:1;
130 struct list_head node;
131};
132
133extern int dccp_ackvec_parsed_add(struct list_head *head,
134 u8 *vec, u8 len, u8 nonce);
135extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
111#endif /* _ACKVEC_H */ 136#endif /* _ACKVEC_H */
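
The runlen/state helpers above define the whole cell format: the top two bits of each buffer byte carry the state, the low six bits a run length. A self-contained sketch of that encoding, with demo-only names mirroring the masks in this header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { RECEIVED = 0x00, ECN_MARKED = 0x40, RESERVED = 0x80, NOT_RECEIVED = 0xC0 };
#define MAX_RUNLEN 0x3F

static uint8_t cell_runlen(uint8_t cell) { return cell & MAX_RUNLEN; }
static uint8_t cell_state(uint8_t cell)  { return cell & ~MAX_RUNLEN; }

int main(void)
{
	uint8_t cell = NOT_RECEIVED | 5;   /* 6 consecutive packets, all lost */

	assert(cell_state(cell) == NOT_RECEIVED);
	assert(cell_runlen(cell) == 5);
	printf("state=0x%02x covers %d packets\n",
	       cell_state(cell), cell_runlen(cell) + 1);
	return 0;
}
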
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 6576eae9e779..e96d5e810039 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -246,68 +246,6 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
246#endif 246#endif
247} 247}
248 248
249/* XXX Lame code duplication!
250 * returns -1 if none was found.
251 * else returns the next offset to use in the function call.
252 */
253static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
254 unsigned char **vec, unsigned char *veclen)
255{
256 const struct dccp_hdr *dh = dccp_hdr(skb);
257 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
258 unsigned char *opt_ptr;
259 const unsigned char *opt_end = (unsigned char *)dh +
260 (dh->dccph_doff * 4);
261 unsigned char opt, len;
262 unsigned char *value;
263
264 BUG_ON(offset < 0);
265 options += offset;
266 opt_ptr = options;
267 if (opt_ptr >= opt_end)
268 return -1;
269
270 while (opt_ptr != opt_end) {
271 opt = *opt_ptr++;
272 len = 0;
273 value = NULL;
274
275 /* Check if this isn't a single byte option */
276 if (opt > DCCPO_MAX_RESERVED) {
277 if (opt_ptr == opt_end)
278 goto out_invalid_option;
279
280 len = *opt_ptr++;
281 if (len < 3)
282 goto out_invalid_option;
283 /*
284 * Remove the type and len fields, leaving
285 * just the value size
286 */
287 len -= 2;
288 value = opt_ptr;
289 opt_ptr += len;
290
291 if (opt_ptr > opt_end)
292 goto out_invalid_option;
293 }
294
295 switch (opt) {
296 case DCCPO_ACK_VECTOR_0:
297 case DCCPO_ACK_VECTOR_1:
298 *vec = value;
299 *veclen = len;
300 return offset + (opt_ptr - options);
301 }
302 }
303
304 return -1;
305
306out_invalid_option:
307 DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
308 return -1;
309}
310
311/** 249/**
312 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm 250 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
313 * This code is almost identical with TCP's tcp_rtt_estimator(), since 251 * This code is almost identical with TCP's tcp_rtt_estimator(), since
@@ -432,16 +370,28 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
432 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd); 370 ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
433} 371}
434 372
373static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
374 u8 option, u8 *optval, u8 optlen)
375{
376 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
377
378 switch (option) {
379 case DCCPO_ACK_VECTOR_0:
380 case DCCPO_ACK_VECTOR_1:
381 return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
382 option - DCCPO_ACK_VECTOR_0);
383 }
384 return 0;
385}
386
435static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 387static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
436{ 388{
437 struct dccp_sock *dp = dccp_sk(sk); 389 struct dccp_sock *dp = dccp_sk(sk);
438 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 390 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
439 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); 391 const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
392 struct dccp_ackvec_parsed *avp;
440 u64 ackno, seqno; 393 u64 ackno, seqno;
441 struct ccid2_seq *seqp; 394 struct ccid2_seq *seqp;
442 unsigned char *vector;
443 unsigned char veclen;
444 int offset = 0;
445 int done = 0; 395 int done = 0;
446 unsigned int maxincr = 0; 396 unsigned int maxincr = 0;
447 397
@@ -475,17 +425,12 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
475 } 425 }
476 426
477 /* check forward path congestion */ 427 /* check forward path congestion */
478 /* still didn't send out new data packets */ 428 if (dccp_packet_without_ack(skb))
479 if (hc->tx_seqh == hc->tx_seqt)
480 return; 429 return;
481 430
482 switch (DCCP_SKB_CB(skb)->dccpd_type) { 431 /* still didn't send out new data packets */
483 case DCCP_PKT_ACK: 432 if (hc->tx_seqh == hc->tx_seqt)
484 case DCCP_PKT_DATAACK: 433 goto done;
485 break;
486 default:
487 return;
488 }
489 434
490 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; 435 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
491 if (after48(ackno, hc->tx_high_ack)) 436 if (after48(ackno, hc->tx_high_ack))
@@ -509,16 +454,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
509 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); 454 maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
510 455
511 /* go through all ack vectors */ 456 /* go through all ack vectors */
512 while ((offset = ccid2_ackvector(sk, skb, offset, 457 list_for_each_entry(avp, &hc->tx_av_chunks, node) {
513 &vector, &veclen)) != -1) {
514 /* go through this ack vector */ 458 /* go through this ack vector */
515 while (veclen--) { 459 for (; avp->len--; avp->vec++) {
516 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; 460 u64 ackno_end_rl = SUB48(ackno,
517 u64 ackno_end_rl = SUB48(ackno, rl); 461 dccp_ackvec_runlen(avp->vec));
518 462
519 ccid2_pr_debug("ackvec start:%llu end:%llu\n", 463 ccid2_pr_debug("ackvec %llu |%u,%u|\n",
520 (unsigned long long)ackno, 464 (unsigned long long)ackno,
521 (unsigned long long)ackno_end_rl); 465 dccp_ackvec_state(avp->vec) >> 6,
466 dccp_ackvec_runlen(avp->vec));
522 /* if the seqno we are analyzing is larger than the 467 /* if the seqno we are analyzing is larger than the
523 * current ackno, then move towards the tail of our 468 * current ackno, then move towards the tail of our
524 * seqnos. 469 * seqnos.
@@ -537,17 +482,15 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
537 * run length 482 * run length
538 */ 483 */
539 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { 484 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
540 const u8 state = *vector & 485 const u8 state = dccp_ackvec_state(avp->vec);
541 DCCP_ACKVEC_STATE_MASK;
542 486
543 /* new packet received or marked */ 487 /* new packet received or marked */
544 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && 488 if (state != DCCPAV_NOT_RECEIVED &&
545 !seqp->ccid2s_acked) { 489 !seqp->ccid2s_acked) {
546 if (state == 490 if (state == DCCPAV_ECN_MARKED)
547 DCCP_ACKVEC_STATE_ECN_MARKED) {
548 ccid2_congestion_event(sk, 491 ccid2_congestion_event(sk,
549 seqp); 492 seqp);
550 } else 493 else
551 ccid2_new_ack(sk, seqp, 494 ccid2_new_ack(sk, seqp,
552 &maxincr); 495 &maxincr);
553 496
@@ -566,7 +509,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
566 break; 509 break;
567 510
568 ackno = SUB48(ackno_end_rl, 1); 511 ackno = SUB48(ackno_end_rl, 1);
569 vector++;
570 } 512 }
571 if (done) 513 if (done)
572 break; 514 break;
@@ -634,10 +576,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
634 sk_stop_timer(sk, &hc->tx_rtotimer); 576 sk_stop_timer(sk, &hc->tx_rtotimer);
635 else 577 else
636 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); 578 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
637 579done:
638 /* check if incoming Acks allow pending packets to be sent */ 580 /* check if incoming Acks allow pending packets to be sent */
639 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) 581 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
640 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 582 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
583 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
641} 584}
642 585
643static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 586static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -666,6 +609,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
666 hc->tx_last_cong = ccid2_time_stamp; 609 hc->tx_last_cong = ccid2_time_stamp;
667 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 610 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
668 (unsigned long)sk); 611 (unsigned long)sk);
612 INIT_LIST_HEAD(&hc->tx_av_chunks);
669 return 0; 613 return 0;
670} 614}
671 615
@@ -699,16 +643,17 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
699} 643}
700 644
701struct ccid_operations ccid2_ops = { 645struct ccid_operations ccid2_ops = {
702 .ccid_id = DCCPC_CCID2, 646 .ccid_id = DCCPC_CCID2,
703 .ccid_name = "TCP-like", 647 .ccid_name = "TCP-like",
704 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), 648 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
705 .ccid_hc_tx_init = ccid2_hc_tx_init, 649 .ccid_hc_tx_init = ccid2_hc_tx_init,
706 .ccid_hc_tx_exit = ccid2_hc_tx_exit, 650 .ccid_hc_tx_exit = ccid2_hc_tx_exit,
707 .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet, 651 .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet,
708 .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent, 652 .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent,
709 .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv, 653 .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
710 .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock), 654 .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv,
711 .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, 655 .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock),
656 .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv,
712}; 657};
713 658
714#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 659#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 25cb6b216eda..e9985dafc2c7 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -55,6 +55,7 @@ struct ccid2_seq {
55 * @tx_rtt_seq: to decay RTTVAR at most once per flight 55 * @tx_rtt_seq: to decay RTTVAR at most once per flight
56 * @tx_rpseq: last consecutive seqno 56 * @tx_rpseq: last consecutive seqno
57 * @tx_rpdupack: dupacks since rpseq 57 * @tx_rpdupack: dupacks since rpseq
58 * @tx_av_chunks: list of Ack Vectors received on current skb
58 */ 59 */
59struct ccid2_hc_tx_sock { 60struct ccid2_hc_tx_sock {
60 u32 tx_cwnd; 61 u32 tx_cwnd;
@@ -79,6 +80,7 @@ struct ccid2_hc_tx_sock {
79 int tx_rpdupack; 80 int tx_rpdupack;
80 u32 tx_last_cong; 81 u32 tx_last_cong;
81 u64 tx_high_ack; 82 u64 tx_high_ack;
83 struct list_head tx_av_chunks;
82}; 84};
83 85
84static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc) 86static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
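
For context on how the TX side consumes these cells: ccid2_hc_tx_packet_recv() above maps each cell onto the seqno range [SUB48(ackno, runlen), ackno] and then steps ackno to one below that range. A compact sketch of that walk; SUB48 is simplified here to masked mod-2^48 subtraction, which is equivalent in effect to the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define SEQ48_MASK 0xFFFFFFFFFFFFULL
#define SUB48(a, x) (((a) - (x)) & SEQ48_MASK)	/* mod-2^48 subtraction */

int main(void)
{
	/* toy vector: 3 received, 64 lost, 10 received (states as in ackvec.h) */
	uint8_t vec[] = { 0x00 | 2, 0xC0 | 63, 0x00 | 9 };
	uint64_t ackno = 100000;	/* seqno the first cell starts at */

	for (unsigned i = 0; i < sizeof(vec); i++) {
		uint8_t  runlen = vec[i] & 0x3F;
		uint64_t end_rl = SUB48(ackno, runlen);

		printf("cell %u: state=0x%02x seqnos %llu..%llu\n", i,
		       vec[i] & 0xC0, (unsigned long long)end_rl,
		       (unsigned long long)ackno);
		ackno = SUB48(end_rl, 1);	/* next cell is one seqno older */
	}
	return 0;
}
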
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a8ed459508b2..45087052d894 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -93,9 +93,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
93#define DCCP_FALLBACK_RTT (USEC_PER_SEC / 5) 93#define DCCP_FALLBACK_RTT (USEC_PER_SEC / 5)
94#define DCCP_SANE_RTT_MAX (3 * USEC_PER_SEC) 94#define DCCP_SANE_RTT_MAX (3 * USEC_PER_SEC)
95 95
96/* Maximal interval between probes for local resources. */
97#define DCCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ / 2U))
98
99/* sysctl variables for DCCP */ 96/* sysctl variables for DCCP */
100extern int sysctl_dccp_request_retries; 97extern int sysctl_dccp_request_retries;
101extern int sysctl_dccp_retries1; 98extern int sysctl_dccp_retries1;
@@ -203,12 +200,7 @@ struct dccp_mib {
203DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); 200DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
204#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) 201#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
205#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field) 202#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field)
206#define DCCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(dccp_statistics, field)
207#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field) 203#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field)
208#define DCCP_ADD_STATS_BH(field, val) \
209 SNMP_ADD_STATS_BH(dccp_statistics, field, val)
210#define DCCP_ADD_STATS_USER(field, val) \
211 SNMP_ADD_STATS_USER(dccp_statistics, field, val)
212 204
213/* 205/*
214 * Checksumming routines 206 * Checksumming routines
@@ -243,6 +235,19 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
243extern void dccp_send_sync(struct sock *sk, const u64 seq, 235extern void dccp_send_sync(struct sock *sk, const u64 seq,
244 const enum dccp_pkt_type pkt_type); 236 const enum dccp_pkt_type pkt_type);
245 237
238/*
239 * TX Packet Dequeueing Interface
240 */
241extern void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
242extern bool dccp_qpolicy_full(struct sock *sk);
243extern void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
244extern struct sk_buff *dccp_qpolicy_top(struct sock *sk);
245extern struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
246extern bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
247
248/*
249 * TX Packet Output and TX Timers
250 */
246extern void dccp_write_xmit(struct sock *sk); 251extern void dccp_write_xmit(struct sock *sk);
247extern void dccp_write_space(struct sock *sk); 252extern void dccp_write_space(struct sock *sk);
248extern void dccp_flush_write_queue(struct sock *sk, long *time_budget); 253extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
@@ -457,12 +462,15 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
457 dp->dccps_awh = dp->dccps_gss; 462 dp->dccps_awh = dp->dccps_gss;
458} 463}
459 464
465static inline int dccp_ackvec_pending(const struct sock *sk)
466{
467 return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
468 !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
469}
470
460static inline int dccp_ack_pending(const struct sock *sk) 471static inline int dccp_ack_pending(const struct sock *sk)
461{ 472{
462 const struct dccp_sock *dp = dccp_sk(sk); 473 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
463 return (dp->dccps_hc_rx_ackvec != NULL &&
464 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
465 inet_csk_ack_scheduled(sk);
466} 474}
467 475
468extern int dccp_feat_finalise_settings(struct dccp_sock *dp); 476extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
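
dccp_ackvec_pending() above builds on the emptiness convention from ackvec.h: head == tail means "empty" while the overflow flag is clear, and "completely full (wrapped)" once it is set. A toy model of that convention, with a demo-sized ring and a local stand-in for __ackvec_idx_sub():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUFLEN 8	/* toy size; the real buffer is DCCPAV_MAX_ACKVEC_LEN bytes */

struct ring {
	uint16_t head, tail;	/* head grows downwards, as in the patch */
	bool overflow;
};

static bool ring_empty(const struct ring *r)
{
	return !r->overflow && r->head == r->tail;
}

static uint16_t idx_sub(uint16_t i, uint16_t n)	/* like __ackvec_idx_sub */
{
	return (i + BUFLEN - n) % BUFLEN;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .overflow = false };

	printf("empty at start: %d\n", ring_empty(&r));
	for (int i = 0; i < BUFLEN; i++)	/* claim every slot */
		r.head = idx_sub(r.head, 1);
	r.overflow = (r.head == r.tail);	/* wrapped around: now full */
	printf("empty after wrap: %d (overflow=%d)\n", ring_empty(&r), r.overflow);
	return 0;
}
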
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 265985370fa1..15af247ea007 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -160,13 +160,15 @@ static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
160 dccp_time_wait(sk, DCCP_TIME_WAIT, 0); 160 dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
161} 161}
162 162
163static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) 163static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
164{ 164{
165 struct dccp_sock *dp = dccp_sk(sk); 165 struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
166 166
167 if (dp->dccps_hc_rx_ackvec != NULL) 167 if (av == NULL)
168 dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk, 168 return;
169 DCCP_SKB_CB(skb)->dccpd_ack_seq); 169 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
170 dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
171 dccp_ackvec_input(av, skb);
170} 172}
171 173
172static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb) 174static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
@@ -239,7 +241,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
239 dccp_update_gsr(sk, seqno); 241 dccp_update_gsr(sk, seqno);
240 242
241 if (dh->dccph_type != DCCP_PKT_SYNC && 243 if (dh->dccph_type != DCCP_PKT_SYNC &&
242 (ackno != DCCP_PKT_WITHOUT_ACK_SEQ)) 244 ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
245 after48(ackno, dp->dccps_gar))
243 dp->dccps_gar = ackno; 246 dp->dccps_gar = ackno;
244 } else { 247 } else {
245 unsigned long now = jiffies; 248 unsigned long now = jiffies;
@@ -365,22 +368,13 @@ discard:
365int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
366 const struct dccp_hdr *dh, const unsigned len) 369 const struct dccp_hdr *dh, const unsigned len)
367{ 370{
368 struct dccp_sock *dp = dccp_sk(sk);
369
370 if (dccp_check_seqno(sk, skb)) 371 if (dccp_check_seqno(sk, skb))
371 goto discard; 372 goto discard;
372 373
373 if (dccp_parse_options(sk, NULL, skb)) 374 if (dccp_parse_options(sk, NULL, skb))
374 return 1; 375 return 1;
375 376
376 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 377 dccp_handle_ackvec_processing(sk, skb);
377 dccp_event_ack_recv(sk, skb);
378
379 if (dp->dccps_hc_rx_ackvec != NULL &&
380 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
381 DCCP_SKB_CB(skb)->dccpd_seq,
382 DCCP_ACKVEC_STATE_RECEIVED))
383 goto discard;
384 dccp_deliver_input_to_ccids(sk, skb); 378 dccp_deliver_input_to_ccids(sk, skb);
385 379
386 return __dccp_rcv_established(sk, skb, dh, len); 380 return __dccp_rcv_established(sk, skb, dh, len);
@@ -632,15 +626,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
632 if (dccp_parse_options(sk, NULL, skb)) 626 if (dccp_parse_options(sk, NULL, skb))
633 return 1; 627 return 1;
634 628
635 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 629 dccp_handle_ackvec_processing(sk, skb);
636 dccp_event_ack_recv(sk, skb);
637
638 if (dp->dccps_hc_rx_ackvec != NULL &&
639 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
640 DCCP_SKB_CB(skb)->dccpd_seq,
641 DCCP_ACKVEC_STATE_RECEIVED))
642 goto discard;
643
644 dccp_deliver_input_to_ccids(sk, skb); 630 dccp_deliver_input_to_ccids(sk, skb);
645 } 631 }
646 632
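
The dccps_gar hunk above only advances the variable when after48(ackno, dp->dccps_gar) holds, so late-arriving old acknowledgments can no longer move it backwards. A sketch of the mod-2^48 comparison this relies on; the shift-by-16 trick mirrors dccp_delta_seqno() and assumes arithmetic right shift of signed values (true for gcc/clang):

#include <stdint.h>
#include <stdio.h>

static int64_t delta_seqno(uint64_t s1, uint64_t s2)	/* s2 - s1, mod 2^48 */
{
	/* shift both 48-bit values into the top of a 64-bit word so the
	 * sign of the wrapped difference falls out of ordinary signed math */
	return ((int64_t)(s2 << 16) - (int64_t)(s1 << 16)) >> 16;
}

#define after48(s1, s2) (delta_seqno((s2), (s1)) > 0)

int main(void)
{
	uint64_t gar = 0xFFFFFFFFFFFFULL;	/* highest 48-bit seqno          */
	uint64_t ack = 5;			/* small, but newer after wrap   */

	printf("delta = %lld\n", (long long)delta_seqno(gar, ack));	/* 6 */
	printf("after48(ack, gar) = %d\n", after48(ack, gar));		/* 1 */
	return 0;
}
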
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 3f69ea114829..45a434f94169 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -462,15 +462,12 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
462{ 462{
463 struct rtable *rt; 463 struct rtable *rt;
464 struct flowi fl = { .oif = skb_rtable(skb)->rt_iif, 464 struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
465 .nl_u = { .ip4_u = 465 .fl4_dst = ip_hdr(skb)->saddr,
466 { .daddr = ip_hdr(skb)->saddr, 466 .fl4_src = ip_hdr(skb)->daddr,
467 .saddr = ip_hdr(skb)->daddr, 467 .fl4_tos = RT_CONN_FLAGS(sk),
468 .tos = RT_CONN_FLAGS(sk) } },
469 .proto = sk->sk_protocol, 468 .proto = sk->sk_protocol,
470 .uli_u = { .ports = 469 .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
471 { .sport = dccp_hdr(skb)->dccph_dport, 470 .fl_ip_dport = dccp_hdr(skb)->dccph_sport
472 .dport = dccp_hdr(skb)->dccph_sport }
473 }
474 }; 471 };
475 472
476 security_skb_classify_flow(skb, &fl); 473 security_skb_classify_flow(skb, &fl);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index cd3061813009..f06ffcfc8d71 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -54,7 +54,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
54 struct dccp_sock *dp = dccp_sk(sk); 54 struct dccp_sock *dp = dccp_sk(sk);
55 const struct dccp_hdr *dh = dccp_hdr(skb); 55 const struct dccp_hdr *dh = dccp_hdr(skb);
56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; 56 const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
57 u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
58 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); 57 unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
59 unsigned char *opt_ptr = options; 58 unsigned char *opt_ptr = options;
60 const unsigned char *opt_end = (unsigned char *)dh + 59 const unsigned char *opt_end = (unsigned char *)dh +
@@ -129,14 +128,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
129 if (rc) 128 if (rc)
130 goto out_featneg_failed; 129 goto out_featneg_failed;
131 break; 130 break;
132 case DCCPO_ACK_VECTOR_0:
133 case DCCPO_ACK_VECTOR_1:
134 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */
135 break;
136 if (dp->dccps_hc_rx_ackvec != NULL &&
137 dccp_ackvec_parse(sk, skb, &ackno, opt, value, len))
138 goto out_invalid_option;
139 break;
140 case DCCPO_TIMESTAMP: 131 case DCCPO_TIMESTAMP:
141 if (len != 4) 132 if (len != 4)
142 goto out_invalid_option; 133 goto out_invalid_option;
@@ -226,6 +217,16 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
226 pkt_type, opt, value, len)) 217 pkt_type, opt, value, len))
227 goto out_invalid_option; 218 goto out_invalid_option;
228 break; 219 break;
220 case DCCPO_ACK_VECTOR_0:
221 case DCCPO_ACK_VECTOR_1:
222 if (dccp_packet_without_ack(skb)) /* RFC 4340, 11.4 */
223 break;
224 /*
225 * Ack vectors are processed by the TX CCID if it is
226 * interested. The RX CCID need not parse Ack Vectors,
227 * since it is only interested in clearing old state.
228 * Fall through.
229 */
229 case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC: 230 case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
230 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk, 231 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
231 pkt_type, opt, value, len)) 232 pkt_type, opt, value, len))
@@ -340,6 +341,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
340 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4; 341 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
341} 342}
342 343
344/* FIXME: This function is currently not used anywhere */
343int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time) 345int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
344{ 346{
345 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 347 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +426,83 @@ static int dccp_insert_option_timestamp_echo(struct dccp_sock *dp,
424 return 0; 426 return 0;
425} 427}
426 428
429static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
430{
431 struct dccp_sock *dp = dccp_sk(sk);
432 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
433 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
434 const u16 buflen = dccp_ackvec_buflen(av);
 435 /* Figure out how many options we need to represent the ackvec */
436 const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
437 u16 len = buflen + 2 * nr_opts;
438 u8 i, nonce = 0;
439 const unsigned char *tail, *from;
440 unsigned char *to;
441
442 if (dcb->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
443 DCCP_WARN("Lacking space for %u bytes on %s packet\n", len,
444 dccp_packet_name(dcb->dccpd_type));
445 return -1;
446 }
447 /*
 448 * Since Ack Vectors are variable-length, we cannot always predict
 449 * their size. To catch exceptional cases where the space is running out
450 * on the skb, a separate Sync is scheduled to carry the Ack Vector.
451 */
452 if (len > DCCPAV_MIN_OPTLEN &&
453 len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
454 DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), "
455 "MPS=%u ==> reduce payload size?\n", len, skb->len,
456 dcb->dccpd_opt_len, dp->dccps_mss_cache);
457 dp->dccps_sync_scheduled = 1;
458 return 0;
459 }
460 dcb->dccpd_opt_len += len;
461
462 to = skb_push(skb, len);
463 len = buflen;
464 from = av->av_buf + av->av_buf_head;
465 tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
466
467 for (i = 0; i < nr_opts; ++i) {
468 int copylen = len;
469
470 if (len > DCCP_SINGLE_OPT_MAXLEN)
471 copylen = DCCP_SINGLE_OPT_MAXLEN;
472
473 /*
474 * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
475 * its type; ack_nonce is the sum of all individual buf_nonce's.
476 */
477 nonce ^= av->av_buf_nonce[i];
478
479 *to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
480 *to++ = copylen + 2;
481
482 /* Check if buf_head wraps */
483 if (from + copylen > tail) {
484 const u16 tailsize = tail - from;
485
486 memcpy(to, from, tailsize);
487 to += tailsize;
488 len -= tailsize;
489 copylen -= tailsize;
490 from = av->av_buf;
491 }
492
493 memcpy(to, from, copylen);
494 from += copylen;
495 to += copylen;
496 len -= copylen;
497 }
498 /*
499 * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
500 */
501 if (dccp_ackvec_update_records(av, dcb->dccpd_seq, nonce))
502 return -ENOBUFS;
503 return 0;
504}
505
427/** 506/**
428 * dccp_insert_option_mandatory - Mandatory option (5.8.2) 507 * dccp_insert_option_mandatory - Mandatory option (5.8.2)
429 * Note that since we are using skb_push, this function needs to be called 508 * Note that since we are using skb_push, this function needs to be called
@@ -519,8 +598,7 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
519 if (dccp_insert_option_timestamp(skb)) 598 if (dccp_insert_option_timestamp(skb))
520 return -1; 599 return -1;
521 600
522 } else if (dp->dccps_hc_rx_ackvec != NULL && 601 } else if (dccp_ackvec_pending(sk) &&
523 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
524 dccp_insert_option_ackvec(sk, skb)) { 602 dccp_insert_option_ackvec(sk, skb)) {
525 return -1; 603 return -1;
526 } 604 }
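
The two memcpy() calls in dccp_insert_option_ackvec() above handle the case where the live span of the circular buffer runs past the physical end of the array. A standalone sketch of that wraparound copy with a demo-sized ring:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING 16

int main(void)
{
	uint8_t ring[RING], out[RING];
	unsigned head = 12, len = 8;	/* live span wraps: 12..15 + 0..3 */
	const uint8_t *from = ring + head, *tail = ring + RING;
	uint8_t *to = out;

	for (unsigned i = 0; i < RING; i++)	/* label each slot for the demo */
		ring[i] = i;

	if (from + len > tail) {	/* first part: up to the array end */
		unsigned tailsize = tail - from;

		memcpy(to, from, tailsize);
		to += tailsize;
		len -= tailsize;
		from = ring;		/* second part restarts at index 0 */
	}
	memcpy(to, from, len);

	for (unsigned i = 0; i < 8; i++)
		printf("%u ", out[i]);	/* prints: 12 13 14 15 0 1 2 3 */
	printf("\n");
	return 0;
}
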
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 45b91853f5ae..784d30210543 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -242,7 +242,7 @@ static void dccp_xmit_packet(struct sock *sk)
242{ 242{
243 int err, len; 243 int err, len;
244 struct dccp_sock *dp = dccp_sk(sk); 244 struct dccp_sock *dp = dccp_sk(sk);
245 struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue); 245 struct sk_buff *skb = dccp_qpolicy_pop(sk);
246 246
247 if (unlikely(skb == NULL)) 247 if (unlikely(skb == NULL))
248 return; 248 return;
@@ -283,6 +283,15 @@ static void dccp_xmit_packet(struct sock *sk)
283 * any local drop will eventually be reported via receiver feedback. 283 * any local drop will eventually be reported via receiver feedback.
284 */ 284 */
285 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); 285 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
286
287 /*
288 * If the CCID needs to transfer additional header options out-of-band
289 * (e.g. Ack Vectors or feature-negotiation options), it activates this
290 * flag to schedule a Sync. The Sync will automatically incorporate all
291 * currently pending header options, thus clearing the backlog.
292 */
293 if (dp->dccps_sync_scheduled)
294 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
286} 295}
287 296
288/** 297/**
@@ -336,7 +345,7 @@ void dccp_write_xmit(struct sock *sk)
336 struct dccp_sock *dp = dccp_sk(sk); 345 struct dccp_sock *dp = dccp_sk(sk);
337 struct sk_buff *skb; 346 struct sk_buff *skb;
338 347
339 while ((skb = skb_peek(&sk->sk_write_queue))) { 348 while ((skb = dccp_qpolicy_top(sk))) {
340 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 349 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
341 350
342 switch (ccid_packet_dequeue_eval(rc)) { 351 switch (ccid_packet_dequeue_eval(rc)) {
@@ -350,8 +359,7 @@ void dccp_write_xmit(struct sock *sk)
350 dccp_xmit_packet(sk); 359 dccp_xmit_packet(sk);
351 break; 360 break;
352 case CCID_PACKET_ERR: 361 case CCID_PACKET_ERR:
353 skb_dequeue(&sk->sk_write_queue); 362 dccp_qpolicy_drop(sk, skb);
354 kfree_skb(skb);
355 dccp_pr_debug("packet discarded due to err=%d\n", rc); 363 dccp_pr_debug("packet discarded due to err=%d\n", rc);
356 } 364 }
357 } 365 }
@@ -636,6 +644,12 @@ void dccp_send_sync(struct sock *sk, const u64 ackno,
636 DCCP_SKB_CB(skb)->dccpd_type = pkt_type; 644 DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
637 DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno; 645 DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
638 646
647 /*
648 * Clear the flag in case the Sync was scheduled for out-of-band data,
649 * such as carrying a long Ack Vector.
650 */
651 dccp_sk(sk)->dccps_sync_scheduled = 0;
652
639 dccp_transmit_skb(sk, skb); 653 dccp_transmit_skb(sk, skb);
640} 654}
641 655
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ef343d53fcea..152975d942d9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -185,6 +185,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
185 dp->dccps_role = DCCP_ROLE_UNDEFINED; 185 dp->dccps_role = DCCP_ROLE_UNDEFINED;
186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; 186 dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1; 187 dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
188 dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
188 189
189 dccp_init_xmit_timers(sk); 190 dccp_init_xmit_timers(sk);
190 191
@@ -532,6 +533,20 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
532 case DCCP_SOCKOPT_RECV_CSCOV: 533 case DCCP_SOCKOPT_RECV_CSCOV:
533 err = dccp_setsockopt_cscov(sk, val, true); 534 err = dccp_setsockopt_cscov(sk, val, true);
534 break; 535 break;
536 case DCCP_SOCKOPT_QPOLICY_ID:
537 if (sk->sk_state != DCCP_CLOSED)
538 err = -EISCONN;
539 else if (val < 0 || val >= DCCPQ_POLICY_MAX)
540 err = -EINVAL;
541 else
542 dp->dccps_qpolicy = val;
543 break;
544 case DCCP_SOCKOPT_QPOLICY_TXQLEN:
545 if (val < 0)
546 err = -EINVAL;
547 else
548 dp->dccps_tx_qlen = val;
549 break;
535 default: 550 default:
536 err = -ENOPROTOOPT; 551 err = -ENOPROTOOPT;
537 break; 552 break;
@@ -639,6 +654,12 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
639 case DCCP_SOCKOPT_RECV_CSCOV: 654 case DCCP_SOCKOPT_RECV_CSCOV:
640 val = dp->dccps_pcrlen; 655 val = dp->dccps_pcrlen;
641 break; 656 break;
657 case DCCP_SOCKOPT_QPOLICY_ID:
658 val = dp->dccps_qpolicy;
659 break;
660 case DCCP_SOCKOPT_QPOLICY_TXQLEN:
661 val = dp->dccps_tx_qlen;
662 break;
642 case 128 ... 191: 663 case 128 ... 191:
643 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, 664 return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
644 len, (u32 __user *)optval, optlen); 665 len, (u32 __user *)optval, optlen);
@@ -681,6 +702,47 @@ int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
681EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); 702EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
682#endif 703#endif
683 704
705static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
706{
707 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
708
709 /*
710 * Assign an (opaque) qpolicy priority value to skb->priority.
711 *
 712 * The skb->priority is normally used for the SO_PRIORITY option, which
 712 * We are overloading this skb field for use with the qpolicy subsystem.
 713 * The skb->priority is normally used for the SO_PRIORITY option, which
714 * is initialised from sk_priority. Since the assignment of sk_priority
715 * to skb->priority happens later (on layer 3), we overload this field
716 * for use with queueing priorities as long as the skb is on layer 4.
717 * The default priority value (if nothing is set) is 0.
718 */
719 skb->priority = 0;
720
721 for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
722
723 if (!CMSG_OK(msg, cmsg))
724 return -EINVAL;
725
726 if (cmsg->cmsg_level != SOL_DCCP)
727 continue;
728
729 if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
730 !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
731 return -EINVAL;
732
733 switch (cmsg->cmsg_type) {
734 case DCCP_SCM_PRIORITY:
735 if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
736 return -EINVAL;
737 skb->priority = *(__u32 *)CMSG_DATA(cmsg);
738 break;
739 default:
740 return -EINVAL;
741 }
742 }
743 return 0;
744}
745
684int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 746int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
685 size_t len) 747 size_t len)
686{ 748{
@@ -696,8 +758,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
696 758
697 lock_sock(sk); 759 lock_sock(sk);
698 760
699 if (sysctl_dccp_tx_qlen && 761 if (dccp_qpolicy_full(sk)) {
700 (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
701 rc = -EAGAIN; 762 rc = -EAGAIN;
702 goto out_release; 763 goto out_release;
703 } 764 }
@@ -725,7 +786,11 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
725 if (rc != 0) 786 if (rc != 0)
726 goto out_discard; 787 goto out_discard;
727 788
728 skb_queue_tail(&sk->sk_write_queue, skb); 789 rc = dccp_msghdr_parse(msg, skb);
790 if (rc != 0)
791 goto out_discard;
792
793 dccp_qpolicy_push(sk, skb);
729 /* 794 /*
730 * The xmit_timer is set if the TX CCID is rate-based and will expire 795 * The xmit_timer is set if the TX CCID is rate-based and will expire
731 * when congestion control permits to release further packets into the 796 * when congestion control permits to release further packets into the
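
A sketch of how an application could drive dccp_msghdr_parse() above from userspace: select the priority policy once, then tag each packet with a DCCP_SCM_PRIORITY control message. The numeric values of the DCCP_SOCKOPT_QPOLICY_* and DCCP_SCM_* constants below are assumptions (they come from this patch's header changes, which are not part of this hunk); error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif
/* assumed values, from this patch's uapi additions (not shown here) */
#define DCCP_SOCKOPT_QPOLICY_ID 16
#define DCCPQ_POLICY_PRIO        1
#define DCCP_SCM_PRIORITY        1

/* Select the priority dequeueing policy once, on a not-yet-connected socket. */
static int use_prio_policy(int fd)
{
	int policy = DCCPQ_POLICY_PRIO;

	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
			  &policy, sizeof(policy));
}

static ssize_t send_with_prio(int fd, const void *data, size_t len, uint32_t prio)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,     .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* exactly what dccp_msghdr_parse() checks for: SOL_DCCP level,
	 * DCCP_SCM_PRIORITY type, and a 32-bit payload */
	cmsg->cmsg_level = SOL_DCCP;
	cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

	return sendmsg(fd, &msg, 0);
}

Note that the policy has to be selected while the socket is still closed; the setsockopt hunk above returns -EISCONN otherwise.
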
diff --git a/net/dccp/qpolicy.c b/net/dccp/qpolicy.c
new file mode 100644
index 000000000000..63c30bfa4703
--- /dev/null
+++ b/net/dccp/qpolicy.c
@@ -0,0 +1,137 @@
1/*
2 * net/dccp/qpolicy.c
3 *
4 * Policy-based packet dequeueing interface for DCCP.
5 *
6 * Copyright (c) 2008 Tomasz Grobelny <tomasz@grobelny.oswiecenia.net>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License v2
10 * as published by the Free Software Foundation.
11 */
12#include "dccp.h"
13
14/*
15 * Simple Dequeueing Policy:
16 * If tx_qlen is different from 0, enqueue up to tx_qlen elements.
17 */
18static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
19{
20 skb_queue_tail(&sk->sk_write_queue, skb);
21}
22
23static bool qpolicy_simple_full(struct sock *sk)
24{
25 return dccp_sk(sk)->dccps_tx_qlen &&
26 sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
27}
28
29static struct sk_buff *qpolicy_simple_top(struct sock *sk)
30{
31 return skb_peek(&sk->sk_write_queue);
32}
33
34/*
35 * Priority-based Dequeueing Policy:
36 * If tx_qlen is different from 0 and the queue has reached its upper bound
37 * of tx_qlen elements, replace older packets lowest-priority-first.
38 */
39static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk)
40{
41 struct sk_buff *skb, *best = NULL;
42
43 skb_queue_walk(&sk->sk_write_queue, skb)
44 if (best == NULL || skb->priority > best->priority)
45 best = skb;
46 return best;
47}
48
49static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk)
50{
51 struct sk_buff *skb, *worst = NULL;
52
53 skb_queue_walk(&sk->sk_write_queue, skb)
54 if (worst == NULL || skb->priority < worst->priority)
55 worst = skb;
56 return worst;
57}
58
59static bool qpolicy_prio_full(struct sock *sk)
60{
61 if (qpolicy_simple_full(sk))
62 dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk));
63 return false;
64}
65
66/**
67 * struct dccp_qpolicy_operations - TX Packet Dequeueing Interface
68 * @push: add a new @skb to the write queue
69 * @full: indicates that no more packets will be admitted
70 * @top: peeks at whatever the queueing policy defines as its `top'
71 */
72static struct dccp_qpolicy_operations {
73 void (*push) (struct sock *sk, struct sk_buff *skb);
74 bool (*full) (struct sock *sk);
75 struct sk_buff* (*top) (struct sock *sk);
76 __be32 params;
77
78} qpol_table[DCCPQ_POLICY_MAX] = {
79 [DCCPQ_POLICY_SIMPLE] = {
80 .push = qpolicy_simple_push,
81 .full = qpolicy_simple_full,
82 .top = qpolicy_simple_top,
83 .params = 0,
84 },
85 [DCCPQ_POLICY_PRIO] = {
86 .push = qpolicy_simple_push,
87 .full = qpolicy_prio_full,
88 .top = qpolicy_prio_best_skb,
89 .params = DCCP_SCM_PRIORITY,
90 },
91};
92
93/*
94 * Externally visible interface
95 */
96void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
97{
98 qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
99}
100
101bool dccp_qpolicy_full(struct sock *sk)
102{
103 return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk);
104}
105
106void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
107{
108 if (skb != NULL) {
109 skb_unlink(skb, &sk->sk_write_queue);
110 kfree_skb(skb);
111 }
112}
113
114struct sk_buff *dccp_qpolicy_top(struct sock *sk)
115{
116 return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk);
117}
118
119struct sk_buff *dccp_qpolicy_pop(struct sock *sk)
120{
121 struct sk_buff *skb = dccp_qpolicy_top(sk);
122
123 if (skb != NULL) {
124 /* Clear any skb fields that we used internally */
125 skb->priority = 0;
126 skb_unlink(skb, &sk->sk_write_queue);
127 }
128 return skb;
129}
130
131bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param)
132{
133 /* check if exactly one bit is set */
134 if (!param || (param & (param - 1)))
135 return false;
136 return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param;
137}
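
dccp_qpolicy_param_ok() above uses the classic single-bit test: a nonzero x has exactly one bit set iff x & (x - 1) == 0, since subtracting 1 clears the lowest set bit and sets everything below it. A minimal demonstration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool one_bit_set(uint32_t x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	uint32_t tests[] = { 0, 1, 2, 3, 4, 6, 8 };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%u -> %d\n", tests[i], one_bit_set(tests[i]));
	return 0;	/* 0, 3 and 6 print 0; the powers of two print 1 */
}
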
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d6b93d19790f..0065e7e14af4 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops;
155static DEFINE_RWLOCK(dn_hash_lock); 155static DEFINE_RWLOCK(dn_hash_lock);
156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; 156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_long_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
@@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1556 if (r_len > sizeof(struct linkinfo_dn)) 1556 if (r_len > sizeof(struct linkinfo_dn))
1557 r_len = sizeof(struct linkinfo_dn); 1557 r_len = sizeof(struct linkinfo_dn);
1558 1558
1559 memset(&link, 0, sizeof(link));
1560
1559 switch(sock->state) { 1561 switch(sock->state) {
1560 case SS_CONNECTING: 1562 case SS_CONNECTING:
1561 link.idn_linkstate = LL_CONNECTING; 1563 link.idn_linkstate = LL_CONNECTING;
@@ -1848,7 +1850,7 @@ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1848{ 1850{
1849 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1851 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1850 if (dev) { 1852 if (dev) {
1851 struct dn_dev *dn_db = dev->dn_ptr; 1853 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1852 mtu -= LL_RESERVED_SPACE(dev); 1854 mtu -= LL_RESERVED_SPACE(dev);
1853 if (dn_db->use_long) 1855 if (dn_db->use_long)
1854 mtu -= 21; 1856 mtu -= 21;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4c409b46aa35..0ba15633c418 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -267,7 +267,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
267 if (table->extra1 == NULL) 267 if (table->extra1 == NULL)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 dn_db = dev->dn_ptr; 270 dn_db = rcu_dereference_raw(dev->dn_ptr);
271 old = dn_db->parms.forwarding; 271 old = dn_db->parms.forwarding;
272 272
273 err = proc_dointvec(table, write, buffer, lenp, ppos); 273 err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
332 return ifa; 332 return ifa;
333} 333}
334 334
335static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa) 335static void dn_dev_free_ifa_rcu(struct rcu_head *head)
336{ 336{
337 kfree(ifa); 337 kfree(container_of(head, struct dn_ifaddr, rcu));
338} 338}
339 339
340static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy) 340static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
341{ 341{
342 struct dn_ifaddr *ifa1 = *ifap; 342 call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
343}
344
345static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
346{
347 struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
343 unsigned char mac_addr[6]; 348 unsigned char mac_addr[6];
344 struct net_device *dev = dn_db->dev; 349 struct net_device *dev = dn_db->dev;
345 350
@@ -373,7 +378,9 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
373 ASSERT_RTNL(); 378 ASSERT_RTNL();
374 379
375 /* Check for duplicates */ 380 /* Check for duplicates */
376 for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 381 for (ifa1 = rtnl_dereference(dn_db->ifa_list);
382 ifa1 != NULL;
383 ifa1 = rtnl_dereference(ifa1->ifa_next)) {
377 if (ifa1->ifa_local == ifa->ifa_local) 384 if (ifa1->ifa_local == ifa->ifa_local)
378 return -EEXIST; 385 return -EEXIST;
379 } 386 }
@@ -386,7 +393,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
386 } 393 }
387 394
388 ifa->ifa_next = dn_db->ifa_list; 395 ifa->ifa_next = dn_db->ifa_list;
389 dn_db->ifa_list = ifa; 396 rcu_assign_pointer(dn_db->ifa_list, ifa);
390 397
391 dn_ifaddr_notify(RTM_NEWADDR, ifa); 398 dn_ifaddr_notify(RTM_NEWADDR, ifa);
392 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); 399 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
396 403
397static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) 404static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
398{ 405{
399 struct dn_dev *dn_db = dev->dn_ptr; 406 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
400 int rv; 407 int rv;
401 408
402 if (dn_db == NULL) { 409 if (dn_db == NULL) {
@@ -425,7 +432,8 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
425 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; 432 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
426 struct dn_dev *dn_db; 433 struct dn_dev *dn_db;
427 struct net_device *dev; 434 struct net_device *dev;
428 struct dn_ifaddr *ifa = NULL, **ifap = NULL; 435 struct dn_ifaddr *ifa = NULL;
436 struct dn_ifaddr __rcu **ifap = NULL;
429 int ret = 0; 437 int ret = 0;
430 438
431 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) 439 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
454 goto done; 462 goto done;
455 } 463 }
456 464
457 if ((dn_db = dev->dn_ptr) != NULL) { 465 if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
458 for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next) 466 for (ifap = &dn_db->ifa_list;
467 (ifa = rtnl_dereference(*ifap)) != NULL;
468 ifap = &ifa->ifa_next)
459 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) 469 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
460 break; 470 break;
461 } 471 }
@@ -558,7 +568,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex)
558 568
559 dev = __dev_get_by_index(&init_net, ifindex); 569 dev = __dev_get_by_index(&init_net, ifindex);
560 if (dev) 570 if (dev)
561 dn_dev = dev->dn_ptr; 571 dn_dev = rtnl_dereference(dev->dn_ptr);
562 572
563 return dn_dev; 573 return dn_dev;
564} 574}
@@ -576,7 +586,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
576 struct nlattr *tb[IFA_MAX+1]; 586 struct nlattr *tb[IFA_MAX+1];
577 struct dn_dev *dn_db; 587 struct dn_dev *dn_db;
578 struct ifaddrmsg *ifm; 588 struct ifaddrmsg *ifm;
579 struct dn_ifaddr *ifa, **ifap; 589 struct dn_ifaddr *ifa;
590 struct dn_ifaddr __rcu **ifap;
580 int err = -EINVAL; 591 int err = -EINVAL;
581 592
582 if (!net_eq(net, &init_net)) 593 if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
592 goto errout; 603 goto errout;
593 604
594 err = -EADDRNOTAVAIL; 605 err = -EADDRNOTAVAIL;
595 for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { 606 for (ifap = &dn_db->ifa_list;
607 (ifa = rtnl_dereference(*ifap)) != NULL;
608 ifap = &ifa->ifa_next) {
596 if (tb[IFA_LOCAL] && 609 if (tb[IFA_LOCAL] &&
597 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) 610 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
598 continue; 611 continue;
@@ -632,7 +645,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
632 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) 645 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
633 return -ENODEV; 646 return -ENODEV;
634 647
635 if ((dn_db = dev->dn_ptr) == NULL) { 648 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
636 dn_db = dn_dev_create(dev, &err); 649 dn_db = dn_dev_create(dev, &err);
637 if (!dn_db) 650 if (!dn_db)
638 return err; 651 return err;
@@ -748,11 +761,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
748 skip_naddr = 0; 761 skip_naddr = 0;
749 } 762 }
750 763
751 if ((dn_db = dev->dn_ptr) == NULL) 764 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
752 goto cont; 765 goto cont;
753 766
754 for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; 767 for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
755 ifa = ifa->ifa_next, dn_idx++) { 768 ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
756 if (dn_idx < skip_naddr) 769 if (dn_idx < skip_naddr)
757 continue; 770 continue;
758 771
@@ -773,21 +786,22 @@ done:
773 786
774static int dn_dev_get_first(struct net_device *dev, __le16 *addr) 787static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
775{ 788{
776 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 789 struct dn_dev *dn_db;
777 struct dn_ifaddr *ifa; 790 struct dn_ifaddr *ifa;
778 int rv = -ENODEV; 791 int rv = -ENODEV;
779 792
793 rcu_read_lock();
794 dn_db = rcu_dereference(dev->dn_ptr);
780 if (dn_db == NULL) 795 if (dn_db == NULL)
781 goto out; 796 goto out;
782 797
783 rtnl_lock(); 798 ifa = rcu_dereference(dn_db->ifa_list);
784 ifa = dn_db->ifa_list;
785 if (ifa != NULL) { 799 if (ifa != NULL) {
786 *addr = ifa->ifa_local; 800 *addr = ifa->ifa_local;
787 rv = 0; 801 rv = 0;
788 } 802 }
789 rtnl_unlock();
790out: 803out:
804 rcu_read_unlock();
791 return rv; 805 return rv;
792} 806}
793 807
@@ -823,7 +837,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
823 struct endnode_hello_message *msg; 837 struct endnode_hello_message *msg;
824 struct sk_buff *skb = NULL; 838 struct sk_buff *skb = NULL;
825 __le16 *pktlen; 839 __le16 *pktlen;
826 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 840 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
827 841
828 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) 842 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
829 return; 843 return;
@@ -889,7 +903,7 @@ static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn
889static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) 903static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
890{ 904{
891 int n; 905 int n;
892 struct dn_dev *dn_db = dev->dn_ptr; 906 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
893 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 907 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
894 struct sk_buff *skb; 908 struct sk_buff *skb;
895 size_t size; 909 size_t size;
@@ -960,7 +974,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
960 974
961static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) 975static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
962{ 976{
963 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 977 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
964 978
965 if (dn_db->parms.forwarding == 0) 979 if (dn_db->parms.forwarding == 0)
966 dn_send_endnode_hello(dev, ifa); 980 dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@ static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
998 1012
999static int dn_eth_up(struct net_device *dev) 1013static int dn_eth_up(struct net_device *dev)
1000{ 1014{
1001 struct dn_dev *dn_db = dev->dn_ptr; 1015 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1002 1016
1003 if (dn_db->parms.forwarding == 0) 1017 if (dn_db->parms.forwarding == 0)
1004 dev_mc_add(dev, dn_rt_all_end_mcast); 1018 dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@ static int dn_eth_up(struct net_device *dev)
1012 1026
1013static void dn_eth_down(struct net_device *dev) 1027static void dn_eth_down(struct net_device *dev)
1014{ 1028{
1015 struct dn_dev *dn_db = dev->dn_ptr; 1029 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1016 1030
1017 if (dn_db->parms.forwarding == 0) 1031 if (dn_db->parms.forwarding == 0)
1018 dev_mc_del(dev, dn_rt_all_end_mcast); 1032 dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@ static void dn_dev_set_timer(struct net_device *dev);
1025static void dn_dev_timer_func(unsigned long arg) 1039static void dn_dev_timer_func(unsigned long arg)
1026{ 1040{
1027 struct net_device *dev = (struct net_device *)arg; 1041 struct net_device *dev = (struct net_device *)arg;
1028 struct dn_dev *dn_db = dev->dn_ptr; 1042 struct dn_dev *dn_db;
1029 struct dn_ifaddr *ifa; 1043 struct dn_ifaddr *ifa;
1030 1044
1045 rcu_read_lock();
1046 dn_db = rcu_dereference(dev->dn_ptr);
1031 if (dn_db->t3 <= dn_db->parms.t2) { 1047 if (dn_db->t3 <= dn_db->parms.t2) {
1032 if (dn_db->parms.timer3) { 1048 if (dn_db->parms.timer3) {
1033 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 1049 for (ifa = rcu_dereference(dn_db->ifa_list);
1050 ifa;
1051 ifa = rcu_dereference(ifa->ifa_next)) {
1034 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) 1052 if (!(ifa->ifa_flags & IFA_F_SECONDARY))
1035 dn_db->parms.timer3(dev, ifa); 1053 dn_db->parms.timer3(dev, ifa);
1036 } 1054 }
@@ -1039,13 +1057,13 @@ static void dn_dev_timer_func(unsigned long arg)
1039 } else { 1057 } else {
1040 dn_db->t3 -= dn_db->parms.t2; 1058 dn_db->t3 -= dn_db->parms.t2;
1041 } 1059 }
1042 1060 rcu_read_unlock();
1043 dn_dev_set_timer(dev); 1061 dn_dev_set_timer(dev);
1044} 1062}
1045 1063
1046static void dn_dev_set_timer(struct net_device *dev) 1064static void dn_dev_set_timer(struct net_device *dev)
1047{ 1065{
1048 struct dn_dev *dn_db = dev->dn_ptr; 1066 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1049 1067
1050 if (dn_db->parms.t2 > dn_db->parms.t3) 1068 if (dn_db->parms.t2 > dn_db->parms.t3)
1051 dn_db->parms.t2 = dn_db->parms.t3; 1069 dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1077 return NULL; 1095 return NULL;
1078 1096
1079 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1097 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1080 smp_wmb(); 1098
1081 dev->dn_ptr = dn_db; 1099 rcu_assign_pointer(dev->dn_ptr, dn_db);
1082 dn_db->dev = dev; 1100 dn_db->dev = dev;
1083 init_timer(&dn_db->timer); 1101 init_timer(&dn_db->timer);
1084 1102
@@ -1086,7 +1104,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1086 1104
1087 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); 1105 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
1088 if (!dn_db->neigh_parms) { 1106 if (!dn_db->neigh_parms) {
1089 dev->dn_ptr = NULL; 1107 rcu_assign_pointer(dev->dn_ptr, NULL);
1090 kfree(dn_db); 1108 kfree(dn_db);
1091 return NULL; 1109 return NULL;
1092 } 1110 }
@@ -1125,7 +1143,7 @@ void dn_dev_up(struct net_device *dev)
1125 struct dn_ifaddr *ifa; 1143 struct dn_ifaddr *ifa;
1126 __le16 addr = decnet_address; 1144 __le16 addr = decnet_address;
1127 int maybe_default = 0; 1145 int maybe_default = 0;
1128 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 1146 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1129 1147
1130 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) 1148 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
1131 return; 1149 return;
@@ -1176,7 +1194,7 @@ void dn_dev_up(struct net_device *dev)
1176 1194
1177static void dn_dev_delete(struct net_device *dev) 1195static void dn_dev_delete(struct net_device *dev)
1178{ 1196{
1179 struct dn_dev *dn_db = dev->dn_ptr; 1197 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1180 1198
1181 if (dn_db == NULL) 1199 if (dn_db == NULL)
1182 return; 1200 return;
@@ -1204,13 +1222,13 @@ static void dn_dev_delete(struct net_device *dev)
1204 1222
1205void dn_dev_down(struct net_device *dev) 1223void dn_dev_down(struct net_device *dev)
1206{ 1224{
1207 struct dn_dev *dn_db = dev->dn_ptr; 1225 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1208 struct dn_ifaddr *ifa; 1226 struct dn_ifaddr *ifa;
1209 1227
1210 if (dn_db == NULL) 1228 if (dn_db == NULL)
1211 return; 1229 return;
1212 1230
1213 while((ifa = dn_db->ifa_list) != NULL) { 1231 while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
1214 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); 1232 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
1215 dn_dev_free_ifa(ifa); 1233 dn_dev_free_ifa(ifa);
1216 } 1234 }
@@ -1270,7 +1288,7 @@ static inline int is_dn_dev(struct net_device *dev)
1270} 1288}
1271 1289
1272static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) 1290static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
1273 __acquires(rcu) 1291 __acquires(RCU)
1274{ 1292{
1275 int i; 1293 int i;
1276 struct net_device *dev; 1294 struct net_device *dev;
@@ -1313,7 +1331,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1313} 1331}
1314 1332
1315static void dn_dev_seq_stop(struct seq_file *seq, void *v) 1333static void dn_dev_seq_stop(struct seq_file *seq, void *v)
1316 __releases(rcu) 1334 __releases(RCU)
1317{ 1335{
1318 rcu_read_unlock(); 1336 rcu_read_unlock();
1319} 1337}
@@ -1340,7 +1358,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
1340 struct net_device *dev = v; 1358 struct net_device *dev = v;
1341 char peer_buf[DN_ASCBUF_LEN]; 1359 char peer_buf[DN_ASCBUF_LEN];
1342 char router_buf[DN_ASCBUF_LEN]; 1360 char router_buf[DN_ASCBUF_LEN];
1343 struct dn_dev *dn_db = dev->dn_ptr; 1361 struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
1344 1362
1345 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" 1363 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
1346 " %04hu %03d %02x %-10s %-7s %-7s\n", 1364 " %04hu %03d %02x %-10s %-7s %-7s\n",
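dn_dev.c moves from RTNL-only access to RCU: dev->dn_ptr and the ifa_list chain gain __rcu annotations, writers publish with rcu_assign_pointer() and free through call_rcu(), lockless readers walk the chain under rcu_read_lock() with rcu_dereference(), and paths that already hold the RTNL use rtnl_dereference(). A minimal sketch of that split, with illustrative types rather than the kernel's exact code:

        struct item {                      /* illustrative, not from the patch */
                struct item __rcu *next;
                struct rcu_head rcu;
                int key;
                int value;
        };

        static struct item __rcu *head;    /* updates serialized by the RTNL */

        static void item_free_rcu(struct rcu_head *h)
        {
                kfree(container_of(h, struct item, rcu)); /* after a grace period */
        }

        static void item_add(struct item *it)   /* caller holds the RTNL */
        {
                RCU_INIT_POINTER(it->next, rtnl_dereference(head));
                rcu_assign_pointer(head, it);   /* ordered publication */
        }

        static void item_del(struct item *it, struct item __rcu **prevp)
        {
                rcu_assign_pointer(*prevp, rtnl_dereference(it->next));
                call_rcu(&it->rcu, item_free_rcu); /* readers may still hold it */
        }

        static int item_lookup(int key, int *value)   /* lockless reader */
        {
                struct item *it;
                int err = -ENOENT;

                rcu_read_lock();
                for (it = rcu_dereference(head); it; it = rcu_dereference(it->next))
                        if (it->key == key) {
                                *value = it->value;   /* copy out under RCU */
                                err = 0;
                                break;
                        }
                rcu_read_unlock();
                return err;
        }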
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 4ab96c15166d..0ef0a81bcd72 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -610,10 +610,12 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
610 /* Scan device list */ 610 /* Scan device list */
611 rcu_read_lock(); 611 rcu_read_lock();
612 for_each_netdev_rcu(&init_net, dev) { 612 for_each_netdev_rcu(&init_net, dev) {
613 dn_db = dev->dn_ptr; 613 dn_db = rcu_dereference(dev->dn_ptr);
614 if (dn_db == NULL) 614 if (dn_db == NULL)
615 continue; 615 continue;
616 for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) { 616 for (ifa2 = rcu_dereference(dn_db->ifa_list);
617 ifa2 != NULL;
618 ifa2 = rcu_dereference(ifa2->ifa_next)) {
617 if (ifa2->ifa_local == ifa->ifa_local) { 619 if (ifa2->ifa_local == ifa->ifa_local) {
618 found_it = 1; 620 found_it = 1;
619 break; 621 break;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index a085dbcf5c7f..602dade7e9a3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
391 write_lock(&neigh->lock); 391 write_lock(&neigh->lock);
392 392
393 neigh->used = jiffies; 393 neigh->used = jiffies;
394 dn_db = (struct dn_dev *)neigh->dev->dn_ptr; 394 dn_db = rcu_dereference(neigh->dev->dn_ptr);
395 395
396 if (!(neigh->nud_state & NUD_PERMANENT)) { 396 if (!(neigh->nud_state & NUD_PERMANENT)) {
397 neigh->updated = jiffies; 397 neigh->updated = jiffies;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index df0f3e54ff8a..e2e926841fe6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -93,7 +93,7 @@
93 93
94struct dn_rt_hash_bucket 94struct dn_rt_hash_bucket
95{ 95{
96 struct dn_route *chain; 96 struct dn_route __rcu *chain;
97 spinlock_t lock; 97 spinlock_t lock;
98}; 98};
99 99
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt)
157static void dn_dst_check_expire(unsigned long dummy) 157static void dn_dst_check_expire(unsigned long dummy)
158{ 158{
159 int i; 159 int i;
160 struct dn_route *rt, **rtp; 160 struct dn_route *rt;
161 struct dn_route __rcu **rtp;
161 unsigned long now = jiffies; 162 unsigned long now = jiffies;
162 unsigned long expire = 120 * HZ; 163 unsigned long expire = 120 * HZ;
163 164
164 for(i = 0; i <= dn_rt_hash_mask; i++) { 165 for (i = 0; i <= dn_rt_hash_mask; i++) {
165 rtp = &dn_rt_hash_table[i].chain; 166 rtp = &dn_rt_hash_table[i].chain;
166 167
167 spin_lock(&dn_rt_hash_table[i].lock); 168 spin_lock(&dn_rt_hash_table[i].lock);
168 while((rt=*rtp) != NULL) { 169 while ((rt = rcu_dereference_protected(*rtp,
170 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
169 if (atomic_read(&rt->dst.__refcnt) || 171 if (atomic_read(&rt->dst.__refcnt) ||
170 (now - rt->dst.lastuse) < expire) { 172 (now - rt->dst.lastuse) < expire) {
171 rtp = &rt->dst.dn_next; 173 rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy)
186 188
187static int dn_dst_gc(struct dst_ops *ops) 189static int dn_dst_gc(struct dst_ops *ops)
188{ 190{
189 struct dn_route *rt, **rtp; 191 struct dn_route *rt;
192 struct dn_route __rcu **rtp;
190 int i; 193 int i;
191 unsigned long now = jiffies; 194 unsigned long now = jiffies;
192 unsigned long expire = 10 * HZ; 195 unsigned long expire = 10 * HZ;
193 196
194 for(i = 0; i <= dn_rt_hash_mask; i++) { 197 for (i = 0; i <= dn_rt_hash_mask; i++) {
195 198
196 spin_lock_bh(&dn_rt_hash_table[i].lock); 199 spin_lock_bh(&dn_rt_hash_table[i].lock);
197 rtp = &dn_rt_hash_table[i].chain; 200 rtp = &dn_rt_hash_table[i].chain;
198 201
199 while((rt=*rtp) != NULL) { 202 while ((rt = rcu_dereference_protected(*rtp,
203 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
200 if (atomic_read(&rt->dst.__refcnt) || 204 if (atomic_read(&rt->dst.__refcnt) ||
201 (now - rt->dst.lastuse) < expire) { 205 (now - rt->dst.lastuse) < expire) {
202 rtp = &rt->dst.dn_next; 206 rtp = &rt->dst.dn_next;
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
227{ 231{
228 u32 min_mtu = 230; 232 u32 min_mtu = 230;
229 struct dn_dev *dn = dst->neighbour ? 233 struct dn_dev *dn = dst->neighbour ?
230 (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL; 234 rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
231 235
232 if (dn && dn->use_long == 0) 236 if (dn && dn->use_long == 0)
233 min_mtu -= 6; 237 min_mtu -= 6;
@@ -236,13 +240,13 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
236 240
237 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { 241 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
238 if (!(dst_metric_locked(dst, RTAX_MTU))) { 242 if (!(dst_metric_locked(dst, RTAX_MTU))) {
239 dst->metrics[RTAX_MTU-1] = mtu; 243 dst_metric_set(dst, RTAX_MTU, mtu);
240 dst_set_expires(dst, dn_rt_mtu_expires); 244 dst_set_expires(dst, dn_rt_mtu_expires);
241 } 245 }
242 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { 246 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
243 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER; 247 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
244 if (dst_metric(dst, RTAX_ADVMSS) > mss) 248 if (dst_metric(dst, RTAX_ADVMSS) > mss)
245 dst->metrics[RTAX_ADVMSS-1] = mss; 249 dst_metric_set(dst, RTAX_ADVMSS, mss);
246 } 250 }
247 } 251 }
248} 252}
@@ -267,23 +271,25 @@ static void dn_dst_link_failure(struct sk_buff *skb)
267 271
268static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 272static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
269{ 273{
270 return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | 274 return ((fl1->fld_dst ^ fl2->fld_dst) |
271 (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | 275 (fl1->fld_src ^ fl2->fld_src) |
272 (fl1->mark ^ fl2->mark) | 276 (fl1->mark ^ fl2->mark) |
273 (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | 277 (fl1->fld_scope ^ fl2->fld_scope) |
274 (fl1->oif ^ fl2->oif) | 278 (fl1->oif ^ fl2->oif) |
275 (fl1->iif ^ fl2->iif)) == 0; 279 (fl1->iif ^ fl2->iif)) == 0;
276} 280}
277 281
278static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 282static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
279{ 283{
280 struct dn_route *rth, **rthp; 284 struct dn_route *rth;
285 struct dn_route __rcu **rthp;
281 unsigned long now = jiffies; 286 unsigned long now = jiffies;
282 287
283 rthp = &dn_rt_hash_table[hash].chain; 288 rthp = &dn_rt_hash_table[hash].chain;
284 289
285 spin_lock_bh(&dn_rt_hash_table[hash].lock); 290 spin_lock_bh(&dn_rt_hash_table[hash].lock);
286 while((rth = *rthp) != NULL) { 291 while ((rth = rcu_dereference_protected(*rthp,
292 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
287 if (compare_keys(&rth->fl, &rt->fl)) { 293 if (compare_keys(&rth->fl, &rt->fl)) {
288 /* Put it first */ 294 /* Put it first */
289 *rthp = rth->dst.dn_next; 295 *rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy)
315 int i; 321 int i;
316 struct dn_route *rt, *next; 322 struct dn_route *rt, *next;
317 323
318 for(i = 0; i < dn_rt_hash_mask; i++) { 324 for (i = 0; i < dn_rt_hash_mask; i++) {
319 spin_lock_bh(&dn_rt_hash_table[i].lock); 325 spin_lock_bh(&dn_rt_hash_table[i].lock);
320 326
321 if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL) 327 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
322 goto nothing_to_declare; 328 goto nothing_to_declare;
323 329
324 for(; rt; rt=next) { 330 for(; rt; rt = next) {
325 next = rt->dst.dn_next; 331 next = rcu_dereference_raw(rt->dst.dn_next);
326 rt->dst.dn_next = NULL; 332 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
327 dst_free((struct dst_entry *)rt); 333 dst_free((struct dst_entry *)rt);
328 } 334 }
329 335
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb)
458 */ 464 */
459static int dn_route_rx_packet(struct sk_buff *skb) 465static int dn_route_rx_packet(struct sk_buff *skb)
460{ 466{
461 struct dn_skb_cb *cb = DN_SKB_CB(skb); 467 struct dn_skb_cb *cb;
462 int err; 468 int err;
463 469
464 if ((err = dn_route_input(skb)) == 0) 470 if ((err = dn_route_input(skb)) == 0)
465 return dst_input(skb); 471 return dst_input(skb);
466 472
473 cb = DN_SKB_CB(skb);
467 if (decnet_debug_level & 4) { 474 if (decnet_debug_level & 4) {
468 char *devname = skb->dev ? skb->dev->name : "???"; 475 char *devname = skb->dev ? skb->dev->name : "???";
469 struct dn_skb_cb *cb = DN_SKB_CB(skb); 476
470 printk(KERN_DEBUG 477 printk(KERN_DEBUG
471 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", 478 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
472 (int)cb->rt_flags, devname, skb->len, 479 (int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
573 struct dn_skb_cb *cb; 580 struct dn_skb_cb *cb;
574 unsigned char flags = 0; 581 unsigned char flags = 0;
575 __u16 len = le16_to_cpu(*(__le16 *)skb->data); 582 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
576 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; 583 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
577 unsigned char padlen = 0; 584 unsigned char padlen = 0;
578 585
579 if (!net_eq(dev_net(dev), &init_net)) 586 if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb)
728{ 735{
729 struct dn_skb_cb *cb = DN_SKB_CB(skb); 736 struct dn_skb_cb *cb = DN_SKB_CB(skb);
730 struct dst_entry *dst = skb_dst(skb); 737 struct dst_entry *dst = skb_dst(skb);
731 struct dn_dev *dn_db = dst->dev->dn_ptr; 738 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
732 struct dn_route *rt; 739 struct dn_route *rt;
733 struct neighbour *neigh = dst->neighbour; 740 struct neighbour *neigh = dst->neighbour;
734 int header_len; 741 int header_len;
@@ -799,8 +806,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
799 if (DN_FIB_RES_GW(*res) && 806 if (DN_FIB_RES_GW(*res) &&
800 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 807 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
801 rt->rt_gateway = DN_FIB_RES_GW(*res); 808 rt->rt_gateway = DN_FIB_RES_GW(*res);
802 memcpy(rt->dst.metrics, fi->fib_metrics, 809 dst_import_metrics(&rt->dst, fi->fib_metrics);
803 sizeof(rt->dst.metrics));
804 } 810 }
805 rt->rt_type = res->type; 811 rt->rt_type = res->type;
806 812
@@ -813,11 +819,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
813 819
814 if (dst_metric(&rt->dst, RTAX_MTU) == 0 || 820 if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
815 dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) 821 dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
816 rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; 822 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
817 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); 823 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
818 if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 || 824 if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
819 dst_metric(&rt->dst, RTAX_ADVMSS) > mss) 825 dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
820 rt->dst.metrics[RTAX_ADVMSS-1] = mss; 826 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
821 return 0; 827 return 0;
822} 828}
823 829
@@ -835,13 +841,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
835static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) 841static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
836{ 842{
837 __le16 saddr = 0; 843 __le16 saddr = 0;
838 struct dn_dev *dn_db = dev->dn_ptr; 844 struct dn_dev *dn_db;
839 struct dn_ifaddr *ifa; 845 struct dn_ifaddr *ifa;
840 int best_match = 0; 846 int best_match = 0;
841 int ret; 847 int ret;
842 848
843 read_lock(&dev_base_lock); 849 rcu_read_lock();
844 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 850 dn_db = rcu_dereference(dev->dn_ptr);
851 for (ifa = rcu_dereference(dn_db->ifa_list);
852 ifa != NULL;
853 ifa = rcu_dereference(ifa->ifa_next)) {
845 if (ifa->ifa_scope > scope) 854 if (ifa->ifa_scope > scope)
846 continue; 855 continue;
847 if (!daddr) { 856 if (!daddr) {
@@ -854,7 +863,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
854 if (best_match == 0) 863 if (best_match == 0)
855 saddr = ifa->ifa_local; 864 saddr = ifa->ifa_local;
856 } 865 }
857 read_unlock(&dev_base_lock); 866 rcu_read_unlock();
858 867
859 return saddr; 868 return saddr;
860} 869}
@@ -872,11 +881,9 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
872 881
873static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard) 882static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
874{ 883{
875 struct flowi fl = { .nl_u = { .dn_u = 884 struct flowi fl = { .fld_dst = oldflp->fld_dst,
876 { .daddr = oldflp->fld_dst, 885 .fld_src = oldflp->fld_src,
877 .saddr = oldflp->fld_src, 886 .fld_scope = RT_SCOPE_UNIVERSE,
878 .scope = RT_SCOPE_UNIVERSE,
879 } },
880 .mark = oldflp->mark, 887 .mark = oldflp->mark,
881 .iif = init_net.loopback_dev->ifindex, 888 .iif = init_net.loopback_dev->ifindex,
882 .oif = oldflp->oif }; 889 .oif = oldflp->oif };
@@ -1020,7 +1027,7 @@ source_ok:
1020 err = -ENODEV; 1027 err = -ENODEV;
1021 if (dev_out == NULL) 1028 if (dev_out == NULL)
1022 goto out; 1029 goto out;
1023 dn_db = dev_out->dn_ptr; 1030 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1024 /* Possible improvement - check all devices for local addr */ 1031 /* Possible improvement - check all devices for local addr */
1025 if (dn_dev_islocal(dev_out, fl.fld_dst)) { 1032 if (dn_dev_islocal(dev_out, fl.fld_dst)) {
1026 dev_put(dev_out); 1033 dev_put(dev_out);
@@ -1171,7 +1178,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1171 if ((flp->fld_dst == rt->fl.fld_dst) && 1178 if ((flp->fld_dst == rt->fl.fld_dst) &&
1172 (flp->fld_src == rt->fl.fld_src) && 1179 (flp->fld_src == rt->fl.fld_src) &&
1173 (flp->mark == rt->fl.mark) && 1180 (flp->mark == rt->fl.mark) &&
1174 (rt->fl.iif == 0) && 1181 dn_is_output_route(rt) &&
1175 (rt->fl.oif == flp->oif)) { 1182 (rt->fl.oif == flp->oif)) {
1176 dst_use(&rt->dst, jiffies); 1183 dst_use(&rt->dst, jiffies);
1177 rcu_read_unlock_bh(); 1184 rcu_read_unlock_bh();
@@ -1220,11 +1227,9 @@ static int dn_route_input_slow(struct sk_buff *skb)
1220 int flags = 0; 1227 int flags = 0;
1221 __le16 gateway = 0; 1228 __le16 gateway = 0;
1222 __le16 local_src = 0; 1229 __le16 local_src = 0;
1223 struct flowi fl = { .nl_u = { .dn_u = 1230 struct flowi fl = { .fld_dst = cb->dst,
1224 { .daddr = cb->dst, 1231 .fld_src = cb->src,
1225 .saddr = cb->src, 1232 .fld_scope = RT_SCOPE_UNIVERSE,
1226 .scope = RT_SCOPE_UNIVERSE,
1227 } },
1228 .mark = skb->mark, 1233 .mark = skb->mark,
1229 .iif = skb->dev->ifindex }; 1234 .iif = skb->dev->ifindex };
1230 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; 1235 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
@@ -1233,7 +1238,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1233 1238
1234 dev_hold(in_dev); 1239 dev_hold(in_dev);
1235 1240
1236 if ((dn_db = in_dev->dn_ptr) == NULL) 1241 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1237 goto out; 1242 goto out;
1238 1243
1239 /* Zero source addresses are not allowed */ 1244 /* Zero source addresses are not allowed */
@@ -1496,13 +1501,13 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1496 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); 1501 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
1497 if (rt->rt_daddr != rt->rt_gateway) 1502 if (rt->rt_daddr != rt->rt_gateway)
1498 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); 1503 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
1499 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) 1504 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
1500 goto rtattr_failure; 1505 goto rtattr_failure;
1501 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; 1506 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1502 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, 1507 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
1503 rt->dst.error) < 0) 1508 rt->dst.error) < 0)
1504 goto rtattr_failure; 1509 goto rtattr_failure;
1505 if (rt->fl.iif) 1510 if (dn_is_input_route(rt))
1506 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); 1511 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
1507 1512
1508 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1513 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1682,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1677{ 1682{
1678 struct dn_rt_cache_iter_state *s = seq->private; 1683 struct dn_rt_cache_iter_state *s = seq->private;
1679 1684
1680 rt = rt->dst.dn_next; 1685 rt = rcu_dereference_bh(rt->dst.dn_next);
1681 while(!rt) { 1686 while (!rt) {
1682 rcu_read_unlock_bh(); 1687 rcu_read_unlock_bh();
1683 if (--s->bucket < 0) 1688 if (--s->bucket < 0)
1684 break; 1689 break;
1685 rcu_read_lock_bh(); 1690 rcu_read_lock_bh();
1686 rt = dn_rt_hash_table[s->bucket].chain; 1691 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1687 } 1692 }
1688 return rcu_dereference_bh(rt); 1693 return rt;
1689} 1694}
1690 1695
1691static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1696static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
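dn_route.c gives its hash chains the same __rcu treatment. The writer-side walks in dn_dst_check_expire(), dn_dst_gc() and dn_insert_route() now load chain pointers through rcu_dereference_protected() with lockdep_is_held(), documenting (and, under lockdep, verifying) that the per-bucket spinlock makes the plain access safe. Direct stores to dst->metrics[] are replaced by the dst_metric_set()/dst_import_metrics() accessors, and the rt->fl.iif tests are wrapped in dn_is_input_route()/dn_is_output_route(). A sketch of the locked pruning pattern, with simplified node and bucket types:

        struct node {                       /* simplified stand-ins, not */
                struct node __rcu *next;    /* the real dn_route types   */
                bool dead;
        };

        struct bucket {
                struct node __rcu *chain;
                spinlock_t lock;
        };

        static void bucket_prune(struct bucket *b)
        {
                struct node __rcu **np = &b->chain;
                struct node *n;

                spin_lock_bh(&b->lock);
                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&b->lock))) != NULL) {
                        if (!n->dead) {
                                np = &n->next;          /* keep, step past */
                                continue;
                        }
                        /* unlink: concurrent readers still see a valid ->next */
                        rcu_assign_pointer(*np, rcu_dereference_protected(n->next,
                                        lockdep_is_held(&b->lock)));
                        /* the real code hands the entry to dst_free() here */
                }
                spin_unlock_bh(&b->lock);
        }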
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 48fdf10be7a1..6eb91df3c550 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -175,7 +175,7 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
175 175
176unsigned dnet_addr_type(__le16 addr) 176unsigned dnet_addr_type(__le16 addr)
177{ 177{
178 struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } }; 178 struct flowi fl = { .fld_dst = addr };
179 struct dn_fib_res res; 179 struct dn_fib_res res;
180 unsigned ret = RTN_UNICAST; 180 unsigned ret = RTN_UNICAST;
181 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); 181 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e23288..28f8b5e5f73b 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@ int decnet_log_martians = 1;
38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; 38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
39 39
40/* Reasonable defaults, I hope, based on tcp's defaults */ 40/* Reasonable defaults, I hope, based on tcp's defaults */
41int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; 41long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = {
324 .data = &sysctl_decnet_mem, 324 .data = &sysctl_decnet_mem,
325 .maxlen = sizeof(sysctl_decnet_mem), 325 .maxlen = sizeof(sysctl_decnet_mem),
326 .mode = 0644, 326 .mode = 0644,
327 .proc_handler = proc_dointvec, 327 .proc_handler = proc_doulongvec_minmax
328 }, 328 },
329 { 329 {
330 .procname = "decnet_rmem", 330 .procname = "decnet_rmem",
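The sysctl side keeps up with the long conversion: once sysctl_decnet_mem is long[], the table entry has to use proc_doulongvec_minmax, because proc_dointvec would read and write 4-byte slots of what is now an 8-byte-element array on 64-bit. The matched pair, sketched with a hypothetical example_mem knob:

        static long sysctl_example_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };

        static struct ctl_table example_table[] = {
                {
                        .procname     = "example_mem",      /* hypothetical */
                        .data         = &sysctl_example_mem,
                        .maxlen       = sizeof(sysctl_example_mem),
                        .mode         = 0644,
                        .proc_handler = proc_doulongvec_minmax, /* long[], not int[] */
                },
                { }
        };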
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
index c0ef4e71dc49..d5c13c2eb36d 100644
--- a/net/dns_resolver/Makefile
+++ b/net/dns_resolver/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o 5obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
6 6
7dns_resolver-objs := dns_key.o dns_query.o 7dns_resolver-y := dns_key.o dns_query.o
diff --git a/net/econet/Makefile b/net/econet/Makefile
index 39f0a77abdbd..05fae8be2fed 100644
--- a/net/econet/Makefile
+++ b/net/econet/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_ECONET) += econet.o 5obj-$(CONFIG_ECONET) += econet.o
6 6
7econet-objs := af_econet.o 7econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index f8c1ae4b41f0..f180371fa415 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -31,6 +31,7 @@
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/udp.h> 32#include <linux/udp.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/vmalloc.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <net/inet_common.h> 36#include <net/inet_common.h>
36#include <linux/stat.h> 37#include <linux/stat.h>
@@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
276#endif 277#endif
277#ifdef CONFIG_ECONET_AUNUDP 278#ifdef CONFIG_ECONET_AUNUDP
278 struct msghdr udpmsg; 279 struct msghdr udpmsg;
279 struct iovec iov[msg->msg_iovlen+1]; 280 struct iovec iov[2];
280 struct aunhdr ah; 281 struct aunhdr ah;
281 struct sockaddr_in udpdest; 282 struct sockaddr_in udpdest;
282 __kernel_size_t size; 283 __kernel_size_t size;
283 int i;
284 mm_segment_t oldfs; 284 mm_segment_t oldfs;
285 char *userbuf;
285#endif 286#endif
286 287
287 /* 288 /*
@@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
297 298
298 mutex_lock(&econet_mutex); 299 mutex_lock(&econet_mutex);
299 300
300 if (saddr == NULL) { 301 if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
301 struct econet_sock *eo = ec_sk(sk); 302 mutex_unlock(&econet_mutex);
302 303 return -EINVAL;
303 addr.station = eo->station; 304 }
304 addr.net = eo->net; 305 addr.station = saddr->addr.station;
305 port = eo->port; 306 addr.net = saddr->addr.net;
306 cb = eo->cb; 307 port = saddr->port;
307 } else { 308 cb = saddr->cb;
308 if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
309 mutex_unlock(&econet_mutex);
310 return -EINVAL;
311 }
312 addr.station = saddr->addr.station;
313 addr.net = saddr->addr.net;
314 port = saddr->port;
315 cb = saddr->cb;
316 }
317 309
318 /* Look for a device with the right network number. */ 310 /* Look for a device with the right network number. */
319 dev = net2dev_map[addr.net]; 311 dev = net2dev_map[addr.net];
@@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
328 } 320 }
329 } 321 }
330 322
331 if (len + 15 > dev->mtu) {
332 mutex_unlock(&econet_mutex);
333 return -EMSGSIZE;
334 }
335
336 if (dev->type == ARPHRD_ECONET) { 323 if (dev->type == ARPHRD_ECONET) {
337 /* Real hardware Econet. We're not worthy etc. */ 324 /* Real hardware Econet. We're not worthy etc. */
338#ifdef CONFIG_ECONET_NATIVE 325#ifdef CONFIG_ECONET_NATIVE
339 unsigned short proto = 0; 326 unsigned short proto = 0;
340 int res; 327 int res;
341 328
329 if (len + 15 > dev->mtu) {
330 mutex_unlock(&econet_mutex);
331 return -EMSGSIZE;
332 }
333
342 dev_hold(dev); 334 dev_hold(dev);
343 335
344 skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), 336 skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
@@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
351 343
352 eb = (struct ec_cb *)&skb->cb; 344 eb = (struct ec_cb *)&skb->cb;
353 345
354 /* BUG: saddr may be NULL */
355 eb->cookie = saddr->cookie; 346 eb->cookie = saddr->cookie;
356 eb->sec = *saddr; 347 eb->sec = *saddr;
357 eb->sent = ec_tx_done; 348 eb->sent = ec_tx_done;
@@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
415 return -ENETDOWN; /* No socket - can't send */ 406 return -ENETDOWN; /* No socket - can't send */
416 } 407 }
417 408
409 if (len > 32768) {
410 err = -E2BIG;
411 goto error;
412 }
413
418 /* Make up a UDP datagram and hand it off to some higher intellect. */ 414 /* Make up a UDP datagram and hand it off to some higher intellect. */
419 415
420 memset(&udpdest, 0, sizeof(udpdest)); 416 memset(&udpdest, 0, sizeof(udpdest));
@@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
446 442
447 /* tack our header on the front of the iovec */ 443 /* tack our header on the front of the iovec */
448 size = sizeof(struct aunhdr); 444 size = sizeof(struct aunhdr);
449 /*
450 * XXX: that is b0rken. We can't mix userland and kernel pointers
451 * in iovec, since on a lot of platforms copy_from_user() will
452 * *not* work with the kernel and userland ones at the same time,
453 * regardless of what we do with set_fs(). And we are talking about
454 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
455 * apply. Any suggestions on fixing that code? -- AV
456 */
457 iov[0].iov_base = (void *)&ah; 445 iov[0].iov_base = (void *)&ah;
458 iov[0].iov_len = size; 446 iov[0].iov_len = size;
459 for (i = 0; i < msg->msg_iovlen; i++) { 447
460 void __user *base = msg->msg_iov[i].iov_base; 448 userbuf = vmalloc(len);
461 size_t iov_len = msg->msg_iov[i].iov_len; 449 if (userbuf == NULL) {
462 /* Check it now since we switch to KERNEL_DS later. */ 450 err = -ENOMEM;
463 if (!access_ok(VERIFY_READ, base, iov_len)) { 451 goto error;
464 mutex_unlock(&econet_mutex);
465 return -EFAULT;
466 }
467 iov[i+1].iov_base = base;
468 iov[i+1].iov_len = iov_len;
469 size += iov_len;
470 } 452 }
471 453
454 iov[1].iov_base = userbuf;
455 iov[1].iov_len = len;
456 err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
457 if (err)
458 goto error_free_buf;
459
472 /* Get a skbuff (no data, just holds our cb information) */ 460 /* Get a skbuff (no data, just holds our cb information) */
473 if ((skb = sock_alloc_send_skb(sk, 0, 461 if ((skb = sock_alloc_send_skb(sk, 0,
474 msg->msg_flags & MSG_DONTWAIT, 462 msg->msg_flags & MSG_DONTWAIT,
475 &err)) == NULL) { 463 &err)) == NULL)
476 mutex_unlock(&econet_mutex); 464 goto error_free_buf;
477 return err;
478 }
479 465
480 eb = (struct ec_cb *)&skb->cb; 466 eb = (struct ec_cb *)&skb->cb;
481 467
@@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
491 udpmsg.msg_name = (void *)&udpdest; 477 udpmsg.msg_name = (void *)&udpdest;
492 udpmsg.msg_namelen = sizeof(udpdest); 478 udpmsg.msg_namelen = sizeof(udpdest);
493 udpmsg.msg_iov = &iov[0]; 479 udpmsg.msg_iov = &iov[0];
494 udpmsg.msg_iovlen = msg->msg_iovlen + 1; 480 udpmsg.msg_iovlen = 2;
495 udpmsg.msg_control = NULL; 481 udpmsg.msg_control = NULL;
496 udpmsg.msg_controllen = 0; 482 udpmsg.msg_controllen = 0;
497 udpmsg.msg_flags=0; 483 udpmsg.msg_flags=0;
@@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
499 oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */ 485 oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
500 err = sock_sendmsg(udpsock, &udpmsg, size); 486 err = sock_sendmsg(udpsock, &udpmsg, size);
501 set_fs(oldfs); 487 set_fs(oldfs);
488
489error_free_buf:
490 vfree(userbuf);
502#else 491#else
503 err = -EPROTOTYPE; 492 err = -EPROTOTYPE;
504#endif 493#endif
494 error:
505 mutex_unlock(&econet_mutex); 495 mutex_unlock(&econet_mutex);
506 496
507 return err; 497 return err;
@@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
671 err = 0; 661 err = 0;
672 switch (cmd) { 662 switch (cmd) {
673 case SIOCSIFADDR: 663 case SIOCSIFADDR:
664 if (!capable(CAP_NET_ADMIN)) {
665 err = -EPERM;
666 break;
667 }
668
674 edev = dev->ec_ptr; 669 edev = dev->ec_ptr;
675 if (edev == NULL) { 670 if (edev == NULL) {
676 /* Magic up a new one. */ 671 /* Magic up a new one. */
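The af_econet.c rewrite closes several holes at once: econet_sendmsg() now rejects a NULL or undersized sockaddr up front (the old fallback path later dereferenced saddr unconditionally), SIOCSIFADDR requires CAP_NET_ADMIN, AUN datagrams are capped at 32768 bytes, and the iovec that used to mix a kernel header with user-space pointers under set_fs(KERNEL_DS) is replaced by a vmalloc() bounce buffer filled via memcpy_fromiovec(). A condensed sketch of the bounce-buffer pattern, not the exact econet code:

        static int send_with_header(struct socket *udpsock, struct msghdr *umsg,
                                    size_t len, void *hdr, size_t hdrlen)
        {
                struct iovec iov[2];
                struct msghdr kmsg = { .msg_iov = iov, .msg_iovlen = 2 };
                mm_segment_t oldfs;
                char *buf;
                int err;

                if (len > 32768)                /* bound the allocation */
                        return -E2BIG;

                buf = vmalloc(len);
                if (!buf)
                        return -ENOMEM;

                err = memcpy_fromiovec(buf, umsg->msg_iov, len); /* user -> kernel */
                if (!err) {
                        iov[0].iov_base = hdr;  /* both entries now kernel memory */
                        iov[0].iov_len  = hdrlen;
                        iov[1].iov_base = buf;
                        iov[1].iov_len  = len;

                        oldfs = get_fs();
                        set_fs(KERNEL_DS);      /* iov[] holds no __user pointers */
                        err = sock_sendmsg(udpsock, &kmsg, hdrlen + len);
                        set_fs(oldfs);
                }
                vfree(buf);
                return err;
        }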
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 93c91b633a56..6df6ecf49708 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -52,11 +52,11 @@ struct net_device *ieee802154_get_dev(struct net *net,
52 52
53 switch (addr->addr_type) { 53 switch (addr->addr_type) {
54 case IEEE802154_ADDR_LONG: 54 case IEEE802154_ADDR_LONG:
55 rtnl_lock(); 55 rcu_read_lock();
56 dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr); 56 dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
57 if (dev) 57 if (dev)
58 dev_hold(dev); 58 dev_hold(dev);
59 rtnl_unlock(); 59 rcu_read_unlock();
60 break; 60 break;
61 case IEEE802154_ADDR_SHORT: 61 case IEEE802154_ADDR_SHORT:
62 if (addr->pan_id == 0xffff || 62 if (addr->pan_id == 0xffff ||
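dev_getbyhwaddr() required the RTNL; its replacement dev_getbyhwaddr_rcu() only needs an RCU read section, so the rtnl_lock()/rtnl_unlock() pair becomes rcu_read_lock()/rcu_read_unlock(). As in the hunk, the reference still has to be taken before the critical section ends:

        rcu_read_lock();
        dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
        if (dev)
                dev_hold(dev);  /* pin the device before dropping RCU */
        rcu_read_unlock();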
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f581f77d1097..f2b61107df6c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1148,21 +1148,13 @@ int inet_sk_rebuild_header(struct sock *sk)
1148 struct flowi fl = { 1148 struct flowi fl = {
1149 .oif = sk->sk_bound_dev_if, 1149 .oif = sk->sk_bound_dev_if,
1150 .mark = sk->sk_mark, 1150 .mark = sk->sk_mark,
1151 .nl_u = { 1151 .fl4_dst = daddr,
1152 .ip4_u = { 1152 .fl4_src = inet->inet_saddr,
1153 .daddr = daddr, 1153 .fl4_tos = RT_CONN_FLAGS(sk),
1154 .saddr = inet->inet_saddr,
1155 .tos = RT_CONN_FLAGS(sk),
1156 },
1157 },
1158 .proto = sk->sk_protocol, 1154 .proto = sk->sk_protocol,
1159 .flags = inet_sk_flowi_flags(sk), 1155 .flags = inet_sk_flowi_flags(sk),
1160 .uli_u = { 1156 .fl_ip_sport = inet->inet_sport,
1161 .ports = { 1157 .fl_ip_dport = inet->inet_dport,
1162 .sport = inet->inet_sport,
1163 .dport = inet->inet_dport,
1164 },
1165 },
1166 }; 1158 };
1167 1159
1168 security_sk_classify_flow(sk, &fl); 1160 security_sk_classify_flow(sk, &fl);
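af_inet.c, together with the DECnet, ARP, FIB and ICMP hunks in this series, rewrites flowi initializers using the flat fl4_* and fl_ip_* aliases instead of spelling out the nested .nl_u.ip4_u and .uli_u.ports unions; the layout is unchanged, only the designators get shorter. An illustrative before/after, assuming local daddr, saddr, tos, sport and dport variables:

        /* before: nested-union designators */
        struct flowi fl_old = {
                .nl_u  = { .ip4_u = { .daddr = daddr,
                                      .saddr = saddr,
                                      .tos   = tos } },
                .uli_u = { .ports = { .sport = sport,
                                      .dport = dport } },
        };

        /* after: flat aliases naming the same members */
        struct flowi fl_new = {
                .fl4_dst     = daddr,
                .fl4_src     = saddr,
                .fl4_tos     = tos,
                .fl_ip_sport = sport,
                .fl_ip_dport = dport,
        };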
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index d8e540c5b071..a2fc7b961dbc 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -433,8 +433,8 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
433 433
434static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) 434static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
435{ 435{
436 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip, 436 struct flowi fl = { .fl4_dst = sip,
437 .saddr = tip } } }; 437 .fl4_src = tip };
438 struct rtable *rt; 438 struct rtable *rt;
439 int flag = 0; 439 int flag = 0;
440 /*unsigned long now; */ 440 /*unsigned long now; */
@@ -883,7 +883,7 @@ static int arp_process(struct sk_buff *skb)
883 883
884 dont_send = arp_ignore(in_dev, sip, tip); 884 dont_send = arp_ignore(in_dev, sip, tip);
885 if (!dont_send && IN_DEV_ARPFILTER(in_dev)) 885 if (!dont_send && IN_DEV_ARPFILTER(in_dev))
886 dont_send |= arp_filter(sip, tip, dev); 886 dont_send = arp_filter(sip, tip, dev);
887 if (!dont_send) { 887 if (!dont_send) {
888 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 888 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
889 if (n) { 889 if (n) {
@@ -1017,13 +1017,14 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 if (__in_dev_get_rtnl(dev)) { 1020 if (__in_dev_get_rcu(dev)) {
1021 IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on); 1021 IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
1022 return 0; 1022 return 0;
1023 } 1023 }
1024 return -ENXIO; 1024 return -ENXIO;
1025} 1025}
1026 1026
1027/* must be called with rcu_read_lock() */
1027static int arp_req_set_public(struct net *net, struct arpreq *r, 1028static int arp_req_set_public(struct net *net, struct arpreq *r,
1028 struct net_device *dev) 1029 struct net_device *dev)
1029{ 1030{
@@ -1033,7 +1034,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1033 if (mask && mask != htonl(0xFFFFFFFF)) 1034 if (mask && mask != htonl(0xFFFFFFFF))
1034 return -EINVAL; 1035 return -EINVAL;
1035 if (!dev && (r->arp_flags & ATF_COM)) { 1036 if (!dev && (r->arp_flags & ATF_COM)) {
1036 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, 1037 dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
1037 r->arp_ha.sa_data); 1038 r->arp_ha.sa_data);
1038 if (!dev) 1039 if (!dev)
1039 return -ENODEV; 1040 return -ENODEV;
@@ -1061,8 +1062,8 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1061 if (r->arp_flags & ATF_PERM) 1062 if (r->arp_flags & ATF_PERM)
1062 r->arp_flags |= ATF_COM; 1063 r->arp_flags |= ATF_COM;
1063 if (dev == NULL) { 1064 if (dev == NULL) {
1064 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip, 1065 struct flowi fl = { .fl4_dst = ip,
1065 .tos = RTO_ONLINK } }; 1066 .fl4_tos = RTO_ONLINK };
1066 struct rtable *rt; 1067 struct rtable *rt;
1067 err = ip_route_output_key(net, &rt, &fl); 1068 err = ip_route_output_key(net, &rt, &fl);
1068 if (err != 0) 1069 if (err != 0)
@@ -1169,8 +1170,8 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1169 1170
1170 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1171 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1171 if (dev == NULL) { 1172 if (dev == NULL) {
1172 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip, 1173 struct flowi fl = { .fl4_dst = ip,
1173 .tos = RTO_ONLINK } }; 1174 .fl4_tos = RTO_ONLINK };
1174 struct rtable *rt; 1175 struct rtable *rt;
1175 err = ip_route_output_key(net, &rt, &fl); 1176 err = ip_route_output_key(net, &rt, &fl);
1176 if (err != 0) 1177 if (err != 0)
@@ -1225,10 +1226,10 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1225 if (!(r.arp_flags & ATF_NETMASK)) 1226 if (!(r.arp_flags & ATF_NETMASK))
1226 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1227 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
1227 htonl(0xFFFFFFFFUL); 1228 htonl(0xFFFFFFFFUL);
1228 rtnl_lock(); 1229 rcu_read_lock();
1229 if (r.arp_dev[0]) { 1230 if (r.arp_dev[0]) {
1230 err = -ENODEV; 1231 err = -ENODEV;
1231 dev = __dev_get_by_name(net, r.arp_dev); 1232 dev = dev_get_by_name_rcu(net, r.arp_dev);
1232 if (dev == NULL) 1233 if (dev == NULL)
1233 goto out; 1234 goto out;
1234 1235
@@ -1252,12 +1253,12 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1252 break; 1253 break;
1253 case SIOCGARP: 1254 case SIOCGARP:
1254 err = arp_req_get(&r, dev); 1255 err = arp_req_get(&r, dev);
1255 if (!err && copy_to_user(arg, &r, sizeof(r)))
1256 err = -EFAULT;
1257 break; 1256 break;
1258 } 1257 }
1259out: 1258out:
1260 rtnl_unlock(); 1259 rcu_read_unlock();
1260 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
1261 err = -EFAULT;
1261 return err; 1262 return err;
1262} 1263}
1263 1264
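arp_ioctl() trades the RTNL for rcu_read_lock() plus dev_get_by_name_rcu(), and the SIOCGARP copy_to_user() is deliberately hoisted out past rcu_read_unlock(): copying to user space may fault and sleep, which is forbidden inside an RCU read-side critical section. The resulting shape:

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, r.arp_dev);
        if (dev)
                err = arp_req_get(&r, dev);     /* non-sleeping work only */
        rcu_read_unlock();

        /* copy_to_user() may fault and sleep, so it runs outside RCU: */
        if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
                err = -EFAULT;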
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index dc94b0316b78..748cb5b337bd 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1256,6 +1256,87 @@ errout:
1256 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err); 1256 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1257} 1257}
1258 1258
1259static size_t inet_get_link_af_size(const struct net_device *dev)
1260{
1261 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1262
1263 if (!in_dev)
1264 return 0;
1265
1266 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1267}
1268
1269static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
1270{
1271 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1272 struct nlattr *nla;
1273 int i;
1274
1275 if (!in_dev)
1276 return -ENODATA;
1277
1278 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1279 if (nla == NULL)
1280 return -EMSGSIZE;
1281
1282 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1283 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1284
1285 return 0;
1286}
1287
1288static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1289 [IFLA_INET_CONF] = { .type = NLA_NESTED },
1290};
1291
1292static int inet_validate_link_af(const struct net_device *dev,
1293 const struct nlattr *nla)
1294{
1295 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1296 int err, rem;
1297
1298 if (dev && !__in_dev_get_rtnl(dev))
1299 return -EAFNOSUPPORT;
1300
1301 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1302 if (err < 0)
1303 return err;
1304
1305 if (tb[IFLA_INET_CONF]) {
1306 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1307 int cfgid = nla_type(a);
1308
1309 if (nla_len(a) < 4)
1310 return -EINVAL;
1311
1312 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1313 return -EINVAL;
1314 }
1315 }
1316
1317 return 0;
1318}
1319
1320static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1321{
1322 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1323 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1324 int rem;
1325
1326 if (!in_dev)
1327 return -EAFNOSUPPORT;
1328
1329 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1330 BUG();
1331
1332 if (tb[IFLA_INET_CONF]) {
1333 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1334 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1335 }
1336
1337 return 0;
1338}
1339
1259#ifdef CONFIG_SYSCTL 1340#ifdef CONFIG_SYSCTL
1260 1341
1261static void devinet_copy_dflt_conf(struct net *net, int i) 1342static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1349,9 +1430,9 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1349 return ret; 1430 return ret;
1350} 1431}
1351 1432
1352int ipv4_doint_and_flush(ctl_table *ctl, int write, 1433static int ipv4_doint_and_flush(ctl_table *ctl, int write,
1353 void __user *buffer, 1434 void __user *buffer,
1354 size_t *lenp, loff_t *ppos) 1435 size_t *lenp, loff_t *ppos)
1355{ 1436{
1356 int *valp = ctl->data; 1437 int *valp = ctl->data;
1357 int val = *valp; 1438 int val = *valp;
@@ -1619,6 +1700,14 @@ static __net_initdata struct pernet_operations devinet_ops = {
1619 .exit = devinet_exit_net, 1700 .exit = devinet_exit_net,
1620}; 1701};
1621 1702
1703static struct rtnl_af_ops inet_af_ops = {
1704 .family = AF_INET,
1705 .fill_link_af = inet_fill_link_af,
1706 .get_link_af_size = inet_get_link_af_size,
1707 .validate_link_af = inet_validate_link_af,
1708 .set_link_af = inet_set_link_af,
1709};
1710
1622void __init devinet_init(void) 1711void __init devinet_init(void)
1623{ 1712{
1624 register_pernet_subsys(&devinet_ops); 1713 register_pernet_subsys(&devinet_ops);
@@ -1626,6 +1715,8 @@ void __init devinet_init(void)
1626 register_gifconf(PF_INET, inet_gifconf); 1715 register_gifconf(PF_INET, inet_gifconf);
1627 register_netdevice_notifier(&ip_netdev_notifier); 1716 register_netdevice_notifier(&ip_netdev_notifier);
1628 1717
1718 rtnl_af_register(&inet_af_ops);
1719
1629 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL); 1720 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
1630 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL); 1721 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
1631 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); 1722 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
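devinet.c wires IPv4 into the new per-address-family rtnetlink hooks: rtnl_af_register() hands rtnetlink callbacks for sizing, filling, validating and applying the IFLA_INET_CONF nest on a link. Skeleton of the registration, using the callbacks defined in the hunk above:

        static struct rtnl_af_ops inet_af_ops = {
                .family           = AF_INET,
                .fill_link_af     = inet_fill_link_af,     /* dump per-AF state  */
                .get_link_af_size = inet_get_link_af_size, /* size the attribute */
                .validate_link_af = inet_validate_link_af, /* check before apply */
                .set_link_af      = inet_set_link_af,      /* commit the change  */
        };

        void __init devinet_init(void)
        {
                rtnl_af_register(&inet_af_ops);
                /* ... rtnl_register() calls as before ... */
        }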
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1c3fb0..e42a905180f0 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@ struct esp_skb_cb {
23 23
24#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) 24#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
25 25
26static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
27
26/* 28/*
27 * Allocate an AEAD request structure with extra space for SG and IV. 29 * Allocate an AEAD request structure with extra space for SG and IV.
28 * 30 *
@@ -117,25 +119,35 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
117 int blksize; 119 int blksize;
118 int clen; 120 int clen;
119 int alen; 121 int alen;
122 int plen;
123 int tfclen;
120 int nfrags; 124 int nfrags;
121 125
122 /* skb is pure payload to encrypt */ 126 /* skb is pure payload to encrypt */
123 127
124 err = -ENOMEM; 128 err = -ENOMEM;
125 129
126 /* Round to block size */
127 clen = skb->len;
128
129 esp = x->data; 130 esp = x->data;
130 aead = esp->aead; 131 aead = esp->aead;
131 alen = crypto_aead_authsize(aead); 132 alen = crypto_aead_authsize(aead);
132 133
134 tfclen = 0;
135 if (x->tfcpad) {
136 struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
137 u32 padto;
138
139 padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
140 if (skb->len < padto)
141 tfclen = padto - skb->len;
142 }
133 blksize = ALIGN(crypto_aead_blocksize(aead), 4); 143 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
134 clen = ALIGN(clen + 2, blksize); 144 clen = ALIGN(skb->len + 2 + tfclen, blksize);
135 if (esp->padlen) 145 if (esp->padlen)
136 clen = ALIGN(clen, esp->padlen); 146 clen = ALIGN(clen, esp->padlen);
147 plen = clen - skb->len - tfclen;
137 148
138 if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0) 149 err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
150 if (err < 0)
139 goto error; 151 goto error;
140 nfrags = err; 152 nfrags = err;
141 153
@@ -150,13 +162,17 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
150 162
151 /* Fill padding... */ 163 /* Fill padding... */
152 tail = skb_tail_pointer(trailer); 164 tail = skb_tail_pointer(trailer);
165 if (tfclen) {
166 memset(tail, 0, tfclen);
167 tail += tfclen;
168 }
153 do { 169 do {
154 int i; 170 int i;
155 for (i=0; i<clen-skb->len - 2; i++) 171 for (i = 0; i < plen - 2; i++)
156 tail[i] = i + 1; 172 tail[i] = i + 1;
157 } while (0); 173 } while (0);
158 tail[clen - skb->len - 2] = (clen - skb->len) - 2; 174 tail[plen - 2] = plen - 2;
159 tail[clen - skb->len - 1] = *skb_mac_header(skb); 175 tail[plen - 1] = *skb_mac_header(skb);
160 pskb_put(skb, trailer, clen - skb->len + alen); 176 pskb_put(skb, trailer, clen - skb->len + alen);
161 177
162 skb_push(skb, -skb_network_offset(skb)); 178 skb_push(skb, -skb_network_offset(skb));
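esp_output() gains traffic-flow-confidentiality padding: when x->tfcpad is set, the payload is first padded with zeroes up to min(tfcpad, path MTU), and only then is the usual self-describing ESP trailer computed. The length bookkeeping, pulled out into an illustrative helper (the patch computes these inline; ALIGN rounds up to a multiple of blksize):

        static u32 esp_lengths(u32 len, u32 padto, u32 blksize,
                               u32 *tfclen, u32 *plen)
        {
                u32 clen;

                *tfclen = len < padto ? padto - len : 0;  /* zero TFC fill        */
                clen = ALIGN(len + 2 + *tfclen, blksize); /* +2: pad_len, nexthdr */
                *plen = clen - len - *tfclen;             /* ESP trailer bytes    */
                return clen;
        }

        /* e.g. len = 100, padto = 256, blksize = 16:
         *   tfclen = 156, clen = ALIGN(258, 16) = 272, plen = 16,
         * i.e. 14 pad bytes plus the pad_len and next_header octets. */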
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eb6f69a8f27a..d3a1112b9d9c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -158,11 +158,7 @@ static void fib_flush(struct net *net)
158struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) 158struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
159{ 159{
160 struct flowi fl = { 160 struct flowi fl = {
161 .nl_u = { 161 .fl4_dst = addr,
162 .ip4_u = {
163 .daddr = addr
164 }
165 },
166 .flags = FLOWI_FLAG_MATCH_ANY_IIF 162 .flags = FLOWI_FLAG_MATCH_ANY_IIF
167 }; 163 };
168 struct fib_result res = { 0 }; 164 struct fib_result res = { 0 };
@@ -193,7 +189,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
193 const struct net_device *dev, 189 const struct net_device *dev,
194 __be32 addr) 190 __be32 addr)
195{ 191{
196 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; 192 struct flowi fl = { .fl4_dst = addr };
197 struct fib_result res; 193 struct fib_result res;
198 unsigned ret = RTN_BROADCAST; 194 unsigned ret = RTN_BROADCAST;
199 struct fib_table *local_table; 195 struct fib_table *local_table;
@@ -247,13 +243,9 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
247{ 243{
248 struct in_device *in_dev; 244 struct in_device *in_dev;
249 struct flowi fl = { 245 struct flowi fl = {
250 .nl_u = { 246 .fl4_dst = src,
251 .ip4_u = { 247 .fl4_src = dst,
252 .daddr = src, 248 .fl4_tos = tos,
253 .saddr = dst,
254 .tos = tos
255 }
256 },
257 .mark = mark, 249 .mark = mark,
258 .iif = oif 250 .iif = oif
259 }; 251 };
@@ -853,13 +845,9 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
853 struct fib_result res; 845 struct fib_result res;
854 struct flowi fl = { 846 struct flowi fl = {
855 .mark = frn->fl_mark, 847 .mark = frn->fl_mark,
856 .nl_u = { 848 .fl4_dst = frn->fl_addr,
857 .ip4_u = { 849 .fl4_tos = frn->fl_tos,
858 .daddr = frn->fl_addr, 850 .fl4_scope = frn->fl_scope,
859 .tos = frn->fl_tos,
860 .scope = frn->fl_scope
861 }
862 }
863 }; 851 };
864 852
865#ifdef CONFIG_IP_MULTIPLE_TABLES 853#ifdef CONFIG_IP_MULTIPLE_TABLES
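Note on the fib_frontend.c changes: every flowi initializer loses the nested .nl_u = { .ip4_u = { ... } } block in favour of flat .fl4_dst/.fl4_src/.fl4_tos/.fl4_scope designators. If the fl4_* names are simply defines that expand to the nested members (an assumption; the defines themselves are not part of this diff), the conversion is purely textual and the initialized storage is unchanged:

    #include <stdio.h>

    /* Hypothetical miniature of struct flowi's IPv4 union. */
    struct flowi_sketch {
            union {
                    struct {
                            unsigned int daddr;
                            unsigned int saddr;
                            unsigned char tos;
                    } ip4_u;
            } nl_u;
    };

    /* Flat aliases in the style of the patch's fl4_* names. */
    #define fl4_dst nl_u.ip4_u.daddr
    #define fl4_src nl_u.ip4_u.saddr
    #define fl4_tos nl_u.ip4_u.tos

    int main(void)
    {
            struct flowi_sketch fl = { .fl4_dst = 0x0a000001, .fl4_tos = 4 };

            /* both spellings name the same storage */
            printf("%x == %x\n", fl.fl4_dst, fl.nl_u.ip4_u.daddr);
            return 0;
    }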
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2219c8..c079cc0ec651 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order,
47static inline void fib_result_assign(struct fib_result *res, 47static inline void fib_result_assign(struct fib_result *res,
48 struct fib_info *fi) 48 struct fib_info *fi)
49{ 49{
50 if (res->fi != NULL) 50 /* we used to play games with refcounts, but we now use RCU */
51 fib_info_put(res->fi);
52 res->fi = fi; 51 res->fi = fi;
53 if (fi != NULL)
54 atomic_inc(&fi->fib_clntref);
55} 52}
56 53
57#endif /* _FIB_LOOKUP_H */ 54#endif /* _FIB_LOOKUP_H */
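Note on fib_lookup.h: fib_result_assign() drops the fib_info_put()/fib_clntref dance because, per the new comment, lookups are now covered by RCU. The discipline this relies on is that a result is only dereferenced inside the read-side critical section; a compilable stub of the pattern (rcu_read_lock() is a no-op stand-in here):

    #include <stdio.h>

    static void rcu_read_lock(void)   { }
    static void rcu_read_unlock(void) { }

    struct fib_info   { int dummy; };
    struct fib_result { struct fib_info *fi; };

    static struct fib_info some_fi;

    /* Mirrors the new body: plain assignment, no refcounting. */
    static void fib_result_assign(struct fib_result *res,
                                  struct fib_info *fi)
    {
            res->fi = fi;
    }

    int main(void)
    {
            struct fib_result res;

            rcu_read_lock();
            fib_result_assign(&res, &some_fi);
            printf("fi usable here: %p\n", (void *)res.fi);
            rcu_read_unlock();
            /* past this point res.fi needs its own reference */
            return 0;
    }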
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3e0da3ef6116..12d3dc3df1b7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -563,12 +563,8 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
563 rcu_read_lock(); 563 rcu_read_lock();
564 { 564 {
565 struct flowi fl = { 565 struct flowi fl = {
566 .nl_u = { 566 .fl4_dst = nh->nh_gw,
567 .ip4_u = { 567 .fl4_scope = cfg->fc_scope + 1,
568 .daddr = nh->nh_gw,
569 .scope = cfg->fc_scope + 1,
570 },
571 },
572 .oif = nh->nh_oif, 568 .oif = nh->nh_oif,
573 }; 569 };
574 570
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 200eb538fbb3..0f280348e0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size)
365 if (size <= PAGE_SIZE) 365 if (size <= PAGE_SIZE)
366 return kzalloc(size, GFP_KERNEL); 366 return kzalloc(size, GFP_KERNEL);
367 else 367 else
368 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 368 return vzalloc(size);
369} 369}
370 370
371static void __tnode_vfree(struct work_struct *arg) 371static void __tnode_vfree(struct work_struct *arg)
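Note on fib_trie.c: the open-coded __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL) visible on the left is exactly what vzalloc() wraps, so the hunk is a pure simplification. A userspace sketch of the small/large split that tnode_alloc() keeps (calloc stands in for both kzalloc and vzalloc, since only the branch is of interest):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    static void *tnode_alloc_sketch(size_t size)
    {
            if (size <= PAGE_SIZE)
                    return calloc(1, size); /* kernel: kzalloc() */
            else
                    return calloc(1, size); /* kernel: vzalloc() */
    }

    int main(void)
    {
            size_t sz = 2 * PAGE_SIZE;
            unsigned char *p = tnode_alloc_sketch(sz);

            printf("zeroed tail: %d\n", p && p[sz - 1] == 0);
            free(p);
            return 0;
    }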
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 96bc7f9475a3..4aa1b7f01ea0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -386,10 +386,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
386 daddr = icmp_param->replyopts.faddr; 386 daddr = icmp_param->replyopts.faddr;
387 } 387 }
388 { 388 {
389 struct flowi fl = { .nl_u = { .ip4_u = 389 struct flowi fl = { .fl4_dst = daddr,
390 { .daddr = daddr, 390 .fl4_src = rt->rt_spec_dst,
391 .saddr = rt->rt_spec_dst, 391 .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
392 .tos = RT_TOS(ip_hdr(skb)->tos) } },
393 .proto = IPPROTO_ICMP }; 392 .proto = IPPROTO_ICMP };
394 security_skb_classify_flow(skb, &fl); 393 security_skb_classify_flow(skb, &fl);
395 if (ip_route_output_key(net, &rt, &fl)) 394 if (ip_route_output_key(net, &rt, &fl))
@@ -506,8 +505,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
506 struct net_device *dev = NULL; 505 struct net_device *dev = NULL;
507 506
508 rcu_read_lock(); 507 rcu_read_lock();
509 if (rt->fl.iif && 508 if (rt_is_input_route(rt) &&
510 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) 509 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
511 dev = dev_get_by_index_rcu(net, rt->fl.iif); 510 dev = dev_get_by_index_rcu(net, rt->fl.iif);
512 511
513 if (dev) 512 if (dev)
@@ -542,22 +541,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
542 541
543 { 542 {
544 struct flowi fl = { 543 struct flowi fl = {
545 .nl_u = { 544 .fl4_dst = icmp_param.replyopts.srr ?
546 .ip4_u = { 545 icmp_param.replyopts.faddr : iph->saddr,
547 .daddr = icmp_param.replyopts.srr ? 546 .fl4_src = saddr,
548 icmp_param.replyopts.faddr : 547 .fl4_tos = RT_TOS(tos),
549 iph->saddr,
550 .saddr = saddr,
551 .tos = RT_TOS(tos)
552 }
553 },
554 .proto = IPPROTO_ICMP, 548 .proto = IPPROTO_ICMP,
555 .uli_u = { 549 .fl_icmp_type = type,
556 .icmpt = { 550 .fl_icmp_code = code,
557 .type = type,
558 .code = code
559 }
560 }
561 }; 551 };
562 int err; 552 int err;
563 struct rtable *rt2; 553 struct rtable *rt2;
@@ -569,6 +559,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
569 /* No need to clone since we're just using its address. */ 559 /* No need to clone since we're just using its address. */
570 rt2 = rt; 560 rt2 = rt;
571 561
562 if (!fl.nl_u.ip4_u.saddr)
563 fl.nl_u.ip4_u.saddr = rt->rt_src;
564
572 err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0); 565 err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
573 switch (err) { 566 switch (err) {
574 case 0: 567 case 0:
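Note on icmp.c: besides the flowi flattening, the open-coded rt->fl.iif test becomes rt_is_input_route(rt). Judging only from the line it replaces, the predicate is "the route has a nonzero incoming interface index"; a stub version with that inferred meaning:

    #include <stdbool.h>
    #include <stdio.h>

    struct flowi_stub  { int iif; };
    struct rtable_stub { struct flowi_stub fl; };

    /* Inferred from the replaced test (rt->fl.iif != 0). */
    static bool rt_is_input_route(const struct rtable_stub *rt)
    {
            return rt->fl.iif != 0;
    }

    int main(void)
    {
            struct rtable_stub in  = { .fl = { .iif = 3 } };
            struct rtable_stub out = { .fl = { .iif = 0 } };

            printf("in=%d out=%d\n",
                   rt_is_input_route(&in), rt_is_input_route(&out));
            return 0;
    }

The igmp.c hunks further down make the same substitution in the opposite direction, using rt_is_output_route() for the fl.iif == 0 case.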
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8877c6c7216..e0e77e297de3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,21 +149,37 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, 149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
150 int sfcount, __be32 *psfsrc, int delta); 150 int sfcount, __be32 *psfsrc, int delta);
151 151
152
153static void ip_mc_list_reclaim(struct rcu_head *head)
154{
155 kfree(container_of(head, struct ip_mc_list, rcu));
156}
157
152static void ip_ma_put(struct ip_mc_list *im) 158static void ip_ma_put(struct ip_mc_list *im)
153{ 159{
154 if (atomic_dec_and_test(&im->refcnt)) { 160 if (atomic_dec_and_test(&im->refcnt)) {
155 in_dev_put(im->interface); 161 in_dev_put(im->interface);
156 kfree(im); 162 call_rcu(&im->rcu, ip_mc_list_reclaim);
157 } 163 }
158} 164}
159 165
166#define for_each_pmc_rcu(in_dev, pmc) \
167 for (pmc = rcu_dereference(in_dev->mc_list); \
168 pmc != NULL; \
169 pmc = rcu_dereference(pmc->next_rcu))
170
171#define for_each_pmc_rtnl(in_dev, pmc) \
172 for (pmc = rtnl_dereference(in_dev->mc_list); \
173 pmc != NULL; \
174 pmc = rtnl_dereference(pmc->next_rcu))
175
160#ifdef CONFIG_IP_MULTICAST 176#ifdef CONFIG_IP_MULTICAST
161 177
162/* 178/*
163 * Timer management 179 * Timer management
164 */ 180 */
165 181
166static __inline__ void igmp_stop_timer(struct ip_mc_list *im) 182static void igmp_stop_timer(struct ip_mc_list *im)
167{ 183{
168 spin_lock_bh(&im->lock); 184 spin_lock_bh(&im->lock);
169 if (del_timer(&im->timer)) 185 if (del_timer(&im->timer))
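Note on the for_each_pmc_rcu()/for_each_pmc_rtnl() helpers added above: both walk the same singly linked mc_list, differing only in which lockdep-checked accessor they use (RCU read-side vs. RTNL). A userspace rendering of the traversal, with rcu_dereference() reduced to a plain read:

    #include <stdio.h>

    #define rcu_dereference(p) (p) /* kernel: marks the RCU read */

    struct ip_mc_list {
            unsigned int multiaddr;
            struct ip_mc_list *next_rcu;
    };

    struct in_device { struct ip_mc_list *mc_list; };

    #define for_each_pmc_rcu(in_dev, pmc)                        \
            for (pmc = rcu_dereference((in_dev)->mc_list);       \
                 pmc != NULL;                                    \
                 pmc = rcu_dereference(pmc->next_rcu))

    int main(void)
    {
            struct ip_mc_list b = { 0xe0000002, NULL };
            struct ip_mc_list a = { 0xe0000001, &b };
            struct in_device dev = { &a };
            struct ip_mc_list *pmc;

            /* in the kernel this loop sits between rcu_read_lock()
             * and rcu_read_unlock(), as the later hunks show */
            for_each_pmc_rcu(&dev, pmc)
                    printf("group %08x\n", pmc->multiaddr);
            return 0;
    }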
@@ -284,6 +300,8 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
284 return scount; 300 return scount;
285} 301}
286 302
303#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
304
287static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) 305static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
288{ 306{
289 struct sk_buff *skb; 307 struct sk_buff *skb;
@@ -292,14 +310,20 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
292 struct igmpv3_report *pig; 310 struct igmpv3_report *pig;
293 struct net *net = dev_net(dev); 311 struct net *net = dev_net(dev);
294 312
295 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 313 while (1) {
296 if (skb == NULL) 314 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
297 return NULL; 315 GFP_ATOMIC | __GFP_NOWARN);
316 if (skb)
317 break;
318 size >>= 1;
319 if (size < 256)
320 return NULL;
321 }
322 igmp_skb_size(skb) = size;
298 323
299 { 324 {
300 struct flowi fl = { .oif = dev->ifindex, 325 struct flowi fl = { .oif = dev->ifindex,
301 .nl_u = { .ip4_u = { 326 .fl4_dst = IGMPV3_ALL_MCR,
302 .daddr = IGMPV3_ALL_MCR } },
303 .proto = IPPROTO_IGMP }; 327 .proto = IPPROTO_IGMP };
304 if (ip_route_output_key(net, &rt, &fl)) { 328 if (ip_route_output_key(net, &rt, &fl)) {
305 kfree_skb(skb); 329 kfree_skb(skb);
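Note on igmpv3_newpack(): allocation no longer simply fails at the requested size; the new loop retries with __GFP_NOWARN at half the size until it succeeds or drops below 256 bytes, and records the achieved size in skb->cb via igmp_skb_size(), so that AVAILABLE() (changed in the next hunk) budgets against what was actually allocated rather than dev->mtu. The strategy in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for alloc_skb(); fails above 4 KiB to mimic pressure. */
    static void *try_alloc(size_t size)
    {
            return size > 4096 ? NULL : malloc(size);
    }

    static void *alloc_with_fallback(size_t size, size_t *got)
    {
            void *p;

            while (1) {
                    p = try_alloc(size);
                    if (p)
                            break;
                    size >>= 1;
                    if (size < 256)
                            return NULL;
            }
            *got = size; /* kernel stashes this in skb->cb */
            return p;
    }

    int main(void)
    {
            size_t got = 0;
            void *p = alloc_with_fallback(64 * 1024, &got);

            printf("got %zu bytes\n", got);
            free(p);
            return 0;
    }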
@@ -384,7 +408,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
384 return skb; 408 return skb;
385} 409}
386 410
387#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ 411#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
388 skb_tailroom(skb)) : 0) 412 skb_tailroom(skb)) : 0)
389 413
390static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, 414static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
@@ -502,8 +526,8 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
502 int type; 526 int type;
503 527
504 if (!pmc) { 528 if (!pmc) {
505 read_lock(&in_dev->mc_list_lock); 529 rcu_read_lock();
506 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 530 for_each_pmc_rcu(in_dev, pmc) {
507 if (pmc->multiaddr == IGMP_ALL_HOSTS) 531 if (pmc->multiaddr == IGMP_ALL_HOSTS)
508 continue; 532 continue;
509 spin_lock_bh(&pmc->lock); 533 spin_lock_bh(&pmc->lock);
@@ -514,7 +538,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
514 skb = add_grec(skb, pmc, type, 0, 0); 538 skb = add_grec(skb, pmc, type, 0, 0);
515 spin_unlock_bh(&pmc->lock); 539 spin_unlock_bh(&pmc->lock);
516 } 540 }
517 read_unlock(&in_dev->mc_list_lock); 541 rcu_read_unlock();
518 } else { 542 } else {
519 spin_lock_bh(&pmc->lock); 543 spin_lock_bh(&pmc->lock);
520 if (pmc->sfcount[MCAST_EXCLUDE]) 544 if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +580,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
556 struct sk_buff *skb = NULL; 580 struct sk_buff *skb = NULL;
557 int type, dtype; 581 int type, dtype;
558 582
559 read_lock(&in_dev->mc_list_lock); 583 rcu_read_lock();
560 spin_lock_bh(&in_dev->mc_tomb_lock); 584 spin_lock_bh(&in_dev->mc_tomb_lock);
561 585
562 /* deleted MCA's */ 586 /* deleted MCA's */
@@ -593,7 +617,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
593 spin_unlock_bh(&in_dev->mc_tomb_lock); 617 spin_unlock_bh(&in_dev->mc_tomb_lock);
594 618
595 /* change recs */ 619 /* change recs */
596 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 620 for_each_pmc_rcu(in_dev, pmc) {
597 spin_lock_bh(&pmc->lock); 621 spin_lock_bh(&pmc->lock);
598 if (pmc->sfcount[MCAST_EXCLUDE]) { 622 if (pmc->sfcount[MCAST_EXCLUDE]) {
599 type = IGMPV3_BLOCK_OLD_SOURCES; 623 type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +640,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
616 } 640 }
617 spin_unlock_bh(&pmc->lock); 641 spin_unlock_bh(&pmc->lock);
618 } 642 }
619 read_unlock(&in_dev->mc_list_lock); 643 rcu_read_unlock();
620 644
621 if (!skb) 645 if (!skb)
622 return; 646 return;
@@ -644,7 +668,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
644 668
645 { 669 {
646 struct flowi fl = { .oif = dev->ifindex, 670 struct flowi fl = { .oif = dev->ifindex,
647 .nl_u = { .ip4_u = { .daddr = dst } }, 671 .fl4_dst = dst,
648 .proto = IPPROTO_IGMP }; 672 .proto = IPPROTO_IGMP };
649 if (ip_route_output_key(net, &rt, &fl)) 673 if (ip_route_output_key(net, &rt, &fl))
650 return -1; 674 return -1;
@@ -813,14 +837,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
813 if (group == IGMP_ALL_HOSTS) 837 if (group == IGMP_ALL_HOSTS)
814 return; 838 return;
815 839
816 read_lock(&in_dev->mc_list_lock); 840 rcu_read_lock();
817 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 841 for_each_pmc_rcu(in_dev, im) {
818 if (im->multiaddr == group) { 842 if (im->multiaddr == group) {
819 igmp_stop_timer(im); 843 igmp_stop_timer(im);
820 break; 844 break;
821 } 845 }
822 } 846 }
823 read_unlock(&in_dev->mc_list_lock); 847 rcu_read_unlock();
824} 848}
825 849
826static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, 850static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +930,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
906 * - Use the igmp->igmp_code field as the maximum 930 * - Use the igmp->igmp_code field as the maximum
907 * delay possible 931 * delay possible
908 */ 932 */
909 read_lock(&in_dev->mc_list_lock); 933 rcu_read_lock();
910 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 934 for_each_pmc_rcu(in_dev, im) {
911 int changed; 935 int changed;
912 936
913 if (group && group != im->multiaddr) 937 if (group && group != im->multiaddr)
@@ -925,7 +949,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
925 if (changed) 949 if (changed)
926 igmp_mod_timer(im, max_delay); 950 igmp_mod_timer(im, max_delay);
927 } 951 }
928 read_unlock(&in_dev->mc_list_lock); 952 rcu_read_unlock();
929} 953}
930 954
931/* called in rcu_read_lock() section */ 955/* called in rcu_read_lock() section */
@@ -961,7 +985,7 @@ int igmp_rcv(struct sk_buff *skb)
961 case IGMP_HOST_MEMBERSHIP_REPORT: 985 case IGMP_HOST_MEMBERSHIP_REPORT:
962 case IGMPV2_HOST_MEMBERSHIP_REPORT: 986 case IGMPV2_HOST_MEMBERSHIP_REPORT:
963 /* Is it our report looped back? */ 987 /* Is it our report looped back? */
964 if (skb_rtable(skb)->fl.iif == 0) 988 if (rt_is_output_route(skb_rtable(skb)))
965 break; 989 break;
966 /* don't rely on MC router hearing unicast reports */ 990 /* don't rely on MC router hearing unicast reports */
967 if (skb->pkt_type == PACKET_MULTICAST || 991 if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1134,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1110 kfree(pmc); 1134 kfree(pmc);
1111 } 1135 }
1112 /* clear dead sources, too */ 1136 /* clear dead sources, too */
1113 read_lock(&in_dev->mc_list_lock); 1137 rcu_read_lock();
1114 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1138 for_each_pmc_rcu(in_dev, pmc) {
1115 struct ip_sf_list *psf, *psf_next; 1139 struct ip_sf_list *psf, *psf_next;
1116 1140
1117 spin_lock_bh(&pmc->lock); 1141 spin_lock_bh(&pmc->lock);
@@ -1123,7 +1147,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1123 kfree(psf); 1147 kfree(psf);
1124 } 1148 }
1125 } 1149 }
1126 read_unlock(&in_dev->mc_list_lock); 1150 rcu_read_unlock();
1127} 1151}
1128#endif 1152#endif
1129 1153
@@ -1209,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1209 1233
1210 ASSERT_RTNL(); 1234 ASSERT_RTNL();
1211 1235
1212 for (im=in_dev->mc_list; im; im=im->next) { 1236 for_each_pmc_rtnl(in_dev, im) {
1213 if (im->multiaddr == addr) { 1237 if (im->multiaddr == addr) {
1214 im->users++; 1238 im->users++;
1215 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); 1239 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1241,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1217 } 1241 }
1218 } 1242 }
1219 1243
1220 im = kmalloc(sizeof(*im), GFP_KERNEL); 1244 im = kzalloc(sizeof(*im), GFP_KERNEL);
1221 if (!im) 1245 if (!im)
1222 goto out; 1246 goto out;
1223 1247
@@ -1227,26 +1251,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1227 im->multiaddr = addr; 1251 im->multiaddr = addr;
1228 /* initial mode is (EX, empty) */ 1252 /* initial mode is (EX, empty) */
1229 im->sfmode = MCAST_EXCLUDE; 1253 im->sfmode = MCAST_EXCLUDE;
1230 im->sfcount[MCAST_INCLUDE] = 0;
1231 im->sfcount[MCAST_EXCLUDE] = 1; 1254 im->sfcount[MCAST_EXCLUDE] = 1;
1232 im->sources = NULL;
1233 im->tomb = NULL;
1234 im->crcount = 0;
1235 atomic_set(&im->refcnt, 1); 1255 atomic_set(&im->refcnt, 1);
1236 spin_lock_init(&im->lock); 1256 spin_lock_init(&im->lock);
1237#ifdef CONFIG_IP_MULTICAST 1257#ifdef CONFIG_IP_MULTICAST
1238 im->tm_running = 0;
1239 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); 1258 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
1240 im->unsolicit_count = IGMP_Unsolicited_Report_Count; 1259 im->unsolicit_count = IGMP_Unsolicited_Report_Count;
1241 im->reporter = 0;
1242 im->gsquery = 0;
1243#endif 1260#endif
1244 im->loaded = 0; 1261
1245 write_lock_bh(&in_dev->mc_list_lock); 1262 im->next_rcu = in_dev->mc_list;
1246 im->next = in_dev->mc_list;
1247 in_dev->mc_list = im;
1248 in_dev->mc_count++; 1263 in_dev->mc_count++;
1249 write_unlock_bh(&in_dev->mc_list_lock); 1264 rcu_assign_pointer(in_dev->mc_list, im);
1265
1250#ifdef CONFIG_IP_MULTICAST 1266#ifdef CONFIG_IP_MULTICAST
1251 igmpv3_del_delrec(in_dev, im->multiaddr); 1267 igmpv3_del_delrec(in_dev, im->multiaddr);
1252#endif 1268#endif
@@ -1260,26 +1276,32 @@ EXPORT_SYMBOL(ip_mc_inc_group);
1260 1276
1261/* 1277/*
1262 * Resend IGMP JOIN report; used for bonding. 1278 * Resend IGMP JOIN report; used for bonding.
1279 * Called with rcu_read_lock()
1263 */ 1280 */
1264void ip_mc_rejoin_group(struct ip_mc_list *im) 1281void ip_mc_rejoin_groups(struct in_device *in_dev)
1265{ 1282{
1266#ifdef CONFIG_IP_MULTICAST 1283#ifdef CONFIG_IP_MULTICAST
1267 struct in_device *in_dev = im->interface; 1284 struct ip_mc_list *im;
1285 int type;
1268 1286
1269 if (im->multiaddr == IGMP_ALL_HOSTS) 1287 for_each_pmc_rcu(in_dev, im) {
1270 return; 1288 if (im->multiaddr == IGMP_ALL_HOSTS)
1289 continue;
1271 1290
1272 /* a failover is happening and switches 1291 /* a failover is happening and switches
1273 * must be notified immediately */ 1292 * must be notified immediately
1274 if (IGMP_V1_SEEN(in_dev)) 1293 */
1275 igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT); 1294 if (IGMP_V1_SEEN(in_dev))
1276 else if (IGMP_V2_SEEN(in_dev)) 1295 type = IGMP_HOST_MEMBERSHIP_REPORT;
1277 igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT); 1296 else if (IGMP_V2_SEEN(in_dev))
1278 else 1297 type = IGMPV2_HOST_MEMBERSHIP_REPORT;
1279 igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT); 1298 else
1299 type = IGMPV3_HOST_MEMBERSHIP_REPORT;
1300 igmp_send_report(in_dev, im, type);
1301 }
1280#endif 1302#endif
1281} 1303}
1282EXPORT_SYMBOL(ip_mc_rejoin_group); 1304EXPORT_SYMBOL(ip_mc_rejoin_groups);
1283 1305
1284/* 1306/*
1285 * A socket has left a multicast group on device dev 1307 * A socket has left a multicast group on device dev
@@ -1287,17 +1309,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
1287 1309
1288void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) 1310void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1289{ 1311{
1290 struct ip_mc_list *i, **ip; 1312 struct ip_mc_list *i;
1313 struct ip_mc_list __rcu **ip;
1291 1314
1292 ASSERT_RTNL(); 1315 ASSERT_RTNL();
1293 1316
1294 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1317 for (ip = &in_dev->mc_list;
1318 (i = rtnl_dereference(*ip)) != NULL;
1319 ip = &i->next_rcu) {
1295 if (i->multiaddr == addr) { 1320 if (i->multiaddr == addr) {
1296 if (--i->users == 0) { 1321 if (--i->users == 0) {
1297 write_lock_bh(&in_dev->mc_list_lock); 1322 *ip = i->next_rcu;
1298 *ip = i->next;
1299 in_dev->mc_count--; 1323 in_dev->mc_count--;
1300 write_unlock_bh(&in_dev->mc_list_lock);
1301 igmp_group_dropped(i); 1324 igmp_group_dropped(i);
1302 1325
1303 if (!in_dev->dead) 1326 if (!in_dev->dead)
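Note on ip_mc_dec_group(): with the list RCU-protected, removal is a pointer-to-pointer walk under RTNL and a single store of i->next_rcu. The write lock disappears because readers tolerate seeing either the old or the new next pointer, and the node itself is only freed via call_rcu() from ip_ma_put(), as the earlier hunk shows. The unlink shape, minus the RCU machinery:

    #include <stdio.h>

    struct ip_mc_list {
            unsigned int addr;
            struct ip_mc_list *next_rcu;
    };

    /* Walk with a pointer to the previous link; one store removes
     * the node, which is what makes this safe for RCU readers. */
    static struct ip_mc_list *unlink_addr(struct ip_mc_list **head,
                                          unsigned int addr)
    {
            struct ip_mc_list *i, **ip;

            for (ip = head; (i = *ip) != NULL; ip = &i->next_rcu) {
                    if (i->addr == addr) {
                            *ip = i->next_rcu;
                            return i; /* kernel: free via call_rcu() */
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct ip_mc_list b = { 2, NULL };
            struct ip_mc_list a = { 1, &b };
            struct ip_mc_list *head = &a;

            unlink_addr(&head, 1);
            printf("head is now %u\n", head->addr);
            return 0;
    }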
@@ -1316,34 +1339,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
1316 1339
1317void ip_mc_unmap(struct in_device *in_dev) 1340void ip_mc_unmap(struct in_device *in_dev)
1318{ 1341{
1319 struct ip_mc_list *i; 1342 struct ip_mc_list *pmc;
1320 1343
1321 ASSERT_RTNL(); 1344 ASSERT_RTNL();
1322 1345
1323 for (i = in_dev->mc_list; i; i = i->next) 1346 for_each_pmc_rtnl(in_dev, pmc)
1324 igmp_group_dropped(i); 1347 igmp_group_dropped(pmc);
1325} 1348}
1326 1349
1327void ip_mc_remap(struct in_device *in_dev) 1350void ip_mc_remap(struct in_device *in_dev)
1328{ 1351{
1329 struct ip_mc_list *i; 1352 struct ip_mc_list *pmc;
1330 1353
1331 ASSERT_RTNL(); 1354 ASSERT_RTNL();
1332 1355
1333 for (i = in_dev->mc_list; i; i = i->next) 1356 for_each_pmc_rtnl(in_dev, pmc)
1334 igmp_group_added(i); 1357 igmp_group_added(pmc);
1335} 1358}
1336 1359
1337/* Device going down */ 1360/* Device going down */
1338 1361
1339void ip_mc_down(struct in_device *in_dev) 1362void ip_mc_down(struct in_device *in_dev)
1340{ 1363{
1341 struct ip_mc_list *i; 1364 struct ip_mc_list *pmc;
1342 1365
1343 ASSERT_RTNL(); 1366 ASSERT_RTNL();
1344 1367
1345 for (i=in_dev->mc_list; i; i=i->next) 1368 for_each_pmc_rtnl(in_dev, pmc)
1346 igmp_group_dropped(i); 1369 igmp_group_dropped(pmc);
1347 1370
1348#ifdef CONFIG_IP_MULTICAST 1371#ifdef CONFIG_IP_MULTICAST
1349 in_dev->mr_ifc_count = 0; 1372 in_dev->mr_ifc_count = 0;
@@ -1374,7 +1397,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
1374 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; 1397 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
1375#endif 1398#endif
1376 1399
1377 rwlock_init(&in_dev->mc_list_lock);
1378 spin_lock_init(&in_dev->mc_tomb_lock); 1400 spin_lock_init(&in_dev->mc_tomb_lock);
1379} 1401}
1380 1402
@@ -1382,14 +1404,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
1382 1404
1383void ip_mc_up(struct in_device *in_dev) 1405void ip_mc_up(struct in_device *in_dev)
1384{ 1406{
1385 struct ip_mc_list *i; 1407 struct ip_mc_list *pmc;
1386 1408
1387 ASSERT_RTNL(); 1409 ASSERT_RTNL();
1388 1410
1389 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1411 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1390 1412
1391 for (i=in_dev->mc_list; i; i=i->next) 1413 for_each_pmc_rtnl(in_dev, pmc)
1392 igmp_group_added(i); 1414 igmp_group_added(pmc);
1393} 1415}
1394 1416
1395/* 1417/*
@@ -1405,24 +1427,19 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1405 /* Deactivate timers */ 1427 /* Deactivate timers */
1406 ip_mc_down(in_dev); 1428 ip_mc_down(in_dev);
1407 1429
1408 write_lock_bh(&in_dev->mc_list_lock); 1430 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1409 while ((i = in_dev->mc_list) != NULL) { 1431 in_dev->mc_list = i->next_rcu;
1410 in_dev->mc_list = i->next;
1411 in_dev->mc_count--; 1432 in_dev->mc_count--;
1412 write_unlock_bh(&in_dev->mc_list_lock); 1433
1413 igmp_group_dropped(i); 1434 igmp_group_dropped(i);
1414 ip_ma_put(i); 1435 ip_ma_put(i);
1415
1416 write_lock_bh(&in_dev->mc_list_lock);
1417 } 1436 }
1418 write_unlock_bh(&in_dev->mc_list_lock);
1419} 1437}
1420 1438
1421/* RTNL is locked */ 1439/* RTNL is locked */
1422static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) 1440static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1423{ 1441{
1424 struct flowi fl = { .nl_u = { .ip4_u = 1442 struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
1425 { .daddr = imr->imr_multiaddr.s_addr } } };
1426 struct rtable *rt; 1443 struct rtable *rt;
1427 struct net_device *dev = NULL; 1444 struct net_device *dev = NULL;
1428 struct in_device *idev = NULL; 1445 struct in_device *idev = NULL;
@@ -1513,18 +1530,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1513 1530
1514 if (!in_dev) 1531 if (!in_dev)
1515 return -ENODEV; 1532 return -ENODEV;
1516 read_lock(&in_dev->mc_list_lock); 1533 rcu_read_lock();
1517 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1534 for_each_pmc_rcu(in_dev, pmc) {
1518 if (*pmca == pmc->multiaddr) 1535 if (*pmca == pmc->multiaddr)
1519 break; 1536 break;
1520 } 1537 }
1521 if (!pmc) { 1538 if (!pmc) {
1522 /* MCA not found?? bug */ 1539 /* MCA not found?? bug */
1523 read_unlock(&in_dev->mc_list_lock); 1540 rcu_read_unlock();
1524 return -ESRCH; 1541 return -ESRCH;
1525 } 1542 }
1526 spin_lock_bh(&pmc->lock); 1543 spin_lock_bh(&pmc->lock);
1527 read_unlock(&in_dev->mc_list_lock); 1544 rcu_read_unlock();
1528#ifdef CONFIG_IP_MULTICAST 1545#ifdef CONFIG_IP_MULTICAST
1529 sf_markstate(pmc); 1546 sf_markstate(pmc);
1530#endif 1547#endif
@@ -1685,18 +1702,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1685 1702
1686 if (!in_dev) 1703 if (!in_dev)
1687 return -ENODEV; 1704 return -ENODEV;
1688 read_lock(&in_dev->mc_list_lock); 1705 rcu_read_lock();
1689 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1706 for_each_pmc_rcu(in_dev, pmc) {
1690 if (*pmca == pmc->multiaddr) 1707 if (*pmca == pmc->multiaddr)
1691 break; 1708 break;
1692 } 1709 }
1693 if (!pmc) { 1710 if (!pmc) {
1694 /* MCA not found?? bug */ 1711 /* MCA not found?? bug */
1695 read_unlock(&in_dev->mc_list_lock); 1712 rcu_read_unlock();
1696 return -ESRCH; 1713 return -ESRCH;
1697 } 1714 }
1698 spin_lock_bh(&pmc->lock); 1715 spin_lock_bh(&pmc->lock);
1699 read_unlock(&in_dev->mc_list_lock); 1716 rcu_read_unlock();
1700 1717
1701#ifdef CONFIG_IP_MULTICAST 1718#ifdef CONFIG_IP_MULTICAST
1702 sf_markstate(pmc); 1719 sf_markstate(pmc);
@@ -1793,7 +1810,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1793 1810
1794 err = -EADDRINUSE; 1811 err = -EADDRINUSE;
1795 ifindex = imr->imr_ifindex; 1812 ifindex = imr->imr_ifindex;
1796 for (i = inet->mc_list; i; i = i->next) { 1813 for_each_pmc_rtnl(inet, i) {
1797 if (i->multi.imr_multiaddr.s_addr == addr && 1814 if (i->multi.imr_multiaddr.s_addr == addr &&
1798 i->multi.imr_ifindex == ifindex) 1815 i->multi.imr_ifindex == ifindex)
1799 goto done; 1816 goto done;
@@ -1807,7 +1824,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1807 goto done; 1824 goto done;
1808 1825
1809 memcpy(&iml->multi, imr, sizeof(*imr)); 1826 memcpy(&iml->multi, imr, sizeof(*imr));
1810 iml->next = inet->mc_list; 1827 iml->next_rcu = inet->mc_list;
1811 iml->sflist = NULL; 1828 iml->sflist = NULL;
1812 iml->sfmode = MCAST_EXCLUDE; 1829 iml->sfmode = MCAST_EXCLUDE;
1813 rcu_assign_pointer(inet->mc_list, iml); 1830 rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1838,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
1821 1838
1822static void ip_sf_socklist_reclaim(struct rcu_head *rp) 1839static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1823{ 1840{
1824 struct ip_sf_socklist *psf; 1841 kfree(container_of(rp, struct ip_sf_socklist, rcu));
1825
1826 psf = container_of(rp, struct ip_sf_socklist, rcu);
1827 /* sk_omem_alloc should have been decreased by the caller*/ 1842 /* sk_omem_alloc should have been decreased by the caller*/
1828 kfree(psf);
1829} 1843}
1830 1844
1831static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1845static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1832 struct in_device *in_dev) 1846 struct in_device *in_dev)
1833{ 1847{
1834 struct ip_sf_socklist *psf = iml->sflist; 1848 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
1835 int err; 1849 int err;
1836 1850
1837 if (psf == NULL) { 1851 if (psf == NULL) {
@@ -1851,11 +1865,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1851 1865
1852static void ip_mc_socklist_reclaim(struct rcu_head *rp) 1866static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1853{ 1867{
1854 struct ip_mc_socklist *iml; 1868 kfree(container_of(rp, struct ip_mc_socklist, rcu));
1855
1856 iml = container_of(rp, struct ip_mc_socklist, rcu);
1857 /* sk_omem_alloc should have been decreased by the caller*/ 1869 /* sk_omem_alloc should have been decreased by the caller*/
1858 kfree(iml);
1859} 1870}
1860 1871
1861 1872
@@ -1866,7 +1877,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1866int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) 1877int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1867{ 1878{
1868 struct inet_sock *inet = inet_sk(sk); 1879 struct inet_sock *inet = inet_sk(sk);
1869 struct ip_mc_socklist *iml, **imlp; 1880 struct ip_mc_socklist *iml;
1881 struct ip_mc_socklist __rcu **imlp;
1870 struct in_device *in_dev; 1882 struct in_device *in_dev;
1871 struct net *net = sock_net(sk); 1883 struct net *net = sock_net(sk);
1872 __be32 group = imr->imr_multiaddr.s_addr; 1884 __be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1888,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1876 rtnl_lock(); 1888 rtnl_lock();
1877 in_dev = ip_mc_find_dev(net, imr); 1889 in_dev = ip_mc_find_dev(net, imr);
1878 ifindex = imr->imr_ifindex; 1890 ifindex = imr->imr_ifindex;
1879 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1891 for (imlp = &inet->mc_list;
1892 (iml = rtnl_dereference(*imlp)) != NULL;
1893 imlp = &iml->next_rcu) {
1880 if (iml->multi.imr_multiaddr.s_addr != group) 1894 if (iml->multi.imr_multiaddr.s_addr != group)
1881 continue; 1895 continue;
1882 if (ifindex) { 1896 if (ifindex) {
@@ -1888,7 +1902,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1888 1902
1889 (void) ip_mc_leave_src(sk, iml, in_dev); 1903 (void) ip_mc_leave_src(sk, iml, in_dev);
1890 1904
1891 rcu_assign_pointer(*imlp, iml->next); 1905 *imlp = iml->next_rcu;
1892 1906
1893 if (in_dev) 1907 if (in_dev)
1894 ip_mc_dec_group(in_dev, group); 1908 ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1948,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1934 } 1948 }
1935 err = -EADDRNOTAVAIL; 1949 err = -EADDRNOTAVAIL;
1936 1950
1937 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 1951 for_each_pmc_rtnl(inet, pmc) {
1938 if ((pmc->multi.imr_multiaddr.s_addr == 1952 if ((pmc->multi.imr_multiaddr.s_addr ==
1939 imr.imr_multiaddr.s_addr) && 1953 imr.imr_multiaddr.s_addr) &&
1940 (pmc->multi.imr_ifindex == imr.imr_ifindex)) 1954 (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1972,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1958 pmc->sfmode = omode; 1972 pmc->sfmode = omode;
1959 } 1973 }
1960 1974
1961 psl = pmc->sflist; 1975 psl = rtnl_dereference(pmc->sflist);
1962 if (!add) { 1976 if (!add) {
1963 if (!psl) 1977 if (!psl)
1964 goto done; /* err = -EADDRNOTAVAIL */ 1978 goto done; /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2091,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2077 goto done; 2091 goto done;
2078 } 2092 }
2079 2093
2080 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2094 for_each_pmc_rtnl(inet, pmc) {
2081 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2095 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2082 pmc->multi.imr_ifindex == imr.imr_ifindex) 2096 pmc->multi.imr_ifindex == imr.imr_ifindex)
2083 break; 2097 break;
@@ -2107,7 +2121,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2107 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr, 2121 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2108 msf->imsf_fmode, 0, NULL, 0); 2122 msf->imsf_fmode, 0, NULL, 0);
2109 } 2123 }
2110 psl = pmc->sflist; 2124 psl = rtnl_dereference(pmc->sflist);
2111 if (psl) { 2125 if (psl) {
2112 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2126 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2113 psl->sl_count, psl->sl_addr, 0); 2127 psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2169,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2155 } 2169 }
2156 err = -EADDRNOTAVAIL; 2170 err = -EADDRNOTAVAIL;
2157 2171
2158 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2172 for_each_pmc_rtnl(inet, pmc) {
2159 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2173 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2160 pmc->multi.imr_ifindex == imr.imr_ifindex) 2174 pmc->multi.imr_ifindex == imr.imr_ifindex)
2161 break; 2175 break;
@@ -2163,7 +2177,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2163 if (!pmc) /* must have a prior join */ 2177 if (!pmc) /* must have a prior join */
2164 goto done; 2178 goto done;
2165 msf->imsf_fmode = pmc->sfmode; 2179 msf->imsf_fmode = pmc->sfmode;
2166 psl = pmc->sflist; 2180 psl = rtnl_dereference(pmc->sflist);
2167 rtnl_unlock(); 2181 rtnl_unlock();
2168 if (!psl) { 2182 if (!psl) {
2169 len = 0; 2183 len = 0;
@@ -2208,7 +2222,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2208 2222
2209 err = -EADDRNOTAVAIL; 2223 err = -EADDRNOTAVAIL;
2210 2224
2211 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2225 for_each_pmc_rtnl(inet, pmc) {
2212 if (pmc->multi.imr_multiaddr.s_addr == addr && 2226 if (pmc->multi.imr_multiaddr.s_addr == addr &&
2213 pmc->multi.imr_ifindex == gsf->gf_interface) 2227 pmc->multi.imr_ifindex == gsf->gf_interface)
2214 break; 2228 break;
@@ -2216,7 +2230,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2216 if (!pmc) /* must have a prior join */ 2230 if (!pmc) /* must have a prior join */
2217 goto done; 2231 goto done;
2218 gsf->gf_fmode = pmc->sfmode; 2232 gsf->gf_fmode = pmc->sfmode;
2219 psl = pmc->sflist; 2233 psl = rtnl_dereference(pmc->sflist);
2220 rtnl_unlock(); 2234 rtnl_unlock();
2221 count = psl ? psl->sl_count : 0; 2235 count = psl ? psl->sl_count : 0;
2222 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; 2236 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2271,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2257 goto out; 2271 goto out;
2258 2272
2259 rcu_read_lock(); 2273 rcu_read_lock();
2260 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) { 2274 for_each_pmc_rcu(inet, pmc) {
2261 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2275 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2262 pmc->multi.imr_ifindex == dif) 2276 pmc->multi.imr_ifindex == dif)
2263 break; 2277 break;
@@ -2265,7 +2279,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2265 ret = inet->mc_all; 2279 ret = inet->mc_all;
2266 if (!pmc) 2280 if (!pmc)
2267 goto unlock; 2281 goto unlock;
2268 psl = pmc->sflist; 2282 psl = rcu_dereference(pmc->sflist);
2269 ret = (pmc->sfmode == MCAST_EXCLUDE); 2283 ret = (pmc->sfmode == MCAST_EXCLUDE);
2270 if (!psl) 2284 if (!psl)
2271 goto unlock; 2285 goto unlock;
@@ -2300,16 +2314,14 @@ void ip_mc_drop_socket(struct sock *sk)
2300 return; 2314 return;
2301 2315
2302 rtnl_lock(); 2316 rtnl_lock();
2303 while ((iml = inet->mc_list) != NULL) { 2317 while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
2304 struct in_device *in_dev; 2318 struct in_device *in_dev;
2305 rcu_assign_pointer(inet->mc_list, iml->next);
2306 2319
2320 inet->mc_list = iml->next_rcu;
2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2321 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 (void) ip_mc_leave_src(sk, iml, in_dev); 2322 (void) ip_mc_leave_src(sk, iml, in_dev);
2309 if (in_dev != NULL) { 2323 if (in_dev != NULL)
2310 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2324 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2311 in_dev_put(in_dev);
2312 }
2313 /* decrease mem now to avoid the memleak warning */ 2325 /* decrease mem now to avoid the memleak warning */
2314 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2326 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2315 call_rcu(&iml->rcu, ip_mc_socklist_reclaim); 2327 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
@@ -2323,8 +2335,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2323 struct ip_sf_list *psf; 2335 struct ip_sf_list *psf;
2324 int rv = 0; 2336 int rv = 0;
2325 2337
2326 read_lock(&in_dev->mc_list_lock); 2338 rcu_read_lock();
2327 for (im=in_dev->mc_list; im; im=im->next) { 2339 for_each_pmc_rcu(in_dev, im) {
2328 if (im->multiaddr == mc_addr) 2340 if (im->multiaddr == mc_addr)
2329 break; 2341 break;
2330 } 2342 }
@@ -2345,7 +2357,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2345 } else 2357 } else
2346 rv = 1; /* unspecified source; tentatively allow */ 2358 rv = 1; /* unspecified source; tentatively allow */
2347 } 2359 }
2348 read_unlock(&in_dev->mc_list_lock); 2360 rcu_read_unlock();
2349 return rv; 2361 return rv;
2350} 2362}
2351 2363
@@ -2371,13 +2383,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2371 in_dev = __in_dev_get_rcu(state->dev); 2383 in_dev = __in_dev_get_rcu(state->dev);
2372 if (!in_dev) 2384 if (!in_dev)
2373 continue; 2385 continue;
2374 read_lock(&in_dev->mc_list_lock); 2386 im = rcu_dereference(in_dev->mc_list);
2375 im = in_dev->mc_list;
2376 if (im) { 2387 if (im) {
2377 state->in_dev = in_dev; 2388 state->in_dev = in_dev;
2378 break; 2389 break;
2379 } 2390 }
2380 read_unlock(&in_dev->mc_list_lock);
2381 } 2391 }
2382 return im; 2392 return im;
2383} 2393}
@@ -2385,11 +2395,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2385static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im) 2395static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
2386{ 2396{
2387 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2397 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2388 im = im->next;
2389 while (!im) {
2390 if (likely(state->in_dev != NULL))
2391 read_unlock(&state->in_dev->mc_list_lock);
2392 2398
2399 im = rcu_dereference(im->next_rcu);
2400 while (!im) {
2393 state->dev = next_net_device_rcu(state->dev); 2401 state->dev = next_net_device_rcu(state->dev);
2394 if (!state->dev) { 2402 if (!state->dev) {
2395 state->in_dev = NULL; 2403 state->in_dev = NULL;
@@ -2398,8 +2406,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
2398 state->in_dev = __in_dev_get_rcu(state->dev); 2406 state->in_dev = __in_dev_get_rcu(state->dev);
2399 if (!state->in_dev) 2407 if (!state->in_dev)
2400 continue; 2408 continue;
2401 read_lock(&state->in_dev->mc_list_lock); 2409 im = rcu_dereference(state->in_dev->mc_list);
2402 im = state->in_dev->mc_list;
2403 } 2410 }
2404 return im; 2411 return im;
2405} 2412}
@@ -2435,10 +2442,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2435 __releases(rcu) 2442 __releases(rcu)
2436{ 2443{
2437 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2444 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2438 if (likely(state->in_dev != NULL)) { 2445
2439 read_unlock(&state->in_dev->mc_list_lock); 2446 state->in_dev = NULL;
2440 state->in_dev = NULL;
2441 }
2442 state->dev = NULL; 2447 state->dev = NULL;
2443 rcu_read_unlock(); 2448 rcu_read_unlock();
2444} 2449}
@@ -2460,7 +2465,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2460 querier = "NONE"; 2465 querier = "NONE";
2461#endif 2466#endif
2462 2467
2463 if (state->in_dev->mc_list == im) { 2468 if (rcu_dereference(state->in_dev->mc_list) == im) {
2464 seq_printf(seq, "%d\t%-10s: %5d %7s\n", 2469 seq_printf(seq, "%d\t%-10s: %5d %7s\n",
2465 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); 2470 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
2466 } 2471 }
@@ -2519,8 +2524,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2519 idev = __in_dev_get_rcu(state->dev); 2524 idev = __in_dev_get_rcu(state->dev);
2520 if (unlikely(idev == NULL)) 2525 if (unlikely(idev == NULL))
2521 continue; 2526 continue;
2522 read_lock(&idev->mc_list_lock); 2527 im = rcu_dereference(idev->mc_list);
2523 im = idev->mc_list;
2524 if (likely(im != NULL)) { 2528 if (likely(im != NULL)) {
2525 spin_lock_bh(&im->lock); 2529 spin_lock_bh(&im->lock);
2526 psf = im->sources; 2530 psf = im->sources;
@@ -2531,7 +2535,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2531 } 2535 }
2532 spin_unlock_bh(&im->lock); 2536 spin_unlock_bh(&im->lock);
2533 } 2537 }
2534 read_unlock(&idev->mc_list_lock);
2535 } 2538 }
2536 return psf; 2539 return psf;
2537} 2540}
@@ -2545,9 +2548,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2545 spin_unlock_bh(&state->im->lock); 2548 spin_unlock_bh(&state->im->lock);
2546 state->im = state->im->next; 2549 state->im = state->im->next;
2547 while (!state->im) { 2550 while (!state->im) {
2548 if (likely(state->idev != NULL))
2549 read_unlock(&state->idev->mc_list_lock);
2550
2551 state->dev = next_net_device_rcu(state->dev); 2551 state->dev = next_net_device_rcu(state->dev);
2552 if (!state->dev) { 2552 if (!state->dev) {
2553 state->idev = NULL; 2553 state->idev = NULL;
@@ -2556,8 +2556,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2556 state->idev = __in_dev_get_rcu(state->dev); 2556 state->idev = __in_dev_get_rcu(state->dev);
2557 if (!state->idev) 2557 if (!state->idev)
2558 continue; 2558 continue;
2559 read_lock(&state->idev->mc_list_lock); 2559 state->im = rcu_dereference(state->idev->mc_list);
2560 state->im = state->idev->mc_list;
2561 } 2560 }
2562 if (!state->im) 2561 if (!state->im)
2563 break; 2562 break;
@@ -2603,10 +2602,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2603 spin_unlock_bh(&state->im->lock); 2602 spin_unlock_bh(&state->im->lock);
2604 state->im = NULL; 2603 state->im = NULL;
2605 } 2604 }
2606 if (likely(state->idev != NULL)) { 2605 state->idev = NULL;
2607 read_unlock(&state->idev->mc_list_lock);
2608 state->idev = NULL;
2609 }
2610 state->dev = NULL; 2606 state->dev = NULL;
2611 rcu_read_unlock(); 2607 rcu_read_unlock();
2612} 2608}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7174370b1195..25e318153f14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -55,7 +55,6 @@ EXPORT_SYMBOL(inet_get_local_port_range);
55int inet_csk_bind_conflict(const struct sock *sk, 55int inet_csk_bind_conflict(const struct sock *sk,
56 const struct inet_bind_bucket *tb) 56 const struct inet_bind_bucket *tb)
57{ 57{
58 const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
59 struct sock *sk2; 58 struct sock *sk2;
60 struct hlist_node *node; 59 struct hlist_node *node;
61 int reuse = sk->sk_reuse; 60 int reuse = sk->sk_reuse;
@@ -75,9 +74,9 @@ int inet_csk_bind_conflict(const struct sock *sk,
75 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
76 if (!reuse || !sk2->sk_reuse || 75 if (!reuse || !sk2->sk_reuse ||
77 sk2->sk_state == TCP_LISTEN) { 76 sk2->sk_state == TCP_LISTEN) {
78 const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
79 if (!sk2_rcv_saddr || !sk_rcv_saddr || 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
80 sk2_rcv_saddr == sk_rcv_saddr) 79 sk2_rcv_saddr == sk_rcv_saddr(sk))
81 break; 80 break;
82 } 81 }
83 } 82 }
@@ -358,17 +357,14 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
358 struct ip_options *opt = inet_rsk(req)->opt; 357 struct ip_options *opt = inet_rsk(req)->opt;
359 struct flowi fl = { .oif = sk->sk_bound_dev_if, 358 struct flowi fl = { .oif = sk->sk_bound_dev_if,
360 .mark = sk->sk_mark, 359 .mark = sk->sk_mark,
361 .nl_u = { .ip4_u = 360 .fl4_dst = ((opt && opt->srr) ?
362 { .daddr = ((opt && opt->srr) ? 361 opt->faddr : ireq->rmt_addr),
363 opt->faddr : 362 .fl4_src = ireq->loc_addr,
364 ireq->rmt_addr), 363 .fl4_tos = RT_CONN_FLAGS(sk),
365 .saddr = ireq->loc_addr,
366 .tos = RT_CONN_FLAGS(sk) } },
367 .proto = sk->sk_protocol, 364 .proto = sk->sk_protocol,
368 .flags = inet_sk_flowi_flags(sk), 365 .flags = inet_sk_flowi_flags(sk),
369 .uli_u = { .ports = 366 .fl_ip_sport = inet_sk(sk)->inet_sport,
370 { .sport = inet_sk(sk)->inet_sport, 367 .fl_ip_dport = ireq->rmt_port };
371 .dport = ireq->rmt_port } } };
372 struct net *net = sock_net(sk); 368 struct net *net = sock_net(sk);
373 369
374 security_req_classify_flow(req, &fl); 370 security_req_classify_flow(req, &fl);
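Note on inet_csk_bind_conflict(): the cached sk_rcv_saddr local goes away in favour of calling the sk_rcv_saddr() accessor per comparison. The conflict rule itself is unchanged and reads directly off the surviving lines: after the reuse checks, two sockets on the same port collide when either bound address is the wildcard or both are equal.

    #include <stdbool.h>
    #include <stdio.h>

    /* The address clause of the conflict test, lifted from the hunk;
     * 0 is the IPv4 wildcard (INADDR_ANY). */
    static bool rcv_saddr_conflict(unsigned int a, unsigned int b)
    {
            return !a || !b || a == b;
    }

    int main(void)
    {
            printf("%d\n", rcv_saddr_conflict(0, 0x7f000001));          /* 1 */
            printf("%d\n", rcv_saddr_conflict(0x0a000001, 0x0a000002)); /* 0 */
            return 0;
    }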
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba8042665849..2ada17129fce 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk,
490{ 490{
491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
492 492
493 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 493 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
494 struct inet_diag_entry entry; 494 struct inet_diag_entry entry;
495 struct rtattr *bc = (struct rtattr *)(r + 1); 495 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
496 sizeof(*r),
497 INET_DIAG_REQ_BYTECODE);
496 struct inet_sock *inet = inet_sk(sk); 498 struct inet_sock *inet = inet_sk(sk);
497 499
498 entry.family = sk->sk_family; 500 entry.family = sk->sk_family;
@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk,
512 entry.dport = ntohs(inet->inet_dport); 514 entry.dport = ntohs(inet->inet_dport);
513 entry.userlocks = sk->sk_userlocks; 515 entry.userlocks = sk->sk_userlocks;
514 516
515 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 517 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
516 return 0; 518 return 0;
517 } 519 }
518 520
@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
527{ 529{
528 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 530 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
529 531
530 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 532 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
531 struct inet_diag_entry entry; 533 struct inet_diag_entry entry;
532 struct rtattr *bc = (struct rtattr *)(r + 1); 534 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
535 sizeof(*r),
536 INET_DIAG_REQ_BYTECODE);
533 537
534 entry.family = tw->tw_family; 538 entry.family = tw->tw_family;
535#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 539#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
548 entry.dport = ntohs(tw->tw_dport); 552 entry.dport = ntohs(tw->tw_dport);
549 entry.userlocks = 0; 553 entry.userlocks = 0;
550 554
551 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 555 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
552 return 0; 556 return 0;
553 } 557 }
554 558
@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
618 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 622 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
619 struct inet_connection_sock *icsk = inet_csk(sk); 623 struct inet_connection_sock *icsk = inet_csk(sk);
620 struct listen_sock *lopt; 624 struct listen_sock *lopt;
621 struct rtattr *bc = NULL; 625 const struct nlattr *bc = NULL;
622 struct inet_sock *inet = inet_sk(sk); 626 struct inet_sock *inet = inet_sk(sk);
623 int j, s_j; 627 int j, s_j;
624 int reqnum, s_reqnum; 628 int reqnum, s_reqnum;
@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
638 if (!lopt || !lopt->qlen) 642 if (!lopt || !lopt->qlen)
639 goto out; 643 goto out;
640 644
641 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 645 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
642 bc = (struct rtattr *)(r + 1); 646 bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
647 INET_DIAG_REQ_BYTECODE);
643 entry.sport = inet->inet_num; 648 entry.sport = inet->inet_num;
644 entry.userlocks = sk->sk_userlocks; 649 entry.userlocks = sk->sk_userlocks;
645 } 650 }
@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
672 &ireq->rmt_addr; 677 &ireq->rmt_addr;
673 entry.dport = ntohs(ireq->rmt_port); 678 entry.dport = ntohs(ireq->rmt_port);
674 679
675 if (!inet_diag_bc_run(RTA_DATA(bc), 680 if (!inet_diag_bc_run(nla_data(bc),
676 RTA_PAYLOAD(bc), &entry)) 681 nla_len(bc), &entry))
677 continue; 682 continue;
678 } 683 }
679 684
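Note on inet_diag.c: the three dump paths stop reaching past the request header with pointer arithmetic ((struct rtattr *)(r + 1), guarded by the magic "4 +" length test) and instead use the generic netlink attribute helpers. nlmsg_attrlen() answers "are there attribute bytes after the family header", and nlmsg_find_attr() locates a typed attribute, so the filter bytecode is found by type (INET_DIAG_REQ_BYTECODE) rather than by assumed position. A sketch of what the length helper computes (the real definitions live in include/net/netlink.h; this is a simplification):

    #include <stdio.h>

    struct nlmsghdr { unsigned int nlmsg_len; /* ... */ };

    #define NLMSG_HDRLEN   16U              /* aligned header size */
    #define NLMSG_ALIGN(x) (((x) + 3U) & ~3U)

    /* Bytes remaining after the netlink header and the family
     * request header: that is where attributes start. */
    static int nlmsg_attrlen_sketch(const struct nlmsghdr *nlh, int hdrlen)
    {
            return (int)(nlh->nlmsg_len - NLMSG_HDRLEN - NLMSG_ALIGN(hdrlen));
    }

    int main(void)
    {
            /* 12 bytes of attributes after a 56-byte request header */
            struct nlmsghdr nlh = { .nlmsg_len = 16 + 56 + 12 };

            printf("attr bytes: %d\n", nlmsg_attrlen_sketch(&nlh, 56));
            return 0;
    }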
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1b344f30b463..3c0369a3a663 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -133,8 +133,7 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
133 } 133 }
134 } 134 }
135 } 135 }
136 sk_add_bind_node(child, &tb->owners); 136 inet_bind_hash(child, tb, port);
137 inet_csk(child)->icsk_bind_hash = tb;
138 spin_unlock(&head->lock); 137 spin_unlock(&head->lock);
139 138
140 return 0; 139 return 0;
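Note on __inet_inherit_port(): the two open-coded steps (linking the child on tb->owners and setting icsk_bind_hash) collapse into inet_bind_hash(child, tb, port). Judging from the replaced lines and the extra port argument, the helper bundles at least the following; this shape is an inference, not quoted from the header:

    /* Inferred shape of inet_bind_hash() (see
     * include/net/inet_hashtables.h for the real one): */
    static inline void inet_bind_hash_sketch(struct sock *sk,
                                             struct inet_bind_bucket *tb,
                                             unsigned short snum)
    {
            inet_sk(sk)->inet_num = snum;      /* record the bound port */
            sk_add_bind_node(sk, &tb->owners); /* link into the bucket */
            inet_csk(sk)->icsk_bind_hash = tb; /* remember the bucket */
    }

Using the helper keeps the port/bucket/list triple consistent in one place instead of letting callers update a subset, which is presumably the point of the change.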
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9e94d7cf4f8a..d9bc85751c74 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
63 * refcnt: atomically against modifications on other CPU; 63 * refcnt: atomically against modifications on other CPU;
64 * usually under some other lock to prevent node disappearing 64 * usually under some other lock to prevent node disappearing
65 * dtime: unused node list lock 65 * dtime: unused node list lock
66 * v4daddr: unchangeable 66 * daddr: unchangeable
67 * ip_id_count: atomic value (no lock needed) 67 * ip_id_count: atomic value (no lock needed)
68 */ 68 */
69 69
@@ -79,15 +79,24 @@ static const struct inet_peer peer_fake_node = {
79 .avl_height = 0 79 .avl_height = 0
80}; 80};
81 81
82static struct { 82struct inet_peer_base {
83 struct inet_peer __rcu *root; 83 struct inet_peer __rcu *root;
84 spinlock_t lock; 84 spinlock_t lock;
85 int total; 85 int total;
86} peers = { 86};
87
88static struct inet_peer_base v4_peers = {
89 .root = peer_avl_empty_rcu,
90 .lock = __SPIN_LOCK_UNLOCKED(v4_peers.lock),
91 .total = 0,
92};
93
94static struct inet_peer_base v6_peers = {
87 .root = peer_avl_empty_rcu, 95 .root = peer_avl_empty_rcu,
88 .lock = __SPIN_LOCK_UNLOCKED(peers.lock), 96 .lock = __SPIN_LOCK_UNLOCKED(v6_peers.lock),
89 .total = 0, 97 .total = 0,
90}; 98};
99
 91#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ 100#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 92 101
 93/* Exported for sysctl_net_ipv4. */ 102/* Exported for sysctl_net_ipv4. */
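Note on inetpeer.c: the single anonymous peers root becomes a named struct inet_peer_base with one instance per address family, in preparation for keying the tree on inetpeer_addr instead of a bare __be32. The diff shows only the two bases, not how callers choose between them; a selector along these lines is the obvious glue (hypothetical here):

    #include <stdio.h>

    #define AF_INET  2
    #define AF_INET6 10

    struct inet_peer_base { const char *name; /* root, lock, total */ };

    static struct inet_peer_base v4_peers = { "v4" };
    static struct inet_peer_base v6_peers = { "v6" };

    /* Hypothetical: pick the per-family tree for a lookup/insert. */
    static struct inet_peer_base *family_to_base(int family)
    {
            return family == AF_INET ? &v4_peers : &v6_peers;
    }

    int main(void)
    {
            printf("%s %s\n", family_to_base(AF_INET)->name,
                   family_to_base(AF_INET6)->name);
            return 0;
    }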
@@ -152,28 +161,45 @@ static void unlink_from_unused(struct inet_peer *p)
152 } 161 }
153} 162}
154 163
164static int addr_compare(const struct inetpeer_addr *a,
165 const struct inetpeer_addr *b)
166{
167 int i, n = (a->family == AF_INET ? 1 : 4);
168
169 for (i = 0; i < n; i++) {
170 if (a->a6[i] == b->a6[i])
171 continue;
172 if (a->a6[i] < b->a6[i])
173 return -1;
174 return 1;
175 }
176
177 return 0;
178}
179
155/* 180/*
156 * Called with local BH disabled and the pool lock held. 181 * Called with local BH disabled and the pool lock held.
157 */ 182 */
158#define lookup(_daddr, _stack) \ 183#define lookup(_daddr, _stack, _base) \
159({ \ 184({ \
160 struct inet_peer *u; \ 185 struct inet_peer *u; \
161 struct inet_peer __rcu **v; \ 186 struct inet_peer __rcu **v; \
162 \ 187 \
163 stackptr = _stack; \ 188 stackptr = _stack; \
164 *stackptr++ = &peers.root; \ 189 *stackptr++ = &_base->root; \
165 for (u = rcu_dereference_protected(peers.root, \ 190 for (u = rcu_dereference_protected(_base->root, \
166 lockdep_is_held(&peers.lock)); \ 191 lockdep_is_held(&_base->lock)); \
167 u != peer_avl_empty; ) { \ 192 u != peer_avl_empty; ) { \
168 if (_daddr == u->v4daddr) \ 193 int cmp = addr_compare(_daddr, &u->daddr); \
194 if (cmp == 0) \
169 break; \ 195 break; \
170 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ 196 if (cmp == -1) \
171 v = &u->avl_left; \ 197 v = &u->avl_left; \
172 else \ 198 else \
173 v = &u->avl_right; \ 199 v = &u->avl_right; \
174 *stackptr++ = v; \ 200 *stackptr++ = v; \
175 u = rcu_dereference_protected(*v, \ 201 u = rcu_dereference_protected(*v, \
176 lockdep_is_held(&peers.lock)); \ 202 lockdep_is_held(&_base->lock)); \
177 } \ 203 } \
178 u; \ 204 u; \
179}) 205})
@@ -185,13 +211,15 @@ static void unlink_from_unused(struct inet_peer *p)
185 * But every pointer we follow is guaranteed to be valid thanks to RCU. 211 * But every pointer we follow is guaranteed to be valid thanks to RCU.
186 * We exit from this function if number of links exceeds PEER_MAXDEPTH 212 * We exit from this function if number of links exceeds PEER_MAXDEPTH
187 */ 213 */
188static struct inet_peer *lookup_rcu_bh(__be32 daddr) 214static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
215 struct inet_peer_base *base)
189{ 216{
190 struct inet_peer *u = rcu_dereference_bh(peers.root); 217 struct inet_peer *u = rcu_dereference_bh(base->root);
191 int count = 0; 218 int count = 0;
192 219
193 while (u != peer_avl_empty) { 220 while (u != peer_avl_empty) {
194 if (daddr == u->v4daddr) { 221 int cmp = addr_compare(daddr, &u->daddr);
222 if (cmp == 0) {
195 /* Before taking a reference, check if this entry was 223 /* Before taking a reference, check if this entry was
196 * deleted, unlink_from_pool() sets refcnt=-1 to make 224 * deleted, unlink_from_pool() sets refcnt=-1 to make
197 * distinction between an unused entry (refcnt=0) and 225 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +229,7 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
201 u = NULL; 229 u = NULL;
202 return u; 230 return u;
203 } 231 }
204 if ((__force __u32)daddr < (__force __u32)u->v4daddr) 232 if (cmp == -1)
205 u = rcu_dereference_bh(u->avl_left); 233 u = rcu_dereference_bh(u->avl_left);
206 else 234 else
207 u = rcu_dereference_bh(u->avl_right); 235 u = rcu_dereference_bh(u->avl_right);
@@ -212,19 +240,19 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
212} 240}
213 241
214/* Called with local BH disabled and the pool lock held. */ 242/* Called with local BH disabled and the pool lock held. */
215#define lookup_rightempty(start) \ 243#define lookup_rightempty(start, base) \
216({ \ 244({ \
217 struct inet_peer *u; \ 245 struct inet_peer *u; \
218 struct inet_peer __rcu **v; \ 246 struct inet_peer __rcu **v; \
219 *stackptr++ = &start->avl_left; \ 247 *stackptr++ = &start->avl_left; \
220 v = &start->avl_left; \ 248 v = &start->avl_left; \
221 for (u = rcu_dereference_protected(*v, \ 249 for (u = rcu_dereference_protected(*v, \
222 lockdep_is_held(&peers.lock)); \ 250 lockdep_is_held(&base->lock)); \
223 u->avl_right != peer_avl_empty_rcu; ) { \ 251 u->avl_right != peer_avl_empty_rcu; ) { \
224 v = &u->avl_right; \ 252 v = &u->avl_right; \
225 *stackptr++ = v; \ 253 *stackptr++ = v; \
226 u = rcu_dereference_protected(*v, \ 254 u = rcu_dereference_protected(*v, \
227 lockdep_is_held(&peers.lock)); \ 255 lockdep_is_held(&base->lock)); \
228 } \ 256 } \
229 u; \ 257 u; \
230}) 258})
@@ -234,7 +262,8 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
234 * Look into mm/map_avl.c for more detail description of the ideas. 262 * Look into mm/map_avl.c for more detail description of the ideas.
235 */ 263 */
236static void peer_avl_rebalance(struct inet_peer __rcu **stack[], 264static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
237 struct inet_peer __rcu ***stackend) 265 struct inet_peer __rcu ***stackend,
266 struct inet_peer_base *base)
238{ 267{
239 struct inet_peer __rcu **nodep; 268 struct inet_peer __rcu **nodep;
240 struct inet_peer *node, *l, *r; 269 struct inet_peer *node, *l, *r;
@@ -243,20 +272,20 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
243 while (stackend > stack) { 272 while (stackend > stack) {
244 nodep = *--stackend; 273 nodep = *--stackend;
245 node = rcu_dereference_protected(*nodep, 274 node = rcu_dereference_protected(*nodep,
246 lockdep_is_held(&peers.lock)); 275 lockdep_is_held(&base->lock));
247 l = rcu_dereference_protected(node->avl_left, 276 l = rcu_dereference_protected(node->avl_left,
248 lockdep_is_held(&peers.lock)); 277 lockdep_is_held(&base->lock));
249 r = rcu_dereference_protected(node->avl_right, 278 r = rcu_dereference_protected(node->avl_right,
250 lockdep_is_held(&peers.lock)); 279 lockdep_is_held(&base->lock));
251 lh = node_height(l); 280 lh = node_height(l);
252 rh = node_height(r); 281 rh = node_height(r);
253 if (lh > rh + 1) { /* l: RH+2 */ 282 if (lh > rh + 1) { /* l: RH+2 */
254 struct inet_peer *ll, *lr, *lrl, *lrr; 283 struct inet_peer *ll, *lr, *lrl, *lrr;
255 int lrh; 284 int lrh;
256 ll = rcu_dereference_protected(l->avl_left, 285 ll = rcu_dereference_protected(l->avl_left,
257 lockdep_is_held(&peers.lock)); 286 lockdep_is_held(&base->lock));
258 lr = rcu_dereference_protected(l->avl_right, 287 lr = rcu_dereference_protected(l->avl_right,
259 lockdep_is_held(&peers.lock)); 288 lockdep_is_held(&base->lock));
260 lrh = node_height(lr); 289 lrh = node_height(lr);
261 if (lrh <= node_height(ll)) { /* ll: RH+1 */ 290 if (lrh <= node_height(ll)) { /* ll: RH+1 */
262 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ 291 RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
@@ -268,9 +297,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
268 RCU_INIT_POINTER(*nodep, l); 297 RCU_INIT_POINTER(*nodep, l);
269 } else { /* ll: RH, lr: RH+1 */ 298 } else { /* ll: RH, lr: RH+1 */
270 lrl = rcu_dereference_protected(lr->avl_left, 299 lrl = rcu_dereference_protected(lr->avl_left,
271 lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */ 300 lockdep_is_held(&base->lock)); /* lrl: RH or RH-1 */
272 lrr = rcu_dereference_protected(lr->avl_right, 301 lrr = rcu_dereference_protected(lr->avl_right,
273 lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */ 302 lockdep_is_held(&base->lock)); /* lrr: RH or RH-1 */
274 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ 303 RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
275 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ 304 RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
276 node->avl_height = rh + 1; /* node: RH+1 */ 305 node->avl_height = rh + 1; /* node: RH+1 */
@@ -286,9 +315,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
286 struct inet_peer *rr, *rl, *rlr, *rll; 315 struct inet_peer *rr, *rl, *rlr, *rll;
287 int rlh; 316 int rlh;
288 rr = rcu_dereference_protected(r->avl_right, 317 rr = rcu_dereference_protected(r->avl_right,
289 lockdep_is_held(&peers.lock)); 318 lockdep_is_held(&base->lock));
290 rl = rcu_dereference_protected(r->avl_left, 319 rl = rcu_dereference_protected(r->avl_left,
291 lockdep_is_held(&peers.lock)); 320 lockdep_is_held(&base->lock));
292 rlh = node_height(rl); 321 rlh = node_height(rl);
293 if (rlh <= node_height(rr)) { /* rr: LH+1 */ 322 if (rlh <= node_height(rr)) { /* rr: LH+1 */
294 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ 323 RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
@@ -300,9 +329,9 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
300 RCU_INIT_POINTER(*nodep, r); 329 RCU_INIT_POINTER(*nodep, r);
301 } else { /* rr: RH, rl: RH+1 */ 330 } else { /* rr: RH, rl: RH+1 */
302 rlr = rcu_dereference_protected(rl->avl_right, 331 rlr = rcu_dereference_protected(rl->avl_right,
303 lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */ 332 lockdep_is_held(&base->lock)); /* rlr: LH or LH-1 */
304 rll = rcu_dereference_protected(rl->avl_left, 333 rll = rcu_dereference_protected(rl->avl_left,
305 lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */ 334 lockdep_is_held(&base->lock)); /* rll: LH or LH-1 */
306 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ 335 RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
307 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ 336 RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
308 node->avl_height = lh + 1; /* node: LH+1 */ 337 node->avl_height = lh + 1; /* node: LH+1 */
@@ -321,14 +350,14 @@ static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
321} 350}
322 351
323/* Called with local BH disabled and the pool lock held. */ 352/* Called with local BH disabled and the pool lock held. */
324#define link_to_pool(n) \ 353#define link_to_pool(n, base) \
325do { \ 354do { \
326 n->avl_height = 1; \ 355 n->avl_height = 1; \
327 n->avl_left = peer_avl_empty_rcu; \ 356 n->avl_left = peer_avl_empty_rcu; \
328 n->avl_right = peer_avl_empty_rcu; \ 357 n->avl_right = peer_avl_empty_rcu; \
329 /* lockless readers can catch us now */ \ 358 /* lockless readers can catch us now */ \
330 rcu_assign_pointer(**--stackptr, n); \ 359 rcu_assign_pointer(**--stackptr, n); \
331 peer_avl_rebalance(stack, stackptr); \ 360 peer_avl_rebalance(stack, stackptr, base); \
332} while (0) 361} while (0)
333 362
334static void inetpeer_free_rcu(struct rcu_head *head) 363static void inetpeer_free_rcu(struct rcu_head *head)
@@ -337,13 +366,13 @@ static void inetpeer_free_rcu(struct rcu_head *head)
337} 366}
338 367
339/* May be called with local BH enabled. */ 368/* May be called with local BH enabled. */
340static void unlink_from_pool(struct inet_peer *p) 369static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
341{ 370{
342 int do_free; 371 int do_free;
343 372
344 do_free = 0; 373 do_free = 0;
345 374
346 spin_lock_bh(&peers.lock); 375 spin_lock_bh(&base->lock);
347 /* Check the reference counter. It was artificially incremented by 1 376 /* Check the reference counter. It was artificially incremented by 1
348 * in cleanup() function to prevent sudden disappearing. If we can 377 * in cleanup() function to prevent sudden disappearing. If we can
349 * atomically (because of lockless readers) take this last reference, 378 * atomically (because of lockless readers) take this last reference,
@@ -353,7 +382,7 @@ static void unlink_from_pool(struct inet_peer *p)
353 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 382 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
354 struct inet_peer __rcu **stack[PEER_MAXDEPTH]; 383 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
355 struct inet_peer __rcu ***stackptr, ***delp; 384 struct inet_peer __rcu ***stackptr, ***delp;
356 if (lookup(p->v4daddr, stack) != p) 385 if (lookup(&p->daddr, stack, base) != p)
357 BUG(); 386 BUG();
358 delp = stackptr - 1; /* *delp[0] == p */ 387 delp = stackptr - 1; /* *delp[0] == p */
359 if (p->avl_left == peer_avl_empty_rcu) { 388 if (p->avl_left == peer_avl_empty_rcu) {
@@ -362,11 +391,11 @@ static void unlink_from_pool(struct inet_peer *p)
362 } else { 391 } else {
363 /* look for a node to insert instead of p */ 392 /* look for a node to insert instead of p */
364 struct inet_peer *t; 393 struct inet_peer *t;
365 t = lookup_rightempty(p); 394 t = lookup_rightempty(p, base);
366 BUG_ON(rcu_dereference_protected(*stackptr[-1], 395 BUG_ON(rcu_dereference_protected(*stackptr[-1],
367 lockdep_is_held(&peers.lock)) != t); 396 lockdep_is_held(&base->lock)) != t);
368 **--stackptr = t->avl_left; 397 **--stackptr = t->avl_left;
369 /* t is removed, t->v4daddr > x->v4daddr for any 398 /* t is removed, t->daddr > x->daddr for any
370 * x in p->avl_left subtree. 399 * x in p->avl_left subtree.
371 * Put t in the old place of p. */ 400 * Put t in the old place of p. */
372 RCU_INIT_POINTER(*delp[0], t); 401 RCU_INIT_POINTER(*delp[0], t);
@@ -376,11 +405,11 @@ static void unlink_from_pool(struct inet_peer *p)
376 BUG_ON(delp[1] != &p->avl_left); 405 BUG_ON(delp[1] != &p->avl_left);
377 delp[1] = &t->avl_left; /* was &p->avl_left */ 406 delp[1] = &t->avl_left; /* was &p->avl_left */
378 } 407 }
379 peer_avl_rebalance(stack, stackptr); 408 peer_avl_rebalance(stack, stackptr, base);
380 peers.total--; 409 base->total--;
381 do_free = 1; 410 do_free = 1;
382 } 411 }
383 spin_unlock_bh(&peers.lock); 412 spin_unlock_bh(&base->lock);
384 413
385 if (do_free) 414 if (do_free)
386 call_rcu_bh(&p->rcu, inetpeer_free_rcu); 415 call_rcu_bh(&p->rcu, inetpeer_free_rcu);
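
The comment in unlink_from_pool() above compresses a subtle protocol: cleanup_once() leaves the entry with one artificial reference, and whoever atomically moves the count from 1 to -1 wins the right to free the node; any concurrent lockless lookup that bumped the count first makes the cmpxchg fail, and the node survives. A minimal userspace sketch of that claim step (single-threaded, values illustrative):

    #include <stdio.h>
    #include <stdatomic.h>

    int main(void)
    {
            atomic_int refcnt = 1;  /* the artificial reference left by cleanup */
            int expected = 1;

            /* Moving 1 -> -1 claims the node.  Had a lookup raced us and
             * raised the count, the exchange would fail and 'expected'
             * would report the count we actually observed. */
            if (atomic_compare_exchange_strong(&refcnt, &expected, -1))
                    printf("claimed, safe to free\n");
            else
                    printf("raced (refcnt=%d), node survives\n", expected);
            return 0;
    }
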
@@ -395,6 +424,16 @@ static void unlink_from_pool(struct inet_peer *p)
395 inet_putpeer(p); 424 inet_putpeer(p);
396} 425}
397 426
427static struct inet_peer_base *family_to_base(int family)
428{
429 return (family == AF_INET ? &v4_peers : &v6_peers);
430}
431
432static struct inet_peer_base *peer_to_base(struct inet_peer *p)
433{
434 return family_to_base(p->daddr.family);
435}
436
398/* May be called with local BH enabled. */ 437/* May be called with local BH enabled. */
399static int cleanup_once(unsigned long ttl) 438static int cleanup_once(unsigned long ttl)
400{ 439{
@@ -428,21 +467,22 @@ static int cleanup_once(unsigned long ttl)
428 * happen because of entry limits in route cache. */ 467 * happen because of entry limits in route cache. */
429 return -1; 468 return -1;
430 469
431 unlink_from_pool(p); 470 unlink_from_pool(p, peer_to_base(p));
432 return 0; 471 return 0;
433} 472}
434 473
435/* Called with or without local BH being disabled. */ 474/* Called with or without local BH being disabled. */
436struct inet_peer *inet_getpeer(__be32 daddr, int create) 475struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
437{ 476{
438 struct inet_peer *p;
439 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 struct inet_peer_base *base = family_to_base(AF_INET);
479 struct inet_peer *p;
440 480
441 /* Look up for the address quickly, lockless. 481 /* Look up for the address quickly, lockless.
442 * Because of a concurrent writer, we might not find an existing entry. 482 * Because of a concurrent writer, we might not find an existing entry.
443 */ 483 */
444 rcu_read_lock_bh(); 484 rcu_read_lock_bh();
445 p = lookup_rcu_bh(daddr); 485 p = lookup_rcu_bh(daddr, base);
446 rcu_read_unlock_bh(); 486 rcu_read_unlock_bh();
447 487
448 if (p) { 488 if (p) {
@@ -456,50 +496,57 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
456 /* retry an exact lookup, taking the lock before. 496 /* retry an exact lookup, taking the lock before.
457 * At least, nodes should be hot in our cache. 497 * At least, nodes should be hot in our cache.
458 */ 498 */
459 spin_lock_bh(&peers.lock); 499 spin_lock_bh(&base->lock);
460 p = lookup(daddr, stack); 500 p = lookup(daddr, stack, base);
461 if (p != peer_avl_empty) { 501 if (p != peer_avl_empty) {
462 atomic_inc(&p->refcnt); 502 atomic_inc(&p->refcnt);
463 spin_unlock_bh(&peers.lock); 503 spin_unlock_bh(&base->lock);
464 /* Remove the entry from unused list if it was there. */ 504 /* Remove the entry from unused list if it was there. */
465 unlink_from_unused(p); 505 unlink_from_unused(p);
466 return p; 506 return p;
467 } 507 }
468 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; 508 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
469 if (p) { 509 if (p) {
470 p->v4daddr = daddr; 510 p->daddr = *daddr;
471 atomic_set(&p->refcnt, 1); 511 atomic_set(&p->refcnt, 1);
472 atomic_set(&p->rid, 0); 512 atomic_set(&p->rid, 0);
473 atomic_set(&p->ip_id_count, secure_ip_id(daddr)); 513 atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
474 p->tcp_ts_stamp = 0; 514 p->tcp_ts_stamp = 0;
475 INIT_LIST_HEAD(&p->unused); 515 INIT_LIST_HEAD(&p->unused);
476 516
477 517
478 /* Link the node. */ 518 /* Link the node. */
479 link_to_pool(p); 519 link_to_pool(p, base);
480 peers.total++; 520 base->total++;
481 } 521 }
482 spin_unlock_bh(&peers.lock); 522 spin_unlock_bh(&base->lock);
483 523
484 if (peers.total >= inet_peer_threshold) 524 if (base->total >= inet_peer_threshold)
485 /* Remove one less-recently-used entry. */ 525 /* Remove one less-recently-used entry. */
486 cleanup_once(0); 526 cleanup_once(0);
487 527
488 return p; 528 return p;
489} 529}
490 530
531static int compute_total(void)
532{
533 return v4_peers.total + v6_peers.total;
534}
535EXPORT_SYMBOL_GPL(inet_getpeer);
536
491/* Called with local BH disabled. */ 537/* Called with local BH disabled. */
492static void peer_check_expire(unsigned long dummy) 538static void peer_check_expire(unsigned long dummy)
493{ 539{
494 unsigned long now = jiffies; 540 unsigned long now = jiffies;
495 int ttl; 541 int ttl, total;
496 542
497 if (peers.total >= inet_peer_threshold) 543 total = compute_total();
544 if (total >= inet_peer_threshold)
498 ttl = inet_peer_minttl; 545 ttl = inet_peer_minttl;
499 else 546 else
500 ttl = inet_peer_maxttl 547 ttl = inet_peer_maxttl
501 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 548 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
502 peers.total / inet_peer_threshold * HZ; 549 total / inet_peer_threshold * HZ;
503 while (!cleanup_once(ttl)) { 550 while (!cleanup_once(ttl)) {
504 if (jiffies != now) 551 if (jiffies != now)
505 break; 552 break;
@@ -508,13 +555,14 @@ static void peer_check_expire(unsigned long dummy)
508 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime 555 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
509 * interval depending on the total number of entries (more entries, 556 * interval depending on the total number of entries (more entries,
510 * less interval). */ 557 * less interval). */
511 if (peers.total >= inet_peer_threshold) 558 total = compute_total();
559 if (total >= inet_peer_threshold)
512 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; 560 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
513 else 561 else
514 peer_periodic_timer.expires = jiffies 562 peer_periodic_timer.expires = jiffies
515 + inet_peer_gc_maxtime 563 + inet_peer_gc_maxtime
516 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * 564 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
517 peers.total / inet_peer_threshold * HZ; 565 total / inet_peer_threshold * HZ;
518 add_timer(&peer_periodic_timer); 566 add_timer(&peer_periodic_timer);
519} 567}
520 568
@@ -530,3 +578,4 @@ void inet_putpeer(struct inet_peer *p)
530 578
531 local_bh_enable(); 579 local_bh_enable();
532} 580}
581EXPORT_SYMBOL_GPL(inet_putpeer);
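
The net effect of the inetpeer changes above is that every helper now takes a struct inet_peer_base * instead of touching one global "peers" instance, so a single AVL implementation can serve an IPv4 and an IPv6 tree; inet_getpeer() itself still hardwires AF_INET for now. A reduced userspace sketch of the dispatch (the struct is cut down to what the diff shows; AF_* values as on Linux):

    #include <stdio.h>

    #define AF_INET  2
    #define AF_INET6 10

    struct inet_peer_base {
            int total;                 /* peers in this tree */
    };

    static struct inet_peer_base v4_peers, v6_peers;

    static struct inet_peer_base *family_to_base(int family)
    {
            return family == AF_INET ? &v4_peers : &v6_peers;
    }

    static int compute_total(void)
    {
            /* Threshold checks now have to sum both trees. */
            return v4_peers.total + v6_peers.total;
    }

    int main(void)
    {
            family_to_base(AF_INET)->total = 3;
            family_to_base(AF_INET6)->total = 2;
            printf("total peers: %d\n", compute_total());
            return 0;
    }
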
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 168440834ade..e6215bdd96c0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -141,7 +141,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
141 qp->daddr = arg->iph->daddr; 141 qp->daddr = arg->iph->daddr;
142 qp->user = arg->user; 142 qp->user = arg->user;
143 qp->peer = sysctl_ipfrag_max_dist ? 143 qp->peer = sysctl_ipfrag_max_dist ?
144 inet_getpeer(arg->iph->saddr, 1) : NULL; 144 inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
145} 145}
146 146
147static __inline__ void ip4_frag_free(struct inet_frag_queue *q) 147static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
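
ip4_frag_init() now calls inet_getpeer_v4(), whose definition falls outside this excerpt. Judging from the new inet_getpeer() signature above, it plausibly just wraps the address into the new struct — a hypothetical sketch, not the header's confirmed text:

    /* Hypothetical wrapper -- the real one lives in include/net/inetpeer.h,
     * which this excerpt does not show. */
    static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
    {
            struct inetpeer_addr daddr;

            daddr.a4 = v4daddr;     /* field names taken from 'daddr->a4' and
                                     * 'daddr.family' in the hunks above */
            daddr.family = AF_INET;
            return inet_getpeer(&daddr, create);
    }
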
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 70ff77f02eee..eb68a0e34e49 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -405,11 +405,11 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
405 if (parms->name[0]) 405 if (parms->name[0])
406 strlcpy(name, parms->name, IFNAMSIZ); 406 strlcpy(name, parms->name, IFNAMSIZ);
407 else 407 else
408 sprintf(name, "gre%%d"); 408 strcpy(name, "gre%d");
409 409
410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup); 410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
411 if (!dev) 411 if (!dev)
412 return NULL; 412 return NULL;
413 413
414 dev_net_set(dev, net); 414 dev_net_set(dev, net);
415 415
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
634#ifdef CONFIG_NET_IPGRE_BROADCAST 634#ifdef CONFIG_NET_IPGRE_BROADCAST
635 if (ipv4_is_multicast(iph->daddr)) { 635 if (ipv4_is_multicast(iph->daddr)) {
636 /* Looped back packet, drop it! */ 636 /* Looped back packet, drop it! */
637 if (skb_rtable(skb)->fl.iif == 0) 637 if (rt_is_output_route(skb_rtable(skb)))
638 goto drop; 638 goto drop;
639 tunnel->dev->stats.multicast++; 639 tunnel->dev->stats.multicast++;
640 skb->pkt_type = PACKET_BROADCAST; 640 skb->pkt_type = PACKET_BROADCAST;
@@ -772,16 +772,11 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
772 { 772 {
773 struct flowi fl = { 773 struct flowi fl = {
774 .oif = tunnel->parms.link, 774 .oif = tunnel->parms.link,
775 .nl_u = { 775 .fl4_dst = dst,
776 .ip4_u = { 776 .fl4_src = tiph->saddr,
777 .daddr = dst, 777 .fl4_tos = RT_TOS(tos),
778 .saddr = tiph->saddr, 778 .fl_gre_key = tunnel->parms.o_key
779 .tos = RT_TOS(tos) 779 };
780 }
781 },
782 .proto = IPPROTO_GRE
783 }
784;
785 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 780 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
786 dev->stats.tx_carrier_errors++; 781 dev->stats.tx_carrier_errors++;
787 goto tx_error; 782 goto tx_error;
@@ -823,7 +818,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
823 !ipv4_is_multicast(tunnel->parms.iph.daddr)) || 818 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
824 rt6->rt6i_dst.plen == 128) { 819 rt6->rt6i_dst.plen == 128) {
825 rt6->rt6i_flags |= RTF_MODIFIED; 820 rt6->rt6i_flags |= RTF_MODIFIED;
826 skb_dst(skb)->metrics[RTAX_MTU-1] = mtu; 821 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
827 } 822 }
828 } 823 }
829 824
@@ -895,7 +890,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
895 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 890 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
896#endif 891#endif
897 else 892 else
898 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT); 893 iph->ttl = ip4_dst_hoplimit(&rt->dst);
899 } 894 }
900 895
901 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags; 896 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -951,14 +946,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
951 if (iph->daddr) { 946 if (iph->daddr) {
952 struct flowi fl = { 947 struct flowi fl = {
953 .oif = tunnel->parms.link, 948 .oif = tunnel->parms.link,
954 .nl_u = { 949 .fl4_dst = iph->daddr,
955 .ip4_u = { 950 .fl4_src = iph->saddr,
956 .daddr = iph->daddr, 951 .fl4_tos = RT_TOS(iph->tos),
957 .saddr = iph->saddr, 952 .proto = IPPROTO_GRE,
958 .tos = RT_TOS(iph->tos) 953 .fl_gre_key = tunnel->parms.o_key
959 }
960 },
961 .proto = IPPROTO_GRE
962 }; 954 };
963 struct rtable *rt; 955 struct rtable *rt;
964 956
@@ -1216,14 +1208,11 @@ static int ipgre_open(struct net_device *dev)
1216 if (ipv4_is_multicast(t->parms.iph.daddr)) { 1208 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1217 struct flowi fl = { 1209 struct flowi fl = {
1218 .oif = t->parms.link, 1210 .oif = t->parms.link,
1219 .nl_u = { 1211 .fl4_dst = t->parms.iph.daddr,
1220 .ip4_u = { 1212 .fl4_src = t->parms.iph.saddr,
1221 .daddr = t->parms.iph.daddr, 1213 .fl4_tos = RT_TOS(t->parms.iph.tos),
1222 .saddr = t->parms.iph.saddr, 1214 .proto = IPPROTO_GRE,
1223 .tos = RT_TOS(t->parms.iph.tos) 1215 .fl_gre_key = t->parms.o_key
1224 }
1225 },
1226 .proto = IPPROTO_GRE
1227 }; 1216 };
1228 struct rtable *rt; 1217 struct rtable *rt;
1229 1218
@@ -1775,3 +1764,4 @@ module_exit(ipgre_fini);
1775MODULE_LICENSE("GPL"); 1764MODULE_LICENSE("GPL");
1776MODULE_ALIAS_RTNL_LINK("gre"); 1765MODULE_ALIAS_RTNL_LINK("gre");
1777MODULE_ALIAS_RTNL_LINK("gretap"); 1766MODULE_ALIAS_RTNL_LINK("gretap");
1767MODULE_ALIAS("gre0");
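
Every flowi initializer in ip_gre.c (and in the files below) moves from the nested ".nl_u = { .ip4_u = { ... } }" form to flat .fl4_dst/.fl4_src/.fl4_tos keys, with GRE additionally carrying .fl_gre_key. One way the flat spellings can coexist with the old union layout is as plain aliases; the following standalone sketch assumes that mechanism (flow.h itself is not part of this excerpt):

    #include <stdio.h>

    typedef unsigned int  __be32;
    typedef unsigned char __u8;

    struct flowi {
            int oif;
            int iif;
            union {
                    struct {
                            __be32 daddr;
                            __be32 saddr;
                            __u8   tos;
                            __u8   scope;
                    } ip4_u;
            } nl_u;
    /* Assumed alias mechanism: flat names expand to the nested members. */
    #define fl4_dst   nl_u.ip4_u.daddr
    #define fl4_src   nl_u.ip4_u.saddr
    #define fl4_tos   nl_u.ip4_u.tos
    #define fl4_scope nl_u.ip4_u.scope
    };

    static struct flowi fl_old = { .nl_u = { .ip4_u = { .daddr = 42 } } };
    static struct flowi fl_new = { .fl4_dst = 42 };  /* same storage */

    int main(void)
    {
            printf("%u == %u\n", fl_old.fl4_dst, fl_new.nl_u.ip4_u.daddr);
            return 0;
    }
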
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 439d2a34ee44..04c7b3ba6b39 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -82,6 +82,7 @@
82#include <linux/tcp.h> 82#include <linux/tcp.h>
83 83
84int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; 84int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
85EXPORT_SYMBOL(sysctl_ip_default_ttl);
85 86
86/* Generate a checksum for an outgoing IP datagram. */ 87/* Generate a checksum for an outgoing IP datagram. */
87__inline__ void ip_send_check(struct iphdr *iph) 88__inline__ void ip_send_check(struct iphdr *iph)
@@ -130,7 +131,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
130 int ttl = inet->uc_ttl; 131 int ttl = inet->uc_ttl;
131 132
132 if (ttl < 0) 133 if (ttl < 0)
133 ttl = dst_metric(dst, RTAX_HOPLIMIT); 134 ttl = ip4_dst_hoplimit(dst);
134 return ttl; 135 return ttl;
135} 136}
136 137
@@ -341,15 +342,13 @@ int ip_queue_xmit(struct sk_buff *skb)
341 { 342 {
342 struct flowi fl = { .oif = sk->sk_bound_dev_if, 343 struct flowi fl = { .oif = sk->sk_bound_dev_if,
343 .mark = sk->sk_mark, 344 .mark = sk->sk_mark,
344 .nl_u = { .ip4_u = 345 .fl4_dst = daddr,
345 { .daddr = daddr, 346 .fl4_src = inet->inet_saddr,
346 .saddr = inet->inet_saddr, 347 .fl4_tos = RT_CONN_FLAGS(sk),
347 .tos = RT_CONN_FLAGS(sk) } },
348 .proto = sk->sk_protocol, 348 .proto = sk->sk_protocol,
349 .flags = inet_sk_flowi_flags(sk), 349 .flags = inet_sk_flowi_flags(sk),
350 .uli_u = { .ports = 350 .fl_ip_sport = inet->inet_sport,
351 { .sport = inet->inet_sport, 351 .fl_ip_dport = inet->inet_dport };
352 .dport = inet->inet_dport } } };
353 352
354 /* If this fails, retransmit mechanism of transport layer will 353 /* If this fails, retransmit mechanism of transport layer will
355 * keep trying until route appears or the connection times 354 * keep trying until route appears or the connection times
@@ -1404,14 +1403,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1404 1403
1405 { 1404 {
1406 struct flowi fl = { .oif = arg->bound_dev_if, 1405 struct flowi fl = { .oif = arg->bound_dev_if,
1407 .nl_u = { .ip4_u = 1406 .fl4_dst = daddr,
1408 { .daddr = daddr, 1407 .fl4_src = rt->rt_spec_dst,
1409 .saddr = rt->rt_spec_dst, 1408 .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
1410 .tos = RT_TOS(ip_hdr(skb)->tos) } }, 1409 .fl_ip_sport = tcp_hdr(skb)->dest,
1411 /* Not quite clean, but right. */ 1410 .fl_ip_dport = tcp_hdr(skb)->source,
1412 .uli_u = { .ports =
1413 { .sport = tcp_hdr(skb)->dest,
1414 .dport = tcp_hdr(skb)->source } },
1415 .proto = sk->sk_protocol, 1411 .proto = sk->sk_protocol,
1416 .flags = ip_reply_arg_flowi_flags(arg) }; 1412 .flags = ip_reply_arg_flowi_flags(arg) };
1417 security_skb_classify_flow(skb, &fl); 1413 security_skb_classify_flow(skb, &fl);
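
This file, ip_gre.c above and ipt_REJECT.c below all replace raw dst_metric(dst, RTAX_HOPLIMIT) reads with ip4_dst_hoplimit(), and the route.c hunks further down stop pre-seeding RTAX_HOPLIMIT with sysctl_ip_default_ttl. That only works if the helper falls back to the sysctl at read time, which would also explain the new EXPORT_SYMBOL for it above. Presumed shape of the helper (defined outside this excerpt):

    /* Presumed helper; dst_metric() and sysctl_ip_default_ttl are the
     * symbols already visible in the hunks above. */
    static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
    {
            int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);

            if (hoplimit == 0)                      /* metric never seeded */
                    hoplimit = sysctl_ip_default_ttl;  /* fall back at read */
            return hoplimit;
    }
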
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 3a6e1ec5e9ae..2b097752426b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1191,13 +1191,13 @@ static int __init ic_dynamic(void)
1191 (ic_proto_enabled & IC_USE_DHCP) && 1191 (ic_proto_enabled & IC_USE_DHCP) &&
1192 ic_dhcp_msgtype != DHCPACK) { 1192 ic_dhcp_msgtype != DHCPACK) {
1193 ic_got_reply = 0; 1193 ic_got_reply = 0;
1194 printk(","); 1194 printk(KERN_CONT ",");
1195 continue; 1195 continue;
1196 } 1196 }
1197#endif /* IPCONFIG_DHCP */ 1197#endif /* IPCONFIG_DHCP */
1198 1198
1199 if (ic_got_reply) { 1199 if (ic_got_reply) {
1200 printk(" OK\n"); 1200 printk(KERN_CONT " OK\n");
1201 break; 1201 break;
1202 } 1202 }
1203 1203
@@ -1205,7 +1205,7 @@ static int __init ic_dynamic(void)
1205 continue; 1205 continue;
1206 1206
1207 if (! --retries) { 1207 if (! --retries) {
1208 printk(" timed out!\n"); 1208 printk(KERN_CONT " timed out!\n");
1209 break; 1209 break;
1210 } 1210 }
1211 1211
@@ -1215,7 +1215,7 @@ static int __init ic_dynamic(void)
1215 if (timeout > CONF_TIMEOUT_MAX) 1215 if (timeout > CONF_TIMEOUT_MAX)
1216 timeout = CONF_TIMEOUT_MAX; 1216 timeout = CONF_TIMEOUT_MAX;
1217 1217
1218 printk("."); 1218 printk(KERN_CONT ".");
1219 } 1219 }
1220 1220
1221#ifdef IPCONFIG_BOOTP 1221#ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@ static int __init ic_dynamic(void)
1236 ((ic_got_reply & IC_RARP) ? "RARP" 1236 ((ic_got_reply & IC_RARP) ? "RARP"
1237 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1237 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1238 &ic_servaddr); 1238 &ic_servaddr);
1239 printk("my address is %pI4\n", &ic_myaddr); 1239 printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
1240 1240
1241 return 0; 1241 return 0;
1242} 1242}
@@ -1468,19 +1468,19 @@ static int __init ip_auto_config(void)
1468 /* 1468 /*
1469 * Clue in the operator. 1469 * Clue in the operator.
1470 */ 1470 */
1471 printk("IP-Config: Complete:"); 1471 printk("IP-Config: Complete:\n");
1472 printk("\n device=%s", ic_dev->name); 1472 printk(" device=%s", ic_dev->name);
1473 printk(", addr=%pI4", &ic_myaddr); 1473 printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
1474 printk(", mask=%pI4", &ic_netmask); 1474 printk(KERN_CONT ", mask=%pI4", &ic_netmask);
1475 printk(", gw=%pI4", &ic_gateway); 1475 printk(KERN_CONT ", gw=%pI4", &ic_gateway);
1476 printk(",\n host=%s, domain=%s, nis-domain=%s", 1476 printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s",
1477 utsname()->nodename, ic_domain, utsname()->domainname); 1477 utsname()->nodename, ic_domain, utsname()->domainname);
1478 printk(",\n bootserver=%pI4", &ic_servaddr); 1478 printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr);
1479 printk(", rootserver=%pI4", &root_server_addr); 1479 printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
1480 printk(", rootpath=%s", root_server_path); 1480 printk(KERN_CONT ", rootpath=%s", root_server_path);
1481 if (ic_dev_mtu) 1481 if (ic_dev_mtu)
1482 printk(", mtu=%d", ic_dev_mtu); 1482 printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
1483 printk("\n"); 1483 printk(KERN_CONT "\n");
1484#endif /* !SILENT */ 1484#endif /* !SILENT */
1485 1485
1486 return 0; 1486 return 0;
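
The ipconfig.c changes are purely about log assembly: every fragment that continues the current console line is tagged KERN_CONT, so printk does not start a new default-level record per fragment. Illustrative kernel-style usage, not code from this patch:

    printk(KERN_INFO "IP-Config: probing");
    for (i = 0; i < retries; i++)
            printk(KERN_CONT ".");      /* continues the same line */
    printk(KERN_CONT " OK\n");          /* still one logical record */
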
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index cd300aaee78f..988f52fba54a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -463,13 +463,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
463 { 463 {
464 struct flowi fl = { 464 struct flowi fl = {
465 .oif = tunnel->parms.link, 465 .oif = tunnel->parms.link,
466 .nl_u = { 466 .fl4_dst = dst,
467 .ip4_u = { 467 .fl4_src= tiph->saddr,
468 .daddr = dst, 468 .fl4_tos = RT_TOS(tos),
469 .saddr = tiph->saddr,
470 .tos = RT_TOS(tos)
471 }
472 },
473 .proto = IPPROTO_IPIP 469 .proto = IPPROTO_IPIP
474 }; 470 };
475 471
@@ -589,13 +585,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
589 if (iph->daddr) { 585 if (iph->daddr) {
590 struct flowi fl = { 586 struct flowi fl = {
591 .oif = tunnel->parms.link, 587 .oif = tunnel->parms.link,
592 .nl_u = { 588 .fl4_dst = iph->daddr,
593 .ip4_u = { 589 .fl4_src = iph->saddr,
594 .daddr = iph->daddr, 590 .fl4_tos = RT_TOS(iph->tos),
595 .saddr = iph->saddr,
596 .tos = RT_TOS(iph->tos)
597 }
598 },
599 .proto = IPPROTO_IPIP 591 .proto = IPPROTO_IPIP
600 }; 592 };
601 struct rtable *rt; 593 struct rtable *rt;
@@ -921,3 +913,4 @@ static void __exit ipip_fini(void)
921module_init(ipip_init); 913module_init(ipip_init);
922module_exit(ipip_fini); 914module_exit(ipip_fini);
923MODULE_LICENSE("GPL"); 915MODULE_LICENSE("GPL");
916MODULE_ALIAS("tunl0");
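
Both tunnel drivers in this series gain a MODULE_ALIAS for their default device ("tunl0" here, "gre0" in ip_gre.c above), which lets a by-name interface request pull the module in on demand. A sketch of the consumer side, assuming the classic by-name module pull-in:

    /* Assumed pattern: an ioctl on a missing "tunl0" requests a module of
     * that name, which MODULE_ALIAS("tunl0") maps onto this driver. */
    static void dev_load_sketch(struct net *net, const char *name)
    {
            if (!__dev_get_by_name(net, name))
                    request_module("%s", name);
    }
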
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86dd5691af46..3f3a9afd73e0 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1537,13 +1537,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1537 if (vif->flags & VIFF_TUNNEL) { 1537 if (vif->flags & VIFF_TUNNEL) {
1538 struct flowi fl = { 1538 struct flowi fl = {
1539 .oif = vif->link, 1539 .oif = vif->link,
1540 .nl_u = { 1540 .fl4_dst = vif->remote,
1541 .ip4_u = { 1541 .fl4_src = vif->local,
1542 .daddr = vif->remote, 1542 .fl4_tos = RT_TOS(iph->tos),
1543 .saddr = vif->local,
1544 .tos = RT_TOS(iph->tos)
1545 }
1546 },
1547 .proto = IPPROTO_IPIP 1543 .proto = IPPROTO_IPIP
1548 }; 1544 };
1549 1545
@@ -1553,12 +1549,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1553 } else { 1549 } else {
1554 struct flowi fl = { 1550 struct flowi fl = {
1555 .oif = vif->link, 1551 .oif = vif->link,
1556 .nl_u = { 1552 .fl4_dst = iph->daddr,
1557 .ip4_u = { 1553 .fl4_tos = RT_TOS(iph->tos),
1558 .daddr = iph->daddr,
1559 .tos = RT_TOS(iph->tos)
1560 }
1561 },
1562 .proto = IPPROTO_IPIP 1554 .proto = IPPROTO_IPIP
1563 }; 1555 };
1564 1556
@@ -1654,7 +1646,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1654 if (mrt->vif_table[vif].dev != skb->dev) { 1646 if (mrt->vif_table[vif].dev != skb->dev) {
1655 int true_vifi; 1647 int true_vifi;
1656 1648
1657 if (skb_rtable(skb)->fl.iif == 0) { 1649 if (rt_is_output_route(skb_rtable(skb))) {
1658 /* It is our own packet, looped back. 1650 /* It is our own packet, looped back.
1659 * Very complicated situation... 1651 * Very complicated situation...
1660 * 1652 *
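
ipmr.c and ip_gre.c stop open-coding "skb_rtable(skb)->fl.iif == 0", and route.c below converts every remaining iif test the same way. The helpers are defined outside this excerpt, but the one-for-one substitutions only make sense as thin predicates over the same field — presumed shape:

    /* Presumed helpers behind the substitutions: input routes carry the
     * incoming ifindex in fl.iif, output routes leave it zero. */
    static inline bool rt_is_input_route(struct rtable *rt)
    {
            return rt->fl.iif != 0;
    }

    static inline bool rt_is_output_route(struct rtable *rt)
    {
            return rt->fl.iif == 0;
    }
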
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index d88a46c54fd1..994a1f29ebbc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -31,10 +31,10 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
31 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. 31 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
32 */ 32 */
33 if (addr_type == RTN_LOCAL) { 33 if (addr_type == RTN_LOCAL) {
34 fl.nl_u.ip4_u.daddr = iph->daddr; 34 fl.fl4_dst = iph->daddr;
35 if (type == RTN_LOCAL) 35 if (type == RTN_LOCAL)
36 fl.nl_u.ip4_u.saddr = iph->saddr; 36 fl.fl4_src = iph->saddr;
37 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 37 fl.fl4_tos = RT_TOS(iph->tos);
38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 38 fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
39 fl.mark = skb->mark; 39 fl.mark = skb->mark;
40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 40 fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
@@ -47,7 +47,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
47 } else { 47 } else {
48 /* non-local src, find valid iif to satisfy 48 /* non-local src, find valid iif to satisfy
49 * rp-filter when calling ip_route_input. */ 49 * rp-filter when calling ip_route_input. */
50 fl.nl_u.ip4_u.daddr = iph->saddr; 50 fl.fl4_dst = iph->saddr;
51 if (ip_route_output_key(net, &rt, &fl) != 0) 51 if (ip_route_output_key(net, &rt, &fl) != 0)
52 return -1; 52 return -1;
53 53
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 48111594ee9b..19eb59d01037 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,15 +3,15 @@
3# 3#
4 4
5# objects for l3 independent conntrack 5# objects for l3 independent conntrack
6nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o 6nf_conntrack_ipv4-y := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
7ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y) 7ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
8ifeq ($(CONFIG_PROC_FS),y) 8ifeq ($(CONFIG_PROC_FS),y)
9nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o 9nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
10endif 10endif
11endif 11endif
12 12
13nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o 13nf_nat-y := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
14iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o 14iptable_nat-y := nf_nat_rule.o nf_nat_standalone.o
15 15
16# connection tracking 16# connection tracking
17obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o 17obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad2591ace0..3fac340a28d5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user,
927 private = &tmp; 927 private = &tmp;
928 } 928 }
929#endif 929#endif
930 memset(&info, 0, sizeof(info));
930 info.valid_hooks = t->valid_hooks; 931 info.valid_hooks = t->valid_hooks;
931 memcpy(info.hook_entry, private->hook_entry, 932 memcpy(info.hook_entry, private->hook_entry,
932 sizeof(info.hook_entry)); 933 sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007a6d80..a846d633b3b6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user,
1124 private = &tmp; 1124 private = &tmp;
1125 } 1125 }
1126#endif 1126#endif
1127 memset(&info, 0, sizeof(info));
1127 info.valid_hooks = t->valid_hooks; 1128 info.valid_hooks = t->valid_hooks;
1128 memcpy(info.hook_entry, private->hook_entry, 1129 memcpy(info.hook_entry, private->hook_entry,
1129 sizeof(info.hook_entry)); 1130 sizeof(info.hook_entry));
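
The identical one-line fixes in arp_tables.c and ip_tables.c plug a kernel-to-userspace infoleak: "info" lives on the stack, and without the memset any struct padding (and any field the code forgets to assign) is copied out holding whatever the stack last contained. A userspace demonstration of the leak class, with an illustrative struct:

    #include <stdio.h>
    #include <string.h>

    struct get_info_like {
            char name[8];
            short hooks;            /* padding typically follows */
            int size;
    };

    static void fill(struct get_info_like *info, int zero_first)
    {
            if (zero_first)
                    memset(info, 0, sizeof(*info));  /* the fix */
            strcpy(info->name, "filter");
            info->hooks = 0x1f;
            info->size = 1024;
    }

    int main(void)
    {
            struct get_info_like info;

            memset(&info, 0xAA, sizeof(info));  /* simulate a dirty stack */
            fill(&info, 0);
            /* The padding after 'hooks' (and the tail of 'name') still
             * reads 0xaa here ... */
            for (size_t i = 0; i < sizeof(info); i++)
                    printf("%02x ", ((unsigned char *)&info)[i]);
            printf("\n");
            fill(&info, 1);  /* ... and is zero once the fix is applied. */
            for (size_t i = 0; i < sizeof(info); i++)
                    printf("%02x ", ((unsigned char *)&info)[i]);
            printf("\n");
            return 0;
    }
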
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 43eec80c0e7c..1ff79e557f96 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -116,7 +116,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
116 if (ip_route_me_harder(nskb, addr_type)) 116 if (ip_route_me_harder(nskb, addr_type))
117 goto free_nskb; 117 goto free_nskb;
118 118
119 niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT); 119 niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
120 120
121 /* "Never happens" */ 121 /* "Never happens" */
122 if (nskb->len > dst_mtu(skb_dst(nskb))) 122 if (nskb->len > dst_mtu(skb_dst(nskb)))
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c97431e43..c04787ce1a71 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum)
47 return rcu_dereference(nf_nat_protos[protonum]); 47 return rcu_dereference(nf_nat_protos[protonum]);
48} 48}
49 49
50static const struct nf_nat_protocol *
51nf_nat_proto_find_get(u_int8_t protonum)
52{
53 const struct nf_nat_protocol *p;
54
55 rcu_read_lock();
56 p = __nf_nat_proto_find(protonum);
57 if (!try_module_get(p->me))
58 p = &nf_nat_unknown_protocol;
59 rcu_read_unlock();
60
61 return p;
62}
63
64static void
65nf_nat_proto_put(const struct nf_nat_protocol *p)
66{
67 module_put(p->me);
68}
69
70/* We keep an extra hash for each conntrack, for fast searching. */ 50/* We keep an extra hash for each conntrack, for fast searching. */
71static inline unsigned int 51static inline unsigned int
72hash_by_src(const struct net *net, u16 zone, 52hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
588#include <linux/netfilter/nfnetlink.h> 568#include <linux/netfilter/nfnetlink.h>
589#include <linux/netfilter/nfnetlink_conntrack.h> 569#include <linux/netfilter/nfnetlink_conntrack.h>
590 570
571static const struct nf_nat_protocol *
572nf_nat_proto_find_get(u_int8_t protonum)
573{
574 const struct nf_nat_protocol *p;
575
576 rcu_read_lock();
577 p = __nf_nat_proto_find(protonum);
578 if (!try_module_get(p->me))
579 p = &nf_nat_unknown_protocol;
580 rcu_read_unlock();
581
582 return p;
583}
584
585static void
586nf_nat_proto_put(const struct nf_nat_protocol *p)
587{
588 module_put(p->me);
589}
590
591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { 591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, 592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, 593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f203f7cb..b14ec7d03b6e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
59 local_bh_enable(); 59 local_bh_enable();
60 60
61 socket_seq_show(seq); 61 socket_seq_show(seq);
62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
63 sock_prot_inuse_get(net, &tcp_prot), orphans, 63 sock_prot_inuse_get(net, &tcp_prot), orphans,
64 tcp_death_row.tw_count, sockets, 64 tcp_death_row.tw_count, sockets,
65 atomic_read(&tcp_memory_allocated)); 65 atomic_long_read(&tcp_memory_allocated));
66 seq_printf(seq, "UDP: inuse %d mem %d\n", 66 seq_printf(seq, "UDP: inuse %d mem %ld\n",
67 sock_prot_inuse_get(net, &udp_prot), 67 sock_prot_inuse_get(net, &udp_prot),
68 atomic_read(&udp_memory_allocated)); 68 atomic_long_read(&udp_memory_allocated));
69 seq_printf(seq, "UDPLITE: inuse %d\n", 69 seq_printf(seq, "UDPLITE: inuse %d\n",
70 sock_prot_inuse_get(net, &udplite_prot)); 70 sock_prot_inuse_get(net, &udplite_prot));
71 seq_printf(seq, "RAW: inuse %d\n", 71 seq_printf(seq, "RAW: inuse %d\n",
@@ -253,6 +253,7 @@ static const struct snmp_mib snmp4_net_list[] = {
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
256 SNMP_MIB_SENTINEL 257 SNMP_MIB_SENTINEL
257}; 258};
258 259
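
The %ld conversions here pair with the tcp.c hunk below, which widens tcp_memory_allocated to atomic_long_t and sysctl_tcp_mem to long[3]: these counters track pages, and a 32-bit count saturates at about 8 TiB of 4 KiB pages, within reach of the limits administrators may want to configure on large machines. The arithmetic:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            long page = 4096;   /* assumes 4 KiB pages and 64-bit long */

            printf("INT_MAX pages = %.1f TiB\n",
                   (double)INT_MAX * page / (double)(1ULL << 40));
            return 0;
    }
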
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1f85ef289895..a3d5ab786e81 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -549,10 +549,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
549 { 549 {
550 struct flowi fl = { .oif = ipc.oif, 550 struct flowi fl = { .oif = ipc.oif,
551 .mark = sk->sk_mark, 551 .mark = sk->sk_mark,
552 .nl_u = { .ip4_u = 552 .fl4_dst = daddr,
553 { .daddr = daddr, 553 .fl4_src = saddr,
554 .saddr = saddr, 554 .fl4_tos = tos,
555 .tos = tos } },
556 .proto = inet->hdrincl ? IPPROTO_RAW : 555 .proto = inet->hdrincl ? IPPROTO_RAW :
557 sk->sk_protocol, 556 sk->sk_protocol,
558 }; 557 };
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 987bf9adb318..770f70427f0b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -140,13 +140,15 @@ static unsigned long expires_ljiffies;
140 140
141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142static void ipv4_dst_destroy(struct dst_entry *dst); 142static void ipv4_dst_destroy(struct dst_entry *dst);
143static void ipv4_dst_ifdown(struct dst_entry *dst,
144 struct net_device *dev, int how);
145static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 143static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146static void ipv4_link_failure(struct sk_buff *skb); 144static void ipv4_link_failure(struct sk_buff *skb);
147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 145static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148static int rt_garbage_collect(struct dst_ops *ops); 146static int rt_garbage_collect(struct dst_ops *ops);
149 147
148static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149 int how)
150{
151}
150 152
151static struct dst_ops ipv4_dst_ops = { 153static struct dst_ops ipv4_dst_ops = {
152 .family = AF_INET, 154 .family = AF_INET,
@@ -621,7 +623,7 @@ static inline int rt_fast_clean(struct rtable *rth)
621 /* Kill broadcast/multicast entries very aggresively, if they 623 /* Kill broadcast/multicast entries very aggresively, if they
622 collide in hash table with more useful entries */ 624 collide in hash table with more useful entries */
623 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && 625 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
624 rth->fl.iif && rth->dst.rt_next; 626 rt_is_input_route(rth) && rth->dst.rt_next;
625} 627}
626 628
627static inline int rt_valuable(struct rtable *rth) 629static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@ static inline u32 rt_score(struct rtable *rt)
666 if (rt_valuable(rt)) 668 if (rt_valuable(rt))
667 score |= (1<<31); 669 score |= (1<<31);
668 670
669 if (!rt->fl.iif || 671 if (rt_is_output_route(rt) ||
670 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL))) 672 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
671 score |= (1<<30); 673 score |= (1<<30);
672 674
@@ -682,17 +684,17 @@ static inline bool rt_caching(const struct net *net)
682static inline bool compare_hash_inputs(const struct flowi *fl1, 684static inline bool compare_hash_inputs(const struct flowi *fl1,
683 const struct flowi *fl2) 685 const struct flowi *fl2)
684{ 686{
685 return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | 687 return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
686 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | 688 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
687 (fl1->iif ^ fl2->iif)) == 0); 689 (fl1->iif ^ fl2->iif)) == 0);
688} 690}
689 691
690static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 692static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
691{ 693{
692 return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | 694 return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
693 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | 695 ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
694 (fl1->mark ^ fl2->mark) | 696 (fl1->mark ^ fl2->mark) |
695 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | 697 (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
696 (fl1->oif ^ fl2->oif) | 698 (fl1->oif ^ fl2->oif) |
697 (fl1->iif ^ fl2->iif)) == 0; 699 (fl1->iif ^ fl2->iif)) == 0;
698} 700}
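
compare_keys() and compare_hash_inputs() above are only renamed field-for-field by this patch, but the idiom is worth spelling out: XOR-ing each pair of fields and OR-ing the results compares everything with a single final branch. A userspace illustration with a cut-down key:

    #include <stdio.h>
    #include <stdint.h>

    struct key { uint32_t dst, src, mark; int oif, iif; };

    static int keys_equal(const struct key *a, const struct key *b)
    {
            /* Any differing field makes some XOR non-zero; OR-ing them
             * means one test instead of five. */
            return ((a->dst ^ b->dst) |
                    (a->src ^ b->src) |
                    (a->mark ^ b->mark) |
                    (uint32_t)(a->oif ^ b->oif) |
                    (uint32_t)(a->iif ^ b->iif)) == 0;
    }

    int main(void)
    {
            struct key a = { 1, 2, 3, 4, 5 }, b = a;

            printf("%d", keys_equal(&a, &b));   /* 1 */
            b.iif = 6;
            printf("%d\n", keys_equal(&a, &b)); /* 0 */
            return 0;
    }
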
@@ -1124,7 +1126,7 @@ restart:
1124 */ 1126 */
1125 1127
1126 rt->dst.flags |= DST_NOCACHE; 1128 rt->dst.flags |= DST_NOCACHE;
1127 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { 1129 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1128 int err = arp_bind_neighbour(&rt->dst); 1130 int err = arp_bind_neighbour(&rt->dst);
1129 if (err) { 1131 if (err) {
1130 if (net_ratelimit()) 1132 if (net_ratelimit())
@@ -1222,7 +1224,7 @@ restart:
1222 /* Try to bind route to arp only if it is output 1224 /* Try to bind route to arp only if it is output
1223 route or unicast forwarding path. 1225 route or unicast forwarding path.
1224 */ 1226 */
1225 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { 1227 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1226 int err = arp_bind_neighbour(&rt->dst); 1228 int err = arp_bind_neighbour(&rt->dst);
1227 if (err) { 1229 if (err) {
1228 spin_unlock_bh(rt_hash_lock_addr(hash)); 1230 spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1287,7 +1289,7 @@ void rt_bind_peer(struct rtable *rt, int create)
1287{ 1289{
1288 struct inet_peer *peer; 1290 struct inet_peer *peer;
1289 1291
1290 peer = inet_getpeer(rt->rt_dst, create); 1292 peer = inet_getpeer_v4(rt->rt_dst, create);
1291 1293
1292 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL) 1294 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1293 inet_putpeer(peer); 1295 inet_putpeer(peer);
@@ -1404,7 +1406,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1404 if (rth->fl.fl4_dst != daddr || 1406 if (rth->fl.fl4_dst != daddr ||
1405 rth->fl.fl4_src != skeys[i] || 1407 rth->fl.fl4_src != skeys[i] ||
1406 rth->fl.oif != ikeys[k] || 1408 rth->fl.oif != ikeys[k] ||
1407 rth->fl.iif != 0 || 1409 rt_is_input_route(rth) ||
1408 rt_is_expired(rth) || 1410 rt_is_expired(rth) ||
1409 !net_eq(dev_net(rth->dst.dev), net)) { 1411 !net_eq(dev_net(rth->dst.dev), net)) {
1410 rthp = &rth->dst.rt_next; 1412 rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1433 rt->dst.child = NULL; 1435 rt->dst.child = NULL;
1434 if (rt->dst.dev) 1436 if (rt->dst.dev)
1435 dev_hold(rt->dst.dev); 1437 dev_hold(rt->dst.dev);
1436 if (rt->idev)
1437 in_dev_hold(rt->idev);
1438 rt->dst.obsolete = -1; 1438 rt->dst.obsolete = -1;
1439 rt->dst.lastuse = jiffies; 1439 rt->dst.lastuse = jiffies;
1440 rt->dst.path = &rt->dst; 1440 rt->dst.path = &rt->dst;
@@ -1666,7 +1666,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1666 rth->rt_dst != daddr || 1666 rth->rt_dst != daddr ||
1667 rth->rt_src != iph->saddr || 1667 rth->rt_src != iph->saddr ||
1668 rth->fl.oif != ikeys[k] || 1668 rth->fl.oif != ikeys[k] ||
1669 rth->fl.iif != 0 || 1669 rt_is_input_route(rth) ||
1670 dst_metric_locked(&rth->dst, RTAX_MTU) || 1670 dst_metric_locked(&rth->dst, RTAX_MTU) ||
1671 !net_eq(dev_net(rth->dst.dev), net) || 1671 !net_eq(dev_net(rth->dst.dev), net) ||
1672 rt_is_expired(rth)) 1672 rt_is_expired(rth))
@@ -1686,11 +1686,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1686 if (mtu < dst_mtu(&rth->dst)) { 1686 if (mtu < dst_mtu(&rth->dst)) {
1687 dst_confirm(&rth->dst); 1687 dst_confirm(&rth->dst);
1688 if (mtu < ip_rt_min_pmtu) { 1688 if (mtu < ip_rt_min_pmtu) {
1689 u32 lock = dst_metric(&rth->dst,
1690 RTAX_LOCK);
1689 mtu = ip_rt_min_pmtu; 1691 mtu = ip_rt_min_pmtu;
1690 rth->dst.metrics[RTAX_LOCK-1] |= 1692 lock |= (1 << RTAX_MTU);
1691 (1 << RTAX_MTU); 1693 dst_metric_set(&rth->dst, RTAX_LOCK,
1694 lock);
1692 } 1695 }
1693 rth->dst.metrics[RTAX_MTU-1] = mtu; 1696 dst_metric_set(&rth->dst, RTAX_MTU, mtu);
1694 dst_set_expires(&rth->dst, 1697 dst_set_expires(&rth->dst,
1695 ip_rt_mtu_expires); 1698 ip_rt_mtu_expires);
1696 } 1699 }
@@ -1708,10 +1711,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1708 if (dst_mtu(dst) > mtu && mtu >= 68 && 1711 if (dst_mtu(dst) > mtu && mtu >= 68 &&
1709 !(dst_metric_locked(dst, RTAX_MTU))) { 1712 !(dst_metric_locked(dst, RTAX_MTU))) {
1710 if (mtu < ip_rt_min_pmtu) { 1713 if (mtu < ip_rt_min_pmtu) {
1714 u32 lock = dst_metric(dst, RTAX_LOCK);
1711 mtu = ip_rt_min_pmtu; 1715 mtu = ip_rt_min_pmtu;
1712 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU); 1716 dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
1713 } 1717 }
1714 dst->metrics[RTAX_MTU-1] = mtu; 1718 dst_metric_set(dst, RTAX_MTU, mtu);
1715 dst_set_expires(dst, ip_rt_mtu_expires); 1719 dst_set_expires(dst, ip_rt_mtu_expires);
1716 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); 1720 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1717 } 1721 }
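
From here on, route.c never writes dst->metrics[...] directly: reads keep going through dst_metric(), writes go through dst_metric_set(), and the RTAX_LOCK update becomes an explicit read-modify-write of the lock bits. The accessor is defined outside this excerpt; at this stage it is presumably just a funnel over the same array, the point being that every metrics write now passes through one place:

    /* Presumed accessor behind the conversions above. */
    static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
    {
            dst->metrics[metric - 1] = val;
    }
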
@@ -1728,33 +1732,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1728{ 1732{
1729 struct rtable *rt = (struct rtable *) dst; 1733 struct rtable *rt = (struct rtable *) dst;
1730 struct inet_peer *peer = rt->peer; 1734 struct inet_peer *peer = rt->peer;
1731 struct in_device *idev = rt->idev;
1732 1735
1733 if (peer) { 1736 if (peer) {
1734 rt->peer = NULL; 1737 rt->peer = NULL;
1735 inet_putpeer(peer); 1738 inet_putpeer(peer);
1736 } 1739 }
1737
1738 if (idev) {
1739 rt->idev = NULL;
1740 in_dev_put(idev);
1741 }
1742} 1740}
1743 1741
1744static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1745 int how)
1746{
1747 struct rtable *rt = (struct rtable *) dst;
1748 struct in_device *idev = rt->idev;
1749 if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1750 struct in_device *loopback_idev =
1751 in_dev_get(dev_net(dev)->loopback_dev);
1752 if (loopback_idev) {
1753 rt->idev = loopback_idev;
1754 in_dev_put(idev);
1755 }
1756 }
1757}
1758 1742
1759static void ipv4_link_failure(struct sk_buff *skb) 1743static void ipv4_link_failure(struct sk_buff *skb)
1760{ 1744{
@@ -1790,7 +1774,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1790 __be32 src; 1774 __be32 src;
1791 struct fib_result res; 1775 struct fib_result res;
1792 1776
1793 if (rt->fl.iif == 0) 1777 if (rt_is_output_route(rt))
1794 src = rt->rt_src; 1778 src = rt->rt_src;
1795 else { 1779 else {
1796 rcu_read_lock(); 1780 rcu_read_lock();
@@ -1816,36 +1800,35 @@ static void set_class_tag(struct rtable *rt, u32 tag)
1816 1800
1817static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) 1801static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1818{ 1802{
1803 struct dst_entry *dst = &rt->dst;
1819 struct fib_info *fi = res->fi; 1804 struct fib_info *fi = res->fi;
1820 1805
1821 if (fi) { 1806 if (fi) {
1822 if (FIB_RES_GW(*res) && 1807 if (FIB_RES_GW(*res) &&
1823 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1808 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1824 rt->rt_gateway = FIB_RES_GW(*res); 1809 rt->rt_gateway = FIB_RES_GW(*res);
1825 memcpy(rt->dst.metrics, fi->fib_metrics, 1810 dst_import_metrics(dst, fi->fib_metrics);
1826 sizeof(rt->dst.metrics));
1827 if (fi->fib_mtu == 0) { 1811 if (fi->fib_mtu == 0) {
1828 rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; 1812 dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
1829 if (dst_metric_locked(&rt->dst, RTAX_MTU) && 1813 if (dst_metric_locked(dst, RTAX_MTU) &&
1830 rt->rt_gateway != rt->rt_dst && 1814 rt->rt_gateway != rt->rt_dst &&
1831 rt->dst.dev->mtu > 576) 1815 dst->dev->mtu > 576)
1832 rt->dst.metrics[RTAX_MTU-1] = 576; 1816 dst_metric_set(dst, RTAX_MTU, 576);
1833 } 1817 }
1834#ifdef CONFIG_NET_CLS_ROUTE 1818#ifdef CONFIG_NET_CLS_ROUTE
1835 rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid; 1819 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1836#endif 1820#endif
1837 } else 1821 } else
1838 rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu; 1822 dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
1839 1823
1840 if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0) 1824 if (dst_mtu(dst) > IP_MAX_MTU)
1841 rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; 1825 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1842 if (dst_mtu(&rt->dst) > IP_MAX_MTU) 1826 if (dst_metric(dst, RTAX_ADVMSS) == 0)
1843 rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; 1827 dst_metric_set(dst, RTAX_ADVMSS,
1844 if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0) 1828 max_t(unsigned int, dst->dev->mtu - 40,
1845 rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40, 1829 ip_rt_min_advmss));
1846 ip_rt_min_advmss); 1830 if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
1847 if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40) 1831 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1848 rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1849 1832
1850#ifdef CONFIG_NET_CLS_ROUTE 1833#ifdef CONFIG_NET_CLS_ROUTE
1851#ifdef CONFIG_IP_MULTIPLE_TABLES 1834#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1910,7 +1893,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1910 rth->fl.iif = dev->ifindex; 1893 rth->fl.iif = dev->ifindex;
1911 rth->dst.dev = init_net.loopback_dev; 1894 rth->dst.dev = init_net.loopback_dev;
1912 dev_hold(rth->dst.dev); 1895 dev_hold(rth->dst.dev);
1913 rth->idev = in_dev_get(rth->dst.dev);
1914 rth->fl.oif = 0; 1896 rth->fl.oif = 0;
1915 rth->rt_gateway = daddr; 1897 rth->rt_gateway = daddr;
1916 rth->rt_spec_dst= spec_dst; 1898 rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2032,6 @@ static int __mkroute_input(struct sk_buff *skb,
2050 rth->fl.iif = in_dev->dev->ifindex; 2032 rth->fl.iif = in_dev->dev->ifindex;
2051 rth->dst.dev = (out_dev)->dev; 2033 rth->dst.dev = (out_dev)->dev;
2052 dev_hold(rth->dst.dev); 2034 dev_hold(rth->dst.dev);
2053 rth->idev = in_dev_get(rth->dst.dev);
2054 rth->fl.oif = 0; 2035 rth->fl.oif = 0;
2055 rth->rt_spec_dst= spec_dst; 2036 rth->rt_spec_dst= spec_dst;
2056 2037
@@ -2111,12 +2092,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2111{ 2092{
2112 struct fib_result res; 2093 struct fib_result res;
2113 struct in_device *in_dev = __in_dev_get_rcu(dev); 2094 struct in_device *in_dev = __in_dev_get_rcu(dev);
2114 struct flowi fl = { .nl_u = { .ip4_u = 2095 struct flowi fl = { .fl4_dst = daddr,
2115 { .daddr = daddr, 2096 .fl4_src = saddr,
2116 .saddr = saddr, 2097 .fl4_tos = tos,
2117 .tos = tos, 2098 .fl4_scope = RT_SCOPE_UNIVERSE,
2118 .scope = RT_SCOPE_UNIVERSE,
2119 } },
2120 .mark = skb->mark, 2099 .mark = skb->mark,
2121 .iif = dev->ifindex }; 2100 .iif = dev->ifindex };
2122 unsigned flags = 0; 2101 unsigned flags = 0;
@@ -2231,7 +2210,6 @@ local_input:
2231 rth->fl.iif = dev->ifindex; 2210 rth->fl.iif = dev->ifindex;
2232 rth->dst.dev = net->loopback_dev; 2211 rth->dst.dev = net->loopback_dev;
2233 dev_hold(rth->dst.dev); 2212 dev_hold(rth->dst.dev);
2234 rth->idev = in_dev_get(rth->dst.dev);
2235 rth->rt_gateway = daddr; 2213 rth->rt_gateway = daddr;
2236 rth->rt_spec_dst= spec_dst; 2214 rth->rt_spec_dst= spec_dst;
2237 rth->dst.input= ip_local_deliver; 2215 rth->dst.input= ip_local_deliver;
@@ -2417,9 +2395,6 @@ static int __mkroute_output(struct rtable **result,
2417 if (!rth) 2395 if (!rth)
2418 return -ENOBUFS; 2396 return -ENOBUFS;
2419 2397
2420 in_dev_hold(in_dev);
2421 rth->idev = in_dev;
2422
2423 atomic_set(&rth->dst.__refcnt, 1); 2398 atomic_set(&rth->dst.__refcnt, 1);
2424 rth->dst.flags= DST_HOST; 2399 rth->dst.flags= DST_HOST;
2425 if (IN_DEV_CONF_GET(in_dev, NOXFRM)) 2400 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2506,14 +2481,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2506 const struct flowi *oldflp) 2481 const struct flowi *oldflp)
2507{ 2482{
2508 u32 tos = RT_FL_TOS(oldflp); 2483 u32 tos = RT_FL_TOS(oldflp);
2509 struct flowi fl = { .nl_u = { .ip4_u = 2484 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2510 { .daddr = oldflp->fl4_dst, 2485 .fl4_src = oldflp->fl4_src,
2511 .saddr = oldflp->fl4_src, 2486 .fl4_tos = tos & IPTOS_RT_MASK,
2512 .tos = tos & IPTOS_RT_MASK, 2487 .fl4_scope = ((tos & RTO_ONLINK) ?
2513 .scope = ((tos & RTO_ONLINK) ? 2488 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
2514 RT_SCOPE_LINK :
2515 RT_SCOPE_UNIVERSE),
2516 } },
2517 .mark = oldflp->mark, 2489 .mark = oldflp->mark,
2518 .iif = net->loopback_dev->ifindex, 2490 .iif = net->loopback_dev->ifindex,
2519 .oif = oldflp->oif }; 2491 .oif = oldflp->oif };
@@ -2695,7 +2667,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2695 rth = rcu_dereference_bh(rth->dst.rt_next)) { 2667 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2696 if (rth->fl.fl4_dst == flp->fl4_dst && 2668 if (rth->fl.fl4_dst == flp->fl4_dst &&
2697 rth->fl.fl4_src == flp->fl4_src && 2669 rth->fl.fl4_src == flp->fl4_src &&
2698 rth->fl.iif == 0 && 2670 rt_is_output_route(rth) &&
2699 rth->fl.oif == flp->oif && 2671 rth->fl.oif == flp->oif &&
2700 rth->fl.mark == flp->mark && 2672 rth->fl.mark == flp->mark &&
2701 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2673 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2751,7 +2723,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2751 new->__use = 1; 2723 new->__use = 1;
2752 new->input = dst_discard; 2724 new->input = dst_discard;
2753 new->output = dst_discard; 2725 new->output = dst_discard;
2754 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); 2726 dst_copy_metrics(new, &ort->dst);
2755 2727
2756 new->dev = ort->dst.dev; 2728 new->dev = ort->dst.dev;
2757 if (new->dev) 2729 if (new->dev)
@@ -2759,9 +2731,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2759 2731
2760 rt->fl = ort->fl; 2732 rt->fl = ort->fl;
2761 2733
2762 rt->idev = ort->idev;
2763 if (rt->idev)
2764 in_dev_hold(rt->idev);
2765 rt->rt_genid = rt_genid(net); 2734 rt->rt_genid = rt_genid(net);
2766 rt->rt_flags = ort->rt_flags; 2735 rt->rt_flags = ort->rt_flags;
2767 rt->rt_type = ort->rt_type; 2736 rt->rt_type = ort->rt_type;
@@ -2853,7 +2822,7 @@ static int rt_fill_info(struct net *net,
2853 if (rt->dst.tclassid) 2822 if (rt->dst.tclassid)
2854 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); 2823 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2855#endif 2824#endif
2856 if (rt->fl.iif) 2825 if (rt_is_input_route(rt))
2857 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2826 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2858 else if (rt->rt_src != rt->fl.fl4_src) 2827 else if (rt->rt_src != rt->fl.fl4_src)
2859 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); 2828 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2861,7 +2830,7 @@ static int rt_fill_info(struct net *net,
2861 if (rt->rt_dst != rt->rt_gateway) 2830 if (rt->rt_dst != rt->rt_gateway)
2862 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); 2831 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2863 2832
2864 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) 2833 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2865 goto nla_put_failure; 2834 goto nla_put_failure;
2866 2835
2867 if (rt->fl.mark) 2836 if (rt->fl.mark)
@@ -2878,7 +2847,7 @@ static int rt_fill_info(struct net *net,
2878 } 2847 }
2879 } 2848 }
2880 2849
2881 if (rt->fl.iif) { 2850 if (rt_is_input_route(rt)) {
2882#ifdef CONFIG_IP_MROUTE 2851#ifdef CONFIG_IP_MROUTE
2883 __be32 dst = rt->rt_dst; 2852 __be32 dst = rt->rt_dst;
2884 2853
@@ -2973,13 +2942,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2973 err = -rt->dst.error; 2942 err = -rt->dst.error;
2974 } else { 2943 } else {
2975 struct flowi fl = { 2944 struct flowi fl = {
2976 .nl_u = { 2945 .fl4_dst = dst,
2977 .ip4_u = { 2946 .fl4_src = src,
2978 .daddr = dst, 2947 .fl4_tos = rtm->rtm_tos,
2979 .saddr = src,
2980 .tos = rtm->rtm_tos,
2981 },
2982 },
2983 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2948 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2984 .mark = mark, 2949 .mark = mark,
2985 }; 2950 };
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650cace2180d..47519205a014 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -346,17 +346,14 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
346 */ 346 */
347 { 347 {
348 struct flowi fl = { .mark = sk->sk_mark, 348 struct flowi fl = { .mark = sk->sk_mark,
349 .nl_u = { .ip4_u = 349 .fl4_dst = ((opt && opt->srr) ?
350 { .daddr = ((opt && opt->srr) ? 350 opt->faddr : ireq->rmt_addr),
351 opt->faddr : 351 .fl4_src = ireq->loc_addr,
352 ireq->rmt_addr), 352 .fl4_tos = RT_CONN_FLAGS(sk),
353 .saddr = ireq->loc_addr,
354 .tos = RT_CONN_FLAGS(sk) } },
355 .proto = IPPROTO_TCP, 353 .proto = IPPROTO_TCP,
356 .flags = inet_sk_flowi_flags(sk), 354 .flags = inet_sk_flowi_flags(sk),
357 .uli_u = { .ports = 355 .fl_ip_sport = th->dest,
358 { .sport = th->dest, 356 .fl_ip_dport = th->source };
359 .dport = th->source } } };
360 security_req_classify_flow(req, &fl); 357 security_req_classify_flow(req, &fl);
361 if (ip_route_output_key(sock_net(sk), &rt, &fl)) { 358 if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
362 reqsk_free(req); 359 reqsk_free(req);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da4b17c..e85ff5930607 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -26,6 +26,8 @@ static int zero;
26static int tcp_retr1_max = 255; 26static int tcp_retr1_max = 255;
27static int ip_local_port_range_min[] = { 1, 1 }; 27static int ip_local_port_range_min[] = { 1, 1 };
28static int ip_local_port_range_max[] = { 65535, 65535 }; 28static int ip_local_port_range_max[] = { 65535, 65535 };
29static int tcp_adv_win_scale_min = -31;
30static int tcp_adv_win_scale_max = 31;
29 31
30/* Update system visible IP port range */ 32/* Update system visible IP port range */
31static void set_local_port_range(int range[2]) 33static void set_local_port_range(int range[2])
@@ -153,7 +155,7 @@ static struct ctl_table ipv4_table[] = {
153 .data = &sysctl_ip_default_ttl, 155 .data = &sysctl_ip_default_ttl,
154 .maxlen = sizeof(int), 156 .maxlen = sizeof(int),
155 .mode = 0644, 157 .mode = 0644,
156 .proc_handler = ipv4_doint_and_flush, 158 .proc_handler = proc_dointvec,
157 .extra2 = &init_net, 159 .extra2 = &init_net,
158 }, 160 },
159 { 161 {
@@ -398,7 +400,7 @@ static struct ctl_table ipv4_table[] = {
398 .data = &sysctl_tcp_mem, 400 .data = &sysctl_tcp_mem,
399 .maxlen = sizeof(sysctl_tcp_mem), 401 .maxlen = sizeof(sysctl_tcp_mem),
400 .mode = 0644, 402 .mode = 0644,
401 .proc_handler = proc_dointvec 403 .proc_handler = proc_doulongvec_minmax
402 }, 404 },
403 { 405 {
404 .procname = "tcp_wmem", 406 .procname = "tcp_wmem",
@@ -426,7 +428,9 @@ static struct ctl_table ipv4_table[] = {
426 .data = &sysctl_tcp_adv_win_scale, 428 .data = &sysctl_tcp_adv_win_scale,
427 .maxlen = sizeof(int), 429 .maxlen = sizeof(int),
428 .mode = 0644, 430 .mode = 0644,
429 .proc_handler = proc_dointvec 431 .proc_handler = proc_dointvec_minmax,
432 .extra1 = &tcp_adv_win_scale_min,
433 .extra2 = &tcp_adv_win_scale_max,
430 }, 434 },
431 { 435 {
432 .procname = "tcp_tw_reuse", 436 .procname = "tcp_tw_reuse",
@@ -602,8 +606,7 @@ static struct ctl_table ipv4_table[] = {
602 .data = &sysctl_udp_mem, 606 .data = &sysctl_udp_mem,
603 .maxlen = sizeof(sysctl_udp_mem), 607 .maxlen = sizeof(sysctl_udp_mem),
604 .mode = 0644, 608 .mode = 0644,
605 .proc_handler = proc_dointvec_minmax, 609 .proc_handler = proc_doulongvec_minmax,
606 .extra1 = &zero
607 }, 610 },
608 { 611 {
609 .procname = "udp_rmem_min", 612 .procname = "udp_rmem_min",
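The tcp_adv_win_scale hunk above moves from `proc_dointvec` to `proc_dointvec_minmax` and supplies `extra1`/`extra2` bounds of -31 and 31, so a shift count outside the range an int can tolerate is rejected at write time. A userspace model of that check, assuming only the behavior of the real handler in kernel/sysctl.c (out-of-range writes fail with EINVAL rather than being clamped):

    /* Sketch: model of the extra1/extra2 bounds check that
     * proc_dointvec_minmax applies to tcp_adv_win_scale above. */
    #include <errno.h>
    #include <stdio.h>

    static int tcp_adv_win_scale_min = -31;
    static int tcp_adv_win_scale_max = 31;

    static int do_intvec_minmax(int *data, int val, const int *min, const int *max)
    {
        if ((min && val < *min) || (max && val > *max))
            return -EINVAL;   /* rejected, not clamped */
        *data = val;
        return 0;
    }

    int main(void)
    {
        int tcp_adv_win_scale = 2;

        printf("%d\n", do_intvec_minmax(&tcp_adv_win_scale, 40,
                                        &tcp_adv_win_scale_min,
                                        &tcp_adv_win_scale_max)); /* -EINVAL */
        printf("%d\n", do_intvec_minmax(&tcp_adv_win_scale, -8,
                                        &tcp_adv_win_scale_min,
                                        &tcp_adv_win_scale_max)); /* 0 */
        return 0;
    }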
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1664a0590bb8..6c11eece262c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
282struct percpu_counter tcp_orphan_count; 282struct percpu_counter tcp_orphan_count;
283EXPORT_SYMBOL_GPL(tcp_orphan_count); 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
284 284
285int sysctl_tcp_mem[3] __read_mostly; 285long sysctl_tcp_mem[3] __read_mostly;
286int sysctl_tcp_wmem[3] __read_mostly; 286int sysctl_tcp_wmem[3] __read_mostly;
287int sysctl_tcp_rmem[3] __read_mostly; 287int sysctl_tcp_rmem[3] __read_mostly;
288 288
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
290EXPORT_SYMBOL(sysctl_tcp_rmem); 290EXPORT_SYMBOL(sysctl_tcp_rmem);
291EXPORT_SYMBOL(sysctl_tcp_wmem); 291EXPORT_SYMBOL(sysctl_tcp_wmem);
292 292
293atomic_t tcp_memory_allocated; /* Current allocated memory. */ 293atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
294EXPORT_SYMBOL(tcp_memory_allocated); 294EXPORT_SYMBOL(tcp_memory_allocated);
295 295
296/* 296/*
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1193 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1193 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1194 1194
1195 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1195 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1196 KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1196 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1197 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1197 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1198#endif 1198#endif
1199 1199
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1477 * shouldn't happen. 1477 * shouldn't happen.
1478 */ 1478 */
1479 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 1479 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1480 KERN_INFO "recvmsg bug: copied %X " 1480 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1481 "seq %X rcvnxt %X fl %X\n", *seq, 1481 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1482 TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 1482 flags))
1483 flags))
1484 break; 1483 break;
1485 1484
1486 offset = *seq - TCP_SKB_CB(skb)->seq; 1485 offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1490 goto found_ok_skb; 1489 goto found_ok_skb;
1491 if (tcp_hdr(skb)->fin) 1490 if (tcp_hdr(skb)->fin)
1492 goto found_fin_ok; 1491 goto found_fin_ok;
1493 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " 1492 WARN(!(flags & MSG_PEEK),
1494 "copied %X seq %X rcvnxt %X fl %X\n", 1493 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1495 *seq, TCP_SKB_CB(skb)->seq, 1494 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1496 tp->rcv_nxt, flags);
1497 } 1495 }
1498 1496
1499 /* Well, if we have backlog, try to process it now yet. */ 1497 /* Well, if we have backlog, try to process it now yet. */
@@ -2246,7 +2244,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2246 /* Values greater than interface MTU won't take effect. However 2244 /* Values greater than interface MTU won't take effect. However
2247 * at the point when this call is done we typically don't yet 2245 * at the point when this call is done we typically don't yet
2248 * know which interface is going to be used */ 2246 * know which interface is going to be used */
2249 if (val < 8 || val > MAX_TCP_WINDOW) { 2247 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2250 err = -EINVAL; 2248 err = -EINVAL;
2251 break; 2249 break;
2252 } 2250 }
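The tcp.c hunks widen `sysctl_tcp_mem` to `long[]` and `tcp_memory_allocated` to `atomic_long_t`, matching the `proc_doulongvec_minmax` switch in the sysctl table above: these count pages, and a signed 32-bit page counter tops out at 2^31 pages (8 TiB with 4 KiB pages), which large 64-bit machines can approach. A quick arithmetic sketch; the wrap shown for the int cast is what typical two's-complement systems do, not something the C standard guarantees.

    /* Sketch: why the tcp_mem accounting above moves from int to long. */
    #include <stdio.h>

    int main(void)
    {
        long long pages = (1LL << 40) / 4096;  /* 4 KiB pages in 1 TiB */
        long long eight_tib = pages * 8;       /* 2^31: first int overflow */

        /* On a 64-bit kernel, atomic_long_t gives this 64 bits of room. */
        printf("as long: %lld  as int: %d\n", eight_tib, (int)eight_tib);
        return 0;
    }

The same file also drops `KERN_INFO` from the `WARN()` format strings: `WARN()` supplies its own log level for the warning body, so a level prefix embedded in the format is noise rather than a level.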
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69e353d..824e8c8a17ad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + 259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
260 sizeof(struct sk_buff); 260 sizeof(struct sk_buff);
261 261
262 if (sk->sk_sndbuf < 3 * sndmem) 262 if (sk->sk_sndbuf < 3 * sndmem) {
263 sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); 263 sk->sk_sndbuf = 3 * sndmem;
264 if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
265 sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 }
264} 267}
265 268
266/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk)
396 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && 399 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
397 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 400 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
398 !tcp_memory_pressure && 401 !tcp_memory_pressure &&
399 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { 402 atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
400 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 403 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
401 sysctl_tcp_rmem[2]); 404 sysctl_tcp_rmem[2]);
402 } 405 }
@@ -731,7 +734,7 @@ void tcp_update_metrics(struct sock *sk)
731 * Reset our results. 734 * Reset our results.
732 */ 735 */
733 if (!(dst_metric_locked(dst, RTAX_RTT))) 736 if (!(dst_metric_locked(dst, RTAX_RTT)))
734 dst->metrics[RTAX_RTT - 1] = 0; 737 dst_metric_set(dst, RTAX_RTT, 0);
735 return; 738 return;
736 } 739 }
737 740
@@ -773,34 +776,38 @@ void tcp_update_metrics(struct sock *sk)
773 if (dst_metric(dst, RTAX_SSTHRESH) && 776 if (dst_metric(dst, RTAX_SSTHRESH) &&
774 !dst_metric_locked(dst, RTAX_SSTHRESH) && 777 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
775 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH)) 778 (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
776 dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1; 779 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
777 if (!dst_metric_locked(dst, RTAX_CWND) && 780 if (!dst_metric_locked(dst, RTAX_CWND) &&
778 tp->snd_cwnd > dst_metric(dst, RTAX_CWND)) 781 tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
779 dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd; 782 dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
780 } else if (tp->snd_cwnd > tp->snd_ssthresh && 783 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
781 icsk->icsk_ca_state == TCP_CA_Open) { 784 icsk->icsk_ca_state == TCP_CA_Open) {
782 /* Cong. avoidance phase, cwnd is reliable. */ 785 /* Cong. avoidance phase, cwnd is reliable. */
783 if (!dst_metric_locked(dst, RTAX_SSTHRESH)) 786 if (!dst_metric_locked(dst, RTAX_SSTHRESH))
784 dst->metrics[RTAX_SSTHRESH-1] = 787 dst_metric_set(dst, RTAX_SSTHRESH,
785 max(tp->snd_cwnd >> 1, tp->snd_ssthresh); 788 max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
786 if (!dst_metric_locked(dst, RTAX_CWND)) 789 if (!dst_metric_locked(dst, RTAX_CWND))
787 dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1; 790 dst_metric_set(dst, RTAX_CWND,
791 (dst_metric(dst, RTAX_CWND) +
792 tp->snd_cwnd) >> 1);
788 } else { 793 } else {
789 /* Else slow start did not finish, cwnd is non-sense, 794 /* Else slow start did not finish, cwnd is non-sense,
790 ssthresh may be also invalid. 795 ssthresh may be also invalid.
791 */ 796 */
792 if (!dst_metric_locked(dst, RTAX_CWND)) 797 if (!dst_metric_locked(dst, RTAX_CWND))
793 dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1; 798 dst_metric_set(dst, RTAX_CWND,
799 (dst_metric(dst, RTAX_CWND) +
800 tp->snd_ssthresh) >> 1);
794 if (dst_metric(dst, RTAX_SSTHRESH) && 801 if (dst_metric(dst, RTAX_SSTHRESH) &&
795 !dst_metric_locked(dst, RTAX_SSTHRESH) && 802 !dst_metric_locked(dst, RTAX_SSTHRESH) &&
796 tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH)) 803 tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
797 dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh; 804 dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
798 } 805 }
799 806
800 if (!dst_metric_locked(dst, RTAX_REORDERING)) { 807 if (!dst_metric_locked(dst, RTAX_REORDERING)) {
801 if (dst_metric(dst, RTAX_REORDERING) < tp->reordering && 808 if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
802 tp->reordering != sysctl_tcp_reordering) 809 tp->reordering != sysctl_tcp_reordering)
803 dst->metrics[RTAX_REORDERING-1] = tp->reordering; 810 dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
804 } 811 }
805 } 812 }
806} 813}
@@ -4861,7 +4868,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
4861 return 0; 4868 return 0;
4862 4869
4863 /* If we are under soft global TCP memory pressure, do not expand. */ 4870 /* If we are under soft global TCP memory pressure, do not expand. */
4864 if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 4871 if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4865 return 0; 4872 return 0;
4866 4873
4867 /* If we filled the congestion window, do not expand. */ 4874 /* If we filled the congestion window, do not expand. */
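Every metrics hunk in tcp_input.c replaces a direct store of the form `dst->metrics[RTAX_X - 1] = v` with the `dst_metric_set()` accessor (the read side appeared earlier as `dst_metrics_ptr()`), so the 1-based-constant-into-0-based-array adjustment lives in exactly one place. A minimal model of that accessor pair; the RTAX_* values are a subset mirroring include/linux/rtnetlink.h and the struct is a stand-in, not the kernel's `struct dst_entry`.

    /* Sketch: model of the dst_metric_set()/dst_metric() helpers the
     * hunks above switch to. */
    #include <stdio.h>

    enum { RTAX_SSTHRESH = 6, RTAX_CWND = 7, RTAX_MAX = 16 };

    struct dst_entry_model { unsigned long metrics[RTAX_MAX]; };

    static void dst_metric_set(struct dst_entry_model *dst, int metric,
                               unsigned long val)
    {
        dst->metrics[metric - 1] = val;   /* the only place the -1 appears */
    }

    static unsigned long dst_metric(const struct dst_entry_model *dst,
                                    int metric)
    {
        return dst->metrics[metric - 1];
    }

    int main(void)
    {
        struct dst_entry_model dst = { { 0 } };

        dst_metric_set(&dst, RTAX_SSTHRESH, 10);
        printf("%lu\n", dst_metric(&dst, RTAX_SSTHRESH)); /* 10 */
        return 0;
    }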
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d41682..4fc3387aa994 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
415 !icsk->icsk_backoff) 415 !icsk->icsk_backoff)
416 break; 416 break;
417 417
418 if (sock_owned_by_user(sk))
419 break;
420
418 icsk->icsk_backoff--; 421 icsk->icsk_backoff--;
419 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << 422 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
420 icsk->icsk_backoff; 423 icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
429 if (remaining) { 432 if (remaining) {
430 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 433 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
431 remaining, TCP_RTO_MAX); 434 remaining, TCP_RTO_MAX);
432 } else if (sock_owned_by_user(sk)) {
433 /* RTO revert clocked out retransmission,
434 * but socket is locked. Will defer. */
435 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
436 HZ/20, TCP_RTO_MAX);
437 } else { 435 } else {
438 /* RTO revert clocked out retransmission. 436 /* RTO revert clocked out retransmission.
439 * Will retransmit now */ 437 * Will retransmit now */
@@ -1212,12 +1210,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1212}; 1210};
1213#endif 1211#endif
1214 1212
1215static struct timewait_sock_ops tcp_timewait_sock_ops = {
1216 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1217 .twsk_unique = tcp_twsk_unique,
1218 .twsk_destructor= tcp_twsk_destructor,
1219};
1220
1221int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1213int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1222{ 1214{
1223 struct tcp_extend_values tmp_ext; 1215 struct tcp_extend_values tmp_ext;
@@ -1349,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1349 tcp_death_row.sysctl_tw_recycle && 1341 tcp_death_row.sysctl_tw_recycle &&
1350 (dst = inet_csk_route_req(sk, req)) != NULL && 1342 (dst = inet_csk_route_req(sk, req)) != NULL &&
1351 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1343 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1352 peer->v4daddr == saddr) { 1344 peer->daddr.a4 == saddr) {
1353 inet_peer_refcheck(peer); 1345 inet_peer_refcheck(peer);
1354 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1346 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1355 (s32)(peer->tcp_ts - req->ts_recent) > 1347 (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1765,64 +1757,40 @@ do_time_wait:
1765 goto discard_it; 1757 goto discard_it;
1766} 1758}
1767 1759
1768/* VJ's idea. Save last timestamp seen from this destination 1760struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1769 * and hold it at least for normal timewait interval to use for duplicate
1770 * segment detection in subsequent connections, before they enter synchronized
1771 * state.
1772 */
1773
1774int tcp_v4_remember_stamp(struct sock *sk)
1775{ 1761{
1762 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1776 struct inet_sock *inet = inet_sk(sk); 1763 struct inet_sock *inet = inet_sk(sk);
1777 struct tcp_sock *tp = tcp_sk(sk); 1764 struct inet_peer *peer;
1778 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1779 struct inet_peer *peer = NULL;
1780 int release_it = 0;
1781 1765
1782 if (!rt || rt->rt_dst != inet->inet_daddr) { 1766 if (!rt || rt->rt_dst != inet->inet_daddr) {
1783 peer = inet_getpeer(inet->inet_daddr, 1); 1767 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1784 release_it = 1; 1768 *release_it = true;
1785 } else { 1769 } else {
1786 if (!rt->peer) 1770 if (!rt->peer)
1787 rt_bind_peer(rt, 1); 1771 rt_bind_peer(rt, 1);
1788 peer = rt->peer; 1772 peer = rt->peer;
1773 *release_it = false;
1789 } 1774 }
1790 1775
1791 if (peer) { 1776 return peer;
1792 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
1793 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1794 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1795 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1796 peer->tcp_ts = tp->rx_opt.ts_recent;
1797 }
1798 if (release_it)
1799 inet_putpeer(peer);
1800 return 1;
1801 }
1802
1803 return 0;
1804} 1777}
1805EXPORT_SYMBOL(tcp_v4_remember_stamp); 1778EXPORT_SYMBOL(tcp_v4_get_peer);
1806 1779
1807int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) 1780void *tcp_v4_tw_get_peer(struct sock *sk)
1808{ 1781{
1809 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1); 1782 struct inet_timewait_sock *tw = inet_twsk(sk);
1810
1811 if (peer) {
1812 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1813
1814 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
1815 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1816 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1817 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
1818 peer->tcp_ts = tcptw->tw_ts_recent;
1819 }
1820 inet_putpeer(peer);
1821 return 1;
1822 }
1823 1783
1824 return 0; 1784 return inet_getpeer_v4(tw->tw_daddr, 1);
1825} 1785}
1786EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1787
1788static struct timewait_sock_ops tcp_timewait_sock_ops = {
1789 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1790 .twsk_unique = tcp_twsk_unique,
1791 .twsk_destructor= tcp_twsk_destructor,
1792 .twsk_getpeer = tcp_v4_tw_get_peer,
1793};
1826 1794
1827const struct inet_connection_sock_af_ops ipv4_specific = { 1795const struct inet_connection_sock_af_ops ipv4_specific = {
1828 .queue_xmit = ip_queue_xmit, 1796 .queue_xmit = ip_queue_xmit,
@@ -1830,7 +1798,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
1830 .rebuild_header = inet_sk_rebuild_header, 1798 .rebuild_header = inet_sk_rebuild_header,
1831 .conn_request = tcp_v4_conn_request, 1799 .conn_request = tcp_v4_conn_request,
1832 .syn_recv_sock = tcp_v4_syn_recv_sock, 1800 .syn_recv_sock = tcp_v4_syn_recv_sock,
1833 .remember_stamp = tcp_v4_remember_stamp, 1801 .get_peer = tcp_v4_get_peer,
1834 .net_header_len = sizeof(struct iphdr), 1802 .net_header_len = sizeof(struct iphdr),
1835 .setsockopt = ip_setsockopt, 1803 .setsockopt = ip_setsockopt,
1836 .getsockopt = ip_getsockopt, 1804 .getsockopt = ip_getsockopt,
@@ -2045,7 +2013,9 @@ get_req:
2045 } 2013 }
2046get_sk: 2014get_sk:
2047 sk_nulls_for_each_from(sk, node) { 2015 sk_nulls_for_each_from(sk, node) {
2048 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) { 2016 if (!net_eq(sock_net(sk), net))
2017 continue;
2018 if (sk->sk_family == st->family) {
2049 cur = sk; 2019 cur = sk;
2050 goto out; 2020 goto out;
2051 } 2021 }
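The tcp_ipv4.c refactor above splits `tcp_v4_remember_stamp()` in two: the IPv4-only part becomes `tcp_v4_get_peer()`, which either borrows the peer cached on the route or takes a fresh reference via `inet_getpeer_v4()` and reports which case happened through `*release_it`; the timestamp bookkeeping moves to protocol-independent code in tcp_minisocks.c below. A sketch of the caller contract only, with stand-in types in place of the kernel's `struct inet_peer` machinery:

    /* Sketch: the ownership contract of the get_peer hook added above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct peer { int refcnt; };

    static struct peer cached = { .refcnt = 1 };

    static struct peer *get_peer(bool have_cached_route, bool *release_it)
    {
        if (have_cached_route) {
            *release_it = false;   /* borrowed from the route's reference */
            return &cached;
        }
        cached.refcnt++;           /* models inet_getpeer_v4(..., 1) */
        *release_it = true;        /* caller owns this reference */
        return &cached;
    }

    static void put_peer(struct peer *p) { p->refcnt--; }

    int main(void)
    {
        bool release;
        struct peer *p = get_peer(false, &release);

        /* ... caller updates p's timestamps, as tcp_remember_stamp does ... */
        if (release)
            put_peer(p);
        printf("refcnt=%d\n", p->refcnt); /* back to 1 */
        return 0;
    }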
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43cf901d7659..80b1f80759ab 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,6 +49,56 @@ struct inet_timewait_death_row tcp_death_row = {
49}; 49};
50EXPORT_SYMBOL_GPL(tcp_death_row); 50EXPORT_SYMBOL_GPL(tcp_death_row);
51 51
52/* VJ's idea. Save last timestamp seen from this destination
53 * and hold it at least for normal timewait interval to use for duplicate
54 * segment detection in subsequent connections, before they enter synchronized
55 * state.
56 */
57
58static int tcp_remember_stamp(struct sock *sk)
59{
60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct tcp_sock *tp = tcp_sk(sk);
62 struct inet_peer *peer;
63 bool release_it;
64
65 peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
66 if (peer) {
67 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
68 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
69 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
70 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
71 peer->tcp_ts = tp->rx_opt.ts_recent;
72 }
73 if (release_it)
74 inet_putpeer(peer);
75 return 1;
76 }
77
78 return 0;
79}
80
81static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
82{
83 struct sock *sk = (struct sock *) tw;
84 struct inet_peer *peer;
85
86 peer = twsk_getpeer(sk);
87 if (peer) {
88 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
89
90 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
91 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
92 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
93 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
94 peer->tcp_ts = tcptw->tw_ts_recent;
95 }
96 inet_putpeer(peer);
97 return 1;
98 }
99 return 0;
100}
101
52static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 102static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
53{ 103{
54 if (seq == s_win) 104 if (seq == s_win)
@@ -149,14 +199,9 @@ kill_with_rst:
149 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 199 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
150 } 200 }
151 201
152 /* I am shamed, but failed to make it more elegant. 202 if (tcp_death_row.sysctl_tw_recycle &&
153 * Yes, it is direct reference to IP, which is impossible 203 tcptw->tw_ts_recent_stamp &&
154 * to generalize to IPv6. Taking into account that IPv6 204 tcp_tw_remember_stamp(tw))
155 * do not understand recycling in any case, it not
156 * a big problem in practice. --ANK */
157 if (tw->tw_family == AF_INET &&
158 tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
159 tcp_v4_tw_remember_stamp(tw))
160 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout, 205 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
161 TCP_TIMEWAIT_LEN); 206 TCP_TIMEWAIT_LEN);
162 else 207 else
@@ -274,7 +319,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
274 int recycle_ok = 0; 319 int recycle_ok = 0;
275 320
276 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) 321 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
277 recycle_ok = icsk->icsk_af_ops->remember_stamp(sk); 322 recycle_ok = tcp_remember_stamp(sk);
278 323
279 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) 324 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
280 tw = inet_twsk_alloc(sk, state); 325 tw = inet_twsk_alloc(sk, state);
@@ -347,7 +392,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
347 * socket up. We've got bigger problems than 392 * socket up. We've got bigger problems than
348 * non-graceful socket closings. 393 * non-graceful socket closings.
349 */ 394 */
350 LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); 395 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
351 } 396 }
352 397
353 tcp_update_metrics(sk); 398 tcp_update_metrics(sk);
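Besides hosting the now address-family-independent `tcp_remember_stamp()`, the tcp_minisocks.c hunk replaces the `LIMIT_NETDEBUG` printk on time-wait bucket overflow with a per-netns SNMP counter, so the event shows up in `/proc/net/netstat` instead of rate-limited log lines. A toy model of the observable effect only; the kernel's counters are per-cpu and incremented through `NET_INC_STATS_BH()`, which this does not reproduce.

    /* Sketch: printk replaced by a MIB counter, as in the hunk above. */
    #include <stdio.h>

    enum { LINUX_MIB_TCPTIMEWAITOVERFLOW, LINUX_MIB_MAX };

    static unsigned long mib[LINUX_MIB_MAX];

    static void net_inc_stats(int field) { mib[field]++; }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            net_inc_stats(LINUX_MIB_TCPTIMEWAITOVERFLOW);

        /* Userspace reads this via /proc/net/netstat, not dmesg. */
        printf("TCPTimeWaitOverflow: %lu\n",
               mib[LINUX_MIB_TCPTIMEWAITOVERFLOW]);
        return 0;
    }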
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 05b1ecf36763..97041f24cd27 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -55,7 +55,7 @@ int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
55int sysctl_tcp_tso_win_divisor __read_mostly = 3; 55int sysctl_tcp_tso_win_divisor __read_mostly = 3;
56 56
57int sysctl_tcp_mtu_probing __read_mostly = 0; 57int sysctl_tcp_mtu_probing __read_mostly = 0;
58int sysctl_tcp_base_mss __read_mostly = 512; 58int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
59 59
60/* By default, RFC2861 behavior. */ 60/* By default, RFC2861 behavior. */
61int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 61int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@ -231,11 +231,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
231 /* when initializing use the value from init_rcv_wnd 231 /* when initializing use the value from init_rcv_wnd
232 * rather than the default from above 232 * rather than the default from above
233 */ 233 */
234 if (init_rcv_wnd && 234 if (init_rcv_wnd)
235 (*rcv_wnd > init_rcv_wnd * mss)) 235 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
236 *rcv_wnd = init_rcv_wnd * mss; 236 else
237 else if (*rcv_wnd > init_cwnd * mss) 237 *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
238 *rcv_wnd = init_cwnd * mss;
239 } 238 }
240 239
241 /* Set the clamp no higher than max representable value */ 240 /* Set the clamp no higher than max representable value */
@@ -386,27 +385,30 @@ struct tcp_out_options {
386 */ 385 */
387static u8 tcp_cookie_size_check(u8 desired) 386static u8 tcp_cookie_size_check(u8 desired)
388{ 387{
389 if (desired > 0) { 388 int cookie_size;
389
390 if (desired > 0)
390 /* previously specified */ 391 /* previously specified */
391 return desired; 392 return desired;
392 } 393
393 if (sysctl_tcp_cookie_size <= 0) { 394 cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
395 if (cookie_size <= 0)
394 /* no default specified */ 396 /* no default specified */
395 return 0; 397 return 0;
396 } 398
397 if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) { 399 if (cookie_size <= TCP_COOKIE_MIN)
398 /* value too small, specify minimum */ 400 /* value too small, specify minimum */
399 return TCP_COOKIE_MIN; 401 return TCP_COOKIE_MIN;
400 } 402
401 if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) { 403 if (cookie_size >= TCP_COOKIE_MAX)
402 /* value too large, specify maximum */ 404 /* value too large, specify maximum */
403 return TCP_COOKIE_MAX; 405 return TCP_COOKIE_MAX;
404 } 406
405 if (0x1 & sysctl_tcp_cookie_size) { 407 if (cookie_size & 1)
406 /* 8-bit multiple, illegal, fix it */ 408 /* 8-bit multiple, illegal, fix it */
407 return (u8)(sysctl_tcp_cookie_size + 0x1); 409 cookie_size++;
408 } 410
409 return (u8)sysctl_tcp_cookie_size; 411 return (u8)cookie_size;
410} 412}
411 413
412/* Write previously computed TCP options to the packet. 414/* Write previously computed TCP options to the packet.
@@ -822,8 +824,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
822 &md5); 824 &md5);
823 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 825 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
824 826
825 if (tcp_packets_in_flight(tp) == 0) 827 if (tcp_packets_in_flight(tp) == 0) {
826 tcp_ca_event(sk, CA_EVENT_TX_START); 828 tcp_ca_event(sk, CA_EVENT_TX_START);
829 skb->ooo_okay = 1;
830 } else
831 skb->ooo_okay = 0;
827 832
828 skb_push(skb, tcp_header_size); 833 skb_push(skb, tcp_header_size);
829 skb_reset_transport_header(skb); 834 skb_reset_transport_header(skb);
@@ -1513,6 +1518,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1513 struct tcp_sock *tp = tcp_sk(sk); 1518 struct tcp_sock *tp = tcp_sk(sk);
1514 const struct inet_connection_sock *icsk = inet_csk(sk); 1519 const struct inet_connection_sock *icsk = inet_csk(sk);
1515 u32 send_win, cong_win, limit, in_flight; 1520 u32 send_win, cong_win, limit, in_flight;
1521 int win_divisor;
1516 1522
1517 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) 1523 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
1518 goto send_now; 1524 goto send_now;
@@ -1544,13 +1550,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1544 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1550 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1545 goto send_now; 1551 goto send_now;
1546 1552
1547 if (sysctl_tcp_tso_win_divisor) { 1553 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1554 if (win_divisor) {
1548 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1555 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1549 1556
1550 /* If at least some fraction of a window is available, 1557 /* If at least some fraction of a window is available,
1551 * just use it. 1558 * just use it.
1552 */ 1559 */
1553 chunk /= sysctl_tcp_tso_win_divisor; 1560 chunk /= win_divisor;
1554 if (limit >= chunk) 1561 if (limit >= chunk)
1555 goto send_now; 1562 goto send_now;
1556 } else { 1563 } else {
@@ -2592,6 +2599,7 @@ int tcp_connect(struct sock *sk)
2592{ 2599{
2593 struct tcp_sock *tp = tcp_sk(sk); 2600 struct tcp_sock *tp = tcp_sk(sk);
2594 struct sk_buff *buff; 2601 struct sk_buff *buff;
2602 int err;
2595 2603
2596 tcp_connect_init(sk); 2604 tcp_connect_init(sk);
2597 2605
@@ -2614,7 +2622,9 @@ int tcp_connect(struct sock *sk)
2614 sk->sk_wmem_queued += buff->truesize; 2622 sk->sk_wmem_queued += buff->truesize;
2615 sk_mem_charge(sk, buff->truesize); 2623 sk_mem_charge(sk, buff->truesize);
2616 tp->packets_out += tcp_skb_pcount(buff); 2624 tp->packets_out += tcp_skb_pcount(buff);
2617 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2625 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2626 if (err == -ECONNREFUSED)
2627 return err;
2618 2628
2619 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2629 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2620 * in order to make this packet get counted in tcpOutSegs. 2630 * in order to make this packet get counted in tcpOutSegs.
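Two tcp_output.c hunks above snapshot a sysctl with `ACCESS_ONCE()` before testing it (`sysctl_tcp_cookie_size`, `sysctl_tcp_tso_win_divisor`): without the snapshot, each comparison re-reads a variable a concurrent sysctl write may be changing, so the range checks can pass against one value and the function return another. A sketch of the cookie-size case; the `ACCESS_ONCE()` definition below is the volatile cast the kernel macro of this era expanded to, and the TCP_COOKIE_MIN/MAX clamps from the real function are omitted for brevity.

    /* Sketch: read-once pattern from tcp_cookie_size_check() above. */
    #include <stdio.h>

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    int sysctl_tcp_cookie_size;   /* may change concurrently via sysctl */

    static unsigned char cookie_size_check(unsigned char desired)
    {
        int cookie_size;

        if (desired > 0)
            return desired;                 /* previously specified */

        cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
        if (cookie_size <= 0)
            return 0;
        if (cookie_size & 1)                /* odd values rounded up */
            cookie_size++;
        return (unsigned char)cookie_size;
    }

    int main(void)
    {
        sysctl_tcp_cookie_size = 9;
        printf("%u\n", cookie_size_check(0)); /* 10 */
        return 0;
    }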
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 6211e2114173..85ee7eb7e38e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -154,7 +154,7 @@ static int tcpprobe_sprint(char *tbuf, int n)
154 struct timespec tv 154 struct timespec tv
155 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 155 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
156 156
157 return snprintf(tbuf, n, 157 return scnprintf(tbuf, n,
158 "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n", 158 "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
159 (unsigned long) tv.tv_sec, 159 (unsigned long) tv.tv_sec,
160 (unsigned long) tv.tv_nsec, 160 (unsigned long) tv.tv_nsec,
@@ -174,7 +174,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
174 return -EINVAL; 174 return -EINVAL;
175 175
176 while (cnt < len) { 176 while (cnt < len) {
177 char tbuf[128]; 177 char tbuf[164];
178 int width; 178 int width;
179 179
180 /* Wait for data in buffer */ 180 /* Wait for data in buffer */
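The tcp_probe.c switch from `snprintf` to `scnprintf` (plus the larger `tbuf`) matters because `snprintf` returns the length the output *would* have had, so a caller that advances a buffer cursor by the return value walks past the buffer whenever output is truncated; `scnprintf` returns the bytes actually stored. `scnprintf()` is a kernel helper from lib/vsprintf.c, not libc, so the sketch models it on top of `vsnprintf`:

    /* Sketch: the snprintf/scnprintf return-value difference. */
    #include <stdarg.h>
    #include <stdio.h>

    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        /* Clamp the would-be length to what was actually stored. */
        if (i >= (int)size)
            i = size ? (int)size - 1 : 0;
        return i;
    }

    int main(void)
    {
        char tbuf[8];
        int a = snprintf(tbuf, sizeof(tbuf), "0123456789");     /* 10 */
        int b = my_scnprintf(tbuf, sizeof(tbuf), "0123456789"); /* 7  */

        printf("snprintf=%d scnprintf=%d\n", a, b);
        return 0;
    }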
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28cb2d733a3c..b37181da487c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
110struct udp_table udp_table __read_mostly; 110struct udp_table udp_table __read_mostly;
111EXPORT_SYMBOL(udp_table); 111EXPORT_SYMBOL(udp_table);
112 112
113int sysctl_udp_mem[3] __read_mostly; 113long sysctl_udp_mem[3] __read_mostly;
114EXPORT_SYMBOL(sysctl_udp_mem); 114EXPORT_SYMBOL(sysctl_udp_mem);
115 115
116int sysctl_udp_rmem_min __read_mostly; 116int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
119int sysctl_udp_wmem_min __read_mostly; 119int sysctl_udp_wmem_min __read_mostly;
120EXPORT_SYMBOL(sysctl_udp_wmem_min); 120EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 121
122atomic_t udp_memory_allocated; 122atomic_long_t udp_memory_allocated;
123EXPORT_SYMBOL(udp_memory_allocated); 123EXPORT_SYMBOL(udp_memory_allocated);
124 124
125#define MAX_UDP_PORTS 65536 125#define MAX_UDP_PORTS 65536
@@ -430,7 +430,7 @@ begin:
430 430
431 if (result) { 431 if (result) {
432exact_match: 432exact_match:
433 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 433 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
434 result = NULL; 434 result = NULL;
435 else if (unlikely(compute_score2(result, net, saddr, sport, 435 else if (unlikely(compute_score2(result, net, saddr, sport,
436 daddr, hnum, dif) < badness)) { 436 daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@ begin:
500 goto begin; 500 goto begin;
501 501
502 if (result) { 502 if (result) {
503 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) 503 if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
504 result = NULL; 504 result = NULL;
505 else if (unlikely(compute_score(result, net, saddr, hnum, sport, 505 else if (unlikely(compute_score(result, net, saddr, hnum, sport,
506 daddr, dport, dif) < badness)) { 506 daddr, dport, dif) < badness)) {
@@ -890,15 +890,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
890 if (rt == NULL) { 890 if (rt == NULL) {
891 struct flowi fl = { .oif = ipc.oif, 891 struct flowi fl = { .oif = ipc.oif,
892 .mark = sk->sk_mark, 892 .mark = sk->sk_mark,
893 .nl_u = { .ip4_u = 893 .fl4_dst = faddr,
894 { .daddr = faddr, 894 .fl4_src = saddr,
895 .saddr = saddr, 895 .fl4_tos = tos,
896 .tos = tos } },
897 .proto = sk->sk_protocol, 896 .proto = sk->sk_protocol,
898 .flags = inet_sk_flowi_flags(sk), 897 .flags = inet_sk_flowi_flags(sk),
899 .uli_u = { .ports = 898 .fl_ip_sport = inet->inet_sport,
900 { .sport = inet->inet_sport, 899 .fl_ip_dport = dport };
901 .dport = dport } } };
902 struct net *net = sock_net(sk); 900 struct net *net = sock_net(sk);
903 901
904 security_sk_classify_flow(sk, &fl); 902 security_sk_classify_flow(sk, &fl);
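The UDP lookup hunks replace `atomic_inc_not_zero()` with `atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)`: semantics are unchanged (take a reference only while the count is still nonzero), but the hint seeds the first compare-exchange with the expected current value, so the common case avoids a preliminary read of the counter. A C11-atomics model of the loop shape, not the kernel API:

    /* Sketch: cmpxchg-loop shape of atomic_inc_not_zero_hint(). */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool inc_not_zero_hint(atomic_int *v, int hint)
    {
        int c = hint;   /* guess, e.g. 2 for a hashed, live UDP socket */

        do {
            if (c == 0)
                return false;   /* object dying: must not resurrect it */
            /* on failure, compare_exchange reloads c with the real value */
        } while (!atomic_compare_exchange_weak(v, &c, c + 1));
        return true;
    }

    int main(void)
    {
        atomic_int refcnt = 2;

        printf("%d\n", inc_not_zero_hint(&refcnt, 2)); /* 1 */
        printf("%d\n", atomic_load(&refcnt));          /* 3 */
        return 0;
    }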
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 6f368413eb0e..534972e114ac 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -56,7 +56,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
56 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); 56 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
57 ip_select_ident(top_iph, dst->child, NULL); 57 ip_select_ident(top_iph, dst->child, NULL);
58 58
59 top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT); 59 top_iph->ttl = ip4_dst_hoplimit(dst->child);
60 60
61 top_iph->saddr = x->props.saddr.a4; 61 top_iph->saddr = x->props.saddr.a4;
62 top_iph->daddr = x->id.daddr.a4; 62 top_iph->daddr = x->id.daddr.a4;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 4464f3bff6a7..b057d40addec 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -11,6 +11,7 @@
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/inetdevice.h> 13#include <linux/inetdevice.h>
14#include <linux/if_tunnel.h>
14#include <net/dst.h> 15#include <net/dst.h>
15#include <net/xfrm.h> 16#include <net/xfrm.h>
16#include <net/ip.h> 17#include <net/ip.h>
@@ -22,12 +23,8 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
22 xfrm_address_t *daddr) 23 xfrm_address_t *daddr)
23{ 24{
24 struct flowi fl = { 25 struct flowi fl = {
25 .nl_u = { 26 .fl4_dst = daddr->a4,
26 .ip4_u = { 27 .fl4_tos = tos,
27 .tos = tos,
28 .daddr = daddr->a4,
29 },
30 },
31 }; 28 };
32 struct dst_entry *dst; 29 struct dst_entry *dst;
33 struct rtable *rt; 30 struct rtable *rt;
@@ -80,10 +77,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
80 xdst->u.dst.dev = dev; 77 xdst->u.dst.dev = dev;
81 dev_hold(dev); 78 dev_hold(dev);
82 79
83 xdst->u.rt.idev = in_dev_get(dev);
84 if (!xdst->u.rt.idev)
85 return -ENODEV;
86
87 xdst->u.rt.peer = rt->peer; 80 xdst->u.rt.peer = rt->peer;
88 if (rt->peer) 81 if (rt->peer)
89 atomic_inc(&rt->peer->refcnt); 82 atomic_inc(&rt->peer->refcnt);
@@ -158,6 +151,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
158 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); 151 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
159 } 152 }
160 break; 153 break;
154
155 case IPPROTO_GRE:
156 if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
157 __be16 *greflags = (__be16 *)xprth;
158 __be32 *gre_hdr = (__be32 *)xprth;
159
160 if (greflags[0] & GRE_KEY) {
161 if (greflags[0] & GRE_CSUM)
162 gre_hdr++;
163 fl->fl_gre_key = gre_hdr[1];
164 }
165 }
166 break;
167
161 default: 168 default:
162 fl->fl_ipsec_spi = 0; 169 fl->fl_ipsec_spi = 0;
163 break; 170 break;
@@ -189,8 +196,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
189{ 196{
190 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 197 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
191 198
192 if (likely(xdst->u.rt.idev))
193 in_dev_put(xdst->u.rt.idev);
194 if (likely(xdst->u.rt.peer)) 199 if (likely(xdst->u.rt.peer))
195 inet_putpeer(xdst->u.rt.peer); 200 inet_putpeer(xdst->u.rt.peer);
196 xfrm_dst_destroy(xdst); 201 xfrm_dst_destroy(xdst);
@@ -199,27 +204,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
199static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 204static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
200 int unregister) 205 int unregister)
201{ 206{
202 struct xfrm_dst *xdst;
203
204 if (!unregister) 207 if (!unregister)
205 return; 208 return;
206 209
207 xdst = (struct xfrm_dst *)dst;
208 if (xdst->u.rt.idev->dev == dev) {
209 struct in_device *loopback_idev =
210 in_dev_get(dev_net(dev)->loopback_dev);
211 BUG_ON(!loopback_idev);
212
213 do {
214 in_dev_put(xdst->u.rt.idev);
215 xdst->u.rt.idev = loopback_idev;
216 in_dev_hold(loopback_idev);
217 xdst = (struct xfrm_dst *)xdst->u.dst.child;
218 } while (xdst->u.dst.xfrm);
219
220 __in_dev_put(loopback_idev);
221 }
222
223 xfrm_dst_ifdown(dst, dev); 210 xfrm_dst_ifdown(dst, dev);
224} 211}
225 212
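The new `IPPROTO_GRE` case in `_decode_session4()` extracts the 32-bit GRE key for flow matching: after the 16-bit flags word, an optional checksum word precedes the key whenever `GRE_CSUM` is set, so the pointer is bumped before reading. A standalone sketch of that header walk; the real code first guarantees the bytes are linear with `pskb_may_pull()` and works on network byte order, while this model assumes a little-endian host and skips both concerns.

    /* Sketch: the GRE key extraction added above (RFC 2890 layout:
     * flags+version, protocol, [checksum word], [key word]).
     * GRE_* values mirror include/linux/if_tunnel.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define GRE_CSUM 0x8000
    #define GRE_KEY  0x2000

    static uint32_t gre_key(const uint8_t *xprth)
    {
        const uint16_t *greflags = (const uint16_t *)xprth;
        const uint32_t *gre_hdr  = (const uint32_t *)xprth;

        if (!(greflags[0] & GRE_KEY))
            return 0;
        if (greflags[0] & GRE_CSUM)
            gre_hdr++;           /* skip the checksum word */
        return gre_hdr[1];       /* word 0 is flags+proto; key follows */
    }

    int main(void)
    {
        /* key present, no checksum, key 0xdeadbeef (little-endian host) */
        uint32_t pkt[2] = { GRE_KEY, 0xdeadbeef };

        printf("%x\n", (unsigned)gre_key((const uint8_t *)pkt));
        return 0;
    }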
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e048ec62d109..1023ad0d2b15 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -98,7 +98,11 @@
98#endif 98#endif
99 99
100#define INFINITY_LIFE_TIME 0xFFFFFFFF 100#define INFINITY_LIFE_TIME 0xFFFFFFFF
101#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) 101
102static inline u32 cstamp_delta(unsigned long cstamp)
103{
104 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
105}
102 106
103#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) 107#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
104#define ADDRCONF_TIMER_FUZZ (HZ / 4) 108#define ADDRCONF_TIMER_FUZZ (HZ / 4)
@@ -2740,10 +2744,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2740 /* Flag it for later restoration when link comes up */ 2744 /* Flag it for later restoration when link comes up */
2741 ifa->flags |= IFA_F_TENTATIVE; 2745 ifa->flags |= IFA_F_TENTATIVE;
2742 ifa->state = INET6_IFADDR_STATE_DAD; 2746 ifa->state = INET6_IFADDR_STATE_DAD;
2743
2744 write_unlock_bh(&idev->lock);
2745
2746 in6_ifa_hold(ifa);
2747 } else { 2747 } else {
2748 list_del(&ifa->if_list); 2748 list_del(&ifa->if_list);
2749 2749
@@ -2758,19 +2758,15 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2758 ifa->state = INET6_IFADDR_STATE_DEAD; 2758 ifa->state = INET6_IFADDR_STATE_DEAD;
2759 spin_unlock_bh(&ifa->state_lock); 2759 spin_unlock_bh(&ifa->state_lock);
2760 2760
2761 if (state == INET6_IFADDR_STATE_DEAD) 2761 if (state != INET6_IFADDR_STATE_DEAD) {
2762 goto put_ifa; 2762 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2763 } 2763 atomic_notifier_call_chain(&inet6addr_chain,
2764 2764 NETDEV_DOWN, ifa);
2765 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2765 }
2766 if (ifa->state == INET6_IFADDR_STATE_DEAD)
2767 atomic_notifier_call_chain(&inet6addr_chain,
2768 NETDEV_DOWN, ifa);
2769
2770put_ifa:
2771 in6_ifa_put(ifa);
2772 2766
2773 write_lock_bh(&idev->lock); 2767 in6_ifa_put(ifa);
2768 write_lock_bh(&idev->lock);
2769 }
2774 } 2770 }
2775 2771
2776 list_splice(&keep_list, &idev->addr_list); 2772 list_splice(&keep_list, &idev->addr_list);
@@ -3452,10 +3448,8 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
3452{ 3448{
3453 struct ifa_cacheinfo ci; 3449 struct ifa_cacheinfo ci;
3454 3450
3455 ci.cstamp = (u32)(TIME_DELTA(cstamp, INITIAL_JIFFIES) / HZ * 100 3451 ci.cstamp = cstamp_delta(cstamp);
3456 + TIME_DELTA(cstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); 3452 ci.tstamp = cstamp_delta(tstamp);
3457 ci.tstamp = (u32)(TIME_DELTA(tstamp, INITIAL_JIFFIES) / HZ * 100
3458 + TIME_DELTA(tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
3459 ci.ifa_prefered = preferred; 3453 ci.ifa_prefered = preferred;
3460 ci.ifa_valid = valid; 3454 ci.ifa_valid = valid;
3461 3455
@@ -3806,8 +3800,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3806 array[DEVCONF_AUTOCONF] = cnf->autoconf; 3800 array[DEVCONF_AUTOCONF] = cnf->autoconf;
3807 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; 3801 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
3808 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; 3802 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
3809 array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval; 3803 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
3810 array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay; 3804 jiffies_to_msecs(cnf->rtr_solicit_interval);
3805 array[DEVCONF_RTR_SOLICIT_DELAY] =
3806 jiffies_to_msecs(cnf->rtr_solicit_delay);
3811 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; 3807 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
3812#ifdef CONFIG_IPV6_PRIVACY 3808#ifdef CONFIG_IPV6_PRIVACY
3813 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; 3809 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
@@ -3821,7 +3817,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3821 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo; 3817 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
3822#ifdef CONFIG_IPV6_ROUTER_PREF 3818#ifdef CONFIG_IPV6_ROUTER_PREF
3823 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref; 3819 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
3824 array[DEVCONF_RTR_PROBE_INTERVAL] = cnf->rtr_probe_interval; 3820 array[DEVCONF_RTR_PROBE_INTERVAL] =
3821 jiffies_to_msecs(cnf->rtr_probe_interval);
3825#ifdef CONFIG_IPV6_ROUTE_INFO 3822#ifdef CONFIG_IPV6_ROUTE_INFO
3826 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; 3823 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
3827#endif 3824#endif
@@ -3839,6 +3836,15 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3839 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; 3836 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
3840} 3837}
3841 3838
3839static inline size_t inet6_ifla6_size(void)
3840{
3841 return nla_total_size(4) /* IFLA_INET6_FLAGS */
3842 + nla_total_size(sizeof(struct ifla_cacheinfo))
3843 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
3844 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
3845 + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
3846}
3847
3842static inline size_t inet6_if_nlmsg_size(void) 3848static inline size_t inet6_if_nlmsg_size(void)
3843{ 3849{
3844 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 3850 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -3846,13 +3852,7 @@ static inline size_t inet6_if_nlmsg_size(void)
3846 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 3852 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3847 + nla_total_size(4) /* IFLA_MTU */ 3853 + nla_total_size(4) /* IFLA_MTU */
3848 + nla_total_size(4) /* IFLA_LINK */ 3854 + nla_total_size(4) /* IFLA_LINK */
3849 + nla_total_size( /* IFLA_PROTINFO */ 3855 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
3850 nla_total_size(4) /* IFLA_INET6_FLAGS */
3851 + nla_total_size(sizeof(struct ifla_cacheinfo))
3852 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
3853 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
3854 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
3855 );
3856} 3856}
3857 3857
3858static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, 3858static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
@@ -3899,15 +3899,70 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3899 } 3899 }
3900} 3900}
3901 3901
3902static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
3903{
3904 struct nlattr *nla;
3905 struct ifla_cacheinfo ci;
3906
3907 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
3908
3909 ci.max_reasm_len = IPV6_MAXPLEN;
3910 ci.tstamp = cstamp_delta(idev->tstamp);
3911 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3912 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3913 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
3914
3915 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
3916 if (nla == NULL)
3917 goto nla_put_failure;
3918 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
3919
3920 /* XXX - MC not implemented */
3921
3922 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
3923 if (nla == NULL)
3924 goto nla_put_failure;
3925 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
3926
3927 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
3928 if (nla == NULL)
3929 goto nla_put_failure;
3930 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
3931
3932 return 0;
3933
3934nla_put_failure:
3935 return -EMSGSIZE;
3936}
3937
3938static size_t inet6_get_link_af_size(const struct net_device *dev)
3939{
3940 if (!__in6_dev_get(dev))
3941 return 0;
3942
3943 return inet6_ifla6_size();
3944}
3945
3946static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
3947{
3948 struct inet6_dev *idev = __in6_dev_get(dev);
3949
3950 if (!idev)
3951 return -ENODATA;
3952
3953 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
3954 return -EMSGSIZE;
3955
3956 return 0;
3957}
3958
3902static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 3959static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3903 u32 pid, u32 seq, int event, unsigned int flags) 3960 u32 pid, u32 seq, int event, unsigned int flags)
3904{ 3961{
3905 struct net_device *dev = idev->dev; 3962 struct net_device *dev = idev->dev;
3906 struct nlattr *nla;
3907 struct ifinfomsg *hdr; 3963 struct ifinfomsg *hdr;
3908 struct nlmsghdr *nlh; 3964 struct nlmsghdr *nlh;
3909 void *protoinfo; 3965 void *protoinfo;
3910 struct ifla_cacheinfo ci;
3911 3966
3912 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 3967 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
3913 if (nlh == NULL) 3968 if (nlh == NULL)
@@ -3934,31 +3989,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
3934 if (protoinfo == NULL) 3989 if (protoinfo == NULL)
3935 goto nla_put_failure; 3990 goto nla_put_failure;
3936 3991
3937 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); 3992 if (inet6_fill_ifla6_attrs(skb, idev) < 0)
3938
3939 ci.max_reasm_len = IPV6_MAXPLEN;
3940 ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100
3941 + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
3942 ci.reachable_time = idev->nd_parms->reachable_time;
3943 ci.retrans_time = idev->nd_parms->retrans_time;
3944 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
3945
3946 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
3947 if (nla == NULL)
3948 goto nla_put_failure;
3949 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
3950
3951 /* XXX - MC not implemented */
3952
3953 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
3954 if (nla == NULL)
3955 goto nla_put_failure; 3993 goto nla_put_failure;
3956 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
3957
3958 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
3959 if (nla == NULL)
3960 goto nla_put_failure;
3961 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
3962 3994
3963 nla_nest_end(skb, protoinfo); 3995 nla_nest_end(skb, protoinfo);
3964 return nlmsg_end(skb, nlh); 3996 return nlmsg_end(skb, nlh);
@@ -4629,6 +4661,12 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4629} 4661}
4630EXPORT_SYMBOL(unregister_inet6addr_notifier); 4662EXPORT_SYMBOL(unregister_inet6addr_notifier);
4631 4663
4664static struct rtnl_af_ops inet6_ops = {
4665 .family = AF_INET6,
4666 .fill_link_af = inet6_fill_link_af,
4667 .get_link_af_size = inet6_get_link_af_size,
4668};
4669
4632/* 4670/*
4633 * Init / cleanup code 4671 * Init / cleanup code
4634 */ 4672 */
@@ -4680,6 +4718,10 @@ int __init addrconf_init(void)
4680 4718
4681 addrconf_verify(0); 4719 addrconf_verify(0);
4682 4720
4721 err = rtnl_af_register(&inet6_ops);
4722 if (err < 0)
4723 goto errout_af;
4724
4683 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo); 4725 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
4684 if (err < 0) 4726 if (err < 0)
4685 goto errout; 4727 goto errout;
@@ -4695,6 +4737,8 @@ int __init addrconf_init(void)
4695 4737
4696 return 0; 4738 return 0;
4697errout: 4739errout:
4740 rtnl_af_unregister(&inet6_ops);
4741errout_af:
4698 unregister_netdevice_notifier(&ipv6_dev_notf); 4742 unregister_netdevice_notifier(&ipv6_dev_notf);
4699errlo: 4743errlo:
4700 unregister_pernet_subsys(&addrconf_ops); 4744 unregister_pernet_subsys(&addrconf_ops);
@@ -4715,6 +4759,8 @@ void addrconf_cleanup(void)
4715 4759
4716 rtnl_lock(); 4760 rtnl_lock();
4717 4761
4762 __rtnl_af_unregister(&inet6_ops);
4763
4718 /* clean dev list */ 4764 /* clean dev list */
4719 for_each_netdev(&init_net, dev) { 4765 for_each_netdev(&init_net, dev) {
4720 if (__in6_dev_get(dev) == NULL) 4766 if (__in6_dev_get(dev) == NULL)
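Two threads run through the addrconf.c diff: the netlink fill code is factored into `inet6_fill_ifla6_attrs()` so the new per-family `rtnl_af_ops` hooks and the legacy `IFLA_PROTINFO` path share one body, and the open-coded `TIME_DELTA(...) / HZ * 100 + ... % HZ * 100 / HZ` expressions collapse into `cstamp_delta()`, which converts a jiffies timestamp into the hundredths-of-a-second unit `struct ifa_cacheinfo` reports (a plain multiply-then-divide is safe in unsigned long arithmetic here, where the old split form was dodging overflow). A userspace form of the helper; HZ and INITIAL_JIFFIES values below are illustrative stand-ins.

    /* Sketch: the cstamp_delta() helper introduced above. */
    #include <stdint.h>
    #include <stdio.h>

    #define HZ 100UL
    #define INITIAL_JIFFIES 0UL   /* kernel uses a nonzero wrap-test value */

    static uint32_t cstamp_delta(unsigned long cstamp)
    {
        return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
    }

    int main(void)
    {
        /* 250 jiffies at HZ=100 is 2.5 s -> 250 hundredths of a second */
        printf("%u\n", (unsigned)cstamp_delta(INITIAL_JIFFIES + 250));
        return 0;
    }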
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 54e8e42f7a88..059a3de647db 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -810,7 +810,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
810 } 810 }
811 rcu_read_unlock(); 811 rcu_read_unlock();
812 812
813 if (unlikely(IS_ERR(segs))) 813 if (IS_ERR(segs))
814 goto out; 814 goto out;
815 815
816 for (skb = segs; skb; skb = skb->next) { 816 for (skb = segs; skb; skb = skb->next) {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ee9b93bdd6a2..1b5c9825743b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -49,6 +49,8 @@ struct esp_skb_cb {
49 49
50#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) 50#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
51 51
52static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
53
52/* 54/*
53 * Allocate an AEAD request structure with extra space for SG and IV. 55 * Allocate an AEAD request structure with extra space for SG and IV.
54 * 56 *
@@ -140,6 +142,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
140 int blksize; 142 int blksize;
141 int clen; 143 int clen;
142 int alen; 144 int alen;
145 int plen;
146 int tfclen;
143 int nfrags; 147 int nfrags;
144 u8 *iv; 148 u8 *iv;
145 u8 *tail; 149 u8 *tail;
@@ -148,18 +152,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
148 /* skb is pure payload to encrypt */ 152 /* skb is pure payload to encrypt */
149 err = -ENOMEM; 153 err = -ENOMEM;
150 154
151 /* Round to block size */
152 clen = skb->len;
153
154 aead = esp->aead; 155 aead = esp->aead;
155 alen = crypto_aead_authsize(aead); 156 alen = crypto_aead_authsize(aead);
156 157
158 tfclen = 0;
159 if (x->tfcpad) {
160 struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
161 u32 padto;
162
163 padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
164 if (skb->len < padto)
165 tfclen = padto - skb->len;
166 }
157 blksize = ALIGN(crypto_aead_blocksize(aead), 4); 167 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
158 clen = ALIGN(clen + 2, blksize); 168 clen = ALIGN(skb->len + 2 + tfclen, blksize);
159 if (esp->padlen) 169 if (esp->padlen)
160 clen = ALIGN(clen, esp->padlen); 170 clen = ALIGN(clen, esp->padlen);
171 plen = clen - skb->len - tfclen;
161 172
162 if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0) 173 err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
174 if (err < 0)
163 goto error; 175 goto error;
164 nfrags = err; 176 nfrags = err;
165 177
@@ -174,13 +186,17 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
174 186
175 /* Fill padding... */ 187 /* Fill padding... */
176 tail = skb_tail_pointer(trailer); 188 tail = skb_tail_pointer(trailer);
189 if (tfclen) {
190 memset(tail, 0, tfclen);
191 tail += tfclen;
192 }
177 do { 193 do {
178 int i; 194 int i;
179 for (i=0; i<clen-skb->len - 2; i++) 195 for (i = 0; i < plen - 2; i++)
180 tail[i] = i + 1; 196 tail[i] = i + 1;
181 } while (0); 197 } while (0);
182 tail[clen-skb->len - 2] = (clen - skb->len) - 2; 198 tail[plen - 2] = plen - 2;
183 tail[clen - skb->len - 1] = *skb_mac_header(skb); 199 tail[plen - 1] = *skb_mac_header(skb);
184 pskb_put(skb, trailer, clen - skb->len + alen); 200 pskb_put(skb, trailer, clen - skb->len + alen);
185 201
186 skb_push(skb, -skb_network_offset(skb)); 202 skb_push(skb, -skb_network_offset(skb));
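The esp6.c hunk adds traffic-flow-confidentiality padding: when `x->tfcpad` is set, the payload is first padded toward `min(tfcpad, PMTU)` (`tfclen`), and only then rounded up to the cipher block (`plen`, whose last two bytes still carry the ESP pad-length and next-header fields). Padding every short packet toward a common target makes ciphertext lengths reveal less about payload sizes. A sketch of the arithmetic with made-up block size and MTU values:

    /* Sketch: the tfclen/clen/plen arithmetic added to esp6_output above. */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1UL))

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long skb_len = 40, tfcpad = 128, mtu_payload = 1400;
        unsigned long blksize = 16;

        unsigned long padto  = min_ul(tfcpad, mtu_payload);
        unsigned long tfclen = skb_len < padto ? padto - skb_len : 0;
        unsigned long clen   = ALIGN(skb_len + 2 + tfclen, blksize);
        unsigned long plen   = clen - skb_len - tfclen;

        /* tfclen=88 clen=144 plen=16; plen includes the 2 trailer bytes */
        printf("tfclen=%lu clen=%lu plen=%lu\n", tfclen, clen, plen);
        return 0;
    }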
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a1628023bd1..e46305d1815a 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -54,24 +54,54 @@ int inet6_csk_bind_conflict(const struct sock *sk,
54 54
55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); 55EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
56 56
57struct dst_entry *inet6_csk_route_req(struct sock *sk,
58 const struct request_sock *req)
59{
60 struct inet6_request_sock *treq = inet6_rsk(req);
61 struct ipv6_pinfo *np = inet6_sk(sk);
62 struct in6_addr *final_p, final;
63 struct dst_entry *dst;
64 struct flowi fl;
65
66 memset(&fl, 0, sizeof(fl));
67 fl.proto = IPPROTO_TCP;
68 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
69 final_p = fl6_update_dst(&fl, np->opt, &final);
70 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
71 fl.oif = sk->sk_bound_dev_if;
72 fl.mark = sk->sk_mark;
73 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
74 fl.fl_ip_sport = inet_rsk(req)->loc_port;
75 security_req_classify_flow(req, &fl);
76
77 if (ip6_dst_lookup(sk, &dst, &fl))
78 return NULL;
79
80 if (final_p)
81 ipv6_addr_copy(&fl.fl6_dst, final_p);
82
83 if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
84 return NULL;
85
86 return dst;
87}
88
57/* 89/*
58 * request_sock (formerly open request) hash tables. 90 * request_sock (formerly open request) hash tables.
59 */ 91 */
60static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, 92static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
61 const u32 rnd, const u16 synq_hsize) 93 const u32 rnd, const u16 synq_hsize)
62{ 94{
63 u32 a = (__force u32)raddr->s6_addr32[0]; 95 u32 c;
64 u32 b = (__force u32)raddr->s6_addr32[1]; 96
65 u32 c = (__force u32)raddr->s6_addr32[2]; 97 c = jhash_3words((__force u32)raddr->s6_addr32[0],
66 98 (__force u32)raddr->s6_addr32[1],
67 a += JHASH_GOLDEN_RATIO; 99 (__force u32)raddr->s6_addr32[2],
68 b += JHASH_GOLDEN_RATIO; 100 rnd);
69 c += rnd; 101
70 __jhash_mix(a, b, c); 102 c = jhash_2words((__force u32)raddr->s6_addr32[3],
71 103 (__force u32)rport,
72 a += (__force u32)raddr->s6_addr32[3]; 104 c);
73 b += (__force u32)rport;
74 __jhash_mix(a, b, c);
75 105
76 return c & (synq_hsize - 1); 106 return c & (synq_hsize - 1);
77} 107}
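Besides adding `inet6_csk_route_req()`, the inet6_connection_sock.c diff retires the open-coded golden-ratio mixing in `inet6_synq_hash()` in favor of the shared `jhash_3words()`/`jhash_2words()` helpers: four address words plus the remote port are folded into one u32, then masked down to the power-of-two queue size. A sketch of the call shape only, with a toy mixer standing in for the real jhash from include/linux/jhash.h:

    /* Sketch: two-stage jhash folding as in the hunk above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t toy_jhash_3words(uint32_t a, uint32_t b, uint32_t c,
                                     uint32_t initval)
    {
        /* NOT the real jhash; just enough mixing for the example. */
        uint32_t h = initval ^ 0x9e3779b9u;
        h = (h ^ a) * 0x85ebca6bu;
        h = (h ^ b) * 0xc2b2ae35u;
        h = (h ^ c) * 0x27d4eb2fu;
        return h ^ (h >> 16);
    }

    static uint32_t synq_hash(const uint32_t addr[4], uint16_t rport,
                              uint32_t rnd, uint16_t synq_hsize)
    {
        uint32_t c;

        c = toy_jhash_3words(addr[0], addr[1], addr[2], rnd);
        c = toy_jhash_3words(addr[3], rport, 0, c);  /* models jhash_2words */
        return c & (synq_hsize - 1);                 /* hsize: power of 2 */
    }

    int main(void)
    {
        uint32_t addr[4] = { 0x20010db8u, 0, 0, 1 };

        printf("%u\n", (unsigned)synq_hash(addr, 443, 0x12345678u, 512));
        return 0;
    }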
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2a59610c2a58..4f4483e697bd 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -58,8 +58,6 @@ MODULE_AUTHOR("Ville Nuorvala");
58MODULE_DESCRIPTION("IPv6 tunneling device"); 58MODULE_DESCRIPTION("IPv6 tunneling device");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60 60
61#define IPV6_TLV_TEL_DST_SIZE 8
62
63#ifdef IP6_TNL_DEBUG 61#ifdef IP6_TNL_DEBUG
64#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 62#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
65#else 63#else
@@ -1175,6 +1173,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1175 sizeof (struct ipv6hdr); 1173 sizeof (struct ipv6hdr);
1176 1174
1177 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); 1175 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
1176 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1177 dev->mtu-=8;
1178 1178
1179 if (dev->mtu < IPV6_MIN_MTU) 1179 if (dev->mtu < IPV6_MIN_MTU)
1180 dev->mtu = IPV6_MIN_MTU; 1180 dev->mtu = IPV6_MIN_MTU;
@@ -1363,12 +1363,17 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
1363 1363
1364static void ip6_tnl_dev_setup(struct net_device *dev) 1364static void ip6_tnl_dev_setup(struct net_device *dev)
1365{ 1365{
1366 struct ip6_tnl *t;
1367
1366 dev->netdev_ops = &ip6_tnl_netdev_ops; 1368 dev->netdev_ops = &ip6_tnl_netdev_ops;
1367 dev->destructor = ip6_dev_free; 1369 dev->destructor = ip6_dev_free;
1368 1370
1369 dev->type = ARPHRD_TUNNEL6; 1371 dev->type = ARPHRD_TUNNEL6;
1370 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); 1372 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
1371 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); 1373 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1374 t = netdev_priv(dev);
1375 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1376 dev->mtu-=8;
1372 dev->flags |= IFF_NOARP; 1377 dev->flags |= IFF_NOARP;
1373 dev->addr_len = sizeof(struct in6_addr); 1378 dev->addr_len = sizeof(struct in6_addr);
1374 dev->features |= NETIF_F_NETNS_LOCAL; 1379 dev->features |= NETIF_F_NETNS_LOCAL;
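The ip6_tunnel.c hunks make the advertised tunnel MTU account for the encapsulation-limit option: unless `IP6_TNL_F_IGN_ENCAP_LIMIT` is set, every encapsulated packet carries an 8-byte destination-options header (the `IPV6_TLV_TEL_DST_SIZE` constant removed from this file above), so the MTU drops by 8 on top of the outer IPv6 header. A sketch of that accounting; the flag value and link MTUs below are illustrative.

    /* Sketch: MTU accounting added to ip6_tnl_link_config()/dev_setup(). */
    #include <stdio.h>

    #define IP6_TNL_F_IGN_ENCAP_LIMIT 0x1   /* illustrative flag value */
    #define IPV6_MIN_MTU 1280

    static int tnl_mtu(int link_mtu, unsigned int flags)
    {
        int mtu = link_mtu - 40;            /* outer IPv6 header */

        if (!(flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
            mtu -= 8;                       /* encap-limit dest option */
        if (mtu < IPV6_MIN_MTU)
            mtu = IPV6_MIN_MTU;
        return mtu;
    }

    int main(void)
    {
        printf("%d\n", tnl_mtu(1500, 0));                         /* 1452 */
        printf("%d\n", tnl_mtu(1500, IP6_TNL_F_IGN_ENCAP_LIMIT)); /* 1460 */
        return 0;
    }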
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6f32ffce7022..9fab274019c0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1843,9 +1843,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1843 1843
1844 fl = (struct flowi) { 1844 fl = (struct flowi) {
1845 .oif = vif->link, 1845 .oif = vif->link,
1846 .nl_u = { .ip6_u = 1846 .fl6_dst = ipv6h->daddr,
1847 { .daddr = ipv6h->daddr, }
1848 }
1849 }; 1847 };
1850 1848
1851 dst = ip6_route_output(net, NULL, &fl); 1849 dst = ip6_route_output(net, NULL, &fl);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d1444b95ad7e..49f986d626a0 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -82,7 +82,7 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 /* Big mc list lock for all the sockets */
-static DEFINE_RWLOCK(ipv6_sk_mc_lock);
+static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
 
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
@@ -123,6 +123,11 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
  *	socket join on multicast group
  */
 
+#define for_each_pmc_rcu(np, pmc)				\
+	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
+	     pmc != NULL;					\
+	     pmc = rcu_dereference(pmc->next))
+
 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct net_device *dev = NULL;
@@ -134,15 +139,15 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 	if (!ipv6_addr_is_multicast(addr))
 		return -EINVAL;
 
-	read_lock_bh(&ipv6_sk_mc_lock);
-	for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(np, mc_lst) {
 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 		    ipv6_addr_equal(&mc_lst->addr, addr)) {
-			read_unlock_bh(&ipv6_sk_mc_lock);
+			rcu_read_unlock();
 			return -EADDRINUSE;
 		}
 	}
-	read_unlock_bh(&ipv6_sk_mc_lock);
+	rcu_read_unlock();
 
 	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 
@@ -186,33 +191,41 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 		return err;
 	}
 
-	write_lock_bh(&ipv6_sk_mc_lock);
+	spin_lock(&ipv6_sk_mc_lock);
 	mc_lst->next = np->ipv6_mc_list;
-	np->ipv6_mc_list = mc_lst;
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
+	spin_unlock(&ipv6_sk_mc_lock);
 
 	rcu_read_unlock();
 
 	return 0;
 }
 
+static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ipv6_mc_socklist, rcu));
+}
 /*
  *	socket leave on multicast group
  */
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct ipv6_mc_socklist *mc_lst, **lnk;
+	struct ipv6_mc_socklist *mc_lst;
+	struct ipv6_mc_socklist __rcu **lnk;
 	struct net *net = sock_net(sk);
 
-	write_lock_bh(&ipv6_sk_mc_lock);
-	for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
+	spin_lock(&ipv6_sk_mc_lock);
+	for (lnk = &np->ipv6_mc_list;
+	     (mc_lst = rcu_dereference_protected(*lnk,
+			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
+	     lnk = &mc_lst->next) {
 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 		    ipv6_addr_equal(&mc_lst->addr, addr)) {
 			struct net_device *dev;
 
 			*lnk = mc_lst->next;
-			write_unlock_bh(&ipv6_sk_mc_lock);
+			spin_unlock(&ipv6_sk_mc_lock);
 
 			rcu_read_lock();
 			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -225,11 +238,12 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 			} else
 				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 			rcu_read_unlock();
-			sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+			call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
 			return 0;
 		}
 	}
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	spin_unlock(&ipv6_sk_mc_lock);
 
 	return -EADDRNOTAVAIL;
 }
@@ -257,7 +271,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
 		return NULL;
 	idev = __in6_dev_get(dev);
 	if (!idev)
-		return NULL;;
+		return NULL;
 	read_lock_bh(&idev->lock);
 	if (idev->dead) {
 		read_unlock_bh(&idev->lock);
@@ -272,12 +286,13 @@ void ipv6_sock_mc_close(struct sock *sk)
 	struct ipv6_mc_socklist *mc_lst;
 	struct net *net = sock_net(sk);
 
-	write_lock_bh(&ipv6_sk_mc_lock);
-	while ((mc_lst = np->ipv6_mc_list) != NULL) {
+	spin_lock(&ipv6_sk_mc_lock);
+	while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
+			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
 		struct net_device *dev;
 
 		np->ipv6_mc_list = mc_lst->next;
-		write_unlock_bh(&ipv6_sk_mc_lock);
+		spin_unlock(&ipv6_sk_mc_lock);
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -290,11 +305,13 @@ void ipv6_sock_mc_close(struct sock *sk)
 		} else
 			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 		rcu_read_unlock();
-		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
-		write_lock_bh(&ipv6_sk_mc_lock);
+		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+		call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
+
+		spin_lock(&ipv6_sk_mc_lock);
 	}
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	spin_unlock(&ipv6_sk_mc_lock);
 }
 
 int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -328,8 +345,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 
 	err = -EADDRNOTAVAIL;
 
-	read_lock(&ipv6_sk_mc_lock);
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 			continue;
 		if (ipv6_addr_equal(&pmc->addr, group))
@@ -428,7 +444,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 done:
 	if (pmclocked)
 		write_unlock(&pmc->sflock);
-	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
 	rcu_read_unlock();
 	if (leavegroup)
@@ -466,14 +481,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	dev = idev->dev;
 
 	err = 0;
-	read_lock(&ipv6_sk_mc_lock);
 
 	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 		leavegroup = 1;
 		goto done;
 	}
 
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pmc->ifindex != gsf->gf_interface)
 			continue;
 		if (ipv6_addr_equal(&pmc->addr, group))
@@ -521,7 +535,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	write_unlock(&pmc->sflock);
 	err = 0;
 done:
-	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
 	rcu_read_unlock();
 	if (leavegroup)
@@ -562,7 +575,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 	 * so reading the list is safe.
 	 */
 
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pmc->ifindex != gsf->gf_interface)
 			continue;
 		if (ipv6_addr_equal(group, &pmc->addr))
@@ -612,13 +625,13 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
 	struct ip6_sf_socklist *psl;
 	int rv = 1;
 
-	read_lock(&ipv6_sk_mc_lock);
-	for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(np, mc) {
 		if (ipv6_addr_equal(&mc->addr, mc_addr))
 			break;
 	}
 	if (!mc) {
-		read_unlock(&ipv6_sk_mc_lock);
+		rcu_read_unlock();
 		return 1;
 	}
 	read_lock(&mc->sflock);
@@ -638,7 +651,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
 		rv = 0;
 	}
 	read_unlock(&mc->sflock);
-	read_unlock(&ipv6_sk_mc_lock);
+	rcu_read_unlock();
 
 	return rv;
 }
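The mcast.c conversion above is a textbook rwlock-to-RCU migration: writers serialize on ipv6_sk_mc_lock and publish new list heads with rcu_assign_pointer(), readers walk the list through the new for_each_pmc_rcu() helper inside rcu_read_lock(), and entries are reclaimed via call_rcu() so in-flight readers never touch freed memory. A compilable userspace model of the same discipline (GCC __atomic builtins stand in for the RCU primitives; deferred reclamation is elided, and all names here are invented):

	#include <pthread.h>
	#include <stdlib.h>

	struct mc_entry {
		int ifindex;
		struct mc_entry *next;
	};

	static struct mc_entry *mc_list;
	static pthread_mutex_t mc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void mc_add(int ifindex)
	{
		struct mc_entry *e = calloc(1, sizeof(*e));

		if (!e)
			return;
		e->ifindex = ifindex;
		pthread_mutex_lock(&mc_lock);		/* writers still serialize */
		e->next = mc_list;
		/* release store: *e is fully initialized before it is visible */
		__atomic_store_n(&mc_list, e, __ATOMIC_RELEASE);
		pthread_mutex_unlock(&mc_lock);
	}

	static int mc_find(int ifindex)
	{
		struct mc_entry *e;

		/* reader side: no lock at all, like for_each_pmc_rcu() */
		for (e = __atomic_load_n(&mc_list, __ATOMIC_CONSUME); e;
		     e = __atomic_load_n(&e->next, __ATOMIC_CONSUME))
			if (e->ifindex == ifindex)
				return 1;
		return 0;
	}

	int main(void)
	{
		mc_add(1);
		mc_add(2);
		return mc_find(2) ? 0 : 1;
	}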
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 998d6d27e7cf..2342545a5ee9 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -141,18 +141,18 @@ struct neigh_table nd_tbl = {
 	.proxy_redo =	pndisc_redo,
 	.id =		"ndisc_cache",
 	.parms = {
 		.tbl =			&nd_tbl,
-		.base_reachable_time =	30 * HZ,
-		.retrans_time =		 1 * HZ,
+		.base_reachable_time =	ND_REACHABLE_TIME,
+		.retrans_time =		ND_RETRANS_TIMER,
 		.gc_staletime =		60 * HZ,
-		.reachable_time =	30 * HZ,
+		.reachable_time =	ND_REACHABLE_TIME,
 		.delay_probe_time =	 5 * HZ,
 		.queue_len =		 3,
 		.ucast_probes =		 3,
 		.mcast_probes =		 3,
 		.anycast_delay =	 1 * HZ,
 		.proxy_delay =		(8 * HZ) / 10,
 		.proxy_qlen =		64,
 	},
 	.gc_interval =	  30 * HZ,
 	.gc_thresh1 =	 128,
@@ -1259,7 +1259,8 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	if (ra_msg->icmph.icmp6_hop_limit) {
 		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
 		if (rt)
-			rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
+			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+				       ra_msg->icmph.icmp6_hop_limit);
 	}
 
 skip_defrtr:
@@ -1377,7 +1378,7 @@ skip_linkparms:
 		in6_dev->cnf.mtu6 = mtu;
 
 		if (rt)
-			rt->dst.metrics[RTAX_MTU-1] = mtu;
+			dst_metric_set(&rt->dst, RTAX_MTU, mtu);
 
 		rt6_mtu_change(skb->dev, mtu);
 	}
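Both ndisc.c hunks replace raw stores into dst->metrics[RTAX_FOO-1] with dst_metric_set(); the route.c changes further down do the same throughout. The accessor hides the off-by-one between the 1-based RTAX_* enum and the 0-based storage array, and gives the core a single choke point for later sharing or copy-on-write of metrics. A toy version of the indexing (the RTAX values match this tree's <linux/rtnetlink.h>, but treat everything here as illustrative):

	#include <assert.h>

	enum { RTAX_MTU = 2, RTAX_HOPLIMIT = 10, RTAX_MAX = 16 };

	struct dst_toy { unsigned int metrics[RTAX_MAX]; };

	static void dst_metric_set(struct dst_toy *dst, int metric, unsigned int val)
	{
		dst->metrics[metric - 1] = val;	/* the -1 the old code open-coded */
	}

	static unsigned int dst_metric(const struct dst_toy *dst, int metric)
	{
		return dst->metrics[metric - 1];
	}

	int main(void)
	{
		struct dst_toy d = { { 0 } };

		dst_metric_set(&d, RTAX_HOPLIMIT, 255);
		assert(dst_metric(&d, RTAX_HOPLIMIT) == 255);
		assert(dst_metric(&d, RTAX_MTU) == 0);
		return 0;
	}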
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 7155b2451d7c..35915e8617f0 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -18,10 +18,8 @@ int ip6_route_me_harder(struct sk_buff *skb)
 	struct flowi fl = {
 		.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
 		.mark = skb->mark,
-		.nl_u =
-		{ .ip6_u =
-		  { .daddr = iph->daddr,
-		    .saddr = iph->saddr, } },
+		.fl6_dst = iph->daddr,
+		.fl6_src = iph->saddr,
 	};
 
 	dst = ip6_route_output(net, skb->sk, &fl);
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 0a432c9b0795..abfee91ce816 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,13 +11,13 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
 # objects for l3 independent conntrack
-nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
+nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
 # defrag
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
 # matches
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035897e7..455582384ece 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user,
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 2933396e0281..bf998feac14e 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -124,7 +124,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 	skb_reset_network_header(nskb);
 	ip6h = ipv6_hdr(nskb);
 	ip6h->version = 6;
-	ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
+	ip6h->hop_limit = ip6_dst_hoplimit(dst);
 	ip6h->nexthdr = IPPROTO_TCP;
 	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
 	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 3a3f129a44cb..79d43aa8fa8d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -286,7 +286,7 @@ found:
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7ba3149633f..07beeb06f752 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,26 +104,22 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
 			     const struct in6_addr *daddr, u32 rnd)
 {
-	u32 a, b, c;
+	u32 c;
 
-	a = (__force u32)saddr->s6_addr32[0];
-	b = (__force u32)saddr->s6_addr32[1];
-	c = (__force u32)saddr->s6_addr32[2];
+	c = jhash_3words((__force u32)saddr->s6_addr32[0],
+			 (__force u32)saddr->s6_addr32[1],
+			 (__force u32)saddr->s6_addr32[2],
+			 rnd);
 
-	a += JHASH_GOLDEN_RATIO;
-	b += JHASH_GOLDEN_RATIO;
-	c += rnd;
-	__jhash_mix(a, b, c);
+	c = jhash_3words((__force u32)saddr->s6_addr32[3],
+			 (__force u32)daddr->s6_addr32[0],
+			 (__force u32)daddr->s6_addr32[1],
+			 c);
 
-	a += (__force u32)saddr->s6_addr32[3];
-	b += (__force u32)daddr->s6_addr32[0];
-	c += (__force u32)daddr->s6_addr32[1];
-	__jhash_mix(a, b, c);
-
-	a += (__force u32)daddr->s6_addr32[2];
-	b += (__force u32)daddr->s6_addr32[3];
-	c += (__force u32)id;
-	__jhash_mix(a, b, c);
+	c = jhash_3words((__force u32)daddr->s6_addr32[2],
+			 (__force u32)daddr->s6_addr32[3],
+			 (__force u32)id,
+			 c);
 
 	return c & (INETFRAGS_HASHSZ - 1);
 }
@@ -349,7 +345,7 @@ found:
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
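The inet6_hash_frag() rewrite above folds the same nine 32-bit inputs (two IPv6 addresses plus the fragment id) through three chained jhash_3words() calls, carrying each result as the next call's initval, instead of open-coding __jhash_mix(). A sketch of the chaining shape (the mixer body below is a stand-in, not the kernel's jhash, and INETFRAGS_HASHSZ is assumed to be 64 as in this tree):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t jhash_3words(uint32_t a, uint32_t b, uint32_t c,
				     uint32_t initval)
	{
		/* stand-in mixer; the real one lives in <linux/jhash.h> */
		c += initval;
		a -= c; a ^= (c << 4) | (c >> 28);
		b -= a; b ^= (a << 6) | (a >> 26);
		c -= b; c ^= (b << 8) | (b >> 24);
		return c;
	}

	static uint32_t hash_frag(uint32_t id, const uint32_t saddr[4],
				  const uint32_t daddr[4], uint32_t rnd)
	{
		uint32_t c;

		c = jhash_3words(saddr[0], saddr[1], saddr[2], rnd);
		c = jhash_3words(saddr[3], daddr[0], daddr[1], c);
		c = jhash_3words(daddr[2], daddr[3], id, c);
		return c & 63;		/* INETFRAGS_HASHSZ - 1, assumed 64 */
	}

	int main(void)
	{
		uint32_t s[4] = { 0x20010db8, 0, 0, 1 };
		uint32_t d[4] = { 0x20010db8, 0, 0, 2 };

		printf("bucket: %u\n", hash_frag(0x1234, s, d, 0xdeadbeef));
		return 0;
	}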
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f968f3f..98796b0dc2b7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -129,7 +129,6 @@ static struct rt6_info ip6_null_entry_template = {
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -ENETUNREACH,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= ip6_pkt_discard,
 		.output		= ip6_pkt_discard_out,
 	},
@@ -150,7 +149,6 @@ static struct rt6_info ip6_prohibit_entry_template = {
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -EACCES,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= ip6_pkt_prohibit,
 		.output		= ip6_pkt_prohibit_out,
 	},
@@ -166,7 +164,6 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -EINVAL,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= dst_discard,
 		.output		= dst_discard,
 	},
@@ -188,11 +185,29 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct inet6_dev *idev = rt->rt6i_idev;
+	struct inet_peer *peer = rt->rt6i_peer;
 
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
+	if (peer) {
+		BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
+		rt->rt6i_peer = NULL;
+		inet_putpeer(peer);
+	}
+}
+
+void rt6_bind_peer(struct rt6_info *rt, int create)
+{
+	struct inet_peer *peer;
+
+	if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
+		return;
+
+	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
+	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
+		inet_putpeer(peer);
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -558,11 +573,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
 {
 	struct flowi fl = {
 		.oif = oif,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = *daddr,
-			},
-		},
+		.fl6_dst = *daddr,
 	};
 	struct dst_entry *dst;
 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
@@ -778,13 +789,9 @@ void ip6_route_input(struct sk_buff *skb)
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
 	struct flowi fl = {
 		.iif = skb->dev->ifindex,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = iph->daddr,
-				.saddr = iph->saddr,
-				.flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
-			},
-		},
+		.fl6_dst = iph->daddr,
+		.fl6_src = iph->saddr,
+		.fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
 		.mark = skb->mark,
 		.proto = iph->nexthdr,
 	};
@@ -834,7 +841,7 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
 		new->input = dst_discard;
 		new->output = dst_discard;
 
-		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		dst_copy_metrics(new, &ort->dst);
 		new->dev = ort->dst.dev;
 		if (new->dev)
 			dev_hold(new->dev);
@@ -918,10 +925,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
 		rt6->rt6i_flags |= RTF_MODIFIED;
 		if (mtu < IPV6_MIN_MTU) {
+			u32 features = dst_metric(dst, RTAX_FEATURES);
 			mtu = IPV6_MIN_MTU;
-			dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(dst, RTAX_FEATURES, features);
 		}
-		dst->metrics[RTAX_MTU-1] = mtu;
+		dst_metric_set(dst, RTAX_MTU, mtu);
 		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
 }
@@ -979,9 +988,9 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->rt6i_idev = idev;
 	rt->rt6i_nexthop = neigh;
 	atomic_set(&rt->dst.__refcnt, 1);
-	rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
-	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+	dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+	dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
 	rt->dst.output = ip6_output;
 
 #if 0	/* there's no chance to use these for ndisc */
@@ -1095,8 +1104,8 @@ static int ipv6_get_mtu(struct net_device *dev)
 
 int ip6_dst_hoplimit(struct dst_entry *dst)
 {
-	int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
-	if (hoplimit < 0) {
+	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+	if (hoplimit == 0) {
 		struct net_device *dev = dst->dev;
 		struct inet6_dev *idev;
 
@@ -1110,6 +1119,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
 	}
 	return hoplimit;
 }
+EXPORT_SYMBOL(ip6_dst_hoplimit);
 
 /*
  *
@@ -1295,17 +1305,15 @@ install_route:
 					goto out;
 				}
 
-				rt->dst.metrics[type - 1] = nla_get_u32(nla);
+				dst_metric_set(&rt->dst, type, nla_get_u32(nla));
 			}
 		}
 	}
 
-	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
-		rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
 	if (!dst_mtu(&rt->dst))
-		rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
+		dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(dev));
 	if (!dst_metric(&rt->dst, RTAX_ADVMSS))
-		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+		dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
 	rt->dst.dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_table = table;
@@ -1463,12 +1471,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
 	struct ip6rd_flowi rdfl = {
 		.fl = {
 			.oif = dev->ifindex,
-			.nl_u = {
-				.ip6_u = {
-					.daddr = *dest,
-					.saddr = *src,
-				},
-			},
+			.fl6_dst = *dest,
+			.fl6_src = *src,
 		},
 	};
 
@@ -1535,9 +1539,9 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	nrt->rt6i_nexthop = neigh_clone(neigh);
 	/* Reset pmtu, it may be better */
-	nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
-	nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
-						      dst_mtu(&nrt->dst));
+	dst_metric_set(&nrt->dst, RTAX_MTU, ipv6_get_mtu(neigh->dev));
+	dst_metric_set(&nrt->dst, RTAX_ADVMSS, ipv6_advmss(dev_net(neigh->dev),
+							   dst_mtu(&nrt->dst)));
 
 	if (ip6_ins_rt(nrt))
 		goto out;
@@ -1596,9 +1600,12 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
 	   would return automatically.
 	 */
 	if (rt->rt6i_flags & RTF_CACHE) {
-		rt->dst.metrics[RTAX_MTU-1] = pmtu;
-		if (allfrag)
-			rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+		dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
+		if (allfrag) {
+			u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
+		}
 		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
 		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
 		goto out;
@@ -1615,9 +1622,12 @@ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
 	nrt = rt6_alloc_clone(rt, daddr);
 
 	if (nrt) {
-		nrt->dst.metrics[RTAX_MTU-1] = pmtu;
-		if (allfrag)
-			nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+		dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
+		if (allfrag) {
+			u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
+		}
 
 		/* According to RFC 1981, detecting PMTU increase shouldn't be
 		 * happened within 5 mins, the recommended timer is 10 mins.
@@ -1668,7 +1678,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
 
-		memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->dst.dev = ort->dst.dev;
 		if (rt->dst.dev)
@@ -1945,8 +1955,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
 	struct neighbour *neigh;
 
-	if (rt == NULL)
+	if (rt == NULL) {
+		if (net_ratelimit())
+			pr_warning("IPv6: Maximum number of routes reached,"
+				   " consider increasing route/max_size.\n");
 		return ERR_PTR(-ENOMEM);
+	}
 
 	dev_hold(net->loopback_dev);
 	in6_dev_hold(idev);
@@ -1956,9 +1970,9 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	rt->dst.output = ip6_output;
 	rt->rt6i_dev = net->loopback_dev;
 	rt->rt6i_idev = idev;
-	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
-	rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+	dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+	dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
 	rt->dst.obsolete = -1;
 
 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
@@ -2058,8 +2072,8 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	    (dst_mtu(&rt->dst) >= arg->mtu ||
 	     (dst_mtu(&rt->dst) < arg->mtu &&
 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
-		rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
-		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
+		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+		dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, arg->mtu));
 	}
 	return 0;
 }
@@ -2285,7 +2299,7 @@ static int rt6_fill_node(struct net *net,
 		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
-	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
 	if (rt->dst.neighbour)
@@ -2461,8 +2475,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 
 #ifdef CONFIG_PROC_FS
 
-#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
-
 struct rt6_proc_arg
 {
 	char *buffer;
@@ -2678,6 +2690,7 @@ static int __net_init ip6_route_net_init(struct net *net)
 	net->ipv6.ip6_null_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_null_entry;
 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2688,6 +2701,7 @@ static int __net_init ip6_route_net_init(struct net *net)
 	net->ipv6.ip6_prohibit_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
 
 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2697,6 +2711,7 @@ static int __net_init ip6_route_net_init(struct net *net)
 	net->ipv6.ip6_blk_hole_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
@@ -2741,6 +2756,7 @@ static void __net_exit ip6_route_net_exit(struct net *net)
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
+	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
 static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2848,6 @@ void ip6_route_cleanup(void)
 	xfrm6_fini();
 	fib6_gc_cleanup();
 	unregister_pernet_subsys(&ip6_route_net_ops);
+	dst_entries_destroy(&ip6_dst_blackhole_ops);
 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
 }
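rt6_bind_peer(), added in the route.c diff above, installs the inet_peer lock-free: look the peer up (or create it), then a single cmpxchg() on rt->rt6i_peer decides the winner, and the loser drops its reference via inet_putpeer(). A compilable userspace model of that race resolution (toy types; refcounting is reduced to malloc/free):

	#include <stdlib.h>

	struct peer_toy { int dummy; };
	struct rt_toy { struct peer_toy *rt6i_peer; };

	static void bind_peer(struct rt_toy *rt)
	{
		struct peer_toy *peer = calloc(1, sizeof(*peer));
		struct peer_toy *expected = NULL;

		if (!peer)
			return;
		/* cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL => lost the race */
		if (!__atomic_compare_exchange_n(&rt->rt6i_peer, &expected, peer, 0,
						 __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
			free(peer);		/* inet_putpeer() in the diff */
	}

	int main(void)
	{
		struct rt_toy rt = { 0 };

		bind_peer(&rt);
		bind_peer(&rt);		/* second call loses and frees its copy */
		free(rt.rt6i_peer);
		return 0;
	}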
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d6bfaec3bbbf..8ce38f10a547 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -606,8 +606,9 @@ static int ipip6_rcv(struct sk_buff *skb)
 		return 0;
 	}
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+	/* no tunnel matched, let upstream know, ipsec may handle it */
 	rcu_read_unlock();
+	return 1;
 out:
 	kfree_skb(skb);
 	return 0;
@@ -730,10 +731,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	}
 
 	{
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = dst,
-						.saddr = tiph->saddr,
-						.tos = RT_TOS(tos) } },
+		struct flowi fl = { .fl4_dst = dst,
+				    .fl4_src = tiph->saddr,
+				    .fl4_tos = RT_TOS(tos),
 				    .oif = tunnel->parms.link,
 				    .proto = IPPROTO_IPV6 };
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -855,10 +855,9 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.saddr = iph->saddr,
-						.tos = RT_TOS(iph->tos) } },
+		struct flowi fl = { .fl4_dst = iph->daddr,
+				    .fl4_src = iph->saddr,
+				    .fl4_tos = RT_TOS(iph->tos),
 				    .oif = tunnel->parms.link,
 				    .proto = IPPROTO_IPV6 };
 		struct rtable *rt;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7e41e2cbb85e..fee076891646 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -130,6 +130,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
+	struct rt6_info *rt;
 	struct flowi fl;
 	struct dst_entry *dst;
 	int addr_type;
@@ -280,6 +281,26 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	sk->sk_gso_type = SKB_GSO_TCPV6;
 	__ip6_dst_store(sk, dst, NULL, NULL);
 
+	rt = (struct rt6_info *) dst;
+	if (tcp_death_row.sysctl_tw_recycle &&
+	    !tp->rx_opt.ts_recent_stamp &&
+	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
+		struct inet_peer *peer = rt6_get_peer(rt);
+		/*
+		 * VJ's idea. We save last timestamp seen from
+		 * the destination in peer table, when entering state
+		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
+		 * when trying new connection.
+		 */
+		if (peer) {
+			inet_peer_refcheck(peer);
+			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+				tp->rx_opt.ts_recent = peer->tcp_ts;
+			}
+		}
+	}
+
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt)
 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
@@ -906,12 +927,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 };
 #endif
 
-static struct timewait_sock_ops tcp6_timewait_sock_ops = {
-	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
-	.twsk_unique	= tcp_twsk_unique,
-	.twsk_destructor= tcp_twsk_destructor,
-};
-
 static void __tcp_v6_send_check(struct sk_buff *skb,
 				struct in6_addr *saddr, struct in6_addr *daddr)
 {
@@ -1176,6 +1191,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
+	struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
 #else
@@ -1273,6 +1289,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (!isn) {
+		struct inet_peer *peer = NULL;
+
 		if (ipv6_opt_accepted(sk, skb) ||
 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1285,13 +1303,57 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		if (!sk->sk_bound_dev_if &&
 		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
 			treq->iif = inet6_iif(skb);
-		if (!want_cookie) {
-			isn = tcp_v6_init_sequence(skb);
-		} else {
+
+		if (want_cookie) {
 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
 			req->cookie_ts = tmp_opt.tstamp_ok;
+			goto have_isn;
 		}
+
+		/* VJ's idea. We save last timestamp seen
+		 * from the destination in peer table, when entering
+		 * state TIME-WAIT, and check against it before
+		 * accepting new connection request.
+		 *
+		 * If "isn" is not zero, this request hit alive
+		 * timewait bucket, so that all the necessary checks
+		 * are made in the function processing timewait state.
+		 */
+		if (tmp_opt.saw_tstamp &&
+		    tcp_death_row.sysctl_tw_recycle &&
+		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
+		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
+		    ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+				    &treq->rmt_addr)) {
+			inet_peer_refcheck(peer);
+			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
+			    (s32)(peer->tcp_ts - req->ts_recent) >
+							TCP_PAWS_WINDOW) {
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+				goto drop_and_release;
+			}
+		}
+		/* Kill the following clause, if you dislike this way. */
+		else if (!sysctl_tcp_syncookies &&
+			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+			  (sysctl_max_syn_backlog >> 2)) &&
+			 (!peer || !peer->tcp_ts_stamp) &&
+			 (!dst || !dst_metric(dst, RTAX_RTT))) {
+			/* Without syncookies last quarter of
+			 * backlog is filled with destinations,
+			 * proven to be alive.
+			 * It means that we continue to communicate
+			 * to destinations, already remembered
+			 * to the moment of synflood.
+			 */
+			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
+				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+			goto drop_and_release;
+		}
+
+		isn = tcp_v6_init_sequence(skb);
 	}
+have_isn:
 	tcp_rsk(req)->snt_isn = isn;
 
 	security_inet_conn_request(sk, skb, req);
@@ -1304,6 +1366,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	return 0;
 
+drop_and_release:
+	dst_release(dst);
 drop_and_free:
 	reqsk_free(req);
 drop:
@@ -1382,28 +1446,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
 
-	if (dst == NULL) {
-		struct in6_addr *final_p, final;
-		struct flowi fl;
-
-		memset(&fl, 0, sizeof(fl));
-		fl.proto = IPPROTO_TCP;
-		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-		final_p = fl6_update_dst(&fl, opt, &final);
-		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-		fl.oif = sk->sk_bound_dev_if;
-		fl.mark = sk->sk_mark;
-		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-		fl.fl_ip_sport = inet_rsk(req)->loc_port;
-		security_req_classify_flow(req, &fl);
-
-		if (ip6_dst_lookup(sk, &dst, &fl))
-			goto out;
-
-		if (final_p)
-			ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+	if (!dst) {
+		dst = inet6_csk_route_req(sk, req);
+		if (!dst)
 			goto out;
 	}
 
@@ -1818,19 +1863,51 @@ do_time_wait:
 	goto discard_it;
 }
 
-static int tcp_v6_remember_stamp(struct sock *sk)
+static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
 {
-	/* Alas, not yet... */
-	return 0;
+	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct inet_peer *peer;
+
+	if (!rt ||
+	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
+		peer = inet_getpeer_v6(&np->daddr, 1);
+		*release_it = true;
+	} else {
+		if (!rt->rt6i_peer)
+			rt6_bind_peer(rt, 1);
+		peer = rt->rt6i_peer;
+		*release_it = false;
+	}
+
+	return peer;
 }
 
+static void *tcp_v6_tw_get_peer(struct sock *sk)
+{
+	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+	struct inet_timewait_sock *tw = inet_twsk(sk);
+
+	if (tw->tw_family == AF_INET)
+		return tcp_v4_tw_get_peer(sk);
+
+	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
+}
+
+static struct timewait_sock_ops tcp6_timewait_sock_ops = {
+	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
+	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
+	.twsk_getpeer	= tcp_v6_tw_get_peer,
+};
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit	= inet6_csk_xmit,
 	.send_check	= tcp_v6_send_check,
 	.rebuild_header	= inet6_sk_rebuild_header,
 	.conn_request	= tcp_v6_conn_request,
 	.syn_recv_sock	= tcp_v6_syn_recv_sock,
-	.remember_stamp	= tcp_v6_remember_stamp,
+	.get_peer	= tcp_v6_get_peer,
 	.net_header_len	= sizeof(struct ipv6hdr),
 	.setsockopt	= ipv6_setsockopt,
 	.getsockopt	= ipv6_getsockopt,
@@ -1862,7 +1939,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
 	.rebuild_header	= inet_sk_rebuild_header,
 	.conn_request	= tcp_v6_conn_request,
 	.syn_recv_sock	= tcp_v6_syn_recv_sock,
-	.remember_stamp	= tcp_v4_remember_stamp,
+	.get_peer	= tcp_v4_get_peer,
 	.net_header_len	= sizeof(struct iphdr),
 	.setsockopt	= ipv6_setsockopt,
 	.getsockopt	= ipv6_getsockopt,
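The tcp_ipv6.c changes wire IPv6 into the peer-cache timestamp machinery IPv4 already had: on connect, ts_recent is seeded from the peer only while the cached stamp is fresher than TCP_PAWS_MSL; on passive open, a fresher peer stamp rejects the SYN (LINUX_MIB_PAWSPASSIVEREJECTED). A sketch of the seeding rule alone (stand-in structs; TCP_PAWS_MSL is 60 seconds in this tree):

	#include <stdint.h>
	#include <time.h>

	#define TCP_PAWS_MSL 60

	struct peer_toy { uint32_t tcp_ts_stamp, tcp_ts; };
	struct rx_opt_toy { uint32_t ts_recent_stamp, ts_recent; };

	static void seed_ts_recent(struct rx_opt_toy *rx, const struct peer_toy *peer)
	{
		uint32_t now = (uint32_t)time(NULL);

		/* mirrors: (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL */
		if (peer && now - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
			rx->ts_recent_stamp = peer->tcp_ts_stamp;
			rx->ts_recent = peer->tcp_ts;
		}
	}

	int main(void)
	{
		struct peer_toy peer = { (uint32_t)time(NULL), 12345 };
		struct rx_opt_toy rx = { 0, 0 };

		seed_ts_recent(&rx, &peer);
		return rx.ts_recent == 12345 ? 0 : 1;
	}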
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 91def93bec85..26a8da3f2044 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -54,8 +54,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
 	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
 	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-	__be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr;
-	__be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
+	__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
+	__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 	int sk_ipv6only = ipv6_only_sock(sk);
 	int sk2_ipv6only = inet_v6_ipv6only(sk2);
 	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -227,7 +227,7 @@ begin:
 
 	if (result) {
 exact_match:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
 				  daddr, hnum, dif) < badness)) {
@@ -294,7 +294,7 @@ begin:
 		goto begin;
 
 	if (result) {
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
 				  daddr, dport, dif) < badness)) {
@@ -602,7 +602,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb)) {
+			if (sk_rcvqueues_full(sk, skb1)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
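atomic_inc_not_zero_hint(), used above, is the same grab-a-reference-only-if-live primitive as atomic_inc_not_zero(), plus a hint of the likely current value so the compare-exchange usually succeeds on the first try. The surrounding discipline matters more than the hint: take the reference, then re-validate the keys, because under RCU the socket may have been freed and recycled between hash lookup and refcount bump. A userspace model (toy types; a single candidate stands in for the hash chain):

	#include <stdbool.h>
	#include <stddef.h>

	struct sock_toy { int refcnt; int port; };

	static bool get_ref_not_zero(struct sock_toy *sk)
	{
		int old = __atomic_load_n(&sk->refcnt, __ATOMIC_RELAXED);

		while (old != 0)
			if (__atomic_compare_exchange_n(&sk->refcnt, &old, old + 1, 0,
							__ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
				return true;
		return false;
	}

	static struct sock_toy *lookup(struct sock_toy *cand, int port)
	{
		if (!get_ref_not_zero(cand))
			return NULL;		/* being freed: treat as not found */
		if (cand->port != port) {	/* recycled under us: back off */
			__atomic_fetch_sub(&cand->refcnt, 1, __ATOMIC_RELEASE);
			return NULL;
		}
		return cand;
	}

	int main(void)
	{
		struct sock_toy sk = { 1, 53 };

		return lookup(&sk, 53) ? 0 : 1;
	}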
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index b809812c8d30..645cb968d450 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -14,6 +14,7 @@
 #include <net/dsfield.h>
 #include <net/dst.h>
 #include <net/inet_ecn.h>
+#include <net/ip6_route.h>
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
@@ -53,7 +54,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (x->props.flags & XFRM_STATE_NOECN)
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
-	top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
+	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
 	ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
 	ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
 	return 0;
diff --git a/net/irda/ircomm/Makefile b/net/irda/ircomm/Makefile
index 48689458c086..ab23b5ba7e33 100644
--- a/net/irda/ircomm/Makefile
+++ b/net/irda/ircomm/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_IRCOMM) += ircomm.o ircomm-tty.o
 
-ircomm-objs := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
-ircomm-tty-objs := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
+ircomm-y := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
+ircomm-tty-y := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
diff --git a/net/irda/irlan/Makefile b/net/irda/irlan/Makefile
index 77549bc8641b..94eefbc8e6b9 100644
--- a/net/irda/irlan/Makefile
+++ b/net/irda/irlan/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_IRLAN) += irlan.o
 
-irlan-objs := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
+irlan-y := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
diff --git a/net/irda/irnet/Makefile b/net/irda/irnet/Makefile
index b3ee01e0def3..61c365c8a2a0 100644
--- a/net/irda/irnet/Makefile
+++ b/net/irda/irnet/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_IRNET) += irnet.o
 
-irnet-objs := irnet_ppp.o irnet_irda.o
+irnet-y := irnet_ppp.o irnet_irda.o
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 285761e77d90..f6054f9ccbe3 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -550,22 +550,30 @@ EXPORT_SYMBOL(irttp_close_tsap);
  */
 int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
 {
+	int ret;
+
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 	IRDA_ASSERT(skb != NULL, return -1;);
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
+	/* Take shortcut on zero byte packets */
+	if (skb->len == 0) {
+		ret = 0;
+		goto err;
+	}
+
 	/* Check that nothing bad happens */
-	if ((skb->len == 0) || (!self->connected)) {
-		IRDA_DEBUG(1, "%s(), No data, or not connected\n",
-			   __func__);
+	if (!self->connected) {
+		IRDA_WARNING("%s(), Not connected\n", __func__);
+		ret = -ENOTCONN;
 		goto err;
 	}
 
 	if (skb->len > self->max_seg_size) {
-		IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n",
-			   __func__);
+		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
+		ret = -EMSGSIZE;
 		goto err;
 	}
 
@@ -576,7 +584,7 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
 
 err:
 	dev_kfree_skb(skb);
-	return -1;
+	return ret;
 }
 EXPORT_SYMBOL(irttp_udata_request);
 
@@ -599,9 +607,15 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
 	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
 		   skb_queue_len(&self->tx_queue));
 
+	/* Take shortcut on zero byte packets */
+	if (skb->len == 0) {
+		ret = 0;
+		goto err;
+	}
+
 	/* Check that nothing bad happens */
-	if ((skb->len == 0) || (!self->connected)) {
-		IRDA_WARNING("%s: No data, or not connected\n", __func__);
+	if (!self->connected) {
+		IRDA_WARNING("%s: Not connected\n", __func__);
 		ret = -ENOTCONN;
 		goto err;
 	}
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b283d4..b8dbae82fab8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 
-	pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (pd == NULL)
 		goto out;
 
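The l2tp_debugfs.c hunk is a swapped-argument fix: kzalloc(size, flags) had been called as kzalloc(GFP_KERNEL, sizeof(*pd)). In kernels of this vintage GFP_KERNEL expands to 0xd0, so the buggy call over-allocated (208 bytes) with a nonsense flags value and happened to work by luck. A userspace analogue of the safe idiom (kzalloc_toy is invented; the point is sizeof(*pd) on the pointer, which stays correct if pd's type ever changes):

	#include <stdlib.h>

	struct pd_toy { int tunnel_idx; int session_idx; };

	static void *kzalloc_toy(size_t size /* , gfp_t flags */)
	{
		return calloc(1, size);	/* zeroed, like kzalloc */
	}

	int main(void)
	{
		struct pd_toy *pd = kzalloc_toy(sizeof(*pd));	/* size first */

		if (!pd)
			return 1;
		free(pd);
		return 0;
	}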
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0bf6a59545ab..110efb704c9b 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -476,15 +476,13 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
 	{
 		struct flowi fl = { .oif = sk->sk_bound_dev_if,
-				    .nl_u = { .ip4_u = {
-						.daddr = daddr,
-						.saddr = inet->inet_saddr,
-						.tos = RT_CONN_FLAGS(sk) } },
+				    .fl4_dst = daddr,
+				    .fl4_src = inet->inet_saddr,
+				    .fl4_tos = RT_CONN_FLAGS(sk),
 				    .proto = sk->sk_protocol,
 				    .flags = inet_sk_flowi_flags(sk),
-				    .uli_u = { .ports = {
-						 .sport = inet->inet_sport,
-						 .dport = inet->inet_dport } } };
+				    .fl_ip_sport = inet->inet_sport,
+				    .fl_ip_dport = inet->inet_dport };
 
 		/* If this fails, retransmit mechanism of transport layer will
 		 * keep trying until route appears or the connection times
@@ -674,4 +672,8 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP over IP");
 MODULE_VERSION("1.0");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
diff --git a/net/lapb/Makefile b/net/lapb/Makefile
index 53f7c90db163..fff797dfc88c 100644
--- a/net/lapb/Makefile
+++ b/net/lapb/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_LAPB) += lapb.o
 
-lapb-objs := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o
+lapb-y := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 582612998211..dfd3a648a551 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -316,9 +316,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	if (unlikely(addr->sllc_family != AF_LLC))
 		goto out;
 	rc = -ENODEV;
-	rtnl_lock();
+	rcu_read_lock();
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
 		if (llc->dev) {
 			if (!addr->sllc_arphrd)
 				addr->sllc_arphrd = llc->dev->type;
@@ -329,14 +329,15 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 			    !llc_mac_match(addr->sllc_mac,
 					   llc->dev->dev_addr)) {
 				rc = -EINVAL;
-				dev_put(llc->dev);
 				llc->dev = NULL;
 			}
 		}
 	} else
-		llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
+		llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
 					   addr->sllc_mac);
-	rtnl_unlock();
+	if (llc->dev)
+		dev_hold(llc->dev);
+	rcu_read_unlock();
 	if (!llc->dev)
 		goto out;
 	if (!addr->sllc_sap) {
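The af_llc bind path above swaps the RTNL mutex for an RCU read section: the device is resolved with the _rcu lookup variants, and, crucially, dev_hold() takes the long-lived reference before rcu_read_unlock(), so the pointer stays valid after the critical section ends. A compilable model of that shape (no-op lock stand-ins and toy types; the ordering is the point):

	#include <stddef.h>

	struct dev_toy { int ifindex; int refcnt; };

	static struct dev_toy devs[2] = { { 1, 0 }, { 2, 0 } };

	static void rcu_read_lock(void) { }	/* stand-in */
	static void rcu_read_unlock(void) { }	/* stand-in */
	static void dev_hold(struct dev_toy *d)
	{
		__atomic_fetch_add(&d->refcnt, 1, __ATOMIC_RELAXED);
	}

	static struct dev_toy *bind_dev(int ifindex)
	{
		struct dev_toy *dev = NULL;
		size_t i;

		rcu_read_lock();
		for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
			if (devs[i].ifindex == ifindex)
				dev = &devs[i];
		if (dev)
			dev_hold(dev);	/* reference taken before the section ends */
		rcu_read_unlock();
		return dev;
	}

	int main(void)
	{
		return bind_dev(2) ? 0 : 1;
	}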
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 85dabb86be6f..32fcbe290c04 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -173,9 +173,11 @@ next_hook:
173 outdev, &elem, okfn, hook_thresh); 173 outdev, &elem, okfn, hook_thresh);
174 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 174 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
175 ret = 1; 175 ret = 1;
176 } else if (verdict == NF_DROP) { 176 } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
177 kfree_skb(skb); 177 kfree_skb(skb);
178 ret = -EPERM; 178 ret = -(verdict >> NF_VERDICT_BITS);
179 if (ret == 0)
180 ret = -EPERM;
179 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 181 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
180 if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn, 182 if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
181 verdict >> NF_VERDICT_BITS)) 183 verdict >> NF_VERDICT_BITS))
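
What the reworked NF_DROP branch decodes: a hook may pack a negative errno into the upper bits of its verdict, and nf_hook_slow() now propagates that instead of always returning -EPERM. A sketch of the encoding side, assuming this kernel's NF_VERDICT_BITS == 16:

/* hypothetical helper, not part of the patch */
static inline unsigned int nf_drop_with_err(int err)
{
	return ((unsigned int)(-err) << NF_VERDICT_BITS) | NF_DROP;
}

/* nf_drop_with_err(-EHOSTUNREACH) then satisfies:
 *   (verdict & NF_VERDICT_MASK) == NF_DROP  -> the skb is freed
 *   -(verdict >> NF_VERDICT_BITS)           -> -EHOSTUNREACH is returned
 * Plain NF_DROP encodes errno 0, so the ret == 0 fallback above keeps
 * the historical -EPERM behaviour.
 */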
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index a22dac227055..70bd1d0774c6 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -4,6 +4,7 @@
4menuconfig IP_VS 4menuconfig IP_VS
5 tristate "IP virtual server support" 5 tristate "IP virtual server support"
6 depends on NET && INET && NETFILTER 6 depends on NET && INET && NETFILTER
7 depends on (NF_CONNTRACK || NF_CONNTRACK=n)
7 ---help--- 8 ---help---
8 IP Virtual Server support will let you build a high-performance 9 IP Virtual Server support will let you build a high-performance
9 virtual server based on cluster of two or more real servers. This 10 virtual server based on cluster of two or more real servers. This
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5f5daa30b0af..c6f293639220 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -110,10 +110,8 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
110 struct rt6_info *rt; 110 struct rt6_info *rt;
111 struct flowi fl = { 111 struct flowi fl = {
112 .oif = 0, 112 .oif = 0,
113 .nl_u = { 113 .fl6_dst = *addr,
114 .ip6_u = { 114 .fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
115 .daddr = *addr,
116 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
117 }; 115 };
118 116
119 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); 117 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
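
The fl4_*/fl6_* names used in this and the following conversions are, at this point in the tree, not new struct members but shorthand macros into the existing nested union. An abridged sketch of include/net/flow.h from this era (field details are our best reconstruction, not quoted from the patch):

struct flowi {
	int	oif;
	union {
		struct {
			__be32	daddr;
			__be32	saddr;
			__u8	tos;
			__u8	scope;
		} ip4_u;
		struct {
			struct in6_addr	daddr;
			struct in6_addr	saddr;
			__be32		flowlabel;
		} ip6_u;
	} nl_u;
#define fl4_dst		nl_u.ip4_u.daddr
#define fl4_src		nl_u.ip4_u.saddr
#define fl4_tos		nl_u.ip4_u.tos
#define fl4_scope	nl_u.ip4_u.scope
#define fl6_dst		nl_u.ip6_u.daddr
#define fl6_src		nl_u.ip6_u.saddr
#define fl6_flowlabel	nl_u.ip6_u.flowlabel
	/* ... */
};

The designated initializers therefore keep initializing the same storage; only the deeply nested spelling goes away.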
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index de04ea39cde8..5325a3fbe4ac 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -96,12 +96,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
96 if (!(rt = (struct rtable *) 96 if (!(rt = (struct rtable *)
97 __ip_vs_dst_check(dest, rtos))) { 97 __ip_vs_dst_check(dest, rtos))) {
98 struct flowi fl = { 98 struct flowi fl = {
99 .oif = 0, 99 .fl4_dst = dest->addr.ip,
100 .nl_u = { 100 .fl4_tos = rtos,
101 .ip4_u = {
102 .daddr = dest->addr.ip,
103 .saddr = 0,
104 .tos = rtos, } },
105 }; 101 };
106 102
107 if (ip_route_output_key(net, &rt, &fl)) { 103 if (ip_route_output_key(net, &rt, &fl)) {
@@ -118,12 +114,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
118 spin_unlock(&dest->dst_lock); 114 spin_unlock(&dest->dst_lock);
119 } else { 115 } else {
120 struct flowi fl = { 116 struct flowi fl = {
121 .oif = 0, 117 .fl4_dst = daddr,
122 .nl_u = { 118 .fl4_tos = rtos,
123 .ip4_u = {
124 .daddr = daddr,
125 .saddr = 0,
126 .tos = rtos, } },
127 }; 119 };
128 120
129 if (ip_route_output_key(net, &rt, &fl)) { 121 if (ip_route_output_key(net, &rt, &fl)) {
@@ -169,7 +161,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
169 struct net *net = dev_net(dev); 161 struct net *net = dev_net(dev);
170 struct iphdr *iph = ip_hdr(skb); 162 struct iphdr *iph = ip_hdr(skb);
171 163
172 if (rt->fl.iif) { 164 if (rt_is_input_route(rt)) {
173 unsigned long orefdst = skb->_skb_refdst; 165 unsigned long orefdst = skb->_skb_refdst;
174 166
175 if (ip_route_input(skb, iph->daddr, iph->saddr, 167 if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -178,14 +170,9 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
178 refdst_drop(orefdst); 170 refdst_drop(orefdst);
179 } else { 171 } else {
180 struct flowi fl = { 172 struct flowi fl = {
181 .oif = 0, 173 .fl4_dst = iph->daddr,
182 .nl_u = { 174 .fl4_src = iph->saddr,
183 .ip4_u = { 175 .fl4_tos = RT_TOS(iph->tos),
184 .daddr = iph->daddr,
185 .saddr = iph->saddr,
186 .tos = RT_TOS(iph->tos),
187 }
188 },
189 .mark = skb->mark, 176 .mark = skb->mark,
190 }; 177 };
191 struct rtable *rt; 178 struct rtable *rt;
@@ -216,12 +203,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
216{ 203{
217 struct dst_entry *dst; 204 struct dst_entry *dst;
218 struct flowi fl = { 205 struct flowi fl = {
219 .oif = 0, 206 .fl6_dst = *daddr,
220 .nl_u = {
221 .ip6_u = {
222 .daddr = *daddr,
223 },
224 },
225 }; 207 };
226 208
227 dst = ip6_route_output(net, NULL, &fl); 209 dst = ip6_route_output(net, NULL, &fl);
@@ -552,7 +534,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
552#endif 534#endif
553 535
554 /* From world but DNAT to loopback address? */ 536 /* From world but DNAT to loopback address? */
555 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) { 537 if (local && ipv4_is_loopback(rt->rt_dst) &&
538 rt_is_input_route(skb_rtable(skb))) {
556 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " 539 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
557 "stopping DNAT to loopback address"); 540 "stopping DNAT to loopback address");
558 goto tx_error_put; 541 goto tx_error_put;
@@ -1165,7 +1148,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1165#endif 1148#endif
1166 1149
1167 /* From world but DNAT to loopback address? */ 1150 /* From world but DNAT to loopback address? */
1168 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) { 1151 if (local && ipv4_is_loopback(rt->rt_dst) &&
1152 rt_is_input_route(skb_rtable(skb))) {
1169 IP_VS_DBG(1, "%s(): " 1153 IP_VS_DBG(1, "%s(): "
1170 "stopping DNAT to loopback %pI4\n", 1154 "stopping DNAT to loopback %pI4\n",
1171 __func__, &cp->daddr.ip); 1155 __func__, &cp->daddr.ip);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d9966a..27a5ea6b6a0f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1312 if (!hash) { 1312 if (!hash) {
1313 *vmalloced = 1; 1313 *vmalloced = 1;
1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1316 PAGE_KERNEL);
1316 } 1317 }
1317 1318
1318 if (hash && nulls) 1319 if (hash && nulls)
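
The point of adding __GFP_HIGHMEM is that vmalloc memory is reached through its own virtual mapping anyway, so the backing pages may come from highmem instead of consuming scarce lowmem on 32-bit machines. As a side note (ours, not part of the patch), vzalloc(), introduced around this same release, uses exactly this flag combination internally, so the call could later be shortened to:

hash = vzalloc(sz);  /* equivalent to __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL) */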
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d92958023..dc7bb74110df 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
292 292
293 for (i = 0; i < MAX_NF_CT_PROTO; i++) 293 for (i = 0; i < MAX_NF_CT_PROTO; i++)
294 proto_array[i] = &nf_conntrack_l4proto_generic; 294 proto_array[i] = &nf_conntrack_l4proto_generic;
295
296 /* Before making proto_array visible to lockless readers,
297 * we must make sure its content is committed to memory.
298 */
299 smp_wmb();
300
295 nf_ct_protos[l4proto->l3proto] = proto_array; 301 nf_ct_protos[l4proto->l3proto] = proto_array;
296 } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != 302 } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
297 &nf_conntrack_l4proto_generic) { 303 &nf_conntrack_l4proto_generic) {
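
The smp_wmb() added above is one half of a publish/subscribe pairing: lockless readers are expected to load the pointer with rcu_dereference() (or an equivalent dependent load), sketched here rather than quoted from this file:

/* Reader side (sketch): the data dependency orders the array loads
 * after the pointer load on every architecture except Alpha, where
 * rcu_dereference() supplies the missing read barrier.
 */
struct nf_conntrack_l4proto **arr;

arr = rcu_dereference(nf_ct_protos[l3proto]);
if (arr != NULL)
	l4proto = arr[l4num];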
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 22a2d421e7eb..5128a6c4cb2c 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -70,9 +70,9 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
70 return false; 70 return false;
71 fl.oif = info->priv->oif; 71 fl.oif = info->priv->oif;
72 } 72 }
73 fl.nl_u.ip4_u.daddr = info->gw.ip; 73 fl.fl4_dst = info->gw.ip;
74 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 74 fl.fl4_tos = RT_TOS(iph->tos);
75 fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE; 75 fl.fl4_scope = RT_SCOPE_UNIVERSE;
76 if (ip_route_output_key(net, &rt, &fl) != 0) 76 if (ip_route_output_key(net, &rt, &fl) != 0)
77 return false; 77 return false;
78 78
@@ -150,9 +150,9 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
150 return false; 150 return false;
151 fl.oif = info->priv->oif; 151 fl.oif = info->priv->oif;
152 } 152 }
153 fl.nl_u.ip6_u.daddr = info->gw.in6; 153 fl.fl6_dst = info->gw.in6;
154 fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) | 154 fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
155 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]; 155 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
156 dst = ip6_route_output(net, NULL, &fl); 156 dst = ip6_route_output(net, NULL, &fl);
157 if (dst == NULL) 157 if (dst == NULL)
158 return false; 158 return false;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27b9d46..e79efaf06389 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -61,6 +61,7 @@
61#include <linux/kernel.h> 61#include <linux/kernel.h>
62#include <linux/kmod.h> 62#include <linux/kmod.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/vmalloc.h>
64#include <net/net_namespace.h> 65#include <net/net_namespace.h>
65#include <net/ip.h> 66#include <net/ip.h>
66#include <net/protocol.h> 67#include <net/protocol.h>
@@ -163,8 +164,13 @@ struct packet_mreq_max {
163static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 164static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
164 int closing, int tx_ring); 165 int closing, int tx_ring);
165 166
167#define PGV_FROM_VMALLOC 1
168struct pgv {
169 char *buffer;
170};
171
166struct packet_ring_buffer { 172struct packet_ring_buffer {
167 char **pg_vec; 173 struct pgv *pg_vec;
168 unsigned int head; 174 unsigned int head;
169 unsigned int frames_per_block; 175 unsigned int frames_per_block;
170 unsigned int frame_size; 176 unsigned int frame_size;
@@ -217,6 +223,13 @@ struct packet_skb_cb {
217 223
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 224#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 225
226static inline __pure struct page *pgv_to_page(void *addr)
227{
228 if (is_vmalloc_addr(addr))
229 return vmalloc_to_page(addr);
230 return virt_to_page(addr);
231}
232
220static void __packet_set_status(struct packet_sock *po, void *frame, int status) 233static void __packet_set_status(struct packet_sock *po, void *frame, int status)
221{ 234{
222 union { 235 union {
@@ -229,11 +242,11 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
229 switch (po->tp_version) { 242 switch (po->tp_version) {
230 case TPACKET_V1: 243 case TPACKET_V1:
231 h.h1->tp_status = status; 244 h.h1->tp_status = status;
232 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 245 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
233 break; 246 break;
234 case TPACKET_V2: 247 case TPACKET_V2:
235 h.h2->tp_status = status; 248 h.h2->tp_status = status;
236 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 249 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
237 break; 250 break;
238 default: 251 default:
239 pr_err("TPACKET version not supported\n"); 252 pr_err("TPACKET version not supported\n");
@@ -256,10 +269,10 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
256 h.raw = frame; 269 h.raw = frame;
257 switch (po->tp_version) { 270 switch (po->tp_version) {
258 case TPACKET_V1: 271 case TPACKET_V1:
259 flush_dcache_page(virt_to_page(&h.h1->tp_status)); 272 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
260 return h.h1->tp_status; 273 return h.h1->tp_status;
261 case TPACKET_V2: 274 case TPACKET_V2:
262 flush_dcache_page(virt_to_page(&h.h2->tp_status)); 275 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
263 return h.h2->tp_status; 276 return h.h2->tp_status;
264 default: 277 default:
265 pr_err("TPACKET version not supported\n"); 278 pr_err("TPACKET version not supported\n");
@@ -283,7 +296,8 @@ static void *packet_lookup_frame(struct packet_sock *po,
283 pg_vec_pos = position / rb->frames_per_block; 296 pg_vec_pos = position / rb->frames_per_block;
284 frame_offset = position % rb->frames_per_block; 297 frame_offset = position % rb->frames_per_block;
285 298
286 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size); 299 h.raw = rb->pg_vec[pg_vec_pos].buffer +
300 (frame_offset * rb->frame_size);
287 301
288 if (status != __packet_get_status(po, h.raw)) 302 if (status != __packet_get_status(po, h.raw))
289 return NULL; 303 return NULL;
@@ -503,7 +517,8 @@ out_free:
503 return err; 517 return err;
504} 518}
505 519
506static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, 520static inline unsigned int run_filter(const struct sk_buff *skb,
521 const struct sock *sk,
507 unsigned int res) 522 unsigned int res)
508{ 523{
509 struct sk_filter *filter; 524 struct sk_filter *filter;
@@ -511,22 +526,22 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
511 rcu_read_lock_bh(); 526 rcu_read_lock_bh();
512 filter = rcu_dereference_bh(sk->sk_filter); 527 filter = rcu_dereference_bh(sk->sk_filter);
513 if (filter != NULL) 528 if (filter != NULL)
514 res = sk_run_filter(skb, filter->insns, filter->len); 529 res = sk_run_filter(skb, filter->insns);
515 rcu_read_unlock_bh(); 530 rcu_read_unlock_bh();
516 531
517 return res; 532 return res;
518} 533}
519 534
520/* 535/*
521 This function makes lazy skb cloning in hope that most of packets 536 * This function makes lazy skb cloning in hope that most of packets
522 are discarded by BPF. 537 * are discarded by BPF.
523 538 *
524 Note tricky part: we DO mangle shared skb! skb->data, skb->len 539 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
525 and skb->cb are mangled. It works because (and until) packets 540 * and skb->cb are mangled. It works because (and until) packets
526 falling here are owned by current CPU. Output packets are cloned 541 * falling here are owned by current CPU. Output packets are cloned
527 by dev_queue_xmit_nit(), input packets are processed by net_bh 542 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 528 sequencially, so that if we return skb to original state on exit, 543 * sequentially, so that if we return skb to original state on exit,
529 we will not harm anyone. 544 * we will not harm anyone.
530 */ 545 */
531 546
532static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 547static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -552,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
552 567
553 if (dev->header_ops) { 568 if (dev->header_ops) {
554 /* The device has an explicit notion of ll header, 569 /* The device has an explicit notion of ll header,
555 exported to higher levels. 570 * exported to higher levels.
556 571 *
557 Otherwise, the device hides datails of it frame 572 * Otherwise, the device hides details of its frame
558 structure, so that corresponding packet head 573 * structure, so that corresponding packet head is
559 never delivered to user. 574 * never delivered to user.
560 */ 575 */
561 if (sk->sk_type != SOCK_DGRAM) 576 if (sk->sk_type != SOCK_DGRAM)
562 skb_push(skb, skb->data - skb_mac_header(skb)); 577 skb_push(skb, skb->data - skb_mac_header(skb));
@@ -791,17 +806,15 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
791 806
792 __packet_set_status(po, h.raw, status); 807 __packet_set_status(po, h.raw, status);
793 smp_mb(); 808 smp_mb();
809#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
794 { 810 {
795 struct page *p_start, *p_end; 811 u8 *start, *end;
796 u8 *h_end = h.raw + macoff + snaplen - 1; 812
797 813 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
798 p_start = virt_to_page(h.raw); 814 for (start = h.raw; start < end; start += PAGE_SIZE)
799 p_end = virt_to_page(h_end); 815 flush_dcache_page(pgv_to_page(start));
800 while (p_start <= p_end) {
801 flush_dcache_page(p_start);
802 p_start++;
803 }
804 } 816 }
817#endif
805 818
806 sk->sk_data_ready(sk, 0); 819 sk->sk_data_ready(sk, 0);
807 820
@@ -907,7 +920,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
907 } 920 }
908 921
909 err = -EFAULT; 922 err = -EFAULT;
910 page = virt_to_page(data);
911 offset = offset_in_page(data); 923 offset = offset_in_page(data);
912 len_max = PAGE_SIZE - offset; 924 len_max = PAGE_SIZE - offset;
913 len = ((to_write > len_max) ? len_max : to_write); 925 len = ((to_write > len_max) ? len_max : to_write);
@@ -926,11 +938,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
926 return -EFAULT; 938 return -EFAULT;
927 } 939 }
928 940
941 page = pgv_to_page(data);
942 data += len;
929 flush_dcache_page(page); 943 flush_dcache_page(page);
930 get_page(page); 944 get_page(page);
931 skb_fill_page_desc(skb, 945 skb_fill_page_desc(skb, nr_frags, page, offset, len);
932 nr_frags,
933 page++, offset, len);
934 to_write -= len; 946 to_write -= len;
935 offset = 0; 947 offset = 0;
936 len_max = PAGE_SIZE; 948 len_max = PAGE_SIZE;
@@ -1610,9 +1622,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1610 1622
1611 err = -EINVAL; 1623 err = -EINVAL;
1612 vnet_hdr_len = sizeof(vnet_hdr); 1624 vnet_hdr_len = sizeof(vnet_hdr);
1613 if ((len -= vnet_hdr_len) < 0) 1625 if (len < vnet_hdr_len)
1614 goto out_free; 1626 goto out_free;
1615 1627
1628 len -= vnet_hdr_len;
1629
1616 if (skb_is_gso(skb)) { 1630 if (skb_is_gso(skb)) {
1617 struct skb_shared_info *sinfo = skb_shinfo(skb); 1631 struct skb_shared_info *sinfo = skb_shinfo(skb);
1618 1632
@@ -1719,7 +1733,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1719 rcu_read_lock(); 1733 rcu_read_lock();
1720 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); 1734 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1721 if (dev) 1735 if (dev)
1722 strlcpy(uaddr->sa_data, dev->name, 15); 1736 strncpy(uaddr->sa_data, dev->name, 14);
1723 else 1737 else
1724 memset(uaddr->sa_data, 0, 14); 1738 memset(uaddr->sa_data, 0, 14);
1725 rcu_read_unlock(); 1739 rcu_read_unlock();
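
The strlcpy-to-strncpy change above is about the destination buffer, not string-API preference: sa_data is a 14-byte field with no NUL-termination guarantee, and strlcpy() with a size of 15 can write the terminator one byte past it when the interface name is long enough. For reference, the layout from <linux/socket.h>:

struct sockaddr {
	sa_family_t sa_family;    /* address family, AF_xxx */
	char        sa_data[14];  /* 14 bytes of protocol address */
};

strncpy(uaddr->sa_data, dev->name, 14) copies at most 14 bytes and zero-pads shorter names, so it never touches the byte after the field.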
@@ -1742,6 +1756,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1742 sll->sll_family = AF_PACKET; 1756 sll->sll_family = AF_PACKET;
1743 sll->sll_ifindex = po->ifindex; 1757 sll->sll_ifindex = po->ifindex;
1744 sll->sll_protocol = po->num; 1758 sll->sll_protocol = po->num;
1759 sll->sll_pkttype = 0;
1745 rcu_read_lock(); 1760 rcu_read_lock();
1746 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); 1761 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1747 if (dev) { 1762 if (dev) {
@@ -2322,37 +2337,70 @@ static const struct vm_operations_struct packet_mmap_ops = {
2322 .close = packet_mm_close, 2337 .close = packet_mm_close,
2323}; 2338};
2324 2339
2325static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len) 2340static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
2341 unsigned int len)
2326{ 2342{
2327 int i; 2343 int i;
2328 2344
2329 for (i = 0; i < len; i++) { 2345 for (i = 0; i < len; i++) {
2330 if (likely(pg_vec[i])) 2346 if (likely(pg_vec[i].buffer)) {
2331 free_pages((unsigned long) pg_vec[i], order); 2347 if (is_vmalloc_addr(pg_vec[i].buffer))
2348 vfree(pg_vec[i].buffer);
2349 else
2350 free_pages((unsigned long)pg_vec[i].buffer,
2351 order);
2352 pg_vec[i].buffer = NULL;
2353 }
2332 } 2354 }
2333 kfree(pg_vec); 2355 kfree(pg_vec);
2334} 2356}
2335 2357
2336static inline char *alloc_one_pg_vec_page(unsigned long order) 2358static inline char *alloc_one_pg_vec_page(unsigned long order)
2337{ 2359{
2338 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN; 2360 char *buffer = NULL;
2361 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
2362 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
2363
2364 buffer = (char *) __get_free_pages(gfp_flags, order);
2365
2366 if (buffer)
2367 return buffer;
2368
2369 /*
2370 * __get_free_pages failed, fall back to vmalloc
2371 */
2372 buffer = vzalloc((1 << order) * PAGE_SIZE);
2339 2373
2340 return (char *) __get_free_pages(gfp_flags, order); 2374 if (buffer)
2375 return buffer;
2376
2377 /*
 2378 * vmalloc failed, let's dig into swap here
2379 */
2380 gfp_flags &= ~__GFP_NORETRY;
2381 buffer = (char *)__get_free_pages(gfp_flags, order);
2382 if (buffer)
2383 return buffer;
2384
2385 /*
2386 * complete and utter failure
2387 */
2388 return NULL;
2341} 2389}
2342 2390
2343static char **alloc_pg_vec(struct tpacket_req *req, int order) 2391static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
2344{ 2392{
2345 unsigned int block_nr = req->tp_block_nr; 2393 unsigned int block_nr = req->tp_block_nr;
2346 char **pg_vec; 2394 struct pgv *pg_vec;
2347 int i; 2395 int i;
2348 2396
2349 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL); 2397 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
2350 if (unlikely(!pg_vec)) 2398 if (unlikely(!pg_vec))
2351 goto out; 2399 goto out;
2352 2400
2353 for (i = 0; i < block_nr; i++) { 2401 for (i = 0; i < block_nr; i++) {
2354 pg_vec[i] = alloc_one_pg_vec_page(order); 2402 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
2355 if (unlikely(!pg_vec[i])) 2403 if (unlikely(!pg_vec[i].buffer))
2356 goto out_free_pgvec; 2404 goto out_free_pgvec;
2357 } 2405 }
2358 2406
@@ -2368,7 +2416,7 @@ out_free_pgvec:
2368static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 2416static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2369 int closing, int tx_ring) 2417 int closing, int tx_ring)
2370{ 2418{
2371 char **pg_vec = NULL; 2419 struct pgv *pg_vec = NULL;
2372 struct packet_sock *po = pkt_sk(sk); 2420 struct packet_sock *po = pkt_sk(sk);
2373 int was_running, order = 0; 2421 int was_running, order = 0;
2374 struct packet_ring_buffer *rb; 2422 struct packet_ring_buffer *rb;
@@ -2453,22 +2501,20 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2453 mutex_lock(&po->pg_vec_lock); 2501 mutex_lock(&po->pg_vec_lock);
2454 if (closing || atomic_read(&po->mapped) == 0) { 2502 if (closing || atomic_read(&po->mapped) == 0) {
2455 err = 0; 2503 err = 0;
2456#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
2457 spin_lock_bh(&rb_queue->lock); 2504 spin_lock_bh(&rb_queue->lock);
2458 pg_vec = XC(rb->pg_vec, pg_vec); 2505 swap(rb->pg_vec, pg_vec);
2459 rb->frame_max = (req->tp_frame_nr - 1); 2506 rb->frame_max = (req->tp_frame_nr - 1);
2460 rb->head = 0; 2507 rb->head = 0;
2461 rb->frame_size = req->tp_frame_size; 2508 rb->frame_size = req->tp_frame_size;
2462 spin_unlock_bh(&rb_queue->lock); 2509 spin_unlock_bh(&rb_queue->lock);
2463 2510
2464 order = XC(rb->pg_vec_order, order); 2511 swap(rb->pg_vec_order, order);
2465 req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr); 2512 swap(rb->pg_vec_len, req->tp_block_nr);
2466 2513
2467 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 2514 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
2468 po->prot_hook.func = (po->rx_ring.pg_vec) ? 2515 po->prot_hook.func = (po->rx_ring.pg_vec) ?
2469 tpacket_rcv : packet_rcv; 2516 tpacket_rcv : packet_rcv;
2470 skb_queue_purge(rb_queue); 2517 skb_queue_purge(rb_queue);
2471#undef XC
2472 if (atomic_read(&po->mapped)) 2518 if (atomic_read(&po->mapped))
2473 pr_err("packet_mmap: vma is busy: %d\n", 2519 pr_err("packet_mmap: vma is busy: %d\n",
2474 atomic_read(&po->mapped)); 2520 atomic_read(&po->mapped));
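
The ad-hoc XC() exchange macro gives way to the generic swap() helper from <linux/kernel.h>, defined as:

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

Unlike XC(), swap() exchanges in place instead of returning the old value, which is why each former assignment above becomes a plain statement.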
@@ -2530,15 +2576,17 @@ static int packet_mmap(struct file *file, struct socket *sock,
2530 continue; 2576 continue;
2531 2577
2532 for (i = 0; i < rb->pg_vec_len; i++) { 2578 for (i = 0; i < rb->pg_vec_len; i++) {
2533 struct page *page = virt_to_page(rb->pg_vec[i]); 2579 struct page *page;
2580 void *kaddr = rb->pg_vec[i].buffer;
2534 int pg_num; 2581 int pg_num;
2535 2582
2536 for (pg_num = 0; pg_num < rb->pg_vec_pages; 2583 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
2537 pg_num++, page++) { 2584 page = pgv_to_page(kaddr);
2538 err = vm_insert_page(vma, start, page); 2585 err = vm_insert_page(vma, start, page);
2539 if (unlikely(err)) 2586 if (unlikely(err))
2540 goto out; 2587 goto out;
2541 start += PAGE_SIZE; 2588 start += PAGE_SIZE;
2589 kaddr += PAGE_SIZE;
2542 } 2590 }
2543 } 2591 }
2544 } 2592 }
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
index d62bbba649b3..e10b1b182ce3 100644
--- a/net/phonet/Makefile
+++ b/net/phonet/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_PHONET) += phonet.o pn_pep.o 1obj-$(CONFIG_PHONET) += phonet.o pn_pep.o
2 2
3phonet-objs := \ 3phonet-y := \
4 pn_dev.o \ 4 pn_dev.o \
5 pn_netlink.o \ 5 pn_netlink.o \
6 socket.o \ 6 socket.o \
@@ -8,4 +8,4 @@ phonet-objs := \
8 sysctl.o \ 8 sysctl.o \
9 af_phonet.o 9 af_phonet.o
10 10
11pn_pep-objs := pep.o pep-gprs.o 11pn_pep-y := pep.o pep-gprs.o
diff --git a/net/rds/Makefile b/net/rds/Makefile
index b46eca109688..56d3f6023ced 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -4,7 +4,7 @@ rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
4 loop.o page.o rdma.o 4 loop.o page.o rdma.o
5 5
6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o 6obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
7rds_rdma-objs := rdma_transport.o \ 7rds_rdma-y := rdma_transport.o \
8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ 8 ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
9 ib_sysctl.o ib_rdma.o \ 9 ib_sysctl.o ib_rdma.o \
10 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \ 10 iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
@@ -12,10 +12,8 @@ rds_rdma-objs := rdma_transport.o \
12 12
13 13
14obj-$(CONFIG_RDS_TCP) += rds_tcp.o 14obj-$(CONFIG_RDS_TCP) += rds_tcp.o
15rds_tcp-objs := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ 15rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
16 tcp_send.o tcp_stats.o 16 tcp_send.o tcp_stats.o
17 17
18ifeq ($(CONFIG_RDS_DEBUG), y) 18ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
19EXTRA_CFLAGS += -DDEBUG
20endif
21 19
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156b426f..aeec1d483b17 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
134static void rds_loop_conn_free(void *arg) 134static void rds_loop_conn_free(void *arg)
135{ 135{
136 struct rds_loop_connection *lc = arg; 136 struct rds_loop_connection *lc = arg;
137 unsigned long flags;
138
137 rdsdebug("lc %p\n", lc); 139 rdsdebug("lc %p\n", lc);
140 spin_lock_irqsave(&loop_conns_lock, flags);
138 list_del(&lc->loop_node); 141 list_del(&lc->loop_node);
142 spin_unlock_irqrestore(&loop_conns_lock, flags);
139 kfree(lc); 143 kfree(lc);
140} 144}
141 145
diff --git a/net/rds/message.c b/net/rds/message.c
index 848cff45183b..1fd3d29023d7 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -249,8 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
249 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 249 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
250 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 250 rm->data.op_nents = ceil(total_len, PAGE_SIZE);
251 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); 251 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
252 if (!rm->data.op_sg) 252 if (!rm->data.op_sg) {
253 rds_message_put(rm);
253 return ERR_PTR(-ENOMEM); 254 return ERR_PTR(-ENOMEM);
255 }
254 256
255 for (i = 0; i < rm->data.op_nents; ++i) { 257 for (i = 0; i < rm->data.op_nents; ++i) {
256 sg_set_page(&rm->data.op_sg[i], 258 sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8920f2a83327..4e37c1cbe8b2 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -567,7 +567,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
567 goto out; 567 goto out;
568 } 568 }
569 569
570 if (args->nr_local > (u64)UINT_MAX) { 570 if (args->nr_local > UIO_MAXIOV) {
571 ret = -EMSGSIZE; 571 ret = -EMSGSIZE;
572 goto out; 572 goto out;
573 } 573 }
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6cf2d10..8e0a32001c90 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
221static void rds_tcp_conn_free(void *arg) 221static void rds_tcp_conn_free(void *arg)
222{ 222{
223 struct rds_tcp_connection *tc = arg; 223 struct rds_tcp_connection *tc = arg;
224 unsigned long flags;
224 rdsdebug("freeing tc %p\n", tc); 225 rdsdebug("freeing tc %p\n", tc);
226
227 spin_lock_irqsave(&rds_tcp_conn_lock, flags);
228 list_del(&tc->t_tcp_node);
229 spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
230
225 kmem_cache_free(rds_tcp_conn_slab, tc); 231 kmem_cache_free(rds_tcp_conn_slab, tc);
226} 232}
227 233
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index c46867c61c98..d1c3429b69ed 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -2,7 +2,7 @@
2# Makefile for Linux kernel RxRPC 2# Makefile for Linux kernel RxRPC
3# 3#
4 4
5af-rxrpc-objs := \ 5af-rxrpc-y := \
6 af_rxrpc.o \ 6 af_rxrpc.o \
7 ar-accept.o \ 7 ar-accept.o \
8 ar-ack.o \ 8 ar-ack.o \
@@ -21,7 +21,7 @@ af-rxrpc-objs := \
21 ar-transport.o 21 ar-transport.o
22 22
23ifeq ($(CONFIG_PROC_FS),y) 23ifeq ($(CONFIG_PROC_FS),y)
24af-rxrpc-objs += ar-proc.o 24af-rxrpc-y += ar-proc.o
25endif 25endif
26 26
27obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o 27obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 9f1729bd60de..a53fb25a64ed 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -47,12 +47,12 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
47 case AF_INET: 47 case AF_INET:
48 fl.oif = 0; 48 fl.oif = 0;
49 fl.proto = IPPROTO_UDP, 49 fl.proto = IPPROTO_UDP,
50 fl.nl_u.ip4_u.saddr = 0; 50 fl.fl4_dst = peer->srx.transport.sin.sin_addr.s_addr;
51 fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr; 51 fl.fl4_src = 0;
52 fl.nl_u.ip4_u.tos = 0; 52 fl.fl4_tos = 0;
53 /* assume AFS.CM talking to AFS.FS */ 53 /* assume AFS.CM talking to AFS.FS */
54 fl.uli_u.ports.sport = htons(7001); 54 fl.fl_ip_sport = htons(7001);
55 fl.uli_u.ports.dport = htons(7000); 55 fl.fl_ip_dport = htons(7000);
56 break; 56 break;
57 default: 57 default:
58 BUG(); 58 BUG();
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index efd4f95fd050..f23d9155b1ef 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
268 goto nla_put_failure; 268 goto nla_put_failure;
269 269
270 nla_nest_end(skb, nest); 270 nla_nest_end(skb, nest);
271
272 if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
273 goto nla_put_failure;
274
271 return skb->len; 275 return skb->len;
272 276
273nla_put_failure: 277nla_put_failure:
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78e9cb1..d49c40fb7e09 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = {
34 .populate = cgrp_populate, 34 .populate = cgrp_populate,
35#ifdef CONFIG_NET_CLS_CGROUP 35#ifdef CONFIG_NET_CLS_CGROUP
36 .subsys_id = net_cls_subsys_id, 36 .subsys_id = net_cls_subsys_id,
37#else
38#define net_cls_subsys_id net_cls_subsys.subsys_id
39#endif 37#endif
40 .module = THIS_MODULE, 38 .module = THIS_MODULE,
41}; 39};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 763253257411..ea8f566e720c 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@ retry:
103 103
104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) 104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
105{ 105{
106 textsearch_destroy(EM_TEXT_PRIV(m)->config); 106 if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
107 textsearch_destroy(EM_TEXT_PRIV(m)->config);
107} 108}
108 109
109static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) 110static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e59..0918834ee4a1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
60 60
61 /* check the reason of requeuing without tx lock first */ 61 /* check the reason of requeuing without tx lock first */
62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
63 if (!netif_tx_queue_stopped(txq) && 63 if (!netif_tx_queue_frozen_or_stopped(txq)) {
64 !netif_tx_queue_frozen(txq)) {
65 q->gso_skb = NULL; 64 q->gso_skb = NULL;
66 q->q.qlen--; 65 q->q.qlen--;
67 } else 66 } else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
122 spin_unlock(root_lock); 121 spin_unlock(root_lock);
123 122
124 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
125 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) 124 if (!netif_tx_queue_frozen_or_stopped(txq))
126 ret = dev_hard_start_xmit(skb, dev, txq); 125 ret = dev_hard_start_xmit(skb, dev, txq);
127 126
128 HARD_TX_UNLOCK(dev, txq); 127 HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
144 ret = dev_requeue_skb(skb, q); 143 ret = dev_requeue_skb(skb, q);
145 } 144 }
146 145
147 if (ret && (netif_tx_queue_stopped(txq) || 146 if (ret && netif_tx_queue_frozen_or_stopped(txq))
148 netif_tx_queue_frozen(txq)))
149 ret = 0; 147 ret = 0;
150 148
151 return ret; 149 return ret;
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
555 size = QDISC_ALIGN(sizeof(*sch)); 553 size = QDISC_ALIGN(sizeof(*sch));
556 size += ops->priv_size + (QDISC_ALIGNTO - 1); 554 size += ops->priv_size + (QDISC_ALIGNTO - 1);
557 555
558 p = kzalloc(size, GFP_KERNEL); 556 p = kzalloc_node(size, GFP_KERNEL,
557 netdev_queue_numa_node_read(dev_queue));
558
559 if (!p) 559 if (!p)
560 goto errout; 560 goto errout;
561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
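
Qdisc state is touched on every transmit, so allocating it on the NUMA node that services the tx queue keeps that memory local. A sketch of the two halves of the plumbing (the _write helper name is our assumption about the driver side from the same series):

/* driver side: record which node services this queue */
netdev_queue_numa_node_write(txq, cpu_to_node(irq_cpu));

/* qdisc_alloc() side, as changed above; kzalloc_node() falls back to the
 * default policy when the recorded node is NUMA_NO_NODE.
 */
p = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(txq));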
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af9596709..106479a7c94a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@ restart:
309 if (__netif_tx_trylock(slave_txq)) { 309 if (__netif_tx_trylock(slave_txq)) {
310 unsigned int length = qdisc_pkt_len(skb); 310 unsigned int length = qdisc_pkt_len(skb);
311 311
312 if (!netif_tx_queue_stopped(slave_txq) && 312 if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
313 !netif_tx_queue_frozen(slave_txq) &&
314 slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { 313 slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
315 txq_trans_update(slave_txq); 314 txq_trans_update(slave_txq);
316 __netif_tx_unlock(slave_txq); 315 __netif_tx_unlock(slave_txq);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c74d85e..e58f9476f29c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific;
92struct kmem_cache *sctp_chunk_cachep __read_mostly; 92struct kmem_cache *sctp_chunk_cachep __read_mostly;
93struct kmem_cache *sctp_bucket_cachep __read_mostly; 93struct kmem_cache *sctp_bucket_cachep __read_mostly;
94 94
95int sysctl_sctp_mem[3]; 95long sysctl_sctp_mem[3];
96int sysctl_sctp_rmem[3]; 96int sysctl_sctp_rmem[3];
97int sysctl_sctp_wmem[3]; 97int sysctl_sctp_wmem[3];
98 98
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9cc1167..842c7f3650b9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; 111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
112 112
113extern struct kmem_cache *sctp_bucket_cachep; 113extern struct kmem_cache *sctp_bucket_cachep;
114extern int sysctl_sctp_mem[3]; 114extern long sysctl_sctp_mem[3];
115extern int sysctl_sctp_rmem[3]; 115extern int sysctl_sctp_rmem[3];
116extern int sysctl_sctp_wmem[3]; 116extern int sysctl_sctp_wmem[3];
117 117
118static int sctp_memory_pressure; 118static int sctp_memory_pressure;
119static atomic_t sctp_memory_allocated; 119static atomic_long_t sctp_memory_allocated;
120struct percpu_counter sctp_sockets_allocated; 120struct percpu_counter sctp_sockets_allocated;
121 121
122static void sctp_enter_memory_pressure(struct sock *sk) 122static void sctp_enter_memory_pressure(struct sock *sk)
@@ -6047,7 +6047,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6047 * will suddenly eat the receive_queue. 6047 * will suddenly eat the receive_queue.
6048 * 6048 *
6049 * Look at current nfs client by the way... 6049 * Look at current nfs client by the way...
6050 * However, this function was corrent in any case. 8) 6050 * However, this function was correct in any case. 8)
6051 */ 6051 */
6052 if (flags & MSG_PEEK) { 6052 if (flags & MSG_PEEK) {
6053 spin_lock_bh(&sk->sk_receive_queue.lock); 6053 spin_lock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590bbe0c0..50cb57f0919e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@ static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16; 55static int rwnd_scale_max = 16;
56 56
57extern int sysctl_sctp_mem[3]; 57extern long sysctl_sctp_mem[3];
58extern int sysctl_sctp_rmem[3]; 58extern int sysctl_sctp_rmem[3];
59extern int sysctl_sctp_wmem[3]; 59extern int sysctl_sctp_wmem[3];
60 60
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = {
203 .data = &sysctl_sctp_mem, 203 .data = &sysctl_sctp_mem,
204 .maxlen = sizeof(sysctl_sctp_mem), 204 .maxlen = sizeof(sysctl_sctp_mem),
205 .mode = 0644, 205 .mode = 0644,
206 .proc_handler = proc_dointvec, 206 .proc_handler = proc_doulongvec_minmax
207 }, 207 },
208 { 208 {
209 .procname = "sctp_rmem", 209 .procname = "sctp_rmem",
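
With sysctl_sctp_mem now an array of long, the sysctl entry must use the long-aware handler: proc_dointvec computes the element count as .maxlen / sizeof(int), which on 64-bit would claim six 4-byte entries where three 8-byte ones live, mangling every value. The corrected entry, as changed above:

{
	.procname     = "sctp_mem",
	.data         = &sysctl_sctp_mem,
	.maxlen       = sizeof(sysctl_sctp_mem),  /* 3 * sizeof(long) */
	.mode         = 0644,
	.proc_handler = proc_doulongvec_minmax,
},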
diff --git a/net/socket.c b/net/socket.c
index 3ca2fd9e3720..c898df76e924 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -156,7 +156,7 @@ static const struct file_operations socket_file_ops = {
156 */ 156 */
157 157
158static DEFINE_SPINLOCK(net_family_lock); 158static DEFINE_SPINLOCK(net_family_lock);
159static const struct net_proto_family *net_families[NPROTO] __read_mostly; 159static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
160 160
161/* 161/*
162 * Statistics counters of the socket lists 162 * Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
1200 * requested real, full-featured networking support upon configuration. 1200 * requested real, full-featured networking support upon configuration.
1201 * Otherwise module support will break! 1201 * Otherwise module support will break!
1202 */ 1202 */
1203 if (net_families[family] == NULL) 1203 if (rcu_access_pointer(net_families[family]) == NULL)
1204 request_module("net-pf-%d", family); 1204 request_module("net-pf-%d", family);
1205#endif 1205#endif
1206 1206
@@ -2332,10 +2332,11 @@ int sock_register(const struct net_proto_family *ops)
2332 } 2332 }
2333 2333
2334 spin_lock(&net_family_lock); 2334 spin_lock(&net_family_lock);
2335 if (net_families[ops->family]) 2335 if (rcu_dereference_protected(net_families[ops->family],
2336 lockdep_is_held(&net_family_lock)))
2336 err = -EEXIST; 2337 err = -EEXIST;
2337 else { 2338 else {
2338 net_families[ops->family] = ops; 2339 rcu_assign_pointer(net_families[ops->family], ops);
2339 err = 0; 2340 err = 0;
2340 } 2341 }
2341 spin_unlock(&net_family_lock); 2342 spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@ void sock_unregister(int family)
2363 BUG_ON(family < 0 || family >= NPROTO); 2364 BUG_ON(family < 0 || family >= NPROTO);
2364 2365
2365 spin_lock(&net_family_lock); 2366 spin_lock(&net_family_lock);
2366 net_families[family] = NULL; 2367 rcu_assign_pointer(net_families[family], NULL);
2367 spin_unlock(&net_family_lock); 2368 spin_unlock(&net_family_lock);
2368 2369
2369 synchronize_rcu(); 2370 synchronize_rcu();
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 7350d86a32ee..9e4cb59ef9f0 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -4,10 +4,10 @@
4 4
5obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o 5obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
6 6
7auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ 7auth_rpcgss-y := auth_gss.o gss_generic_token.o \
8 gss_mech_switch.o svcauth_gss.o 8 gss_mech_switch.o svcauth_gss.o
9 9
10obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o 10obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
11 11
12rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ 12rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
13 gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o 13 gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 8a2e89bffde5..886715a75259 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -35,11 +35,9 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "addr.h" 38#include "addr.h"
40#include "zone.h" 39#include "zone.h"
41#include "cluster.h" 40#include "cluster.h"
42#include "net.h"
43 41
44/** 42/**
45 * tipc_addr_domain_valid - validates a network domain address 43 * tipc_addr_domain_valid - validates a network domain address
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 22a60fc98392..6d828d9eda42 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -36,17 +36,9 @@
36 */ 36 */
37 37
38#include "core.h" 38#include "core.h"
39#include "msg.h"
40#include "dbg.h"
41#include "link.h" 39#include "link.h"
42#include "net.h"
43#include "node.h"
44#include "port.h" 40#include "port.h"
45#include "addr.h"
46#include "node_subscr.h"
47#include "name_distr.h" 41#include "name_distr.h"
48#include "bearer.h"
49#include "name_table.h"
50#include "bcast.h" 42#include "bcast.h"
51 43
52#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 9927d1d56c4f..885da94be4ac 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -36,12 +36,9 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "bearer.h" 39#include "bearer.h"
41#include "link.h"
42#include "port.h" 40#include "port.h"
43#include "discover.h" 41#include "discover.h"
44#include "bcast.h"
45 42
46#define MAX_ADDR_STR 32 43#define MAX_ADDR_STR 32
47 44
@@ -625,7 +622,7 @@ int tipc_block_bearer(const char *name)
625 * Note: This routine assumes caller holds tipc_net_lock. 622 * Note: This routine assumes caller holds tipc_net_lock.
626 */ 623 */
627 624
628static int bearer_disable(struct bearer *b_ptr) 625static void bearer_disable(struct bearer *b_ptr)
629{ 626{
630 struct link *l_ptr; 627 struct link *l_ptr;
631 struct link *temp_l_ptr; 628 struct link *temp_l_ptr;
@@ -641,7 +638,6 @@ static int bearer_disable(struct bearer *b_ptr)
641 } 638 }
642 spin_unlock_bh(&b_ptr->publ.lock); 639 spin_unlock_bh(&b_ptr->publ.lock);
643 memset(b_ptr, 0, sizeof(struct bearer)); 640 memset(b_ptr, 0, sizeof(struct bearer));
644 return 0;
645} 641}
646 642
647int tipc_disable_bearer(const char *name) 643int tipc_disable_bearer(const char *name)
@@ -654,8 +650,10 @@ int tipc_disable_bearer(const char *name)
654 if (b_ptr == NULL) { 650 if (b_ptr == NULL) {
655 warn("Attempt to disable unknown bearer <%s>\n", name); 651 warn("Attempt to disable unknown bearer <%s>\n", name);
656 res = -EINVAL; 652 res = -EINVAL;
657 } else 653 } else {
658 res = bearer_disable(b_ptr); 654 bearer_disable(b_ptr);
655 res = 0;
656 }
659 write_unlock_bh(&tipc_net_lock); 657 write_unlock_bh(&tipc_net_lock);
660 return res; 658 return res;
661} 659}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index a850b389663e..85f451d5aacf 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,12 +37,50 @@
37#ifndef _TIPC_BEARER_H 37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H 38#define _TIPC_BEARER_H
39 39
40#include "core.h"
41#include "bcast.h" 40#include "bcast.h"
42 41
43#define MAX_BEARERS 8 42#define MAX_BEARERS 8
44#define MAX_MEDIA 4 43#define MAX_MEDIA 4
45 44
45/*
46 * Identifiers of supported TIPC media types
47 */
48#define TIPC_MEDIA_TYPE_ETH 1
49
50/*
51 * Destination address structure used by TIPC bearers when sending messages
52 *
53 * IMPORTANT: The fields of this structure MUST be stored using the specified
54 * byte order indicated below, as the structure is exchanged between nodes
55 * as part of a link setup process.
56 */
57struct tipc_media_addr {
58 __be32 type; /* bearer type (network byte order) */
59 union {
60 __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
61 } dev_addr;
62};
63
64/**
65 * struct tipc_bearer - TIPC bearer info available to media code
66 * @usr_handle: pointer to additional media-specific information about bearer
67 * @mtu: max packet size bearer can support
68 * @blocked: non-zero if bearer is blocked
69 * @lock: spinlock for controlling access to bearer
70 * @addr: media-specific address associated with bearer
71 * @name: bearer name (format = media:interface)
72 *
73 * Note: TIPC initializes "name" and "lock" fields; media code is responsible
 74 * for initializing all other fields when a bearer is enabled.
75 */
76struct tipc_bearer {
77 void *usr_handle;
78 u32 mtu;
79 int blocked;
80 spinlock_t lock;
81 struct tipc_media_addr addr;
82 char name[TIPC_MAX_BEARER_NAME];
83};
46 84
47/** 85/**
48 * struct media - TIPC media information available to internal users 86 * struct media - TIPC media information available to internal users
@@ -55,7 +93,7 @@
55 * @priority: default link (and bearer) priority 93 * @priority: default link (and bearer) priority
56 * @tolerance: default time (in ms) before declaring link failure 94 * @tolerance: default time (in ms) before declaring link failure
57 * @window: default window (in packets) before declaring link congestion 95 * @window: default window (in packets) before declaring link congestion
58 * @type_id: TIPC media identifier [defined in tipc_bearer.h] 96 * @type_id: TIPC media identifier
59 * @name: media name 97 * @name: media name
60 */ 98 */
61 99
@@ -116,6 +154,34 @@ struct link;
116 154
117extern struct bearer tipc_bearers[]; 155extern struct bearer tipc_bearers[];
118 156
157/*
158 * TIPC routines available to supported media types
159 */
160int tipc_register_media(u32 media_type,
161 char *media_name, int (*enable)(struct tipc_bearer *),
162 void (*disable)(struct tipc_bearer *),
163 int (*send_msg)(struct sk_buff *,
164 struct tipc_bearer *, struct tipc_media_addr *),
165 char *(*addr2str)(struct tipc_media_addr *a,
166 char *str_buf, int str_size),
167 struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
168 const u32 link_tolerance, /* [ms] */
169 const u32 send_window_limit);
170
171void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
172
173int tipc_block_bearer(const char *name);
174void tipc_continue(struct tipc_bearer *tb_ptr);
175
176int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
177int tipc_disable_bearer(const char *name);
178
179/*
180 * Routines made available to TIPC by supported media types
181 */
182int tipc_eth_media_start(void);
183void tipc_eth_media_stop(void);
184
119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 185void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *tipc_media_get_names(void); 186struct sk_buff *tipc_media_get_names(void);
121 187
@@ -126,7 +192,6 @@ void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
126struct bearer *tipc_bearer_find_interface(const char *if_name); 192struct bearer *tipc_bearer_find_interface(const char *if_name);
127int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr); 193int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
128int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr); 194int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
129int tipc_bearer_init(void);
130void tipc_bearer_stop(void); 195void tipc_bearer_stop(void);
131void tipc_bearer_lock_push(struct bearer *b_ptr); 196void tipc_bearer_lock_push(struct bearer *b_ptr);
132 197
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 7fea14b98b97..405be87157ba 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -36,17 +36,10 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "cluster.h" 38#include "cluster.h"
39#include "addr.h"
40#include "node_subscr.h"
41#include "link.h" 39#include "link.h"
42#include "node.h"
43#include "net.h"
44#include "msg.h"
45#include "bearer.h"
46 40
47static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, 41static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
48 u32 lower, u32 upper); 42 u32 lower, u32 upper);
49static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
50 43
51struct tipc_node **tipc_local_nodes = NULL; 44struct tipc_node **tipc_local_nodes = NULL;
52struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}}; 45struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 50a6133a3668..bdde39f0436b 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -35,23 +35,11 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "bearer.h"
40#include "port.h" 38#include "port.h"
41#include "link.h" 39#include "link.h"
42#include "zone.h"
43#include "addr.h"
44#include "name_table.h" 40#include "name_table.h"
45#include "node.h" 41#include "user_reg.h"
46#include "config.h" 42#include "config.h"
47#include "discover.h"
48
49struct subscr_data {
50 char usr_handle[8];
51 u32 domain;
52 u32 port_ref;
53 struct list_head subd_list;
54};
55 43
56struct manager { 44struct manager {
57 u32 user_ref; 45 u32 user_ref;
@@ -572,7 +560,7 @@ int tipc_cfg_init(void)
572 struct tipc_name_seq seq; 560 struct tipc_name_seq seq;
573 int res; 561 int res;
574 562
575 res = tipc_attach(&mng.user_ref, NULL, NULL); 563 res = tipc_attach(&mng.user_ref);
576 if (res) 564 if (res)
577 goto failed; 565 goto failed;
578 566
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 481e12ece715..443159a166fd 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -39,7 +39,6 @@
39 39
40/* ---------------------------------------------------------------------- */ 40/* ---------------------------------------------------------------------- */
41 41
42#include "core.h"
43#include "link.h" 42#include "link.h"
44 43
45struct sk_buff *tipc_cfg_reply_alloc(int payload_size); 44struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index e2a09eb8efd4..f5d62c174de2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -40,7 +40,6 @@
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42#include "core.h" 42#include "core.h"
43#include "dbg.h"
44#include "ref.h" 43#include "ref.h"
45#include "net.h" 44#include "net.h"
46#include "user_reg.h" 45#include "user_reg.h"
@@ -236,43 +235,3 @@ module_exit(tipc_exit);
236MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication"); 235MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
237MODULE_LICENSE("Dual BSD/GPL"); 236MODULE_LICENSE("Dual BSD/GPL");
238MODULE_VERSION(TIPC_MOD_VER); 237MODULE_VERSION(TIPC_MOD_VER);
239
240/* Native TIPC API for kernel-space applications (see tipc.h) */
241
242EXPORT_SYMBOL(tipc_attach);
243EXPORT_SYMBOL(tipc_detach);
244EXPORT_SYMBOL(tipc_createport);
245EXPORT_SYMBOL(tipc_deleteport);
246EXPORT_SYMBOL(tipc_ownidentity);
247EXPORT_SYMBOL(tipc_portimportance);
248EXPORT_SYMBOL(tipc_set_portimportance);
249EXPORT_SYMBOL(tipc_portunreliable);
250EXPORT_SYMBOL(tipc_set_portunreliable);
251EXPORT_SYMBOL(tipc_portunreturnable);
252EXPORT_SYMBOL(tipc_set_portunreturnable);
253EXPORT_SYMBOL(tipc_publish);
254EXPORT_SYMBOL(tipc_withdraw);
255EXPORT_SYMBOL(tipc_connect2port);
256EXPORT_SYMBOL(tipc_disconnect);
257EXPORT_SYMBOL(tipc_shutdown);
258EXPORT_SYMBOL(tipc_send);
259EXPORT_SYMBOL(tipc_send2name);
260EXPORT_SYMBOL(tipc_send2port);
261EXPORT_SYMBOL(tipc_multicast);
262
263/* TIPC API for external bearers (see tipc_bearer.h) */
264
265EXPORT_SYMBOL(tipc_block_bearer);
266EXPORT_SYMBOL(tipc_continue);
267EXPORT_SYMBOL(tipc_disable_bearer);
268EXPORT_SYMBOL(tipc_enable_bearer);
269EXPORT_SYMBOL(tipc_recv_msg);
270EXPORT_SYMBOL(tipc_register_media);
271
272/* TIPC API for external APIs (see tipc_port.h) */
273
274EXPORT_SYMBOL(tipc_createport_raw);
275EXPORT_SYMBOL(tipc_reject_msg);
276EXPORT_SYMBOL(tipc_send_buf_fast);
277EXPORT_SYMBOL(tipc_acknowledge);
278
diff --git a/net/tipc/core.h b/net/tipc/core.h
index e19389e57227..ca7e171c1043 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -39,10 +39,6 @@
39 39
40#include <linux/tipc.h> 40#include <linux/tipc.h>
41#include <linux/tipc_config.h> 41#include <linux/tipc_config.h>
42#include <net/tipc/tipc_msg.h>
43#include <net/tipc/tipc_port.h>
44#include <net/tipc/tipc_bearer.h>
45#include <net/tipc/tipc.h>
46#include <linux/types.h> 42#include <linux/types.h>
47#include <linux/kernel.h> 43#include <linux/kernel.h>
48#include <linux/errno.h> 44#include <linux/errno.h>
@@ -62,6 +58,9 @@
62 58
63#define TIPC_MOD_VER "2.0.0" 59#define TIPC_MOD_VER "2.0.0"
64 60
61struct tipc_msg; /* msg.h */
62struct print_buf; /* dbg.h */
63
65/* 64/*
66 * TIPC sanity test macros 65 * TIPC sanity test macros
67 */ 66 */
@@ -174,6 +173,13 @@ void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
174#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 173#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
175 174
176/* 175/*
176 * TIPC operating mode routines
177 */
178#define TIPC_NOT_RUNNING 0
179#define TIPC_NODE_MODE 1
180#define TIPC_NET_MODE 2
181
182/*
177 * Global configuration variables 183 * Global configuration variables
178 */ 184 */
179 185
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 4a7cd3719b78..f2ce36baf42e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,9 +35,7 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "link.h" 38#include "link.h"
40#include "zone.h"
41#include "discover.h" 39#include "discover.h"
42#include "port.h" 40#include "port.h"
43#include "name_table.h" 41#include "name_table.h"
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index f8e750636123..d2c3cffb79fc 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,8 +37,6 @@
37#ifndef _TIPC_DISCOVER_H 37#ifndef _TIPC_DISCOVER_H
38#define _TIPC_DISCOVER_H 38#define _TIPC_DISCOVER_H
39 39
40#include "core.h"
41
42struct link_req; 40struct link_req;
43 41
44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 42struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6e988ba485fd..ee683cc8f4b1 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -34,13 +34,13 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <net/tipc/tipc.h>
38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h> 37#include <linux/netdevice.h>
41#include <linux/slab.h> 38#include <linux/slab.h>
42#include <net/net_namespace.h> 39#include <net/net_namespace.h>
43 40
41#include "core.h"
42#include "bearer.h"
43
44#define MAX_ETH_BEARERS 2 44#define MAX_ETH_BEARERS 2
45#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI 45#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL 46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b31992ccd5d3..cf414cf05e72 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -35,19 +35,11 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "link.h" 38#include "link.h"
40#include "net.h"
41#include "node.h"
42#include "port.h" 39#include "port.h"
43#include "addr.h"
44#include "node_subscr.h"
45#include "name_distr.h" 40#include "name_distr.h"
46#include "bearer.h"
47#include "name_table.h"
48#include "discover.h" 41#include "discover.h"
49#include "config.h" 42#include "config.h"
50#include "bcast.h"
51 43
52 44
53/* 45/*
@@ -57,12 +49,6 @@
57#define INVALID_SESSION 0x10000 49#define INVALID_SESSION 0x10000
58 50
59/* 51/*
60 * Limit for deferred reception queue:
61 */
62
63#define DEF_QUEUE_LIMIT 256u
64
65/*
66 * Link state events: 52 * Link state events:
67 */ 53 */
68 54
diff --git a/net/tipc/link.h b/net/tipc/link.h
index f98bc613de67..c562888d25da 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -39,7 +39,6 @@
39 39
40#include "dbg.h" 40#include "dbg.h"
41#include "msg.h" 41#include "msg.h"
42#include "bearer.h"
43#include "node.h" 42#include "node.h"
44 43
45#define PUSH_FAILED 1 44#define PUSH_FAILED 1
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ecb532fb0351..ee6b4c68d4a4 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -36,9 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "addr.h" 38#include "addr.h"
39#include "dbg.h"
40#include "msg.h" 39#include "msg.h"
41#include "bearer.h"
42 40
43u32 tipc_msg_tot_importance(struct tipc_msg *m) 41u32 tipc_msg_tot_importance(struct tipc_msg *m)
44{ 42{
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 031aad18efce..aee53864d7a0 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,10 +37,51 @@
37#ifndef _TIPC_MSG_H 37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H 38#define _TIPC_MSG_H
39 39
40#include "core.h" 40#include "bearer.h"
41 41
42#define TIPC_VERSION 2 42#define TIPC_VERSION 2
43 43
44/*
45 * TIPC user data message header format, version 2:
46 *
47 *
48 * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
49 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
50 * w0:|vers | user |hdr sz |n|d|s|-| message size |
51 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
52 * w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
53 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
54 * w2:| link level ack no | broadcast/link level seq no |
55 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
56 * w3:| previous node |
57 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
58 * w4:| originating port |
59 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
60 * w5:| destination port |
61 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
62 * w6:| originating node |
63 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * w7:| destination node |
65 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
66 * w8:| name type / transport sequence number |
67 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
68 * w9:| name instance/multicast lower bound |
69 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
70 * wA:| multicast upper bound |
71 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72 * / /
73 * \ options \
74 * / /
75 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 *
77 */
78
79#define TIPC_CONN_MSG 0
80#define TIPC_MCAST_MSG 1
81#define TIPC_NAMED_MSG 2
82#define TIPC_DIRECT_MSG 3
83
84
44#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ 85#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
45#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 86#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
46#define LONG_H_SIZE 40 /* Named messages */ 87#define LONG_H_SIZE 40 /* Named messages */
@@ -52,20 +93,26 @@
52#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) 93#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
53 94
54 95
55/* 96struct tipc_msg {
56 TIPC user data message header format, version 2 97 __be32 hdr[15];
98};
57 99
58 - Fundamental definitions available to privileged TIPC users
59 are located in tipc_msg.h.
60 - Remaining definitions available to TIPC internal users appear below.
61*/
62 100
101static inline u32 msg_word(struct tipc_msg *m, u32 pos)
102{
103 return ntohl(m->hdr[pos]);
104}
63 105
64static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val) 106static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
65{ 107{
66 m->hdr[w] = htonl(val); 108 m->hdr[w] = htonl(val);
67} 109}
68 110
111static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
112{
113 return (msg_word(m, w) >> pos) & mask;
114}
115
69static inline void msg_set_bits(struct tipc_msg *m, u32 w, 116static inline void msg_set_bits(struct tipc_msg *m, u32 w,
70 u32 pos, u32 mask, u32 val) 117 u32 pos, u32 mask, u32 val)
71{ 118{
@@ -112,16 +159,36 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n)
112 msg_set_bits(m, 0, 25, 0xf, n); 159 msg_set_bits(m, 0, 25, 0xf, n);
113} 160}
114 161
162static inline u32 msg_importance(struct tipc_msg *m)
163{
164 return msg_bits(m, 0, 25, 0xf);
165}
166
115static inline void msg_set_importance(struct tipc_msg *m, u32 i) 167static inline void msg_set_importance(struct tipc_msg *m, u32 i)
116{ 168{
117 msg_set_user(m, i); 169 msg_set_user(m, i);
118} 170}
119 171
172static inline u32 msg_hdr_sz(struct tipc_msg *m)
173{
174 return msg_bits(m, 0, 21, 0xf) << 2;
175}
176
120static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n) 177static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
121{ 178{
122 msg_set_bits(m, 0, 21, 0xf, n>>2); 179 msg_set_bits(m, 0, 21, 0xf, n>>2);
123} 180}
124 181
182static inline u32 msg_size(struct tipc_msg *m)
183{
184 return msg_bits(m, 0, 0, 0x1ffff);
185}
186
187static inline u32 msg_data_sz(struct tipc_msg *m)
188{
189 return msg_size(m) - msg_hdr_sz(m);
190}
191
125static inline int msg_non_seq(struct tipc_msg *m) 192static inline int msg_non_seq(struct tipc_msg *m)
126{ 193{
127 return msg_bits(m, 0, 20, 1); 194 return msg_bits(m, 0, 20, 1);
@@ -162,11 +229,36 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
162 * Word 1 229 * Word 1
163 */ 230 */
164 231
232static inline u32 msg_type(struct tipc_msg *m)
233{
234 return msg_bits(m, 1, 29, 0x7);
235}
236
165static inline void msg_set_type(struct tipc_msg *m, u32 n) 237static inline void msg_set_type(struct tipc_msg *m, u32 n)
166{ 238{
167 msg_set_bits(m, 1, 29, 0x7, n); 239 msg_set_bits(m, 1, 29, 0x7, n);
168} 240}
169 241
242static inline u32 msg_named(struct tipc_msg *m)
243{
244 return msg_type(m) == TIPC_NAMED_MSG;
245}
246
247static inline u32 msg_mcast(struct tipc_msg *m)
248{
249 return msg_type(m) == TIPC_MCAST_MSG;
250}
251
252static inline u32 msg_connected(struct tipc_msg *m)
253{
254 return msg_type(m) == TIPC_CONN_MSG;
255}
256
257static inline u32 msg_errcode(struct tipc_msg *m)
258{
259 return msg_bits(m, 1, 25, 0xf);
260}
261
170static inline void msg_set_errcode(struct tipc_msg *m, u32 err) 262static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
171{ 263{
172 msg_set_bits(m, 1, 25, 0xf, err); 264 msg_set_bits(m, 1, 25, 0xf, err);
@@ -257,31 +349,68 @@ static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
257 */ 349 */
258 350
259 351
352static inline u32 msg_prevnode(struct tipc_msg *m)
353{
354 return msg_word(m, 3);
355}
356
260static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) 357static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
261{ 358{
262 msg_set_word(m, 3, a); 359 msg_set_word(m, 3, a);
263} 360}
264 361
362static inline u32 msg_origport(struct tipc_msg *m)
363{
364 return msg_word(m, 4);
365}
366
265static inline void msg_set_origport(struct tipc_msg *m, u32 p) 367static inline void msg_set_origport(struct tipc_msg *m, u32 p)
266{ 368{
267 msg_set_word(m, 4, p); 369 msg_set_word(m, 4, p);
268} 370}
269 371
372static inline u32 msg_destport(struct tipc_msg *m)
373{
374 return msg_word(m, 5);
375}
376
270static inline void msg_set_destport(struct tipc_msg *m, u32 p) 377static inline void msg_set_destport(struct tipc_msg *m, u32 p)
271{ 378{
272 msg_set_word(m, 5, p); 379 msg_set_word(m, 5, p);
273} 380}
274 381
382static inline u32 msg_mc_netid(struct tipc_msg *m)
383{
384 return msg_word(m, 5);
385}
386
275static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p) 387static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
276{ 388{
277 msg_set_word(m, 5, p); 389 msg_set_word(m, 5, p);
278} 390}
279 391
392static inline int msg_short(struct tipc_msg *m)
393{
394 return msg_hdr_sz(m) == 24;
395}
396
397static inline u32 msg_orignode(struct tipc_msg *m)
398{
399 if (likely(msg_short(m)))
400 return msg_prevnode(m);
401 return msg_word(m, 6);
402}
403
280static inline void msg_set_orignode(struct tipc_msg *m, u32 a) 404static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
281{ 405{
282 msg_set_word(m, 6, a); 406 msg_set_word(m, 6, a);
283} 407}
284 408
409static inline u32 msg_destnode(struct tipc_msg *m)
410{
411 return msg_word(m, 7);
412}
413
285static inline void msg_set_destnode(struct tipc_msg *m, u32 a) 414static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
286{ 415{
287 msg_set_word(m, 7, a); 416 msg_set_word(m, 7, a);
@@ -299,6 +428,11 @@ static inline u32 msg_routed(struct tipc_msg *m)
299 return(msg_destnode(m) ^ msg_orignode(m)) >> 11; 428 return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
300} 429}
301 430
431static inline u32 msg_nametype(struct tipc_msg *m)
432{
433 return msg_word(m, 8);
434}
435
302static inline void msg_set_nametype(struct tipc_msg *m, u32 n) 436static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
303{ 437{
304 msg_set_word(m, 8, n); 438 msg_set_word(m, 8, n);
@@ -324,6 +458,16 @@ static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
324 msg_set_word(m, 8, n); 458 msg_set_word(m, 8, n);
325} 459}
326 460
461static inline u32 msg_nameinst(struct tipc_msg *m)
462{
463 return msg_word(m, 9);
464}
465
466static inline u32 msg_namelower(struct tipc_msg *m)
467{
468 return msg_nameinst(m);
469}
470
327static inline void msg_set_namelower(struct tipc_msg *m, u32 n) 471static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
328{ 472{
329 msg_set_word(m, 9, n); 473 msg_set_word(m, 9, n);
@@ -334,11 +478,21 @@ static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
334 msg_set_namelower(m, n); 478 msg_set_namelower(m, n);
335} 479}
336 480
481static inline u32 msg_nameupper(struct tipc_msg *m)
482{
483 return msg_word(m, 10);
484}
485
337static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) 486static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
338{ 487{
339 msg_set_word(m, 10, n); 488 msg_set_word(m, 10, n);
340} 489}
341 490
491static inline unchar *msg_data(struct tipc_msg *m)
492{
493 return ((unchar *)m) + msg_hdr_sz(m);
494}
495
342static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) 496static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
343{ 497{
344 return (struct tipc_msg *)msg_data(m); 498 return (struct tipc_msg *)msg_data(m);
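The accessors this hunk moves into msg.h all reduce to one pattern: fetch a network-byte-order header word, then shift and mask per the word map drawn above. A small userspace sketch of that pattern against word 0 (assumes POSIX ntohl()/htonl(); an illustration, not the kernel code):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tipc_msg { uint32_t hdr[15]; };

static uint32_t msg_word(struct tipc_msg *m, uint32_t pos)
{
	return ntohl(m->hdr[pos]);             /* wire order -> host order */
}

static void msg_set_word(struct tipc_msg *m, uint32_t w, uint32_t val)
{
	m->hdr[w] = htonl(val);
}

static uint32_t msg_bits(struct tipc_msg *m, uint32_t w,
			 uint32_t pos, uint32_t mask)
{
	return (msg_word(m, w) >> pos) & mask; /* extract one bit field */
}

int main(void)
{
	struct tipc_msg m = { { 0 } };

	/* word 0: vers(3) | user(4) | hdr sz(4) | flags | size(17) */
	msg_set_word(&m, 0, (2u << 29) | (4u << 25) | (10u << 21) | 40u);

	printf("vers=%u user=%u hdr_sz=%u size=%u\n",
	       (unsigned)msg_bits(&m, 0, 29, 0x7),
	       (unsigned)msg_bits(&m, 0, 25, 0xf),
	       (unsigned)(msg_bits(&m, 0, 21, 0xf) << 2),
	       (unsigned)msg_bits(&m, 0, 0, 0x1ffff));
	return 0;
}

Every inline helper the hunk adds (msg_type, msg_errcode, msg_importance, ...) is an instance of exactly this mask-and-shift, differing only in word, position, and mask.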
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 7b907171f879..10ff48be3c01 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -36,9 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "cluster.h" 38#include "cluster.h"
39#include "dbg.h"
40#include "link.h" 39#include "link.h"
41#include "msg.h"
42#include "name_distr.h" 40#include "name_distr.h"
43 41
44#define ITEM_SIZE sizeof(struct distr_item) 42#define ITEM_SIZE sizeof(struct distr_item)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 3a8de4334da1..d5adb0456746 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -36,15 +36,10 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "name_table.h" 39#include "name_table.h"
41#include "name_distr.h" 40#include "name_distr.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "subscr.h" 41#include "subscr.h"
45#include "port.h" 42#include "port.h"
46#include "cluster.h"
47#include "bcast.h"
48 43
49static int tipc_nametbl_size = 1024; /* must be a power of 2 */ 44static int tipc_nametbl_size = 1024; /* must be a power of 2 */
50 45
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 1a621cfd6604..c2b4b86c2e6a 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -35,18 +35,13 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "bearer.h"
39#include "net.h" 38#include "net.h"
40#include "zone.h" 39#include "zone.h"
41#include "addr.h"
42#include "name_table.h" 40#include "name_table.h"
43#include "name_distr.h" 41#include "name_distr.h"
44#include "subscr.h" 42#include "subscr.h"
45#include "link.h" 43#include "link.h"
46#include "msg.h"
47#include "port.h" 44#include "port.h"
48#include "bcast.h"
49#include "discover.h"
50#include "config.h" 45#include "config.h"
51 46
52/* 47/*
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b4d87eb2dc5d..df71dfc3a9ae 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -37,16 +37,9 @@
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "node.h" 39#include "node.h"
40#include "cluster.h"
41#include "net.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "link.h"
45#include "port.h" 40#include "port.h"
46#include "bearer.h"
47#include "name_distr.h" 41#include "name_distr.h"
48 42
49void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
50static void node_lost_contact(struct tipc_node *n_ptr); 43static void node_lost_contact(struct tipc_node *n_ptr);
51static void node_established_contact(struct tipc_node *n_ptr); 44static void node_established_contact(struct tipc_node *n_ptr);
52 45
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 19194d476a9e..018a55332d91 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -35,10 +35,8 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "node_subscr.h" 38#include "node_subscr.h"
40#include "node.h" 39#include "node.h"
41#include "addr.h"
42 40
43/** 41/**
44 * tipc_nodesub_subscribe - create "node down" subscription for specified node 42 * tipc_nodesub_subscribe - create "node down" subscription for specified node
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 82092eaa1536..7873283f4965 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -36,15 +36,9 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "dbg.h"
40#include "port.h" 39#include "port.h"
41#include "addr.h"
42#include "link.h"
43#include "node.h"
44#include "name_table.h" 40#include "name_table.h"
45#include "user_reg.h" 41#include "user_reg.h"
46#include "msg.h"
47#include "bcast.h"
48 42
49/* Connection management: */ 43/* Connection management: */
50#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */ 44#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
@@ -94,7 +88,7 @@ static void port_incr_out_seqno(struct port *p_ptr)
94 * tipc_multicast - send a multicast message to local and remote destinations 88 * tipc_multicast - send a multicast message to local and remote destinations
95 */ 89 */
96 90
97int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain, 91int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
98 u32 num_sect, struct iovec const *msg_sect) 92 u32 num_sect, struct iovec const *msg_sect)
99{ 93{
100 struct tipc_msg *hdr; 94 struct tipc_msg *hdr;
@@ -989,13 +983,6 @@ int tipc_createport(u32 user_ref,
989 return 0; 983 return 0;
990} 984}
991 985
992int tipc_ownidentity(u32 ref, struct tipc_portid *id)
993{
994 id->ref = ref;
995 id->node = tipc_own_addr;
996 return 0;
997}
998
999int tipc_portimportance(u32 ref, unsigned int *importance) 986int tipc_portimportance(u32 ref, unsigned int *importance)
1000{ 987{
1001 struct port *p_ptr; 988 struct port *p_ptr;
@@ -1271,16 +1258,11 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1271} 1258}
1272 1259
1273/** 1260/**
1274 * tipc_forward2name - forward message sections to port name 1261 * tipc_send2name - send message sections to port name
1275 */ 1262 */
1276 1263
1277static int tipc_forward2name(u32 ref, 1264int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1278 struct tipc_name const *name, 1265 unsigned int num_sect, struct iovec const *msg_sect)
1279 u32 domain,
1280 u32 num_sect,
1281 struct iovec const *msg_sect,
1282 struct tipc_portid const *orig,
1283 unsigned int importance)
1284{ 1266{
1285 struct port *p_ptr; 1267 struct port *p_ptr;
1286 struct tipc_msg *msg; 1268 struct tipc_msg *msg;
@@ -1294,14 +1276,12 @@ static int tipc_forward2name(u32 ref,
1294 1276
1295 msg = &p_ptr->publ.phdr; 1277 msg = &p_ptr->publ.phdr;
1296 msg_set_type(msg, TIPC_NAMED_MSG); 1278 msg_set_type(msg, TIPC_NAMED_MSG);
1297 msg_set_orignode(msg, orig->node); 1279 msg_set_orignode(msg, tipc_own_addr);
1298 msg_set_origport(msg, orig->ref); 1280 msg_set_origport(msg, ref);
1299 msg_set_hdr_sz(msg, LONG_H_SIZE); 1281 msg_set_hdr_sz(msg, LONG_H_SIZE);
1300 msg_set_nametype(msg, name->type); 1282 msg_set_nametype(msg, name->type);
1301 msg_set_nameinst(msg, name->instance); 1283 msg_set_nameinst(msg, name->instance);
1302 msg_set_lookup_scope(msg, tipc_addr_scope(domain)); 1284 msg_set_lookup_scope(msg, tipc_addr_scope(domain));
1303 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1304 msg_set_importance(msg,importance);
1305 destport = tipc_nametbl_translate(name->type, name->instance, &destnode); 1285 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1306 msg_set_destnode(msg, destnode); 1286 msg_set_destnode(msg, destnode);
1307 msg_set_destport(msg, destport); 1287 msg_set_destport(msg, destport);
@@ -1325,33 +1305,11 @@ static int tipc_forward2name(u32 ref,
1325} 1305}
1326 1306
1327/** 1307/**
1328 * tipc_send2name - send message sections to port name 1308 * tipc_send2port - send message sections to port identity
1329 */
1330
1331int tipc_send2name(u32 ref,
1332 struct tipc_name const *name,
1333 unsigned int domain,
1334 unsigned int num_sect,
1335 struct iovec const *msg_sect)
1336{
1337 struct tipc_portid orig;
1338
1339 orig.ref = ref;
1340 orig.node = tipc_own_addr;
1341 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1342 TIPC_PORT_IMPORTANCE);
1343}
1344
1345/**
1346 * tipc_forward2port - forward message sections to port identity
1347 */ 1309 */
1348 1310
1349static int tipc_forward2port(u32 ref, 1311int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1350 struct tipc_portid const *dest, 1312 unsigned int num_sect, struct iovec const *msg_sect)
1351 unsigned int num_sect,
1352 struct iovec const *msg_sect,
1353 struct tipc_portid const *orig,
1354 unsigned int importance)
1355{ 1313{
1356 struct port *p_ptr; 1314 struct port *p_ptr;
1357 struct tipc_msg *msg; 1315 struct tipc_msg *msg;
@@ -1363,13 +1321,11 @@ static int tipc_forward2port(u32 ref,
1363 1321
1364 msg = &p_ptr->publ.phdr; 1322 msg = &p_ptr->publ.phdr;
1365 msg_set_type(msg, TIPC_DIRECT_MSG); 1323 msg_set_type(msg, TIPC_DIRECT_MSG);
1366 msg_set_orignode(msg, orig->node); 1324 msg_set_orignode(msg, tipc_own_addr);
1367 msg_set_origport(msg, orig->ref); 1325 msg_set_origport(msg, ref);
1368 msg_set_destnode(msg, dest->node); 1326 msg_set_destnode(msg, dest->node);
1369 msg_set_destport(msg, dest->ref); 1327 msg_set_destport(msg, dest->ref);
1370 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1328 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1371 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1372 msg_set_importance(msg, importance);
1373 p_ptr->sent++; 1329 p_ptr->sent++;
1374 if (dest->node == tipc_own_addr) 1330 if (dest->node == tipc_own_addr)
1375 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1331 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1384,31 +1340,11 @@ static int tipc_forward2port(u32 ref,
1384} 1340}
1385 1341
1386/** 1342/**
1387 * tipc_send2port - send message sections to port identity 1343 * tipc_send_buf2port - send message buffer to port identity
1388 */ 1344 */
1389 1345
1390int tipc_send2port(u32 ref, 1346int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1391 struct tipc_portid const *dest, 1347 struct sk_buff *buf, unsigned int dsz)
1392 unsigned int num_sect,
1393 struct iovec const *msg_sect)
1394{
1395 struct tipc_portid orig;
1396
1397 orig.ref = ref;
1398 orig.node = tipc_own_addr;
1399 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1400 TIPC_PORT_IMPORTANCE);
1401}
1402
1403/**
1404 * tipc_forward_buf2port - forward message buffer to port identity
1405 */
1406static int tipc_forward_buf2port(u32 ref,
1407 struct tipc_portid const *dest,
1408 struct sk_buff *buf,
1409 unsigned int dsz,
1410 struct tipc_portid const *orig,
1411 unsigned int importance)
1412{ 1348{
1413 struct port *p_ptr; 1349 struct port *p_ptr;
1414 struct tipc_msg *msg; 1350 struct tipc_msg *msg;
@@ -1420,13 +1356,11 @@ static int tipc_forward_buf2port(u32 ref,
1420 1356
1421 msg = &p_ptr->publ.phdr; 1357 msg = &p_ptr->publ.phdr;
1422 msg_set_type(msg, TIPC_DIRECT_MSG); 1358 msg_set_type(msg, TIPC_DIRECT_MSG);
1423 msg_set_orignode(msg, orig->node); 1359 msg_set_orignode(msg, tipc_own_addr);
1424 msg_set_origport(msg, orig->ref); 1360 msg_set_origport(msg, ref);
1425 msg_set_destnode(msg, dest->node); 1361 msg_set_destnode(msg, dest->node);
1426 msg_set_destport(msg, dest->ref); 1362 msg_set_destport(msg, dest->ref);
1427 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1363 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1428 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1429 msg_set_importance(msg, importance);
1430 msg_set_size(msg, DIR_MSG_H_SIZE + dsz); 1364 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
1431 if (skb_cow(buf, DIR_MSG_H_SIZE)) 1365 if (skb_cow(buf, DIR_MSG_H_SIZE))
1432 return -ENOMEM; 1366 return -ENOMEM;
@@ -1445,20 +1379,3 @@ static int tipc_forward_buf2port(u32 ref,
1445 return -ELINKCONG; 1379 return -ELINKCONG;
1446} 1380}
1447 1381
1448/**
1449 * tipc_send_buf2port - send message buffer to port identity
1450 */
1451
1452int tipc_send_buf2port(u32 ref,
1453 struct tipc_portid const *dest,
1454 struct sk_buff *buf,
1455 unsigned int dsz)
1456{
1457 struct tipc_portid orig;
1458
1459 orig.ref = ref;
1460 orig.node = tipc_own_addr;
1461 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1462 TIPC_PORT_IMPORTANCE);
1463}
1464
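The pattern through this whole file is the same: each exported tipc_send2*() wrapper only filled an origin of {tipc_own_addr, ref} plus the default importance before calling a static tipc_forward2*() worker, so each pair can be folded into one function that sets the origin fields unconditionally. A sketch of the collapsed shape (illustrative names and values only):

#include <stdio.h>

static const unsigned tipc_own_addr = 0x1001;   /* stand-in node address */

/* After the fold: origin comes straight from the caller's own port. */
static int send2name(unsigned ref, const char *name)
{
	printf("'%s' from <%#x:%u>\n", name, tipc_own_addr, ref);
	return 0;
}

int main(void)
{
	return send2name(42, "topology");
}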
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 73bbf442b346..3a807fcec2be 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -37,13 +37,44 @@
37#ifndef _TIPC_PORT_H 37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H 38#define _TIPC_PORT_H
39 39
40#include "core.h"
41#include "ref.h" 40#include "ref.h"
42#include "net.h" 41#include "net.h"
43#include "msg.h" 42#include "msg.h"
44#include "dbg.h"
45#include "node_subscr.h" 43#include "node_subscr.h"
46 44
45#define TIPC_FLOW_CONTROL_WIN 512
46
47typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref,
48 struct sk_buff **buf, unsigned char const *data,
49 unsigned int size, int reason,
50 struct tipc_portid const *attmpt_destid);
51
52typedef void (*tipc_named_msg_err_event) (void *usr_handle, u32 portref,
53 struct sk_buff **buf, unsigned char const *data,
54 unsigned int size, int reason,
55 struct tipc_name_seq const *attmpt_dest);
56
57typedef void (*tipc_conn_shutdown_event) (void *usr_handle, u32 portref,
58 struct sk_buff **buf, unsigned char const *data,
59 unsigned int size, int reason);
60
61typedef void (*tipc_msg_event) (void *usr_handle, u32 portref,
62 struct sk_buff **buf, unsigned char const *data,
63 unsigned int size, unsigned int importance,
64 struct tipc_portid const *origin);
65
66typedef void (*tipc_named_msg_event) (void *usr_handle, u32 portref,
67 struct sk_buff **buf, unsigned char const *data,
68 unsigned int size, unsigned int importance,
69 struct tipc_portid const *orig,
70 struct tipc_name_seq const *dest);
71
72typedef void (*tipc_conn_msg_event) (void *usr_handle, u32 portref,
73 struct sk_buff **buf, unsigned char const *data,
74 unsigned int size);
75
76typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
77
47/** 78/**
48 * struct user_port - TIPC user port (used with native API) 79 * struct user_port - TIPC user port (used with native API)
49 * @user_ref: id of user who created user port 80 * @user_ref: id of user who created user port
@@ -68,6 +99,34 @@ struct user_port {
68}; 99};
69 100
70/** 101/**
102 * struct tipc_port - TIPC port info available to socket API
103 * @usr_handle: pointer to additional user-defined information about port
104 * @lock: pointer to spinlock for controlling access to port
105 * @connected: non-zero if port is currently connected to a peer port
106 * @conn_type: TIPC type used when connection was established
107 * @conn_instance: TIPC instance used when connection was established
108 * @conn_unacked: number of unacknowledged messages received from peer port
109 * @published: non-zero if port has one or more associated names
110 * @congested: non-zero if cannot send because of link or port congestion
111 * @max_pkt: maximum packet size "hint" used when building messages sent by port
112 * @ref: unique reference to port in TIPC object registry
113 * @phdr: preformatted message header used when sending messages
114 */
115struct tipc_port {
116 void *usr_handle;
117 spinlock_t *lock;
118 int connected;
119 u32 conn_type;
120 u32 conn_instance;
121 u32 conn_unacked;
122 int published;
123 u32 congested;
124 u32 max_pkt;
125 u32 ref;
126 struct tipc_msg phdr;
127};
128
129/**
71 * struct port - TIPC port structure 130 * struct port - TIPC port structure
72 * @publ: TIPC port info available to privileged users 131 * @publ: TIPC port info available to privileged users
73 * @port_list: adjacent ports in TIPC's global list of ports 132 * @port_list: adjacent ports in TIPC's global list of ports
@@ -109,11 +168,76 @@ struct port {
109extern spinlock_t tipc_port_list_lock; 168extern spinlock_t tipc_port_list_lock;
110struct port_list; 169struct port_list;
111 170
171/*
172 * TIPC port manipulation routines
173 */
174struct tipc_port *tipc_createport_raw(void *usr_handle,
175 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
176 void (*wakeup)(struct tipc_port *), const u32 importance);
177
178int tipc_reject_msg(struct sk_buff *buf, u32 err);
179
180int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
181
182void tipc_acknowledge(u32 port_ref, u32 ack);
183
184int tipc_createport(unsigned int tipc_user, void *usr_handle,
185 unsigned int importance, tipc_msg_err_event error_cb,
186 tipc_named_msg_err_event named_error_cb,
187 tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
188 tipc_named_msg_event named_msg_cb,
189 tipc_conn_msg_event conn_msg_cb,
190 tipc_continue_event continue_event_cb, u32 *portref);
191
192int tipc_deleteport(u32 portref);
193
194int tipc_portimportance(u32 portref, unsigned int *importance);
195int tipc_set_portimportance(u32 portref, unsigned int importance);
196
197int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
198int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
199
200int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
201int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
202
203int tipc_publish(u32 portref, unsigned int scope,
204 struct tipc_name_seq const *name_seq);
205int tipc_withdraw(u32 portref, unsigned int scope,
206 struct tipc_name_seq const *name_seq);
207
208int tipc_connect2port(u32 portref, struct tipc_portid const *port);
209
210int tipc_disconnect(u32 portref);
211
212int tipc_shutdown(u32 ref);
213
214
215/*
216 * The following routines require that the port be locked on entry
217 */
218int tipc_disconnect_port(struct tipc_port *tp_ptr);
219
220/*
221 * TIPC messaging routines
222 */
223int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect);
224
225int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
226 unsigned int num_sect, struct iovec const *msg_sect);
227
228int tipc_send2port(u32 portref, struct tipc_portid const *dest,
229 unsigned int num_sect, struct iovec const *msg_sect);
230
231int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
232 struct sk_buff *buf, unsigned int dsz);
233
234int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
235 unsigned int section_count, struct iovec const *msg);
236
112int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 237int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
113 struct iovec const *msg_sect, u32 num_sect, 238 struct iovec const *msg_sect, u32 num_sect,
114 int err); 239 int err);
115struct sk_buff *tipc_port_get_ports(void); 240struct sk_buff *tipc_port_get_ports(void);
116struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
117void tipc_port_recv_proto_msg(struct sk_buff *buf); 241void tipc_port_recv_proto_msg(struct sk_buff *buf);
118void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 242void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
119void tipc_port_reinit(void); 243void tipc_port_reinit(void);
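The callback typedefs this hunk brings in-tree are plain function-pointer types: a handler is stored at port creation and invoked later by the dispatcher. A stand-alone sketch of that shape (the handler and values below are hypothetical):

#include <stdio.h>

typedef void (*tipc_continue_event)(void *usr_handle, unsigned portref);

static void on_continue(void *usr_handle, unsigned portref)
{
	printf("port %u writable again (handle=%p)\n", portref, usr_handle);
}

int main(void)
{
	tipc_continue_event cb = on_continue;  /* stored at create time */
	cb(NULL, 7);                           /* fired when congestion ends */
	return 0;
}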
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc3d697..cd0bb77f2673 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -49,10 +49,9 @@
49 49
50#include <linux/tipc.h> 50#include <linux/tipc.h>
51#include <linux/tipc_config.h> 51#include <linux/tipc_config.h>
52#include <net/tipc/tipc_msg.h>
53#include <net/tipc/tipc_port.h>
54 52
55#include "core.h" 53#include "core.h"
54#include "port.h"
56 55
57#define SS_LISTENING -1 /* socket is listening */ 56#define SS_LISTENING -1 /* socket is listening */
58#define SS_READY -2 /* socket is connectionless */ 57#define SS_READY -2 /* socket is connectionless */
@@ -396,6 +395,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
396 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 395 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
397 struct tipc_sock *tsock = tipc_sk(sock->sk); 396 struct tipc_sock *tsock = tipc_sk(sock->sk);
398 397
398 memset(addr, 0, sizeof(*addr));
399 if (peer) { 399 if (peer) {
400 if ((sock->state != SS_CONNECTED) && 400 if ((sock->state != SS_CONNECTED) &&
401 ((peer != 2) || (sock->state != SS_DISCONNECTING))) 401 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
@@ -403,7 +403,8 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
403 addr->addr.id.ref = tsock->peer_name.ref; 403 addr->addr.id.ref = tsock->peer_name.ref;
404 addr->addr.id.node = tsock->peer_name.node; 404 addr->addr.id.node = tsock->peer_name.node;
405 } else { 405 } else {
406 tipc_ownidentity(tsock->p->ref, &addr->addr.id); 406 addr->addr.id.ref = tsock->p->ref;
407 addr->addr.id.node = tipc_own_addr;
407 } 408 }
408 409
409 *uaddr_len = sizeof(*addr); 410 *uaddr_len = sizeof(*addr);
@@ -596,7 +597,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
596 break; 597 break;
597 res = tipc_multicast(tport->ref, 598 res = tipc_multicast(tport->ref,
598 &dest->addr.nameseq, 599 &dest->addr.nameseq,
599 0,
600 m->msg_iovlen, 600 m->msg_iovlen,
601 m->msg_iov); 601 m->msg_iov);
602 } 602 }
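Two of the socket.c changes share one motive: the added memset() guarantees no uninitialized stack bytes in the returned sockaddr ever reach userspace, and the open-coded ref/node assignment replaces the now-removed tipc_ownidentity(). The zero-before-fill idiom in isolation (userspace stand-in, not the kernel code):

#include <stdio.h>
#include <string.h>

struct addr { unsigned ref, node; char pad[8]; };

static void get_name(struct addr *out, unsigned ref, unsigned node)
{
	memset(out, 0, sizeof(*out));   /* the crucial line: no stale bytes */
	out->ref = ref;
	out->node = node;
}

int main(void)
{
	struct addr a;
	get_name(&a, 5, 0x1001);
	printf("ref=%u node=%#x pad[0]=%d\n", a.ref, a.node, a.pad[0]);
	return 0;
}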
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 33313961d010..23f43d03980c 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -35,10 +35,8 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h"
39#include "name_table.h" 38#include "name_table.h"
40#include "port.h" 39#include "user_reg.h"
41#include "ref.h"
42#include "subscr.h" 40#include "subscr.h"
43 41
44/** 42/**
@@ -544,14 +542,14 @@ static void subscr_named_msg_event(void *usr_handle,
544int tipc_subscr_start(void) 542int tipc_subscr_start(void)
545{ 543{
546 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 544 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
547 int res = -1; 545 int res;
548 546
549 memset(&topsrv, 0, sizeof (topsrv)); 547 memset(&topsrv, 0, sizeof (topsrv));
550 spin_lock_init(&topsrv.lock); 548 spin_lock_init(&topsrv.lock);
551 INIT_LIST_HEAD(&topsrv.subscriber_list); 549 INIT_LIST_HEAD(&topsrv.subscriber_list);
552 550
553 spin_lock_bh(&topsrv.lock); 551 spin_lock_bh(&topsrv.lock);
554 res = tipc_attach(&topsrv.user_ref, NULL, NULL); 552 res = tipc_attach(&topsrv.user_ref);
555 if (res) { 553 if (res) {
556 spin_unlock_bh(&topsrv.lock); 554 spin_unlock_bh(&topsrv.lock);
557 return res; 555 return res;
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 506928803162..2e2702e2049c 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -50,15 +50,11 @@
50/** 50/**
51 * struct tipc_user - registered TIPC user info 51 * struct tipc_user - registered TIPC user info
52 * @next: index of next free registry entry (or -1 for an allocated entry) 52 * @next: index of next free registry entry (or -1 for an allocated entry)
53 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
54 * @usr_handle: user-defined value passed to callback routine
55 * @ports: list of user ports owned by the user 53 * @ports: list of user ports owned by the user
56 */ 54 */
57 55
58struct tipc_user { 56struct tipc_user {
59 int next; 57 int next;
60 tipc_mode_event callback;
61 void *usr_handle;
62 struct list_head ports; 58 struct list_head ports;
63}; 59};
64 60
@@ -95,41 +91,12 @@ static int reg_init(void)
95} 91}
96 92
97/** 93/**
98 * reg_callback - inform TIPC user about current operating mode
99 */
100
101static void reg_callback(struct tipc_user *user_ptr)
102{
103 tipc_mode_event cb;
104 void *arg;
105
106 spin_lock_bh(&reg_lock);
107 cb = user_ptr->callback;
108 arg = user_ptr->usr_handle;
109 spin_unlock_bh(&reg_lock);
110
111 if (cb)
112 cb(arg, tipc_mode, tipc_own_addr);
113}
114
115/**
116 * tipc_reg_start - activate TIPC user registry 94 * tipc_reg_start - activate TIPC user registry
117 */ 95 */
118 96
119int tipc_reg_start(void) 97int tipc_reg_start(void)
120{ 98{
121 u32 u; 99 return reg_init();
122 int res;
123
124 if ((res = reg_init()))
125 return res;
126
127 for (u = 1; u <= MAX_USERID; u++) {
128 if (users[u].callback)
129 tipc_k_signal((Handler)reg_callback,
130 (unsigned long)&users[u]);
131 }
132 return 0;
133} 100}
134 101
135/** 102/**
@@ -138,15 +105,9 @@ int tipc_reg_start(void)
138 105
139void tipc_reg_stop(void) 106void tipc_reg_stop(void)
140{ 107{
141 int id;
142
143 if (!users) 108 if (!users)
144 return; 109 return;
145 110
146 for (id = 1; id <= MAX_USERID; id++) {
147 if (users[id].callback)
148 reg_callback(&users[id]);
149 }
150 kfree(users); 111 kfree(users);
151 users = NULL; 112 users = NULL;
152} 113}
@@ -157,12 +118,10 @@ void tipc_reg_stop(void)
157 * NOTE: This routine may be called when TIPC is inactive. 118 * NOTE: This routine may be called when TIPC is inactive.
158 */ 119 */
159 120
160int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle) 121int tipc_attach(u32 *userid)
161{ 122{
162 struct tipc_user *user_ptr; 123 struct tipc_user *user_ptr;
163 124
164 if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
165 return -ENOPROTOOPT;
166 if (!users) 125 if (!users)
167 reg_init(); 126 reg_init();
168 127
@@ -177,13 +136,9 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
177 user_ptr->next = -1; 136 user_ptr->next = -1;
178 spin_unlock_bh(&reg_lock); 137 spin_unlock_bh(&reg_lock);
179 138
180 user_ptr->callback = cb;
181 user_ptr->usr_handle = usr_handle;
182 INIT_LIST_HEAD(&user_ptr->ports); 139 INIT_LIST_HEAD(&user_ptr->ports);
183 atomic_inc(&tipc_user_count); 140 atomic_inc(&tipc_user_count);
184 141
185 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
187 return 0; 142 return 0;
188} 143}
189 144
@@ -207,7 +162,6 @@ void tipc_detach(u32 userid)
207 } 162 }
208 163
209 user_ptr = &users[userid]; 164 user_ptr = &users[userid];
210 user_ptr->callback = NULL;
211 INIT_LIST_HEAD(&ports_temp); 165 INIT_LIST_HEAD(&ports_temp);
212 list_splice(&user_ptr->ports, &ports_temp); 166 list_splice(&user_ptr->ports, &ports_temp);
213 user_ptr->next = next_free_user; 167 user_ptr->next = next_free_user;
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index 81dc12e2882f..109eed0d6de3 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -42,6 +42,9 @@
42int tipc_reg_start(void); 42int tipc_reg_start(void);
43void tipc_reg_stop(void); 43void tipc_reg_stop(void);
44 44
45int tipc_attach(unsigned int *userref);
46void tipc_detach(unsigned int userref);
47
45int tipc_reg_add_port(struct user_port *up_ptr); 48int tipc_reg_add_port(struct user_port *up_ptr);
46int tipc_reg_remove_port(struct user_port *up_ptr); 49int tipc_reg_remove_port(struct user_port *up_ptr);
47 50
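With the mode-change callbacks gone, registration shrinks to handing out a user id, which is why tipc_reg_start() collapses to reg_init() and tipc_attach() loses two parameters. A toy model of the slimmed-down interface (stub only, not the registry implementation):

#include <stdio.h>

static unsigned next_free_user = 1;

static int tipc_attach(unsigned *userid)   /* new one-argument form */
{
	*userid = next_free_user++;
	return 0;
}

int main(void)
{
	unsigned id;

	if (tipc_attach(&id) == 0)
		printf("attached as user %u\n", id);
	return 0;
}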
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 83f8b5e91fc8..1b61ca8c48ef 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -36,9 +36,6 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "zone.h" 38#include "zone.h"
39#include "net.h"
40#include "addr.h"
41#include "node_subscr.h"
42#include "cluster.h" 39#include "cluster.h"
43#include "node.h" 40#include "node.h"
44 41
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3c95304a0817..417d7a6c36cf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -316,7 +316,8 @@ static void unix_write_space(struct sock *sk)
316 if (unix_writable(sk)) { 316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq); 317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq)) 318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync(&wq->wait); 319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
321 } 322 }
322 rcu_read_unlock(); 323 rcu_read_unlock();
@@ -1343,9 +1344,25 @@ static void unix_destruct_scm(struct sk_buff *skb)
1343 sock_wfree(skb); 1344 sock_wfree(skb);
1344} 1345}
1345 1346
1347#define MAX_RECURSION_LEVEL 4
1348
1346static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1349static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1347{ 1350{
1348 int i; 1351 int i;
1352 unsigned char max_level = 0;
1353 int unix_sock_count = 0;
1354
1355 for (i = scm->fp->count - 1; i >= 0; i--) {
1356 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1357
1358 if (sk) {
1359 unix_sock_count++;
1360 max_level = max(max_level,
1361 unix_sk(sk)->recursion_level);
1362 }
1363 }
1364 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1365 return -ETOOMANYREFS;
1349 1366
1350 /* 1367 /*
1351 * Need to duplicate file references for the sake of garbage 1368 * Need to duplicate file references for the sake of garbage
@@ -1356,9 +1373,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1356 if (!UNIXCB(skb).fp) 1373 if (!UNIXCB(skb).fp)
1357 return -ENOMEM; 1374 return -ENOMEM;
1358 1375
1359 for (i = scm->fp->count-1; i >= 0; i--) 1376 if (unix_sock_count) {
1360 unix_inflight(scm->fp->fp[i]); 1377 for (i = scm->fp->count - 1; i >= 0; i--)
1361 return 0; 1378 unix_inflight(scm->fp->fp[i]);
1379 }
1380 return max_level;
1362} 1381}
1363 1382
1364static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) 1383static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1393,6 +1412,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1393 struct sk_buff *skb; 1412 struct sk_buff *skb;
1394 long timeo; 1413 long timeo;
1395 struct scm_cookie tmp_scm; 1414 struct scm_cookie tmp_scm;
1415 int max_level;
1396 1416
1397 if (NULL == siocb->scm) 1417 if (NULL == siocb->scm)
1398 siocb->scm = &tmp_scm; 1418 siocb->scm = &tmp_scm;
@@ -1431,8 +1451,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1431 goto out; 1451 goto out;
1432 1452
1433 err = unix_scm_to_skb(siocb->scm, skb, true); 1453 err = unix_scm_to_skb(siocb->scm, skb, true);
1434 if (err) 1454 if (err < 0)
1435 goto out_free; 1455 goto out_free;
1456 max_level = err + 1;
1436 unix_get_secdata(siocb->scm, skb); 1457 unix_get_secdata(siocb->scm, skb);
1437 1458
1438 skb_reset_transport_header(skb); 1459 skb_reset_transport_header(skb);
@@ -1514,6 +1535,8 @@ restart:
1514 if (sock_flag(other, SOCK_RCVTSTAMP)) 1535 if (sock_flag(other, SOCK_RCVTSTAMP))
1515 __net_timestamp(skb); 1536 __net_timestamp(skb);
1516 skb_queue_tail(&other->sk_receive_queue, skb); 1537 skb_queue_tail(&other->sk_receive_queue, skb);
1538 if (max_level > unix_sk(other)->recursion_level)
1539 unix_sk(other)->recursion_level = max_level;
1517 unix_state_unlock(other); 1540 unix_state_unlock(other);
1518 other->sk_data_ready(other, len); 1541 other->sk_data_ready(other, len);
1519 sock_put(other); 1542 sock_put(other);
@@ -1544,6 +1567,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1544 int sent = 0; 1567 int sent = 0;
1545 struct scm_cookie tmp_scm; 1568 struct scm_cookie tmp_scm;
1546 bool fds_sent = false; 1569 bool fds_sent = false;
1570 int max_level;
1547 1571
1548 if (NULL == siocb->scm) 1572 if (NULL == siocb->scm)
1549 siocb->scm = &tmp_scm; 1573 siocb->scm = &tmp_scm;
@@ -1607,10 +1631,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1607 1631
1608 /* Only send the fds in the first buffer */ 1632 /* Only send the fds in the first buffer */
1609 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); 1633 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1610 if (err) { 1634 if (err < 0) {
1611 kfree_skb(skb); 1635 kfree_skb(skb);
1612 goto out_err; 1636 goto out_err;
1613 } 1637 }
1638 max_level = err + 1;
1614 fds_sent = true; 1639 fds_sent = true;
1615 1640
1616 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 1641 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
@@ -1626,6 +1651,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1626 goto pipe_err_free; 1651 goto pipe_err_free;
1627 1652
1628 skb_queue_tail(&other->sk_receive_queue, skb); 1653 skb_queue_tail(&other->sk_receive_queue, skb);
1654 if (max_level > unix_sk(other)->recursion_level)
1655 unix_sk(other)->recursion_level = max_level;
1629 unix_state_unlock(other); 1656 unix_state_unlock(other);
1630 other->sk_data_ready(other, size); 1657 other->sk_data_ready(other, size);
1631 sent += size; 1658 sent += size;
@@ -1710,7 +1737,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1710 goto out_unlock; 1737 goto out_unlock;
1711 } 1738 }
1712 1739
1713 wake_up_interruptible_sync(&u->peer_wait); 1740 wake_up_interruptible_sync_poll(&u->peer_wait,
1741 POLLOUT | POLLWRNORM | POLLWRBAND);
1714 1742
1715 if (msg->msg_name) 1743 if (msg->msg_name)
1716 unix_copy_addr(msg, skb->sk); 1744 unix_copy_addr(msg, skb->sk);
@@ -1845,6 +1873,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1845 unix_state_lock(sk); 1873 unix_state_lock(sk);
1846 skb = skb_dequeue(&sk->sk_receive_queue); 1874 skb = skb_dequeue(&sk->sk_receive_queue);
1847 if (skb == NULL) { 1875 if (skb == NULL) {
1876 unix_sk(sk)->recursion_level = 0;
1848 if (copied >= target) 1877 if (copied >= target)
1849 goto unlock; 1878 goto unlock;
1850 1879
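Taken together, the af_unix hunks above implement one mechanism: unix_attach_fds() reports the deepest recursion_level among any AF_UNIX sockets carried in an SCM_RIGHTS message, the send paths tag the receiving socket with that level plus one, a drained receive queue resets the level to zero, and anything deeper than MAX_RECURSION_LEVEL is refused with -ETOOMANYREFS. A simplified model of the accounting (not the kernel code):

#include <stdio.h>

#define MAX_RECURSION_LEVEL 4

struct usock { unsigned char recursion_level; };

/* Level the receiver will be tagged with, or -1 (-ETOOMANYREFS). */
static int attach_fds(struct usock **passed, int n)
{
	unsigned char max_level = 0;
	int i;

	for (i = 0; i < n; i++)
		if (passed[i] && passed[i]->recursion_level > max_level)
			max_level = passed[i]->recursion_level;

	if (max_level > MAX_RECURSION_LEVEL)
		return -1;
	return max_level + 1;
}

int main(void)
{
	struct usock b = { 0 };
	struct usock *fds[1] = { &b };
	int i, level;

	/* Keep re-passing socket b and propagating its level upward. */
	for (i = 0; i < 6; i++) {
		level = attach_fds(fds, 1);
		if (level < 0) {
			printf("pass %d refused: nesting too deep\n", i);
			break;
		}
		b.recursion_level = (unsigned char)level;
		printf("pass %d ok, level now %d\n", i, level);
	}
	return 0;
}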
@@ -2072,13 +2101,12 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2072 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 2101 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2073 mask |= POLLERR; 2102 mask |= POLLERR;
2074 if (sk->sk_shutdown & RCV_SHUTDOWN) 2103 if (sk->sk_shutdown & RCV_SHUTDOWN)
2075 mask |= POLLRDHUP; 2104 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2076 if (sk->sk_shutdown == SHUTDOWN_MASK) 2105 if (sk->sk_shutdown == SHUTDOWN_MASK)
2077 mask |= POLLHUP; 2106 mask |= POLLHUP;
2078 2107
2079 /* readable? */ 2108 /* readable? */
2080 if (!skb_queue_empty(&sk->sk_receive_queue) || 2109 if (!skb_queue_empty(&sk->sk_receive_queue))
2081 (sk->sk_shutdown & RCV_SHUTDOWN))
2082 mask |= POLLIN | POLLRDNORM; 2110 mask |= POLLIN | POLLRDNORM;
2083 2111
2084 /* Connection-based need to check for termination and startup */ 2112 /* Connection-based need to check for termination and startup */
@@ -2090,20 +2118,19 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2090 return mask; 2118 return mask;
2091 } 2119 }
2092 2120
2093 /* writable? */ 2121 /* No write status requested, avoid expensive OUT tests. */
2094 writable = unix_writable(sk); 2122 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2095 if (writable) { 2123 return mask;
2096 other = unix_peer_get(sk);
2097 if (other) {
2098 if (unix_peer(other) != sk) {
2099 sock_poll_wait(file, &unix_sk(other)->peer_wait,
2100 wait);
2101 if (unix_recvq_full(other))
2102 writable = 0;
2103 }
2104 2124
2105 sock_put(other); 2125 writable = unix_writable(sk);
2126 other = unix_peer_get(sk);
2127 if (other) {
2128 if (unix_peer(other) != sk) {
2129 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2130 if (unix_recvq_full(other))
2131 writable = 0;
2106 } 2132 }
2133 sock_put(other);
2107 } 2134 }
2108 2135
2109 if (writable) 2136 if (writable)
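The poll rework above is an early-out: when the poll key never asked for OUT events there is no reason to run the expensive peer receive-queue probe, and a shut-down receive side now also reports POLLIN so blocked readers wake. A reduced model of the short-circuit (simplified flag values defined locally):

#include <stdio.h>

#define POLLIN     0x001
#define POLLOUT    0x004
#define POLLWRNORM 0x100

static unsigned dgram_poll(unsigned key, int peer_queue_full)
{
	unsigned mask = 0;

	/* ... readable/hangup bits computed as before ... */

	if (!(key & (POLLOUT | POLLWRNORM)))
		return mask;               /* no write status requested */

	if (!peer_queue_full)              /* costly peer check, only now */
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

int main(void)
{
	printf("read-only poll: %#x\n", dgram_poll(POLLIN, 1));
	printf("write poll:     %#x\n", dgram_poll(POLLOUT, 0));
	return 0;
}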
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c8df6fda0b1f..f89f83bf828e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
96unsigned int unix_tot_inflight; 96unsigned int unix_tot_inflight;
97 97
98 98
99static struct sock *unix_get_socket(struct file *filp) 99struct sock *unix_get_socket(struct file *filp)
100{ 100{
101 struct sock *u_sock = NULL; 101 struct sock *u_sock = NULL;
102 struct inode *inode = filp->f_path.dentry->d_inode; 102 struct inode *inode = filp->f_path.dentry->d_inode;
@@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
259} 259}
260 260
261static bool gc_in_progress = false; 261static bool gc_in_progress = false;
262#define UNIX_INFLIGHT_TRIGGER_GC 16000
262 263
263void wait_for_unix_gc(void) 264void wait_for_unix_gc(void)
264{ 265{
266 /*
267 * If number of inflight sockets is insane,
268 * force a garbage collect right now.
269 */
270 if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
271 unix_gc();
265 wait_event(unix_gc_wait, gc_in_progress == false); 272 wait_event(unix_gc_wait, gc_in_progress == false);
266} 273}
267 274
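The garbage.c hunk lets wait_for_unix_gc() start a collection itself once the in-flight count passes 16000, instead of only sleeping on a cycle that may never have been kicked off. A stubbed model of that trigger (illustration only):

#include <stdio.h>
#include <stdbool.h>

#define UNIX_INFLIGHT_TRIGGER_GC 16000

static unsigned int unix_tot_inflight;
static bool gc_in_progress;

static void unix_gc(void)
{
	printf("gc: collecting now\n");
	unix_tot_inflight = 0;
}

static void wait_for_unix_gc(void)
{
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();               /* force a collection right away */
	/* ...then wait until no collection is running (elided here). */
}

int main(void)
{
	unix_tot_inflight = 20000;       /* pathological fd-passing load */
	wait_for_unix_gc();
	printf("inflight after: %u\n", unix_tot_inflight);
	return 0;
}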
diff --git a/net/wanrouter/Makefile b/net/wanrouter/Makefile
index 9f188ab3dcd0..4da14bc48078 100644
--- a/net/wanrouter/Makefile
+++ b/net/wanrouter/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_WAN_ROUTER) += wanrouter.o 5obj-$(CONFIG_WAN_ROUTER) += wanrouter.o
6 6
7wanrouter-objs := wanproc.o wanmain.o 7wanrouter-y := wanproc.o wanmain.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7af98dff409..ad96ee90fe27 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1357,11 +1357,11 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1357 void __user *argp = (void __user *)arg; 1357 void __user *argp = (void __user *)arg;
1358 int rc; 1358 int rc;
1359 1359
1360 lock_kernel();
1361 switch (cmd) { 1360 switch (cmd) {
1362 case TIOCOUTQ: { 1361 case TIOCOUTQ: {
1363 int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); 1362 int amount;
1364 1363
1364 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1365 if (amount < 0) 1365 if (amount < 0)
1366 amount = 0; 1366 amount = 0;
1367 rc = put_user(amount, (unsigned int __user *)argp); 1367 rc = put_user(amount, (unsigned int __user *)argp);
@@ -1375,8 +1375,10 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1375 * These two are safe on a single CPU system as 1375 * These two are safe on a single CPU system as
1376 * only user tasks fiddle here 1376 * only user tasks fiddle here
1377 */ 1377 */
1378 lock_sock(sk);
1378 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) 1379 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1379 amount = skb->len; 1380 amount = skb->len;
1381 release_sock(sk);
1380 rc = put_user(amount, (unsigned int __user *)argp); 1382 rc = put_user(amount, (unsigned int __user *)argp);
1381 break; 1383 break;
1382 } 1384 }
@@ -1422,9 +1424,11 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1422 rc = x25_subscr_ioctl(cmd, argp); 1424 rc = x25_subscr_ioctl(cmd, argp);
1423 break; 1425 break;
1424 case SIOCX25GFACILITIES: { 1426 case SIOCX25GFACILITIES: {
1425 struct x25_facilities fac = x25->facilities; 1427 lock_sock(sk);
1426 rc = copy_to_user(argp, &fac, 1428 rc = copy_to_user(argp, &x25->facilities,
1427 sizeof(fac)) ? -EFAULT : 0; 1429 sizeof(x25->facilities))
1430 ? -EFAULT : 0;
1431 release_sock(sk);
1428 break; 1432 break;
1429 } 1433 }
1430 1434
@@ -1435,18 +1439,19 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1435 sizeof(facilities))) 1439 sizeof(facilities)))
1436 break; 1440 break;
1437 rc = -EINVAL; 1441 rc = -EINVAL;
1442 lock_sock(sk);
1438 if (sk->sk_state != TCP_LISTEN && 1443 if (sk->sk_state != TCP_LISTEN &&
1439 sk->sk_state != TCP_CLOSE) 1444 sk->sk_state != TCP_CLOSE)
1440 break; 1445 goto out_fac_release;
1441 if (facilities.pacsize_in < X25_PS16 || 1446 if (facilities.pacsize_in < X25_PS16 ||
1442 facilities.pacsize_in > X25_PS4096) 1447 facilities.pacsize_in > X25_PS4096)
1443 break; 1448 goto out_fac_release;
1444 if (facilities.pacsize_out < X25_PS16 || 1449 if (facilities.pacsize_out < X25_PS16 ||
1445 facilities.pacsize_out > X25_PS4096) 1450 facilities.pacsize_out > X25_PS4096)
1446 break; 1451 goto out_fac_release;
1447 if (facilities.winsize_in < 1 || 1452 if (facilities.winsize_in < 1 ||
1448 facilities.winsize_in > 127) 1453 facilities.winsize_in > 127)
1449 break; 1454 goto out_fac_release;
1450 if (facilities.throughput) { 1455 if (facilities.throughput) {
1451 int out = facilities.throughput & 0xf0; 1456 int out = facilities.throughput & 0xf0;
1452 int in = facilities.throughput & 0x0f; 1457 int in = facilities.throughput & 0x0f;
@@ -1454,24 +1459,28 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1454 facilities.throughput |= 1459 facilities.throughput |=
1455 X25_DEFAULT_THROUGHPUT << 4; 1460 X25_DEFAULT_THROUGHPUT << 4;
1456 else if (out < 0x30 || out > 0xD0) 1461 else if (out < 0x30 || out > 0xD0)
1457 break; 1462 goto out_fac_release;
1458 if (!in) 1463 if (!in)
1459 facilities.throughput |= 1464 facilities.throughput |=
1460 X25_DEFAULT_THROUGHPUT; 1465 X25_DEFAULT_THROUGHPUT;
1461 else if (in < 0x03 || in > 0x0D) 1466 else if (in < 0x03 || in > 0x0D)
1462 break; 1467 goto out_fac_release;
1463 } 1468 }
1464 if (facilities.reverse && 1469 if (facilities.reverse &&
1465 (facilities.reverse & 0x81) != 0x81) 1470 (facilities.reverse & 0x81) != 0x81)
1466 break; 1471 goto out_fac_release;
1467 x25->facilities = facilities; 1472 x25->facilities = facilities;
1468 rc = 0; 1473 rc = 0;
1474out_fac_release:
1475 release_sock(sk);
1469 break; 1476 break;
1470 } 1477 }
1471 1478
1472 case SIOCX25GDTEFACILITIES: { 1479 case SIOCX25GDTEFACILITIES: {
1480 lock_sock(sk);
1473 rc = copy_to_user(argp, &x25->dte_facilities, 1481 rc = copy_to_user(argp, &x25->dte_facilities,
1474 sizeof(x25->dte_facilities)); 1482 sizeof(x25->dte_facilities));
1483 release_sock(sk);
1475 if (rc) 1484 if (rc)
1476 rc = -EFAULT; 1485 rc = -EFAULT;
1477 break; 1486 break;
@@ -1483,26 +1492,31 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1483 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs))) 1492 if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
1484 break; 1493 break;
1485 rc = -EINVAL; 1494 rc = -EINVAL;
1495 lock_sock(sk);
1486 if (sk->sk_state != TCP_LISTEN && 1496 if (sk->sk_state != TCP_LISTEN &&
1487 sk->sk_state != TCP_CLOSE) 1497 sk->sk_state != TCP_CLOSE)
1488 break; 1498 goto out_dtefac_release;
1489 if (dtefacs.calling_len > X25_MAX_AE_LEN) 1499 if (dtefacs.calling_len > X25_MAX_AE_LEN)
1490 break; 1500 goto out_dtefac_release;
1491 if (dtefacs.calling_ae == NULL) 1501 if (dtefacs.calling_ae == NULL)
1492 break; 1502 goto out_dtefac_release;
1493 if (dtefacs.called_len > X25_MAX_AE_LEN) 1503 if (dtefacs.called_len > X25_MAX_AE_LEN)
1494 break; 1504 goto out_dtefac_release;
1495 if (dtefacs.called_ae == NULL) 1505 if (dtefacs.called_ae == NULL)
1496 break; 1506 goto out_dtefac_release;
1497 x25->dte_facilities = dtefacs; 1507 x25->dte_facilities = dtefacs;
1498 rc = 0; 1508 rc = 0;
1509out_dtefac_release:
1510 release_sock(sk);
1499 break; 1511 break;
1500 } 1512 }
1501 1513
1502 case SIOCX25GCALLUSERDATA: { 1514 case SIOCX25GCALLUSERDATA: {
1503 struct x25_calluserdata cud = x25->calluserdata; 1515 lock_sock(sk);
1504 rc = copy_to_user(argp, &cud, 1516 rc = copy_to_user(argp, &x25->calluserdata,
1505 sizeof(cud)) ? -EFAULT : 0; 1517 sizeof(x25->calluserdata))
1518 ? -EFAULT : 0;
1519 release_sock(sk);
1506 break; 1520 break;
1507 } 1521 }
1508 1522
@@ -1516,16 +1530,19 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1516 rc = -EINVAL; 1530 rc = -EINVAL;
1517 if (calluserdata.cudlength > X25_MAX_CUD_LEN) 1531 if (calluserdata.cudlength > X25_MAX_CUD_LEN)
1518 break; 1532 break;
1533 lock_sock(sk);
1519 x25->calluserdata = calluserdata; 1534 x25->calluserdata = calluserdata;
1535 release_sock(sk);
1520 rc = 0; 1536 rc = 0;
1521 break; 1537 break;
1522 } 1538 }
1523 1539
1524 case SIOCX25GCAUSEDIAG: { 1540 case SIOCX25GCAUSEDIAG: {
1525 struct x25_causediag causediag; 1541 lock_sock(sk);
1526 causediag = x25->causediag; 1542 rc = copy_to_user(argp, &x25->causediag,
1527 rc = copy_to_user(argp, &causediag, 1543 sizeof(x25->causediag))
1528 sizeof(causediag)) ? -EFAULT : 0; 1544 ? -EFAULT : 0;
1545 release_sock(sk);
1529 break; 1546 break;
1530 } 1547 }
1531 1548
@@ -1534,7 +1551,9 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1534 rc = -EFAULT; 1551 rc = -EFAULT;
1535 if (copy_from_user(&causediag, argp, sizeof(causediag))) 1552 if (copy_from_user(&causediag, argp, sizeof(causediag)))
1536 break; 1553 break;
1554 lock_sock(sk);
1537 x25->causediag = causediag; 1555 x25->causediag = causediag;
1556 release_sock(sk);
1538 rc = 0; 1557 rc = 0;
1539 break; 1558 break;
1540 1559
@@ -1543,31 +1562,37 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1543 case SIOCX25SCUDMATCHLEN: { 1562 case SIOCX25SCUDMATCHLEN: {
1544 struct x25_subaddr sub_addr; 1563 struct x25_subaddr sub_addr;
1545 rc = -EINVAL; 1564 rc = -EINVAL;
1565 lock_sock(sk);
1546 if(sk->sk_state != TCP_CLOSE) 1566 if(sk->sk_state != TCP_CLOSE)
1547 break; 1567 goto out_cud_release;
1548 rc = -EFAULT; 1568 rc = -EFAULT;
1549 if (copy_from_user(&sub_addr, argp, 1569 if (copy_from_user(&sub_addr, argp,
1550 sizeof(sub_addr))) 1570 sizeof(sub_addr)))
1551 break; 1571 goto out_cud_release;
1552 rc = -EINVAL; 1572 rc = -EINVAL;
1553 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN) 1573 if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
1554 break; 1574 goto out_cud_release;
1555 x25->cudmatchlength = sub_addr.cudmatchlength; 1575 x25->cudmatchlength = sub_addr.cudmatchlength;
1556 rc = 0; 1576 rc = 0;
1577out_cud_release:
1578 release_sock(sk);
1557 break; 1579 break;
1558 } 1580 }
1559 1581
1560 case SIOCX25CALLACCPTAPPRV: { 1582 case SIOCX25CALLACCPTAPPRV: {
1561 rc = -EINVAL; 1583 rc = -EINVAL;
1584 lock_kernel();
1562 if (sk->sk_state != TCP_CLOSE) 1585 if (sk->sk_state != TCP_CLOSE)
1563 break; 1586 break;
1564 clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); 1587 clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
1588 unlock_kernel();
1565 rc = 0; 1589 rc = 0;
1566 break; 1590 break;
1567 } 1591 }
1568 1592
1569 case SIOCX25SENDCALLACCPT: { 1593 case SIOCX25SENDCALLACCPT: {
1570 rc = -EINVAL; 1594 rc = -EINVAL;
1595 lock_kernel();
1571 if (sk->sk_state != TCP_ESTABLISHED) 1596 if (sk->sk_state != TCP_ESTABLISHED)
1572 break; 1597 break;
1573 /* must call accptapprv above */ 1598 /* must call accptapprv above */
@@ -1575,6 +1600,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1575 break; 1600 break;
1576 x25_write_internal(sk, X25_CALL_ACCEPTED); 1601 x25_write_internal(sk, X25_CALL_ACCEPTED);
1577 x25->state = X25_STATE_3; 1602 x25->state = X25_STATE_3;
1603 unlock_kernel();
1578 rc = 0; 1604 rc = 0;
1579 break; 1605 break;
1580 } 1606 }
@@ -1583,7 +1609,6 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1583 rc = -ENOIOCTLCMD; 1609 rc = -ENOIOCTLCMD;
1584 break; 1610 break;
1585 } 1611 }
1586 unlock_kernel();
1587 1612
1588 return rc; 1613 return rc;
1589} 1614}
@@ -1619,16 +1644,20 @@ static int compat_x25_subscr_ioctl(unsigned int cmd,
1619 dev_put(dev); 1644 dev_put(dev);
1620 1645
1621 if (cmd == SIOCX25GSUBSCRIP) { 1646 if (cmd == SIOCX25GSUBSCRIP) {
1647 read_lock_bh(&x25_neigh_list_lock);
1622 x25_subscr.extended = nb->extended; 1648 x25_subscr.extended = nb->extended;
1623 x25_subscr.global_facil_mask = nb->global_facil_mask; 1649 x25_subscr.global_facil_mask = nb->global_facil_mask;
1650 read_unlock_bh(&x25_neigh_list_lock);
1624 rc = copy_to_user(x25_subscr32, &x25_subscr, 1651 rc = copy_to_user(x25_subscr32, &x25_subscr,
1625 sizeof(*x25_subscr32)) ? -EFAULT : 0; 1652 sizeof(*x25_subscr32)) ? -EFAULT : 0;
1626 } else { 1653 } else {
1627 rc = -EINVAL; 1654 rc = -EINVAL;
1628 if (x25_subscr.extended == 0 || x25_subscr.extended == 1) { 1655 if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
1629 rc = 0; 1656 rc = 0;
1657 write_lock_bh(&x25_neigh_list_lock);
1630 nb->extended = x25_subscr.extended; 1658 nb->extended = x25_subscr.extended;
1631 nb->global_facil_mask = x25_subscr.global_facil_mask; 1659 nb->global_facil_mask = x25_subscr.global_facil_mask;
1660 write_unlock_bh(&x25_neigh_list_lock);
1632 } 1661 }
1633 } 1662 }
1634 x25_neigh_put(nb); 1663 x25_neigh_put(nb);
@@ -1654,19 +1683,15 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1654 break; 1683 break;
1655 case SIOCGSTAMP: 1684 case SIOCGSTAMP:
1656 rc = -EINVAL; 1685 rc = -EINVAL;
1657 lock_kernel();
1658 if (sk) 1686 if (sk)
1659 rc = compat_sock_get_timestamp(sk, 1687 rc = compat_sock_get_timestamp(sk,
1660 (struct timeval __user*)argp); 1688 (struct timeval __user*)argp);
1661 unlock_kernel();
1662 break; 1689 break;
1663 case SIOCGSTAMPNS: 1690 case SIOCGSTAMPNS:
1664 rc = -EINVAL; 1691 rc = -EINVAL;
1665 lock_kernel();
1666 if (sk) 1692 if (sk)
1667 rc = compat_sock_get_timestampns(sk, 1693 rc = compat_sock_get_timestampns(sk,
1668 (struct timespec __user*)argp); 1694 (struct timespec __user*)argp);
1669 unlock_kernel();
1670 break; 1695 break;
1671 case SIOCGIFADDR: 1696 case SIOCGIFADDR:
1672 case SIOCSIFADDR: 1697 case SIOCSIFADDR:
@@ -1685,22 +1710,16 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
1685 rc = -EPERM; 1710 rc = -EPERM;
1686 if (!capable(CAP_NET_ADMIN)) 1711 if (!capable(CAP_NET_ADMIN))
1687 break; 1712 break;
1688 lock_kernel();
1689 rc = x25_route_ioctl(cmd, argp); 1713 rc = x25_route_ioctl(cmd, argp);
1690 unlock_kernel();
1691 break; 1714 break;
1692 case SIOCX25GSUBSCRIP: 1715 case SIOCX25GSUBSCRIP:
1693 lock_kernel();
1694 rc = compat_x25_subscr_ioctl(cmd, argp); 1716 rc = compat_x25_subscr_ioctl(cmd, argp);
1695 unlock_kernel();
1696 break; 1717 break;
1697 case SIOCX25SSUBSCRIP: 1718 case SIOCX25SSUBSCRIP:
1698 rc = -EPERM; 1719 rc = -EPERM;
1699 if (!capable(CAP_NET_ADMIN)) 1720 if (!capable(CAP_NET_ADMIN))
1700 break; 1721 break;
1701 lock_kernel();
1702 rc = compat_x25_subscr_ioctl(cmd, argp); 1722 rc = compat_x25_subscr_ioctl(cmd, argp);
1703 unlock_kernel();
1704 break; 1723 break;
1705 case SIOCX25GFACILITIES: 1724 case SIOCX25GFACILITIES:
1706 case SIOCX25SFACILITIES: 1725 case SIOCX25SFACILITIES:
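The af_x25.c hunks above are a lock_kernel() push-down: ioctl paths that touch only per-socket state switch to lock_sock()/release_sock(), the two call-accept cases keep a narrowed big kernel lock inside their own case blocks, and the compat entry points drop the BKL where the native path no longer takes it. Condensed from the SIOCX25SCUDMATCHLEN hunk above, the per-socket pattern is:

	rc = -EINVAL;
	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE)
		goto out_cud_release;
	/* validate user input, then update per-socket state */
	x25->cudmatchlength = sub_addr.cudmatchlength;
	rc = 0;
out_cud_release:
	release_sock(sk);
	break;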
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab00754b..55187c8f6420 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
61 while (len > 0) { 61 while (len > 0) {
62 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
63 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
64 if (len < 2)
65 return 0;
64 switch (*p) { 66 switch (*p) {
65 case X25_FAC_REVERSE: 67 case X25_FAC_REVERSE:
66 if((p[1] & 0x81) == 0x81) { 68 if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
104 len -= 2; 106 len -= 2;
105 break; 107 break;
106 case X25_FAC_CLASS_B: 108 case X25_FAC_CLASS_B:
109 if (len < 3)
110 return 0;
107 switch (*p) { 111 switch (*p) {
108 case X25_FAC_PACKET_SIZE: 112 case X25_FAC_PACKET_SIZE:
109 facilities->pacsize_in = p[1]; 113 facilities->pacsize_in = p[1];
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
125 len -= 3; 129 len -= 3;
126 break; 130 break;
127 case X25_FAC_CLASS_C: 131 case X25_FAC_CLASS_C:
132 if (len < 4)
133 return 0;
128 printk(KERN_DEBUG "X.25: unknown facility %02X, " 134 printk(KERN_DEBUG "X.25: unknown facility %02X, "
129 "values %02X, %02X, %02X\n", 135 "values %02X, %02X, %02X\n",
130 p[0], p[1], p[2], p[3]); 136 p[0], p[1], p[2], p[3]);
@@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
132 len -= 4; 138 len -= 4;
133 break; 139 break;
134 case X25_FAC_CLASS_D: 140 case X25_FAC_CLASS_D:
141 if (len < p[1] + 2)
142 return 0;
135 switch (*p) { 143 switch (*p) {
136 case X25_FAC_CALLING_AE: 144 case X25_FAC_CALLING_AE:
137 if (p[1] > X25_MAX_DTE_FACIL_LEN) 145 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
138 break; 146 return 0;
139 dte_facs->calling_len = p[2]; 147 dte_facs->calling_len = p[2];
140 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); 148 memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
141 *vc_fac_mask |= X25_MASK_CALLING_AE; 149 *vc_fac_mask |= X25_MASK_CALLING_AE;
142 break; 150 break;
143 case X25_FAC_CALLED_AE: 151 case X25_FAC_CALLED_AE:
144 if (p[1] > X25_MAX_DTE_FACIL_LEN) 152 if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
145 break; 153 return 0;
146 dte_facs->called_len = p[2]; 154 dte_facs->called_len = p[2];
147 memcpy(dte_facs->called_ae, &p[3], p[1] - 1); 155 memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
148 *vc_fac_mask |= X25_MASK_CALLED_AE; 156 *vc_fac_mask |= X25_MASK_CALLED_AE;
149 break; 157 break;
150 default: 158 default:
151 printk(KERN_DEBUG "X.25: unknown facility %02X," 159 printk(KERN_DEBUG "X.25: unknown facility %02X,"
152 "length %d, values %02X, %02X, " 160 "length %d\n", p[0], p[1]);
153 "%02X, %02X\n",
154 p[0], p[1], p[2], p[3], p[4], p[5]);
155 break; 161 break;
156 } 162 }
157 len -= p[1] + 2; 163 len -= p[1] + 2;
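Each facility class now has its length validated against the remaining buffer before any of its bytes are decoded, so a truncated or malicious facilities block makes the parser bail out (return 0) instead of reading past the end of the packet. A standalone sketch of the check, using the class mask and per-class sizes visible in the parser above (illustrative, not the kernel function):

	#include <stddef.h>

	/* the top two bits of the facility code select its class */
	static int x25_fac_len_ok(const unsigned char *p, size_t len)
	{
		switch (p[0] & 0xc0) {
		case 0x00: return len >= 2;		/* class A: 2 bytes */
		case 0x40: return len >= 3;		/* class B: 3 bytes */
		case 0x80: return len >= 4;		/* class C: 4 bytes */
		default:   return len >= 2 &&		/* class D: p[1] + 2 */
			          len >= (size_t)p[1] + 2;
		}
	}

The class D AE entries additionally reject p[1] <= 1, since p[1] - 1 bytes are copied starting at p[3].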
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 63178961efac..f729f022be69 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
119 &x25->vc_facil_mask); 119 &x25->vc_facil_mask);
120 if (len > 0) 120 if (len > 0)
121 skb_pull(skb, len); 121 skb_pull(skb, len);
122 else
123 return -1;
122 /* 124 /*
123 * Copy any Call User Data. 125 * Copy any Call User Data.
124 */ 126 */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 73e7b954ad28..4cbc942f762a 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -31,8 +31,8 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <net/x25.h> 32#include <net/x25.h>
33 33
34static LIST_HEAD(x25_neigh_list); 34LIST_HEAD(x25_neigh_list);
35static DEFINE_RWLOCK(x25_neigh_list_lock); 35DEFINE_RWLOCK(x25_neigh_list_lock);
36 36
37static void x25_t20timer_expiry(unsigned long); 37static void x25_t20timer_expiry(unsigned long);
38 38
@@ -360,16 +360,20 @@ int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
360 dev_put(dev); 360 dev_put(dev);
361 361
362 if (cmd == SIOCX25GSUBSCRIP) { 362 if (cmd == SIOCX25GSUBSCRIP) {
363 read_lock_bh(&x25_neigh_list_lock);
363 x25_subscr.extended = nb->extended; 364 x25_subscr.extended = nb->extended;
364 x25_subscr.global_facil_mask = nb->global_facil_mask; 365 x25_subscr.global_facil_mask = nb->global_facil_mask;
366 read_unlock_bh(&x25_neigh_list_lock);
365 rc = copy_to_user(arg, &x25_subscr, 367 rc = copy_to_user(arg, &x25_subscr,
366 sizeof(x25_subscr)) ? -EFAULT : 0; 368 sizeof(x25_subscr)) ? -EFAULT : 0;
367 } else { 369 } else {
368 rc = -EINVAL; 370 rc = -EINVAL;
369 if (!(x25_subscr.extended && x25_subscr.extended != 1)) { 371 if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
370 rc = 0; 372 rc = 0;
373 write_lock_bh(&x25_neigh_list_lock);
371 nb->extended = x25_subscr.extended; 374 nb->extended = x25_subscr.extended;
372 nb->global_facil_mask = x25_subscr.global_facil_mask; 375 nb->global_facil_mask = x25_subscr.global_facil_mask;
376 write_unlock_bh(&x25_neigh_list_lock);
373 } 377 }
374 } 378 }
375 x25_neigh_put(nb); 379 x25_neigh_put(nb);
@@ -394,6 +398,7 @@ void __exit x25_link_free(void)
394 list_for_each_safe(entry, tmp, &x25_neigh_list) { 398 list_for_each_safe(entry, tmp, &x25_neigh_list) {
395 nb = list_entry(entry, struct x25_neigh, node); 399 nb = list_entry(entry, struct x25_neigh, node);
396 __x25_remove_neigh(nb); 400 __x25_remove_neigh(nb);
401 dev_put(nb->dev);
397 } 402 }
398 write_unlock_bh(&x25_neigh_list_lock); 403 write_unlock_bh(&x25_neigh_list_lock);
399} 404}
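x25_neigh_list and its rwlock lose their static qualifier here so the compat path in af_x25.c (see the compat_x25_subscr_ioctl hunk earlier) can take the same lock. Subscription fields are now snapshotted under the read lock and published under the write lock, with copy_to_user() kept outside the critical section. A minimal userspace analogue of the read-side pattern, using POSIX rwlocks (illustrative):

	#include <pthread.h>

	struct subscr { unsigned int extended; unsigned long facil_mask; };

	static pthread_rwlock_t neigh_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct subscr neigh;		/* shared state */

	static void subscr_snapshot(struct subscr *out)
	{
		pthread_rwlock_rdlock(&neigh_lock);
		*out = neigh;			/* copy under the lock */
		pthread_rwlock_unlock(&neigh_lock);
		/* caller can now block (e.g. copy to user space)
		 * without holding the lock */
	}

The x25_link_free() hunk also adds the dev_put() that balances the reference taken when the neighbour was created, closing a device refcount leak on module unload.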
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index a2023ec52329..1e98bc0fe0a5 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz)
19 if (sz <= PAGE_SIZE) 19 if (sz <= PAGE_SIZE)
20 n = kzalloc(sz, GFP_KERNEL); 20 n = kzalloc(sz, GFP_KERNEL);
21 else if (hashdist) 21 else if (hashdist)
22 n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 22 n = vzalloc(sz);
23 else 23 else
24 n = (struct hlist_head *) 24 n = (struct hlist_head *)
25 __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 25 __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 044e77898512..6e50ccd8c532 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1433,7 +1433,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1433 } 1433 }
1434 1434
1435 xdst->route = dst; 1435 xdst->route = dst;
1436 memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics)); 1436 dst_copy_metrics(dst1, dst);
1437 1437
1438 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1438 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1439 family = xfrm[i]->props.family; 1439 family = xfrm[i]->props.family;
@@ -2271,7 +2271,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
2271 if (pmtu > route_mtu_cached) 2271 if (pmtu > route_mtu_cached)
2272 pmtu = route_mtu_cached; 2272 pmtu = route_mtu_cached;
2273 2273
2274 dst->metrics[RTAX_MTU-1] = pmtu; 2274 dst_metric_set(dst, RTAX_MTU, pmtu);
2275 } while ((dst = dst->next)); 2275 } while ((dst = dst->next));
2276} 2276}
2277 2277
@@ -2349,7 +2349,7 @@ static int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2349 mtu = xfrm_state_mtu(dst->xfrm, mtu); 2349 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2350 if (mtu > last->route_mtu_cached) 2350 if (mtu > last->route_mtu_cached)
2351 mtu = last->route_mtu_cached; 2351 mtu = last->route_mtu_cached;
2352 dst->metrics[RTAX_MTU-1] = mtu; 2352 dst_metric_set(dst, RTAX_MTU, mtu);
2353 2353
2354 if (last == first) 2354 if (last == first)
2355 break; 2355 break;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8bae6b22c846..8eb889510916 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -148,7 +148,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
148 !attrs[XFRMA_ALG_AUTH_TRUNC]) || 148 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
149 attrs[XFRMA_ALG_AEAD] || 149 attrs[XFRMA_ALG_AEAD] ||
150 attrs[XFRMA_ALG_CRYPT] || 150 attrs[XFRMA_ALG_CRYPT] ||
151 attrs[XFRMA_ALG_COMP]) 151 attrs[XFRMA_ALG_COMP] ||
152 attrs[XFRMA_TFCPAD])
152 goto out; 153 goto out;
153 break; 154 break;
154 155
@@ -165,6 +166,9 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
165 attrs[XFRMA_ALG_CRYPT]) && 166 attrs[XFRMA_ALG_CRYPT]) &&
166 attrs[XFRMA_ALG_AEAD]) 167 attrs[XFRMA_ALG_AEAD])
167 goto out; 168 goto out;
169 if (attrs[XFRMA_TFCPAD] &&
170 p->mode != XFRM_MODE_TUNNEL)
171 goto out;
168 break; 172 break;
169 173
170 case IPPROTO_COMP: 174 case IPPROTO_COMP:
@@ -172,7 +176,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
172 attrs[XFRMA_ALG_AEAD] || 176 attrs[XFRMA_ALG_AEAD] ||
173 attrs[XFRMA_ALG_AUTH] || 177 attrs[XFRMA_ALG_AUTH] ||
174 attrs[XFRMA_ALG_AUTH_TRUNC] || 178 attrs[XFRMA_ALG_AUTH_TRUNC] ||
175 attrs[XFRMA_ALG_CRYPT]) 179 attrs[XFRMA_ALG_CRYPT] ||
180 attrs[XFRMA_TFCPAD])
176 goto out; 181 goto out;
177 break; 182 break;
178 183
@@ -186,6 +191,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
186 attrs[XFRMA_ALG_CRYPT] || 191 attrs[XFRMA_ALG_CRYPT] ||
187 attrs[XFRMA_ENCAP] || 192 attrs[XFRMA_ENCAP] ||
188 attrs[XFRMA_SEC_CTX] || 193 attrs[XFRMA_SEC_CTX] ||
194 attrs[XFRMA_TFCPAD] ||
189 !attrs[XFRMA_COADDR]) 195 !attrs[XFRMA_COADDR])
190 goto out; 196 goto out;
191 break; 197 break;
@@ -439,6 +445,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
439 goto error; 445 goto error;
440 } 446 }
441 447
448 if (attrs[XFRMA_TFCPAD])
449 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
450
442 if (attrs[XFRMA_COADDR]) { 451 if (attrs[XFRMA_COADDR]) {
443 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 452 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
444 sizeof(*x->coaddr), GFP_KERNEL); 453 sizeof(*x->coaddr), GFP_KERNEL);
@@ -688,6 +697,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
688 if (x->encap) 697 if (x->encap)
689 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 698 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
690 699
700 if (x->tfcpad)
701 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
702
691 if (xfrm_mark_put(skb, &x->mark)) 703 if (xfrm_mark_put(skb, &x->mark))
692 goto nla_put_failure; 704 goto nla_put_failure;
693 705
@@ -2122,6 +2134,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2122 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 2134 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2123 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 2135 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2124 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, 2136 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2137 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2125}; 2138};
2126 2139
2127static struct xfrm_link { 2140static struct xfrm_link {
@@ -2301,6 +2314,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
2301 l += nla_total_size(sizeof(*x->calg)); 2314 l += nla_total_size(sizeof(*x->calg));
2302 if (x->encap) 2315 if (x->encap)
2303 l += nla_total_size(sizeof(*x->encap)); 2316 l += nla_total_size(sizeof(*x->encap));
2317 if (x->tfcpad)
2318 l += nla_total_size(sizeof(x->tfcpad));
2304 if (x->security) 2319 if (x->security)
2305 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + 2320 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2306 x->security->ctx_len); 2321 x->security->ctx_len);
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c0efe102d655..af6e9f3de950 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -875,7 +875,7 @@ const char *sym_expand_string_value(const char *in)
875 symval = sym_get_string_value(sym); 875 symval = sym_get_string_value(sym);
876 } 876 }
877 877
878 newlen = strlen(res) + strlen(symval) + strlen(src); 878 newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
879 if (newlen > reslen) { 879 if (newlen > reslen) {
880 reslen = newlen; 880 reslen = newlen;
881 res = realloc(res, reslen); 881 res = realloc(res, reslen);
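The sym_expand_string_value() fix is a classic NUL-terminator off-by-one: strlen() does not count the trailing '\0', so sizing the buffer from the three strlen() sums alone left no room for the terminator when the result exactly filled it. A self-contained illustration (not the kconfig code itself):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char *a = "CONFIG_", *b = "FOO";
		size_t need = strlen(a) + strlen(b) + 1;	/* 7 + 3 + 1 = 11 */
		char *res = malloc(need);

		if (!res)
			return 1;
		snprintf(res, need, "%s%s", a, b);	/* 10 chars + '\0' */
		puts(res);
		free(res);
		return 0;
	}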
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae623494..e80da955e687 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS
39 39
40 If you are unsure as to whether this is required, answer N. 40 If you are unsure as to whether this is required, answer N.
41 41
42config SECURITY_DMESG_RESTRICT
43 bool "Restrict unprivileged access to the kernel syslog"
44 default n
45 help
46 This enforces restrictions on unprivileged users reading the kernel
47 syslog via dmesg(8).
48
49 If this option is not selected, no restrictions will be enforced
 50 unless the dmesg_restrict sysctl is explicitly set to 1.
51
52 If you are unsure how to answer this question, answer N.
53
42config SECURITY 54config SECURITY
43 bool "Enable different security models" 55 bool "Enable different security models"
44 depends on SYSFS 56 depends on SYSFS
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index cf1de4462ccd..b7106f192b75 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -922,7 +922,7 @@ static int __init apparmor_init(void)
922 error = register_security(&apparmor_ops); 922 error = register_security(&apparmor_ops);
923 if (error) { 923 if (error) {
924 AA_ERROR("Unable to register AppArmor\n"); 924 AA_ERROR("Unable to register AppArmor\n");
925 goto register_security_out; 925 goto set_init_cxt_out;
926 } 926 }
927 927
928 /* Report that AppArmor successfully initialized */ 928 /* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@ static int __init apparmor_init(void)
936 936
937 return error; 937 return error;
938 938
939set_init_cxt_out:
940 aa_free_task_context(current->real_cred->security);
941
939register_security_out: 942register_security_out:
940 aa_free_root_ns(); 943 aa_free_root_ns();
941 944
@@ -944,7 +947,6 @@ alloc_out:
944 947
945 apparmor_enabled = 0; 948 apparmor_enabled = 0;
946 return error; 949 return error;
947
948} 950}
949 951
950security_initcall(apparmor_init); 952security_initcall(apparmor_init);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 52cc865f1464..4f0eadee78b8 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -306,7 +306,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
306 return ns; 306 return ns;
307 307
308fail_unconfined: 308fail_unconfined:
309 kzfree(ns->base.name); 309 kzfree(ns->base.hname);
310fail_ns: 310fail_ns:
311 kzfree(ns); 311 kzfree(ns);
312 return NULL; 312 return NULL;
diff --git a/security/commoncap.c b/security/commoncap.c
index 5e632b4857e4..04b80f9912bf 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -895,6 +895,8 @@ int cap_syslog(int type, bool from_file)
895{ 895{
896 if (type != SYSLOG_ACTION_OPEN && from_file) 896 if (type != SYSLOG_ACTION_OPEN && from_file)
897 return 0; 897 return 0;
898 if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
899 return -EPERM;
898 if ((type != SYSLOG_ACTION_READ_ALL && 900 if ((type != SYSLOG_ACTION_READ_ALL &&
899 type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) 901 type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
900 return -EPERM; 902 return -EPERM;
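With CONFIG_SECURITY_DMESG_RESTRICT (or kernel.dmesg_restrict set to 1), cap_syslog() now refuses every syslog(2) action to callers lacking CAP_SYS_ADMIN, before the existing read-all/size-buffer exemption is considered. From user space the effect is visible through klogctl(); a quick check, assuming the glibc wrapper and a restricted kernel:

	#include <stdio.h>
	#include <errno.h>
	#include <sys/klog.h>

	int main(void)
	{
		char buf[4096];
		int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));

		if (n < 0 && errno == EPERM)
			printf("dmesg restricted: CAP_SYS_ADMIN required\n");
		else if (n >= 0)
			printf("read %d bytes of kernel log\n", n);
		return 0;
	}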
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9154cf90ae1..156ef93d6f7d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4524,11 +4524,11 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
4524 if (selinux_secmark_enabled()) 4524 if (selinux_secmark_enabled())
4525 if (avc_has_perm(sksec->sid, skb->secmark, 4525 if (avc_has_perm(sksec->sid, skb->secmark,
4526 SECCLASS_PACKET, PACKET__SEND, &ad)) 4526 SECCLASS_PACKET, PACKET__SEND, &ad))
4527 return NF_DROP; 4527 return NF_DROP_ERR(-ECONNREFUSED);
4528 4528
4529 if (selinux_policycap_netpeer) 4529 if (selinux_policycap_netpeer)
4530 if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto)) 4530 if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
4531 return NF_DROP; 4531 return NF_DROP_ERR(-ECONNREFUSED);
4532 4532
4533 return NF_ACCEPT; 4533 return NF_ACCEPT;
4534} 4534}
@@ -4585,7 +4585,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4585 secmark_perm = PACKET__SEND; 4585 secmark_perm = PACKET__SEND;
4586 break; 4586 break;
4587 default: 4587 default:
4588 return NF_DROP; 4588 return NF_DROP_ERR(-ECONNREFUSED);
4589 } 4589 }
4590 if (secmark_perm == PACKET__FORWARD_OUT) { 4590 if (secmark_perm == PACKET__FORWARD_OUT) {
4591 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) 4591 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4607,7 +4607,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4607 if (secmark_active) 4607 if (secmark_active)
4608 if (avc_has_perm(peer_sid, skb->secmark, 4608 if (avc_has_perm(peer_sid, skb->secmark,
4609 SECCLASS_PACKET, secmark_perm, &ad)) 4609 SECCLASS_PACKET, secmark_perm, &ad))
4610 return NF_DROP; 4610 return NF_DROP_ERR(-ECONNREFUSED);
4611 4611
4612 if (peerlbl_active) { 4612 if (peerlbl_active) {
4613 u32 if_sid; 4613 u32 if_sid;
@@ -4617,13 +4617,13 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4617 return NF_DROP; 4617 return NF_DROP;
4618 if (avc_has_perm(peer_sid, if_sid, 4618 if (avc_has_perm(peer_sid, if_sid,
4619 SECCLASS_NETIF, NETIF__EGRESS, &ad)) 4619 SECCLASS_NETIF, NETIF__EGRESS, &ad))
4620 return NF_DROP; 4620 return NF_DROP_ERR(-ECONNREFUSED);
4621 4621
4622 if (sel_netnode_sid(addrp, family, &node_sid)) 4622 if (sel_netnode_sid(addrp, family, &node_sid))
4623 return NF_DROP; 4623 return NF_DROP;
4624 if (avc_has_perm(peer_sid, node_sid, 4624 if (avc_has_perm(peer_sid, node_sid,
4625 SECCLASS_NODE, NODE__SENDTO, &ad)) 4625 SECCLASS_NODE, NODE__SENDTO, &ad))
4626 return NF_DROP; 4626 return NF_DROP_ERR(-ECONNREFUSED);
4627 } 4627 }
4628 4628
4629 return NF_ACCEPT; 4629 return NF_ACCEPT;
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index f7e374ec4414..1b9bf9395cfe 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -625,6 +625,8 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
625 control_cache_size, (struct hpi_control_cache_info *) 625 control_cache_size, (struct hpi_control_cache_info *)
626 &phw->control_cache[0] 626 &phw->control_cache[0]
627 ); 627 );
628 if (!phw->p_cache)
629 pao->has_control_cache = 0;
628 } else 630 } else
629 pao->has_control_cache = 0; 631 pao->has_control_cache = 0;
630 632
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 22c5fc625533..2672f6591ceb 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -644,6 +644,8 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
644 interface->control_cache.size_in_bytes, 644 interface->control_cache.size_in_bytes,
645 (struct hpi_control_cache_info *) 645 (struct hpi_control_cache_info *)
646 p_control_cache_virtual); 646 p_control_cache_virtual);
647 if (!phw->p_cache)
648 err = HPI_ERROR_MEMORY_ALLOC;
647 } 649 }
648 if (!err) { 650 if (!err) {
649 err = hpios_locked_mem_get_phys_addr(&phw-> 651 err = hpios_locked_mem_get_phys_addr(&phw->
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index dda4f1c6f658..d67f4d3db911 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -571,14 +571,20 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32
571{ 571{
572 struct hpi_control_cache *p_cache = 572 struct hpi_control_cache *p_cache =
573 kmalloc(sizeof(*p_cache), GFP_KERNEL); 573 kmalloc(sizeof(*p_cache), GFP_KERNEL);
574 if (!p_cache)
575 return NULL;
576 p_cache->p_info =
577 kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
578 GFP_KERNEL);
579 if (!p_cache->p_info) {
580 kfree(p_cache);
581 return NULL;
582 }
574 p_cache->cache_size_in_bytes = size_in_bytes; 583 p_cache->cache_size_in_bytes = size_in_bytes;
575 p_cache->control_count = number_of_controls; 584 p_cache->control_count = number_of_controls;
576 p_cache->p_cache = 585 p_cache->p_cache =
577 (struct hpi_control_cache_single *)pDSP_control_buffer; 586 (struct hpi_control_cache_single *)pDSP_control_buffer;
578 p_cache->init = 0; 587 p_cache->init = 0;
579 p_cache->p_info =
580 kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
581 GFP_KERNEL);
582 return p_cache; 588 return p_cache;
583} 589}
584 590
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 3e5ca8fb519f..e377287192aa 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -225,39 +225,25 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
225{ 225{
226 struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL); 226 struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL);
227 227
228 if (ins == NULL) 228 if (ins == NULL)
229 return NULL; 229 return NULL;
230 230
231 /* better to use vmalloc for this big table */ 231 /* better to use vmalloc for this big table */
232 ins->symbol_table.nsymbols = 0;
233 ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) * 232 ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
234 DSP_MAX_SYMBOLS); 233 DSP_MAX_SYMBOLS);
235 ins->symbol_table.highest_frag_index = 0; 234 ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
236 235 ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
237 if (ins->symbol_table.symbols == NULL) { 236 if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
238 cs46xx_dsp_spos_destroy(chip); 237 cs46xx_dsp_spos_destroy(chip);
239 goto error; 238 goto error;
240 } 239 }
241 240 ins->symbol_table.nsymbols = 0;
241 ins->symbol_table.highest_frag_index = 0;
242 ins->code.offset = 0; 242 ins->code.offset = 0;
243 ins->code.size = 0; 243 ins->code.size = 0;
244 ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
245
246 if (ins->code.data == NULL) {
247 cs46xx_dsp_spos_destroy(chip);
248 goto error;
249 }
250
251 ins->nscb = 0; 244 ins->nscb = 0;
252 ins->ntask = 0; 245 ins->ntask = 0;
253
254 ins->nmodules = 0; 246 ins->nmodules = 0;
255 ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
256
257 if (ins->modules == NULL) {
258 cs46xx_dsp_spos_destroy(chip);
259 goto error;
260 }
261 247
262 /* default SPDIF input sample rate 248 /* default SPDIF input sample rate
263 to 48000 khz */ 249 to 48000 khz */
@@ -271,8 +257,8 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
271 257
272 /* set left and right validity bits and 258 /* set left and right validity bits and
273 default channel status */ 259 default channel status */
274 ins->spdif_csuv_default = 260 ins->spdif_csuv_default =
275 ins->spdif_csuv_stream = 261 ins->spdif_csuv_stream =
276 /* byte 0 */ ((unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF & 0xff)) << 24) | 262 /* byte 0 */ ((unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF & 0xff)) << 24) |
277 /* byte 1 */ ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) | 263 /* byte 1 */ ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) |
278 /* byte 3 */ (unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) | 264 /* byte 3 */ (unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) |
@@ -281,6 +267,9 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
281 return ins; 267 return ins;
282 268
283error: 269error:
270 kfree(ins->modules);
271 kfree(ins->code.data);
272 vfree(ins->symbol_table.symbols);
284 kfree(ins); 273 kfree(ins);
285 return NULL; 274 return NULL;
286} 275}
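The two asihpi fixes and the cs46xx rework address the same class of bug, unchecked allocations: hpicmn.c now tests each kmalloc() and unwinds the earlier one on failure, while dsp_spos_create() batches its three allocations, tests them together, and releases everything on the error path, relying on kfree()/vfree() being no-ops for NULL pointers. A userspace analogue of the batched pattern (free(NULL) is likewise defined to do nothing):

	#include <stdlib.h>

	struct inst { void *syms; void *code; void *mods; };

	static struct inst *inst_create(void)
	{
		struct inst *ins = calloc(1, sizeof(*ins));

		if (!ins)
			return NULL;
		ins->syms = malloc(1024);
		ins->code = malloc(4096);
		ins->mods = malloc(512);
		if (!ins->syms || !ins->code || !ins->mods) {
			free(ins->mods);	/* free(NULL) is safe */
			free(ins->code);
			free(ins->syms);
			free(ins);
			return NULL;
		}
		return ins;
	}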
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 460fb2ef7e39..18af38ebf757 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1166,6 +1166,7 @@ static const char *cs420x_models[CS420X_MODELS] = {
1166 1166
1167static struct snd_pci_quirk cs420x_cfg_tbl[] = { 1167static struct snd_pci_quirk cs420x_cfg_tbl[] = {
1168 SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53), 1168 SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
1169 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
1169 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), 1170 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
1170 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), 1171 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
1171 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27), 1172 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index ef9af3f4ace2..1bd7a540fd49 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -425,7 +425,7 @@ exit:
425static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream) 425static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
426{ 426{
427 struct snd_pcm_substream *substream = lx_stream->stream; 427 struct snd_pcm_substream *substream = lx_stream->stream;
428 const int is_capture = lx_stream->is_capture; 428 const unsigned int is_capture = lx_stream->is_capture;
429 429
430 int err; 430 int err;
431 431
@@ -473,7 +473,7 @@ static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
473 473
474static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream) 474static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
475{ 475{
476 const int is_capture = lx_stream->is_capture; 476 const unsigned int is_capture = lx_stream->is_capture;
477 int err; 477 int err;
478 478
479 snd_printd(LXP "stopping: stopping stream\n"); 479 snd_printd(LXP "stopping: stopping stream\n");
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 51afc048961d..aea621eafbb5 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -60,7 +60,7 @@ struct lx_stream {
60 snd_pcm_uframes_t frame_pos; 60 snd_pcm_uframes_t frame_pos;
61 enum lx_stream_status status; /* free, open, running, draining 61 enum lx_stream_status status; /* free, open, running, draining
62 * pause */ 62 * pause */
63 int is_capture:1; 63 unsigned int is_capture:1;
64}; 64};
65 65
66 66
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 3086b751da4a..617f98b0cbae 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -1152,7 +1152,7 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
1152 struct lx_stream *lx_stream) 1152 struct lx_stream *lx_stream)
1153{ 1153{
1154 struct snd_pcm_substream *substream = lx_stream->stream; 1154 struct snd_pcm_substream *substream = lx_stream->stream;
1155 int is_capture = lx_stream->is_capture; 1155 const unsigned int is_capture = lx_stream->is_capture;
1156 int err; 1156 int err;
1157 unsigned long flags; 1157 unsigned long flags;
1158 1158
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 94a9d06b9027..3b5690d28b8b 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -25,8 +25,9 @@ config SND_SOC_ALL_CODECS
25 select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC 25 select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
26 select SND_SOC_CS42L51 if I2C 26 select SND_SOC_CS42L51 if I2C
27 select SND_SOC_CS4270 if I2C 27 select SND_SOC_CS4270 if I2C
28 select SND_SOC_CX20442
28 select SND_SOC_DA7210 if I2C 29 select SND_SOC_DA7210 if I2C
29 select SND_SOC_JZ4740 if SOC_JZ4740 30 select SND_SOC_JZ4740_CODEC if SOC_JZ4740
30 select SND_SOC_MAX98088 if I2C 31 select SND_SOC_MAX98088 if I2C
31 select SND_SOC_MAX9877 if I2C 32 select SND_SOC_MAX9877 if I2C
32 select SND_SOC_PCM3008 33 select SND_SOC_PCM3008
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d251ff54a2d3..c5ab8c805771 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -58,7 +58,7 @@
58 (1000000000 / ((rate * 1000) / samples)) 58 (1000000000 / ((rate * 1000) / samples))
59 59
60#define US_TO_SAMPLES(rate, us) \ 60#define US_TO_SAMPLES(rate, us) \
61 (rate / (1000000 / us)) 61 (rate / (1000000 / (us < 1000000 ? us : 1000000)))
62 62
63#define UTHR_FROM_PERIOD_SIZE(samples, playrate, burstrate) \ 63#define UTHR_FROM_PERIOD_SIZE(samples, playrate, burstrate) \
64 ((samples * 5000) / ((burstrate * 5000) / (burstrate - playrate))) 64 ((samples * 5000) / ((burstrate * 5000) / (burstrate - playrate)))
@@ -200,7 +200,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
200 u8 *value) 200 u8 *value)
201{ 201{
202 struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec); 202 struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
203 int val; 203 int val, ret = 0;
204 204
205 *value = reg & 0xff; 205 *value = reg & 0xff;
206 206
@@ -210,6 +210,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
210 if (val < 0) { 210 if (val < 0) {
211 dev_err(codec->dev, "Read failed (%d)\n", val); 211 dev_err(codec->dev, "Read failed (%d)\n", val);
212 value[0] = dac33_read_reg_cache(codec, reg); 212 value[0] = dac33_read_reg_cache(codec, reg);
213 ret = val;
213 } else { 214 } else {
214 value[0] = val; 215 value[0] = val;
215 dac33_write_reg_cache(codec, reg, val); 216 dac33_write_reg_cache(codec, reg, val);
@@ -218,7 +219,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
218 value[0] = dac33_read_reg_cache(codec, reg); 219 value[0] = dac33_read_reg_cache(codec, reg);
219 } 220 }
220 221
221 return 0; 222 return ret;
222} 223}
223 224
224static int dac33_write(struct snd_soc_codec *codec, unsigned int reg, 225static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -329,13 +330,18 @@ static void dac33_init_chip(struct snd_soc_codec *codec)
329 dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL)); 330 dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
330} 331}
331 332
332static inline void dac33_read_id(struct snd_soc_codec *codec) 333static inline int dac33_read_id(struct snd_soc_codec *codec)
333{ 334{
335 int i, ret = 0;
334 u8 reg; 336 u8 reg;
335 337
336 dac33_read(codec, DAC33_DEVICE_ID_MSB, &reg); 338 for (i = 0; i < 3; i++) {
337 dac33_read(codec, DAC33_DEVICE_ID_LSB, &reg); 339 ret = dac33_read(codec, DAC33_DEVICE_ID_MSB + i, &reg);
338 dac33_read(codec, DAC33_DEVICE_REV_ID, &reg); 340 if (ret < 0)
341 break;
342 }
343
344 return ret;
339} 345}
340 346
341static inline void dac33_soft_power(struct snd_soc_codec *codec, int power) 347static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
@@ -1076,6 +1082,9 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
1076 /* Number of samples under i2c latency */ 1082 /* Number of samples under i2c latency */
1077 dac33->alarm_threshold = US_TO_SAMPLES(rate, 1083 dac33->alarm_threshold = US_TO_SAMPLES(rate,
1078 dac33->mode1_latency); 1084 dac33->mode1_latency);
1085 nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
1086 dac33->alarm_threshold;
1087
1079 if (dac33->auto_fifo_config) { 1088 if (dac33->auto_fifo_config) {
1080 if (period_size <= dac33->alarm_threshold) 1089 if (period_size <= dac33->alarm_threshold)
1081 /* 1090 /*
@@ -1086,6 +1095,8 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
1086 ((dac33->alarm_threshold / period_size) + 1095 ((dac33->alarm_threshold / period_size) +
1087 (dac33->alarm_threshold % period_size ? 1096 (dac33->alarm_threshold % period_size ?
1088 1 : 0)); 1097 1 : 0));
1098 else if (period_size > nsample_limit)
1099 dac33->nsample = nsample_limit;
1089 else 1100 else
1090 dac33->nsample = period_size; 1101 dac33->nsample = period_size;
1091 } else { 1102 } else {
@@ -1097,8 +1108,7 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
1097 */ 1108 */
1098 dac33->nsample_max = substream->runtime->buffer_size - 1109 dac33->nsample_max = substream->runtime->buffer_size -
1099 period_size; 1110 period_size;
1100 nsample_limit = DAC33_BUFFER_SIZE_SAMPLES - 1111
1101 dac33->alarm_threshold;
1102 if (dac33->nsample_max > nsample_limit) 1112 if (dac33->nsample_max > nsample_limit)
1103 dac33->nsample_max = nsample_limit; 1113 dac33->nsample_max = nsample_limit;
1104 1114
@@ -1414,9 +1424,15 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
1414 dev_err(codec->dev, "Failed to power up codec: %d\n", ret); 1424 dev_err(codec->dev, "Failed to power up codec: %d\n", ret);
1415 goto err_power; 1425 goto err_power;
1416 } 1426 }
1417 dac33_read_id(codec); 1427 ret = dac33_read_id(codec);
1418 dac33_hard_power(codec, 0); 1428 dac33_hard_power(codec, 0);
1419 1429
1430 if (ret < 0) {
1431 dev_err(codec->dev, "Failed to read chip ID: %d\n", ret);
1432 ret = -ENODEV;
1433 goto err_power;
1434 }
1435
1420 /* Check if the IRQ number is valid and request it */ 1436 /* Check if the IRQ number is valid and request it */
1421 if (dac33->irq >= 0) { 1437 if (dac33->irq >= 0) {
1422 ret = request_irq(dac33->irq, dac33_interrupt_handler, 1438 ret = request_irq(dac33->irq, dac33_interrupt_handler,
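The US_TO_SAMPLES() change guards the integer division: in the old form, rate / (1000000 / us), any latency above one second made the inner division collapse to zero. For example, rate = 44100 and us = 2000000 gives 1000000 / 2000000 == 0 and a divide-by-zero on the outer division; with us clamped to 1000000, the same inputs yield 44100 / 1 == 44100, capping the result at one second's worth of samples, which presumably suffices for the latency values this macro sees. The dac33_read_id() rework similarly turns three ignored reads into a loop that propagates the first I2C error, so the probe can fail with -ENODEV instead of continuing against a chip that never answered.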
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 329acc1a2074..ee4fb201de60 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -119,13 +119,13 @@ static int tpa6130a2_power(int power)
119{ 119{
120 struct tpa6130a2_data *data; 120 struct tpa6130a2_data *data;
121 u8 val; 121 u8 val;
122 int ret; 122 int ret = 0;
123 123
124 BUG_ON(tpa6130a2_client == NULL); 124 BUG_ON(tpa6130a2_client == NULL);
125 data = i2c_get_clientdata(tpa6130a2_client); 125 data = i2c_get_clientdata(tpa6130a2_client);
126 126
127 mutex_lock(&data->mutex); 127 mutex_lock(&data->mutex);
128 if (power) { 128 if (power && !data->power_state) {
129 /* Power on */ 129 /* Power on */
130 if (data->power_gpio >= 0) 130 if (data->power_gpio >= 0)
131 gpio_set_value(data->power_gpio, 1); 131 gpio_set_value(data->power_gpio, 1);
@@ -153,7 +153,7 @@ static int tpa6130a2_power(int power)
153 val = tpa6130a2_read(TPA6130A2_REG_CONTROL); 153 val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
154 val &= ~TPA6130A2_SWS; 154 val &= ~TPA6130A2_SWS;
155 tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); 155 tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
156 } else { 156 } else if (!power && data->power_state) {
157 /* set SWS */ 157 /* set SWS */
158 val = tpa6130a2_read(TPA6130A2_REG_CONTROL); 158 val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
159 val |= TPA6130A2_SWS; 159 val |= TPA6130A2_SWS;
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index b4f11724a63f..aca4b1ea10bb 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -186,7 +186,6 @@ static int wm8900_volatile_register(unsigned int reg)
186{ 186{
187 switch (reg) { 187 switch (reg) {
188 case WM8900_REG_ID: 188 case WM8900_REG_ID:
189 case WM8900_REG_POWER1:
190 return 1; 189 return 1;
191 default: 190 default:
192 return 0; 191 return 0;
@@ -1200,11 +1199,6 @@ static int wm8900_probe(struct snd_soc_codec *codec)
1200 return -ENODEV; 1199 return -ENODEV;
1201 } 1200 }
1202 1201
1203 /* Read back from the chip */
1204 reg = snd_soc_read(codec, WM8900_REG_POWER1);
1205 reg = (reg >> 12) & 0xf;
1206 dev_info(codec->dev, "WM8900 revision %d\n", reg);
1207
1208 wm8900_reset(codec); 1202 wm8900_reset(codec);
1209 1203
1210 /* Turn the chip on */ 1204 /* Turn the chip on */
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 2cb81538cd91..19ca782ac970 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -123,7 +123,7 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
123 reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK; 123 reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
124 break; 124 break;
125 default: 125 default:
126 WARN(1, "Unknown DCS readback method"); 126 WARN(1, "Unknown DCS readback method\n");
127 break; 127 break;
128 } 128 }
129 129
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index a3bfb2e8b70f..73d0edd8ded9 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -79,7 +79,7 @@ static void tosa_ext_control(struct snd_soc_codec *codec)
79static int tosa_startup(struct snd_pcm_substream *substream) 79static int tosa_startup(struct snd_pcm_substream *substream)
80{ 80{
81 struct snd_soc_pcm_runtime *rtd = substream->private_data; 81 struct snd_soc_pcm_runtime *rtd = substream->private_data;
82 struct snd_soc_codec *codec = rtd->card->codec; 82 struct snd_soc_codec *codec = rtd->codec;
83 83
84 /* check the jack status at stream startup */ 84 /* check the jack status at stream startup */
85 tosa_ext_control(codec); 85 tosa_ext_control(codec);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c8f3f507f54..614a8b30d87b 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -165,8 +165,11 @@ static ssize_t pmdown_time_set(struct device *dev,
165{ 165{
166 struct snd_soc_pcm_runtime *rtd = 166 struct snd_soc_pcm_runtime *rtd =
167 container_of(dev, struct snd_soc_pcm_runtime, dev); 167 container_of(dev, struct snd_soc_pcm_runtime, dev);
168 int ret;
168 169
169 strict_strtol(buf, 10, &rtd->pmdown_time); 170 ret = strict_strtol(buf, 10, &rtd->pmdown_time);
171 if (ret)
172 return ret;
170 173
171 return count; 174 return count;
172} 175}
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 7dae05d8783e..782f741cd00a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -60,7 +60,7 @@ static const struct rc_config {
60 { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */ 60 { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */
61 { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */ 61 { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */
62 { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */ 62 { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
63 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi */ 63 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
64 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ 64 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
65}; 65};
66 66
@@ -183,7 +183,13 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
183 if (value > 1) 183 if (value > 1)
184 return -EINVAL; 184 return -EINVAL;
185 changed = value != mixer->audigy2nx_leds[index]; 185 changed = value != mixer->audigy2nx_leds[index];
186 err = snd_usb_ctl_msg(mixer->chip->dev, 186 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
187 err = snd_usb_ctl_msg(mixer->chip->dev,
188 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
189 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
190 !value, 0, NULL, 0, 100);
191 else
192 err = snd_usb_ctl_msg(mixer->chip->dev,
187 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, 193 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
188 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 194 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
189 value, index + 2, NULL, 0, 100); 195 value, index + 2, NULL, 0, 100);
@@ -225,8 +231,12 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
225 int i, err; 231 int i, err;
226 232
227 for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) { 233 for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
234 /* USB X-Fi S51 doesn't have a CMSS LED */
235 if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
236 continue;
228 if (i > 1 && /* Live24ext has 2 LEDs only */ 237 if (i > 1 && /* Live24ext has 2 LEDs only */
229 (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || 238 (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
239 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
230 mixer->chip->usb_id == USB_ID(0x041e, 0x3048))) 240 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
231 break; 241 break;
232 err = snd_ctl_add(mixer->chip->card, 242 err = snd_ctl_add(mixer->chip->card,
@@ -365,6 +375,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
365 375
366 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) || 376 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
367 mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || 377 mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
378 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
368 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) { 379 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
369 if ((err = snd_audigy2nx_controls_create(mixer)) < 0) 380 if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
370 return err; 381 return err;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index cff3a3c465d7..4132522ac90f 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -676,8 +676,10 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
676 if (!needs_knot) 676 if (!needs_knot)
677 return 0; 677 return 0;
678 678
679 subs->rate_list.count = count;
680 subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL); 679 subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
680 if (!subs->rate_list.list)
681 return -ENOMEM;
682 subs->rate_list.count = count;
681 subs->rate_list.mask = 0; 683 subs->rate_list.mask = 0;
682 count = 0; 684 count = 0;
683 list_for_each_entry(fp, &subs->fmt_list, list) { 685 list_for_each_entry(fp, &subs->fmt_list, list) {
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 122ec9dc4853..26aff6bf9e50 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,7 +8,11 @@ perf-trace - Read perf.data (created by perf record) and display trace output
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf trace' {record <script> | report <script> [args] } 11'perf trace' [<options>]
12'perf trace' [<options>] record <script> [<record-options>] <command>
13'perf trace' [<options>] report <script> [script-args]
14'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
15'perf trace' [<options>] <top-script> [script-args]
12 16
13DESCRIPTION 17DESCRIPTION
14----------- 18-----------
@@ -24,23 +28,53 @@ There are several variants of perf trace:
24 available via 'perf trace -l'). The following variants allow you to 28 available via 'perf trace -l'). The following variants allow you to
25 record and run those scripts: 29 record and run those scripts:
26 30
27 'perf trace record <script>' to record the events required for 'perf 31 'perf trace record <script> <command>' to record the events required
28 trace report'. <script> is the name displayed in the output of 32 for 'perf trace report'. <script> is the name displayed in the
29 'perf trace --list' i.e. the actual script name minus any language 33 output of 'perf trace --list' i.e. the actual script name minus any
30 extension. 34 language extension. If <command> is not specified, the events are
35 recorded using the -a (system-wide) 'perf record' option.
31 36
32 'perf trace report <script>' to run and display the results of 37 'perf trace report <script> [args]' to run and display the results
33 <script>. <script> is the name displayed in the output of 'perf 38 of <script>. <script> is the name displayed in the output of 'perf
34 trace --list' i.e. the actual script name minus any language 39 trace --list' i.e. the actual script name minus any language
35 extension. The perf.data output from a previous run of 'perf trace 40 extension. The perf.data output from a previous run of 'perf trace
36 record <script>' is used and should be present for this command to 41 record <script>' is used and should be present for this command to
37 succeed. 42 succeed. [args] refers to the (mainly optional) args expected by
43 the script.
44
45 'perf trace <script> <required-script-args> <command>' to both
46 record the events required for <script> and to run the <script>
47 using 'live-mode' i.e. without writing anything to disk. <script>
48 is the name displayed in the output of 'perf trace --list' i.e. the
49 actual script name minus any language extension. If <command> is
50 not specified, the events are recorded using the -a (system-wide)
51 'perf record' option. If <script> has any required args, they
52 should be specified before <command>. This mode doesn't allow for
53 optional script args to be specified; if optional script args are
54 desired, they can be specified using separate 'perf trace record'
55 and 'perf trace report' commands, with the stdout of the record step
56 piped to the stdin of the report script, using the '-o -' and '-i -'
57 options of the corresponding commands.
58
59 'perf trace <top-script>' to both record the events required for
60 <top-script> and to run the <top-script> using 'live-mode'
61 i.e. without writing anything to disk. <top-script> is the name
62 displayed in the output of 'perf trace --list' i.e. the actual
63 script name minus any language extension; a <top-script> is defined
64 as any script name ending with the string 'top'.
65
66 [<record-options>] can be passed to the record steps of 'perf trace
 67 record' and 'live-mode' variants; however, this isn't possible for
68 <top-script> 'live-mode' or 'perf trace report' variants.
38 69
39 See the 'SEE ALSO' section for links to language-specific 70 See the 'SEE ALSO' section for links to language-specific
40 information on how to write and run your own trace scripts. 71 information on how to write and run your own trace scripts.
41 72
42OPTIONS 73OPTIONS
43------- 74-------
75<command>...::
76 Any command you can specify in a shell.
77
44-D:: 78-D::
45--dump-raw-trace=:: 79--dump-raw-trace=::
46 Display verbose dump of the trace data. 80 Display verbose dump of the trace data.
@@ -64,6 +98,13 @@ OPTIONS
64 Generate perf-trace.[ext] starter script for given language, 98 Generate perf-trace.[ext] starter script for given language,
65 using current perf.data. 99 using current perf.data.
66 100
101-a::
102 Force system-wide collection. Scripts run without a <command>
103 normally use -a by default, while scripts run with a <command>
104 normally don't - this option allows the latter to be run in
105 system-wide mode.
106
107
67SEE ALSO 108SEE ALSO
68-------- 109--------
69linkperf:perf-record[1], linkperf:perf-trace-perl[1], 110linkperf:perf-record[1], linkperf:perf-trace-perl[1],
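Putting the documented variants together, a typical session might look like this (script names are illustrative; 'perf trace -l' lists what your build actually ships):

	perf trace record syscall-counts ls	# record the script's events while running 'ls'
	perf trace report syscall-counts	# run the script over the recorded perf.data
	perf trace syscall-counts ls		# live mode: record + report, nothing written to disk
	perf trace rwtop			# a <top-script>: system-wide live mode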
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4e75583ddd6d..93bd2ff001fb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -790,7 +790,7 @@ static const char * const record_usage[] = {
790 790
791static bool force, append_file; 791static bool force, append_file;
792 792
793static const struct option options[] = { 793const struct option record_options[] = {
794 OPT_CALLBACK('e', "event", NULL, "event", 794 OPT_CALLBACK('e', "event", NULL, "event",
795 "event selector. use 'perf list' to list available events", 795 "event selector. use 'perf list' to list available events",
796 parse_events), 796 parse_events),
@@ -839,16 +839,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
839{ 839{
840 int i, j, err = -ENOMEM; 840 int i, j, err = -ENOMEM;
841 841
842 argc = parse_options(argc, argv, options, record_usage, 842 argc = parse_options(argc, argv, record_options, record_usage,
843 PARSE_OPT_STOP_AT_NON_OPTION); 843 PARSE_OPT_STOP_AT_NON_OPTION);
844 if (!argc && target_pid == -1 && target_tid == -1 && 844 if (!argc && target_pid == -1 && target_tid == -1 &&
845 !system_wide && !cpu_list) 845 !system_wide && !cpu_list)
846 usage_with_options(record_usage, options); 846 usage_with_options(record_usage, record_options);
847 847
848 if (force && append_file) { 848 if (force && append_file) {
849 fprintf(stderr, "Can't overwrite and append at the same time." 849 fprintf(stderr, "Can't overwrite and append at the same time."
850 " You need to choose between -f and -A"); 850 " You need to choose between -f and -A");
851 usage_with_options(record_usage, options); 851 usage_with_options(record_usage, record_options);
852 } else if (append_file) { 852 } else if (append_file) {
853 write_mode = WRITE_APPEND; 853 write_mode = WRITE_APPEND;
854 } else { 854 } else {
@@ -871,7 +871,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
871 if (thread_num <= 0) { 871 if (thread_num <= 0) {
872 fprintf(stderr, "Can't find all threads of pid %d\n", 872 fprintf(stderr, "Can't find all threads of pid %d\n",
873 target_pid); 873 target_pid);
874 usage_with_options(record_usage, options); 874 usage_with_options(record_usage, record_options);
875 } 875 }
876 } else { 876 } else {
877 all_tids=malloc(sizeof(pid_t)); 877 all_tids=malloc(sizeof(pid_t));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b513e40974f4..dd625808c2a5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -69,7 +69,6 @@ static int target_tid = -1;
69static pid_t *all_tids = NULL; 69static pid_t *all_tids = NULL;
70static int thread_num = 0; 70static int thread_num = 0;
71static bool inherit = false; 71static bool inherit = false;
72static int profile_cpu = -1;
73static int nr_cpus = 0; 72static int nr_cpus = 0;
74static int realtime_prio = 0; 73static int realtime_prio = 0;
75static bool group = false; 74static bool group = false;
@@ -558,13 +557,13 @@ static void print_sym_table(void)
558 else 557 else
559 printf(" (all"); 558 printf(" (all");
560 559
561 if (profile_cpu != -1) 560 if (cpu_list)
562 printf(", cpu: %d)\n", profile_cpu); 561 printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
563 else { 562 else {
564 if (target_tid != -1) 563 if (target_tid != -1)
565 printf(")\n"); 564 printf(")\n");
566 else 565 else
567 printf(", %d CPUs)\n", nr_cpus); 566 printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
568 } 567 }
569 568
570 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 569 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@ int group_fd;
1187static void start_counter(int i, int counter) 1186static void start_counter(int i, int counter)
1188{ 1187{
1189 struct perf_event_attr *attr; 1188 struct perf_event_attr *attr;
1190 int cpu; 1189 int cpu = -1;
1191 int thread_index; 1190 int thread_index;
1192 1191
1193 cpu = profile_cpu; 1192 if (target_tid == -1)
1194 if (target_tid == -1 && profile_cpu == -1)
1195 cpu = cpumap[i]; 1193 cpu = cpumap[i];
1196 1194
1197 attr = attrs + counter; 1195 attr = attrs + counter;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2f8df45c4dcb..86cfe3800e6b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,6 +10,7 @@
10#include "util/symbol.h" 10#include "util/symbol.h"
11#include "util/thread.h" 11#include "util/thread.h"
12#include "util/trace-event.h" 12#include "util/trace-event.h"
13#include "util/parse-options.h"
13#include "util/util.h" 14#include "util/util.h"
14 15
15static char const *script_name; 16static char const *script_name;
@@ -17,6 +18,7 @@ static char const *generate_script_lang;
17static bool debug_mode; 18static bool debug_mode;
18static u64 last_timestamp; 19static u64 last_timestamp;
19static u64 nr_unordered; 20static u64 nr_unordered;
21extern const struct option record_options[];
20 22
21static int default_start_script(const char *script __unused, 23static int default_start_script(const char *script __unused,
22 int argc __unused, 24 int argc __unused,
@@ -328,7 +330,7 @@ static struct script_desc *script_desc__new(const char *name)
328{ 330{
329 struct script_desc *s = zalloc(sizeof(*s)); 331 struct script_desc *s = zalloc(sizeof(*s));
330 332
331 if (s != NULL) 333 if (s != NULL && name)
332 s->name = strdup(name); 334 s->name = strdup(name);
333 335
334 return s; 336 return s;
@@ -337,6 +339,8 @@ static struct script_desc *script_desc__new(const char *name)
337static void script_desc__delete(struct script_desc *s) 339static void script_desc__delete(struct script_desc *s)
338{ 340{
339 free(s->name); 341 free(s->name);
342 free(s->half_liner);
343 free(s->args);
340 free(s); 344 free(s);
341} 345}
342 346
@@ -537,8 +541,40 @@ static char *get_script_path(const char *script_root, const char *suffix)
537 return path; 541 return path;
538} 542}
539 543
544static bool is_top_script(const char *script_path)
545{
546 return ends_with((char *)script_path, "top") == NULL ? false : true;
547}
548
549static int has_required_arg(char *script_path)
550{
551 struct script_desc *desc;
552 int n_args = 0;
553 char *p;
554
555 desc = script_desc__new(NULL);
556
557 if (read_script_info(desc, script_path))
558 goto out;
559
560 if (!desc->args)
561 goto out;
562
563 for (p = desc->args; *p; p++)
564 if (*p == '<')
565 n_args++;
566out:
567 script_desc__delete(desc);
568
569 return n_args;
570}
571
540static const char * const trace_usage[] = { 572static const char * const trace_usage[] = {
541 "perf trace [<options>] <command>", 573 "perf trace [<options>]",
574 "perf trace [<options>] record <script> [<record-options>] <command>",
575 "perf trace [<options>] report <script> [script-args]",
576 "perf trace [<options>] <script> [<record-options>] <command>",
577 "perf trace [<options>] <top-script> [script-args]",
542 NULL 578 NULL
543}; 579};
544 580
@@ -564,50 +600,81 @@ static const struct option options[] = {
 	OPT_END()
 };
 
+static bool have_cmd(int argc, const char **argv)
+{
+	char **__argv = malloc(sizeof(const char *) * argc);
+
+	if (!__argv)
+		die("malloc");
+	memcpy(__argv, argv, sizeof(const char *) * argc);
+	argc = parse_options(argc, (const char **)__argv, record_options,
+			     NULL, PARSE_OPT_STOP_AT_NON_OPTION);
+	free(__argv);
+
+	return argc != 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
+	char *rec_script_path = NULL;
+	char *rep_script_path = NULL;
 	struct perf_session *session;
-	const char *suffix = NULL;
+	char *script_path = NULL;
 	const char **__argv;
-	char *script_path;
-	int i, err;
+	bool system_wide;
+	int i, j, err;
 
-	if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
-		if (argc < 3) {
-			fprintf(stderr,
-				"Please specify a record script\n");
-			return -1;
-		}
-		suffix = RECORD_SUFFIX;
+	setup_scripting();
+
+	argc = parse_options(argc, argv, options, trace_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+
+	if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+		rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+		if (!rec_script_path)
+			return cmd_record(argc, argv, NULL);
 	}
 
-	if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
-		if (argc < 3) {
+	if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+		rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+		if (!rep_script_path) {
 			fprintf(stderr,
-				"Please specify a report script\n");
+				"Please specify a valid report script"
+				"(see 'perf trace -l' for listing)\n");
 			return -1;
 		}
-		suffix = REPORT_SUFFIX;
 	}
 
 	/* make sure PERF_EXEC_PATH is set for scripts */
 	perf_set_argv_exec_path(perf_exec_path());
 
-	if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
-		char *record_script_path, *report_script_path;
+	if (argc && !script_name && !rec_script_path && !rep_script_path) {
 		int live_pipe[2];
+		int rep_args;
 		pid_t pid;
 
-		record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
-		if (!record_script_path) {
-			fprintf(stderr, "record script not found\n");
-			return -1;
+		rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
+		rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
+
+		if (!rec_script_path && !rep_script_path) {
+			fprintf(stderr, " Couldn't find script %s\n\n See perf"
+				" trace -l for available scripts.\n", argv[0]);
+			usage_with_options(trace_usage, options);
 		}
 
-		report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
-		if (!report_script_path) {
-			fprintf(stderr, "report script not found\n");
-			return -1;
+		if (is_top_script(argv[0])) {
+			rep_args = argc - 1;
+		} else {
+			int rec_args;
+
+			rep_args = has_required_arg(rep_script_path);
+			rec_args = (argc - 1) - rep_args;
+			if (rec_args < 0) {
+				fprintf(stderr, " %s script requires options."
+					"\n\n See perf trace -l for available "
+					"scripts and options.\n", argv[0]);
+				usage_with_options(trace_usage, options);
+			}
 		}
 
 		if (pipe(live_pipe) < 0) {
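The hunk below hinges on have_cmd(), added above: parse_options() with PARSE_OPT_STOP_AT_NON_OPTION consumes the record_options table (declared extern here, presumably exported by builtin-record.c) and leaves any trailing workload command unparsed, so an empty remainder means there is nothing to profile and the record script should get -a. A reduced standalone model of that test; it only checks for a leading dash, so the sample avoids options that take a separate value, which the real parse_options() would consume:

#include <stdbool.h>
#include <stdio.h>

/* Reduced model of have_cmd(): does a workload command follow the
 * record options?  If not, recording falls back to system-wide. */
static bool have_cmd_model(int argc, const char **argv)
{
	int i;

	for (i = 0; i < argc; i++)
		if (argv[i][0] != '-')
			return true;	/* a workload command follows */
	return false;			/* options only: go system-wide */
}

int main(void)
{
	const char *with_cmd[] = { "-v", "sleep", "1" };
	const char *no_cmd[] = { "-v" };

	/* prints "1 0": a command was found, then not */
	printf("%d %d\n", have_cmd_model(3, with_cmd),
	       have_cmd_model(1, no_cmd));
	return 0;
}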
@@ -622,60 +689,84 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
 		}
 
 		if (!pid) {
+			system_wide = true;
+			j = 0;
+
 			dup2(live_pipe[1], 1);
 			close(live_pipe[0]);
 
-			__argv = malloc(6 * sizeof(const char *));
-			__argv[0] = "/bin/sh";
-			__argv[1] = record_script_path;
-			__argv[2] = "-q";
-			__argv[3] = "-o";
-			__argv[4] = "-";
-			__argv[5] = NULL;
+			if (!is_top_script(argv[0]))
+				system_wide = !have_cmd(argc - rep_args,
+							&argv[rep_args]);
+
+			__argv = malloc((argc + 6) * sizeof(const char *));
+			if (!__argv)
+				die("malloc");
+
+			__argv[j++] = "/bin/sh";
+			__argv[j++] = rec_script_path;
+			if (system_wide)
+				__argv[j++] = "-a";
+			__argv[j++] = "-q";
+			__argv[j++] = "-o";
+			__argv[j++] = "-";
+			for (i = rep_args + 1; i < argc; i++)
+				__argv[j++] = argv[i];
+			__argv[j++] = NULL;
 
 			execvp("/bin/sh", (char **)__argv);
+			free(__argv);
 			exit(-1);
 		}
 
 		dup2(live_pipe[0], 0);
 		close(live_pipe[1]);
 
-		__argv = malloc((argc + 3) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = report_script_path;
-		for (i = 2; i < argc; i++)
-			__argv[i] = argv[i];
-		__argv[i++] = "-i";
-		__argv[i++] = "-";
-		__argv[i++] = NULL;
+		__argv = malloc((argc + 4) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		j = 0;
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = rep_script_path;
+		for (i = 1; i < rep_args + 1; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = "-i";
+		__argv[j++] = "-";
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	if (suffix) {
-		script_path = get_script_path(argv[2], suffix);
-		if (!script_path) {
-			fprintf(stderr, "script not found\n");
-			return -1;
-		}
-
-		__argv = malloc((argc + 1) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = script_path;
-		for (i = 3; i < argc; i++)
-			__argv[i - 1] = argv[i];
-		__argv[argc - 1] = NULL;
+	if (rec_script_path)
+		script_path = rec_script_path;
+	if (rep_script_path)
+		script_path = rep_script_path;
+
+	if (script_path) {
+		system_wide = false;
+		j = 0;
+
+		if (rec_script_path)
+			system_wide = !have_cmd(argc - 1, &argv[1]);
+
+		__argv = malloc((argc + 2) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = script_path;
+		if (system_wide)
+			__argv[j++] = "-a";
+		for (i = 2; i < argc; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	setup_scripting();
-
-	argc = parse_options(argc, argv, options, trace_usage,
-			     PARSE_OPT_STOP_AT_NON_OPTION);
-
 	if (symbol__init() < 0)
 		return -1;
 	if (!script_name)
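The live-mode path reworked above is a classic pipe-and-exec pattern: the record child dup2()s the pipe's write end onto stdout, the report side dup2()s the read end onto stdin, and each side exec's /bin/sh on its script. A minimal standalone sketch of that plumbing, with placeholder shell commands standing in for the record and report scripts:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int live_pipe[2];
	pid_t pid;

	if (pipe(live_pipe) < 0) {
		perror("pipe");
		return -1;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork");
		return -1;
	}

	if (!pid) {
		/* child: the "record" side streams events to stdout */
		dup2(live_pipe[1], 1);
		close(live_pipe[0]);
		execlp("/bin/sh", "sh", "-c", "echo sample-stream", (char *)NULL);
		exit(-1);
	}

	/* parent: the "report" side reads the stream from stdin */
	dup2(live_pipe[0], 0);
	close(live_pipe[1]);
	execlp("/bin/sh", "sh", "-c", "cat", (char *)NULL);
	return -1;
}

This also explains the one-line script changes that follow: the unconditional -a is dropped from every *-record wrapper because cmd_trace() now appends it only when have_cmd() finds no workload command.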
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index eb5846bcb565..8104895a7b67 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index 5bfaae5a6cba..33efc8673aae 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,3 +1,3 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
 
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 6e0b2f7755ac..7cb9db230448 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
index 6e0b2f7755ac..7cb9db230448 100644
--- a/tools/perf/scripts/perl/bin/rwtop-record
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 9f2acaaae9f0..464251a1bd7e 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
 
 
 
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index 85301f2471ff..8edda9078d5d 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
+perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index eb5846bcb565..8104895a7b67 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
index 5ecbb433caf4..b1495c9a9b20 100644
--- a/tools/perf/scripts/python/bin/futex-contention-record
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
index d931a828126b..558754b840a9 100644
--- a/tools/perf/scripts/python/bin/netdev-times-record
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e net:net_dev_xmit -e net:net_dev_queue \
+perf record -e net:net_dev_xmit -e net:net_dev_queue \
 	-e net:netif_receive_skb -e net:netif_rx \
 	-e skb:consume_skb -e skb:kfree_skb \
 	-e skb:skb_copy_datagram_iovec -e napi:napi_poll \
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
index 17a3e9bd9e8f..7493fddbe995 100644
--- a/tools/perf/scripts/python/bin/sched-migration-record
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 9706d9d40279..056c69521a38 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -104,9 +104,10 @@ out_destroy_form:
 	return rc;
 }
 
+static const char yes[] = "Yes", no[] = "No";
+
 bool ui__dialog_yesno(const char *msg)
 {
 	/* newtWinChoice should really be accepting const char pointers... */
-	char yes[] = "Yes", no[] = "No";
-	return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+	return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
 }
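Hoisting the "Yes"/"No" strings into static const storage avoids re-initializing two stack arrays on every dialog call; the casts remain only because libnewt's newtWinChoice() takes char * even though it does not modify the strings. A small standalone model of the same pattern, where window_choice() is a hypothetical stand-in for the newt call:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for newtWinChoice(): non-const parameters, but read-only. */
static int window_choice(char *button1, char *button2, char *msg)
{
	printf("%s [%s/%s]\n", msg, button1, button2);
	return 1;	/* pretend the user picked button1 */
}

static const char yes[] = "Yes", no[] = "No";

static bool dialog_yesno(const char *msg)
{
	/* casting away const is safe because the callee never writes */
	return window_choice((char *)yes, (char *)no, (char *)msg) == 1;
}

int main(void)
{
	return dialog_yesno("Quit?") ? 0 : 1;
}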